hip_filename (string, 5–84 chars) | hip_content (string, 79–9.69M chars) | cuda_filename (string, 4–83 chars) | cuda_content (string, 19–9.69M chars)
---|---|---|---|
b5e1e4400ece464c042117b04da3b5eae526f760.hip
|
// !!! This is a file automatically generated by hipify!!!
/**
* Copyright (c) 2022 Neka-Nat
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
**/
#include "cupoch/utility/device_vector.h"
namespace cupoch {
namespace utility {
#ifdef USE_RMM
void InitializeAllocator(
rmmAllocationMode_t mode,
size_t initial_pool_size,
const std::vector<int> &devices) {
static std::vector<std::shared_ptr<rmm::mr::device_memory_resource>> per_device_memory = {};
static std::vector<int> s_devices = {};
static bool is_initialized = false;
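// If the allocator was initialized before, unhook the previously installed
// per-device resources and clear the cached state before rebuilding below.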
if (is_initialized) {
rmm::mr::set_per_device_resource(rmm::cuda_device_id{0}, nullptr);
for (auto d: s_devices) {
rmm::mr::set_per_device_resource(rmm::cuda_device_id{d}, nullptr);
}
s_devices.clear();
per_device_memory.clear();
}
s_devices = devices;
if (s_devices.empty()) s_devices.push_back(0);
for (auto d: s_devices) {
hipSetDevice(d);
if (mode & CudaManagedMemory) {
auto cuda_mr = std::make_shared<rmm::mr::managed_memory_resource>();
if (mode & PoolAllocation) {
auto pool = rmm::mr::make_owning_wrapper<rmm::mr::pool_memory_resource>(cuda_mr, initial_pool_size);
per_device_memory.emplace_back(pool);
} else {
per_device_memory.emplace_back(cuda_mr);
}
} else {
auto cuda_mr = std::make_shared<rmm::mr::cuda_memory_resource>();
if (mode & PoolAllocation) {
auto pool = rmm::mr::make_owning_wrapper<rmm::mr::pool_memory_resource>(cuda_mr, initial_pool_size);
per_device_memory.emplace_back(pool);
} else {
per_device_memory.emplace_back(cuda_mr);
}
}
rmm::mr::set_per_device_resource(rmm::cuda_device_id{d}, per_device_memory.back().get());
}
is_initialized = true;
}
#endif
}
}
|
b5e1e4400ece464c042117b04da3b5eae526f760.cu
|
/**
* Copyright (c) 2022 Neka-Nat
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
**/
#include "cupoch/utility/device_vector.h"
namespace cupoch {
namespace utility {
#ifdef USE_RMM
void InitializeAllocator(
rmmAllocationMode_t mode,
size_t initial_pool_size,
const std::vector<int> &devices) {
static std::vector<std::shared_ptr<rmm::mr::device_memory_resource>> per_device_memory = {};
static std::vector<int> s_devices = {};
static bool is_initialized = false;
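// If the allocator was initialized before, unhook the previously installed
// per-device resources and clear the cached state before rebuilding below.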
if (is_initialized) {
rmm::mr::set_per_device_resource(rmm::cuda_device_id{0}, nullptr);
for (auto d: s_devices) {
rmm::mr::set_per_device_resource(rmm::cuda_device_id{d}, nullptr);
}
s_devices.clear();
per_device_memory.clear();
}
s_devices = devices;
if (s_devices.empty()) s_devices.push_back(0);
for (auto d: s_devices) {
cudaSetDevice(d);
if (mode & CudaManagedMemory) {
auto cuda_mr = std::make_shared<rmm::mr::managed_memory_resource>();
if (mode & PoolAllocation) {
auto pool = rmm::mr::make_owning_wrapper<rmm::mr::pool_memory_resource>(cuda_mr, initial_pool_size);
per_device_memory.emplace_back(pool);
} else {
per_device_memory.emplace_back(cuda_mr);
}
} else {
auto cuda_mr = std::make_shared<rmm::mr::cuda_memory_resource>();
if (mode & PoolAllocation) {
auto pool = rmm::mr::make_owning_wrapper<rmm::mr::pool_memory_resource>(cuda_mr, initial_pool_size);
per_device_memory.emplace_back(pool);
} else {
per_device_memory.emplace_back(cuda_mr);
}
}
rmm::mr::set_per_device_resource(rmm::cuda_device_id{d}, per_device_memory.back().get());
}
is_initialized = true;
}
#endif
}
}
|
b5a2fb54833e409de8351e0fd34216ea766059d6.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2009-2011, NVIDIA Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA Corporation nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
"Persistent speculative while-while" kernel used in:
"Understanding the Efficiency of Ray Traversal on GPUs",
Timo Aila and Samuli Laine,
Proc. High-Performance Graphics 2009
*/
#include "CudaTracerKernels.hpp"
//------------------------------------------------------------------------
#define NODES_ARRAY_OF_STRUCTURES // Define for AOS, comment out for SOA.
#define TRIANGLES_ARRAY_OF_STRUCTURES // Define for AOS, comment out for SOA.
#define LOAD_BALANCER_BATCH_SIZE 96 // Number of rays to fetch at a time. Must be a multiple of 32.
#define STACK_SIZE 64 // Size of the traversal stack in local memory.
extern "C" {__device__ int g_warpCounter;} // Work counter for persistent threads.
//------------------------------------------------------------------------
extern "C" __global__ void queryConfig(void)
{
#if (defined(NODES_ARRAY_OF_STRUCTURES) && defined(TRIANGLES_ARRAY_OF_STRUCTURES))
g_config.bvhLayout = BVHLayout_AOS_AOS;
#elif (defined(NODES_ARRAY_OF_STRUCTURES) && !defined(TRIANGLES_ARRAY_OF_STRUCTURES))
g_config.bvhLayout = BVHLayout_AOS_SOA;
#elif (!defined(NODES_ARRAY_OF_STRUCTURES) && defined(TRIANGLES_ARRAY_OF_STRUCTURES))
g_config.bvhLayout = BVHLayout_SOA_AOS;
#elif (!defined(NODES_ARRAY_OF_STRUCTURES) && !defined(TRIANGLES_ARRAY_OF_STRUCTURES))
g_config.bvhLayout = BVHLayout_SOA_SOA;
#endif
g_config.blockWidth = 32; // One warp per row.
g_config.blockHeight = 6; // 6*32 = 192 threads, optimal for GTX285.
g_config.usePersistentThreads = 1;
}
//------------------------------------------------------------------------
TRACE_FUNC_BVH
{
// Temporary data stored in shared memory to reduce register pressure.
__shared__ RayStruct shared[32 * MaxBlockHeight + 1];
RayStruct* aux = shared + threadIdx.x + (blockDim.x * threadIdx.y);
// Traversal stack in CUDA thread-local memory.
// Allocate 3 additional entries for spilling rarely used variables.
int traversalStack[STACK_SIZE + 3];
traversalStack[STACK_SIZE + 0] = threadIdx.x; // Forced to local mem => saves a register.
traversalStack[STACK_SIZE + 1] = threadIdx.y;
// traversalStack[STACK_SIZE + 2] holds ray index.
// Live state during traversal, stored in registers.
float origx, origy, origz; // Ray origin.
int stackPtr; // Current position in traversal stack.
int leafAddr; // First postponed leaf, non-negative if none.
int nodeAddr; // Non-negative: current internal node, negative: second postponed leaf.
int hitIndex; // Triangle index of the closest intersection, -1 if none.
float hitT; // t-value of the closest intersection.
float u, v; // UV barycentric coordinates
// Initialize persistent threads.
__shared__ volatile int nextRayArray[MaxBlockHeight]; // Current ray index in global buffer.
__shared__ volatile int rayCountArray[MaxBlockHeight]; // Number of rays in the local pool.
nextRayArray[threadIdx.y] = 0;
rayCountArray[threadIdx.y] = 0;
// Persistent threads: fetch and process rays in a loop.
do
{
int tidx = traversalStack[STACK_SIZE + 0]; // threadIdx.x
int widx = traversalStack[STACK_SIZE + 1]; // threadIdx.y
volatile int& localPoolRayCount = rayCountArray[widx];
volatile int& localPoolNextRay = nextRayArray[widx];
// Local pool is empty => fetch new rays from the global pool using lane 0.
if (tidx == 0 && localPoolRayCount <= 0)
{
localPoolNextRay = atomicAdd(&g_warpCounter, LOAD_BALANCER_BATCH_SIZE);
localPoolRayCount = LOAD_BALANCER_BATCH_SIZE;
}
// Pick 32 rays from the local pool.
// Out of work => done.
{
int rayidx = localPoolNextRay + tidx;
if (rayidx >= numRays)
break;
if (tidx == 0)
{
localPoolNextRay += 32;
localPoolRayCount -= 32;
}
// Fetch ray.
float4 o = FETCH_GLOBAL(rays, rayidx * 2 + 0, float4);
float4 d = FETCH_GLOBAL(rays, rayidx * 2 + 1, float4);
origx = o.x, origy = o.y, origz = o.z;
aux->tmin = o.w;
float ooeps = exp2f(-80.0f); // Avoid div by zero.
aux->idirx = 1.0f / (fabsf(d.x) > ooeps ? d.x : copysignf(ooeps, d.x));
aux->idiry = 1.0f / (fabsf(d.y) > ooeps ? d.y : copysignf(ooeps, d.y));
aux->idirz = 1.0f / (fabsf(d.z) > ooeps ? d.z : copysignf(ooeps, d.z));
traversalStack[STACK_SIZE + 2] = rayidx; // Spill.
// Setup traversal.
traversalStack[0] = EntrypointSentinel; // Bottom-most entry.
stackPtr = 0;
leafAddr = 0; // No postponed leaf.
nodeAddr = 0; // Start from the root.
hitIndex = -1; // No triangle intersected so far.
hitT = d.w; // tmax
}
// Traversal loop.
while (nodeAddr != EntrypointSentinel)
{
float oodx = origx * aux->idirx;
float oody = origy * aux->idiry;
float oodz = origz * aux->idirz;
// Traverse internal nodes until all SIMD lanes have found a leaf.
bool searchingLeaf = true;
while (nodeAddr >= 0 && nodeAddr != EntrypointSentinel)
{
// Fetch AABBs of the two child nodes.
#ifdef NODES_ARRAY_OF_STRUCTURES
float4 n0xy = FETCH_TEXTURE(nodesA, nodeAddr*4+0, float4); // (c0.lo.x, c0.hi.x, c0.lo.y, c0.hi.y)
float4 n1xy = FETCH_TEXTURE(nodesA, nodeAddr*4+1, float4); // (c1.lo.x, c1.hi.x, c1.lo.y, c1.hi.y)
float4 nz = FETCH_TEXTURE(nodesA, nodeAddr*4+2, float4); // (c0.lo.z, c0.hi.z, c1.lo.z, c1.hi.z)
float4 cnodes=FETCH_TEXTURE(nodesA, nodeAddr*4+3, float4);
#else
float4 n0xy = FETCH_TEXTURE(nodesA, nodeAddr, float4); // (c0.lo.x, c0.hi.x, c0.lo.y, c0.hi.y)
float4 n1xy = FETCH_TEXTURE(nodesB, nodeAddr, float4); // (c1.lo.x, c1.hi.x, c1.lo.y, c1.hi.y)
float4 nz = FETCH_TEXTURE(nodesC, nodeAddr, float4); // (c0.lo.z, c0.hi.z, c1.lo.z, c1.hi.z)
float4 cnodes=FETCH_TEXTURE(nodesD, nodeAddr, float4);
#endif
// Intersect the ray against the child nodes.
float c0lox = n0xy.x * aux->idirx - oodx;
float c0hix = n0xy.y * aux->idirx - oodx;
float c0loy = n0xy.z * aux->idiry - oody;
float c0hiy = n0xy.w * aux->idiry - oody;
float c0loz = nz.x * aux->idirz - oodz;
float c0hiz = nz.y * aux->idirz - oodz;
float c1loz = nz.z * aux->idirz - oodz;
float c1hiz = nz.w * aux->idirz - oodz;
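// Slab test: cXmin/cXmax are the latest entry / earliest exit distances along the ray;
// a child AABB is hit when cXmax >= cXmin.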
float c0min = max4(fminf(c0lox, c0hix), fminf(c0loy, c0hiy), fminf(c0loz, c0hiz), aux->tmin);
float c0max = min4(fmaxf(c0lox, c0hix), fmaxf(c0loy, c0hiy), fmaxf(c0loz, c0hiz), hitT);
float c1lox = n1xy.x * aux->idirx - oodx;
float c1hix = n1xy.y * aux->idirx - oodx;
float c1loy = n1xy.z * aux->idiry - oody;
float c1hiy = n1xy.w * aux->idiry - oody;
float c1min = max4(fminf(c1lox, c1hix), fminf(c1loy, c1hiy), fminf(c1loz, c1hiz), aux->tmin);
float c1max = min4(fmaxf(c1lox, c1hix), fmaxf(c1loy, c1hiy), fmaxf(c1loz, c1hiz), hitT);
// Decide where to go next.
// Differs from "while-while" because this just happened to produce better code here.
bool traverseChild0 = (c0max >= c0min);
bool traverseChild1 = (c1max >= c1min);
nodeAddr = __float_as_int(cnodes.x); // stored as int
int nodeAddrChild1 = __float_as_int(cnodes.y); // stored as int
if (!traverseChild1) { nodeAddrChild1 = EntrypointSentinel; }
if (!traverseChild0) { nodeAddr = nodeAddrChild1; nodeAddrChild1 = EntrypointSentinel; }
// Neither child was intersected => pop.
if (nodeAddr == EntrypointSentinel)
{
nodeAddr = traversalStack[stackPtr];
--stackPtr;
}
// Both children were intersected => push the farther one.
else if (nodeAddrChild1 != EntrypointSentinel)
{
if (c1min < c0min)
swap(nodeAddr, nodeAddrChild1);
++stackPtr;
traversalStack[stackPtr] = nodeAddrChild1;
}
// First leaf => postpone and continue traversal.
if (nodeAddr < 0 && leafAddr >= 0)
{
searchingLeaf = false;
leafAddr = nodeAddr;
nodeAddr = traversalStack[stackPtr];
--stackPtr;
}
// All SIMD lanes have found a leaf => process them.
if(!__any(searchingLeaf))
break;
}
// Process postponed leaf nodes.
while (leafAddr < 0)
{
// Fetch the start and end of the triangle list.
#ifdef NODES_ARRAY_OF_STRUCTURES
float4 leaf=FETCH_TEXTURE(nodesA, (-leafAddr-1)*4+3, float4);
#else
float4 leaf=FETCH_TEXTURE(nodesD, (-nodeAddr-1), float4);
#endif
int triAddr = __float_as_int(leaf.x); // stored as int
int triAddr2 = __float_as_int(leaf.y); // stored as int
// Intersect the ray against each triangle using Sven Woop's algorithm.
for(; triAddr < triAddr2; triAddr++)
{
// Compute and check intersection t-value.
#ifdef TRIANGLES_ARRAY_OF_STRUCTURES
float4 v00 = FETCH_GLOBAL(trisA, triAddr*4+0, float4);
float4 v11 = FETCH_GLOBAL(trisA, triAddr*4+1, float4);
#else
float4 v00 = FETCH_GLOBAL(trisA, triAddr, float4);
float4 v11 = FETCH_GLOBAL(trisB, triAddr, float4);
#endif
float dirx = 1.0f / aux->idirx;
float diry = 1.0f / aux->idiry;
float dirz = 1.0f / aux->idirz;
float Oz = v00.w - origx*v00.x - origy*v00.y - origz*v00.z;
float invDz = 1.0f / (dirx*v00.x + diry*v00.y + dirz*v00.z);
float t = Oz * invDz;
if (t > aux->tmin && t < hitT)
{
// Compute and check barycentric u.
float Ox = v11.w + origx*v11.x + origy*v11.y + origz*v11.z;
float Dx = dirx*v11.x + diry*v11.y + dirz*v11.z;
u = Ox + t*Dx;
if (u >= 0.0f)
{
// Compute and check barycentric v.
#ifdef TRIANGLES_ARRAY_OF_STRUCTURES
float4 v22 = FETCH_GLOBAL(trisA, triAddr*4+2, float4);
#else
float4 v22 = FETCH_GLOBAL(trisC, triAddr, float4);
#endif
float Oy = v22.w + origx*v22.x + origy*v22.y + origz*v22.z;
float Dy = dirx*v22.x + diry*v22.y + dirz*v22.z;
v = Oy + t*Dy;
if (v >= 0.0f && u + v <= 1.0f)
{
// Record intersection.
// Closest intersection not required => terminate.
hitT = t;
hitIndex = triAddr;
if (anyHit)
{
nodeAddr = EntrypointSentinel;
break;
}
}
}
}
} // triangle
// Another leaf was postponed => process it as well.
leafAddr = nodeAddr;
if (nodeAddr < 0)
{
nodeAddr = traversalStack[stackPtr];
--stackPtr;
}
} // leaf
} // traversal
// Remap intersected triangle index, and store the result.
if (hitIndex != -1)
hitIndex = FETCH_TEXTURE(triIndices, hitIndex, int);
STORE_RESULT(traversalStack[STACK_SIZE + 2], hitIndex, hitT, u, v);
} while(aux); // persistent threads (always true)
}
//------------------------------------------------------------------------
|
b5a2fb54833e409de8351e0fd34216ea766059d6.cu
|
/*
* Copyright (c) 2009-2011, NVIDIA Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA Corporation nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
"Persistent speculative while-while" kernel used in:
"Understanding the Efficiency of Ray Traversal on GPUs",
Timo Aila and Samuli Laine,
Proc. High-Performance Graphics 2009
*/
#include "CudaTracerKernels.hpp"
//------------------------------------------------------------------------
#define NODES_ARRAY_OF_STRUCTURES // Define for AOS, comment out for SOA.
#define TRIANGLES_ARRAY_OF_STRUCTURES // Define for AOS, comment out for SOA.
#define LOAD_BALANCER_BATCH_SIZE 96 // Number of rays to fetch at a time. Must be a multiple of 32.
#define STACK_SIZE 64 // Size of the traversal stack in local memory.
extern "C" {__device__ int g_warpCounter;} // Work counter for persistent threads.
//------------------------------------------------------------------------
extern "C" __global__ void queryConfig(void)
{
#if (defined(NODES_ARRAY_OF_STRUCTURES) && defined(TRIANGLES_ARRAY_OF_STRUCTURES))
g_config.bvhLayout = BVHLayout_AOS_AOS;
#elif (defined(NODES_ARRAY_OF_STRUCTURES) && !defined(TRIANGLES_ARRAY_OF_STRUCTURES))
g_config.bvhLayout = BVHLayout_AOS_SOA;
#elif (!defined(NODES_ARRAY_OF_STRUCTURES) && defined(TRIANGLES_ARRAY_OF_STRUCTURES))
g_config.bvhLayout = BVHLayout_SOA_AOS;
#elif (!defined(NODES_ARRAY_OF_STRUCTURES) && !defined(TRIANGLES_ARRAY_OF_STRUCTURES))
g_config.bvhLayout = BVHLayout_SOA_SOA;
#endif
g_config.blockWidth = 32; // One warp per row.
g_config.blockHeight = 6; // 6*32 = 192 threads, optimal for GTX285.
g_config.usePersistentThreads = 1;
}
//------------------------------------------------------------------------
TRACE_FUNC_BVH
{
// Temporary data stored in shared memory to reduce register pressure.
__shared__ RayStruct shared[32 * MaxBlockHeight + 1];
RayStruct* aux = shared + threadIdx.x + (blockDim.x * threadIdx.y);
// Traversal stack in CUDA thread-local memory.
// Allocate 3 additional entries for spilling rarely used variables.
int traversalStack[STACK_SIZE + 3];
traversalStack[STACK_SIZE + 0] = threadIdx.x; // Forced to local mem => saves a register.
traversalStack[STACK_SIZE + 1] = threadIdx.y;
// traversalStack[STACK_SIZE + 2] holds ray index.
// Live state during traversal, stored in registers.
float origx, origy, origz; // Ray origin.
int stackPtr; // Current position in traversal stack.
int leafAddr; // First postponed leaf, non-negative if none.
int nodeAddr; // Non-negative: current internal node, negative: second postponed leaf.
int hitIndex; // Triangle index of the closest intersection, -1 if none.
float hitT; // t-value of the closest intersection.
float u, v; // UV barycentric coordinates
// Initialize persistent threads.
__shared__ volatile int nextRayArray[MaxBlockHeight]; // Current ray index in global buffer.
__shared__ volatile int rayCountArray[MaxBlockHeight]; // Number of rays in the local pool.
nextRayArray[threadIdx.y] = 0;
rayCountArray[threadIdx.y] = 0;
// Persistent threads: fetch and process rays in a loop.
do
{
int tidx = traversalStack[STACK_SIZE + 0]; // threadIdx.x
int widx = traversalStack[STACK_SIZE + 1]; // threadIdx.y
volatile int& localPoolRayCount = rayCountArray[widx];
volatile int& localPoolNextRay = nextRayArray[widx];
// Local pool is empty => fetch new rays from the global pool using lane 0.
if (tidx == 0 && localPoolRayCount <= 0)
{
localPoolNextRay = atomicAdd(&g_warpCounter, LOAD_BALANCER_BATCH_SIZE);
localPoolRayCount = LOAD_BALANCER_BATCH_SIZE;
}
// Pick 32 rays from the local pool.
// Out of work => done.
{
int rayidx = localPoolNextRay + tidx;
if (rayidx >= numRays)
break;
if (tidx == 0)
{
localPoolNextRay += 32;
localPoolRayCount -= 32;
}
// Fetch ray.
float4 o = FETCH_GLOBAL(rays, rayidx * 2 + 0, float4);
float4 d = FETCH_GLOBAL(rays, rayidx * 2 + 1, float4);
origx = o.x, origy = o.y, origz = o.z;
aux->tmin = o.w;
float ooeps = exp2f(-80.0f); // Avoid div by zero.
aux->idirx = 1.0f / (fabsf(d.x) > ooeps ? d.x : copysignf(ooeps, d.x));
aux->idiry = 1.0f / (fabsf(d.y) > ooeps ? d.y : copysignf(ooeps, d.y));
aux->idirz = 1.0f / (fabsf(d.z) > ooeps ? d.z : copysignf(ooeps, d.z));
traversalStack[STACK_SIZE + 2] = rayidx; // Spill.
// Setup traversal.
traversalStack[0] = EntrypointSentinel; // Bottom-most entry.
stackPtr = 0;
leafAddr = 0; // No postponed leaf.
nodeAddr = 0; // Start from the root.
hitIndex = -1; // No triangle intersected so far.
hitT = d.w; // tmax
}
// Traversal loop.
while (nodeAddr != EntrypointSentinel)
{
float oodx = origx * aux->idirx;
float oody = origy * aux->idiry;
float oodz = origz * aux->idirz;
// Traverse internal nodes until all SIMD lanes have found a leaf.
bool searchingLeaf = true;
while (nodeAddr >= 0 && nodeAddr != EntrypointSentinel)
{
// Fetch AABBs of the two child nodes.
#ifdef NODES_ARRAY_OF_STRUCTURES
float4 n0xy = FETCH_TEXTURE(nodesA, nodeAddr*4+0, float4); // (c0.lo.x, c0.hi.x, c0.lo.y, c0.hi.y)
float4 n1xy = FETCH_TEXTURE(nodesA, nodeAddr*4+1, float4); // (c1.lo.x, c1.hi.x, c1.lo.y, c1.hi.y)
float4 nz = FETCH_TEXTURE(nodesA, nodeAddr*4+2, float4); // (c0.lo.z, c0.hi.z, c1.lo.z, c1.hi.z)
float4 cnodes=FETCH_TEXTURE(nodesA, nodeAddr*4+3, float4);
#else
float4 n0xy = FETCH_TEXTURE(nodesA, nodeAddr, float4); // (c0.lo.x, c0.hi.x, c0.lo.y, c0.hi.y)
float4 n1xy = FETCH_TEXTURE(nodesB, nodeAddr, float4); // (c1.lo.x, c1.hi.x, c1.lo.y, c1.hi.y)
float4 nz = FETCH_TEXTURE(nodesC, nodeAddr, float4); // (c0.lo.z, c0.hi.z, c1.lo.z, c1.hi.z)
float4 cnodes=FETCH_TEXTURE(nodesD, nodeAddr, float4);
#endif
// Intersect the ray against the child nodes.
float c0lox = n0xy.x * aux->idirx - oodx;
float c0hix = n0xy.y * aux->idirx - oodx;
float c0loy = n0xy.z * aux->idiry - oody;
float c0hiy = n0xy.w * aux->idiry - oody;
float c0loz = nz.x * aux->idirz - oodz;
float c0hiz = nz.y * aux->idirz - oodz;
float c1loz = nz.z * aux->idirz - oodz;
float c1hiz = nz.w * aux->idirz - oodz;
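// Slab test: cXmin/cXmax are the latest entry / earliest exit distances along the ray;
// a child AABB is hit when cXmax >= cXmin.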
float c0min = max4(fminf(c0lox, c0hix), fminf(c0loy, c0hiy), fminf(c0loz, c0hiz), aux->tmin);
float c0max = min4(fmaxf(c0lox, c0hix), fmaxf(c0loy, c0hiy), fmaxf(c0loz, c0hiz), hitT);
float c1lox = n1xy.x * aux->idirx - oodx;
float c1hix = n1xy.y * aux->idirx - oodx;
float c1loy = n1xy.z * aux->idiry - oody;
float c1hiy = n1xy.w * aux->idiry - oody;
float c1min = max4(fminf(c1lox, c1hix), fminf(c1loy, c1hiy), fminf(c1loz, c1hiz), aux->tmin);
float c1max = min4(fmaxf(c1lox, c1hix), fmaxf(c1loy, c1hiy), fmaxf(c1loz, c1hiz), hitT);
// Decide where to go next.
// Differs from "while-while" because this just happened to produce better code here.
bool traverseChild0 = (c0max >= c0min);
bool traverseChild1 = (c1max >= c1min);
nodeAddr = __float_as_int(cnodes.x); // stored as int
int nodeAddrChild1 = __float_as_int(cnodes.y); // stored as int
if (!traverseChild1) { nodeAddrChild1 = EntrypointSentinel; }
if (!traverseChild0) { nodeAddr = nodeAddrChild1; nodeAddrChild1 = EntrypointSentinel; }
// Neither child was intersected => pop.
if (nodeAddr == EntrypointSentinel)
{
nodeAddr = traversalStack[stackPtr];
--stackPtr;
}
// Both children were intersected => push the farther one.
else if (nodeAddrChild1 != EntrypointSentinel)
{
if (c1min < c0min)
swap(nodeAddr, nodeAddrChild1);
++stackPtr;
traversalStack[stackPtr] = nodeAddrChild1;
}
// First leaf => postpone and continue traversal.
if (nodeAddr < 0 && leafAddr >= 0)
{
searchingLeaf = false;
leafAddr = nodeAddr;
nodeAddr = traversalStack[stackPtr];
--stackPtr;
}
// All SIMD lanes have found a leaf => process them.
if(!__any(searchingLeaf))
break;
}
// Process postponed leaf nodes.
while (leafAddr < 0)
{
// Fetch the start and end of the triangle list.
#ifdef NODES_ARRAY_OF_STRUCTURES
float4 leaf=FETCH_TEXTURE(nodesA, (-leafAddr-1)*4+3, float4);
#else
float4 leaf=FETCH_TEXTURE(nodesD, (-nodeAddr-1), float4);
#endif
int triAddr = __float_as_int(leaf.x); // stored as int
int triAddr2 = __float_as_int(leaf.y); // stored as int
// Intersect the ray against each triangle using Sven Woop's algorithm.
for(; triAddr < triAddr2; triAddr++)
{
// Compute and check intersection t-value.
#ifdef TRIANGLES_ARRAY_OF_STRUCTURES
float4 v00 = FETCH_GLOBAL(trisA, triAddr*4+0, float4);
float4 v11 = FETCH_GLOBAL(trisA, triAddr*4+1, float4);
#else
float4 v00 = FETCH_GLOBAL(trisA, triAddr, float4);
float4 v11 = FETCH_GLOBAL(trisB, triAddr, float4);
#endif
float dirx = 1.0f / aux->idirx;
float diry = 1.0f / aux->idiry;
float dirz = 1.0f / aux->idirz;
float Oz = v00.w - origx*v00.x - origy*v00.y - origz*v00.z;
float invDz = 1.0f / (dirx*v00.x + diry*v00.y + dirz*v00.z);
float t = Oz * invDz;
if (t > aux->tmin && t < hitT)
{
// Compute and check barycentric u.
float Ox = v11.w + origx*v11.x + origy*v11.y + origz*v11.z;
float Dx = dirx*v11.x + diry*v11.y + dirz*v11.z;
u = Ox + t*Dx;
if (u >= 0.0f)
{
// Compute and check barycentric v.
#ifdef TRIANGLES_ARRAY_OF_STRUCTURES
float4 v22 = FETCH_GLOBAL(trisA, triAddr*4+2, float4);
#else
float4 v22 = FETCH_GLOBAL(trisC, triAddr, float4);
#endif
float Oy = v22.w + origx*v22.x + origy*v22.y + origz*v22.z;
float Dy = dirx*v22.x + diry*v22.y + dirz*v22.z;
v = Oy + t*Dy;
if (v >= 0.0f && u + v <= 1.0f)
{
// Record intersection.
// Closest intersection not required => terminate.
hitT = t;
hitIndex = triAddr;
if (anyHit)
{
nodeAddr = EntrypointSentinel;
break;
}
}
}
}
} // triangle
// Another leaf was postponed => process it as well.
leafAddr = nodeAddr;
if (nodeAddr < 0)
{
nodeAddr = traversalStack[stackPtr];
--stackPtr;
}
} // leaf
} // traversal
// Remap intersected triangle index, and store the result.
if (hitIndex != -1)
hitIndex = FETCH_TEXTURE(triIndices, hitIndex, int);
STORE_RESULT(traversalStack[STACK_SIZE + 2], hitIndex, hitT, u, v);
} while(aux); // persistent threads (always true)
}
//------------------------------------------------------------------------
|
14c362770a750d9bfdf0ce0074f315d70701ebd9.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//pass
//--blockDim=32 --gridDim=32
// IMPERIAL EDIT: this kernel was commented out
#include "common.h"
/*__global__ __device__ void rayCalc(float3 * A, float3 * u, float * prof, uint imageW, uint imageH, float df, float tPixel)
{
uint x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
uint y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
uint id = x + y * imageW;
if( x < imageW && y < imageH )
{
matrice3x4 M(MView);
Rayon R;
R.A = make_float3(M.m[0].w,M.m[1].w,M.m[2].w);
R.u = make_float3(M.m[0])*df
+ make_float3(M.m[2])*(float(x)-float(imageW)*0.5f)*tPixel
+ make_float3(M.m[1])*(float(y)-float(imageH)*0.5f)*tPixel;
R.u = normalize(R.u);
A[id] = R.A;
u[id] = R.u;
prof[id] = 1000.0f;
}
}*/
|
14c362770a750d9bfdf0ce0074f315d70701ebd9.cu
|
//pass
//--blockDim=32 --gridDim=32
// IMPERIAL EDIT: this kernel was commented out
#include "common.h"
/*__global__ __device__ void rayCalc(float3 * A, float3 * u, float * prof, uint imageW, uint imageH, float df, float tPixel)
{
uint x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
uint y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
uint id = x + y * imageW;
if( x < imageW && y < imageH )
{
matrice3x4 M(MView);
Rayon R;
R.A = make_float3(M.m[0].w,M.m[1].w,M.m[2].w);
R.u = make_float3(M.m[0])*df
+ make_float3(M.m[2])*(float(x)-float(imageW)*0.5f)*tPixel
+ make_float3(M.m[1])*(float(y)-float(imageH)*0.5f)*tPixel;
R.u = normalize(R.u);
A[id] = R.A;
u[id] = R.u;
prof[id] = 1000.0f;
}
}*/
|
9f7996048e66075e19ba69efea8c96c5b34e465e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "mvs.cuh"
#include "kernel_helpers_hip.cuh"
#include "random_gen.cuh"
#include "reduce_hip.cuh"
#include <library/cpp/cuda/wrappers/arch.cuh>
#include <library/cpp/cuda/wrappers/cub_include.h>
#include _CUB_INCLUDE(hipcub/hipcub.hpp)
#include _CUB_INCLUDE(cub/block/block_radix_sort.cuh)
#include _CUB_INCLUDE(cub/block/block_scan.cuh)
namespace NKernel {
__forceinline__ __device__ float GetSingleProbability(
float derivativeAbsoluteValue,
float threshold
) {
return (derivativeAbsoluteValue > threshold) ? 1.0f : __fdividef(derivativeAbsoluteValue, threshold);
}
template <int BLOCK_THREADS, int ITEMS_PER_THREAD>
__device__ __forceinline__ void GetThreshold(
float takenFraction,
float (&candidates)[ITEMS_PER_THREAD],
float (&prefixSum)[ITEMS_PER_THREAD],
ui32 size,
float* threshold
) {
const ui32 thisBlockSize = min(BLOCK_THREADS * ITEMS_PER_THREAD, size);
const float sampleSize = thisBlockSize * takenFraction;
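// The threshold is chosen so that the expected number of sampled items in this tile,
// i.e. the sum over items of min(1, candidate / threshold), matches sampleSize.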
__shared__ ui32 argMinBorder[BLOCK_THREADS];
__shared__ float minBorder[BLOCK_THREADS];
argMinBorder[threadIdx.x] = 0;
minBorder[threadIdx.x] = thisBlockSize;
__shared__ bool exit;
if (ITEMS_PER_THREAD * threadIdx.x <= thisBlockSize - 1 &&
ITEMS_PER_THREAD * (threadIdx.x + 1) > thisBlockSize - 1) {
const ui32 localId = thisBlockSize - 1 - threadIdx.x * ITEMS_PER_THREAD;
#pragma unroll
for (int idx = 0; idx < ITEMS_PER_THREAD; ++idx) {
if (idx == localId) {
if (candidates[idx] <= prefixSum[idx] / sampleSize) {
*threshold = prefixSum[idx] / sampleSize;
exit = true;
} else {
exit = false;
}
}
}
}
__syncthreads();
if (exit) {
return;
}
#pragma unroll
for (int k = 0; k < ITEMS_PER_THREAD; k++) {
// The cub::BlockRadixSort and hipcub::BlockScan item numbering is used here
const ui32 i = k + ITEMS_PER_THREAD * threadIdx.x;
if (i < thisBlockSize) {
const float takenSize = prefixSum[k] / candidates[k] + thisBlockSize - i - 1;
if (takenSize >= sampleSize) { // takenSize is a non-increasing function of i
minBorder[threadIdx.x] = takenSize;
argMinBorder[threadIdx.x] = i;
}
}
}
__syncthreads();
#pragma unroll
for (int s = BLOCK_THREADS >> 1; s >= 32; s >>= 1) {
if (threadIdx.x < s)
{
if (minBorder[threadIdx.x + s] < minBorder[threadIdx.x]) {
argMinBorder[threadIdx.x] = argMinBorder[threadIdx.x + s];
minBorder[threadIdx.x] = minBorder[threadIdx.x + s];
}
}
__syncthreads();
}
if (threadIdx.x < 32) {
__syncwarp();
#pragma unroll
for (int s = 32 >> 1; s > 0; s >>= 1) {
if (minBorder[threadIdx.x + s] < minBorder[threadIdx.x]) {
argMinBorder[threadIdx.x] = argMinBorder[threadIdx.x + s];
minBorder[threadIdx.x] = minBorder[threadIdx.x + s];
}
__syncwarp();
}
}
__syncthreads();
if (
ITEMS_PER_THREAD * threadIdx.x <= argMinBorder[0] &&
ITEMS_PER_THREAD * (threadIdx.x + 1) > argMinBorder[0]
) {
const int localId = argMinBorder[0] - threadIdx.x * ITEMS_PER_THREAD;
const int denom = sampleSize - (thisBlockSize - argMinBorder[0] - 1);
#pragma unroll
for (int i = 0; i < ITEMS_PER_THREAD; ++i) {
minBorder[i] = prefixSum[i];
}
*threshold = minBorder[localId] / (denom);
}
}
template <int ITEMS_PER_THREAD, int BLOCK_THREADS>
__device__ __forceinline__ void CalculateThreshold(
float takenFraction,
const float* candidates,
ui32 size,
float* threshold
) {
const int TILE_SIZE = BLOCK_THREADS * ITEMS_PER_THREAD;
using BlockRadixSort = cub::BlockRadixSort<float, BLOCK_THREADS, ITEMS_PER_THREAD>;
using BlockScan = hipcub::BlockScan<float, BLOCK_THREADS>;
__shared__ union {
typename BlockRadixSort::TempStorage Sort;
typename BlockScan::TempStorage Scan;
} tempStorage;
// Our current block's offset
int blockOffset = blockIdx.x * TILE_SIZE;
// Per-thread tile items
float items[ITEMS_PER_THREAD];
float scanItems[ITEMS_PER_THREAD];
// Load items into a blocked arrangement
int idx = blockOffset + threadIdx.x;
const float inf = std::numeric_limits<float>::max();
#pragma unroll
for (int k = 0; k < ITEMS_PER_THREAD; k++) {
if (idx < size) {
items[k] = StreamLoad(candidates + idx);
} else {
items[k] = inf;
}
idx += BLOCK_THREADS;
}
__syncthreads();
BlockRadixSort(tempStorage.Sort).Sort(items, 8);
__syncthreads();
BlockScan(tempStorage.Scan).InclusiveSum(items, scanItems);
__syncthreads();
GetThreshold<BLOCK_THREADS, ITEMS_PER_THREAD>(
takenFraction,
items,
scanItems,
size - blockOffset,
threshold
);
}
template <int ITEMS_PER_THREAD, int BLOCK_THREADS>
__launch_bounds__(BLOCK_THREADS, 1)
__global__ void CalculateThresholdImpl(
float takenFraction,
float* candidates,
ui32 size,
float* threshold
) {
CalculateThreshold<ITEMS_PER_THREAD, BLOCK_THREADS>(
takenFraction, candidates, size, threshold + blockIdx.x
);
}
template <int ITEMS_PER_THREAD, int BLOCK_THREADS>
__global__ void MvsBootstrapRadixSortImpl(
float takenFraction,
float lambda,
float* weights,
const float* ders,
ui32 size,
const ui64* seeds,
ui32 seedSize
) {
const int blockOffset = blockIdx.x * BLOCK_THREADS * ITEMS_PER_THREAD;
using BlockRadixSort = cub::BlockRadixSort<float, BLOCK_THREADS, ITEMS_PER_THREAD>;
using BlockScan = hipcub::BlockScan<float, BLOCK_THREADS>;
__shared__ union {
typename BlockRadixSort::TempStorage Sort;
typename BlockScan::TempStorage Scan;
} tempStorage;
// Per-thread tile items
float weightsPerThread[ITEMS_PER_THREAD];
float items[ITEMS_PER_THREAD];
float scanItems[ITEMS_PER_THREAD];
const int idx = blockOffset + threadIdx.x;
const float inf = sqrtf(std::numeric_limits<float>::max()) - 2 * lambda;
cub::CacheModifiedInputIterator<cub::LOAD_CS, float> inputIterator(ders);
cub::LoadDirectWarpStriped(
idx,
inputIterator,
weightsPerThread,
size,
inf
);
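#pragma unroll
// Regularize each derivative: weight = sqrt(der^2 + lambda); these regularized
// values are the candidates used for threshold selection below.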
#pragma unroll
for (int k = 0; k < ITEMS_PER_THREAD; k++) {
weightsPerThread[k] = sqrtf(
fmaf(weightsPerThread[k], weightsPerThread[k], lambda)
);
items[k] = weightsPerThread[k];
}
__syncthreads();
BlockRadixSort(tempStorage.Sort).Sort(items, 8);
__syncthreads();
BlockScan(tempStorage.Scan).InclusiveSum(items, scanItems);
__syncthreads();
__shared__ float threshold;
GetThreshold<BLOCK_THREADS, ITEMS_PER_THREAD>(
takenFraction,
items,
scanItems,
size - blockOffset,
&threshold
);
__syncthreads();
// Set Mvs weights
ui32 i = blockIdx.x * blockDim.x + threadIdx.x;
ui64 s = __ldg(seeds + i % seedSize) + blockIdx.x;
const float eps = std::numeric_limits<float>::epsilon();
#pragma unroll
for (int k = 0; k < ITEMS_PER_THREAD; k++) {
const float probability = GetSingleProbability(weightsPerThread[k], threshold);
weightsPerThread[k] = (probability > eps && NextUniformF(&s) < probability)
? __fdividef(1.0f, probability)
: 0.0f;
}
cub::CacheModifiedOutputIterator<cub::STORE_CS, float> outputIterator(weights);
cub::StoreDirectWarpStriped(
idx,
outputIterator,
weightsPerThread,
size
);
}
void MvsBootstrapRadixSort(
const float takenFraction,
const float lambda,
float* weights,
const float* ders,
ui32 size,
const ui64* seeds,
ui32 seedSize,
TCudaStream stream
) {
const ui32 blockThreads = 512;
const ui32 SCAN_ITEMS_PER_THREAD = 8192 / blockThreads;
const ui32 numBlocks = CeilDivide(size, blockThreads * SCAN_ITEMS_PER_THREAD);
{
hipLaunchKernelGGL(( MvsBootstrapRadixSortImpl<SCAN_ITEMS_PER_THREAD, blockThreads>) , dim3(numBlocks), dim3(blockThreads), 0, stream ,
takenFraction, lambda, weights, ders, size, seeds, seedSize
);
}
}
void CalculateMvsThreshold(
const float takenFraction,
float* candidates,
ui32 size,
float* threshold,
TCudaStream stream
) {
const ui32 blockThreads = 512;
const ui32 SCAN_ITEMS_PER_THREAD = 8192 / blockThreads;
const ui32 numBlocks = CeilDivide(size, blockThreads * SCAN_ITEMS_PER_THREAD);
{
hipLaunchKernelGGL(( CalculateThresholdImpl<SCAN_ITEMS_PER_THREAD, blockThreads>) , dim3(numBlocks), dim3(blockThreads), 0, stream ,
takenFraction, candidates, size, threshold
);
}
}
}
|
9f7996048e66075e19ba69efea8c96c5b34e465e.cu
|
#include "mvs.cuh"
#include "kernel_helpers.cuh"
#include "random_gen.cuh"
#include "reduce.cuh"
#include <library/cpp/cuda/wrappers/arch.cuh>
#include <library/cpp/cuda/wrappers/cub_include.h>
#include _CUB_INCLUDE(cub/cub.cuh)
#include _CUB_INCLUDE(cub/block/block_radix_sort.cuh)
#include _CUB_INCLUDE(cub/block/block_scan.cuh)
namespace NKernel {
__forceinline__ __device__ float GetSingleProbability(
float derivativeAbsoluteValue,
float threshold
) {
return (derivativeAbsoluteValue > threshold) ? 1.0f : __fdividef(derivativeAbsoluteValue, threshold);
}
template <int BLOCK_THREADS, int ITEMS_PER_THREAD>
__device__ __forceinline__ void GetThreshold(
float takenFraction,
float (&candidates)[ITEMS_PER_THREAD],
float (&prefixSum)[ITEMS_PER_THREAD],
ui32 size,
float* threshold
) {
const ui32 thisBlockSize = min(BLOCK_THREADS * ITEMS_PER_THREAD, size);
const float sampleSize = thisBlockSize * takenFraction;
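// The threshold is chosen so that the expected number of sampled items in this tile,
// i.e. the sum over items of min(1, candidate / threshold), matches sampleSize.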
__shared__ ui32 argMinBorder[BLOCK_THREADS];
__shared__ float minBorder[BLOCK_THREADS];
argMinBorder[threadIdx.x] = 0;
minBorder[threadIdx.x] = thisBlockSize;
__shared__ bool exit;
if (ITEMS_PER_THREAD * threadIdx.x <= thisBlockSize - 1 &&
ITEMS_PER_THREAD * (threadIdx.x + 1) > thisBlockSize - 1) {
const ui32 localId = thisBlockSize - 1 - threadIdx.x * ITEMS_PER_THREAD;
#pragma unroll
for (int idx = 0; idx < ITEMS_PER_THREAD; ++idx) {
if (idx == localId) {
if (candidates[idx] <= prefixSum[idx] / sampleSize) {
*threshold = prefixSum[idx] / sampleSize;
exit = true;
} else {
exit = false;
}
}
}
}
__syncthreads();
if (exit) {
return;
}
#pragma unroll
for (int k = 0; k < ITEMS_PER_THREAD; k++) {
// The cub::BlockRadixSort and cub::BlockScan item numbering is used here
const ui32 i = k + ITEMS_PER_THREAD * threadIdx.x;
if (i < thisBlockSize) {
const float takenSize = prefixSum[k] / candidates[k] + thisBlockSize - i - 1;
if (takenSize >= sampleSize) { // takenSize is a non-increasing function of i
minBorder[threadIdx.x] = takenSize;
argMinBorder[threadIdx.x] = i;
}
}
}
__syncthreads();
#pragma unroll
for (int s = BLOCK_THREADS >> 1; s >= 32; s >>= 1) {
if (threadIdx.x < s)
{
if (minBorder[threadIdx.x + s] < minBorder[threadIdx.x]) {
argMinBorder[threadIdx.x] = argMinBorder[threadIdx.x + s];
minBorder[threadIdx.x] = minBorder[threadIdx.x + s];
}
}
__syncthreads();
}
if (threadIdx.x < 32) {
__syncwarp();
#pragma unroll
for (int s = 32 >> 1; s > 0; s >>= 1) {
if (minBorder[threadIdx.x + s] < minBorder[threadIdx.x]) {
argMinBorder[threadIdx.x] = argMinBorder[threadIdx.x + s];
minBorder[threadIdx.x] = minBorder[threadIdx.x + s];
}
__syncwarp();
}
}
__syncthreads();
if (
ITEMS_PER_THREAD * threadIdx.x <= argMinBorder[0] &&
ITEMS_PER_THREAD * (threadIdx.x + 1) > argMinBorder[0]
) {
const int localId = argMinBorder[0] - threadIdx.x * ITEMS_PER_THREAD;
const int denom = sampleSize - (thisBlockSize - argMinBorder[0] - 1);
#pragma unroll
for (int i = 0; i < ITEMS_PER_THREAD; ++i) {
minBorder[i] = prefixSum[i];
}
*threshold = minBorder[localId] / (denom);
}
}
template <int ITEMS_PER_THREAD, int BLOCK_THREADS>
__device__ __forceinline__ void CalculateThreshold(
float takenFraction,
const float* candidates,
ui32 size,
float* threshold
) {
const int TILE_SIZE = BLOCK_THREADS * ITEMS_PER_THREAD;
using BlockRadixSort = cub::BlockRadixSort<float, BLOCK_THREADS, ITEMS_PER_THREAD>;
using BlockScan = cub::BlockScan<float, BLOCK_THREADS>;
__shared__ union {
typename BlockRadixSort::TempStorage Sort;
typename BlockScan::TempStorage Scan;
} tempStorage;
// Our current block's offset
int blockOffset = blockIdx.x * TILE_SIZE;
// Per-thread tile items
float items[ITEMS_PER_THREAD];
float scanItems[ITEMS_PER_THREAD];
// Load items into a blocked arrangement
int idx = blockOffset + threadIdx.x;
const float inf = std::numeric_limits<float>::max();
#pragma unroll
for (int k = 0; k < ITEMS_PER_THREAD; k++) {
if (idx < size) {
items[k] = StreamLoad(candidates + idx);
} else {
items[k] = inf;
}
idx += BLOCK_THREADS;
}
__syncthreads();
BlockRadixSort(tempStorage.Sort).Sort(items, 8);
__syncthreads();
BlockScan(tempStorage.Scan).InclusiveSum(items, scanItems);
__syncthreads();
GetThreshold<BLOCK_THREADS, ITEMS_PER_THREAD>(
takenFraction,
items,
scanItems,
size - blockOffset,
threshold
);
}
template <int ITEMS_PER_THREAD, int BLOCK_THREADS>
__launch_bounds__(BLOCK_THREADS, 1)
__global__ void CalculateThresholdImpl(
float takenFraction,
float* candidates,
ui32 size,
float* threshold
) {
CalculateThreshold<ITEMS_PER_THREAD, BLOCK_THREADS>(
takenFraction, candidates, size, threshold + blockIdx.x
);
}
template <int ITEMS_PER_THREAD, int BLOCK_THREADS>
__global__ void MvsBootstrapRadixSortImpl(
float takenFraction,
float lambda,
float* weights,
const float* ders,
ui32 size,
const ui64* seeds,
ui32 seedSize
) {
const int blockOffset = blockIdx.x * BLOCK_THREADS * ITEMS_PER_THREAD;
using BlockRadixSort = cub::BlockRadixSort<float, BLOCK_THREADS, ITEMS_PER_THREAD>;
using BlockScan = cub::BlockScan<float, BLOCK_THREADS>;
__shared__ union {
typename BlockRadixSort::TempStorage Sort;
typename BlockScan::TempStorage Scan;
} tempStorage;
// Per-thread tile items
float weightsPerThread[ITEMS_PER_THREAD];
float items[ITEMS_PER_THREAD];
float scanItems[ITEMS_PER_THREAD];
const int idx = blockOffset + threadIdx.x;
const float inf = sqrtf(std::numeric_limits<float>::max()) - 2 * lambda;
cub::CacheModifiedInputIterator<cub::LOAD_CS, float> inputIterator(ders);
cub::LoadDirectWarpStriped(
idx,
inputIterator,
weightsPerThread,
size,
inf
);
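#pragma unroll
// Regularize each derivative: weight = sqrt(der^2 + lambda); these regularized
// values are the candidates used for threshold selection below.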
#pragma unroll
for (int k = 0; k < ITEMS_PER_THREAD; k++) {
weightsPerThread[k] = sqrtf(
fmaf(weightsPerThread[k], weightsPerThread[k], lambda)
);
items[k] = weightsPerThread[k];
}
__syncthreads();
BlockRadixSort(tempStorage.Sort).Sort(items, 8);
__syncthreads();
BlockScan(tempStorage.Scan).InclusiveSum(items, scanItems);
__syncthreads();
__shared__ float threshold;
GetThreshold<BLOCK_THREADS, ITEMS_PER_THREAD>(
takenFraction,
items,
scanItems,
size - blockOffset,
&threshold
);
__syncthreads();
// Set Mvs weights
ui32 i = blockIdx.x * blockDim.x + threadIdx.x;
ui64 s = __ldg(seeds + i % seedSize) + blockIdx.x;
const float eps = std::numeric_limits<float>::epsilon();
#pragma unroll
for (int k = 0; k < ITEMS_PER_THREAD; k++) {
const float probability = GetSingleProbability(weightsPerThread[k], threshold);
weightsPerThread[k] = (probability > eps && NextUniformF(&s) < probability)
? __fdividef(1.0f, probability)
: 0.0f;
}
cub::CacheModifiedOutputIterator<cub::STORE_CS, float> outputIterator(weights);
cub::StoreDirectWarpStriped(
idx,
outputIterator,
weightsPerThread,
size
);
}
void MvsBootstrapRadixSort(
const float takenFraction,
const float lambda,
float* weights,
const float* ders,
ui32 size,
const ui64* seeds,
ui32 seedSize,
TCudaStream stream
) {
const ui32 blockThreads = 512;
const ui32 SCAN_ITEMS_PER_THREAD = 8192 / blockThreads;
const ui32 numBlocks = CeilDivide(size, blockThreads * SCAN_ITEMS_PER_THREAD);
{
MvsBootstrapRadixSortImpl<SCAN_ITEMS_PER_THREAD, blockThreads> <<< numBlocks, blockThreads, 0, stream >>> (
takenFraction, lambda, weights, ders, size, seeds, seedSize
);
}
}
void CalculateMvsThreshold(
const float takenFraction,
float* candidates,
ui32 size,
float* threshold,
TCudaStream stream
) {
const ui32 blockThreads = 512;
const ui32 SCAN_ITEMS_PER_THREAD = 8192 / blockThreads;
const ui32 numBlocks = CeilDivide(size, blockThreads * SCAN_ITEMS_PER_THREAD);
{
CalculateThresholdImpl<SCAN_ITEMS_PER_THREAD, blockThreads> <<< numBlocks, blockThreads, 0, stream >>> (
takenFraction, candidates, size, threshold
);
}
}
}
|
6a3c748512e2d6f7d8ccc9316db51016789d285d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2009-2016 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
// Maintainer: joaander
#include "CellListGPU.cuh"
#include "hoomd/extern/util/mgpucontext.h"
#include "hoomd/extern/kernels/localitysort.cuh"
/*! \file CellListGPU.cu
\brief Defines GPU kernel code for cell list generation on the GPU
*/
//! Kernel that computes the cell list on the GPU
/*! \param d_cell_size Number of particles in each cell
\param d_xyzf Cell XYZF data array
\param d_tdb Cell TDB data array
\param d_cell_orientation Particle orientation in cell list
\param d_cell_idx Particle index in cell list
\param d_conditions Conditions flags for detecting overflow and other error conditions
\param d_pos Particle position array
\param d_orientation Particle orientation array
\param d_charge Particle charge array
\param d_diameter Particle diameter array
\param d_body Particle body array
\param N Number of particles
\param n_ghost Number of ghost particles
\param Nmax Maximum number of particles that can be placed in a single cell
\param flag_charge Set to true to store charge in the flag position in \a d_xyzf
\param flag_type Set to true to store type in the flag position in \a d_xyzf
\param box Box dimensions
\param ci Indexer to compute cell id from cell grid coords
\param cli Indexer to index into \a d_xyzf and \a d_tdb
\param ghost_width Width of ghost layer
\note Optimized for Fermi
*/
__global__ void gpu_compute_cell_list_kernel(unsigned int *d_cell_size,
Scalar4 *d_xyzf,
Scalar4 *d_tdb,
Scalar4 *d_cell_orientation,
unsigned int *d_cell_idx,
uint3 *d_conditions,
const Scalar4 *d_pos,
const Scalar4 *d_orientation,
const Scalar *d_charge,
const Scalar *d_diameter,
const unsigned int *d_body,
const unsigned int N,
const unsigned int n_ghost,
const unsigned int Nmax,
const bool flag_charge,
const bool flag_type,
const BoxDim box,
const Index3D ci,
const Index2D cli,
const Scalar3 ghost_width)
{
// read in the particle that belongs to this thread
unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx >= N + n_ghost)
return;
Scalar4 postype = d_pos[idx];
Scalar3 pos = make_scalar3(postype.x, postype.y, postype.z);
Scalar flag = 0;
Scalar diameter = 0;
Scalar body = 0;
Scalar type = postype.w;
Scalar4 orientation = make_scalar4(0,0,0,0);
if (d_tdb != NULL)
{
diameter = d_diameter[idx];
body = __int_as_scalar(d_body[idx]);
}
if (d_cell_orientation != NULL)
{
orientation = d_orientation[idx];
}
if (flag_charge)
flag = d_charge[idx];
else if (flag_type)
flag = type;
else
flag = __int_as_scalar(idx);
// check for nan pos
if (isnan(pos.x) || isnan(pos.y) || isnan(pos.z))
{
(*d_conditions).y = idx+1;
return;
}
uchar3 periodic = box.getPeriodic();
Scalar3 f = box.makeFraction(pos,ghost_width);
// check if the particle is inside the unit cell + ghost layer in all dimensions
if ((f.x < Scalar(-0.00001) || f.x >= Scalar(1.00001)) ||
(f.y < Scalar(-0.00001) || f.y >= Scalar(1.00001)) ||
(f.z < Scalar(-0.00001) || f.z >= Scalar(1.00001)) )
{
// if a ghost particle is out of bounds, silently ignore it
if (idx < N)
(*d_conditions).z = idx+1;
return;
}
// find the bin each particle belongs in
int ib = (int)(f.x * ci.getW());
int jb = (int)(f.y * ci.getH());
int kb = (int)(f.z * ci.getD());
// need to handle the case where the particle is exactly at the box hi
if (ib == ci.getW() && periodic.x)
ib = 0;
if (jb == ci.getH() && periodic.y)
jb = 0;
if (kb == ci.getD() && periodic.z)
kb = 0;
unsigned int bin = ci(ib, jb, kb);
// all particles should be in a valid cell
// all particles should be in a valid cell
if (ib < 0 || ib >= (int)ci.getW() ||
jb < 0 || jb >= (int)ci.getH() ||
kb < 0 || kb >= (int)ci.getD())
{
// but ghost particles that are out of range should not produce an error
if (idx < N)
(*d_conditions).z = idx+1;
return;
}
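// Reserve a slot in the cell: atomicInc returns the previous occupancy,
// which is this particle's write index within the cell.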
unsigned int size = atomicInc(&d_cell_size[bin], 0xffffffff);
if (size < Nmax)
{
unsigned int write_pos = cli(size, bin);
d_xyzf[write_pos] = make_scalar4(pos.x, pos.y, pos.z, flag);
if (d_tdb != NULL)
d_tdb[write_pos] = make_scalar4(type, diameter, body, 0);
if (d_cell_orientation != NULL)
d_cell_orientation[write_pos] = orientation;
if (d_cell_idx != NULL)
d_cell_idx[write_pos] = idx;
}
else
{
// handle overflow
atomicMax(&(*d_conditions).x, size+1);
}
}
hipError_t gpu_compute_cell_list(unsigned int *d_cell_size,
Scalar4 *d_xyzf,
Scalar4 *d_tdb,
Scalar4 *d_cell_orientation,
unsigned int *d_cell_idx,
uint3 *d_conditions,
const Scalar4 *d_pos,
const Scalar4 *d_orientation,
const Scalar *d_charge,
const Scalar *d_diameter,
const unsigned int *d_body,
const unsigned int N,
const unsigned int n_ghost,
const unsigned int Nmax,
const bool flag_charge,
const bool flag_type,
const BoxDim& box,
const Index3D& ci,
const Index2D& cli,
const Scalar3& ghost_width,
const unsigned int block_size)
{
hipError_t err;
err = hipMemsetAsync(d_cell_size, 0, sizeof(unsigned int)*ci.getNumElements(),0);
if (err != hipSuccess)
return err;
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
{
hipFuncAttributes attr;
hipFuncGetAttributes(&attr, (const void*)gpu_compute_cell_list_kernel);
max_block_size = attr.maxThreadsPerBlock;
}
unsigned int run_block_size = min(block_size, max_block_size);
int n_blocks = (N+n_ghost)/run_block_size + 1;
hipLaunchKernelGGL(( gpu_compute_cell_list_kernel), dim3(n_blocks), dim3(run_block_size), 0, 0, d_cell_size,
d_xyzf,
d_tdb,
d_cell_orientation,
d_cell_idx,
d_conditions,
d_pos,
d_orientation,
d_charge,
d_diameter,
d_body,
N,
n_ghost,
Nmax,
flag_charge,
flag_type,
box,
ci,
cli,
ghost_width);
return hipSuccess;
}
// ********************* Following are helper functions, structs, etc for the 1x optimized cell list build
//! \internal
/*! \param a First element
\param b Second element
The two elements are swapped
*/
template<class T> __device__ inline void swap(T & a, T & b)
{
T tmp = a;
a = b;
b = tmp;
}
//! \internal
/*! \param shared Pointer to shared memory to bitonic sort
*/
template<class T, unsigned int block_size> __device__ inline void bitonic_sort(T *shared)
{
unsigned int tid = threadIdx.x;
// Parallel bitonic sort.
for (int k = 2; k <= block_size; k *= 2)
{
// Bitonic merge:
for (int j = k / 2; j>0; j /= 2)
{
int ixj = tid ^ j;
if (ixj > tid)
{
if ((tid & k) == 0)
{
if (shared[tid] > shared[ixj])
{
swap(shared[tid], shared[ixj]);
}
}
else
{
if (shared[tid] < shared[ixj])
{
swap(shared[tid], shared[ixj]);
}
}
}
__syncthreads();
}
}
}
//! \internal
/*! \brief Pair a particle and its assigned bin together for sorting
*/
struct bin_id_pair
{
unsigned int bin; //!< Cell index
unsigned int id; //!< Particle id
unsigned int start_offset; //!< Write offset
};
//! \internal
/*! \param bin Cell index
\param id Particle id
*/
__device__ inline bin_id_pair make_bin_id_pair(unsigned int bin, unsigned int id)
{
bin_id_pair res;
res.bin = bin;
res.id = id;
res.start_offset = 0;
return res;
}
//! \internal
/*! \param a First element
\param b Second element
*/
__device__ inline bool operator< (const bin_id_pair& a, const bin_id_pair& b)
{
if (a.bin == b.bin)
return (a.id < b.id);
else
return (a.bin < b.bin);
}
//! \internal
/*! \param a First element
\param b Second element
*/
__device__ inline bool operator> (const bin_id_pair& a, const bin_id_pair& b)
{
if (a.bin == b.bin)
return (a.id > b.id);
else
return (a.bin > b.bin);
}
//! \internal
/*! \param temp Temporary array in shared memory to scan
*/
template<class T, unsigned int block_size> __device__ inline void scan_naive(T *temp)
{
int thid = threadIdx.x;
int pout = 0;
int pin = 1;
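// Naive double-buffered inclusive scan (Hillis-Steele style): temp must hold
// 2*block_size elements; pout/pin select which half is read and which is written.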
for (int offset = 1; offset < block_size; offset *= 2)
{
pout = 1 - pout;
pin = 1 - pout;
__syncthreads();
temp[pout*block_size+thid] = temp[pin*block_size+thid];
if (thid >= offset)
temp[pout*block_size+thid] += temp[pin*block_size+thid - offset];
}
__syncthreads();
// bring the data back to the initial array
if (pout == 1)
{
pout = 1 - pout;
pin = 1 - pout;
temp[pout*block_size+thid] = temp[pin*block_size+thid];
__syncthreads();
}
}
__global__ void gpu_fill_indices_kernel(
unsigned int cl_size,
uint2 *d_idx,
unsigned int *d_sort_permutation,
unsigned int *d_cell_idx,
unsigned int *d_cell_size,
Index3D ci,
Index2D cli
)
{
unsigned int cell_idx = blockDim.x * blockIdx.x + threadIdx.x;
if (cell_idx >= cl_size) return;
unsigned int icell = cell_idx / cli.getW();
unsigned int pidx = UINT_MAX;
if (icell < ci.getNumElements())
{
unsigned int my_cell_size = d_cell_size[icell];
unsigned int ilocal = cell_idx % cli.getW();
if (ilocal < my_cell_size)
{
pidx = d_cell_idx[cell_idx];
}
}
// pack cell idx and particle idx into uint2
uint2 result;
result.x = icell;
result.y = pidx;
// write out result
d_idx[cell_idx] = result;
// write identity permutation
d_sort_permutation[cell_idx] = cell_idx;
}
//! Lexicographic comparison operator on uint2
struct comp_less_uint2
{
__device__ bool operator()(const uint2 a, const uint2 b)
{
return a.x < b.x || (a.x == b.x && a.y < b.y);
}
};
__global__ void gpu_apply_sorted_cell_list_order(
unsigned int cl_size,
unsigned int *d_cell_idx,
unsigned int *d_cell_idx_new,
Scalar4 *d_xyzf,
Scalar4 *d_xyzf_new,
Scalar4 *d_tdb,
Scalar4 *d_tdb_new,
Scalar4 *d_cell_orientation,
Scalar4 *d_cell_orientation_new,
unsigned int *d_sort_permutation,
Index2D cli)
{
unsigned int cell_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (cell_idx >= cl_size)
return;
unsigned int perm_idx = d_sort_permutation[cell_idx];
d_xyzf_new[cell_idx] = d_xyzf[perm_idx];
if (d_cell_idx) d_cell_idx_new[cell_idx] = d_cell_idx[perm_idx];
if (d_tdb) d_tdb_new[cell_idx] = d_tdb[perm_idx];
if (d_cell_orientation) d_cell_orientation_new[cell_idx] = d_cell_orientation[perm_idx];
}
/*! Driver function to sort the cell list on the GPU
This applies lexicographical order to cell idx, particle idx pairs
\param d_cell_size List of cell sizes
\param d_xyzf List of coordinates and flag
\param d_tdb List type diameter and body index
\param d_sort_idx Temporary array for storing the cell/particle indices to be sorted
\param d_sort_permutation Temporary array for storing the permuted cell list indices
\param ci Cell indexer
\param cli Cell list indexer
\param mgpu_context ModernGPU context
*/
hipError_t gpu_sort_cell_list(unsigned int *d_cell_size,
Scalar4 *d_xyzf,
Scalar4 *d_xyzf_new,
Scalar4 *d_tdb,
Scalar4 *d_tdb_new,
Scalar4 *d_cell_orientation,
Scalar4 *d_cell_orientation_new,
unsigned int *d_cell_idx,
unsigned int *d_cell_idx_new,
uint2 *d_sort_idx,
unsigned int *d_sort_permutation,
const Index3D ci,
const Index2D cli,
mgpu::ContextPtr mgpu_context)
{
unsigned int block_size = 256;
// fill indices table with cell idx/particle idx pairs
dim3 threads(block_size);
dim3 grid(cli.getNumElements()/block_size + 1);
hipLaunchKernelGGL(( gpu_fill_indices_kernel), dim3(grid), dim3(threads), 0, 0,
cli.getNumElements(),
d_sort_idx,
d_sort_permutation,
d_cell_idx,
d_cell_size,
ci,
cli);
// locality sort on those pairs
mgpu::LocalitySortPairs(d_sort_idx, d_sort_permutation, cli.getNumElements(), *mgpu_context, comp_less_uint2());
// apply sorted order
hipLaunchKernelGGL(( gpu_apply_sorted_cell_list_order), dim3(grid), dim3(threads), 0, 0,
cli.getNumElements(),
d_cell_idx,
d_cell_idx_new,
d_xyzf,
d_xyzf_new,
d_tdb,
d_tdb_new,
d_cell_orientation,
d_cell_orientation_new,
d_sort_permutation,
cli);
// copy back permuted arrays to original ones
hipMemcpy(d_xyzf, d_xyzf_new, sizeof(Scalar4)*cli.getNumElements(), hipMemcpyDeviceToDevice);
hipMemcpy(d_cell_idx, d_cell_idx_new, sizeof(unsigned int)*cli.getNumElements(), hipMemcpyDeviceToDevice);
if (d_tdb)
{
hipMemcpy(d_tdb, d_tdb_new, sizeof(Scalar4)*cli.getNumElements(), hipMemcpyDeviceToDevice);
}
if (d_cell_orientation)
{
hipMemcpy(d_cell_orientation, d_cell_orientation_new, sizeof(Scalar4)*cli.getNumElements(), hipMemcpyDeviceToDevice);
}
return hipSuccess;
}
|
6a3c748512e2d6f7d8ccc9316db51016789d285d.cu
|
// Copyright (c) 2009-2016 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
// Maintainer: joaander
#include "CellListGPU.cuh"
#include "hoomd/extern/util/mgpucontext.h"
#include "hoomd/extern/kernels/localitysort.cuh"
/*! \file CellListGPU.cu
\brief Defines GPU kernel code for cell list generation on the GPU
*/
//! Kernel that computes the cell list on the GPU
/*! \param d_cell_size Number of particles in each cell
\param d_xyzf Cell XYZF data array
\param d_tdb Cell TDB data array
\param d_cell_orientation Particle orientation in cell list
\param d_cell_idx Particle index in cell list
\param d_conditions Conditions flags for detecting overflow and other error conditions
\param d_pos Particle position array
\param d_orientation Particle orientation array
\param d_charge Particle charge array
\param d_diameter Particle diameter array
\param d_body Particle body array
\param N Number of particles
\param n_ghost Number of ghost particles
\param Nmax Maximum number of particles that can be placed in a single cell
    \param flag_charge Set to true to store charge in the flag position in \a d_xyzf
\param flag_type Set to true to store type in the flag position in \a d_xyzf
\param box Box dimensions
\param ci Indexer to compute cell id from cell grid coords
\param cli Indexer to index into \a d_xyzf and \a d_tdb
\param ghost_width Width of ghost layer
\note Optimized for Fermi
*/
__global__ void gpu_compute_cell_list_kernel(unsigned int *d_cell_size,
Scalar4 *d_xyzf,
Scalar4 *d_tdb,
Scalar4 *d_cell_orientation,
unsigned int *d_cell_idx,
uint3 *d_conditions,
const Scalar4 *d_pos,
const Scalar4 *d_orientation,
const Scalar *d_charge,
const Scalar *d_diameter,
const unsigned int *d_body,
const unsigned int N,
const unsigned int n_ghost,
const unsigned int Nmax,
const bool flag_charge,
const bool flag_type,
const BoxDim box,
const Index3D ci,
const Index2D cli,
const Scalar3 ghost_width)
{
// read in the particle that belongs to this thread
unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx >= N + n_ghost)
return;
Scalar4 postype = d_pos[idx];
Scalar3 pos = make_scalar3(postype.x, postype.y, postype.z);
Scalar flag = 0;
Scalar diameter = 0;
Scalar body = 0;
Scalar type = postype.w;
Scalar4 orientation = make_scalar4(0,0,0,0);
if (d_tdb != NULL)
{
diameter = d_diameter[idx];
body = __int_as_scalar(d_body[idx]);
}
if (d_cell_orientation != NULL)
{
orientation = d_orientation[idx];
}
if (flag_charge)
flag = d_charge[idx];
else if (flag_type)
flag = type;
else
flag = __int_as_scalar(idx);
// check for nan pos
if (isnan(pos.x) || isnan(pos.y) || isnan(pos.z))
{
(*d_conditions).y = idx+1;
return;
}
uchar3 periodic = box.getPeriodic();
Scalar3 f = box.makeFraction(pos,ghost_width);
// check if the particle is inside the unit cell + ghost layer in all dimensions
if ((f.x < Scalar(-0.00001) || f.x >= Scalar(1.00001)) ||
(f.y < Scalar(-0.00001) || f.y >= Scalar(1.00001)) ||
(f.z < Scalar(-0.00001) || f.z >= Scalar(1.00001)) )
{
// if a ghost particle is out of bounds, silently ignore it
if (idx < N)
(*d_conditions).z = idx+1;
return;
}
// find the bin each particle belongs in
int ib = (int)(f.x * ci.getW());
int jb = (int)(f.y * ci.getH());
int kb = (int)(f.z * ci.getD());
// need to handle the case where the particle is exactly at the box hi
if (ib == ci.getW() && periodic.x)
ib = 0;
if (jb == ci.getH() && periodic.y)
jb = 0;
if (kb == ci.getD() && periodic.z)
kb = 0;
unsigned int bin = ci(ib, jb, kb);
    // all particles should be in a valid cell
if (ib < 0 || ib >= (int)ci.getW() ||
jb < 0 || jb >= (int)ci.getH() ||
kb < 0 || kb >= (int)ci.getD())
{
// but ghost particles that are out of range should not produce an error
if (idx < N)
(*d_conditions).z = idx+1;
return;
}
unsigned int size = atomicInc(&d_cell_size[bin], 0xffffffff);
if (size < Nmax)
{
unsigned int write_pos = cli(size, bin);
d_xyzf[write_pos] = make_scalar4(pos.x, pos.y, pos.z, flag);
if (d_tdb != NULL)
d_tdb[write_pos] = make_scalar4(type, diameter, body, 0);
if (d_cell_orientation != NULL)
d_cell_orientation[write_pos] = orientation;
if (d_cell_idx != NULL)
d_cell_idx[write_pos] = idx;
}
else
{
// handle overflow
atomicMax(&(*d_conditions).x, size+1);
}
}
cudaError_t gpu_compute_cell_list(unsigned int *d_cell_size,
Scalar4 *d_xyzf,
Scalar4 *d_tdb,
Scalar4 *d_cell_orientation,
unsigned int *d_cell_idx,
uint3 *d_conditions,
const Scalar4 *d_pos,
const Scalar4 *d_orientation,
const Scalar *d_charge,
const Scalar *d_diameter,
const unsigned int *d_body,
const unsigned int N,
const unsigned int n_ghost,
const unsigned int Nmax,
const bool flag_charge,
const bool flag_type,
const BoxDim& box,
const Index3D& ci,
const Index2D& cli,
const Scalar3& ghost_width,
const unsigned int block_size)
{
cudaError_t err;
err = cudaMemsetAsync(d_cell_size, 0, sizeof(unsigned int)*ci.getNumElements(),0);
if (err != cudaSuccess)
return err;
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
{
cudaFuncAttributes attr;
cudaFuncGetAttributes(&attr, (const void*)gpu_compute_cell_list_kernel);
max_block_size = attr.maxThreadsPerBlock;
}
unsigned int run_block_size = min(block_size, max_block_size);
int n_blocks = (N+n_ghost)/run_block_size + 1;
gpu_compute_cell_list_kernel<<<n_blocks, run_block_size>>>(d_cell_size,
d_xyzf,
d_tdb,
d_cell_orientation,
d_cell_idx,
d_conditions,
d_pos,
d_orientation,
d_charge,
d_diameter,
d_body,
N,
n_ghost,
Nmax,
flag_charge,
flag_type,
box,
ci,
cli,
ghost_width);
return cudaSuccess;
}
// ********************* Following are helper functions, structs, etc for the 1x optimized cell list build
//! \internal
/*! \param a First element
\param b Second element
The two elements are swapped
*/
template<class T> __device__ inline void swap(T & a, T & b)
{
T tmp = a;
a = b;
b = tmp;
}
//! \internal
/*! \param shared Pointer to shared memory to bitonic sort
*/
template<class T, unsigned int block_size> __device__ inline void bitonic_sort(T *shared)
{
unsigned int tid = threadIdx.x;
// Parallel bitonic sort.
for (int k = 2; k <= block_size; k *= 2)
{
// Bitonic merge:
for (int j = k / 2; j>0; j /= 2)
{
int ixj = tid ^ j;
if (ixj > tid)
{
if ((tid & k) == 0)
{
if (shared[tid] > shared[ixj])
{
swap(shared[tid], shared[ixj]);
}
}
else
{
if (shared[tid] < shared[ixj])
{
swap(shared[tid], shared[ixj]);
}
}
}
__syncthreads();
}
}
}
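//! \internal
/*! Illustrative usage sketch only (not part of the cell list build itself): a minimal
    kernel showing how bitonic_sort could be applied to one block of keys. It assumes
    blockDim.x == block_size and that block_size is a power of two; the kernel name and
    the key type are example choices.
*/
template<unsigned int block_size>
__global__ void example_block_bitonic_sort(unsigned int *d_keys)
    {
    // stage one key per thread into shared memory
    __shared__ unsigned int s_keys[block_size];
    unsigned int tid = threadIdx.x;
    s_keys[tid] = d_keys[blockIdx.x * block_size + tid];
    __syncthreads();
    // sort this block's keys in place using the helper above
    bitonic_sort<unsigned int, block_size>(s_keys);
    // write the sorted keys back to global memory
    d_keys[blockIdx.x * block_size + tid] = s_keys[tid];
    }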
//! \internal
/*! \brief Pair a particle and its assigned bin together for sorting
*/
struct bin_id_pair
{
unsigned int bin; //!< Cell index
unsigned int id; //!< Particle id
unsigned int start_offset; //!< Write offset
};
//! \internal
/*! \param bin Cell index
\param id Particle id
*/
__device__ inline bin_id_pair make_bin_id_pair(unsigned int bin, unsigned int id)
{
bin_id_pair res;
res.bin = bin;
res.id = id;
res.start_offset = 0;
return res;
}
//! \internal
/*! \param a First element
\param b Second element
*/
__device__ inline bool operator< (const bin_id_pair& a, const bin_id_pair& b)
{
if (a.bin == b.bin)
return (a.id < b.id);
else
return (a.bin < b.bin);
}
//! \internal
/*! \param a First element
\param b Second element
*/
__device__ inline bool operator> (const bin_id_pair& a, const bin_id_pair& b)
{
if (a.bin == b.bin)
return (a.id > b.id);
else
return (a.bin > b.bin);
}
//! \internal
/*! \param temp Temporary array in shared memory to scan
*/
template<class T, unsigned int block_size> __device__ inline void scan_naive(T *temp)
{
int thid = threadIdx.x;
int pout = 0;
int pin = 1;
for (int offset = 1; offset < block_size; offset *= 2)
{
pout = 1 - pout;
pin = 1 - pout;
__syncthreads();
temp[pout*block_size+thid] = temp[pin*block_size+thid];
if (thid >= offset)
temp[pout*block_size+thid] += temp[pin*block_size+thid - offset];
}
__syncthreads();
// bring the data back to the initial array
if (pout == 1)
{
pout = 1 - pout;
pin = 1 - pout;
temp[pout*block_size+thid] = temp[pin*block_size+thid];
__syncthreads();
}
}
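// Note on scan_naive: the temp array must provide 2*block_size shared elements (the two
// halves are used as ping-pong buffers), and the result left in temp[0..block_size-1] is
// the inclusive prefix sum of the input. For example, with block_size = 4 and input
// {1, 2, 3, 4} the scan leaves {1, 3, 6, 10}.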
__global__ void gpu_fill_indices_kernel(
unsigned int cl_size,
uint2 *d_idx,
unsigned int *d_sort_permutation,
unsigned int *d_cell_idx,
unsigned int *d_cell_size,
Index3D ci,
Index2D cli
)
{
unsigned int cell_idx = blockDim.x * blockIdx.x + threadIdx.x;
if (cell_idx >= cl_size) return;
unsigned int icell = cell_idx / cli.getW();
unsigned int pidx = UINT_MAX;
if (icell < ci.getNumElements())
{
unsigned int my_cell_size = d_cell_size[icell];
unsigned int ilocal = cell_idx % cli.getW();
if (ilocal < my_cell_size)
{
pidx = d_cell_idx[cell_idx];
}
}
// pack cell idx and particle idx into uint2
uint2 result;
result.x = icell;
result.y = pidx;
// write out result
d_idx[cell_idx] = result;
// write identity permutation
d_sort_permutation[cell_idx] = cell_idx;
}
//! Lexicographic comparison operator on uint2
struct comp_less_uint2
{
__device__ bool operator()(const uint2 a, const uint2 b)
{
return a.x < b.x || (a.x == b.x && a.y < b.y);
}
};
__global__ void gpu_apply_sorted_cell_list_order(
unsigned int cl_size,
unsigned int *d_cell_idx,
unsigned int *d_cell_idx_new,
Scalar4 *d_xyzf,
Scalar4 *d_xyzf_new,
Scalar4 *d_tdb,
Scalar4 *d_tdb_new,
Scalar4 *d_cell_orientation,
Scalar4 *d_cell_orientation_new,
unsigned int *d_sort_permutation,
Index2D cli)
{
unsigned int cell_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (cell_idx >= cl_size)
return;
unsigned int perm_idx = d_sort_permutation[cell_idx];
d_xyzf_new[cell_idx] = d_xyzf[perm_idx];
if (d_cell_idx) d_cell_idx_new[cell_idx] = d_cell_idx[perm_idx];
if (d_tdb) d_tdb_new[cell_idx] = d_tdb[perm_idx];
if (d_cell_orientation) d_cell_orientation_new[cell_idx] = d_cell_orientation[perm_idx];
}
/*! Driver function to sort the cell list on the GPU
This applies lexicographical order to cell idx, particle idx pairs
\param d_cell_size List of cell sizes
\param d_xyzf List of coordinates and flag
    \param d_tdb List of type, diameter, and body index
\param d_sort_idx Temporary array for storing the cell/particle indices to be sorted
\param d_sort_permutation Temporary array for storing the permuted cell list indices
\param ci Cell indexer
\param cli Cell list indexer
\param mgpu_context ModernGPU context
*/
cudaError_t gpu_sort_cell_list(unsigned int *d_cell_size,
Scalar4 *d_xyzf,
Scalar4 *d_xyzf_new,
Scalar4 *d_tdb,
Scalar4 *d_tdb_new,
Scalar4 *d_cell_orientation,
Scalar4 *d_cell_orientation_new,
unsigned int *d_cell_idx,
unsigned int *d_cell_idx_new,
uint2 *d_sort_idx,
unsigned int *d_sort_permutation,
const Index3D ci,
const Index2D cli,
mgpu::ContextPtr mgpu_context)
{
unsigned int block_size = 256;
// fill indices table with cell idx/particle idx pairs
dim3 threads(block_size);
dim3 grid(cli.getNumElements()/block_size + 1);
gpu_fill_indices_kernel<<<grid, threads>>>
(
cli.getNumElements(),
d_sort_idx,
d_sort_permutation,
d_cell_idx,
d_cell_size,
ci,
cli);
// locality sort on those pairs
mgpu::LocalitySortPairs(d_sort_idx, d_sort_permutation, cli.getNumElements(), *mgpu_context, comp_less_uint2());
// apply sorted order
gpu_apply_sorted_cell_list_order<<<grid, threads>>>(
cli.getNumElements(),
d_cell_idx,
d_cell_idx_new,
d_xyzf,
d_xyzf_new,
d_tdb,
d_tdb_new,
d_cell_orientation,
d_cell_orientation_new,
d_sort_permutation,
cli);
// copy back permuted arrays to original ones
cudaMemcpy(d_xyzf, d_xyzf_new, sizeof(Scalar4)*cli.getNumElements(), cudaMemcpyDeviceToDevice);
cudaMemcpy(d_cell_idx, d_cell_idx_new, sizeof(unsigned int)*cli.getNumElements(), cudaMemcpyDeviceToDevice);
if (d_tdb)
{
cudaMemcpy(d_tdb, d_tdb_new, sizeof(Scalar4)*cli.getNumElements(), cudaMemcpyDeviceToDevice);
}
if (d_cell_orientation)
{
cudaMemcpy(d_cell_orientation, d_cell_orientation_new, sizeof(Scalar4)*cli.getNumElements(), cudaMemcpyDeviceToDevice);
}
return cudaSuccess;
}
|
923cb379c082186d90b743bce97e555f489dfd5c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include "svmTrain.h"
#include "parse.hpp"
#include <iostream>
#include <hip/hip_runtime.h>
#include <rocblas.h>
#include <hip/hip_runtime.h>
#include <cblas.h>
#include <vector>
#include <string.h>
#include <getopt.h>
#include <math.h>
#include <vector>
#include "CycleTimer.h"
#include "svmTrainMain.hpp"
#include "cache.hpp"
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <thrust/fill.h>
#include <thrust/sequence.h>
#include <thrust/for_each.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/inner_product.h>
#include <thrust/extrema.h>
#include <thrust/execution_policy.h>
#include <thrust/transform_reduce.h>
#include <thrust/functional.h>
#define IDX2C(i,j,ld) (((j)*(ld))+(i))
using namespace std;
// Scalars
const float alpha = 1;
const float beta = 0;
//functor for obtaining the I sets
struct arbitrary_functor
{
const float C;
arbitrary_functor(float _c) : C(_c) {}
template <typename Tuple>
__host__ __device__
void operator()(Tuple t)
{
thrust::get<3>(t).I_1 = thrust::get<3>(t).I_2 = thrust::get<4>(t);
//i_helper new;
// I_set[i] = Alpha[i], Y[i] , f[i], I_set1[i], I_set2[i];
if(thrust::get<0>(t) == 0) {
if(thrust::get<1>(t) == 1) {
thrust::get<3>(t).f_1 = thrust::get<2>(t);
thrust::get<3>(t).f_2 = -1000000000;
}
else {
thrust::get<3>(t).f_2 = thrust::get<2>(t);
thrust::get<3>(t).f_1 = 1000000000;
}
} else if(thrust::get<0>(t) == C) {
if(thrust::get<1>(t) == -1) {
thrust::get<3>(t).f_1 = thrust::get<2>(t);
thrust::get<3>(t).f_2 = -1000000000;
}
else {
thrust::get<3>(t).f_2 = thrust::get<2>(t);
thrust::get<3>(t).f_1 = 1000000000;
}
} else {
thrust::get<3>(t).f_1 = thrust::get<3>(t).f_2 = thrust::get<2>(t);
}
}
};
//functor for performing the f_update step in GPU using Thrust
struct update_functor
{
const float gamma;
const float alpha_lo_old;
const float alpha_hi_old;
const float alpha_lo_new;
const float alpha_hi_new;
const int y_lo;
const int y_hi;
const float x_hi_sq;
const float x_lo_sq;
update_functor(float _gamma, float _alpha_lo_old, float _alpha_hi_old, float _alpha_lo_new, float _alpha_hi_new, int _y_lo, int _y_hi, float _x_hi_sq, float _x_lo_sq) :
gamma(_gamma),
alpha_lo_old(_alpha_lo_old),
alpha_hi_old(_alpha_hi_old),
alpha_lo_new(_alpha_lo_new),
alpha_hi_new(_alpha_hi_new),
y_lo(_y_lo),
y_hi(_y_hi),
x_hi_sq(_x_hi_sq),
x_lo_sq(_x_lo_sq)
{}
template <typename Tuple>
__host__ __device__
void operator()(Tuple t)
{
float rbf_hi = expf(-1 * gamma * (thrust::get<2>(t) + x_hi_sq - (2*thrust::get<0>(t)) ));
//printf("%f\t%f\n" , -1 * gamma * (thrust::get<2>(t) + x_hi_sq - (2*thrust::get<0>(t)) ) , rbf_hi);
float rbf_lo = expf(-1 * gamma * (thrust::get<2>(t) + x_lo_sq - (2*thrust::get<1>(t)) ));
//printf("%f\t%f\n" , -1 * gamma * (thrust::get<2>(t) + x_lo_sq - (2*thrust::get<1>(t)) ) , rbf_lo);
float delta = (((alpha_hi_new-alpha_hi_old)*y_hi*rbf_hi) + ((alpha_lo_new - alpha_lo_old)*y_lo*rbf_lo));
thrust::get<3>(t) += delta;
}
};
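// For every training point i, update_functor applies the SMO f-update
// f_i += (alpha_hi_new - alpha_hi_old) * y_hi * K(x_hi, x_i)
// + (alpha_lo_new - alpha_lo_old) * y_lo * K(x_lo, x_i),
// where the RBF kernel K(a, b) = exp(-gamma * ||a - b||^2) is evaluated from dot products
// via ||a - b||^2 = a.a + b.b - 2*a.b; x_i.x_i is taken from g_x_sq and x_i.x_hi / x_i.x_lo
// from the Sgemv results cached by update_f below.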
//cache lookup
thrust::device_vector<float>& SvmTrain::lookup_cache(int I_idx, bool& cache_hit) {
//static thrust::device_vector<float> g_hi_dotprod (state.num_train_data);
thrust::device_vector<float>* lookup = lineCache->lookup(I_idx);
if(lookup != NULL){
cache_hit = true;
return *lookup;
}
else {
cache_hit = false;
return lineCache->get_new_cache_line(I_idx);
}
}
//Allocate x_hi, x_lo and an empty vector in device i
void SvmTrain::init_cuda_handles() {
hipblasStatus_t status;
hipError_t cudaStat;
status = hipblasCreate(&handle);
if (status != HIPBLAS_STATUS_SUCCESS) {
cout << "CUBLAS initialization failed\n";
exit(EXIT_FAILURE);
}
cudaStat = hipStreamCreate(&stream1);
cudaStat = hipStreamCreate(&stream2);
if (cudaStat == hipErrorInvalidValue) {
cout << "CUDA stream initialization failed\n";
exit(EXIT_FAILURE);
}
}
void SvmTrain::destroy_cuda_handles() {
hipblasDestroy(handle);
}
int SvmTrain::update_f(int I_lo, int I_hi, int y_lo, int y_hi, float alpha_lo_old, float alpha_hi_old, float alpha_lo_new, float alpha_hi_new) {
// unsigned long long t1,t2;
// t1 = CycleTimer::currentTicks();
// cout << I_hi << "," << I_lo << "\n";
// lineCache -> dump_map_contents();
bool hi_hit;
bool lo_hit;
thrust::device_vector<float>& g_hi_dotprod = lookup_cache(I_hi, hi_hit);
float* raw_g_hi_dotprod = thrust::raw_pointer_cast(&g_hi_dotprod[0]);
//printf("%x, %x\n",raw_g_hi_dotprod, raw_g_lo_dotprod);
//cout << "UPDATE_F: " << t2-t1 << "\n";
//t1 = t2;
if(!hi_hit) {
//cout << "HI MISS\n";
hipblasSetStream(handle, stream1);
// t2 = CycleTimer::currentTicks();
// cout << "UPDATE_F, INIT: " << t2-t1 << "\n";
// t1 = t2;
hipblasSgemv( handle, HIPBLAS_OP_T, state.num_attributes, num_train_data, &alpha, &raw_g_x[matrix_start], state.num_attributes, &raw_g_x[I_hi * state.num_attributes], 1, &beta, raw_g_hi_dotprod, 1 );
// t2 = CycleTimer::currentTicks();
// cout << "SGEMV 1: " << t2-t1 << "\n";
// t1 = t2;
}
/*cout << "----------------\n";
for (int i = 100 ; i < 130; i++) {
cout << g_hi_dotprod[i] << ",";
}
cout << "\n-------------\n";*/
thrust::device_vector<float>& g_lo_dotprod = lookup_cache(I_lo, lo_hit);
float* raw_g_lo_dotprod = thrust::raw_pointer_cast(&g_lo_dotprod[0]);
if(!lo_hit) {
//cout << "LO MISS \n";
hipblasSetStream(handle, stream2);
hipblasSgemv( handle, HIPBLAS_OP_T, state.num_attributes, num_train_data, &alpha, &raw_g_x[matrix_start], state.num_attributes, &raw_g_x[I_lo * state.num_attributes], 1, &beta, raw_g_lo_dotprod, 1 );
}
/*cout << "----------------\n";
for (int i = 100 ; i < 130; i++) {
cout << g_lo_dotprod[i] << ",";
}
cout << "\n-------------\n";*/
//printf("G_X_SQ: %x - %x\n", thrust::raw_pointer_cast(&g_x_sq[0]), thrust::raw_pointer_cast(&g_x_sq[state.num_train_data-1]));
//printf("G_F: %x - %x\n", thrust::raw_pointer_cast(&g_f[0]), thrust::raw_pointer_cast(&g_f[state.num_train_data-1]));
//printf("G_X_SQ: %x - %x\n", thrust::raw_pointer_cast(&g_x_sq[0]), thrust::raw_pointer_cast(&g_x_sq[state.num_train_data-1]));
//printf("%x, %x\n", thrust::raw_pointer_cast(&g_hi_dotprod[state.num_attributes-1]), thrust::raw_pointer_cast(&g_lo_dotprod[state.num_attributes-1]));
// t2 = CycleTimer::currentTicks();
// cout << "SGEMV 2: " << t2-t1 << "\n";
// t1 = t2;
float x_hi_sq = g_x_sq[I_hi];
float x_lo_sq = g_x_sq[I_lo];
thrust::for_each(thrust::make_zip_iterator(thrust::make_tuple(g_hi_dotprod.begin(), g_lo_dotprod.begin(), g_x_sq.begin()+start, g_f.begin())),
thrust::make_zip_iterator(thrust::make_tuple(g_hi_dotprod.end(), g_lo_dotprod.end(), g_x_sq.begin()+end, g_f.end())),
update_functor(state.gamma, alpha_lo_old, alpha_hi_old, alpha_lo_new, alpha_hi_new, y_lo, y_hi, x_hi_sq, x_lo_sq));
/*cout << "----------------\n";
for (int i = 100 ; i < 130; i++) {
cout << g_f[i] << ",";
}
cout << "\n-------------\n";*/
//prev_hi = I_hi;
//prev_lo = I_lo;
// t2 = CycleTimer::currentTicks();
// cout << "UPDATE_FUNCTOR: " << t2-t1 << "\n";
// t1 = t2;
/////////////////////////////////////////////////////////
// t2 = CycleTimer::currentTicks();
// cout << "Destroy: " << t2-t1 << "\n";
// t1 = t2;
return 0;
}
//Parameterized constructor
SvmTrain::SvmTrain(int n_data, int d) {
num_train_data = n_data;
start = d;
end = d+n_data;
matrix_start = start*state.num_attributes;
matrix_end = end*state.num_attributes;
init.I_1 = -1;
init.I_2 = -1;
init.f_1 = 1000000000;
init.f_2 = -1000000000;
}
void SvmTrain::setup(std::vector<float>& raw_x, std::vector<int>& raw_y) {
int deviceCount = 0;
hipError_t err = hipGetDeviceCount(&deviceCount);
printf("---------------------------------------------------------\n");
printf("Initializing CUDA for DPSVM\n");
printf("Found %d CUDA devices\n", deviceCount);
for (int i=0; i<deviceCount; i++) {
hipDeviceProp_t deviceProps;
hipGetDeviceProperties(&deviceProps, i);
printf("Device %d: %s\n", i, deviceProps.name);
printf(" SMs: %d\n", deviceProps.multiProcessorCount);
printf(" Global mem: %.0f MB\n", static_cast<float>(deviceProps.totalGlobalMem) / (1024 * 1024));
printf(" CUDA Cap: %d.%d\n", deviceProps.major, deviceProps.minor);
}
printf("---------------------------------------------------------\n");
x = thrust::host_vector<float>(raw_x);
y = thrust::host_vector<int>(raw_y);
//cout << "PRE X COPY: \n";
//Copy x and y to device
g_x = thrust::device_vector<float>(x.begin(), x.end()) ;
//cout << "POST X COPY: \n";
//Initialize alpha on device
g_alpha = thrust::device_vector<float>(state.num_train_data, 0);
//cout << "POST ALPHA: \n";
init_cuda_handles();
//cout << "POST HANDLE INIT: \n";
g_x_sq = thrust::device_vector<float>(state.num_train_data);
//cout << "POST X_SQ: \n";
for( int i = 0; i < state.num_train_data; i++ )
{
g_x_sq[i] = thrust::inner_product(&g_x[i*state.num_attributes], &g_x[i*state.num_attributes] + state.num_attributes, &g_x[i*state.num_attributes], 0.0f);
}
//cout << "POST X_SQ INIT: \n";
raw_g_x = thrust::raw_pointer_cast(&g_x[0]);
//cout << "POST G_X: \n";
//ONLY THE FOLLOWING USE INFO PERTAINING TO THIS PARTICULAR SPLIT
g_y = thrust::device_vector<int>(y.begin()+start, y.begin()+end);
//cout << "POST G_Y: \n";
// Initialize f on device
g_f = thrust::device_vector<float>(num_train_data);
thrust::transform(g_y.begin(), g_y.end(), g_f.begin(), thrust::negate<float>());
//cout << "POST G_F INIT: \n";
lineCache = new myCache(state.cache_size, num_train_data);
//cout << "POST LINECACHE: \n";
rv = new float[4];
g_I_set = thrust::device_vector<i_helper>(num_train_data);
first = thrust::counting_iterator<int>(start);
last = first + num_train_data;
}
// t2 = CycleTimer::currentTicks();
//cout << "POST INIT, PRE G_X_SQ CALC: " << t2 - t1 << "\n";
// t1 = t2;
struct my_maxmin : public thrust::binary_function<i_helper, i_helper, i_helper> {
__host__ __device__
i_helper operator()(i_helper x, i_helper y) {
i_helper rv;//(fminf(x.I_1, y.I_1), fmaxf(x.I_2, y.I_2));
if(x.f_1 < y.f_1) {
rv.I_1 = x.I_1;
rv.f_1 = x.f_1;
}
else { //if (x.f_1 > y.f_1) {
rv.I_1 = y.I_1;
rv.f_1 = y.f_1;
}
/*else {
if(x.I_1 < y.I_1) {
rv.I_1 = x.I_1;
rv.f_1 = x.f_1;
}
else {
rv.I_1 = y.I_1;
rv.f_1 = y.f_1;
}
}*/
if(x.f_2 > y.f_2) {
rv.I_2 = x.I_2;
rv.f_2 = x.f_2;
}
else { //if(x.f_2 < y.f_2) {
rv.I_2 = y.I_2;
rv.f_2 = y.f_2;
}
/*else {
if(x.I_2 < y.I_2) {
rv.I_2 = x.I_2;
rv.f_2 = x.f_2;
}
else {
rv.I_2 = y.I_2;
rv.f_2 = y.f_2;
}
}*/
return rv;
}
};
void SvmTrain::train_step1() {
//Set up I_set1 and I_set2
thrust::for_each(thrust::make_zip_iterator(thrust::make_tuple(g_alpha.begin() + start, g_y.begin(), g_f.begin(), g_I_set.begin(), first)),
thrust::make_zip_iterator(thrust::make_tuple(g_alpha.begin() + end, g_y.end(), g_f.end(), g_I_set.end(), last)),
arbitrary_functor(state.c));
i_helper res = thrust::reduce(g_I_set.begin(), g_I_set.end(), init, my_maxmin());
rv[0] = res.I_1;
rv[1] = res.I_2;
rv[2] = res.f_1;
rv[3] = res.f_2;
}
void SvmTrain::train_step2(int I_hi, int I_lo, float alpha_hi_new, float alpha_lo_new) {
float alpha_lo_old = g_alpha[I_lo];
float alpha_hi_old = g_alpha[I_hi];
int y_hi = y[I_hi];
int y_lo = y[I_lo];
g_alpha[I_lo] = alpha_lo_new;
g_alpha[I_hi] = alpha_hi_new;
update_f(I_lo, I_hi, y_lo, y_hi, alpha_lo_old, alpha_hi_old, alpha_lo_new, alpha_hi_new);
}
/*float SvmTrain::get_train_accuracy() {
int num_correct = 0;
//thrust::host_vector<float> alpha = g_alpha;
//float* raw_alpha = thrust::raw_pointer_cast(&alpha[0]);
for(int i=0; i<state.num_train_data; i++) {
//cout << "Iter: " << i << "\n";
hipblasSgemv(t_handle, HIPBLAS_OP_T, state.num_attributes, new_size, &alpha, &raw_g_x_c[0], state.num_attributes, &raw_g_x[i * state.num_attributes], 1, &beta, raw_g_t_dp, 1 );
float i_sq = g_x_sq[i];
float dual = thrust::transform_reduce(thrust::make_zip_iterator(thrust::make_tuple(g_y_c.begin(), g_alpha_c.begin(), g_x_sq_c.begin(), g_t_dp.begin())),
thrust::make_zip_iterator(thrust::make_tuple(g_y_c.end(), g_alpha_c.end(), g_x_sq_c.end(), g_t_dp.end())),
test_functor<thrust::tuple<int, float, float, float> >(i_sq), 0, thrust::plus<float>());
//dual += y[j]*raw_alpha[j]*rbf_kernel(j,i);
// }
//}
dual += b;
int result = 1;
if(dual < 0) {
result = -1;
}
if(result == y[i]) {
num_correct++;
}
}
return ((float)num_correct/(state.num_train_data));
}*/
struct is_not_sv
{
template <typename Tuple>
__host__ __device__
bool operator()(const Tuple& t)
{
return (thrust::get<0>(t) <= 0);
}
};
template <typename Tuple>
struct test_functor : public thrust::unary_function<float,Tuple> {
const float i_sq;
const float gamma;
test_functor(float _i_sq, float _gamma) :
i_sq(_i_sq),
gamma(_gamma)
{}
__host__ __device__ float operator()(const Tuple& t) const
{
return (thrust::get<0>(t) * thrust::get<1>(t) * expf(-1 * gamma * (thrust::get<2>(t) + i_sq - (2*thrust::get<3>(t)))));
}
};
void SvmTrain::test_setup() {
g_alpha_c = g_alpha;
g_y_c = y;
g_x_sq_c = g_x_sq;
g_sv_indices = thrust::device_vector<int>(state.num_train_data);
thrust::sequence(g_sv_indices.begin(), g_sv_indices.end());
aggregate_sv();
g_t_dp = thrust::device_vector<float>(new_size);
raw_g_t_dp = thrust::raw_pointer_cast(&g_t_dp[0]);
hipblasStatus_t status;
status = hipblasCreate(&t_handle);
if (status != HIPBLAS_STATUS_SUCCESS) {
cout << "CUBLAS initialization failed\n";
exit(EXIT_FAILURE);
}
}
void SvmTrain::aggregate_sv() {
new_size = thrust::remove_if(thrust::device,
thrust::make_zip_iterator(thrust::make_tuple(g_alpha_c.begin(), g_y_c.begin(), g_x_sq_c.begin(), g_sv_indices.begin())),
thrust::make_zip_iterator(thrust::make_tuple(g_alpha_c.end(), g_y_c.end(), g_x_sq_c.end(), g_sv_indices.end())),
is_not_sv())
- thrust::make_zip_iterator(thrust::make_tuple(g_alpha_c.begin(), g_y_c.begin(),
g_x_sq_c.begin(), g_sv_indices.begin()));
cout << "Number of SVs: " << new_size << "\n";
g_alpha_c.resize(new_size);
g_y_c.resize(new_size);
g_x_sq_c.resize(new_size);
g_sv_indices.resize(new_size);
thrust::host_vector<int> temp_indices = g_sv_indices;
thrust::host_vector<float> temp_x(new_size * state.num_attributes);
for(int i = 0 ; i < new_size; i++) {
int idx = temp_indices[i];
for(int j = 0; j < state.num_attributes; j++){
temp_x[i*state.num_attributes + j] = x[idx*state.num_attributes + j];
}
}
g_x_c = temp_x;
raw_g_x_c = thrust::raw_pointer_cast(&g_x_c[0]);
}
float SvmTrain::get_train_accuracy() {
int num_correct = 0;
for(int i=0; i<state.num_train_data; i++) {
hipblasSgemv(t_handle, HIPBLAS_OP_T, state.num_attributes, new_size, &alpha, &raw_g_x_c[0], state.num_attributes, &raw_g_x[i * state.num_attributes], 1, &beta, raw_g_t_dp, 1 );
float i_sq = g_x_sq[i];
float dual = 0.0f;
dual = thrust::transform_reduce(thrust::make_zip_iterator(thrust::make_tuple(g_y_c.begin(), g_alpha_c.begin(), g_x_sq_c.begin(), g_t_dp.begin())),
thrust::make_zip_iterator(thrust::make_tuple(g_y_c.end(), g_alpha_c.end(), g_x_sq_c.end(), g_t_dp.end())),
test_functor<thrust::tuple<int, float, float, float> >(i_sq, state.gamma), 0.0f, thrust::plus<float>());
dual -= b;
int result = 1;
if(dual < 0.0f) {
result = -1;
}
if(result == y[i]) {
num_correct++;
}
}
return ((float)num_correct/(state.num_train_data));
}
void SvmTrain::destroy_t_cuda_handles() {
hipblasDestroy(t_handle);
}
float SvmTrain::clip_value(float num, float low, float high) {
if(num < low) {
return low;
} else if(num > high) {
return high;
}
return num;
}
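// Example: clip_value(1.7f, 0.0f, 1.0f) returns 1.0f and clip_value(-0.2f, 0.0f, 1.0f)
// returns 0.0f. This is the usual SMO-style clipping that keeps an updated alpha inside
// the box constraint [low, high]; the actual call site lives in the host driver and is
// not shown in this file.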
void SvmTrain::get_x(float* x, float* x_copy, int idx, int num_attributes) {
int ctr = 0;
int start_index = (idx*num_attributes);
int end_index = start_index+num_attributes;
for(int i = start_index; i < end_index; i++) {
x_copy[ctr++] = x[i];
}
}
float SvmTrain::rbf_kernel(int i1, int i2){
float* i2_copy = new float[state.num_attributes];
float* raw_i1 = thrust::raw_pointer_cast(&x[i1*state.num_attributes]);
float* raw_i2 = thrust::raw_pointer_cast(&x[i2*state.num_attributes]);
get_x(raw_i2, i2_copy, 0, state.num_attributes);
cblas_saxpy(state.num_attributes, -1, raw_i1, 1, i2_copy, 1);
float norm_sq = cblas_sdot(state.num_attributes, i2_copy, 1, i2_copy, 1);
float result = (float)exp(-1 *(float)state.gamma*norm_sq);
delete [] i2_copy;
return result;
}
|
923cb379c082186d90b743bce97e555f489dfd5c.cu
|
#include <stdio.h>
#include <stdlib.h>
#include "svmTrain.h"
#include "parse.hpp"
#include <iostream>
#include <cuda_runtime.h>
#include <cublas_v2.h>
#include <cuda.h>
#include <cblas.h>
#include <vector>
#include <string.h>
#include <getopt.h>
#include <math.h>
#include <vector>
#include "CycleTimer.h"
#include "svmTrainMain.hpp"
#include "cache.hpp"
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <thrust/fill.h>
#include <thrust/sequence.h>
#include <thrust/for_each.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/inner_product.h>
#include <thrust/extrema.h>
#include <thrust/execution_policy.h>
#include <thrust/transform_reduce.h>
#include <thrust/functional.h>
#define IDX2C(i,j,ld) (((j)*(ld))+(i))
using namespace std;
// Scalars
const float alpha = 1;
const float beta = 0;
//functor for obtaining the I sets
struct arbitrary_functor
{
const float C;
arbitrary_functor(float _c) : C(_c) {}
template <typename Tuple>
__host__ __device__
void operator()(Tuple t)
{
thrust::get<3>(t).I_1 = thrust::get<3>(t).I_2 = thrust::get<4>(t);
//i_helper new;
// I_set[i] = Alpha[i], Y[i] , f[i], I_set1[i], I_set2[i];
if(thrust::get<0>(t) == 0) {
if(thrust::get<1>(t) == 1) {
thrust::get<3>(t).f_1 = thrust::get<2>(t);
thrust::get<3>(t).f_2 = -1000000000;
}
else {
thrust::get<3>(t).f_2 = thrust::get<2>(t);
thrust::get<3>(t).f_1 = 1000000000;
}
} else if(thrust::get<0>(t) == C) {
if(thrust::get<1>(t) == -1) {
thrust::get<3>(t).f_1 = thrust::get<2>(t);
thrust::get<3>(t).f_2 = -1000000000;
}
else {
thrust::get<3>(t).f_2 = thrust::get<2>(t);
thrust::get<3>(t).f_1 = 1000000000;
}
} else {
thrust::get<3>(t).f_1 = thrust::get<3>(t).f_2 = thrust::get<2>(t);
}
}
};
//functor for performing the f_update step in GPU using Thrust
struct update_functor
{
const float gamma;
const float alpha_lo_old;
const float alpha_hi_old;
const float alpha_lo_new;
const float alpha_hi_new;
const int y_lo;
const int y_hi;
const float x_hi_sq;
const float x_lo_sq;
update_functor(float _gamma, float _alpha_lo_old, float _alpha_hi_old, float _alpha_lo_new, float _alpha_hi_new, int _y_lo, int _y_hi, float _x_hi_sq, float _x_lo_sq) :
gamma(_gamma),
alpha_lo_old(_alpha_lo_old),
alpha_hi_old(_alpha_hi_old),
alpha_lo_new(_alpha_lo_new),
alpha_hi_new(_alpha_hi_new),
y_lo(_y_lo),
y_hi(_y_hi),
x_hi_sq(_x_hi_sq),
x_lo_sq(_x_lo_sq)
{}
template <typename Tuple>
__host__ __device__
void operator()(Tuple t)
{
float rbf_hi = expf(-1 * gamma * (thrust::get<2>(t) + x_hi_sq - (2*thrust::get<0>(t)) ));
//printf("%f\t%f\n" , -1 * gamma * (thrust::get<2>(t) + x_hi_sq - (2*thrust::get<0>(t)) ) , rbf_hi);
float rbf_lo = expf(-1 * gamma * (thrust::get<2>(t) + x_lo_sq - (2*thrust::get<1>(t)) ));
//printf("%f\t%f\n" , -1 * gamma * (thrust::get<2>(t) + x_lo_sq - (2*thrust::get<1>(t)) ) , rbf_lo);
float delta = (((alpha_hi_new-alpha_hi_old)*y_hi*rbf_hi) + ((alpha_lo_new - alpha_lo_old)*y_lo*rbf_lo));
thrust::get<3>(t) += delta;
}
};
//cache lookup
thrust::device_vector<float>& SvmTrain::lookup_cache(int I_idx, bool& cache_hit) {
//static thrust::device_vector<float> g_hi_dotprod (state.num_train_data);
thrust::device_vector<float>* lookup = lineCache->lookup(I_idx);
if(lookup != NULL){
cache_hit = true;
return *lookup;
}
else {
cache_hit = false;
return lineCache->get_new_cache_line(I_idx);
}
}
//Allocate x_hi, x_lo and an empty vector in device i
void SvmTrain::init_cuda_handles() {
cublasStatus_t status;
cudaError_t cudaStat;
status = cublasCreate(&handle);
if (status != CUBLAS_STATUS_SUCCESS) {
cout << "CUBLAS initialization failed\n";
exit(EXIT_FAILURE);
}
cudaStat = cudaStreamCreate(&stream1);
cudaStat = cudaStreamCreate(&stream2);
if (cudaStat == cudaErrorInvalidValue) {
cout << "CUDA stream initialization failed\n";
exit(EXIT_FAILURE);
}
}
void SvmTrain::destroy_cuda_handles() {
cublasDestroy(handle);
}
int SvmTrain::update_f(int I_lo, int I_hi, int y_lo, int y_hi, float alpha_lo_old, float alpha_hi_old, float alpha_lo_new, float alpha_hi_new) {
// unsigned long long t1,t2;
// t1 = CycleTimer::currentTicks();
// cout << I_hi << "," << I_lo << "\n";
// lineCache -> dump_map_contents();
bool hi_hit;
bool lo_hit;
thrust::device_vector<float>& g_hi_dotprod = lookup_cache(I_hi, hi_hit);
float* raw_g_hi_dotprod = thrust::raw_pointer_cast(&g_hi_dotprod[0]);
//printf("%x, %x\n",raw_g_hi_dotprod, raw_g_lo_dotprod);
//cout << "UPDATE_F: " << t2-t1 << "\n";
//t1 = t2;
if(!hi_hit) {
//cout << "HI MISS\n";
cublasSetStream(handle, stream1);
// t2 = CycleTimer::currentTicks();
// cout << "UPDATE_F, INIT: " << t2-t1 << "\n";
// t1 = t2;
cublasSgemv( handle, CUBLAS_OP_T, state.num_attributes, num_train_data, &alpha, &raw_g_x[matrix_start], state.num_attributes, &raw_g_x[I_hi * state.num_attributes], 1, &beta, raw_g_hi_dotprod, 1 );
// t2 = CycleTimer::currentTicks();
// cout << "SGEMV 1: " << t2-t1 << "\n";
// t1 = t2;
}
/*cout << "----------------\n";
for (int i = 100 ; i < 130; i++) {
cout << g_hi_dotprod[i] << ",";
}
cout << "\n-------------\n";*/
thrust::device_vector<float>& g_lo_dotprod = lookup_cache(I_lo, lo_hit);
float* raw_g_lo_dotprod = thrust::raw_pointer_cast(&g_lo_dotprod[0]);
if(!lo_hit) {
//cout << "LO MISS \n";
cublasSetStream(handle, stream2);
cublasSgemv( handle, CUBLAS_OP_T, state.num_attributes, num_train_data, &alpha, &raw_g_x[matrix_start], state.num_attributes, &raw_g_x[I_lo * state.num_attributes], 1, &beta, raw_g_lo_dotprod, 1 );
}
/*cout << "----------------\n";
for (int i = 100 ; i < 130; i++) {
cout << g_lo_dotprod[i] << ",";
}
cout << "\n-------------\n";*/
//printf("G_X_SQ: %x - %x\n", thrust::raw_pointer_cast(&g_x_sq[0]), thrust::raw_pointer_cast(&g_x_sq[state.num_train_data-1]));
//printf("G_F: %x - %x\n", thrust::raw_pointer_cast(&g_f[0]), thrust::raw_pointer_cast(&g_f[state.num_train_data-1]));
//printf("G_X_SQ: %x - %x\n", thrust::raw_pointer_cast(&g_x_sq[0]), thrust::raw_pointer_cast(&g_x_sq[state.num_train_data-1]));
//printf("%x, %x\n", thrust::raw_pointer_cast(&g_hi_dotprod[state.num_attributes-1]), thrust::raw_pointer_cast(&g_lo_dotprod[state.num_attributes-1]));
// t2 = CycleTimer::currentTicks();
// cout << "SGEMV 2: " << t2-t1 << "\n";
// t1 = t2;
float x_hi_sq = g_x_sq[I_hi];
float x_lo_sq = g_x_sq[I_lo];
thrust::for_each(thrust::make_zip_iterator(thrust::make_tuple(g_hi_dotprod.begin(), g_lo_dotprod.begin(), g_x_sq.begin()+start, g_f.begin())),
thrust::make_zip_iterator(thrust::make_tuple(g_hi_dotprod.end(), g_lo_dotprod.end(), g_x_sq.begin()+end, g_f.end())),
update_functor(state.gamma, alpha_lo_old, alpha_hi_old, alpha_lo_new, alpha_hi_new, y_lo, y_hi, x_hi_sq, x_lo_sq));
/*cout << "----------------\n";
for (int i = 100 ; i < 130; i++) {
cout << g_f[i] << ",";
}
cout << "\n-------------\n";*/
//prev_hi = I_hi;
//prev_lo = I_lo;
// t2 = CycleTimer::currentTicks();
// cout << "UPDATE_FUNCTOR: " << t2-t1 << "\n";
// t1 = t2;
/////////////////////////////////////////////////////////
// t2 = CycleTimer::currentTicks();
// cout << "Destroy: " << t2-t1 << "\n";
// t1 = t2;
return 0;
}
//Parameterized constructor
SvmTrain::SvmTrain(int n_data, int d) {
num_train_data = n_data;
start = d;
end = d+n_data;
matrix_start = start*state.num_attributes;
matrix_end = end*state.num_attributes;
init.I_1 = -1;
init.I_2 = -1;
init.f_1 = 1000000000;
init.f_2 = -1000000000;
}
void SvmTrain::setup(std::vector<float>& raw_x, std::vector<int>& raw_y) {
int deviceCount = 0;
cudaError_t err = cudaGetDeviceCount(&deviceCount);
printf("---------------------------------------------------------\n");
printf("Initializing CUDA for DPSVM\n");
printf("Found %d CUDA devices\n", deviceCount);
for (int i=0; i<deviceCount; i++) {
cudaDeviceProp deviceProps;
cudaGetDeviceProperties(&deviceProps, i);
printf("Device %d: %s\n", i, deviceProps.name);
printf(" SMs: %d\n", deviceProps.multiProcessorCount);
printf(" Global mem: %.0f MB\n", static_cast<float>(deviceProps.totalGlobalMem) / (1024 * 1024));
printf(" CUDA Cap: %d.%d\n", deviceProps.major, deviceProps.minor);
}
printf("---------------------------------------------------------\n");
x = thrust::host_vector<float>(raw_x);
y = thrust::host_vector<int>(raw_y);
//cout << "PRE X COPY: \n";
//Copy x and y to device
g_x = thrust::device_vector<float>(x.begin(), x.end()) ;
//cout << "POST X COPY: \n";
//Initialize alpha on device
g_alpha = thrust::device_vector<float>(state.num_train_data, 0);
//cout << "POST ALPHA: \n";
init_cuda_handles();
//cout << "POST HANDLE INIT: \n";
g_x_sq = thrust::device_vector<float>(state.num_train_data);
//cout << "POST X_SQ: \n";
for( int i = 0; i < state.num_train_data; i++ )
{
g_x_sq[i] = thrust::inner_product(&g_x[i*state.num_attributes], &g_x[i*state.num_attributes] + state.num_attributes, &g_x[i*state.num_attributes], 0.0f);
}
//cout << "POST X_SQ INIT: \n";
raw_g_x = thrust::raw_pointer_cast(&g_x[0]);
//cout << "POST G_X: \n";
//ONLY THE FOLLOWING USE INFO PERTAINING TO THIS PARTICULAR SPLIT
g_y = thrust::device_vector<int>(y.begin()+start, y.begin()+end);
//cout << "POST G_Y: \n";
// Initialize f on device
g_f = thrust::device_vector<float>(num_train_data);
thrust::transform(g_y.begin(), g_y.end(), g_f.begin(), thrust::negate<float>());
//cout << "POST G_F INIT: \n";
lineCache = new myCache(state.cache_size, num_train_data);
//cout << "POST LINECACHE: \n";
rv = new float[4];
g_I_set = thrust::device_vector<i_helper>(num_train_data);
first = thrust::counting_iterator<int>(start);
last = first + num_train_data;
}
// t2 = CycleTimer::currentTicks();
//cout << "POST INIT, PRE G_X_SQ CALC: " << t2 - t1 << "\n";
// t1 = t2;
struct my_maxmin : public thrust::binary_function<i_helper, i_helper, i_helper> {
__host__ __device__
i_helper operator()(i_helper x, i_helper y) {
i_helper rv;//(fminf(x.I_1, y.I_1), fmaxf(x.I_2, y.I_2));
if(x.f_1 < y.f_1) {
rv.I_1 = x.I_1;
rv.f_1 = x.f_1;
}
else { //if (x.f_1 > y.f_1) {
rv.I_1 = y.I_1;
rv.f_1 = y.f_1;
}
/*else {
if(x.I_1 < y.I_1) {
rv.I_1 = x.I_1;
rv.f_1 = x.f_1;
}
else {
rv.I_1 = y.I_1;
rv.f_1 = y.f_1;
}
}*/
if(x.f_2 > y.f_2) {
rv.I_2 = x.I_2;
rv.f_2 = x.f_2;
}
else { //if(x.f_2 < y.f_2) {
rv.I_2 = y.I_2;
rv.f_2 = y.f_2;
}
/*else {
if(x.I_2 < y.I_2) {
rv.I_2 = x.I_2;
rv.f_2 = x.f_2;
}
else {
rv.I_2 = y.I_2;
rv.f_2 = y.f_2;
}
}*/
return rv;
}
};
void SvmTrain::train_step1() {
//Set up I_set1 and I_set2
thrust::for_each(thrust::make_zip_iterator(thrust::make_tuple(g_alpha.begin() + start, g_y.begin(), g_f.begin(), g_I_set.begin(), first)),
thrust::make_zip_iterator(thrust::make_tuple(g_alpha.begin() + end, g_y.end(), g_f.end(), g_I_set.end(), last)),
arbitrary_functor(state.c));
i_helper res = thrust::reduce(g_I_set.begin(), g_I_set.end(), init, my_maxmin());
rv[0] = res.I_1;
rv[1] = res.I_2;
rv[2] = res.f_1;
rv[3] = res.f_2;
}
void SvmTrain::train_step2(int I_hi, int I_lo, float alpha_hi_new, float alpha_lo_new) {
float alpha_lo_old = g_alpha[I_lo];
float alpha_hi_old = g_alpha[I_hi];
int y_hi = y[I_hi];
int y_lo = y[I_lo];
g_alpha[I_lo] = alpha_lo_new;
g_alpha[I_hi] = alpha_hi_new;
update_f(I_lo, I_hi, y_lo, y_hi, alpha_lo_old, alpha_hi_old, alpha_lo_new, alpha_hi_new);
}
/*float SvmTrain::get_train_accuracy() {
int num_correct = 0;
//thrust::host_vector<float> alpha = g_alpha;
//float* raw_alpha = thrust::raw_pointer_cast(&alpha[0]);
for(int i=0; i<state.num_train_data; i++) {
//cout << "Iter: " << i << "\n";
cublasSgemv(t_handle, CUBLAS_OP_T, state.num_attributes, new_size, &alpha, &raw_g_x_c[0], state.num_attributes, &raw_g_x[i * state.num_attributes], 1, &beta, raw_g_t_dp, 1 );
float i_sq = g_x_sq[i];
float dual = thrust::transform_reduce(thrust::make_zip_iterator(thrust::make_tuple(g_y_c.begin(), g_alpha_c.begin(), g_x_sq_c.begin(), g_t_dp.begin())),
thrust::make_zip_iterator(thrust::make_tuple(g_y_c.end(), g_alpha_c.end(), g_x_sq_c.end(), g_t_dp.end())),
test_functor<thrust::tuple<int, float, float, float> >(i_sq), 0, thrust::plus<float>());
//dual += y[j]*raw_alpha[j]*rbf_kernel(j,i);
// }
//}
dual += b;
int result = 1;
if(dual < 0) {
result = -1;
}
if(result == y[i]) {
num_correct++;
}
}
return ((float)num_correct/(state.num_train_data));
}*/
struct is_not_sv
{
template <typename Tuple>
__host__ __device__
bool operator()(const Tuple& t)
{
return (thrust::get<0>(t) <= 0);
}
};
template <typename Tuple>
struct test_functor : public thrust::unary_function<float,Tuple> {
const float i_sq;
const float gamma;
test_functor(float _i_sq, float _gamma) :
i_sq(_i_sq),
gamma(_gamma)
{}
__host__ __device__ float operator()(const Tuple& t) const
{
return (thrust::get<0>(t) * thrust::get<1>(t) * expf(-1 * gamma * (thrust::get<2>(t) + i_sq - (2*thrust::get<3>(t)))));
}
};
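// Each tuple contributes one term y_j * alpha_j * exp(-gamma * (||x_j||^2 + ||x_i||^2 - 2 * x_j.x_i))
// of the decision value for point i; get_train_accuracy() below sums these terms with
// transform_reduce, subtracts b and takes the sign to classify the point.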
void SvmTrain::test_setup() {
g_alpha_c = g_alpha;
g_y_c = y;
g_x_sq_c = g_x_sq;
g_sv_indices = thrust::device_vector<int>(state.num_train_data);
thrust::sequence(g_sv_indices.begin(), g_sv_indices.end());
aggregate_sv();
g_t_dp = thrust::device_vector<float>(new_size);
raw_g_t_dp = thrust::raw_pointer_cast(&g_t_dp[0]);
cublasStatus_t status;
status = cublasCreate(&t_handle);
if (status != CUBLAS_STATUS_SUCCESS) {
cout << "CUBLAS initialization failed\n";
exit(EXIT_FAILURE);
}
}
void SvmTrain::aggregate_sv() {
new_size = thrust::remove_if(thrust::device,
thrust::make_zip_iterator(thrust::make_tuple(g_alpha_c.begin(), g_y_c.begin(), g_x_sq_c.begin(), g_sv_indices.begin())),
thrust::make_zip_iterator(thrust::make_tuple(g_alpha_c.end(), g_y_c.end(), g_x_sq_c.end(), g_sv_indices.end())),
is_not_sv())
- thrust::make_zip_iterator(thrust::make_tuple(g_alpha_c.begin(), g_y_c.begin(),
g_x_sq_c.begin(), g_sv_indices.begin()));
cout << "Number of SVs: " << new_size << "\n";
g_alpha_c.resize(new_size);
g_y_c.resize(new_size);
g_x_sq_c.resize(new_size);
g_sv_indices.resize(new_size);
thrust::host_vector<int> temp_indices = g_sv_indices;
thrust::host_vector<float> temp_x(new_size * state.num_attributes);
for(int i = 0 ; i < new_size; i++) {
int idx = temp_indices[i];
for(int j = 0; j < state.num_attributes; j++){
temp_x[i*state.num_attributes + j] = x[idx*state.num_attributes + j];
}
}
g_x_c = temp_x;
raw_g_x_c = thrust::raw_pointer_cast(&g_x_c[0]);
}
float SvmTrain::get_train_accuracy() {
int num_correct = 0;
for(int i=0; i<state.num_train_data; i++) {
cublasSgemv(t_handle, CUBLAS_OP_T, state.num_attributes, new_size, &alpha, &raw_g_x_c[0], state.num_attributes, &raw_g_x[i * state.num_attributes], 1, &beta, raw_g_t_dp, 1 );
float i_sq = g_x_sq[i];
float dual = 0.0f;
dual = thrust::transform_reduce(thrust::make_zip_iterator(thrust::make_tuple(g_y_c.begin(), g_alpha_c.begin(), g_x_sq_c.begin(), g_t_dp.begin())),
thrust::make_zip_iterator(thrust::make_tuple(g_y_c.end(), g_alpha_c.end(), g_x_sq_c.end(), g_t_dp.end())),
test_functor<thrust::tuple<int, float, float, float> >(i_sq, state.gamma), 0.0f, thrust::plus<float>());
dual -= b;
int result = 1;
if(dual < 0.0f) {
result = -1;
}
if(result == y[i]) {
num_correct++;
}
}
return ((float)num_correct/(state.num_train_data));
}
void SvmTrain::destroy_t_cuda_handles() {
cublasDestroy(t_handle);
}
float SvmTrain::clip_value(float num, float low, float high) {
if(num < low) {
return low;
} else if(num > high) {
return high;
}
return num;
}
void SvmTrain::get_x(float* x, float* x_copy, int idx, int num_attributes) {
int ctr = 0;
int start_index = (idx*num_attributes);
int end_index = start_index+num_attributes;
for(int i = start_index; i < end_index; i++) {
x_copy[ctr++] = x[i];
}
}
float SvmTrain::rbf_kernel(int i1, int i2){
float* i2_copy = new float[state.num_attributes];
float* raw_i1 = thrust::raw_pointer_cast(&x[i1*state.num_attributes]);
float* raw_i2 = thrust::raw_pointer_cast(&x[i2*state.num_attributes]);
get_x(raw_i2, i2_copy, 0, state.num_attributes);
cblas_saxpy(state.num_attributes, -1, raw_i1, 1, i2_copy, 1);
float norm_sq = cblas_sdot(state.num_attributes, i2_copy, 1, i2_copy, 1);
float result = (float)exp(-1 *(float)state.gamma*norm_sq);
delete [] i2_copy;
return result;
}
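// Host-side reference RBF kernel: K(x1, x2) = exp(-gamma * ||x1 - x2||^2). The saxpy call
// forms (x2 - x1) in i2_copy and the sdot call gives its squared norm.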
|
908b289dc34a1f6873a4be712c8d9545f5c941b3.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "FindNeighbors.h"
#include <hip/hip_runtime.h>
#include "OpenSteer/VehicleData.h"
#include "OpenSteer/NeighborData.h"
#include "OpenSteer/MemoryBackend.h"
#include "CUDAKernelOptions.cu"
#include <iostream>
using namespace OpenSteer;
using namespace std;
__global__ void
findNeighborsKernel(VehicleData* vehicleData, int* indices, int* vehicles, NeighborData* neighbors, float radius);
OpenSteer::FindNeighbors::FindNeighbors(float radius)
{
grid = NULL;
d_neighborData = NULL;
d_indices = NULL;
d_vehicles = NULL;
threadsPerBlock = 128;
this->radius = radius;
}
OpenSteer::FindNeighbors::~FindNeighbors() {}
void OpenSteer::FindNeighbors::init()
{
grid = new Grid();
// device memory for neighbor data
mem_size_neighbor_data = getNumberOfAgents()*sizeof(NeighborData);
hipError_t retval = hipMalloc((void **)&d_neighborData, mem_size_neighbor_data);
if (retval != hipSuccess)
cout << "Error while allocating d_neighborData memory: " << hipGetErrorString(retval) << endl;
// device memory for neighbor indices
mem_size_neighbor_indices = grid->numOfCells()*sizeof(int);
retval = hipMalloc((void **)&d_indices, mem_size_neighbor_indices);
if (retval != hipSuccess)
cout << "Error while allocating d_indices memory: " << hipGetErrorString(retval) << endl;
// device memory for neighbor agents
mem_size_neighbor_vehicles = getNumberOfAgents()*sizeof(int);
retval = hipMalloc((void **)&d_vehicles, mem_size_neighbor_vehicles);
if (retval != hipSuccess)
cout << "Error while allocating d_agents memory: " << hipGetErrorString(retval) << endl;
}
void OpenSteer::FindNeighbors::run()
{
MemoryBackend *mb = getMemoryBackend();
for (int i = 0; i < getNumberOfAgents(); i++) {
grid->save(mb->position(i).x, mb->position(i).y, mb->position(i).z, i);
}
hipMemcpy(d_indices, grid->getIndices(), grid->numOfCells()*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_vehicles, grid->getAgents(), grid->numOfAgents()*sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( findNeighborsKernel), dim3(gridDim()), dim3(blockDim()), 0, 0, getVehicleData(), d_indices, d_vehicles, d_neighborData, radius);
grid->clear();
}
void OpenSteer::FindNeighbors::close()
{
delete grid;
if (d_neighborData != NULL) {
hipFree(d_neighborData);
d_neighborData = NULL;
}
if (d_indices != NULL) {
hipFree(d_indices);
d_indices = NULL;
}
if (d_vehicles != NULL) {
hipFree(d_vehicles);
d_vehicles = NULL;
}
}
|
908b289dc34a1f6873a4be712c8d9545f5c941b3.cu
|
#include "FindNeighbors.h"
#include <cuda_runtime.h>
#include "OpenSteer/VehicleData.h"
#include "OpenSteer/NeighborData.h"
#include "OpenSteer/MemoryBackend.h"
#include "CUDAKernelOptions.cu"
#include <iostream>
using namespace OpenSteer;
using namespace std;
__global__ void
findNeighborsKernel(VehicleData* vehicleData, int* indices, int* vehicles, NeighborData* neighbors, float radius);
OpenSteer::FindNeighbors::FindNeighbors(float radius)
{
grid = NULL;
d_neighborData = NULL;
d_indices = NULL;
d_vehicles = NULL;
threadsPerBlock = 128;
this->radius = radius;
}
OpenSteer::FindNeighbors::~FindNeighbors() {}
void OpenSteer::FindNeighbors::init()
{
grid = new Grid();
// device memory for neighbor data
mem_size_neighbor_data = getNumberOfAgents()*sizeof(NeighborData);
cudaError_t retval = cudaMalloc((void **)&d_neighborData, mem_size_neighbor_data);
if (retval != cudaSuccess)
cout << "Error while allocating d_neighborData memory: " << cudaGetErrorString(retval) << endl;
// device memory for neighbor indices
mem_size_neighbor_indices = grid->numOfCells()*sizeof(int);
retval = cudaMalloc((void **)&d_indices, mem_size_neighbor_indices);
if (retval != cudaSuccess)
cout << "Error while allocating d_indices memory: " << cudaGetErrorString(retval) << endl;
// device memory for neighbor agents
mem_size_neighbor_vehicles = getNumberOfAgents()*sizeof(int);
retval = cudaMalloc((void **)&d_vehicles, mem_size_neighbor_vehicles);
if (retval != cudaSuccess)
cout << "Error while allocating d_agents memory: " << cudaGetErrorString(retval) << endl;
}
void OpenSteer::FindNeighbors::run()
{
MemoryBackend *mb = getMemoryBackend();
for (int i = 0; i < getNumberOfAgents(); i++) {
grid->save(mb->position(i).x, mb->position(i).y, mb->position(i).z, i);
}
cudaMemcpy(d_indices, grid->getIndices(), grid->numOfCells()*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_vehicles, grid->getAgents(), grid->numOfAgents()*sizeof(int), cudaMemcpyHostToDevice);
findNeighborsKernel<<<gridDim(), blockDim()>>>(getVehicleData(), d_indices, d_vehicles, d_neighborData, radius);
grid->clear();
}
void OpenSteer::FindNeighbors::close()
{
delete grid;
if (d_neighborData != NULL) {
cudaFree(d_neighborData);
d_neighborData = NULL;
}
if (d_indices != NULL) {
cudaFree(d_indices);
d_indices = NULL;
}
if (d_vehicles != NULL) {
cudaFree(d_vehicles);
d_vehicles = NULL;
}
}
|
914e89d69c219445bb7349d53fe6c065d799e77f.hip
|
// !!! This is a file automatically generated by hipify!!!
/*-------------------------------------------------------------------------
*
 * CUDA function for backprojection using FDK weights for CBCT
*
*
* CODE by Ander Biguri & Sepideh Hatamikia
* ---------------------------------------------------------------------------
* ---------------------------------------------------------------------------
* Copyright (c) 2015, University of Bath and CERN- European Organization for
* Nuclear Research
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
* ---------------------------------------------------------------------------
*
* Contact: [email protected]
* Codes : https://github.com/CERN/TIGRE
* ---------------------------------------------------------------------------
*/
#define PI_2 1.57079632679489661923
#include <algorithm>
#include <hip/hip_runtime_api.h>
#include <hip/hip_runtime.h>
#include "voxel_backprojection.hpp"
#include "voxel_backprojection_parallel.hpp"
#include "voxel_backprojection_spherical.hpp"
#include "voxel_backprojection_parallel_spherical.hpp"
#include "mex.h"
#include <math.h>
// https://stackoverflow.com/questions/16282136/is-there-a-cuda-equivalent-of-perror
#define cudaCheckErrors(msg) \
do { \
hipError_t __err = hipGetLastError(); \
if (__err != hipSuccess) { \
mexPrintf("%s \n",msg);\
mexErrMsgIdAndTxt("CBCT:CUDA:Atb",hipGetErrorString(__err));\
} \
} while (0)
#define MAXTREADS 1024
/*GEOMETRY DEFINITION
*
* Detector plane, behind
* |-----------------------------|
* | |
* | |
* | |
* | |
* | +--------+ |
| / /| |
A Z | / / |*D |
| | +--------+ | |
| | | | | |
| | | *O | + |
*--->y | | | / |
/ | | |/ |
V X | +--------+ |
* |-----------------------------|
*
* *S
*
*
*
*
*
**/
texture<float, hipTextureType3D , hipReadModeElementType> tex;
__global__ void kernelPixelBackprojection_parallel_spherical(const Geometry geo,
float* image,
const int indAlpha,
const float COR,
const Point3D deltaX,
const Point3D deltaY,
const Point3D deltaZ,
const Point3D xyzOrigin,
const Point3D xyzOffset,
const Point3D uv0Offset,
Point3D source){
unsigned long indY = blockIdx.y * blockDim.y + threadIdx.y;
unsigned long indX = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long indZ = blockIdx.z * blockDim.z + threadIdx.z;
//Make sure we dont go out of bounds
unsigned long long idx =indZ*geo.nVoxelX*geo.nVoxelY+indY*geo.nVoxelX + indX;
if (indX>=geo.nVoxelX | indY>=geo.nVoxelY |indZ>=geo.nVoxelZ)
return;
    // Geometric transformations:
    //Source, scaled XYZ coordinates
    // "XYZ" in the scaled coordinate system of the current point. The image is rotated with the projection angles.
Point3D P;
P.x=(xyzOrigin.x+indX*deltaX.x+indY*deltaY.x+indZ*deltaZ.x);
P.y=(xyzOrigin.y+indX*deltaX.y+indY*deltaY.y+indZ*deltaZ.y)-COR/geo.dDetecU;
P.z=(xyzOrigin.z+indX*deltaX.z+indY*deltaY.z+indZ*deltaZ.z);
source.y=P.y;
source.z=P.z;
// This is the vector defining the line from the source to the Voxel
float vectX,vectY,vectZ;
vectX=(P.x -source.x);
vectY=(P.y -source.y);
vectZ=(P.z -source.z);
// Get the coordinates in the detector UV where the mid point of the voxel is projected.
float t=(geo.DSO-geo.DSD /*-DDO*/ - source.x)/vectX;
float y,z;
y=vectY*t+source.y;
z=vectZ*t+source.z;
float u,v;
u=y+geo.nDetecU/2;
v=z+geo.nDetecV/2;
float weigth;
float realx,realy;
realx=-geo.sVoxelX/2+geo.dVoxelX/2 +indX*geo.dVoxelX +xyzOffset.x;
realy=-geo.sVoxelY/2+geo.dVoxelY/2 +indY*geo.dVoxelY +xyzOffset.y+COR;
    weigth=(geo.DSO+realy*sin(geo.alpha)-realx*cos(geo.alpha))/geo.DSO; //TODO: This is wrong for spherical
    weigth=1/(weigth*weigth);
    // Get Value in the computed (U,V) and multiply by the corresponding weight.
image[idx]+=tex3D(tex, v ,
u ,
indAlpha+0.5)
*weigth;
// image[idx]=v;
}
int voxel_backprojection_parallel_spherical(float const * const projections, Geometry geo, float* result,float const * const angles,int nangles){
// mexPrintf("In fucntion COR %p \n",geo.COR);
// mexPrintf("In fucntion offOrig %p \n",geo.offOrigX);
// return 0;
/*
* Allocate texture memory on the device
*/
// copy data to CUDA memory
hipArray *d_projectiondata = 0;
const hipExtent extent = make_hipExtent(geo.nDetecU,geo.nDetecV,nangles);
hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>();
hipMalloc3DArray(&d_projectiondata, &channelDesc, extent);
cudaCheckErrors("hipMalloc3D error 3D tex");
hipMemcpy3DParms copyParams = { 0 };
copyParams.srcPtr = make_hipPitchedPtr((void*)projections, extent.width*sizeof(float), extent.width, extent.height);
copyParams.dstArray = d_projectiondata;
copyParams.extent = extent;
copyParams.kind = hipMemcpyHostToDevice;
hipMemcpy3D(©Params);
cudaCheckErrors("hipMemcpy3D fail");
// Configure texture options
tex.normalized = false;
tex.filterMode = hipFilterModeLinear;
tex.addressMode[0] = hipAddressModeBorder;
tex.addressMode[1] = hipAddressModeBorder;
tex.addressMode[2] = hipAddressModeBorder;
hipBindTextureToArray(tex, d_projectiondata, channelDesc);
cudaCheckErrors("3D texture memory bind fail");
// Allocate result image memory
size_t num_bytes = geo.nVoxelX*geo.nVoxelY*geo.nVoxelZ * sizeof(float);
float* dimage;
hipMalloc((void**)&dimage, num_bytes);
hipMemset(dimage,0,num_bytes);
cudaCheckErrors("hipMalloc fail");
// If we are going to time
bool timekernel=false;
hipEvent_t start, stop;
float elapsedTime;
if (timekernel){
hipEventCreate(&start);
hipEventRecord(start,0);
}
int divx,divy,divz;
    // empirically chosen block dimensions
divx=32;
divy=32;
divz=1;
dim3 grid((geo.nVoxelX+divx-1)/divx,
(geo.nVoxelY+divy-1)/divy,
(geo.nVoxelZ+divz-1)/divz);
dim3 block(divx,divy,divz);
Point3D deltaX,deltaY,deltaZ,xyzOrigin, offOrig, offDetec,source;
for (unsigned int i=0;i<nangles;i++){
geo.alpha=-angles[i*3];
geo.theta=-angles[i*3+1];
geo.psi =-angles[i*3+2];
computeDeltasCubeSphericalParallel(geo,i,&xyzOrigin,&deltaX,&deltaY,&deltaZ,&source);
offOrig.x=geo.offOrigX[i];
offOrig.y=geo.offOrigY[i];
offDetec.x=geo.offDetecU[i];
offDetec.y=geo.offDetecV[i];
hipLaunchKernelGGL(( kernelPixelBackprojection_parallel_spherical), dim3(grid),dim3(block), 0, 0, geo,dimage,i,geo.COR[i],deltaX,deltaY,deltaZ,xyzOrigin,offOrig,offDetec,source);
cudaCheckErrors("Kernel fail");
}
if (timekernel){
hipEventCreate(&stop);
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start,stop);
mexPrintf("%f\n" ,elapsedTime);
cudaCheckErrors("cuda Timing fail");
}
hipMemcpy(result, dimage, num_bytes, hipMemcpyDeviceToHost);
cudaCheckErrors("hipMemcpy result fail");
hipUnbindTexture(tex);
cudaCheckErrors("Unbind fail");
hipFree(dimage);
hipFreeArray(d_projectiondata);
cudaCheckErrors("hipFree d_imagedata fail");
//hipDeviceReset();
return 0;
}
void computeDeltasCubeSphericalParallel(Geometry geo, int i, Point3D* xyzorigin, Point3D* deltaX, Point3D* deltaY, Point3D* deltaZ,Point3D *S){
Point3D P, Px,Py,Pz;
// Get coords of Img(0,0,0)
P.x=-(geo.sVoxelX/2-geo.dVoxelX/2)+geo.offOrigX[i];
P.y=-(geo.sVoxelY/2-geo.dVoxelY/2)+geo.offOrigY[i];
P.z=-(geo.sVoxelZ/2-geo.dVoxelZ/2)+geo.offOrigZ[i];
// Get coords of the next voxel in each direction
Px.x=P.x+geo.dVoxelX; Py.x=P.x; Pz.x=P.x;
Px.y=P.y; Py.y=P.y+geo.dVoxelY; Pz.y=P.y;
Px.z=P.z; Py.z=P.z; Pz.z=P.z+geo.dVoxelZ;
// Rotate image (this is equivalent to rotating the source and detector): RZ RY RZ
eulerZYZT(geo,&P);
eulerZYZT(geo,&Px);
eulerZYZT(geo,&Py);
eulerZYZT(geo,&Pz);
//detector offset
P.z =P.z-geo.offDetecV[i]; P.y =P.y-geo.offDetecU[i];
Px.z =Px.z-geo.offDetecV[i]; Px.y =Px.y-geo.offDetecU[i];
Py.z =Py.z-geo.offDetecV[i]; Py.y =Py.y-geo.offDetecU[i];
Pz.z =Pz.z-geo.offDetecV[i]; Pz.y =Pz.y-geo.offDetecU[i];
//Detector Roll pitch Yaw
//
//
// first, we need to offset everything so (0,0,0) is the center of the detector
// Only X is required for that
// P.x=P.x+(geo.DSD-geo.DSO);
// Px.x=Px.x+(geo.DSD-geo.DSO);
// Py.x=Py.x+(geo.DSD-geo.DSO);
// Pz.x=Pz.x+(geo.DSD-geo.DSO);
// rollPitchYawT(geo,i,&P);
// rollPitchYawT(geo,i,&Px);
// rollPitchYawT(geo,i,&Py);
// rollPitchYawT(geo,i,&Pz);
//
// P.x=P.x-(geo.DSD-geo.DSO);
// Px.x=Px.x-(geo.DSD-geo.DSO);
// Py.x=Py.x-(geo.DSD-geo.DSO);
// Pz.x=Pz.x-(geo.DSD-geo.DSO);
// Done for P, now source
// Point3D source;
// source.x=geo.DSD; //allready offseted for rotation
// source.y=-geo.offDetecU[i];
// source.z=-geo.offDetecV[i];
// rollPitchYawT(geo,i,&source);
// source.x=source.x-(geo.DSD-geo.DSO);// source.y=source.y-auxOff.y; source.z=source.z-auxOff.z;
// mexPrintf("%f,%f,%f\n",source.x,source.y,source.z);
// Scale coords so detector pixels are 1x1
Point3D source;
source.x=geo.DSO; //already offset for rotation
source.y=-geo.offDetecU[i];
source.z=-geo.offDetecV[i];
P.z =P.z /geo.dDetecV; P.y =P.y/geo.dDetecU;
Px.z=Px.z/geo.dDetecV; Px.y=Px.y/geo.dDetecU;
Py.z=Py.z/geo.dDetecV; Py.y=Py.y/geo.dDetecU;
Pz.z=Pz.z/geo.dDetecV; Pz.y=Pz.y/geo.dDetecU;
source.z=source.z/geo.dDetecV; source.y=source.y/geo.dDetecU;
// get deltas of the changes in voxels
deltaX->x=Px.x-P.x; deltaX->y=Px.y-P.y; deltaX->z=Px.z-P.z;
deltaY->x=Py.x-P.x; deltaY->y=Py.y-P.y; deltaY->z=Py.z-P.z;
deltaZ->x=Pz.x-P.x; deltaZ->y=Pz.y-P.y; deltaZ->z=Pz.z-P.z;
*xyzorigin=P;
*S=source;
}
|
914e89d69c219445bb7349d53fe6c065d799e77f.cu
|
/*-------------------------------------------------------------------------
*
* CUDA function for backrpojection using FDK weigts for CBCT
*
*
* CODE by Ander Biguri & Sepideh Hatamikia
* ---------------------------------------------------------------------------
* ---------------------------------------------------------------------------
* Copyright (c) 2015, University of Bath and CERN- European Organization for
* Nuclear Research
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
* ---------------------------------------------------------------------------
*
* Contact: [email protected]
* Codes : https://github.com/CERN/TIGRE
* ---------------------------------------------------------------------------
*/
#define PI_2 1.57079632679489661923
#include <algorithm>
#include <cuda_runtime_api.h>
#include <cuda.h>
#include "voxel_backprojection.hpp"
#include "voxel_backprojection_parallel.hpp"
#include "voxel_backprojection_spherical.hpp"
#include "voxel_backprojection_parallel_spherical.hpp"
#include "mex.h"
#include <math.h>
// https://stackoverflow.com/questions/16282136/is-there-a-cuda-equivalent-of-perror
#define cudaCheckErrors(msg) \
do { \
cudaError_t __err = cudaGetLastError(); \
if (__err != cudaSuccess) { \
mexPrintf("%s \n",msg);\
mexErrMsgIdAndTxt("CBCT:CUDA:Atb",cudaGetErrorString(__err));\
} \
} while (0)
#define MAXTREADS 1024
/*GEOMETRY DEFINITION
*
* Detector plane, behind
* |-----------------------------|
* | |
* | |
* | |
* | |
* | +--------+ |
| / /| |
A Z | / / |*D |
| | +--------+ | |
| | | | | |
| | | *O | + |
*--->y | | | / |
/ | | |/ |
V X | +--------+ |
* |-----------------------------|
*
* *S
*
*
*
*
*
**/
texture<float, cudaTextureType3D , cudaReadModeElementType> tex;
__global__ void kernelPixelBackprojection_parallel_spherical(const Geometry geo,
float* image,
const int indAlpha,
const float COR,
const Point3D deltaX,
const Point3D deltaY,
const Point3D deltaZ,
const Point3D xyzOrigin,
const Point3D xyzOffset,
const Point3D uv0Offset,
Point3D source){
unsigned long indY = blockIdx.y * blockDim.y + threadIdx.y;
unsigned long indX = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long indZ = blockIdx.z * blockDim.z + threadIdx.z;
//Make sure we don't go out of bounds
unsigned long long idx =indZ*geo.nVoxelX*geo.nVoxelY+indY*geo.nVoxelX + indX;
if (indX>=geo.nVoxelX | indY>=geo.nVoxelY |indZ>=geo.nVoxelZ)
return;
// Geometric transformations:
//Source, scaled XYZ coordinates
// "XYZ" in the scaled coordinate system of the current point. The image is rotated with the projection angles.
Point3D P;
P.x=(xyzOrigin.x+indX*deltaX.x+indY*deltaY.x+indZ*deltaZ.x);
P.y=(xyzOrigin.y+indX*deltaX.y+indY*deltaY.y+indZ*deltaZ.y)-COR/geo.dDetecU;
P.z=(xyzOrigin.z+indX*deltaX.z+indY*deltaY.z+indZ*deltaZ.z);
source.y=P.y;
source.z=P.z;
// This is the vector defining the line from the source to the Voxel
float vectX,vectY,vectZ;
vectX=(P.x -source.x);
vectY=(P.y -source.y);
vectZ=(P.z -source.z);
// Get the coordinates in the detector UV where the mid point of the voxel is projected.
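// For this parallel-beam geometry the per-voxel "source" was given the voxel's own (y,z) above,
// so vectY = vectZ = 0 and the ray runs purely along the detector-frame x axis; t is simply where
// that ray meets the detector plane x = DSO-DSD, and (u,v) end up being the voxel's scaled (y,z)
// re-centred on the detector.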
float t=(geo.DSO-geo.DSD /*-DDO*/ - source.x)/vectX;
float y,z;
y=vectY*t+source.y;
z=vectZ*t+source.z;
float u,v;
u=y+geo.nDetecU/2;
v=z+geo.nDetecV/2;
float weigth;
float realx,realy;
realx=-geo.sVoxelX/2+geo.dVoxelX/2 +indX*geo.dVoxelX +xyzOffset.x;
realy=-geo.sVoxelY/2+geo.dVoxelY/2 +indY*geo.dVoxelY +xyzOffset.y+COR;
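// Circular-trajectory FDK-style distance weighting: U = (DSO + y*sin(alpha) - x*cos(alpha))/DSO is
// the (normalised) source-to-voxel distance along the central ray for a circular orbit, and the
// backprojected value is scaled by 1/U^2. As the TODO below notes, this does not hold for general
// spherical trajectories.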
weigth=(geo.DSO+realy*sin(geo.alpha)-realx*cos(geo.alpha))/geo.DSO; //TODO: This is wrong for spherical
weigth=1/(weigth*weigth);
// Get the value at the computed (U,V) and multiply by the corresponding weight.
image[idx]+=tex3D(tex, v ,
u ,
indAlpha+0.5)
*weigth;
// image[idx]=v;
}
int voxel_backprojection_parallel_spherical(float const * const projections, Geometry geo, float* result,float const * const angles,int nangles){
// mexPrintf("In fucntion COR %p \n",geo.COR);
// mexPrintf("In fucntion offOrig %p \n",geo.offOrigX);
// return 0;
/*
* Allocate texture memory on the device
*/
// copy data to CUDA memory
cudaArray *d_projectiondata = 0;
const cudaExtent extent = make_cudaExtent(geo.nDetecU,geo.nDetecV,nangles);
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>();
cudaMalloc3DArray(&d_projectiondata, &channelDesc, extent);
cudaCheckErrors("cudaMalloc3D error 3D tex");
cudaMemcpy3DParms copyParams = { 0 };
copyParams.srcPtr = make_cudaPitchedPtr((void*)projections, extent.width*sizeof(float), extent.width, extent.height);
copyParams.dstArray = d_projectiondata;
copyParams.extent = extent;
copyParams.kind = cudaMemcpyHostToDevice;
cudaMemcpy3D(©Params);
cudaCheckErrors("cudaMemcpy3D fail");
// Configure texture options
tex.normalized = false;
tex.filterMode = cudaFilterModeLinear;
tex.addressMode[0] = cudaAddressModeBorder;
tex.addressMode[1] = cudaAddressModeBorder;
tex.addressMode[2] = cudaAddressModeBorder;
cudaBindTextureToArray(tex, d_projectiondata, channelDesc);
cudaCheckErrors("3D texture memory bind fail");
// Allocate result image memory
size_t num_bytes = geo.nVoxelX*geo.nVoxelY*geo.nVoxelZ * sizeof(float);
float* dimage;
cudaMalloc((void**)&dimage, num_bytes);
cudaMemset(dimage,0,num_bytes);
cudaCheckErrors("cudaMalloc fail");
// If we are going to time
bool timekernel=false;
cudaEvent_t start, stop;
float elapsedTime;
if (timekernel){
cudaEventCreate(&start);
cudaEventRecord(start,0);
}
int divx,divy,divz;
// empirical block sizes
divx=32;
divy=32;
divz=1;
dim3 grid((geo.nVoxelX+divx-1)/divx,
(geo.nVoxelY+divy-1)/divy,
(geo.nVoxelZ+divz-1)/divz);
dim3 block(divx,divy,divz);
Point3D deltaX,deltaY,deltaZ,xyzOrigin, offOrig, offDetec,source;
for (unsigned int i=0;i<nangles;i++){
geo.alpha=-angles[i*3];
geo.theta=-angles[i*3+1];
geo.psi =-angles[i*3+2];
computeDeltasCubeSphericalParallel(geo,i,&xyzOrigin,&deltaX,&deltaY,&deltaZ,&source);
offOrig.x=geo.offOrigX[i];
offOrig.y=geo.offOrigY[i];
offDetec.x=geo.offDetecU[i];
offDetec.y=geo.offDetecV[i];
kernelPixelBackprojection_parallel_spherical<<<grid,block>>>(geo,dimage,i,geo.COR[i],deltaX,deltaY,deltaZ,xyzOrigin,offOrig,offDetec,source);
cudaCheckErrors("Kernel fail");
}
if (timekernel){
cudaEventCreate(&stop);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start,stop);
mexPrintf("%f\n" ,elapsedTime);
cudaCheckErrors("cuda Timing fail");
}
cudaMemcpy(result, dimage, num_bytes, cudaMemcpyDeviceToHost);
cudaCheckErrors("cudaMemcpy result fail");
cudaUnbindTexture(tex);
cudaCheckErrors("Unbind fail");
cudaFree(dimage);
cudaFreeArray(d_projectiondata);
cudaCheckErrors("cudaFree d_imagedata fail");
//cudaDeviceReset();
return 0;
}
void computeDeltasCubeSphericalParallel(Geometry geo, int i, Point3D* xyzorigin, Point3D* deltaX, Point3D* deltaY, Point3D* deltaZ,Point3D *S){
Point3D P, Px,Py,Pz;
// Get coords of Img(0,0,0)
P.x=-(geo.sVoxelX/2-geo.dVoxelX/2)+geo.offOrigX[i];
P.y=-(geo.sVoxelY/2-geo.dVoxelY/2)+geo.offOrigY[i];
P.z=-(geo.sVoxelZ/2-geo.dVoxelZ/2)+geo.offOrigZ[i];
// Get coords of the next voxel in each direction
Px.x=P.x+geo.dVoxelX; Py.x=P.x; Pz.x=P.x;
Px.y=P.y; Py.y=P.y+geo.dVoxelY; Pz.y=P.y;
Px.z=P.z; Py.z=P.z; Pz.z=P.z+geo.dVoxelZ;
// Rotate image (this is equivalent to rotating the source and detector): RZ RY RZ
eulerZYZT(geo,&P);
eulerZYZT(geo,&Px);
eulerZYZT(geo,&Py);
eulerZYZT(geo,&Pz);
//detector offset
P.z =P.z-geo.offDetecV[i]; P.y =P.y-geo.offDetecU[i];
Px.z =Px.z-geo.offDetecV[i]; Px.y =Px.y-geo.offDetecU[i];
Py.z =Py.z-geo.offDetecV[i]; Py.y =Py.y-geo.offDetecU[i];
Pz.z =Pz.z-geo.offDetecV[i]; Pz.y =Pz.y-geo.offDetecU[i];
//Detector Roll pitch Yaw
//
//
// first, we need to offset everything so (0,0,0) is the center of the detector
// Only X is required for that
// P.x=P.x+(geo.DSD-geo.DSO);
// Px.x=Px.x+(geo.DSD-geo.DSO);
// Py.x=Py.x+(geo.DSD-geo.DSO);
// Pz.x=Pz.x+(geo.DSD-geo.DSO);
// rollPitchYawT(geo,i,&P);
// rollPitchYawT(geo,i,&Px);
// rollPitchYawT(geo,i,&Py);
// rollPitchYawT(geo,i,&Pz);
//
// P.x=P.x-(geo.DSD-geo.DSO);
// Px.x=Px.x-(geo.DSD-geo.DSO);
// Py.x=Py.x-(geo.DSD-geo.DSO);
// Pz.x=Pz.x-(geo.DSD-geo.DSO);
// Done for P, now source
// Point3D source;
// source.x=geo.DSD; //allready offseted for rotation
// source.y=-geo.offDetecU[i];
// source.z=-geo.offDetecV[i];
// rollPitchYawT(geo,i,&source);
// source.x=source.x-(geo.DSD-geo.DSO);// source.y=source.y-auxOff.y; source.z=source.z-auxOff.z;
// mexPrintf("%f,%f,%f\n",source.x,source.y,source.z);
// Scale coords so detector pixels are 1x1
Point3D source;
source.x=geo.DSO; //already offset for rotation
source.y=-geo.offDetecU[i];
source.z=-geo.offDetecV[i];
P.z =P.z /geo.dDetecV; P.y =P.y/geo.dDetecU;
Px.z=Px.z/geo.dDetecV; Px.y=Px.y/geo.dDetecU;
Py.z=Py.z/geo.dDetecV; Py.y=Py.y/geo.dDetecU;
Pz.z=Pz.z/geo.dDetecV; Pz.y=Pz.y/geo.dDetecU;
source.z=source.z/geo.dDetecV; source.y=source.y/geo.dDetecU;
// get deltas of the changes in voxels
deltaX->x=Px.x-P.x; deltaX->y=Px.y-P.y; deltaX->z=Px.z-P.z;
deltaY->x=Py.x-P.x; deltaY->y=Py.y-P.y; deltaY->z=Py.z-P.z;
deltaZ->x=Pz.x-P.x; deltaZ->y=Pz.y-P.y; deltaZ->z=Pz.z-P.z;
*xyzorigin=P;
*S=source;
}
|
47010819fbc0e6451810d10062d906f7272f78ae.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <hip/hip_runtime.h>
/**
* Encrypt Program Cuda
*
* This program encrypts a file using a degree 2 formula using Cuda
* Parallelization and then decrypts the file using another degree 2
* formula.
*
* @Author: Clayton Chase Glenn
*/
#define MAX 20
#define DEBUG 0
/** Kernel Function
* First finds the Thread ID within the block of GPU Threads
* and if the Thread is Correct, it Encrypts the corresponding
* Character in the String.
**/
__global__
void encrypt(char *p, char *c, int a, int b, int n) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid < n) c[tid] = (a*p[tid] + b) % 256;
}
/** Kernel Function
* First finds the Thread ID within the block of GPU Threads
* and if the Thread is Correct, it Decrypts the corresponding
* Character in the String.
**/
__global__
void decrypt(char *p, char *c, int a, int b, int n) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid < n) c[tid] = (a*p[tid] + b) % 256;
}
/** Kernel Function
* First finds the Thread ID within the block of GPU Threads
* and if the Thread is Correct, it checks if the corresponding
* character in array a matches b.
**/
__global__
void isMatch(char *p, char *c, int *a, int n) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid < n) {
if (c[tid] != p[tid]) {
*a = 1;
}
}
}
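/* Note on isMatch: every mismatching thread writes the same value (1) to *a, so the concurrent,
unsynchronised writes are benign; the flag only ever moves from 0 to 1. */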
/**
* Helper Function
* Prints a string to standard error showing help
* for valid arguments in the executable
**/
void printerror(){
fprintf(stderr, "Invalid Arguments\n");
fprintf(stderr, "Correct Form: ./encrypt [File to Encrypt]\n");
fprintf(stderr, " or\n");
fprintf(stderr, " ./encrypt -n [2^(1:20)]\n");
exit(0);
}
/**
* Main Program
* This Program is for Homework 6 to encrypt some text or show
* the encryption method of text that is 2 to the power of N
* characters long all initialized to zero.
**/
int main(int argc, char **argv) {
// Declare a buffer of max size to start
int N = MAX;
char *buf;
// Check for immediate errors in args
if (argc < 2) printerror();
if (argc == 3 && strcmp(argv[1], "-n")) printerror();
// If args match for testing, initialize the program
if(!strcmp(argv[1], "-n") && argc == 3){
// Get total characters from args
N = strtol(argv[2], NULL, 10);
// Show error if N isn't within constraints
if(N < 1 || N > 20) printerror();
// N <- calc to 2^N as size and allocate space
N = (int)pow(2, N);
buf = (char*)malloc(N*sizeof(char));
//Initialize the buffer to ASCII '0' characters
int i = 0;
while (i < N) buf[i++] = 48;
}
// If 2 args, this means file
if(argc == 2) {
// Declare a file pointer, character array, and single character for reading
FILE *fp;
char c;
char chars[1048576];
int i = 0;
// Open the file for reading
fp = fopen(argv[1], "r");
// If file is null, file does not exist or error
if (fp == NULL) {
fprintf(stderr, "Not a Valid File\n");
return (-1);
}
// Read each character and keep within 2^20, add to array
while((c = fgetc(fp)) != EOF) {
if (i >= 1048576) {
fprintf(stderr, "File Too Large\n");
return (-1);
}
chars[i++] = c;
}
// Increment i for space and allocate space for buffer
N = i + 1;
buf = (char*)malloc(N*sizeof(char));
// Copy read elements into buffer
i = 0;
while(i < N) buf[i] = chars[i++];
// Close File, not needed anymore
fclose(fp);
}
// Initialize character arrays for encrypting (manual memset)
char h_p[N];
char h_c[N];
char h_r[N];
int i = 0;
while (i < N) {
h_p[i] = buf[i];
h_c[i] = 32;
h_r[i++] = 32;
}
// Init all other variables
char *dev_p, *dev_c, *dev_r;
int *match;
int h_match = 0;
int h_a = 171, h_b = 55;
int r_a = 3, r_b = 91;
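/* Why (r_a, r_b) = (3, 91) undoes (h_a, h_b) = (171, 55): working modulo 256,
171*3 = 513 = 2*256 + 1 == 1, so 3 is the multiplicative inverse of 171, and
3*55 + 91 = 256 == 0, so the additive part cancels as well. Hence
decrypt(encrypt(p)) = 3*(171*p + 55) + 91 = 513*p + 256 == p (mod 256). */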
hipEvent_t start1, start2, start3, startf, stop1, stop2, stop3, stopf;
hipEventCreate(&start1);
hipEventCreate(&stop1);
hipEventCreate(&start2);
hipEventCreate(&stop2);
hipEventCreate(&start3);
hipEventCreate(&stop3);
hipEventCreate(&startf);
hipEventCreate(&stopf);
float final_time1 = 0.0, final_time2 = 0.0, final_time3 = 0.0, final_timef = 0.0;
// "match" is a device-side mismatch flag; it is allocated and cleared with the other device buffers below
// Allocate memory in the GPU for the character arrays
hipMalloc(&dev_p, N*sizeof(char));
hipMalloc(&dev_c, N*sizeof(char));
hipMalloc(&dev_r, N*sizeof(char));
hipMalloc(&match, sizeof(int));
// Print N for distinguish
printf("N: %d\n", N);
// If debug on, show plain text
if(DEBUG) {
printf("Plain Text: ");
i = 0;
while(i < N) printf("%c", h_p[i++]);
printf("\n");
}
// Copy the Memory from the arrays to the array pointers
hipMemcpy(dev_p, h_p, N*sizeof(char), hipMemcpyHostToDevice);
hipMemcpy(dev_c, h_c, N*sizeof(char), hipMemcpyHostToDevice);
hipMemcpy(dev_r, h_r, N*sizeof(char), hipMemcpyHostToDevice);
// Start Total Time Record
hipEventRecord(startf);
// Encrypt the Plain Text and Record Start and Finish
hipEventRecord(start1);
hipLaunchKernelGGL(( encrypt), dim3(128), dim3(128), 0, 0, dev_p, dev_c, h_a, h_b, N);
hipEventRecord(stop1);
// Copy the results from GPU to the CPU
hipMemcpy(h_c, dev_c, N*sizeof(char), hipMemcpyDeviceToHost);
// If debug on, show encrypted text
if(DEBUG) {
printf("Encrypted Text: ");
i = 0;
while(i < N) printf("%c", h_c[i++]);
printf("\n");
}
// Synchronize all blocks and threads in GPU and get time
hipEventSynchronize(stop1);
hipEventElapsedTime(&final_time1, start1, stop1);
// Decrypt the Encrypted Text
hipEventRecord(start2);
hipLaunchKernelGGL(( decrypt), dim3(128), dim3(128), 0, 0, dev_c, dev_r, r_a, r_b, N);
hipEventRecord(stop2);
// Copy the results from GPU to CPU
hipMemcpy(h_r, dev_r, N*sizeof(char), hipMemcpyDeviceToHost);
// If debug on, show decrypted text
if(DEBUG) {
printf("Decrypted Text: ", h_r);
i = 0;
while(i < N) printf("%c", h_r[i++]);
printf("\n");
}
// Synchronize all blocks and threads in GPU and get time
hipEventSynchronize(stop2);
hipEventElapsedTime(&final_time2, start2, stop2);
// Check if Plain Text and Encrypt<-->Decrypt Text is matching by GPU
hipEventRecord(start3);
hipLaunchKernelGGL(( isMatch), dim3(128), dim3(128), 0, 0, dev_r, dev_p, match, N);
hipEventRecord(stop3);
// Copy the Match Result from GPU to CPU
hipMemcpy(&h_match, match, sizeof(int), hipMemcpyDeviceToHost);
// If match is zero, success, else, no success
if (h_match) fprintf(stdout, "Does not Match\n");
else fprintf(stdout, "Does Match\n");
// Synchronize all blocks and threads in GPU and get time
hipEventSynchronize(stop3);
hipEventElapsedTime(&final_time3, start3, stop3);
// Synchronize all blocks and threads in GPU and get time
hipEventRecord(stopf);
hipEventSynchronize(stopf);
hipEventElapsedTime(&final_timef, startf, stopf);
// Print Times
printf("Encrypt Time: %4.10f seconds\n", final_time1/1000);
printf("Decrypt Time: %4.10f seconds\n", final_time2/1000);
printf("Match Time: %4.10f seconds\n", final_time3/1000);
printf("Total Time: %4.10f seconds\n\n", final_timef/1000);
// Free the GPU memory
hipFree(dev_p);
hipFree(dev_c);
hipFree(dev_r);
}
|
47010819fbc0e6451810d10062d906f7272f78ae.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <cuda.h>
/**
* Encrypt Program Cuda
*
* This program encrypts a file using a degree 2 formula using Cuda
* Parallelization and then decrypts the file using another degree 2
* formula.
*
* @Author: Clayton Chase Glenn
*/
#define MAX 20
#define DEBUG 0
/** Kernel Function
* First finds the Thread ID within the block of GPU Threads
* and if the Thread is Correct, it Encrypts the corresponding
* Character in the String.
**/
__global__
void encrypt(char *p, char *c, int a, int b, int n) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid < n) c[tid] = (a*p[tid] + b) % 256;
}
/** Kernel Function
* First finds the Thread ID within the block of GPU Threads
* and if the Thread is Correct, it Decrypts the corresponding
* Character in the String.
**/
__global__
void decrypt(char *p, char *c, int a, int b, int n) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid < n) c[tid] = (a*p[tid] + b) % 256;
}
/** Kernel Function
* First finds the Thread ID within the block of GPU Threads
* and if the Thread is Correct, it checks if the corresponding
* character in array a matches b.
**/
__global__
void isMatch(char *p, char *c, int *a, int n) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid < n) {
if (c[tid] != p[tid]) {
*a = 1;
}
}
}
/**
* Helper Function
* Prints a string to standard error showing help
* for valid arguments in the executable
**/
void printerror(){
fprintf(stderr, "Invalid Arguments\n");
fprintf(stderr, "Correct Form: ./encrypt [File to Encrypt]\n");
fprintf(stderr, " or\n");
fprintf(stderr, " ./encrypt -n [2^(1:20)]\n");
exit(0);
}
/**
* Main Program
* This Program is for Homework 6 to encrypt some text or show
* the encryption method of text that is 2 to the power of N
* characters long all initialized to zero.
**/
int main(int argc, char **argv) {
// Declare a buffer of max size to start
int N = MAX;
char *buf;
// Check for immediate errors in args
if (argc < 2) printerror();
if (argc == 3 && strcmp(argv[1], "-n")) printerror();
// If args match for testing, initialize the program
if(!strcmp(argv[1], "-n") && argc == 3){
// Get total characters from args
N = strtol(argv[2], NULL, 10);
// Show error if N isn't within constraints
if(N < 1 || N > 20) printerror();
// N <- calc to 2^N as size and allocate space
N = (int)pow(2, N);
buf = (char*)malloc(N*sizeof(char));
//Initialize the buffer to ASCII '0' characters
int i = 0;
while (i < N) buf[i++] = 48;
}
// If 2 args, this means file
if(argc == 2) {
// Declare a file pointer, character array, and single character for reading
FILE *fp;
char c;
char chars[1048576];
int i = 0;
// Open the file for reading
fp = fopen(argv[1], "r");
// If file is null, file does not exist or error
if (fp == NULL) {
fprintf(stderr, "Not a Valid File\n");
return (-1);
}
// Read each character and keep within 2^20, add to array
while((c = fgetc(fp)) != EOF) {
if (i >= 1048576) {
fprintf(stderr, "File Too Large\n");
return (-1);
}
chars[i++] = c;
}
// Increment i for space and allocate space for buffer
N = i + 1;
buf = (char*)malloc(N*sizeof(char));
// Copy read elements into buffer
i = 0;
while(i < N) buf[i] = chars[i++];
// Close File, not needed anymore
fclose(fp);
}
// Initialize character arrays for encrypting (manual memset)
char h_p[N];
char h_c[N];
char h_r[N];
int i = 0;
while (i < N) {
h_p[i] = buf[i];
h_c[i] = 32;
h_r[i++] = 32;
}
// Init all other variables
char *dev_p, *dev_c, *dev_r;
int *match;
int h_match = 0;
int h_a = 171, h_b = 55;
int r_a = 3, r_b = 91;
cudaEvent_t start1, start2, start3, startf, stop1, stop2, stop3, stopf;
cudaEventCreate(&start1);
cudaEventCreate(&stop1);
cudaEventCreate(&start2);
cudaEventCreate(&stop2);
cudaEventCreate(&start3);
cudaEventCreate(&stop3);
cudaEventCreate(&startf);
cudaEventCreate(&stopf);
float final_time1 = 0.0, final_time2 = 0.0, final_time3 = 0.0, final_timef = 0.0;
// "match" is a device-side mismatch flag; it is allocated and cleared with the other device buffers below
// Allocate memory in the GPU for the character arrays
cudaMalloc(&dev_p, N*sizeof(char));
cudaMalloc(&dev_c, N*sizeof(char));
cudaMalloc(&dev_r, N*sizeof(char));
cudaMalloc(&match, sizeof(int));
// Print N for distinguish
printf("N: %d\n", N);
// If debug on, show plain text
if(DEBUG) {
printf("Plain Text: ");
i = 0;
while(i < N) printf("%c", h_p[i++]);
printf("\n");
}
// Copy the Memory from the arrays to the array pointers
cudaMemcpy(dev_p, h_p, N*sizeof(char), cudaMemcpyHostToDevice);
cudaMemcpy(dev_c, h_c, N*sizeof(char), cudaMemcpyHostToDevice);
cudaMemcpy(dev_r, h_r, N*sizeof(char), cudaMemcpyHostToDevice);
// Start Total Time Record
cudaEventRecord(startf);
// Encrypt the Plain Text and Record Start and Finish
cudaEventRecord(start1);
encrypt<<<128, 128>>>(dev_p, dev_c, h_a, h_b, N);
cudaEventRecord(stop1);
// Copy the results from GPU to the CPU
cudaMemcpy(h_c, dev_c, N*sizeof(char), cudaMemcpyDeviceToHost);
// If debug on, show encrypted text
if(DEBUG) {
printf("Encrypted Text: ");
i = 0;
while(i < N) printf("%c", h_c[i++]);
printf("\n");
}
// Synchronize all blocks and threads in GPU and get time
cudaEventSynchronize(stop1);
cudaEventElapsedTime(&final_time1, start1, stop1);
// Decrypt the Encrypted Text
cudaEventRecord(start2);
decrypt<<<128, 128>>>(dev_c, dev_r, r_a, r_b, N);
cudaEventRecord(stop2);
// Copy the results from GPU to CPU
cudaMemcpy(h_r, dev_r, N*sizeof(char), cudaMemcpyDeviceToHost);
// If debug on, show decrypted text
if(DEBUG) {
printf("Decrypted Text: ", h_r);
i = 0;
while(i < N) printf("%c", h_r[i++]);
printf("\n");
}
// Synchronize all blocks and threads in GPU and get time
cudaEventSynchronize(stop2);
cudaEventElapsedTime(&final_time2, start2, stop2);
// Check if Plain Text and Encrypt<-->Decrypt Text is matching by GPU
cudaEventRecord(start3);
isMatch<<<128, 128>>>(dev_r, dev_p, match, N);
cudaEventRecord(stop3);
// Copy the Match Result from GPU to CPU
cudaMemcpy(&h_match, match, sizeof(int), cudaMemcpyDeviceToHost);
// If match is zero, success, else, no success
if (h_match) fprintf(stdout, "Does not Match\n");
else fprintf(stdout, "Does Match\n");
// Synchronize all blocks and threads in GPU and get time
cudaEventSynchronize(stop3);
cudaEventElapsedTime(&final_time3, start3, stop3);
// Synchronize all blocks and threads in GPU and get time
cudaEventRecord(stopf);
cudaEventSynchronize(stopf);
cudaEventElapsedTime(&final_timef, startf, stopf);
// Print Times
printf("Encrypt Time: %4.10f seconds\n", final_time1/1000);
printf("Decrypt Time: %4.10f seconds\n", final_time2/1000);
printf("Match Time: %4.10f seconds\n", final_time3/1000);
printf("Total Time: %4.10f seconds\n\n", final_timef/1000);
// Free the GPU memory
cudaFree(dev_p);
cudaFree(dev_c);
cudaFree(dev_r);
}
|
866e90c833c1cafb6f604515b2273ad74c42bdfc.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/ClassNLLCriterion.cu"
#else
void THNN_(ClassNLLCriterion_updateOutput)(
THCState *state,
THCTensor *input,
THCIndexTensor *target,
THCTensor *output,
int64_t reduction,
THCTensor *weights,
THCTensor *total_weight,
int64_t ignore_index) {
if (THCIndexTensor_(nDimensionLegacyNoScalars)(state, target) > 1) {
THError("multi-target not supported");
}
int n_dims = THCTensor_(nDimensionLegacyNoScalars)(state, input);
int n_classes = THCTensor_(sizeLegacyNoScalars)(state, input, n_dims - 1);
ignore_index -= TH_INDEX_BASE;
if (weights) {
THCUNN_assertSameGPU(
state, 5, input, target, weights, output, total_weight
);
} else {
THCUNN_assertSameGPU(
state, 4, input, target, output, total_weight
);
}
THArgCheck(!input->is_empty() && (n_dims <= 2 && n_dims > 0), 2, "non-empty vector or matrix expected");
int64_t batch_size = n_dims == 1 ? 1 : THCTensor_(sizeLegacyNoScalars)(state, input, 0);
int64_t num_targets = THCudaLongTensor_sizeLegacyNoScalars(state, target, 0);
THArgCheck(batch_size == num_targets,
2, "mismatch between the batch size of input (%ld) and that of target (%ld)",
batch_size, num_targets);
if (weights && THCTensor_(nElement)(state, weights) != n_classes) {
THCDescBuff s1 = THCTensor_(sizeDesc)(state, weights);
THError("weight tensor should be defined either for all %d classes or no classes"
" but got weight tensor of shape: %s", n_classes, s1.str);
}
if (reduction == Reduction::None && n_dims == 2) {
THCTensor_(resize1d)(state, output, batch_size);
if (weights) {
weights = THCTensor_(newContiguous)(state, weights);
}
hipLaunchKernelGGL(( ClassNLLCriterion_updateOutput_no_reduce_kernel<scalar_t>)
, dim3(GET_BLOCKS(batch_size)), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state),
batch_size,
toDeviceTensor<scalar_t, 2>(state, input),
toDeviceTensor<THCIndex_t, 1>(state, target),
toDeviceTensor<scalar_t, 1>(state, output),
weights ? THCTensor_(data)(state, weights) : NULL,
n_classes,
ignore_index);
THCudaCheck(hipGetLastError());
if (weights) {
THCTensor_(free)(state, weights);
}
return;
}
THCTensor_(resize1d)(state, output, 1);
THCTensor_(resize1d)(state, total_weight, 1);
input = THCTensor_(newContiguous)(state, input);
weights = weights ? THCTensor_(newContiguous)(state, weights) : NULL;
target = THCIndexTensor_(newContiguous)(state, target);
scalar_t *input_data = THCTensor_(data)(state, input);
scalar_t *weights_data = weights ? THCTensor_(data)(state, weights) : NULL;
THCIndex_t *target_data = THCIndexTensor_(data)(state, target);
scalar_t *output_data = THCTensor_(data)(state, output);
scalar_t *total_weight_data = THCTensor_(data)(state, total_weight);
if (THCTensor_(nDimensionLegacyNoScalars)(state, input) == 1) {
hipLaunchKernelGGL(( cunn_ClassNLLCriterion_updateOutput_kernel1<scalar_t>)
, dim3(1), dim3(1), 0, THCState_getCurrentStream(state),
output_data,
total_weight_data,
input_data,
target_data,
weights_data,
reduction == Reduction::ElementwiseMean,
n_classes,
ignore_index
);
} else if (THCTensor_(nDimensionLegacyNoScalars)(state, input) == 2) {
hipLaunchKernelGGL(( cunn_ClassNLLCriterion_updateOutput_kernel<scalar_t, accreal>)
, dim3(1), dim3(NTHREADS), 0, THCState_getCurrentStream(state),
output_data,
total_weight_data,
input_data,
target_data,
weights_data,
reduction == Reduction::ElementwiseMean,
THCTensor_(size)(state, input, 0),
THCTensor_(size)(state, input, 1),
n_classes,
ignore_index
);
}
THCudaCheck(hipGetLastError());
if (weights) {
THCTensor_(free)(state, weights);
}
THCIndexTensor_(free)(state, target);
THCTensor_(free)(state, input);
}
void THNN_(ClassNLLCriterion_updateGradInput)(
THCState *state,
THCTensor *input,
THCIndexTensor *target,
THCTensor *gradOutput,
THCTensor *gradInput,
int64_t reduction,
THCTensor *weights,
THCTensor *total_weight,
int64_t ignore_index) {
if (THCIndexTensor_(nDimensionLegacyNoScalars)(state, target) > 1) {
THError("multi-target not supported");
}
int n_dims = THCTensor_(nDimensionLegacyNoScalars)(state, input);
int n_classes = THCTensor_(size)(state, input, n_dims - 1);
THCTensor_(resizeAs)(state, gradInput, input);
THCTensor_(zero)(state, gradInput);
THArgCheck(THCTensor_(isContiguous)(state, gradInput), 4, "gradInput must be contiguous");
if (weights) {
THCUNN_assertSameGPU(
state, 5, weights, input, target, gradInput, total_weight
);
}
else {
THCUNN_assertSameGPU(
state, 4, input, target, gradInput, total_weight
);
}
THArgCheck(!input->is_empty() && (n_dims <= 2 && n_dims > 0), 2, "non-empty vector or matrix expected");
int64_t batch_size = n_dims == 1 ? 1 : THCTensor_(size)(state, input, 0);
int64_t num_targets = THCudaLongTensor_sizeLegacyNoScalars(state, target, 0);
THArgCheck(batch_size == num_targets,
2, "mismatch between the batch size of input (%ld) and that of target (%ld)",
batch_size, num_targets);
if (weights && THCTensor_(nElement)(state, weights) != n_classes) {
THError("weight tensor should be defined either for all or no classes");
}
if (reduction == Reduction::None && n_dims == 2) {
THCUNN_check_dim_size(state, gradOutput, 1, 0, batch_size);
if (weights) {
weights = THCTensor_(newContiguous)(state, weights);
}
hipLaunchKernelGGL(( ClassNLLCriterion_updateGradInput_no_reduce_kernel<scalar_t>)
, dim3(GET_BLOCKS(batch_size)), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state),
batch_size,
toDeviceTensor<THCIndex_t, 1>(state, target),
toDeviceTensor<scalar_t, 1>(state, gradOutput),
toDeviceTensor<scalar_t, 2>(state, gradInput),
weights ? THCTensor_(data)(state, weights) : NULL,
n_classes,
ignore_index);
THCudaCheck(hipGetLastError());
if (weights) {
THCTensor_(free)(state, weights);
}
return;
}
ignore_index -= TH_INDEX_BASE;
weights = weights ? THCTensor_(newContiguous)(state, weights) : NULL;
target = THCIndexTensor_(newContiguous)(state, target);
THCUNN_check_dim_size(state, gradOutput, 1, 0, 1);
scalar_t *gradOutput_data = THCTensor_(data)(state, gradOutput);
scalar_t *weights_data = weights ? THCTensor_(data)(state, weights) : NULL;
scalar_t *gradInput_data = THCTensor_(data)(state, gradInput);
THCIndex_t *target_data = THCIndexTensor_(data)(state, target);
scalar_t *total_weight_data = THCTensor_(data)(state, total_weight);
if (THCTensor_(nDimensionLegacyNoScalars)(state, input) == 1) {
hipLaunchKernelGGL(( cunn_ClassNLLCriterion_updateGradInput_kernel1<scalar_t>)
, dim3(1), dim3(1), 0, THCState_getCurrentStream(state),
gradInput_data,
gradOutput_data,
weights_data,
target_data,
total_weight_data,
reduction == Reduction::ElementwiseMean,
n_classes,
ignore_index
);
} else {
hipLaunchKernelGGL(( cunn_ClassNLLCriterion_updateGradInput_kernel<scalar_t>)
, dim3(1), dim3(NTHREADS), 0, THCState_getCurrentStream(state),
gradInput_data,
gradOutput_data,
target_data,
weights_data,
total_weight_data,
reduction == Reduction::ElementwiseMean,
THCTensor_(size)(state, input, 0),
THCTensor_(size)(state, input, 1),
n_classes,
ignore_index
);
}
THCudaCheck(hipGetLastError());
if (weights) {
THCTensor_(free)(state, weights);
}
THCIndexTensor_(free)(state, target);
}
#endif
|
866e90c833c1cafb6f604515b2273ad74c42bdfc.cu
|
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/ClassNLLCriterion.cu"
#else
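// ClassNLLCriterion_updateOutput computes the negative log-likelihood loss: "input" holds
// log-probabilities (a vector for one sample or a batch x classes matrix), "target" holds class
// indices, "weights" optionally rescales each class, and targets equal to ignore_index contribute
// nothing. "reduction" selects per-sample outputs (None) or a single scalar (a sum, or a weighted
// mean when reduction == ElementwiseMean), with total_weight returning the sum of weights used.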
void THNN_(ClassNLLCriterion_updateOutput)(
THCState *state,
THCTensor *input,
THCIndexTensor *target,
THCTensor *output,
int64_t reduction,
THCTensor *weights,
THCTensor *total_weight,
int64_t ignore_index) {
if (THCIndexTensor_(nDimensionLegacyNoScalars)(state, target) > 1) {
THError("multi-target not supported");
}
int n_dims = THCTensor_(nDimensionLegacyNoScalars)(state, input);
int n_classes = THCTensor_(sizeLegacyNoScalars)(state, input, n_dims - 1);
ignore_index -= TH_INDEX_BASE;
if (weights) {
THCUNN_assertSameGPU(
state, 5, input, target, weights, output, total_weight
);
} else {
THCUNN_assertSameGPU(
state, 4, input, target, output, total_weight
);
}
THArgCheck(!input->is_empty() && (n_dims <= 2 && n_dims > 0), 2, "non-empty vector or matrix expected");
int64_t batch_size = n_dims == 1 ? 1 : THCTensor_(sizeLegacyNoScalars)(state, input, 0);
int64_t num_targets = THCudaLongTensor_sizeLegacyNoScalars(state, target, 0);
THArgCheck(batch_size == num_targets,
2, "mismatch between the batch size of input (%ld) and that of target (%ld)",
batch_size, num_targets);
if (weights && THCTensor_(nElement)(state, weights) != n_classes) {
THCDescBuff s1 = THCTensor_(sizeDesc)(state, weights);
THError("weight tensor should be defined either for all %d classes or no classes"
" but got weight tensor of shape: %s", n_classes, s1.str);
}
if (reduction == Reduction::None && n_dims == 2) {
THCTensor_(resize1d)(state, output, batch_size);
if (weights) {
weights = THCTensor_(newContiguous)(state, weights);
}
ClassNLLCriterion_updateOutput_no_reduce_kernel<scalar_t>
<<<GET_BLOCKS(batch_size), CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state)>>>(
batch_size,
toDeviceTensor<scalar_t, 2>(state, input),
toDeviceTensor<THCIndex_t, 1>(state, target),
toDeviceTensor<scalar_t, 1>(state, output),
weights ? THCTensor_(data)(state, weights) : NULL,
n_classes,
ignore_index);
THCudaCheck(cudaGetLastError());
if (weights) {
THCTensor_(free)(state, weights);
}
return;
}
THCTensor_(resize1d)(state, output, 1);
THCTensor_(resize1d)(state, total_weight, 1);
input = THCTensor_(newContiguous)(state, input);
weights = weights ? THCTensor_(newContiguous)(state, weights) : NULL;
target = THCIndexTensor_(newContiguous)(state, target);
scalar_t *input_data = THCTensor_(data)(state, input);
scalar_t *weights_data = weights ? THCTensor_(data)(state, weights) : NULL;
THCIndex_t *target_data = THCIndexTensor_(data)(state, target);
scalar_t *output_data = THCTensor_(data)(state, output);
scalar_t *total_weight_data = THCTensor_(data)(state, total_weight);
if (THCTensor_(nDimensionLegacyNoScalars)(state, input) == 1) {
cunn_ClassNLLCriterion_updateOutput_kernel1<scalar_t>
<<<1, 1, 0, THCState_getCurrentStream(state)>>>(
output_data,
total_weight_data,
input_data,
target_data,
weights_data,
reduction == Reduction::ElementwiseMean,
n_classes,
ignore_index
);
} else if (THCTensor_(nDimensionLegacyNoScalars)(state, input) == 2) {
cunn_ClassNLLCriterion_updateOutput_kernel<scalar_t, accreal>
<<<1, NTHREADS, 0, THCState_getCurrentStream(state)>>>(
output_data,
total_weight_data,
input_data,
target_data,
weights_data,
reduction == Reduction::ElementwiseMean,
THCTensor_(size)(state, input, 0),
THCTensor_(size)(state, input, 1),
n_classes,
ignore_index
);
}
THCudaCheck(cudaGetLastError());
if (weights) {
THCTensor_(free)(state, weights);
}
THCIndexTensor_(free)(state, target);
THCTensor_(free)(state, input);
}
void THNN_(ClassNLLCriterion_updateGradInput)(
THCState *state,
THCTensor *input,
THCIndexTensor *target,
THCTensor *gradOutput,
THCTensor *gradInput,
int64_t reduction,
THCTensor *weights,
THCTensor *total_weight,
int64_t ignore_index) {
if (THCIndexTensor_(nDimensionLegacyNoScalars)(state, target) > 1) {
THError("multi-target not supported");
}
int n_dims = THCTensor_(nDimensionLegacyNoScalars)(state, input);
int n_classes = THCTensor_(size)(state, input, n_dims - 1);
THCTensor_(resizeAs)(state, gradInput, input);
THCTensor_(zero)(state, gradInput);
THArgCheck(THCTensor_(isContiguous)(state, gradInput), 4, "gradInput must be contiguous");
if (weights) {
THCUNN_assertSameGPU(
state, 5, weights, input, target, gradInput, total_weight
);
}
else {
THCUNN_assertSameGPU(
state, 4, input, target, gradInput, total_weight
);
}
THArgCheck(!input->is_empty() && (n_dims <= 2 && n_dims > 0), 2, "non-empty vector or matrix expected");
int64_t batch_size = n_dims == 1 ? 1 : THCTensor_(size)(state, input, 0);
int64_t num_targets = THCudaLongTensor_sizeLegacyNoScalars(state, target, 0);
THArgCheck(batch_size == num_targets,
2, "mismatch between the batch size of input (%ld) and that of target (%ld)",
batch_size, num_targets);
if (weights && THCTensor_(nElement)(state, weights) != n_classes) {
THError("weight tensor should be defined either for all or no classes");
}
if (reduction == Reduction::None && n_dims == 2) {
THCUNN_check_dim_size(state, gradOutput, 1, 0, batch_size);
if (weights) {
weights = THCTensor_(newContiguous)(state, weights);
}
ClassNLLCriterion_updateGradInput_no_reduce_kernel<scalar_t>
<<<GET_BLOCKS(batch_size), CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state)>>>(
batch_size,
toDeviceTensor<THCIndex_t, 1>(state, target),
toDeviceTensor<scalar_t, 1>(state, gradOutput),
toDeviceTensor<scalar_t, 2>(state, gradInput),
weights ? THCTensor_(data)(state, weights) : NULL,
n_classes,
ignore_index);
THCudaCheck(cudaGetLastError());
if (weights) {
THCTensor_(free)(state, weights);
}
return;
}
ignore_index -= TH_INDEX_BASE;
weights = weights ? THCTensor_(newContiguous)(state, weights) : NULL;
target = THCIndexTensor_(newContiguous)(state, target);
THCUNN_check_dim_size(state, gradOutput, 1, 0, 1);
scalar_t *gradOutput_data = THCTensor_(data)(state, gradOutput);
scalar_t *weights_data = weights ? THCTensor_(data)(state, weights) : NULL;
scalar_t *gradInput_data = THCTensor_(data)(state, gradInput);
THCIndex_t *target_data = THCIndexTensor_(data)(state, target);
scalar_t *total_weight_data = THCTensor_(data)(state, total_weight);
if (THCTensor_(nDimensionLegacyNoScalars)(state, input) == 1) {
cunn_ClassNLLCriterion_updateGradInput_kernel1<scalar_t>
<<<1, 1, 0, THCState_getCurrentStream(state)>>>(
gradInput_data,
gradOutput_data,
weights_data,
target_data,
total_weight_data,
reduction == Reduction::ElementwiseMean,
n_classes,
ignore_index
);
} else {
cunn_ClassNLLCriterion_updateGradInput_kernel<scalar_t>
<<<1, NTHREADS, 0, THCState_getCurrentStream(state)>>>(
gradInput_data,
gradOutput_data,
target_data,
weights_data,
total_weight_data,
reduction == Reduction::ElementwiseMean,
THCTensor_(size)(state, input, 0),
THCTensor_(size)(state, input, 1),
n_classes,
ignore_index
);
}
THCudaCheck(cudaGetLastError());
if (weights) {
THCTensor_(free)(state, weights);
}
THCIndexTensor_(free)(state, target);
}
#endif
|
3cf3f497916d962a79dd9aa3e4f5db7421fa65b1.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// (c) Copyright 2013 Lev Barash, Landau Institute for Theoretical Physics, Russian Academy of Sciences
// This is supplement to the paper:
// L.Yu. Barash, L.N. Shchur, "PRAND: GPU accelerated parallel random number generation library: Using most reliable algorithms and applying parallelism of modern GPUs and CPUs".
// e-mail: barash @ itp.ac.ru (remove space)
#include<stdio.h>
#define gq58x3_CUDA_CALL(x) do { if((x) != hipSuccess) { printf("Error: %s at %s:%d\n",hipGetErrorString(hipGetLastError()),__FILE__,__LINE__); exit(1);}} while(0)
#define gq58x3_BLOCKS 128
#define gq58x3_THREADS 192
#define gq58x3_ARRAY_SECTIONS (gq58x3_BLOCKS*gq58x3_THREADS/12)
#define gq58x3_k 8
#define gq58x3_q 48
#define gq58x3_g 288230374541099008ULL
#define gq58x3_gdiv8 36028796817637376ULL
typedef unsigned long long lt;
typedef struct{
lt xN[12] __attribute__ ((aligned(16))),
xP[12] __attribute__ ((aligned(16)));
} gq58x3_state;
typedef gq58x3_state gq58x3_sse_state;
lt gq58x3_sse_Consts[10] __attribute__ ((aligned(16))) =
{13835057977972752384ULL,13835057977972752384ULL,1610612736ULL,1610612736ULL,
288230371923853311ULL,288230371923853311ULL,288230374541099008ULL,288230374541099008ULL,
18157383382357244923ULL,18157383382357244923ULL};
extern "C" __host__ unsigned int gq58x3_sse_generate_(gq58x3_sse_state* state){
unsigned output;
asm volatile("movaps (%3),%%xmm0\n" \
"movaps (%2),%%xmm1\n" \
"movaps (%1),%%xmm4\n" \
"movaps %%xmm4,(%2)\n" \
"psllq $3,%%xmm4\n" \
"paddq %%xmm0,%%xmm4\n" \
"psllq $4,%%xmm1\n" \
"psubq %%xmm1,%%xmm4\n" \
"psllq $1,%%xmm1\n" \
"psubq %%xmm1,%%xmm4\n" \
"movaps %%xmm4,%%xmm1\n" \
"psrlq $58,%%xmm1\n" \
"psllq $29,%%xmm1\n" \
"movaps %%xmm1,%%xmm3\n" \
"psllq $1,%%xmm3\n" \
"paddq %%xmm1,%%xmm3\n" \
"psllq $29,%%xmm1\n" \
"psubq %%xmm1,%%xmm4\n" \
"paddq %%xmm3,%%xmm4\n" \
"movaps %%xmm4,%%xmm1\n" \
"paddq 16(%3),%%xmm1\n" \
"pshufd $245,%%xmm1,%%xmm3\n" \
"pcmpgtd 32(%3),%%xmm3\n" \
"pand 48(%3),%%xmm3\n" \
"psubq %%xmm3,%%xmm4\n" \
"movaps %%xmm4,(%1)\n" \
"movaps %%xmm4,%%xmm1\n" \
"paddq %%xmm4,%%xmm1\n" \
"paddq %%xmm4,%%xmm1\n" \
"psrlq $29,%%xmm1\n" \
"paddq %%xmm1,%%xmm4\n" \
"movaps 16(%2),%%xmm1\n" \
"movaps 16(%1),%%xmm5\n" \
"movaps %%xmm5,16(%2)\n" \
"psllq $3,%%xmm5\n" \
"paddq %%xmm0,%%xmm5\n" \
"psllq $4,%%xmm1\n" \
"psubq %%xmm1,%%xmm5\n" \
"psllq $1,%%xmm1\n" \
"psubq %%xmm1,%%xmm5\n" \
"movaps %%xmm5,%%xmm1\n" \
"psrlq $58,%%xmm1\n" \
"psllq $29,%%xmm1\n" \
"movaps %%xmm1,%%xmm3\n" \
"psllq $1,%%xmm3\n" \
"paddq %%xmm1,%%xmm3\n" \
"psllq $29,%%xmm1\n" \
"psubq %%xmm1,%%xmm5\n" \
"paddq %%xmm3,%%xmm5\n" \
"movaps %%xmm5,%%xmm1\n" \
"paddq 16(%3),%%xmm1\n" \
"pshufd $245,%%xmm1,%%xmm3\n" \
"pcmpgtd 32(%3),%%xmm3\n" \
"pand 48(%3),%%xmm3\n" \
"psubq %%xmm3,%%xmm5\n" \
"movaps %%xmm5,16(%1)\n" \
"movaps %%xmm5,%%xmm1\n" \
"paddq %%xmm5,%%xmm1\n" \
"paddq %%xmm5,%%xmm1\n" \
"psrlq $29,%%xmm1\n" \
"paddq %%xmm1,%%xmm5\n" \
"movaps 32(%2),%%xmm1\n" \
"movaps 32(%1),%%xmm6\n" \
"movaps %%xmm6,32(%2)\n" \
"psllq $3,%%xmm6\n" \
"paddq %%xmm0,%%xmm6\n" \
"psllq $4,%%xmm1\n" \
"psubq %%xmm1,%%xmm6\n" \
"psllq $1,%%xmm1\n" \
"psubq %%xmm1,%%xmm6\n" \
"movaps %%xmm6,%%xmm1\n" \
"psrlq $58,%%xmm1\n" \
"psllq $29,%%xmm1\n" \
"movaps %%xmm1,%%xmm3\n" \
"psllq $1,%%xmm3\n" \
"paddq %%xmm1,%%xmm3\n" \
"psllq $29,%%xmm1\n" \
"psubq %%xmm1,%%xmm6\n" \
"paddq %%xmm3,%%xmm6\n" \
"movaps %%xmm6,%%xmm1\n" \
"paddq 16(%3),%%xmm1\n" \
"pshufd $245,%%xmm1,%%xmm3\n" \
"pcmpgtd 32(%3),%%xmm3\n" \
"pand 48(%3),%%xmm3\n" \
"psubq %%xmm3,%%xmm6\n" \
"movaps %%xmm6,32(%1)\n" \
"movaps %%xmm6,%%xmm1\n" \
"paddq %%xmm6,%%xmm1\n" \
"paddq %%xmm6,%%xmm1\n" \
"psrlq $29,%%xmm1\n" \
"paddq %%xmm1,%%xmm6\n" \
"movaps 48(%2),%%xmm1\n" \
"movaps 48(%1),%%xmm7\n" \
"movaps %%xmm7,48(%2)\n" \
"psllq $3,%%xmm7\n" \
"paddq %%xmm0,%%xmm7\n" \
"psllq $4,%%xmm1\n" \
"psubq %%xmm1,%%xmm7\n" \
"psllq $1,%%xmm1\n" \
"psubq %%xmm1,%%xmm7\n" \
"movaps %%xmm7,%%xmm1\n" \
"psrlq $58,%%xmm1\n" \
"psllq $29,%%xmm1\n" \
"movaps %%xmm1,%%xmm3\n" \
"psllq $1,%%xmm3\n" \
"paddq %%xmm1,%%xmm3\n" \
"psllq $29,%%xmm1\n" \
"psubq %%xmm1,%%xmm7\n" \
"paddq %%xmm3,%%xmm7\n" \
"movaps %%xmm7,%%xmm1\n" \
"paddq 16(%3),%%xmm1\n" \
"pshufd $245,%%xmm1,%%xmm3\n" \
"pcmpgtd 32(%3),%%xmm3\n" \
"pand 48(%3),%%xmm3\n" \
"psubq %%xmm3,%%xmm7\n" \
"movaps %%xmm7,48(%1)\n" \
"movaps %%xmm7,%%xmm1\n" \
"paddq %%xmm7,%%xmm1\n" \
"paddq %%xmm7,%%xmm1\n" \
"psrlq $29,%%xmm1\n" \
"paddq %%xmm1,%%xmm7\n" \
"psrlq $55,%%xmm4\n" \
"psrlq $55,%%xmm5\n" \
"psrlq $55,%%xmm6\n" \
"psrlq $55,%%xmm7\n" \
"packssdw %%xmm5,%%xmm4\n" \
"packssdw %%xmm7,%%xmm6\n" \
"movaps 64(%2),%%xmm1\n" \
"movaps 64(%1),%%xmm5\n" \
"movaps %%xmm5,64(%2)\n" \
"psllq $3,%%xmm5\n" \
"paddq %%xmm0,%%xmm5\n" \
"psllq $4,%%xmm1\n" \
"psubq %%xmm1,%%xmm5\n" \
"psllq $1,%%xmm1\n" \
"psubq %%xmm1,%%xmm5\n" \
"movaps %%xmm5,%%xmm1\n" \
"psrlq $58,%%xmm1\n" \
"psllq $29,%%xmm1\n" \
"movaps %%xmm1,%%xmm3\n" \
"psllq $1,%%xmm3\n" \
"paddq %%xmm1,%%xmm3\n" \
"psllq $29,%%xmm1\n" \
"psubq %%xmm1,%%xmm5\n" \
"paddq %%xmm3,%%xmm5\n" \
"movaps %%xmm5,%%xmm1\n" \
"paddq 16(%3),%%xmm1\n" \
"pshufd $245,%%xmm1,%%xmm3\n" \
"pcmpgtd 32(%3),%%xmm3\n" \
"pand 48(%3),%%xmm3\n" \
"psubq %%xmm3,%%xmm5\n" \
"movaps %%xmm5,64(%1)\n" \
"movaps %%xmm5,%%xmm1\n" \
"paddq %%xmm5,%%xmm1\n" \
"paddq %%xmm5,%%xmm1\n" \
"psrlq $29,%%xmm1\n" \
"paddq %%xmm1,%%xmm5\n" \
"movaps 80(%2),%%xmm1\n" \
"movaps 80(%1),%%xmm7\n" \
"movaps %%xmm7,80(%2)\n" \
"psllq $3,%%xmm7\n" \
"paddq %%xmm0,%%xmm7\n" \
"psllq $4,%%xmm1\n" \
"psubq %%xmm1,%%xmm7\n" \
"psllq $1,%%xmm1\n" \
"psubq %%xmm1,%%xmm7\n" \
"movaps %%xmm7,%%xmm1\n" \
"psrlq $58,%%xmm1\n" \
"psllq $29,%%xmm1\n" \
"movaps %%xmm1,%%xmm3\n" \
"psllq $1,%%xmm3\n" \
"paddq %%xmm1,%%xmm3\n" \
"psllq $29,%%xmm1\n" \
"psubq %%xmm1,%%xmm7\n" \
"paddq %%xmm3,%%xmm7\n" \
"movaps %%xmm7,%%xmm1\n" \
"paddq 16(%3),%%xmm1\n" \
"pshufd $245,%%xmm1,%%xmm3\n" \
"pcmpgtd 32(%3),%%xmm3\n" \
"pand 48(%3),%%xmm3\n" \
"psubq %%xmm3,%%xmm7\n" \
"movaps %%xmm7,80(%1)\n" \
"movaps %%xmm7,%%xmm1\n" \
"paddq %%xmm7,%%xmm1\n" \
"paddq %%xmm7,%%xmm1\n" \
"psrlq $29,%%xmm1\n" \
"paddq %%xmm1,%%xmm7\n" \
"psrlq $55,%%xmm5\n" \
"psrlq $55,%%xmm7\n" \
"packssdw %%xmm7,%%xmm5\n" \
"packssdw %%xmm4,%%xmm4\n" \
"packssdw %%xmm6,%%xmm6\n" \
"packssdw %%xmm5,%%xmm5\n" \
"packsswb %%xmm4,%%xmm4\n" \
"packsswb %%xmm6,%%xmm6\n" \
"packsswb %%xmm5,%%xmm5\n" \
"pand 64(%3),%%xmm4\n" \
"pslld $6,%%xmm4\n" \
"pxor %%xmm4,%%xmm5\n" \
"pslld $3,%%xmm6\n" \
"pxor %%xmm6,%%xmm5\n" \
"movd %%xmm5,%0\n" \
"":"=&r"(output):"r"(state->xN),"r"(state->xP),"r"(gq58x3_sse_Consts));
return output;
}
extern "C" __device__ __host__ void gq58x3_get_sse_state_(gq58x3_state* state,gq58x3_sse_state* sse_state){
int i; for(i=0;i<12;i++) {sse_state->xN[i]=state->xN[i]; sse_state->xP[i]=state->xP[i];}
}
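/* The modulus used below is g = 2^58 - 2^30 - 2^29 = 288230374541099008, so 2^58 == 2^30 + 2^29 (mod g).
gq58x3_mod_g exploits this: with F = x>>58 it replaces F*2^58 by F*(2^30 + 2^29), and the single
conditional subtraction at the end suffices because the partially reduced value is already well below 2g. */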
extern "C" __device__ __host__ lt gq58x3_mod_g(lt x){ // returns x (mod g)
lt F,G; F = (x>>58); G = x-(F<<58)+(F<<29)+(F<<30);
return ((G>=gq58x3_g) ? (G-gq58x3_g) : G);
}
extern "C" __device__ __host__ lt gq58x3_MyMult(lt A,lt B){ // returns AB (mod gq58x3_g), where it is implied that A,B<gq58x3_g;
lt A1,A0,B1,B0,curr,x,m;
A1=A>>32; B1=B>>32; A0=A-(A1<<32)+(12*A1); B0=B-(B1<<32)+(12*B1);
if(A0>>32) {A0-=4294967284ULL; A1++;}
if(B0>>32) {B0-=4294967284ULL; B1++;}
curr=A1*B0+B1*A0; m=curr>>26; x=curr-(m<<26);
curr=((3*m+(x<<4))<<28)+(gq58x3_g-12*x)+(144*A1*B1)+(gq58x3_mod_g(A0*B0));
return gq58x3_mod_g(curr);
}
extern "C" __device__ __host__ lt gq58x3_CNext2(lt N,lt P,lt myk,lt myq){ // returns (myk*N-myq*P) (mod gq58x3_g)
lt curr1,curr2;
curr1=gq58x3_MyMult(myk,N); curr2=gq58x3_MyMult(myq,P);
if(curr1>=curr2) return (curr1-curr2); else return (gq58x3_g+curr1-curr2);
}
extern "C" __device__ __host__ lt gq58x3_CNext(lt N,lt P){ // returns (8N-48P) (mod gq58x3_g)
return gq58x3_mod_g((N+6*(gq58x3_g-P))<<3);
}
extern "C" __device__ __host__ lt gq58x3_GetNextN(lt x0,lt x1,unsigned int n){ //returns x_{2^n}
lt myk=gq58x3_k,myq=gq58x3_q,i,x=x1;
for(i=0;i<n;i++){
x=gq58x3_CNext2(x,x0,myk,myq);
myk=gq58x3_CNext2(myk,2,myk,myq);
myq=gq58x3_CNext2(myq,0,myq,0);
}
return x;
}
extern "C" __device__ __host__ lt gq58x3_GetNextAny(lt x0,lt x1,lt N64,lt N0){ //N=2^64*N64+N0+1
lt i,xp=x0,xn=x1,xpnew,xnnew,shift=0;
i=N0; while(i>0){
if(i%2==1){ // xp,xn ----> 2^shift
xpnew=gq58x3_GetNextN(xp,xn,shift);
xnnew=gq58x3_GetNextN(xn,gq58x3_CNext(xn,xp),shift);
xp=xpnew; xn=xnnew;
}
i/=2; shift++;
}
i=N64; shift=64; while(i>0){
if(i%2==1){ // xp,xn ----> 2^shift
xpnew=gq58x3_GetNextN(xp,xn,shift);
xnnew=gq58x3_GetNextN(xn,gq58x3_CNext(xn,xp),shift);
xp=xpnew; xn=xnnew;
}
i/=2; shift++;
}
return xp; // returns x_N, where N=2^64*N64+N0+1
}
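/* Skipping ahead: the generator obeys x_{n+1} = k*x_n - q*x_{n-1} (mod g) with (k,q) = (8,48)
(gq58x3_CNext). gq58x3_GetNextN repeatedly squares this step to jump by 2^n, and gq58x3_GetNextAny
walks the binary digits of N = 2^64*N64 + N0 + 1, composing those power-of-two jumps, so
gq58x3_skipahead_ below advances all 12 orbits by an arbitrary offset in O(log N) steps. */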
extern "C" __device__ __host__ void gq58x3_skipahead_(gq58x3_state* state, lt offset64, lt offset0){ // offset=offset64*2^64+offset0+1
lt xn,xp,j;
for(j=0;j<12;j++){
xp=gq58x3_GetNextAny(state->xP[j],state->xN[j],offset64,offset0);
xn=gq58x3_GetNextAny(state->xP[j],state->xN[j],offset64,offset0+1);
state->xP[j]=xp; state->xN[j]=xn;
}
}
extern "C" __device__ __host__ void gq58x3_init_(gq58x3_state* state){
lt x0=100142853817629549ULL,x1=133388305121829306ULL,xp,xn,j;
for(j=0;j<12;j++){
xp=gq58x3_GetNextAny(x0,x1,0,24014539279611495ULL);
xn=gq58x3_GetNextAny(x0,x1,0,24014539279611496ULL);
state->xP[j]=xp; state->xN[j]=xn; x0=xp; x1=xn;
}
}
extern "C" __device__ __host__ void gq58x3_init_short_sequence_(gq58x3_state* state,unsigned SequenceNumber){
gq58x3_init_(state); // 0 <= SequenceNumber < 2*10^8; length of each sequence <= 8*10^7
gq58x3_skipahead_(state,0,82927047ULL*(unsigned long long)SequenceNumber);
}
extern "C" __device__ __host__ void gq58x3_init_medium_sequence_(gq58x3_state* state,unsigned SequenceNumber){
gq58x3_init_(state); // 0 <= SequenceNumber < 2*10^6; length of each sequence <= 8*10^9
gq58x3_skipahead_(state,0,8799201913ULL*(unsigned long long)SequenceNumber);
}
extern "C" __device__ __host__ void gq58x3_init_long_sequence_(gq58x3_state* state,unsigned SequenceNumber){
gq58x3_init_(state); // 0 <= SequenceNumber < 2*10^4; length of each sequence <= 8*10^11
gq58x3_skipahead_(state,0,828317697521ULL*(unsigned long long)SequenceNumber);
}
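/* A minimal host-side usage sketch (illustration only, not part of the library):
gq58x3_state st;
gq58x3_init_short_sequence_(&st, 7); // independent stream number 7
unsigned r = gq58x3_generate_(&st); // next 32 random bits
float u = gq58x3_generate_uniform_float_(&st); // uniform value in [0,1)
*/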
extern "C" __device__ __host__ unsigned int gq58x3_generate_(gq58x3_state* state){
unsigned sum=0; int i; lt temp;
for(i=0;i<12;i++){
temp=gq58x3_mod_g((state->xN[i]+6*(gq58x3_g-state->xP[i]))<<3);
state->xP[i]=state->xN[i]; state->xN[i]=temp;
sum+=((((temp/gq58x3_gdiv8)<<((i<4)?6:((i<8)?3:0)))%256)<<(8*(i%4)));
}
return sum;
}
extern "C" __device__ __host__ float gq58x3_generate_uniform_float_(gq58x3_state* state){
unsigned sum=0; int i; lt temp;
for(i=0;i<12;i++){
temp=gq58x3_mod_g((state->xN[i]+6*(gq58x3_g-state->xP[i]))<<3);
state->xP[i]=state->xN[i]; state->xN[i]=temp;
sum+=((((temp/gq58x3_gdiv8)<<((i<4)?6:((i<8)?3:0)))%256)<<(8*(i%4)));
}
return ((float) sum) * 2.3283064365386963e-10;
}
extern "C" __host__ void gq58x3_print_state_(gq58x3_state* state){int i;
printf("Generator State:\nxN={");
for(i=0;i<12;i++) {printf("%llu",state->xN[i]%gq58x3_g); printf((i<11)?",":"}\nxP={");}
for(i=0;i<12;i++) {printf("%llu",state->xP[i]%gq58x3_g); printf((i<11)?",":"}\n\n");}
}
extern "C" __host__ void gq58x3_print_sse_state_(gq58x3_sse_state* state){int i;
printf("Generator State:\nxN={");
for(i=0;i<12;i++) {printf("%llu",state->xN[i]%gq58x3_g); printf((i<11)?",":"}\nxP={");}
for(i=0;i<12;i++) {printf("%llu",state->xP[i]%gq58x3_g); printf((i<11)?",":"}\n\n");}
}
__global__ void gq58x3_kernel_generate_array(gq58x3_state* state, unsigned int* out, long* length) {
unsigned sum,i,j,orbit,seqNum,shift1,shift2; long offset; lt temp;
__shared__ lt xP[gq58x3_THREADS]; // one generator per s=12 threads, i.e. one orbit
__shared__ lt xN[gq58x3_THREADS]; // per thread, i.e. blockDim.x orbits per block
__shared__ unsigned a[gq58x3_THREADS]; // array "a" contains corresponding parts of output
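// Thread-to-generator mapping: every group of 12 consecutive threads forms one generator, one
// thread per orbit; seqNum numbers these groups across the whole grid, and each group fills its
// own contiguous section of the output array, positioned via the skipahead calls below.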
orbit = threadIdx.x % 12;
seqNum = (threadIdx.x + blockIdx.x * blockDim.x)/12; // RNG_sequence index
offset = seqNum*(*length); // start of the section in the output array
xP[threadIdx.x]=gq58x3_GetNextAny(state->xP[orbit],state->xN[orbit],0,offset);
xN[threadIdx.x]=gq58x3_GetNextAny(state->xP[orbit],state->xN[orbit],0,offset+1);
j=(orbit>>2); shift1 = 6-3*j; shift2 = (8*(orbit-(j<<2)));
for(i=0;i<(*length);i++){ // each s=12 threads result in "length" values in the output array
temp = gq58x3_CNext( xN[threadIdx.x], xP[threadIdx.x] );
xP[threadIdx.x] = xN[threadIdx.x]; xN[threadIdx.x] = temp;
a[threadIdx.x] = ((((temp/gq58x3_gdiv8)<<shift1)&(255U))<<shift2);
__syncthreads();
if((orbit&3)==0) a[threadIdx.x] = a[threadIdx.x]+a[threadIdx.x+1]+a[threadIdx.x+2]+a[threadIdx.x+3];
__syncthreads();
if(orbit==0){ sum=a[threadIdx.x]+a[threadIdx.x+4]+a[threadIdx.x+8]; out[offset+i]=sum; }
}
}
extern "C" __host__ void gq58x3_generate_gpu_array_(gq58x3_state* state, unsigned int* dev_out, unsigned int* length){
long mylength = (*length)/gq58x3_ARRAY_SECTIONS;
gq58x3_state* dev_state;
long* dev_length;
if((mylength*gq58x3_ARRAY_SECTIONS)<(*length)) mylength++;
gq58x3_CUDA_CALL(hipMalloc((void**)&dev_state,sizeof(gq58x3_state)));
gq58x3_CUDA_CALL(hipMalloc((void**)&dev_length,sizeof(long)));
gq58x3_CUDA_CALL(hipMemcpy(dev_state,state,sizeof(gq58x3_state),hipMemcpyHostToDevice));
gq58x3_CUDA_CALL(hipMemcpy(dev_length,&mylength,sizeof(long),hipMemcpyHostToDevice));
hipLaunchKernelGGL(( gq58x3_kernel_generate_array), dim3(gq58x3_BLOCKS),dim3(gq58x3_THREADS), 0, 0, dev_state,dev_out,dev_length);
gq58x3_CUDA_CALL(hipGetLastError());
gq58x3_CUDA_CALL(hipFree(dev_state)); gq58x3_CUDA_CALL(hipFree(dev_length));
}
__global__ void gq58x3_kernel_generate_array_float(gq58x3_state* state, float* out, long* length) {
unsigned sum,i,j,orbit,seqNum,shift1,shift2; long offset; lt temp;
__shared__ lt xP[gq58x3_THREADS]; // one generator per s=12 threads, i.e. one orbit
__shared__ lt xN[gq58x3_THREADS]; // per thread, i.e. blockDim.x orbits per block
__shared__ unsigned a[gq58x3_THREADS]; // array "a" contains corresponding parts of output
orbit = threadIdx.x % 12;
seqNum = (threadIdx.x + blockIdx.x * blockDim.x)/12; // RNG_sequence index
offset = seqNum*(*length); // start of the section in the output array
xP[threadIdx.x]=gq58x3_GetNextAny(state->xP[orbit],state->xN[orbit],0,offset);
xN[threadIdx.x]=gq58x3_GetNextAny(state->xP[orbit],state->xN[orbit],0,offset+1);
j=(orbit>>2); shift1 = 6-3*j; shift2 = (8*(orbit-(j<<2)));
for(i=0;i<(*length);i++){ // each s=12 threads result in "length" values in the output array
temp = gq58x3_CNext( xN[threadIdx.x], xP[threadIdx.x] );
xP[threadIdx.x] = xN[threadIdx.x]; xN[threadIdx.x] = temp;
a[threadIdx.x] = ((((temp/gq58x3_gdiv8)<<shift1)&(255U))<<shift2);
__syncthreads();
if((orbit&3)==0) a[threadIdx.x] = a[threadIdx.x]+a[threadIdx.x+1]+a[threadIdx.x+2]+a[threadIdx.x+3];
__syncthreads();
if(orbit==0){ sum=a[threadIdx.x]+a[threadIdx.x+4]+a[threadIdx.x+8]; out[offset+i]=((float)sum) * 2.3283064365386963e-10; }
}
}
extern "C" __host__ void gq58x3_generate_gpu_array_float_(gq58x3_state* state, float* dev_out, unsigned int* length){
long mylength = (*length)/gq58x3_ARRAY_SECTIONS;
gq58x3_state* dev_state;
long* dev_length;
if((mylength*gq58x3_ARRAY_SECTIONS)<(*length)) mylength++;
gq58x3_CUDA_CALL(hipMalloc((void**)&dev_state,sizeof(gq58x3_state)));
gq58x3_CUDA_CALL(hipMalloc((void**)&dev_length,sizeof(long)));
gq58x3_CUDA_CALL(hipMemcpy(dev_state,state,sizeof(gq58x3_state),hipMemcpyHostToDevice));
gq58x3_CUDA_CALL(hipMemcpy(dev_length,&mylength,sizeof(long),hipMemcpyHostToDevice));
hipLaunchKernelGGL(( gq58x3_kernel_generate_array_float), dim3(gq58x3_BLOCKS),dim3(gq58x3_THREADS), 0, 0, dev_state,dev_out,dev_length);
gq58x3_CUDA_CALL(hipGetLastError());
gq58x3_CUDA_CALL(hipFree(dev_state)); gq58x3_CUDA_CALL(hipFree(dev_length));
}
__global__ void gq58x3_kernel_generate_array_double(gq58x3_state* state, double* out, long* length) {
unsigned sum,i,j,orbit,seqNum,shift1,shift2; long offset; lt temp;
__shared__ lt xP[gq58x3_THREADS]; // one generator per s=12 threads, i.e. one orbit
__shared__ lt xN[gq58x3_THREADS]; // per thread, i.e. blockDim.x orbits per block
__shared__ unsigned a[gq58x3_THREADS]; // array "a" contains corresponding parts of output
orbit = threadIdx.x % 12;
seqNum = (threadIdx.x + blockIdx.x * blockDim.x)/12; // RNG_sequence index
offset = seqNum*(*length); // start of the section in the output array
xP[threadIdx.x]=gq58x3_GetNextAny(state->xP[orbit],state->xN[orbit],0,offset);
xN[threadIdx.x]=gq58x3_GetNextAny(state->xP[orbit],state->xN[orbit],0,offset+1);
j=(orbit>>2); shift1 = 6-3*j; shift2 = (8*(orbit-(j<<2)));
for(i=0;i<(*length);i++){ // each s=12 threads result in "length" values in the output array
temp = gq58x3_CNext( xN[threadIdx.x], xP[threadIdx.x] );
xP[threadIdx.x] = xN[threadIdx.x]; xN[threadIdx.x] = temp;
a[threadIdx.x] = ((((temp/gq58x3_gdiv8)<<shift1)&(255U))<<shift2);
__syncthreads();
if((orbit&3)==0) a[threadIdx.x] = a[threadIdx.x]+a[threadIdx.x+1]+a[threadIdx.x+2]+a[threadIdx.x+3];
__syncthreads();
if(orbit==0){ sum=a[threadIdx.x]+a[threadIdx.x+4]+a[threadIdx.x+8]; out[offset+i]=((double)sum) * 2.3283064365386963e-10; }
}
}
extern "C" __host__ void gq58x3_generate_gpu_array_double_(gq58x3_state* state, double* dev_out, unsigned int* length){
long mylength = (*length)/gq58x3_ARRAY_SECTIONS;
gq58x3_state* dev_state;
long* dev_length;
if((mylength*gq58x3_ARRAY_SECTIONS)<(*length)) mylength++;
gq58x3_CUDA_CALL(hipMalloc((void**)&dev_state,sizeof(gq58x3_state)));
gq58x3_CUDA_CALL(hipMalloc((void**)&dev_length,sizeof(long)));
gq58x3_CUDA_CALL(hipMemcpy(dev_state,state,sizeof(gq58x3_state),hipMemcpyHostToDevice));
gq58x3_CUDA_CALL(hipMemcpy(dev_length,&mylength,sizeof(long),hipMemcpyHostToDevice));
hipLaunchKernelGGL(( gq58x3_kernel_generate_array_double), dim3(gq58x3_BLOCKS),dim3(gq58x3_THREADS), 0, 0, dev_state,dev_out,dev_length);
gq58x3_CUDA_CALL(hipGetLastError());
gq58x3_CUDA_CALL(hipFree(dev_state)); gq58x3_CUDA_CALL(hipFree(dev_length));
}
extern "C" __host__ void gq58x3_generate_array_(gq58x3_state* state, unsigned int* out, unsigned int* length){
long mylength = (*length)/gq58x3_ARRAY_SECTIONS;
gq58x3_state* dev_state;
unsigned int* dev_out;
long* dev_length;
if((mylength*gq58x3_ARRAY_SECTIONS)<(*length)) mylength++;
gq58x3_CUDA_CALL(hipMalloc((void**)&dev_state,sizeof(gq58x3_state)));
gq58x3_CUDA_CALL(hipMalloc((void**)&dev_out,mylength*gq58x3_ARRAY_SECTIONS*sizeof(unsigned int)));
gq58x3_CUDA_CALL(hipMalloc((void**)&dev_length,sizeof(long)));
gq58x3_CUDA_CALL(hipMemcpy(dev_state,state,sizeof(gq58x3_state),hipMemcpyHostToDevice));
gq58x3_CUDA_CALL(hipMemcpy(dev_length,&mylength,sizeof(long),hipMemcpyHostToDevice));
hipLaunchKernelGGL(( gq58x3_kernel_generate_array), dim3(gq58x3_BLOCKS),dim3(gq58x3_THREADS), 0, 0, dev_state,dev_out,dev_length);
gq58x3_CUDA_CALL(hipGetLastError());
gq58x3_CUDA_CALL(hipMemcpy(out,dev_out,(*length)*sizeof(unsigned int),hipMemcpyDeviceToHost));
gq58x3_CUDA_CALL(hipFree(dev_state)); gq58x3_CUDA_CALL(hipFree(dev_out));
gq58x3_CUDA_CALL(hipFree(dev_length));
}
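/* Illustrative host-side usage sketch (not part of the original PRAND source; the function
   name gq58x3_example_fill_ is arbitrary, and it assumes the gq58x3_init_short_sequence_ and
   gq58x3_generate_ definitions earlier in this file): each SequenceNumber selects an
   independent subsequence, and repeated gq58x3_generate_ calls then draw 32-bit values from
   that subsequence on the CPU. */
extern "C" __host__ void gq58x3_example_fill_(unsigned int* buf, unsigned int n, unsigned SequenceNumber){
  unsigned int i; gq58x3_state state;
  gq58x3_init_short_sequence_(&state,SequenceNumber);
  for(i=0;i<n;i++) buf[i]=gq58x3_generate_(&state);
}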
|
3cf3f497916d962a79dd9aa3e4f5db7421fa65b1.cu
|
// (c) Copyright 2013 Lev Barash, Landau Institute for Theoretical Physics, Russian Academy of Sciences
// This is supplement to the paper:
// L.Yu. Barash, L.N. Shchur, "PRAND: GPU accelerated parallel random number generation library: Using most reliable algorithms and applying parallelism of modern GPUs and CPUs".
// e-mail: barash @ itp.ac.ru (remove space)
#include<stdio.h>
#define gq58x3_CUDA_CALL(x) do { if((x) != cudaSuccess) { printf("Error: %s at %s:%d\n",cudaGetErrorString(cudaGetLastError()),__FILE__,__LINE__); exit(1);}} while(0)
#define gq58x3_BLOCKS 128
#define gq58x3_THREADS 192
#define gq58x3_ARRAY_SECTIONS (gq58x3_BLOCKS*gq58x3_THREADS/12)
#define gq58x3_k 8
#define gq58x3_q 48
#define gq58x3_g 288230374541099008ULL
#define gq58x3_gdiv8 36028796817637376ULL
typedef unsigned long long lt;
typedef struct{
lt xN[12] __attribute__ ((aligned(16))),
xP[12] __attribute__ ((aligned(16)));
} gq58x3_state;
typedef gq58x3_state gq58x3_sse_state;
lt gq58x3_sse_Consts[10] __attribute__ ((aligned(16))) =
{13835057977972752384ULL,13835057977972752384ULL,1610612736ULL,1610612736ULL,
288230371923853311ULL,288230371923853311ULL,288230374541099008ULL,288230374541099008ULL,
18157383382357244923ULL,18157383382357244923ULL};
extern "C" __host__ unsigned int gq58x3_sse_generate_(gq58x3_sse_state* state){
unsigned output;
asm volatile("movaps (%3),%%xmm0\n" \
"movaps (%2),%%xmm1\n" \
"movaps (%1),%%xmm4\n" \
"movaps %%xmm4,(%2)\n" \
"psllq $3,%%xmm4\n" \
"paddq %%xmm0,%%xmm4\n" \
"psllq $4,%%xmm1\n" \
"psubq %%xmm1,%%xmm4\n" \
"psllq $1,%%xmm1\n" \
"psubq %%xmm1,%%xmm4\n" \
"movaps %%xmm4,%%xmm1\n" \
"psrlq $58,%%xmm1\n" \
"psllq $29,%%xmm1\n" \
"movaps %%xmm1,%%xmm3\n" \
"psllq $1,%%xmm3\n" \
"paddq %%xmm1,%%xmm3\n" \
"psllq $29,%%xmm1\n" \
"psubq %%xmm1,%%xmm4\n" \
"paddq %%xmm3,%%xmm4\n" \
"movaps %%xmm4,%%xmm1\n" \
"paddq 16(%3),%%xmm1\n" \
"pshufd $245,%%xmm1,%%xmm3\n" \
"pcmpgtd 32(%3),%%xmm3\n" \
"pand 48(%3),%%xmm3\n" \
"psubq %%xmm3,%%xmm4\n" \
"movaps %%xmm4,(%1)\n" \
"movaps %%xmm4,%%xmm1\n" \
"paddq %%xmm4,%%xmm1\n" \
"paddq %%xmm4,%%xmm1\n" \
"psrlq $29,%%xmm1\n" \
"paddq %%xmm1,%%xmm4\n" \
"movaps 16(%2),%%xmm1\n" \
"movaps 16(%1),%%xmm5\n" \
"movaps %%xmm5,16(%2)\n" \
"psllq $3,%%xmm5\n" \
"paddq %%xmm0,%%xmm5\n" \
"psllq $4,%%xmm1\n" \
"psubq %%xmm1,%%xmm5\n" \
"psllq $1,%%xmm1\n" \
"psubq %%xmm1,%%xmm5\n" \
"movaps %%xmm5,%%xmm1\n" \
"psrlq $58,%%xmm1\n" \
"psllq $29,%%xmm1\n" \
"movaps %%xmm1,%%xmm3\n" \
"psllq $1,%%xmm3\n" \
"paddq %%xmm1,%%xmm3\n" \
"psllq $29,%%xmm1\n" \
"psubq %%xmm1,%%xmm5\n" \
"paddq %%xmm3,%%xmm5\n" \
"movaps %%xmm5,%%xmm1\n" \
"paddq 16(%3),%%xmm1\n" \
"pshufd $245,%%xmm1,%%xmm3\n" \
"pcmpgtd 32(%3),%%xmm3\n" \
"pand 48(%3),%%xmm3\n" \
"psubq %%xmm3,%%xmm5\n" \
"movaps %%xmm5,16(%1)\n" \
"movaps %%xmm5,%%xmm1\n" \
"paddq %%xmm5,%%xmm1\n" \
"paddq %%xmm5,%%xmm1\n" \
"psrlq $29,%%xmm1\n" \
"paddq %%xmm1,%%xmm5\n" \
"movaps 32(%2),%%xmm1\n" \
"movaps 32(%1),%%xmm6\n" \
"movaps %%xmm6,32(%2)\n" \
"psllq $3,%%xmm6\n" \
"paddq %%xmm0,%%xmm6\n" \
"psllq $4,%%xmm1\n" \
"psubq %%xmm1,%%xmm6\n" \
"psllq $1,%%xmm1\n" \
"psubq %%xmm1,%%xmm6\n" \
"movaps %%xmm6,%%xmm1\n" \
"psrlq $58,%%xmm1\n" \
"psllq $29,%%xmm1\n" \
"movaps %%xmm1,%%xmm3\n" \
"psllq $1,%%xmm3\n" \
"paddq %%xmm1,%%xmm3\n" \
"psllq $29,%%xmm1\n" \
"psubq %%xmm1,%%xmm6\n" \
"paddq %%xmm3,%%xmm6\n" \
"movaps %%xmm6,%%xmm1\n" \
"paddq 16(%3),%%xmm1\n" \
"pshufd $245,%%xmm1,%%xmm3\n" \
"pcmpgtd 32(%3),%%xmm3\n" \
"pand 48(%3),%%xmm3\n" \
"psubq %%xmm3,%%xmm6\n" \
"movaps %%xmm6,32(%1)\n" \
"movaps %%xmm6,%%xmm1\n" \
"paddq %%xmm6,%%xmm1\n" \
"paddq %%xmm6,%%xmm1\n" \
"psrlq $29,%%xmm1\n" \
"paddq %%xmm1,%%xmm6\n" \
"movaps 48(%2),%%xmm1\n" \
"movaps 48(%1),%%xmm7\n" \
"movaps %%xmm7,48(%2)\n" \
"psllq $3,%%xmm7\n" \
"paddq %%xmm0,%%xmm7\n" \
"psllq $4,%%xmm1\n" \
"psubq %%xmm1,%%xmm7\n" \
"psllq $1,%%xmm1\n" \
"psubq %%xmm1,%%xmm7\n" \
"movaps %%xmm7,%%xmm1\n" \
"psrlq $58,%%xmm1\n" \
"psllq $29,%%xmm1\n" \
"movaps %%xmm1,%%xmm3\n" \
"psllq $1,%%xmm3\n" \
"paddq %%xmm1,%%xmm3\n" \
"psllq $29,%%xmm1\n" \
"psubq %%xmm1,%%xmm7\n" \
"paddq %%xmm3,%%xmm7\n" \
"movaps %%xmm7,%%xmm1\n" \
"paddq 16(%3),%%xmm1\n" \
"pshufd $245,%%xmm1,%%xmm3\n" \
"pcmpgtd 32(%3),%%xmm3\n" \
"pand 48(%3),%%xmm3\n" \
"psubq %%xmm3,%%xmm7\n" \
"movaps %%xmm7,48(%1)\n" \
"movaps %%xmm7,%%xmm1\n" \
"paddq %%xmm7,%%xmm1\n" \
"paddq %%xmm7,%%xmm1\n" \
"psrlq $29,%%xmm1\n" \
"paddq %%xmm1,%%xmm7\n" \
"psrlq $55,%%xmm4\n" \
"psrlq $55,%%xmm5\n" \
"psrlq $55,%%xmm6\n" \
"psrlq $55,%%xmm7\n" \
"packssdw %%xmm5,%%xmm4\n" \
"packssdw %%xmm7,%%xmm6\n" \
"movaps 64(%2),%%xmm1\n" \
"movaps 64(%1),%%xmm5\n" \
"movaps %%xmm5,64(%2)\n" \
"psllq $3,%%xmm5\n" \
"paddq %%xmm0,%%xmm5\n" \
"psllq $4,%%xmm1\n" \
"psubq %%xmm1,%%xmm5\n" \
"psllq $1,%%xmm1\n" \
"psubq %%xmm1,%%xmm5\n" \
"movaps %%xmm5,%%xmm1\n" \
"psrlq $58,%%xmm1\n" \
"psllq $29,%%xmm1\n" \
"movaps %%xmm1,%%xmm3\n" \
"psllq $1,%%xmm3\n" \
"paddq %%xmm1,%%xmm3\n" \
"psllq $29,%%xmm1\n" \
"psubq %%xmm1,%%xmm5\n" \
"paddq %%xmm3,%%xmm5\n" \
"movaps %%xmm5,%%xmm1\n" \
"paddq 16(%3),%%xmm1\n" \
"pshufd $245,%%xmm1,%%xmm3\n" \
"pcmpgtd 32(%3),%%xmm3\n" \
"pand 48(%3),%%xmm3\n" \
"psubq %%xmm3,%%xmm5\n" \
"movaps %%xmm5,64(%1)\n" \
"movaps %%xmm5,%%xmm1\n" \
"paddq %%xmm5,%%xmm1\n" \
"paddq %%xmm5,%%xmm1\n" \
"psrlq $29,%%xmm1\n" \
"paddq %%xmm1,%%xmm5\n" \
"movaps 80(%2),%%xmm1\n" \
"movaps 80(%1),%%xmm7\n" \
"movaps %%xmm7,80(%2)\n" \
"psllq $3,%%xmm7\n" \
"paddq %%xmm0,%%xmm7\n" \
"psllq $4,%%xmm1\n" \
"psubq %%xmm1,%%xmm7\n" \
"psllq $1,%%xmm1\n" \
"psubq %%xmm1,%%xmm7\n" \
"movaps %%xmm7,%%xmm1\n" \
"psrlq $58,%%xmm1\n" \
"psllq $29,%%xmm1\n" \
"movaps %%xmm1,%%xmm3\n" \
"psllq $1,%%xmm3\n" \
"paddq %%xmm1,%%xmm3\n" \
"psllq $29,%%xmm1\n" \
"psubq %%xmm1,%%xmm7\n" \
"paddq %%xmm3,%%xmm7\n" \
"movaps %%xmm7,%%xmm1\n" \
"paddq 16(%3),%%xmm1\n" \
"pshufd $245,%%xmm1,%%xmm3\n" \
"pcmpgtd 32(%3),%%xmm3\n" \
"pand 48(%3),%%xmm3\n" \
"psubq %%xmm3,%%xmm7\n" \
"movaps %%xmm7,80(%1)\n" \
"movaps %%xmm7,%%xmm1\n" \
"paddq %%xmm7,%%xmm1\n" \
"paddq %%xmm7,%%xmm1\n" \
"psrlq $29,%%xmm1\n" \
"paddq %%xmm1,%%xmm7\n" \
"psrlq $55,%%xmm5\n" \
"psrlq $55,%%xmm7\n" \
"packssdw %%xmm7,%%xmm5\n" \
"packssdw %%xmm4,%%xmm4\n" \
"packssdw %%xmm6,%%xmm6\n" \
"packssdw %%xmm5,%%xmm5\n" \
"packsswb %%xmm4,%%xmm4\n" \
"packsswb %%xmm6,%%xmm6\n" \
"packsswb %%xmm5,%%xmm5\n" \
"pand 64(%3),%%xmm4\n" \
"pslld $6,%%xmm4\n" \
"pxor %%xmm4,%%xmm5\n" \
"pslld $3,%%xmm6\n" \
"pxor %%xmm6,%%xmm5\n" \
"movd %%xmm5,%0\n" \
"":"=&r"(output):"r"(state->xN),"r"(state->xP),"r"(gq58x3_sse_Consts));
return output;
}
extern "C" __device__ __host__ void gq58x3_get_sse_state_(gq58x3_state* state,gq58x3_sse_state* sse_state){
int i; for(i=0;i<12;i++) {sse_state->xN[i]=state->xN[i]; sse_state->xP[i]=state->xP[i];}
}
extern "C" __device__ __host__ lt gq58x3_mod_g(lt x){ // returns x (mod g)
lt F,G; F = (x>>58); G = x-(F<<58)+(F<<29)+(F<<30);
return ((G>=gq58x3_g) ? (G-gq58x3_g) : G);
}
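// Illustrative note on gq58x3_mod_g: gq58x3_g = 2^58 - 2^30 - 2^29, so 2^58 is congruent to
// 2^30 + 2^29 modulo gq58x3_g. Writing x = F*2^58 + r with F = x>>58, the function replaces
// F*2^58 by F*2^29 + F*2^30, and the final conditional subtraction completes the reduction
// into [0, gq58x3_g).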
extern "C" __device__ __host__ lt gq58x3_MyMult(lt A,lt B){ // returns AB (mod gq58x3_g), where it is implied that A,B<gq58x3_g;
lt A1,A0,B1,B0,curr,x,m;
A1=A>>32; B1=B>>32; A0=A-(A1<<32)+(12*A1); B0=B-(B1<<32)+(12*B1);
if(A0>>32) {A0-=4294967284ULL; A1++;}
if(B0>>32) {B0-=4294967284ULL; B1++;}
curr=A1*B0+B1*A0; m=curr>>26; x=curr-(m<<26);
curr=((3*m+(x<<4))<<28)+(gq58x3_g-12*x)+(144*A1*B1)+(gq58x3_mod_g(A0*B0));
return gq58x3_mod_g(curr);
}
extern "C" __device__ __host__ lt gq58x3_CNext2(lt N,lt P,lt myk,lt myq){ // returns (myk*N-myq*P) (mod gq58x3_g)
lt curr1,curr2;
curr1=gq58x3_MyMult(myk,N); curr2=gq58x3_MyMult(myq,P);
if(curr1>=curr2) return (curr1-curr2); else return (gq58x3_g+curr1-curr2);
}
extern "C" __device__ __host__ lt gq58x3_CNext(lt N,lt P){ // returns (8N-48P) (mod gq58x3_g)
return gq58x3_mod_g((N+6*(gq58x3_g-P))<<3);
}
extern "C" __device__ __host__ lt gq58x3_GetNextN(lt x0,lt x1,unsigned int n){ //returns x_{2^n}
lt myk=gq58x3_k,myq=gq58x3_q,i,x=x1;
for(i=0;i<n;i++){
x=gq58x3_CNext2(x,x0,myk,myq);
myk=gq58x3_CNext2(myk,2,myk,myq);
myq=gq58x3_CNext2(myq,0,myq,0);
}
return x;
}
extern "C" __device__ __host__ lt gq58x3_GetNextAny(lt x0,lt x1,lt N64,lt N0){ //N=2^64*N64+N0+1
lt i,xp=x0,xn=x1,xpnew,xnnew,shift=0;
i=N0; while(i>0){
if(i%2==1){ // xp,xn ----> 2^shift
xpnew=gq58x3_GetNextN(xp,xn,shift);
xnnew=gq58x3_GetNextN(xn,gq58x3_CNext(xn,xp),shift);
xp=xpnew; xn=xnnew;
}
i/=2; shift++;
}
i=N64; shift=64; while(i>0){
if(i%2==1){ // xp,xn ----> 2^shift
xpnew=gq58x3_GetNextN(xp,xn,shift);
xnnew=gq58x3_GetNextN(xn,gq58x3_CNext(xn,xp),shift);
xp=xpnew; xn=xnnew;
}
i/=2; shift++;
}
return xp; // returns x_N, where N=2^64*N64+N0+1
}
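// Illustrative note on gq58x3_GetNextAny: the two loops scan the binary representation of the
// requested offset (low 64 bits first, then the high word) and, for each set bit at position
// "shift", advance the pair (xp,xn) by 2^shift recurrence steps via gq58x3_GetNextN, so a
// skip-ahead costs a number of steps logarithmic in the offset rather than linear.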
extern "C" __device__ __host__ void gq58x3_skipahead_(gq58x3_state* state, lt offset64, lt offset0){ // offset=offset64*2^64+offset0+1
lt xn,xp,j;
for(j=0;j<12;j++){
xp=gq58x3_GetNextAny(state->xP[j],state->xN[j],offset64,offset0);
xn=gq58x3_GetNextAny(state->xP[j],state->xN[j],offset64,offset0+1);
state->xP[j]=xp; state->xN[j]=xn;
}
}
extern "C" __device__ __host__ void gq58x3_init_(gq58x3_state* state){
lt x0=100142853817629549ULL,x1=133388305121829306ULL,xp,xn,j;
for(j=0;j<12;j++){
xp=gq58x3_GetNextAny(x0,x1,0,24014539279611495ULL);
xn=gq58x3_GetNextAny(x0,x1,0,24014539279611496ULL);
state->xP[j]=xp; state->xN[j]=xn; x0=xp; x1=xn;
}
}
extern "C" __device__ __host__ void gq58x3_init_short_sequence_(gq58x3_state* state,unsigned SequenceNumber){
gq58x3_init_(state); // 0 <= SequenceNumber < 2*10^8; length of each sequence <= 8*10^7
gq58x3_skipahead_(state,0,82927047ULL*(unsigned long long)SequenceNumber);
}
extern "C" __device__ __host__ void gq58x3_init_medium_sequence_(gq58x3_state* state,unsigned SequenceNumber){
gq58x3_init_(state); // 0 <= SequenceNumber < 2*10^6; length of each sequence <= 8*10^9
gq58x3_skipahead_(state,0,8799201913ULL*(unsigned long long)SequenceNumber);
}
extern "C" __device__ __host__ void gq58x3_init_long_sequence_(gq58x3_state* state,unsigned SequenceNumber){
gq58x3_init_(state); // 0 <= SequenceNumber < 2*10^4; length of each sequence <= 8*10^11
gq58x3_skipahead_(state,0,828317697521ULL*(unsigned long long)SequenceNumber);
}
extern "C" __device__ __host__ unsigned int gq58x3_generate_(gq58x3_state* state){
unsigned sum=0; int i; lt temp;
for(i=0;i<12;i++){
temp=gq58x3_mod_g((state->xN[i]+6*(gq58x3_g-state->xP[i]))<<3);
state->xP[i]=state->xN[i]; state->xN[i]=temp;
sum+=((((temp/gq58x3_gdiv8)<<((i<4)?6:((i<8)?3:0)))%256)<<(8*(i%4)));
}
return sum;
}
extern "C" __device__ __host__ float gq58x3_generate_uniform_float_(gq58x3_state* state){
unsigned sum=0; int i; lt temp;
for(i=0;i<12;i++){
temp=gq58x3_mod_g((state->xN[i]+6*(gq58x3_g-state->xP[i]))<<3);
state->xP[i]=state->xN[i]; state->xN[i]=temp;
sum+=((((temp/gq58x3_gdiv8)<<((i<4)?6:((i<8)?3:0)))%256)<<(8*(i%4)));
}
return ((float) sum) * 2.3283064365386963e-10;
}
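/* Illustrative host-side usage sketch (not part of the original PRAND source; the function
   name gq58x3_example_fill_ is arbitrary): each SequenceNumber selects an independent
   subsequence via gq58x3_init_short_sequence_, and repeated gq58x3_generate_ calls then
   draw 32-bit values from that subsequence on the CPU. */
extern "C" __host__ void gq58x3_example_fill_(unsigned int* buf, unsigned int n, unsigned SequenceNumber){
  unsigned int i; gq58x3_state state;
  gq58x3_init_short_sequence_(&state,SequenceNumber);
  for(i=0;i<n;i++) buf[i]=gq58x3_generate_(&state);
}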
extern "C" __host__ void gq58x3_print_state_(gq58x3_state* state){int i;
printf("Generator State:\nxN={");
for(i=0;i<12;i++) {printf("%llu",state->xN[i]%gq58x3_g); printf((i<11)?",":"}\nxP={");}
for(i=0;i<12;i++) {printf("%llu",state->xP[i]%gq58x3_g); printf((i<11)?",":"}\n\n");}
}
extern "C" __host__ void gq58x3_print_sse_state_(gq58x3_sse_state* state){int i;
printf("Generator State:\nxN={");
for(i=0;i<12;i++) {printf("%llu",state->xN[i]%gq58x3_g); printf((i<11)?",":"}\nxP={");}
for(i=0;i<12;i++) {printf("%llu",state->xP[i]%gq58x3_g); printf((i<11)?",":"}\n\n");}
}
__global__ void gq58x3_kernel_generate_array(gq58x3_state* state, unsigned int* out, long* length) {
unsigned sum,i,j,orbit,seqNum,shift1,shift2; long offset; lt temp;
__shared__ lt xP[gq58x3_THREADS]; // one generator per s=12 threads, i.e. one orbit
__shared__ lt xN[gq58x3_THREADS]; // per thread, i.e. blockDim.x orbits per block
__shared__ unsigned a[gq58x3_THREADS]; // array "a" contains corresponding parts of output
orbit = threadIdx.x % 12;
seqNum = (threadIdx.x + blockIdx.x * blockDim.x)/12; // RNG_sequence index
offset = seqNum*(*length); // start of the section in the output array
xP[threadIdx.x]=gq58x3_GetNextAny(state->xP[orbit],state->xN[orbit],0,offset);
xN[threadIdx.x]=gq58x3_GetNextAny(state->xP[orbit],state->xN[orbit],0,offset+1);
j=(orbit>>2); shift1 = 6-3*j; shift2 = (8*(orbit-(j<<2)));
for(i=0;i<(*length);i++){ // each s=12 threads result in "length" values in the output array
temp = gq58x3_CNext( xN[threadIdx.x], xP[threadIdx.x] );
xP[threadIdx.x] = xN[threadIdx.x]; xN[threadIdx.x] = temp;
a[threadIdx.x] = ((((temp/gq58x3_gdiv8)<<shift1)&(255U))<<shift2);
__syncthreads();
if((orbit&3)==0) a[threadIdx.x] = a[threadIdx.x]+a[threadIdx.x+1]+a[threadIdx.x+2]+a[threadIdx.x+3];
__syncthreads();
if(orbit==0){ sum=a[threadIdx.x]+a[threadIdx.x+4]+a[threadIdx.x+8]; out[offset+i]=sum; }
}
}
extern "C" __host__ void gq58x3_generate_gpu_array_(gq58x3_state* state, unsigned int* dev_out, unsigned int* length){
long mylength = (*length)/gq58x3_ARRAY_SECTIONS;
gq58x3_state* dev_state;
long* dev_length;
if((mylength*gq58x3_ARRAY_SECTIONS)<(*length)) mylength++;
gq58x3_CUDA_CALL(cudaMalloc((void**)&dev_state,sizeof(gq58x3_state)));
gq58x3_CUDA_CALL(cudaMalloc((void**)&dev_length,sizeof(long)));
gq58x3_CUDA_CALL(cudaMemcpy(dev_state,state,sizeof(gq58x3_state),cudaMemcpyHostToDevice));
gq58x3_CUDA_CALL(cudaMemcpy(dev_length,&mylength,sizeof(long),cudaMemcpyHostToDevice));
gq58x3_kernel_generate_array<<<gq58x3_BLOCKS,gq58x3_THREADS>>>(dev_state,dev_out,dev_length);
gq58x3_CUDA_CALL(cudaGetLastError());
gq58x3_CUDA_CALL(cudaFree(dev_state)); gq58x3_CUDA_CALL(cudaFree(dev_length));
}
__global__ void gq58x3_kernel_generate_array_float(gq58x3_state* state, float* out, long* length) {
unsigned sum,i,j,orbit,seqNum,shift1,shift2; long offset; lt temp;
__shared__ lt xP[gq58x3_THREADS]; // one generator per s=12 threads, i.e. one orbit
__shared__ lt xN[gq58x3_THREADS]; // per thread, i.e. blockDim.x orbits per block
__shared__ unsigned a[gq58x3_THREADS]; // array "a" contains corresponding parts of output
orbit = threadIdx.x % 12;
seqNum = (threadIdx.x + blockIdx.x * blockDim.x)/12; // RNG_sequence index
offset = seqNum*(*length); // start of the section in the output array
xP[threadIdx.x]=gq58x3_GetNextAny(state->xP[orbit],state->xN[orbit],0,offset);
xN[threadIdx.x]=gq58x3_GetNextAny(state->xP[orbit],state->xN[orbit],0,offset+1);
j=(orbit>>2); shift1 = 6-3*j; shift2 = (8*(orbit-(j<<2)));
for(i=0;i<(*length);i++){ // each s=12 threads result in "length" values in the output array
temp = gq58x3_CNext( xN[threadIdx.x], xP[threadIdx.x] );
xP[threadIdx.x] = xN[threadIdx.x]; xN[threadIdx.x] = temp;
a[threadIdx.x] = ((((temp/gq58x3_gdiv8)<<shift1)&(255U))<<shift2);
__syncthreads();
if((orbit&3)==0) a[threadIdx.x] = a[threadIdx.x]+a[threadIdx.x+1]+a[threadIdx.x+2]+a[threadIdx.x+3];
__syncthreads();
if(orbit==0){ sum=a[threadIdx.x]+a[threadIdx.x+4]+a[threadIdx.x+8]; out[offset+i]=((float)sum) * 2.3283064365386963e-10; }
}
}
extern "C" __host__ void gq58x3_generate_gpu_array_float_(gq58x3_state* state, float* dev_out, unsigned int* length){
long mylength = (*length)/gq58x3_ARRAY_SECTIONS;
gq58x3_state* dev_state;
long* dev_length;
if((mylength*gq58x3_ARRAY_SECTIONS)<(*length)) mylength++;
gq58x3_CUDA_CALL(cudaMalloc((void**)&dev_state,sizeof(gq58x3_state)));
gq58x3_CUDA_CALL(cudaMalloc((void**)&dev_length,sizeof(long)));
gq58x3_CUDA_CALL(cudaMemcpy(dev_state,state,sizeof(gq58x3_state),cudaMemcpyHostToDevice));
gq58x3_CUDA_CALL(cudaMemcpy(dev_length,&mylength,sizeof(long),cudaMemcpyHostToDevice));
gq58x3_kernel_generate_array_float<<<gq58x3_BLOCKS,gq58x3_THREADS>>>(dev_state,dev_out,dev_length);
gq58x3_CUDA_CALL(cudaGetLastError());
gq58x3_CUDA_CALL(cudaFree(dev_state)); gq58x3_CUDA_CALL(cudaFree(dev_length));
}
__global__ void gq58x3_kernel_generate_array_double(gq58x3_state* state, double* out, long* length) {
unsigned sum,i,j,orbit,seqNum,shift1,shift2; long offset; lt temp;
__shared__ lt xP[gq58x3_THREADS]; // one generator per s=12 threads, i.e. one orbit
__shared__ lt xN[gq58x3_THREADS]; // per thread, i.e. blockDim.x orbits per block
__shared__ unsigned a[gq58x3_THREADS]; // array "a" contains corresponding parts of output
orbit = threadIdx.x % 12;
seqNum = (threadIdx.x + blockIdx.x * blockDim.x)/12; // RNG_sequence index
offset = seqNum*(*length); // start of the section in the output array
xP[threadIdx.x]=gq58x3_GetNextAny(state->xP[orbit],state->xN[orbit],0,offset);
xN[threadIdx.x]=gq58x3_GetNextAny(state->xP[orbit],state->xN[orbit],0,offset+1);
j=(orbit>>2); shift1 = 6-3*j; shift2 = (8*(orbit-(j<<2)));
for(i=0;i<(*length);i++){ // each s=12 threads result in "length" values in the output array
temp = gq58x3_CNext( xN[threadIdx.x], xP[threadIdx.x] );
xP[threadIdx.x] = xN[threadIdx.x]; xN[threadIdx.x] = temp;
a[threadIdx.x] = ((((temp/gq58x3_gdiv8)<<shift1)&(255U))<<shift2);
__syncthreads();
if((orbit&3)==0) a[threadIdx.x] = a[threadIdx.x]+a[threadIdx.x+1]+a[threadIdx.x+2]+a[threadIdx.x+3];
__syncthreads();
if(orbit==0){ sum=a[threadIdx.x]+a[threadIdx.x+4]+a[threadIdx.x+8]; out[offset+i]=((double)sum) * 2.3283064365386963e-10; }
}
}
extern "C" __host__ void gq58x3_generate_gpu_array_double_(gq58x3_state* state, double* dev_out, unsigned int* length){
long mylength = (*length)/gq58x3_ARRAY_SECTIONS;
gq58x3_state* dev_state;
long* dev_length;
if((mylength*gq58x3_ARRAY_SECTIONS)<(*length)) mylength++;
gq58x3_CUDA_CALL(cudaMalloc((void**)&dev_state,sizeof(gq58x3_state)));
gq58x3_CUDA_CALL(cudaMalloc((void**)&dev_length,sizeof(long)));
gq58x3_CUDA_CALL(cudaMemcpy(dev_state,state,sizeof(gq58x3_state),cudaMemcpyHostToDevice));
gq58x3_CUDA_CALL(cudaMemcpy(dev_length,&mylength,sizeof(long),cudaMemcpyHostToDevice));
gq58x3_kernel_generate_array_double<<<gq58x3_BLOCKS,gq58x3_THREADS>>>(dev_state,dev_out,dev_length);
gq58x3_CUDA_CALL(cudaGetLastError());
gq58x3_CUDA_CALL(cudaFree(dev_state)); gq58x3_CUDA_CALL(cudaFree(dev_length));
}
extern "C" __host__ void gq58x3_generate_array_(gq58x3_state* state, unsigned int* out, unsigned int* length){
long mylength = (*length)/gq58x3_ARRAY_SECTIONS;
gq58x3_state* dev_state;
unsigned int* dev_out;
long* dev_length;
if((mylength*gq58x3_ARRAY_SECTIONS)<(*length)) mylength++;
gq58x3_CUDA_CALL(cudaMalloc((void**)&dev_state,sizeof(gq58x3_state)));
gq58x3_CUDA_CALL(cudaMalloc((void**)&dev_out,mylength*gq58x3_ARRAY_SECTIONS*sizeof(unsigned int)));
gq58x3_CUDA_CALL(cudaMalloc((void**)&dev_length,sizeof(long)));
gq58x3_CUDA_CALL(cudaMemcpy(dev_state,state,sizeof(gq58x3_state),cudaMemcpyHostToDevice));
gq58x3_CUDA_CALL(cudaMemcpy(dev_length,&mylength,sizeof(long),cudaMemcpyHostToDevice));
gq58x3_kernel_generate_array<<<gq58x3_BLOCKS,gq58x3_THREADS>>>(dev_state,dev_out,dev_length);
gq58x3_CUDA_CALL(cudaGetLastError());
gq58x3_CUDA_CALL(cudaMemcpy(out,dev_out,(*length)*sizeof(unsigned int),cudaMemcpyDeviceToHost));
gq58x3_CUDA_CALL(cudaFree(dev_state)); gq58x3_CUDA_CALL(cudaFree(dev_out));
gq58x3_CUDA_CALL(cudaFree(dev_length));
}
|
b5ce393be0456337be3b52a73bfa7b73cf7d0276.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include "settings.h"
#include <iomanip>
#include <string>
#include <sstream>
#include <vector>
#include <map>
#include <memory>
#include <algorithm>
#include <functional>
#include <cctype>
#include <cstdint>
#include <cstdlib>
#include <getopt.h>
struct OptionIface;
using std::string;
using std::vector;
using std::make_shared;
typedef std::shared_ptr<OptionIface> OptionPtr;
typedef std::map<int, OptionPtr> OptionMap;
struct OptionIface
{
const char* type;
const char* name;
const char* description;
const char* defaultValue;
int hasArgument;
virtual ~OptionIface() = default;
OptionIface(const char* type, const char* name, const char* description)
: type(type), name(name), description(description), hasArgument(no_argument) { }
OptionIface(const char* type, const char* name, const char* description, const char* dvalue)
: type(type), name(name), description(description), defaultValue(dvalue), hasArgument(no_argument) { }
virtual void parseArgument(const char* optstr, const char* optarg) = 0;
virtual void throwError(const char*, const char* optarg) const
{
throw string("Option ") + name + string(" expects a ") + type + string(", but got `") + optarg + string("'");
}
};
template <typename T>
struct Option: public OptionIface
{
T& value;
Option() = delete;
Option(Option&& rhs) = delete;
Option(const Option& rhs) = delete;
Option(T& value, const char* type, const char* name, const char* description)
: OptionIface(type, name, description)
, value(value)
{
hasArgument = required_argument;
}
Option(T& value, const char* type, const char* name, const char* description, const char* dvalue)
: OptionIface(type, name, description, dvalue)
, value(value)
{
hasArgument = required_argument;
}
void parseArgument(const char* optstr, const char* optarg) override;
};
template <>
void Option<uint32_t>::parseArgument(const char* optstr, const char* optarg)
{
char* endptr = nullptr;
value = std::strtoul(optarg, &endptr, 0);
if (endptr == nullptr || *endptr != '\0')
{
throwError(optstr, optarg);
}
}
template <>
void Option<uint64_t>::parseArgument(const char* optstr, const char* optarg)
{
char* endptr = nullptr;
value = std::strtoul(optarg, &endptr, 0);
if (endptr == nullptr || *endptr != '\0')
{
throwError(optstr, optarg);
}
}
template <>
void Option<bool>::parseArgument(const char* optstr, const char* optarg)
{
string str(optarg);
std::transform(str.begin(), str.end(), str.begin(), std::ptr_fun<int, int>(std::tolower));
if (str == "false" || str == "0" || str == "no" || str == "n" || str == "off" || str == "disable" || str == "disabled")
{
value = false;
}
else if (str == "true" || str == "1" || str == "yes" || str == "y" || str == "on" || str == "enable" || str == "enabled")
{
value = true;
}
else
{
throwError(optstr, optarg);
}
}
template <>
void Option<const char*>::parseArgument(const char*, const char* optarg)
{
value = optarg;
}
struct Range: public Option<uint64_t>
{
uint64_t lower;
uint64_t upper;
Range(uint64_t& value, uint64_t lo, uint64_t hi, const char* name, const char* description, const char* dv)
: Option<uint64_t>(value, "count", name, description, dv)
, lower(lo)
, upper(hi)
{ }
void throwError(const char*, const char*) const override
{
if (upper != 0 && lower != 0)
{
throw string("Option ") + name + string(" expects a value between ") + std::to_string(lower) + " and " + std::to_string(upper);
}
else if (lower != 0)
{
throw string("Option ") + name + string(" must be at least ") + std::to_string(lower);
}
throw string("Option ") + name + string(" must lower than ") + std::to_string(upper);
}
void parseArgument(const char* optstr, const char* optarg) override
{
Option<uint64_t>::parseArgument(optstr, optarg);
if (lower != 0 && value < lower)
{
throwError(optstr, optarg);
}
if (upper != 0 && value > upper)
{
throwError(optstr, optarg);
}
}
};
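/* Illustrative note (not part of the original source): each entry of the OptionMap built in
   Settings::parseArguments below pairs a short option with one of these typed parsers, e.g.
   {'t', OptionPtr(new Range(numThreads, 1, 32, "threads", "number of CUDA threads", "32"))},
   so "--threads=8" invokes Range::parseArgument, which first parses the integer and then
   enforces 1 <= value <= 32, throwing the range message above otherwise. */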
static void setBDF(Settings& settings)
{
hipDeviceProp_t props;
hipError_t err = hipGetDeviceProperties(&props, settings.cudaDevice);
if (err != hipSuccess)
{
throw string("Failed to get device properties: ") + hipGetErrorString(err);
}
settings.domain = props.pciDomainID;
settings.bus = props.pciBusID;
settings.devfn = props.pciDeviceID;
}
string Settings::getDeviceBDF() const
{
using namespace std;
ostringstream s;
s << setfill('0') << setw(4) << hex << domain
<< ":" << setfill('0') << setw(2) << hex << bus
<< ":" << setfill('0') << setw(2) << hex << devfn
<< ".0";
return s.str();
}
string Settings::usageString(const string& name)
{
return "Usage: " + name + " --ctrl=identifier [options]\n"
+ " or: " + name + " --block-device=path [options]";
}
static string helpString(const string& /*name*/, OptionMap& options)
{
using namespace std;
ostringstream s;
s << "" << left
<< setw(16) << "OPTION"
<< setw(2) << " "
<< setw(16) << "TYPE"
<< setw(10) << "DEFAULT"
<< setw(36) << "DESCRIPTION"
<< endl;
for (const auto& optPair: options)
{
const auto& opt = optPair.second;
s << " " << left
<< setw(16) << opt->name
<< setw(16) << opt->type
<< setw(10) << (opt->defaultValue != nullptr ? opt->defaultValue : "")
<< setw(36) << opt->description
<< endl;
}
return s.str();
}
static void createLongOptions(vector<option>& options, string& optionString, const OptionMap& parsers)
{
options.push_back(option{ .name = "help", .has_arg = no_argument, .flag = nullptr, .val = 'h' });
optionString = ":h";
for (const auto& parserPair: parsers)
{
int shortOpt = parserPair.first;
const OptionPtr& parser = parserPair.second;
option opt;
opt.name = parser->name;
opt.has_arg = parser->hasArgument;
opt.flag = nullptr;
opt.val = shortOpt;
options.push_back(opt);
if ('0' <= shortOpt && shortOpt <= 'z')
{
optionString += (char) shortOpt;
if (parser->hasArgument == required_argument)
{
optionString += ":";
}
}
}
options.push_back(option{ .name = nullptr, .has_arg = 0, .flag = nullptr, .val = 0 });
}
static void verifyCudaDevice(int device)
{
int deviceCount = 0;
hipError_t err = hipGetDeviceCount(&deviceCount);
if (err != hipSuccess)
{
throw string("Unexpected error: ") + hipGetErrorString(err);
}
if (device < 0 || device >= deviceCount)
{
throw string("Invalid CUDA device: ") + std::to_string(device);
}
}
static void verifyNumberOfThreads(size_t numThreads)
{
size_t i = 0;
while ((1ULL << i) <= 32)
{
if ((1ULL << i) == numThreads)
{
return;
}
++i;
}
throw string("Invalid number of threads, must be a power of 2");
}
void Settings::parseArguments(int argc, char** argv)
{
OptionMap parsers = {
#ifdef __DIS_CLUSTER__
{'c', OptionPtr(new Option<uint64_t>(controllerId, "fdid", "ctrl", "NVM controller device identifier"))},
{'f', OptionPtr(new Option<uint64_t>(cudaDeviceId, "fdid", "fdid", "CUDA device FDID"))},
{'a', OptionPtr(new Option<uint32_t>(adapter, "number", "adapter", "DIS adapter number", "0"))},
{'S', OptionPtr(new Option<uint32_t>(segmentId, "offset", "segment", "DIS segment identifier offset", "0"))},
#else
{'c', OptionPtr(new Option<const char*>(controllerPath, "path", "ctrl", "NVM controller device path"))},
#endif
{'g', OptionPtr(new Option<uint32_t>(cudaDevice, "number", "gpu", "specify CUDA device", "0"))},
{'i', OptionPtr(new Option<uint32_t>(nvmNamespace, "identifier", "namespace", "NVM namespace identifier", "1"))},
{'B', OptionPtr(new Option<bool>(doubleBuffered, "bool", "double-buffer", "double buffer disk reads", "false"))},
{'r', OptionPtr(new Option<bool>(stats, "bool", "stats", "print statistics", "false"))},
{'n', OptionPtr(new Range(numChunks, 1, 0, "chunks", "number of chunks per thread", "32"))},
{'p', OptionPtr(new Range(numPages, 1, 0, "pages", "number of pages per chunk", "1"))},
{'t', OptionPtr(new Range(numThreads, 1, 32, "threads", "number of CUDA threads", "32"))},
{'o', OptionPtr(new Option<const char*>(output, "path", "output", "output read data to file"))},
{'s', OptionPtr(new Option<uint64_t>(startBlock, "offset", "offset", "number of blocks to offset", "0"))},
{'b', OptionPtr(new Option<const char*>(blockDevicePath, "path", "block-device", "path to block device"))}
};
string optionString;
vector<option> options;
createLongOptions(options, optionString, parsers);
int index;
int option;
OptionMap::iterator parser;
while ((option = getopt_long(argc, argv, optionString.c_str(), &options[0], &index)) != -1)
{
switch (option)
{
case '?':
throw string("Unknown option: `") + argv[optind - 1] + string("'");
case ':':
throw string("Missing argument for option `") + argv[optind - 1] + string("'");
case 'h':
throw helpString(argv[0], parsers);
default:
parser = parsers.find(option);
if (parser == parsers.end())
{
throw string("Unknown option: `") + argv[optind - 1] + string("'");
}
parser->second->parseArgument(argv[optind - 1], optarg);
break;
}
}
#ifdef __DIS_CLUSTER__
if (blockDevicePath == nullptr && controllerId == 0)
{
throw string("No block device or NVM controller specified");
}
else if (blockDevicePath != nullptr && controllerId != 0)
{
throw string("Either block device or NVM controller must be specified, not both!");
}
#else
if (blockDevicePath == nullptr && controllerPath == nullptr)
{
throw string("No block device or NVM controller specified");
}
else if (blockDevicePath != nullptr && controllerPath != nullptr)
{
throw string("Either block device or NVM controller must be specified, not both!");
}
#endif
if (blockDevicePath != nullptr && doubleBuffered)
{
throw string("Double buffered reading from block device is not supported");
}
verifyCudaDevice(cudaDevice);
verifyNumberOfThreads(numThreads);
setBDF(*this);
}
Settings::Settings()
{
cudaDevice = 0;
cudaDeviceId = 0;
blockDevicePath = nullptr;
controllerPath = nullptr;
controllerId = 0;
adapter = 0;
segmentId = 0;
nvmNamespace = 1;
doubleBuffered = false;
numChunks = 32;
numPages = 1;
startBlock = 0;
stats = false;
output = nullptr;
numThreads = 32;
domain = 0;
bus = 0;
devfn = 0;
}
|
b5ce393be0456337be3b52a73bfa7b73cf7d0276.cu
|
#include <cuda.h>
#include "settings.h"
#include <iomanip>
#include <string>
#include <sstream>
#include <vector>
#include <map>
#include <memory>
#include <algorithm>
#include <functional>
#include <cctype>
#include <cstdint>
#include <cstdlib>
#include <getopt.h>
struct OptionIface;
using std::string;
using std::vector;
using std::make_shared;
typedef std::shared_ptr<OptionIface> OptionPtr;
typedef std::map<int, OptionPtr> OptionMap;
struct OptionIface
{
const char* type;
const char* name;
const char* description;
const char* defaultValue;
int hasArgument;
virtual ~OptionIface() = default;
OptionIface(const char* type, const char* name, const char* description)
: type(type), name(name), description(description), hasArgument(no_argument) { }
OptionIface(const char* type, const char* name, const char* description, const char* dvalue)
: type(type), name(name), description(description), defaultValue(dvalue), hasArgument(no_argument) { }
virtual void parseArgument(const char* optstr, const char* optarg) = 0;
virtual void throwError(const char*, const char* optarg) const
{
throw string("Option ") + name + string(" expects a ") + type + string(", but got `") + optarg + string("'");
}
};
template <typename T>
struct Option: public OptionIface
{
T& value;
Option() = delete;
Option(Option&& rhs) = delete;
Option(const Option& rhs) = delete;
Option(T& value, const char* type, const char* name, const char* description)
: OptionIface(type, name, description)
, value(value)
{
hasArgument = required_argument;
}
Option(T& value, const char* type, const char* name, const char* description, const char* dvalue)
: OptionIface(type, name, description, dvalue)
, value(value)
{
hasArgument = required_argument;
}
void parseArgument(const char* optstr, const char* optarg) override;
};
template <>
void Option<uint32_t>::parseArgument(const char* optstr, const char* optarg)
{
char* endptr = nullptr;
value = std::strtoul(optarg, &endptr, 0);
if (endptr == nullptr || *endptr != '\0')
{
throwError(optstr, optarg);
}
}
template <>
void Option<uint64_t>::parseArgument(const char* optstr, const char* optarg)
{
char* endptr = nullptr;
value = std::strtoul(optarg, &endptr, 0);
if (endptr == nullptr || *endptr != '\0')
{
throwError(optstr, optarg);
}
}
template <>
void Option<bool>::parseArgument(const char* optstr, const char* optarg)
{
string str(optarg);
std::transform(str.begin(), str.end(), str.begin(), std::ptr_fun<int, int>(std::tolower));
if (str == "false" || str == "0" || str == "no" || str == "n" || str == "off" || str == "disable" || str == "disabled")
{
value = false;
}
else if (str == "true" || str == "1" || str == "yes" || str == "y" || str == "on" || str == "enable" || str == "enabled")
{
value = true;
}
else
{
throwError(optstr, optarg);
}
}
template <>
void Option<const char*>::parseArgument(const char*, const char* optarg)
{
value = optarg;
}
struct Range: public Option<uint64_t>
{
uint64_t lower;
uint64_t upper;
Range(uint64_t& value, uint64_t lo, uint64_t hi, const char* name, const char* description, const char* dv)
: Option<uint64_t>(value, "count", name, description, dv)
, lower(lo)
, upper(hi)
{ }
void throwError(const char*, const char*) const override
{
if (upper != 0 && lower != 0)
{
throw string("Option ") + name + string(" expects a value between ") + std::to_string(lower) + " and " + std::to_string(upper);
}
else if (lower != 0)
{
throw string("Option ") + name + string(" must be at least ") + std::to_string(lower);
}
throw string("Option ") + name + string(" must lower than ") + std::to_string(upper);
}
void parseArgument(const char* optstr, const char* optarg) override
{
Option<uint64_t>::parseArgument(optstr, optarg);
if (lower != 0 && value < lower)
{
throwError(optstr, optarg);
}
if (upper != 0 && value > upper)
{
throwError(optstr, optarg);
}
}
};
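/* Illustrative note (not part of the original source): each entry of the OptionMap built in
   Settings::parseArguments below pairs a short option with one of these typed parsers, e.g.
   {'t', OptionPtr(new Range(numThreads, 1, 32, "threads", "number of CUDA threads", "32"))},
   so "--threads=8" invokes Range::parseArgument, which first parses the integer and then
   enforces 1 <= value <= 32, throwing the range message above otherwise. */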
static void setBDF(Settings& settings)
{
cudaDeviceProp props;
cudaError_t err = cudaGetDeviceProperties(&props, settings.cudaDevice);
if (err != cudaSuccess)
{
throw string("Failed to get device properties: ") + cudaGetErrorString(err);
}
settings.domain = props.pciDomainID;
settings.bus = props.pciBusID;
settings.devfn = props.pciDeviceID;
}
string Settings::getDeviceBDF() const
{
using namespace std;
ostringstream s;
s << setfill('0') << setw(4) << hex << domain
<< ":" << setfill('0') << setw(2) << hex << bus
<< ":" << setfill('0') << setw(2) << hex << devfn
<< ".0";
return s.str();
}
string Settings::usageString(const string& name)
{
return "Usage: " + name + " --ctrl=identifier [options]\n"
+ " or: " + name + " --block-device=path [options]";
}
static string helpString(const string& /*name*/, OptionMap& options)
{
using namespace std;
ostringstream s;
s << "" << left
<< setw(16) << "OPTION"
<< setw(2) << " "
<< setw(16) << "TYPE"
<< setw(10) << "DEFAULT"
<< setw(36) << "DESCRIPTION"
<< endl;
for (const auto& optPair: options)
{
const auto& opt = optPair.second;
s << " " << left
<< setw(16) << opt->name
<< setw(16) << opt->type
<< setw(10) << (opt->defaultValue != nullptr ? opt->defaultValue : "")
<< setw(36) << opt->description
<< endl;
}
return s.str();
}
static void createLongOptions(vector<option>& options, string& optionString, const OptionMap& parsers)
{
options.push_back(option{ .name = "help", .has_arg = no_argument, .flag = nullptr, .val = 'h' });
optionString = ":h";
for (const auto& parserPair: parsers)
{
int shortOpt = parserPair.first;
const OptionPtr& parser = parserPair.second;
option opt;
opt.name = parser->name;
opt.has_arg = parser->hasArgument;
opt.flag = nullptr;
opt.val = shortOpt;
options.push_back(opt);
if ('0' <= shortOpt && shortOpt <= 'z')
{
optionString += (char) shortOpt;
if (parser->hasArgument == required_argument)
{
optionString += ":";
}
}
}
options.push_back(option{ .name = nullptr, .has_arg = 0, .flag = nullptr, .val = 0 });
}
static void verifyCudaDevice(int device)
{
int deviceCount = 0;
cudaError_t err = cudaGetDeviceCount(&deviceCount);
if (err != cudaSuccess)
{
throw string("Unexpected error: ") + cudaGetErrorString(err);
}
if (device < 0 || device >= deviceCount)
{
throw string("Invalid CUDA device: ") + std::to_string(device);
}
}
static void verifyNumberOfThreads(size_t numThreads)
{
size_t i = 0;
while ((1ULL << i) <= 32)
{
if ((1ULL << i) == numThreads)
{
return;
}
++i;
}
throw string("Invalid number of threads, must be a power of 2");
}
void Settings::parseArguments(int argc, char** argv)
{
OptionMap parsers = {
#ifdef __DIS_CLUSTER__
{'c', OptionPtr(new Option<uint64_t>(controllerId, "fdid", "ctrl", "NVM controller device identifier"))},
{'f', OptionPtr(new Option<uint64_t>(cudaDeviceId, "fdid", "fdid", "CUDA device FDID"))},
{'a', OptionPtr(new Option<uint32_t>(adapter, "number", "adapter", "DIS adapter number", "0"))},
{'S', OptionPtr(new Option<uint32_t>(segmentId, "offset", "segment", "DIS segment identifier offset", "0"))},
#else
{'c', OptionPtr(new Option<const char*>(controllerPath, "path", "ctrl", "NVM controller device path"))},
#endif
{'g', OptionPtr(new Option<uint32_t>(cudaDevice, "number", "gpu", "specify CUDA device", "0"))},
{'i', OptionPtr(new Option<uint32_t>(nvmNamespace, "identifier", "namespace", "NVM namespace identifier", "1"))},
{'B', OptionPtr(new Option<bool>(doubleBuffered, "bool", "double-buffer", "double buffer disk reads", "false"))},
{'r', OptionPtr(new Option<bool>(stats, "bool", "stats", "print statistics", "false"))},
{'n', OptionPtr(new Range(numChunks, 1, 0, "chunks", "number of chunks per thread", "32"))},
{'p', OptionPtr(new Range(numPages, 1, 0, "pages", "number of pages per chunk", "1"))},
{'t', OptionPtr(new Range(numThreads, 1, 32, "threads", "number of CUDA threads", "32"))},
{'o', OptionPtr(new Option<const char*>(output, "path", "output", "output read data to file"))},
{'s', OptionPtr(new Option<uint64_t>(startBlock, "offset", "offset", "number of blocks to offset", "0"))},
{'b', OptionPtr(new Option<const char*>(blockDevicePath, "path", "block-device", "path to block device"))}
};
string optionString;
vector<option> options;
createLongOptions(options, optionString, parsers);
int index;
int option;
OptionMap::iterator parser;
while ((option = getopt_long(argc, argv, optionString.c_str(), &options[0], &index)) != -1)
{
switch (option)
{
case '?':
throw string("Unknown option: `") + argv[optind - 1] + string("'");
case ':':
throw string("Missing argument for option `") + argv[optind - 1] + string("'");
case 'h':
throw helpString(argv[0], parsers);
default:
parser = parsers.find(option);
if (parser == parsers.end())
{
throw string("Unknown option: `") + argv[optind - 1] + string("'");
}
parser->second->parseArgument(argv[optind - 1], optarg);
break;
}
}
#ifdef __DIS_CLUSTER__
if (blockDevicePath == nullptr && controllerId == 0)
{
throw string("No block device or NVM controller specified");
}
else if (blockDevicePath != nullptr && controllerId != 0)
{
throw string("Either block device or NVM controller must be specified, not both!");
}
#else
if (blockDevicePath == nullptr && controllerPath == nullptr)
{
throw string("No block device or NVM controller specified");
}
else if (blockDevicePath != nullptr && controllerPath != nullptr)
{
throw string("Either block device or NVM controller must be specified, not both!");
}
#endif
if (blockDevicePath != nullptr && doubleBuffered)
{
throw string("Double buffered reading from block device is not supported");
}
verifyCudaDevice(cudaDevice);
verifyNumberOfThreads(numThreads);
setBDF(*this);
}
Settings::Settings()
{
cudaDevice = 0;
cudaDeviceId = 0;
blockDevicePath = nullptr;
controllerPath = nullptr;
controllerId = 0;
adapter = 0;
segmentId = 0;
nvmNamespace = 1;
doubleBuffered = false;
numChunks = 32;
numPages = 1;
startBlock = 0;
stats = false;
output = nullptr;
numThreads = 32;
domain = 0;
bus = 0;
devfn = 0;
}
|
df4b66b35b51eb5964a93a45c7c8fc9a1e9066c1.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "cpy.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *a = NULL;
hipMalloc(&a, XSIZE*YSIZE*sizeof(float));
float *b = NULL;
hipMalloc(&b, XSIZE*YSIZE*sizeof(float));
int n = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(cpy, dim3(gridBlock), dim3(threadBlock), 0, 0, a, b, n);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(cpy, dim3(gridBlock), dim3(threadBlock), 0, 0, a, b, n);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(cpy, dim3(gridBlock), dim3(threadBlock), 0, 0, a, b, n);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
df4b66b35b51eb5964a93a45c7c8fc9a1e9066c1.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "cpy.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *a = NULL;
cudaMalloc(&a, XSIZE*YSIZE*sizeof(float));
float *b = NULL;
cudaMalloc(&b, XSIZE*YSIZE*sizeof(float));
int n = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
cpy<<<gridBlock,threadBlock>>>(a,b,n);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
cpy<<<gridBlock,threadBlock>>>(a,b,n);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
cpy<<<gridBlock,threadBlock>>>(a,b,n);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
4412aeedd7902016c570697108a9b16da4057354.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/strings/detail/utilities.hpp>
#include <cudf/strings/split/partition.hpp>
#include <cudf/strings/string_view.cuh>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/error.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/for_each.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/pair.h>
#include <vector>
namespace cudf {
namespace strings {
namespace detail {
using string_index_pair = thrust::pair<const char*, size_type>;
namespace {
//
// Partition splits the string at the first occurrence of delimiter, and returns 3 elements
// containing the part before the delimiter, the delimiter itself, and the part after the delimiter.
// If the delimiter is not found, return 3 elements containing the string itself, followed by two
// empty strings.
//
// strs = ["abcde", nullptr, "a_bc_def", "a__bc", "_ab_cd", "ab_cd_"]
// results = partition(strs,"_")
// col0 col1 col2
// 0 abcde "" ""
// 1 null null null
// 2 a _ bc_déf
// 3 a _ _bc
// 4 "" _ ab_cd
// 5 ab _ cd_
//
struct partition_fn {
column_device_view const d_strings; // strings to split
string_view const d_delimiter; // delimiter for split
string_index_pair* d_indices_left{}; // the three
string_index_pair* d_indices_delim{}; // output columns
string_index_pair* d_indices_right{}; // amigos
partition_fn(column_device_view const& d_strings,
string_view const& d_delimiter,
rmm::device_uvector<string_index_pair>& indices_left,
rmm::device_uvector<string_index_pair>& indices_delim,
rmm::device_uvector<string_index_pair>& indices_right)
: d_strings(d_strings),
d_delimiter(d_delimiter),
d_indices_left(indices_left.data()),
d_indices_delim(indices_delim.data()),
d_indices_right(indices_right.data())
{
}
__device__ void set_null_entries(size_type idx)
{
if (d_indices_left) {
d_indices_left[idx] = string_index_pair{nullptr, 0};
d_indices_delim[idx] = string_index_pair{nullptr, 0};
d_indices_right[idx] = string_index_pair{nullptr, 0};
}
}
__device__ size_type check_delimiter(size_type idx,
string_view const& d_str,
string_view::const_iterator& itr)
{
size_type offset = itr.byte_offset();
size_type pos = -1;
if (d_delimiter.empty()) {
if (*itr <= ' ') // whitespace delimited
pos = offset;
} else {
auto bytes = ::min(d_str.size_bytes() - offset, d_delimiter.size_bytes());
if (d_delimiter.compare(d_str.data() + offset, bytes) == 0) pos = offset;
}
if (pos >= 0) // delimiter found, set results
{
d_indices_left[idx] = string_index_pair{d_str.data(), offset};
if (d_delimiter.empty()) {
d_indices_delim[idx] = string_index_pair{d_str.data() + offset, 1};
++offset;
} else {
d_indices_delim[idx] = string_index_pair{d_delimiter.data(), d_delimiter.size_bytes()};
offset += d_delimiter.size_bytes();
}
d_indices_right[idx] = string_index_pair{d_str.data() + offset, d_str.size_bytes() - offset};
}
return pos;
}
__device__ void operator()(size_type idx)
{
if (d_strings.is_null(idx)) {
set_null_entries(idx);
return;
}
string_view d_str = d_strings.element<string_view>(idx);
size_type pos = -1;
for (auto itr = d_str.begin(); (pos < 0) && (itr < d_str.end()); ++itr)
pos = check_delimiter(idx, d_str, itr);
if (pos < 0) // delimiter not found
{
d_indices_left[idx] = string_index_pair{d_str.data(), d_str.size_bytes()};
d_indices_delim[idx] = string_index_pair{"", 0}; // two empty
d_indices_right[idx] = string_index_pair{"", 0}; // strings added
}
}
};
//
// This follows most of the same logic as partition above except that the delimiter
// search starts from the end of each string. Also, if no delimiter is found the
// resulting array includes two empty strings followed by the original string.
//
// strs = ["abcde", nullptr, "a_bc_def", "a__bc", "_ab_cd", "ab_cd_"]
// results = rpartition(strs,"_")
// col0 col1 col2
// 0 "" "" abcde
// 1 null null null
// 2 a_bc _ déf
// 3 a_ _ bc
// 4 ab _ cd
// 5 ab_cd _ ""
//
struct rpartition_fn : public partition_fn {
rpartition_fn(column_device_view const& d_strings,
string_view const& d_delimiter,
rmm::device_uvector<string_index_pair>& indices_left,
rmm::device_uvector<string_index_pair>& indices_delim,
rmm::device_uvector<string_index_pair>& indices_right)
: partition_fn(d_strings, d_delimiter, indices_left, indices_delim, indices_right)
{
}
__device__ void operator()(size_type idx)
{
if (d_strings.is_null(idx)) {
set_null_entries(idx);
return;
}
string_view d_str = d_strings.element<string_view>(idx);
size_type pos = -1;
auto itr = d_str.end();
while ((pos < 0) && (d_str.begin() < itr)) {
--itr;
pos = check_delimiter(idx, d_str, itr);
}
if (pos < 0) // delimiter not found
{
d_indices_left[idx] = string_index_pair{"", 0}; // two empty
d_indices_delim[idx] = string_index_pair{"", 0}; // strings
d_indices_right[idx] = string_index_pair{d_str.data(), d_str.size_bytes()};
}
}
};
} // namespace
std::unique_ptr<table> partition(strings_column_view const& strings,
string_scalar const& delimiter,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(delimiter.is_valid(stream), "Parameter delimiter must be valid");
auto strings_count = strings.size();
if (strings_count == 0) return std::make_unique<table>(std::vector<std::unique_ptr<column>>());
auto strings_column = column_device_view::create(strings.parent(), stream);
string_view d_delimiter(delimiter.data(), delimiter.size());
auto left_indices = rmm::device_uvector<string_index_pair>(strings_count, stream);
auto delim_indices = rmm::device_uvector<string_index_pair>(strings_count, stream);
auto right_indices = rmm::device_uvector<string_index_pair>(strings_count, stream);
partition_fn partitioner(
*strings_column, d_delimiter, left_indices, delim_indices, right_indices);
thrust::for_each_n(rmm::exec_policy(stream),
thrust::make_counting_iterator<size_type>(0),
strings_count,
partitioner);
std::vector<std::unique_ptr<column>> results;
results.emplace_back(make_strings_column(left_indices, stream, mr));
results.emplace_back(make_strings_column(delim_indices, stream, mr));
results.emplace_back(make_strings_column(right_indices, stream, mr));
return std::make_unique<table>(std::move(results));
}
std::unique_ptr<table> rpartition(strings_column_view const& strings,
string_scalar const& delimiter,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(delimiter.is_valid(stream), "Parameter delimiter must be valid");
auto strings_count = strings.size();
if (strings_count == 0) return std::make_unique<table>(std::vector<std::unique_ptr<column>>());
auto strings_column = column_device_view::create(strings.parent(), stream);
string_view d_delimiter(delimiter.data(), delimiter.size());
auto left_indices = rmm::device_uvector<string_index_pair>(strings_count, stream);
auto delim_indices = rmm::device_uvector<string_index_pair>(strings_count, stream);
auto right_indices = rmm::device_uvector<string_index_pair>(strings_count, stream);
rpartition_fn partitioner(
*strings_column, d_delimiter, left_indices, delim_indices, right_indices);
thrust::for_each_n(rmm::exec_policy(stream),
thrust::make_counting_iterator<size_type>(0),
strings_count,
partitioner);
std::vector<std::unique_ptr<column>> results;
results.emplace_back(make_strings_column(left_indices, stream, mr));
results.emplace_back(make_strings_column(delim_indices, stream, mr));
results.emplace_back(make_strings_column(right_indices, stream, mr));
return std::make_unique<table>(std::move(results));
}
} // namespace detail
// external APIs
std::unique_ptr<table> partition(strings_column_view const& strings,
string_scalar const& delimiter,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::partition(strings, delimiter, cudf::get_default_stream(), mr);
}
std::unique_ptr<table> rpartition(strings_column_view const& strings,
string_scalar const& delimiter,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::rpartition(strings, delimiter, cudf::get_default_stream(), mr);
}
} // namespace strings
} // namespace cudf
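/* Illustrative call sketch (not part of the original file; it assumes the public declarations
   from cudf/strings/split/partition.hpp): given a strings column wrapped in a
   cudf::strings_column_view sv,
     auto parts = cudf::strings::partition(sv, cudf::string_scalar("_"));
   yields a table with three string columns -- the text before the first "_", the matched
   delimiter, and the remainder -- matching the example table in the comment near the top of
   this file. */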
|
4412aeedd7902016c570697108a9b16da4057354.cu
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/strings/detail/utilities.hpp>
#include <cudf/strings/split/partition.hpp>
#include <cudf/strings/string_view.cuh>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/error.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/for_each.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/pair.h>
#include <vector>
namespace cudf {
namespace strings {
namespace detail {
using string_index_pair = thrust::pair<const char*, size_type>;
namespace {
//
// Partition splits the string at the first occurrence of delimiter, and returns 3 elements
// containing the part before the delimiter, the delimiter itself, and the part after the delimiter.
// If the delimiter is not found, return 3 elements containing the string itself, followed by two
// empty strings.
//
// strs = ["abcde", nullptr, "a_bc_def", "a__bc", "_ab_cd", "ab_cd_"]
// results = partition(strs,"_")
// col0 col1 col2
// 0 abcde "" ""
// 1 null null null
// 2 a _ bc_déf
// 3 a _ _bc
// 4 "" _ ab_cd
// 5 ab _ cd_
//
struct partition_fn {
column_device_view const d_strings; // strings to split
string_view const d_delimiter; // delimiter for split
string_index_pair* d_indices_left{}; // the three
string_index_pair* d_indices_delim{}; // output columns
string_index_pair* d_indices_right{}; // amigos
partition_fn(column_device_view const& d_strings,
string_view const& d_delimiter,
rmm::device_uvector<string_index_pair>& indices_left,
rmm::device_uvector<string_index_pair>& indices_delim,
rmm::device_uvector<string_index_pair>& indices_right)
: d_strings(d_strings),
d_delimiter(d_delimiter),
d_indices_left(indices_left.data()),
d_indices_delim(indices_delim.data()),
d_indices_right(indices_right.data())
{
}
__device__ void set_null_entries(size_type idx)
{
if (d_indices_left) {
d_indices_left[idx] = string_index_pair{nullptr, 0};
d_indices_delim[idx] = string_index_pair{nullptr, 0};
d_indices_right[idx] = string_index_pair{nullptr, 0};
}
}
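  // Tests whether the delimiter starts at the byte offset referenced by `itr`
  // (any character <= ' ' matches when the delimiter is empty). On a match the
  // three output entries for row `idx` are written and the matching byte offset
  // is returned; otherwise -1 is returned.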
__device__ size_type check_delimiter(size_type idx,
string_view const& d_str,
string_view::const_iterator& itr)
{
size_type offset = itr.byte_offset();
size_type pos = -1;
if (d_delimiter.empty()) {
if (*itr <= ' ') // whitespace delimited
pos = offset;
} else {
auto bytes = std::min(d_str.size_bytes() - offset, d_delimiter.size_bytes());
if (d_delimiter.compare(d_str.data() + offset, bytes) == 0) pos = offset;
}
if (pos >= 0) // delimiter found, set results
{
d_indices_left[idx] = string_index_pair{d_str.data(), offset};
if (d_delimiter.empty()) {
d_indices_delim[idx] = string_index_pair{d_str.data() + offset, 1};
++offset;
} else {
d_indices_delim[idx] = string_index_pair{d_delimiter.data(), d_delimiter.size_bytes()};
offset += d_delimiter.size_bytes();
}
d_indices_right[idx] = string_index_pair{d_str.data() + offset, d_str.size_bytes() - offset};
}
return pos;
}
__device__ void operator()(size_type idx)
{
if (d_strings.is_null(idx)) {
set_null_entries(idx);
return;
}
string_view d_str = d_strings.element<string_view>(idx);
size_type pos = -1;
for (auto itr = d_str.begin(); (pos < 0) && (itr < d_str.end()); ++itr)
pos = check_delimiter(idx, d_str, itr);
if (pos < 0) // delimiter not found
{
d_indices_left[idx] = string_index_pair{d_str.data(), d_str.size_bytes()};
d_indices_delim[idx] = string_index_pair{"", 0}; // two empty
d_indices_right[idx] = string_index_pair{"", 0}; // strings added
}
}
};
//
// This follows most of the same logic as partition above except that the delimiter
// search starts from the end of each string. Also, if no delimiter is found the
// resulting array includes two empty strings followed by the original string.
//
// strs = ["abcde", nullptr, "a_bc_déf", "a__bc", "_ab_cd", "ab_cd_"]
// results = rpartition(strs,"_")
// col0 col1 col2
// 0 "" "" abcde
// 1 null null null
// 2 a_bc _ déf
// 3 a_ _ bc
// 4 ab _ cd
// 5 ab_cd _ ""
//
struct rpartition_fn : public partition_fn {
rpartition_fn(column_device_view const& d_strings,
string_view const& d_delimiter,
rmm::device_uvector<string_index_pair>& indices_left,
rmm::device_uvector<string_index_pair>& indices_delim,
rmm::device_uvector<string_index_pair>& indices_right)
: partition_fn(d_strings, d_delimiter, indices_left, indices_delim, indices_right)
{
}
__device__ void operator()(size_type idx)
{
if (d_strings.is_null(idx)) {
set_null_entries(idx);
return;
}
string_view d_str = d_strings.element<string_view>(idx);
size_type pos = -1;
auto itr = d_str.end();
while ((pos < 0) && (d_str.begin() < itr)) {
--itr;
pos = check_delimiter(idx, d_str, itr);
}
if (pos < 0) // delimiter not found
{
d_indices_left[idx] = string_index_pair{"", 0}; // two empty
d_indices_delim[idx] = string_index_pair{"", 0}; // strings
d_indices_right[idx] = string_index_pair{d_str.data(), d_str.size_bytes()};
}
}
};
} // namespace
std::unique_ptr<table> partition(strings_column_view const& strings,
string_scalar const& delimiter,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(delimiter.is_valid(stream), "Parameter delimiter must be valid");
auto strings_count = strings.size();
if (strings_count == 0) return std::make_unique<table>(std::vector<std::unique_ptr<column>>());
auto strings_column = column_device_view::create(strings.parent(), stream);
string_view d_delimiter(delimiter.data(), delimiter.size());
auto left_indices = rmm::device_uvector<string_index_pair>(strings_count, stream);
auto delim_indices = rmm::device_uvector<string_index_pair>(strings_count, stream);
auto right_indices = rmm::device_uvector<string_index_pair>(strings_count, stream);
partition_fn partitioner(
*strings_column, d_delimiter, left_indices, delim_indices, right_indices);
thrust::for_each_n(rmm::exec_policy(stream),
thrust::make_counting_iterator<size_type>(0),
strings_count,
partitioner);
std::vector<std::unique_ptr<column>> results;
results.emplace_back(make_strings_column(left_indices, stream, mr));
results.emplace_back(make_strings_column(delim_indices, stream, mr));
results.emplace_back(make_strings_column(right_indices, stream, mr));
return std::make_unique<table>(std::move(results));
}
std::unique_ptr<table> rpartition(strings_column_view const& strings,
string_scalar const& delimiter,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(delimiter.is_valid(stream), "Parameter delimiter must be valid");
auto strings_count = strings.size();
if (strings_count == 0) return std::make_unique<table>(std::vector<std::unique_ptr<column>>());
auto strings_column = column_device_view::create(strings.parent(), stream);
string_view d_delimiter(delimiter.data(), delimiter.size());
auto left_indices = rmm::device_uvector<string_index_pair>(strings_count, stream);
auto delim_indices = rmm::device_uvector<string_index_pair>(strings_count, stream);
auto right_indices = rmm::device_uvector<string_index_pair>(strings_count, stream);
rpartition_fn partitioner(
*strings_column, d_delimiter, left_indices, delim_indices, right_indices);
thrust::for_each_n(rmm::exec_policy(stream),
thrust::make_counting_iterator<size_type>(0),
strings_count,
partitioner);
std::vector<std::unique_ptr<column>> results;
results.emplace_back(make_strings_column(left_indices, stream, mr));
results.emplace_back(make_strings_column(delim_indices, stream, mr));
results.emplace_back(make_strings_column(right_indices, stream, mr));
return std::make_unique<table>(std::move(results));
}
} // namespace detail
// external APIs
std::unique_ptr<table> partition(strings_column_view const& strings,
string_scalar const& delimiter,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::partition(strings, delimiter, cudf::get_default_stream(), mr);
}
std::unique_ptr<table> rpartition(strings_column_view const& strings,
string_scalar const& delimiter,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::rpartition(strings, delimiter, cudf::get_default_stream(), mr);
}
} // namespace strings
} // namespace cudf
|
8ca8bd0a6b8f397c78fa25fbd78475639447fb39.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include "Device.h"
#include "Montecarlo.h"
using std::cout;
using std::endl;
extern __global__ void montecarlo(int* ptrDevNxTotal, int nbFlecheByThread);
Montecarlo::Montecarlo(const Grid& grid)
{
this->dg = grid.dg;
this->db = grid.db;
this->sizeResultGM = sizeof(int);
this->sizeTabSM = db.x * sizeof(int);
nbFlecheByThread = INT_MAX;
result = 0;
Device::malloc(&ptrDevNxTotal, sizeResultGM);
hipMemset(ptrDevNxTotal, 0,sizeResultGM);
}
Montecarlo::~Montecarlo(void)
{
Device::free(ptrDevNxTotal);
}
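// Launches the Monte Carlo kernel asynchronously; sizeTabSM bytes of dynamic
// shared memory hold the per-block reduction buffer (one int per thread of
// the block). The device-to-host copy of the result provides synchronization.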
void Montecarlo::run()
{
Device::lastCudaError("montecarlo (before)"); // temp debug
hipLaunchKernelGGL((montecarlo), dim3(dg), dim3(db), sizeTabSM, 0, ptrDevNxTotal, nbFlecheByThread); // asynchronous launch
Device::lastCudaError("montecarlo (after)"); // temp debug
//Device::synchronize(); // temp debug: only needed for printf output from the GPU
Device::memcpyDToH(&result, ptrDevNxTotal, sizeResultGM); // implicit synchronization barrier
}
int Montecarlo::getResult(){
return result;
}
|
8ca8bd0a6b8f397c78fa25fbd78475639447fb39.cu
|
#include <iostream>
#include "Device.h"
#include "Montecarlo.h"
using std::cout;
using std::endl;
extern __global__ void montecarlo(int* ptrDevNxTotal, int nbFlecheByThread);
Montecarlo::Montecarlo(const Grid& grid)
{
this->dg = grid.dg;
this->db = grid.db;
this->sizeResultGM = sizeof(int);
this->sizeTabSM = db.x * sizeof(int);
nbFlecheByThread = INT_MAX;
result = 0;
Device::malloc(&ptrDevNxTotal, sizeResultGM);
cudaMemset(ptrDevNxTotal, 0,sizeResultGM);
}
Montecarlo::~Montecarlo(void)
{
Device::free(ptrDevNxTotal);
}
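// Launches the Monte Carlo kernel asynchronously; sizeTabSM bytes of dynamic
// shared memory hold the per-block reduction buffer (one int per thread of
// the block). The device-to-host copy of the result provides synchronization.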
void Montecarlo::run()
{
Device::lastCudaError("montecarlo (before)"); // temp debug
montecarlo<<<dg, db, sizeTabSM>>>(ptrDevNxTotal, nbFlecheByThread); // asynchronous launch
Device::lastCudaError("montecarlo (after)"); // temp debug
//Device::synchronize(); // temp debug: only needed for printf output from the GPU
Device::memcpyDToH(&result, ptrDevNxTotal, sizeResultGM); // implicit synchronization barrier
}
int Montecarlo::getResult(){
return result;
}
|
0f5db54978098f4487bcca89ecf666a2003fdeb4.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "grayScale3.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
uchar3 *input = NULL;
hipMalloc(&input, XSIZE*YSIZE);
uchar3 *output = NULL;
hipMalloc(&output, XSIZE*YSIZE);
int width = XSIZE;
int height = YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
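// Pad the working dimensions up to the next multiple of the block size so the
// launch grid covers every pixel.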
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
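// One untimed warm-up launch, ten further warm-up iterations, then a timed
// loop of 1000 launches; the total elapsed time is reported in microseconds.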
hipFree(0);
hipLaunchKernelGGL((grayScale3), dim3(gridBlock), dim3(threadBlock), 0, 0, input, output, width, height);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((grayScale3), dim3(gridBlock), dim3(threadBlock), 0, 0, input, output, width, height);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((grayScale3), dim3(gridBlock), dim3(threadBlock), 0, 0, input, output, width, height);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
0f5db54978098f4487bcca89ecf666a2003fdeb4.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "grayScale3.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
uchar3 *input = NULL;
cudaMalloc(&input, XSIZE*YSIZE);
uchar3 *output = NULL;
cudaMalloc(&output, XSIZE*YSIZE);
int width = XSIZE;
int height = YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
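// Pad the working dimensions up to the next multiple of the block size so the
// launch grid covers every pixel.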
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
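// One untimed warm-up launch, ten further warm-up iterations, then a timed
// loop of 1000 launches; the total elapsed time is reported in microseconds.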
cudaFree(0);
grayScale3<<<gridBlock,threadBlock>>>(input,output,width,height);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
grayScale3<<<gridBlock,threadBlock>>>(input,output,width,height);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
grayScale3<<<gridBlock,threadBlock>>>(input,output,width,height);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
4b82c94a4bd83cf4c5f1a47c3521084e82507b98.hip
|
// !!! This is a file automatically generated by hipify!!!
// clang-format off
/************************************************************************************\
* *
* Copyright 2014 Advanced Micro Devices, Inc. *
* Copyright (c) 2015 Mark D. Hill and David A. Wood *
* All rights reserved. *
* *
* Redistribution and use in source and binary forms, with or without *
* modification, are permitted provided that the following are met: *
* *
* You must reproduce the above copyright notice. *
* *
* Neither the name of the copyright holder nor the names of its contributors *
* may be used to endorse or promote products derived from this software *
* without specific, prior, written permission from at least the copyright holder. *
* *
* You must include the following terms in your license and/or other materials *
* provided with the software. *
* *
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" *
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE *
* IMPLIED WARRANTIES OF MERCHANTABILITY, NON-INFRINGEMENT, AND FITNESS FOR A *
* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER *
* OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, *
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT *
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS *
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN *
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING *
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY *
* OF SUCH DAMAGE. *
* *
* Without limiting the foregoing, the software may implement third party *
* technologies for which you must obtain licenses from parties other than AMD. *
* You agree that AMD has not obtained or conveyed to you, and that you shall *
* be responsible for obtaining the rights to use and/or distribute the applicable *
* underlying intellectual property rights related to the third party technologies. *
* These third party technologies are not licensed hereunder. *
* *
* If you use the software (in whole or in part), you shall adhere to all *
* applicable U.S., European, and other export laws, including but not limited to *
* the U.S. Export Administration Regulations ("EAR") (15 C.F.R Sections 730-774), *
* and E.U. Council Regulation (EC) No 428/2009 of 5 May 2009. Further, pursuant *
* to Section 740.6 of the EAR, you hereby certify that, except pursuant to a *
* license granted by the United States Department of Commerce Bureau of Industry *
* and Security or as otherwise permitted pursuant to a License Exception under *
* the U.S. Export Administration Regulations ("EAR"), you will not (1) export, *
* re-export or release to a national of a country in Country Groups D:1, E:1 or *
* E:2 any restricted technology, software, or source code you receive hereunder, *
* or (2) export to Country Groups D:1, E:1 or E:2 the direct product of such *
* technology or software, if such foreign produced direct product is subject to *
* national security controls as identified on the Commerce Control List (currently *
* found in Supplement 1 to Part 774 of EAR). For the most current Country Group *
* listings, or for additional information about the EAR or your obligations under *
* those regulations, please refer to the U.S. Bureau of Industry and Security's *
* website at http://www.bis.doc.gov/. *
* *
\************************************************************************************/
// clang-format on
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
//#include "../../graph_parser/parse.h"
#include <hip/hip_runtime_api.h>
#include "../../graph_parser/parse.cpp"
#include "../../graph_parser/util.cpp"
#include "../../graph_parser/util.h"
#include "kernel.hip"
// Iteration count
#define ITER 20
void print_vectorf(float *vector, int num);
int main(int argc, char **argv) {
char *tmpchar;
int num_nodes;
int num_edges;
int file_format = 1;
bool directed = 0;
hipError_t err = hipSuccess;
if (argc == 3) {
tmpchar = argv[1]; // Graph inputfile
file_format = atoi(argv[2]); // File format
} else {
fprintf(stderr, "You did something wrong!\n");
exit(1);
}
// Allocate the csr structure
csr_array *csr;
// Parse graph files into csr structure
if (file_format == 1) {
// Metis
csr = parseMetis(tmpchar, &num_nodes, &num_edges, directed);
} else if (file_format == 0) {
// Dimacs9
csr = parseCOO(tmpchar, &num_nodes, &num_edges, 1);
} else if (file_format == 2) {
// Matrix market
csr = parseMM(tmpchar, &num_nodes, &num_edges, directed, 0);
} else {
printf("reserve for future");
exit(1);
}
// Allocate rank_array
float *rank_array = (float *)malloc(num_nodes * sizeof(float));
if (!rank_array) {
fprintf(stderr, "rank array not allocated successfully\n");
return -1;
}
int *row_d;
int *col_d;
int *inrow_d;
int *incol_d;
float *pagerank_d;
// Create device-side buffers for the graph
err = hipMalloc(&row_d, num_nodes * sizeof(int));
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMalloc row_d (size:%d) => %s\n", num_nodes,
hipGetErrorString(err));
return -1;
}
err = hipMalloc(&col_d, num_edges * sizeof(int));
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMalloc col_d (size:%d) => %s\n", num_edges,
hipGetErrorString(err));
return -1;
}
err = hipMalloc(&inrow_d, num_nodes * sizeof(int));
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMalloc inrow_d (size:%d) => %s\n",
num_nodes, hipGetErrorString(err));
return -1;
}
err = hipMalloc(&incol_d, num_edges * sizeof(int));
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMalloc incol_d (size:%d) => %s\n",
num_edges, hipGetErrorString(err));
return -1;
}
// Create buffers for pagerank
err = hipMalloc(&pagerank_d, num_nodes * sizeof(float));
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMalloc pagerank_d (size:%d) => %s\n",
num_nodes, hipGetErrorString(err));
return -1;
}
double timer1 = gettime();
// Copy the data to the device-side buffers
err = hipMemcpy(row_d, csr->row_array, num_nodes * sizeof(int),
hipMemcpyHostToDevice);
if (err != hipSuccess) {
fprintf(stderr, "ERROR:#endif hipMemcpy row_d (size:%d) => %s\n",
num_nodes, hipGetErrorString(err));
return -1;
}
err = hipMemcpy(col_d, csr->col_array, num_edges * sizeof(int),
hipMemcpyHostToDevice);
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMemcpy col_d (size:%d) => %s\n", num_nodes,
hipGetErrorString(err));
return -1;
}
err = hipMemcpy(inrow_d, csr->inrow_array, num_nodes * sizeof(int),
hipMemcpyHostToDevice);
if (err != hipSuccess) {
fprintf(stderr, "ERROR:#endif hipMemcpy inrow_d (size:%d) => %s\n",
num_nodes, hipGetErrorString(err));
return -1;
}
err = hipMemcpy(incol_d, csr->incol_array, num_edges * sizeof(int),
hipMemcpyHostToDevice);
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMemcpy incol_d (size:%d) => %s\n",
num_nodes, hipGetErrorString(err));
return -1;
}
// Set up work dimensions
int block_size = 256;
int num_blocks = (num_nodes + block_size - 1) / block_size;
dim3 threads(block_size, 1, 1);
dim3 grid(num_blocks, 1, 1);
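// Enlarge the device malloc heap (to 4 GiB) before any kernel launch,
// presumably because the GraphChi-style init kernels allocate per-vertex
// edge storage with device-side new/malloc.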
hipDeviceSetLimit(hipLimitMallocHeapSize, 4ULL * 1024 * 1024 * 1024);
double timer3 = gettime();
ChiVertex<float, float> **vertex;
GraphChiContext *context;
err = hipMalloc(&vertex, num_nodes * sizeof(ChiVertex<float, float> *));
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMalloc vertex (size:%d) => %s\n", num_edges,
hipGetErrorString(err));
return -1;
}
err = hipMalloc(&context, sizeof(GraphChiContext));
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMalloc context (size:%d) => %s\n",
num_edges, hipGetErrorString(err));
return -1;
}
printf("Start initCtx\n");
hipLaunchKernelGGL(( initContext), dim3(1), dim3(1), 0, 0, context, num_nodes, num_edges);
hipDeviceSynchronize();
err = hipGetLastError();
if (err != hipSuccess) {
fprintf(stderr, "ERROR: initCtx failed (%s)\n",
hipGetErrorString(err));
return -1;
}
printf("Start initObj\n");
hipLaunchKernelGGL(( initObject), dim3(grid), dim3(threads), 0, 0, vertex, context, row_d, col_d, inrow_d,
incol_d);
hipDeviceSynchronize();
err = hipGetLastError();
if (err != hipSuccess) {
fprintf(stderr, "ERROR: initObject failed (%s)\n",
hipGetErrorString(err));
return -1;
}
printf("Start initOutEdge\n");
hipLaunchKernelGGL(( initOutEdge), dim3(grid), dim3(threads), 0, 0, vertex, context, row_d, col_d);
hipDeviceSynchronize();
err = hipGetLastError();
if (err != hipSuccess) {
fprintf(stderr, "ERROR: initOutEdge failed (%s)\n",
hipGetErrorString(err));
return -1;
}
// Run PageRank for a fixed number of iterations. TODO: convergence-based termination
for (int i = 0; i < ITER; i++) {
printf("Start PageRank\n");
hipLaunchKernelGGL(( PageRank), dim3(grid), dim3(threads), 0, 0, vertex, context, i);
printf("Finish PageRank\n");
hipDeviceSynchronize();
err = hipGetLastError();
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipLaunch failed (%s)\n",
hipGetErrorString(err));
return -1;
}
}
hipDeviceSynchronize();
double timer4 = gettime();
printf("Start Copyback\n");
hipLaunchKernelGGL(( copyBack), dim3(grid), dim3(threads), 0, 0, vertex, context, pagerank_d);
printf("End Copyback\n");
hipDeviceSynchronize();
err = hipGetLastError();
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipLaunch failed (%s)\n",
hipGetErrorString(err));
return -1;
}
// Copy the rank buffer back
err = hipMemcpy(rank_array, pagerank_d, num_nodes * sizeof(float),
hipMemcpyDeviceToHost);
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMemcpy() failed (%s)\n",
hipGetErrorString(err));
return -1;
}
double timer2 = gettime();
// Report timing characteristics
printf("kernel time = %lf ms\n", (timer4 - timer3) * 1000);
printf("kernel + memcpy time = %lf ms\n", (timer2 - timer1) * 1000);
#if 1
// Print rank array
print_vectorf(rank_array, num_nodes);
#endif
// Free the host-side arrays
free(rank_array);
csr->freeArrays();
free(csr);
// Free the device buffers
hipFree(row_d);
hipFree(col_d);
hipFree(inrow_d);
hipFree(incol_d);
hipFree(pagerank_d);
return 0;
}
void print_vectorf(float *vector, int num) {
FILE *fp = fopen("result.out", "w");
if (!fp) {
printf("ERROR: unable to open result.txt\n");
}
for (int i = 0; i < num; i++) {
fprintf(fp, "%f\n", vector[i]);
}
fclose(fp);
}
|
4b82c94a4bd83cf4c5f1a47c3521084e82507b98.cu
|
// clang-format off
/************************************************************************************\
* *
* Copyright © 2014 Advanced Micro Devices, Inc.                                      *
* Copyright (c) 2015 Mark D. Hill and David A. Wood *
* All rights reserved. *
* *
* Redistribution and use in source and binary forms, with or without *
* modification, are permitted provided that the following are met: *
* *
* You must reproduce the above copyright notice. *
* *
* Neither the name of the copyright holder nor the names of its contributors *
* may be used to endorse or promote products derived from this software *
* without specific, prior, written permission from at least the copyright holder. *
* *
* You must include the following terms in your license and/or other materials *
* provided with the software. *
* *
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" *
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE *
* IMPLIED WARRANTIES OF MERCHANTABILITY, NON-INFRINGEMENT, AND FITNESS FOR A *
* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER *
* OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, *
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT *
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS *
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN *
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING *
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY *
* OF SUCH DAMAGE. *
* *
* Without limiting the foregoing, the software may implement third party *
* technologies for which you must obtain licenses from parties other than AMD. *
* You agree that AMD has not obtained or conveyed to you, and that you shall *
* be responsible for obtaining the rights to use and/or distribute the applicable *
* underlying intellectual property rights related to the third party technologies. *
* These third party technologies are not licensed hereunder. *
* *
* If you use the software (in whole or in part), you shall adhere to all *
* applicable U.S., European, and other export laws, including but not limited to *
* the U.S. Export Administration Regulations ("EAR") (15 C.F.R Sections 730-774),    *
* and E.U. Council Regulation (EC) No 428/2009 of 5 May 2009. Further, pursuant *
* to Section 740.6 of the EAR, you hereby certify that, except pursuant to a *
* license granted by the United States Department of Commerce Bureau of Industry *
* and Security or as otherwise permitted pursuant to a License Exception under *
* the U.S. Export Administration Regulations ("EAR"), you will not (1) export, *
* re-export or release to a national of a country in Country Groups D:1, E:1 or *
* E:2 any restricted technology, software, or source code you receive hereunder, *
* or (2) export to Country Groups D:1, E:1 or E:2 the direct product of such *
* technology or software, if such foreign produced direct product is subject to *
* national security controls as identified on the Commerce Control List (currently *
* found in Supplement 1 to Part 774 of EAR). For the most current Country Group *
* listings, or for additional information about the EAR or your obligations under *
* those regulations, please refer to the U.S. Bureau of Industry and Security's *
* website at http://www.bis.doc.gov/. *
* *
\************************************************************************************/
// clang-format on
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
//#include "../../graph_parser/parse.h"
#include <cuda_runtime_api.h>
#include "../../graph_parser/parse.cpp"
#include "../../graph_parser/util.cpp"
#include "../../graph_parser/util.h"
#include "kernel.cu"
// Iteration count
#define ITER 20
void print_vectorf(float *vector, int num);
int main(int argc, char **argv) {
char *tmpchar;
int num_nodes;
int num_edges;
int file_format = 1;
bool directed = 0;
cudaError_t err = cudaSuccess;
if (argc == 3) {
tmpchar = argv[1]; // Graph inputfile
file_format = atoi(argv[2]); // File format
} else {
fprintf(stderr, "You did something wrong!\n");
exit(1);
}
// Allocate the csr structure
csr_array *csr;
// Parse graph files into csr structure
if (file_format == 1) {
// Metis
csr = parseMetis(tmpchar, &num_nodes, &num_edges, directed);
} else if (file_format == 0) {
// Dimacs9
csr = parseCOO(tmpchar, &num_nodes, &num_edges, 1);
} else if (file_format == 2) {
// Matrix market
csr = parseMM(tmpchar, &num_nodes, &num_edges, directed, 0);
} else {
printf("reserve for future");
exit(1);
}
// Allocate rank_array
float *rank_array = (float *)malloc(num_nodes * sizeof(float));
if (!rank_array) {
fprintf(stderr, "rank array not allocated successfully\n");
return -1;
}
int *row_d;
int *col_d;
int *inrow_d;
int *incol_d;
float *pagerank_d;
// Create device-side buffers for the graph
err = cudaMalloc(&row_d, num_nodes * sizeof(int));
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMalloc row_d (size:%d) => %s\n", num_nodes,
cudaGetErrorString(err));
return -1;
}
err = cudaMalloc(&col_d, num_edges * sizeof(int));
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMalloc col_d (size:%d) => %s\n", num_edges,
cudaGetErrorString(err));
return -1;
}
err = cudaMalloc(&inrow_d, num_nodes * sizeof(int));
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMalloc inrow_d (size:%d) => %s\n",
num_nodes, cudaGetErrorString(err));
return -1;
}
err = cudaMalloc(&incol_d, num_edges * sizeof(int));
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMalloc incol_d (size:%d) => %s\n",
num_edges, cudaGetErrorString(err));
return -1;
}
// Create buffers for pagerank
err = cudaMalloc(&pagerank_d, num_nodes * sizeof(float));
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMalloc pagerank_d (size:%d) => %s\n",
num_nodes, cudaGetErrorString(err));
return -1;
}
double timer1 = gettime();
// Copy the data to the device-side buffers
err = cudaMemcpy(row_d, csr->row_array, num_nodes * sizeof(int),
cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
fprintf(stderr, "ERROR:#endif cudaMemcpy row_d (size:%d) => %s\n",
num_nodes, cudaGetErrorString(err));
return -1;
}
err = cudaMemcpy(col_d, csr->col_array, num_edges * sizeof(int),
cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMemcpy col_d (size:%d) => %s\n", num_nodes,
cudaGetErrorString(err));
return -1;
}
err = cudaMemcpy(inrow_d, csr->inrow_array, num_nodes * sizeof(int),
cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
fprintf(stderr, "ERROR:#endif cudaMemcpy inrow_d (size:%d) => %s\n",
num_nodes, cudaGetErrorString(err));
return -1;
}
err = cudaMemcpy(incol_d, csr->incol_array, num_edges * sizeof(int),
cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMemcpy incol_d (size:%d) => %s\n",
num_nodes, cudaGetErrorString(err));
return -1;
}
// Set up work dimensions
int block_size = 256;
int num_blocks = (num_nodes + block_size - 1) / block_size;
dim3 threads(block_size, 1, 1);
dim3 grid(num_blocks, 1, 1);
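// Enlarge the device malloc heap (to 4 GiB) before any kernel launch,
// presumably because the GraphChi-style init kernels allocate per-vertex
// edge storage with device-side new/malloc.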
cudaDeviceSetLimit(cudaLimitMallocHeapSize, 4ULL * 1024 * 1024 * 1024);
double timer3 = gettime();
ChiVertex<float, float> **vertex;
GraphChiContext *context;
err = cudaMalloc(&vertex, num_nodes * sizeof(ChiVertex<float, float> *));
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMalloc vertex (size:%d) => %s\n", num_edges,
cudaGetErrorString(err));
return -1;
}
err = cudaMalloc(&context, sizeof(GraphChiContext));
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMalloc context (size:%d) => %s\n",
num_edges, cudaGetErrorString(err));
return -1;
}
printf("Start initCtx\n");
initContext<<<1, 1>>>(context, num_nodes, num_edges);
cudaDeviceSynchronize();
err = cudaGetLastError();
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: initCtx failed (%s)\n",
cudaGetErrorString(err));
return -1;
}
printf("Start initObj\n");
initObject<<<grid, threads>>>(vertex, context, row_d, col_d, inrow_d,
incol_d);
cudaDeviceSynchronize();
err = cudaGetLastError();
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: initObject failed (%s)\n",
cudaGetErrorString(err));
return -1;
}
printf("Start initOutEdge\n");
initOutEdge<<<grid, threads>>>(vertex, context, row_d, col_d);
cudaDeviceSynchronize();
err = cudaGetLastError();
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: initOutEdge failed (%s)\n",
cudaGetErrorString(err));
return -1;
}
// Run PageRank for a fixed number of iterations. TODO: convergence-based termination
for (int i = 0; i < ITER; i++) {
printf("Start PageRank\n");
PageRank<<<grid, threads>>>(vertex, context, i);
printf("Finish PageRank\n");
cudaDeviceSynchronize();
err = cudaGetLastError();
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaLaunch failed (%s)\n",
cudaGetErrorString(err));
return -1;
}
}
cudaDeviceSynchronize();
double timer4 = gettime();
printf("Start Copyback\n");
copyBack<<<grid, threads>>>(vertex, context, pagerank_d);
printf("End Copyback\n");
cudaDeviceSynchronize();
err = cudaGetLastError();
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaLaunch failed (%s)\n",
cudaGetErrorString(err));
return -1;
}
// Copy the rank buffer back
err = cudaMemcpy(rank_array, pagerank_d, num_nodes * sizeof(float),
cudaMemcpyDeviceToHost);
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMemcpy() failed (%s)\n",
cudaGetErrorString(err));
return -1;
}
double timer2 = gettime();
// Report timing characteristics
printf("kernel time = %lf ms\n", (timer4 - timer3) * 1000);
printf("kernel + memcpy time = %lf ms\n", (timer2 - timer1) * 1000);
#if 1
// Print rank array
print_vectorf(rank_array, num_nodes);
#endif
// Free the host-side arrays
free(rank_array);
csr->freeArrays();
free(csr);
// Free the device buffers
cudaFree(row_d);
cudaFree(col_d);
cudaFree(inrow_d);
cudaFree(incol_d);
cudaFree(pagerank_d);
return 0;
}
void print_vectorf(float *vector, int num) {
FILE *fp = fopen("result.out", "w");
if (!fp) {
printf("ERROR: unable to open result.txt\n");
}
for (int i = 0; i < num; i++) {
fprintf(fp, "%f\n", vector[i]);
}
fclose(fp);
}
|
e13c9aea692988a9dd0ccb4ce963cdedb2e6520f.hip
|
// !!! This is a file automatically generated by hipify!!!
//#include <hip/hip_runtime.h>
//#include <stdio.h>
//#include <stdlib.h>
//#include <errno.h>
//#include <math.h>
#include "rte.h"
//#include <pthread.h>
#include "complex_arith.cu"
extern Geometry *geom;
extern Phantom *phan;
extern Source *beam_src;
extern complex_double *diag_terms_host;
extern complex_double *sph_harm;
extern Info_Stat *info_stat_host;
extern SHORT nL;
extern int nTerms;
__constant__ Info_Stat info_stat;
__constant__ SHORT nL_dev;
__constant__ complex_double diag_terms_dev[MAX_TISS_NUM*MAX_NL*MAX_NL*MAX_NL*MAX_NL];
/* Just think of the vox indices as (blk_dep*BLK_SIZE + dep + info_stat.bounZ, blk_row*BLK_SIZE + row + info_stat.bounY, blk_col * BLK_SIZE + col + info_stat.nX) */
__device__ int get_voxind_spind(int blk_dep, int blk_row,int blk_col, int dep, int row, int col, int cnt){
return VOX_TO_SPIND( ((blk_dep + dep + info_stat.bounZ)*(2*info_stat.bounX + info_stat.nX) * (2*info_stat.bounY + info_stat.nY) + (blk_row*BLK_SIZE + row + info_stat.bounY)* (2*info_stat.bounX + info_stat.nX) + (blk_col * BLK_SIZE + col + info_stat.bounX)), cnt, (nL_dev+1)*(nL_dev+1));
}
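// Maps a (depth, row, column) voxel coordinate to a linear index into the
// padded phantom arrays, accounting for the boundary margin on each axis.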
__device__ int get_vind_phanind(int dep, int row, int col){
return ((info_stat.bounZ + dep) * (info_stat.nX + 2 * info_stat.bounX ) * (info_stat.nY + 2 * info_stat.bounY ) /* reached the correct layer */ + ( info_stat.bounY + row)* (info_stat.nX + 2 * info_stat.bounX ) + (info_stat.bounX + col));
}
__device__ int get_voxind_phanind(int blk_dep, int blk_row,int blk_col,int blk_offset_i, int blk_offset_j, int blk_offset_k, int dep, int row, int col){
return (info_stat.bounZ + blk_dep + blk_offset_i * BLK_SIZE+ dep) * (info_stat.nX + 2 * info_stat.bounX ) * (info_stat.nY + 2 * info_stat.bounY ) /* reached the correct layer */ + (blk_row * BLK_SIZE + info_stat.bounY + blk_offset_j * BLK_SIZE + row)* (info_stat.nX + 2 * info_stat.bounX ) + (info_stat.bounX + blk_col * BLK_SIZE + blk_offset_k * BLK_SIZE + col);
}
__device__ int get_ind_phanind(int dep, int row, int col){
return dep* (2*info_stat.boun_blk_sizeX + BLK_SIZE)* (BLK_SIZE + 2*info_stat.boun_blk_sizeY) + row* (BLK_SIZE + 2*info_stat.boun_blk_sizeX) + col;
}
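// Integrates the total attenuation coefficient (mu_tot) along the straight
// line of length r from cor_src to cor_dest, stepping through every voxel the
// ray crosses (a Siddon-style traversal in parametric alpha coordinates).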
__device__ flt_doub
mu_int (COR *cor_src, COR *cor_dest, float r, Info_Dyn info_dyn)
{
flt_doub mu_tot_path;
mu_tot_path = 0.0;
float alpha_x_curr, alpha_y_curr, alpha_z_curr;
float alpha_xinc, alpha_yinc, alpha_zinc;
SHORT i_curr, j_curr, k_curr;
float alpha_prev;
byte flag = 1;
alpha_x_curr = 0;
alpha_y_curr = 0;
alpha_z_curr = 0;
alpha_prev = 0;
SHORT x_inc_end,y_inc_end,z_inc_end;
i_curr = cor_src->i;
j_curr = cor_src->j;
k_curr = cor_src->k;
if (cor_dest->i != cor_src->i)
alpha_zinc = fabs (0.5 / (cor_dest->i - cor_src->i)); // The initial increment along the z axis.
else
alpha_zinc = INF;
if (cor_dest->j != cor_src->j)
alpha_yinc = fabs (0.5 / (cor_dest->j - cor_src->j));
else
alpha_yinc = INF;
if (cor_dest->k != cor_src->k)
alpha_xinc = fabs (0.5 / (cor_dest->k - cor_src->k));
else
alpha_xinc = INF;
#if 1
while (flag == 1 && alpha_prev < 1) // Hack for now to avoid infinite loops
{
if (alpha_z_curr + alpha_zinc <= alpha_x_curr + alpha_xinc && alpha_z_curr + alpha_zinc <= alpha_y_curr + alpha_yinc)
{
alpha_z_curr += alpha_zinc;
if ( i_curr == cor_src->i)
alpha_zinc *=2; // We have taken the first step along the z axis, which was half a voxel. Now every step will be one voxel.
mu_tot_path = mu_tot_path + info_stat.mu_tot[info_dyn.tiss_type[get_vind_phanind(i_curr,j_curr,k_curr)]] * r * (alpha_z_curr - alpha_prev);
i_curr = (cor_src->i < cor_dest->i) ? i_curr + 1 : i_curr - 1;
alpha_prev = alpha_z_curr;
if (i_curr == cor_dest->i && j_curr == cor_dest->j && k_curr == cor_dest->k)
{
mu_tot_path = mu_tot_path + info_stat.mu_tot[info_dyn.tiss_type[get_vind_phanind(i_curr,j_curr,k_curr)]] * r * alpha_zinc/2.0;
flag = 0;
return mu_tot_path;
}
}
else if (alpha_y_curr + alpha_yinc < alpha_z_curr + alpha_zinc && alpha_y_curr + alpha_yinc <= alpha_x_curr + alpha_xinc )
{
alpha_y_curr += alpha_yinc;
if ( j_curr == cor_src->j)
alpha_yinc *=2;
mu_tot_path = mu_tot_path + info_stat.mu_tot[info_dyn.tiss_type[get_vind_phanind(i_curr,j_curr,k_curr)]] * r * (alpha_y_curr - alpha_prev);
j_curr = (cor_src->j < cor_dest->j) ? j_curr + 1 : j_curr - 1;
alpha_prev = alpha_y_curr;
if (i_curr == cor_dest->i && j_curr == cor_dest->j && k_curr == cor_dest->k)
{
mu_tot_path = mu_tot_path + info_stat.mu_tot[info_dyn.tiss_type[get_vind_phanind(i_curr,j_curr,k_curr)]] * r * alpha_yinc/2.0;
flag = 0;
return mu_tot_path;
}
}
else if (alpha_x_curr + alpha_xinc < alpha_y_curr + alpha_yinc && alpha_x_curr + alpha_xinc < alpha_z_curr + alpha_zinc )
{
alpha_x_curr += alpha_xinc;
if ( k_curr == cor_src->k)
alpha_xinc *=2;
mu_tot_path = mu_tot_path + (info_stat.mu_tot[info_dyn.tiss_type[get_vind_phanind(i_curr,j_curr,k_curr)]]) * r * (alpha_x_curr - alpha_prev);
k_curr = (cor_src->k < cor_dest->k) ? k_curr + 1 : k_curr - 1;
alpha_prev = alpha_x_curr;
if (i_curr == cor_dest->i && j_curr == cor_dest->j && k_curr == cor_dest->k)
{
mu_tot_path = mu_tot_path + info_stat.mu_tot[info_dyn.tiss_type[get_vind_phanind(i_curr,j_curr,k_curr)]] * r * alpha_xinc/2.0;
flag = 0;
return mu_tot_path;
}
}
}
#endif
return mu_tot_path;
}
__global__ void compute_diagonal_abs (doublecomplex *src_dist,doublecomplex *out_dist,Info_Dyn info_dyn, int cnt,SHORT block_dep, SHORT layer_start, SHORT flag)
{
int block_row = blockIdx.y;
int block_col = blockIdx.x;
int i = threadIdx.z;
int j = threadIdx.y;
int k = threadIdx.x;
int sp_ind, sp_ind_src, r_ind;
SHORT cntp;
SHORT nL_tmp;
flt_doub cm_tmp = 1.0;
nL_tmp = (flag == 0) ? 0 : nL_dev;
// cm_tmp = (flag == 0) ? 1 : info_stat.cm
r_ind = get_vind_phanind(block_dep + layer_start + i, block_row * BLK_SIZE + j, block_col * BLK_SIZE+ k);
for(cnt=0; cnt < (nL_tmp+1)*(nL_tmp+1); cnt++){
sp_ind = get_voxind_spind(block_dep - info_stat.bounZ, block_row, block_col ,i,j,k,cnt);
out_dist[sp_ind] = 0 + 0*I;
for(cntp=0; cntp<(nL_tmp+1)*(nL_tmp+1) ; cntp++){
sp_ind_src = get_voxind_spind(block_dep + layer_start, block_row, block_col,i,j,k,cntp);
out_dist[sp_ind] = out_dist[sp_ind] + (1.0/cm_tmp)*diag_terms_dev[cnt * (MAX_NL) * MAX_NL * MAX_TISS_NUM + cntp * MAX_TISS_NUM + info_dyn.tiss_type[r_ind]] * src_dist[sp_ind_src];
}
}
}
#if 0
//extern __shared__ char array_tisstype[];
__global__ void compute_subvox_2(complex_double* src_dist_dev,complex_double* out_dist_dev, Info_Dyn info_dyn, complex_double* sph_harm_dev, SHORT cnt, COR subvox_src,COR subvox_dest, flt_doub dz_sub, flt_doub dy_sub, flt_doub dx_sub, SHORT blk_dep, SHORT start_layer){
SHORT block_row = blockIdx.y;
SHORT block_col = blockIdx.x;
SHORT i = threadIdx.z;
SHORT j = threadIdx.y;
SHORT k = threadIdx.x;
SHORT cntp;
complex_double out_tmp, tmp;
COR cor_src, cor_dest;
SHORT ip, jp, kp;
int sp_ind;
flt_doub theta,phi,dist,dx,dy,dz;
int sp_ind_src;
#if 0
cor_src.i = i + info_stat.boun_blk_sizeZ;
cor_src.j = j + info_stat.boun_blk_sizeY;
cor_src.k = k + info_stat.boun_blk_sizeX;
#else
cor_src.i = i + blk_dep + start_layer;
cor_src.j = j + block_row*BLK_SIZE;
cor_src.k = k + block_col*BLK_SIZE;
#endif
// __shared__ complex_double src_tmp[BLK_SRC_SIZE];
#if 0
int blk_offset_i, blk_offset_j,blk_offset_k;
byte *tisstype_tmp = (byte *) array_tisstype;
for(blk_offset_i=0; blk_offset_i< 1 + 2*info_stat.boun_blkZ; blk_offset_i++){
for(blk_offset_j=0; blk_offset_j< 1 + 2*info_stat.boun_blkY; blk_offset_j++){
for(blk_offset_k=0; blk_offset_k< 1 + 2*info_stat.boun_blkX; blk_offset_k++){
tisstype_tmp[get_ind_phanind(blk_offset_i*BLK_SIZE + i, blk_offset_j*BLK_SIZE + j, blk_offset_k*BLK_SIZE + k)] = info_dyn.tiss_type[get_voxind_phanind(blk_dep, block_row, block_col,blk_offset_i - info_stat.boun_blkZ,blk_offset_j - info_stat.boun_blkY,blk_offset_k - info_stat.boun_blkX,i,j,k)];
// src_tmp[get_ind_phanind(blk_offset_i*BLK_SIZE + i, blk_offset_j*BLK_SIZE + j, blk_offset_k*BLK_SIZE + k)] = src_dist_dev[VOX_TO_SPIND(get_voxind_phanind(blk_dep, block_row, block_col,(blk_offset_i - info_stat.bounZ/BLK_SIZE),(blk_offset_j - info_stat.bounY/BLK_SIZE),(blk_offset_k - info_stat.bounX/BLK_SIZE),i,j,k),cntp,info_stat.no_vox)];
}
}
}
__syncthreads();
#endif
out_tmp = 0 + 0*I;
flt_doub sub_dist;
for(ip= i - info_stat.subbounZ; ip <= i + info_stat.subbounZ; ip++){
dz = -(ip-i)*info_stat.delZ + dz_sub;
for(jp= j - info_stat.subbounY; jp <= j + info_stat.subbounY; jp++){
dy = -(jp-j)*info_stat.delY + dy_sub;
for(kp= k - info_stat.subbounX; kp <= k + info_stat.subbounX; kp++){
dx = -(kp-k)*info_stat.delX + dx_sub;
dist = sqrt((i-ip)*(i-ip)*info_stat.delZ*info_stat.delZ + (j-jp)*(j-jp)*info_stat.delY*info_stat.delY + (k-kp)*(k-kp)*info_stat.delX*info_stat.delX);
if( dist <= info_stat.sub_thresh && ( i != ip || j != jp || k != kp)){
sub_dist = sqrt(dx*dx + dy*dy + dz*dz);
#if 0
cor_dest.i = ip + info_stat.boun_blk_sizeZ;
cor_dest.j = jp + info_stat.boun_blk_sizeY;
cor_dest.k = kp + info_stat.boun_blk_sizeX;
#else
cor_dest.i = ip +blk_dep + start_layer;
cor_dest.j = jp + block_row*BLK_SIZE;
cor_dest.k = kp + block_col*BLK_SIZE;
#endif
theta = atan(sqrt(dx*dx + dy*dy)/dz );
phi = atan2(dy,dx);
for(cnt=0; cnt < (nL_dev+1)*(nL_dev+1); cnt++){
sp_ind = get_voxind_spind(blk_dep - info_stat.bounZ, block_row, block_col,i,j,k,cnt);
tmp = 0 + 0*I;
for(cntp=0; cntp< (nL_dev+1)*(nL_dev+1); cntp++){
sp_ind_src = get_voxind_spind(blk_dep + start_layer, block_row, block_col,ip,jp,kp,cntp);
tmp = tmp + src_dist_dev[sp_ind_src] * sph_harm_dev[SPH_HARM_IND(cntp,theta,phi)];
}
tmp = tmp * ~sph_harm_dev[SPH_HARM_IND(cnt,theta,phi)];
tmp = tmp* exp(-mu_int(cor_src, cor_dest, subvox_src, subvox_dest,sub_dist, info_dyn )); ;//cexp_dev(-(1.01+0.0*I)*sub_dist);// cexp_dev(-mu_int(cor_src, cor_dest, subvox_src, subvox_dest,sub_dist, tisstype_tmp,info_dyn )); //cexp_dev(-(1.01+0.0*I)*dist)
tmp = tmp * (1.0/( info_stat.cm * sub_dist*sub_dist * __powf(__int2float_ru(info_stat.sub_vox),6.0)));
out_dist_dev[sp_ind] = out_dist_dev[sp_ind] + tmp;
//out_tmp = out_tmp + tmp;
}
}
}
}
}
__syncthreads();
}
#endif
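// For every destination voxel within prop_thresh of the source voxel, the
// source distribution is projected onto spherical harmonics along the
// connecting direction, attenuated by exp(-integrated mu) / (cm_tmp * r^2),
// and accumulated into out_dist_dev.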
__global__ void compute_propabs(complex_double *src_dist_dev, complex_double *out_dist_dev, Info_Dyn info_dyn,complex_double *sph_harm_dev,SHORT cnt, SHORT blk_dep, SHORT start_layer, SHORT flag){ // If flag =1, only then evaluate all the other spherical harmonics, else nl = 0.
SHORT block_row = blockIdx.y;
SHORT block_col = blockIdx.x;
SHORT i = threadIdx.z;
SHORT j = threadIdx.y;
SHORT k = threadIdx.x;
int sp_ind;
COR *cor_src, *cor_dest;
SHORT cntp;
flt_doub theta,phi,dist,dx,dy,dz;
SHORT ip,jp,kp;
int sp_ind_src;
int nL_tmp;
flt_doub cm_tmp = 1.0;
cor_src = (COR*)malloc(sizeof(COR));
cor_dest = (COR*)malloc(sizeof(COR));
complex_double tmp;
nL_tmp = (flag == 0) ? 0 : nL_dev;
cor_src->i = i + blk_dep + start_layer;
cor_src->j = j + block_row*BLK_SIZE;
cor_src->k = k + block_col*BLK_SIZE;
//int sp_ind2;
for (ip = 0; ip < info_stat.nZ ; ip++){
dz = -(ip-cor_src->i)*info_stat.delZ;
for (jp = 0; jp < info_stat.nY ; jp++){
dy = -(jp-cor_src->j)*info_stat.delY;
for (kp = 0; kp < info_stat.nX; kp++){
dx = -(kp-cor_src->k)*info_stat.delX;
dist = sqrt(dx*dx + dy*dy + dz*dz);
if((ip != cor_src->i || jp != cor_src->j || kp != cor_src->k) && dist < info_stat.prop_thresh){
theta = acos(dz/dist );
if(theta < 0)
theta = theta + M_PI;
phi = atan2(dy,dx);
// if(phi < 0)
// phi = phi + 2*M_PI;
#if 1
cor_dest->i = ip;
cor_dest->j = jp;
cor_dest->k = kp;
#endif
for(cnt = 0; cnt < (nL_tmp+1)*(nL_tmp+1); cnt++){
sp_ind = get_voxind_spind(blk_dep - info_stat.bounZ, block_row, block_col,i,j,k,cnt);
tmp = 0 + 0*I;
for(cntp=0; cntp< (nL_tmp+1)*(nL_tmp+1); cntp++){
sp_ind_src = get_voxind_spind(0, 0, 0,ip,jp,kp,cntp);
tmp = tmp + src_dist_dev[sp_ind_src]*sph_harm_dev[SPH_HARM_IND(cntp,theta,phi)];
}
tmp = tmp * ~sph_harm_dev[SPH_HARM_IND(cnt,theta,phi)];
tmp = tmp * exp(-mu_int(cor_src, cor_dest, dist, info_dyn )); //cexp_dev(-1*(1.01 + 0*I)*dist); // have commented this line
// tmp = tmp * cexp_dev(-1*(1.01 + 0*I)*dist);
tmp = tmp * (1.0/(cm_tmp*dist*dist));
out_dist_dev[sp_ind] = out_dist_dev[sp_ind] + tmp;
}
}
}
}
}
free(cor_src);
free(cor_dest);
__syncthreads();
}
__global__ void scale_dist_dev (doublecomplex *W,double scale_fac,SHORT cnt,SHORT block_dep )
{
int sp_ind;
int block_row = blockIdx.y;
int block_col = blockIdx.x;
int i = threadIdx.z;
int j = threadIdx.y;
int k = threadIdx.x;
for(cnt=0; cnt < (nL_dev+1)*(nL_dev+1); cnt++){
sp_ind = get_voxind_spind(block_dep , block_row , block_col ,i,j,k,cnt);
W[sp_ind] = W[sp_ind]*scale_fac;
}
}
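// Scattering step: each spherical-harmonic coefficient of order l is scaled
// by g^l and by the scattering coefficient mu_sc of the voxel's tissue type.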
__global__ void prop_scat_dev (doublecomplex *out_dist, Info_Dyn info_dyn, short layer_start, SHORT block_dep)
{
int sp_ind,cnt,l,r_ind;
int block_row = blockIdx.y;
int block_col = blockIdx.x;
int i = threadIdx.z;
int j = threadIdx.y;
int k = threadIdx.x;
r_ind = get_vind_phanind(block_dep + layer_start + i, block_row * BLK_SIZE + j, block_col * BLK_SIZE+ k);
for (cnt = 0; cnt < (nL_dev+1)*(nL_dev+1); cnt++){
sp_ind = get_voxind_spind(block_dep, block_row, block_col,i,j,k,cnt);
l = (int) sqrtf(cnt);
out_dist[sp_ind] = pow (__int2double_rn(info_stat.g),__int2double_rn(l)) * out_dist[sp_ind] * info_stat.mu_sc[info_dyn.tiss_type[r_ind]] ;
}
__syncthreads();
}
__global__ void write_dist_dev (doublecomplex *W,doublecomplex val,SHORT cnt,SHORT block_dep, SHORT layer_start)
{
int sp_ind;
int block_row = blockIdx.y;
int block_col = blockIdx.x;
int i = threadIdx.z;
int j = threadIdx.y;
int k = threadIdx.x;
for(cnt=0; cnt < (nL_dev+1)*(nL_dev+1); cnt++){
sp_ind = get_voxind_spind(block_dep - info_stat.bounZ + layer_start, block_row, block_col,i,j,k,cnt);
W[sp_ind] = val;
}
}
__global__ void copy_dist_dev (doublecomplex *W1,doublecomplex *W2)
{
int sp_ind;
int cnt=0;
int block_dep=0;
int block_row = blockIdx.y;
int block_col = blockIdx.x;
int i = threadIdx.z;
int j = threadIdx.y;
int k = threadIdx.x;
for(block_dep = 0; block_dep < info_stat.nZ; block_dep = block_dep + BLK_SIZE){
for (cnt = 0; cnt < (nL_dev+1)*(nL_dev+1); cnt++){
sp_ind = get_voxind_spind(block_dep - info_stat.bounZ, block_row, block_col,i,j,k,cnt);
W2[sp_ind] = W1[sp_ind];
__syncthreads();
}
}
}
__global__ void add_dist_dev (doublecomplex *W1,doublecomplex *W2, doublecomplex *out )
{
int sp_ind,cnt,block_dep;
int block_row = blockIdx.y;
int block_col = blockIdx.x;
int i = threadIdx.z;
int j = threadIdx.y;
int k = threadIdx.x;
for(block_dep = 0; block_dep < info_stat.nZ; block_dep = block_dep + BLK_SIZE){
for (cnt = 0; cnt < (nL_dev+1)*(nL_dev+1); cnt++){
sp_ind = get_voxind_spind(block_dep -info_stat.bounZ, block_row, block_col,i,j,k,cnt);
out[sp_ind] = W1[sp_ind] + W2[sp_ind];
}
}
}
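// Tabulates the (theta, phi) sample directions and their Cartesian unit
// vectors on an ANG_RES x ANG_RES angular grid.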
__global__ void compute_sph_coord(flt_doub* theta_self, flt_doub* phi_self, flt_doub* sph_x, flt_doub* sph_y, flt_doub* sph_z, int theta_blk, int phi_blk)
{
int theta_count = threadIdx.x + theta_blk;
int phi_count = threadIdx.y + phi_blk;
int omega_count;
omega_count = theta_count*ANG_RES + phi_count;
theta_self[omega_count] = theta_count * M_PI / ANG_RES ;
phi_self[omega_count] = phi_count * 2.0*M_PI / ANG_RES ;
sph_x[omega_count] = cos(phi_count * 2.0*M_PI / ANG_RES) * sin(theta_count * M_PI / ANG_RES);
sph_y[omega_count] = sin(phi_count * 2.0*M_PI / ANG_RES) * sin(theta_count * M_PI / ANG_RES) ;
sph_z[omega_count] = cos(theta_count * M_PI / ANG_RES) ;
}
#if 0
__global__ void compute_diag_selfsub(complex_double *fact_self_vox, flt_doub *sph_x, flt_doub *sph_y, flt_doub *sph_z, flt_doub *theta_self, int omega_count, SHORT tiss_num, int blk_dep)
{
int blk_row = blockIdx.y;
int blk_col = blockIdx.x;
int z_ind = threadIdx.z + blk_dep;
int y_ind = threadIdx.y +blk_row* BLK_SELF_SUB_VOX;
int x_ind = threadIdx.x + blk_col* BLK_SELF_SUB_VOX;
int face_calc;
int face = 1;
flt_doub face_x, face_y, face_z, cube_x, cube_y, cube_z, dist_self ;
//int r_ind_self = (threadIdx.z + blk_dep) * (info_stat.self_sub_vox)*(info_stat.self_sub_vox) + (threadIdx.y +blk_row* BLK_SELF_SUB_VOX) * info_stat.self_sub_vox + (threadIdx.x + blk_col* BLK_SELF_SUB_VOX);
int r_ind_self = (z_ind) * (info_stat.self_sub_vox)*(info_stat.self_sub_vox) + (y_ind) * info_stat.self_sub_vox + (x_ind);
flt_doub ii_self = -info_stat.self_sub_vox/2.0 +0.5 + z_ind;
flt_doub jj_self = -info_stat.self_sub_vox/2.0 +0.5 + y_ind;
flt_doub kk_self = -info_stat.self_sub_vox/2.0 +0.5 + x_ind;
face_x = 0;
face_y = 0;
face_z = 0;
cube_x = 0;
cube_y = 0;
cube_z = 0;
if (sph_x[omega_count] != 0.0){
for ( face_calc = 0; face_calc <2; face_calc++){
face_x = face_calc ==0 ? face:-face;
face_y = (face_x - ii_self*2.0/info_stat.self_sub_vox) * sph_y[omega_count]/ sph_x[omega_count] + jj_self*2.0/info_stat.self_sub_vox;
face_z = (face_x - ii_self*2.0/info_stat.self_sub_vox) * sph_z[omega_count]/ sph_x[omega_count] + kk_self*2.0/info_stat.self_sub_vox;
if (face_x <= face && face_x >= -face && face_y <=face && face_y >= -face && face_z <= face && face_z >= -face && sph_x[omega_count] * face_x >=0){
cube_x = face_x;
cube_y = face_y;
cube_z = face_z;
}
}
}
#if 1
if(sph_y[omega_count] != 0.0){
for ( face_calc = 0; face_calc <2; face_calc++){
face_y = face_calc ==0 ? face:-face;
face_z = (face_y - jj_self*2.0/info_stat.self_sub_vox) * sph_z[omega_count]/ sph_y[omega_count] + kk_self*2.0/info_stat.self_sub_vox;
face_x = (face_y - jj_self*2.0/info_stat.self_sub_vox) * sph_x[omega_count]/ sph_y[omega_count] + ii_self*2.0/info_stat.self_sub_vox;
if (face_x <= face && face_x >= -face && face_y <= face && face_y >= -face && face_z <= face && face_z >= -face && sph_y[omega_count] * face_y >= 0){
cube_x = face_x;
cube_y = face_y;
cube_z = face_z;
}
}
}
if(sph_z[omega_count] != 0.0){
for ( face_calc = 0; face_calc <2; face_calc++){
face_z = face_calc ==0 ? face:-face;
face_x = (face_z - kk_self*2.0/info_stat.self_sub_vox) * sph_x[omega_count]/ sph_z[omega_count] + ii_self*2.0/info_stat.self_sub_vox;
face_y = (face_z - kk_self*2.0/info_stat.self_sub_vox) * sph_y[omega_count]/ sph_z[omega_count] + jj_self*2.0/info_stat.self_sub_vox;
if (face_x <= face && face_x >= -face && face_y <= face && face_y >= -face && face_z <= face && face_z >= -face && sph_z[omega_count] * face_z >=0){
cube_x = face_x;
cube_y = face_y;
cube_z = face_z;
}
}
}
#endif
dist_self = sqrt( (ii_self*2.0/info_stat.self_sub_vox - cube_x)*(ii_self*2.0/info_stat.self_sub_vox - cube_x) + (jj_self*2.0/info_stat.self_sub_vox- cube_y)*(jj_self*2.0/info_stat.self_sub_vox- cube_y) + (kk_self*2.0/info_stat.self_sub_vox - cube_z)*(kk_self*2.0/info_stat.self_sub_vox - cube_z)) * info_stat.delX/2.0; //square voxel approx.
fact_self_vox[omega_count * info_stat.self_sub_vox * info_stat.self_sub_vox * info_stat.self_sub_vox + r_ind_self ] = ( 1 - exp( -(info_stat.mu_tot[tiss_num]) * dist_self)) * sin(theta_self[omega_count]);
}
#endif
#if 0
void generate_diag_terms_dev() {
int ang_res = ANG_RES;
int omega_count,ang_ind;
int r_ind_self;
complex_double *rt_self, *rtp_self;
int l,m,lp,mp,cnt,cntp;
flt_doub *theta_self, *phi_self, *sph_x_self, *sph_y_self, *sph_z_self;
flt_doub cm, del_theta, del_phi;
complex_double sub_v_sum_self;
int i;
cm = C / phan->n;
diag_terms_host = (complex_double *)malloc(sizeof(complex_double )* MAX_TISS_NUM*MAX_NL*MAX_NL*MAX_NL*MAX_NL);
theta_self = (flt_doub *)malloc(sizeof(flt_doub) * pow( ang_res,2));
phi_self = (flt_doub *)malloc(sizeof(flt_doub) * pow(ang_res,2));
sph_x_self = (flt_doub *)malloc(sizeof(flt_doub) * pow(ang_res,2));
sph_y_self = (flt_doub *)malloc(sizeof(flt_doub) * pow(ang_res,2));
sph_z_self = (flt_doub *)malloc(sizeof(flt_doub) * pow(ang_res,2));
rt_self = (complex_double *)malloc(sizeof(complex_double) * pow(ang_res,2));
rtp_self = (complex_double *)malloc(sizeof(complex_double) * pow(ang_res,2));
flt_doub *theta_self_dev, *phi_self_dev,*sph_x_dev,*sph_y_dev,*sph_z_dev;
complex_double *fact_self_vox_dev, *fact_self_vox_host;
hipMalloc(&theta_self_dev, sizeof(flt_doub)*pow( ang_res,2));
MY_SAFE_CALL(hipMalloc(&phi_self_dev, sizeof(flt_doub)*pow( ang_res,2)));
MY_SAFE_CALL(hipMalloc(&sph_x_dev,sizeof(flt_doub)*pow( ang_res,2)));
MY_SAFE_CALL(hipMalloc(&sph_y_dev,sizeof(flt_doub)*pow( ang_res,2)));
MY_SAFE_CALL(hipMalloc(&sph_z_dev,sizeof(flt_doub)*pow( ang_res,2)));
MY_SAFE_CALL(hipMalloc(&fact_self_vox_dev, sizeof(complex_double ) * pow(geom->self_sub_vox,3) * pow( ang_res,2)));
fact_self_vox_host = (complex_double *) malloc (sizeof(complex_double ) * pow(geom->self_sub_vox,3) * pow( ang_res,2));
if(fact_self_vox_host == NULL){
printf("error in memory allocation \n");
exit(0);
}
dim3 dim_block_1(BLK_ANG_SIZE,BLK_ANG_SIZE,1);
dim3 dim_grid_1(1,1);
dim3 dim_block_2(BLK_SELF_SUB_VOX,BLK_SELF_SUB_VOX,BLK_SELF_SUB_VOX);
dim3 dim_grid_2(geom->self_sub_vox/BLK_SELF_SUB_VOX,geom->self_sub_vox/BLK_SELF_SUB_VOX);
int theta_count, phi_count;
for(theta_count = 0; theta_count < ANG_RES; theta_count = theta_count + BLK_ANG_SIZE){
for( phi_count=0; phi_count < ANG_RES; phi_count = phi_count + BLK_ANG_SIZE){
hipLaunchKernelGGL(( compute_sph_coord), dim3(dim_grid_1), dim3(dim_block_1), 0, 0, theta_self_dev, phi_self_dev, sph_x_dev, sph_y_dev, sph_z_dev, theta_count, phi_count);
checkCUDAError("Kernel invocation in compute_sph_coord\n");
}
}
hipMemcpy(theta_self, theta_self_dev, sizeof(flt_doub)*pow( ang_res,2), hipMemcpyDeviceToHost);
hipMemcpy(phi_self, phi_self_dev, sizeof(flt_doub)*pow( ang_res,2), hipMemcpyDeviceToHost);
omega_count = 0;
/*
for(theta_count = 0; theta_count < ANG_RES; theta_count = theta_count + BLK_ANG_SIZE){
for( phi_count=0; phi_count < ANG_RES; phi_count = phi_count + BLK_ANG_SIZE){
omega_count = theta_count * ANG_RES + phi_count;
// printf("%f %f %f \n", sph_x_self[omega_count], sph_y_self[omega_count],sph_z_self[omega_count], omega_count);
}
}
*/
del_theta = M_PI / ANG_RES;
del_phi = 2*M_PI / ANG_RES;
int tiss_num;
int blk_dep;
omega_count = 0;
for (tiss_num = 1; tiss_num < phan->no_tiss; tiss_num++){
for ( omega_count = 0; omega_count < pow(ang_res,2); omega_count++){
for(blk_dep=0; blk_dep < geom->self_sub_vox; blk_dep = blk_dep + BLK_SELF_SUB_VOX){
hipLaunchKernelGGL(( compute_diag_selfsub), dim3(dim_grid_2), dim3(dim_block_2), 0, 0, fact_self_vox_dev, sph_x_dev, sph_y_dev,sph_z_dev, theta_self_dev, omega_count, tiss_num,blk_dep);
checkCUDAError("Kernel invocation in compute_diag_selfsub\n");
}
}
hipMemcpy(fact_self_vox_host, fact_self_vox_dev, sizeof(complex_double) * pow(geom->self_sub_vox,3) * pow( ang_res,2), hipMemcpyDeviceToHost);
cnt = 0;
for (l = 0; l <= nL; l++) {
for (m = -l; m <= l; m++) {
cntp = 0;
SpherHarmonicArray(l, m, powf(ang_res,2), theta_self, phi_self, rt_self);
for (lp = 0; lp <= nL; lp++) {
for (mp = -lp; mp <= lp; mp++) {
sub_v_sum_self = 0.0 + 0.0*I;
SpherHarmonicArray(lp, mp, pow(ang_res,2), theta_self, phi_self, rtp_self);
for ( omega_count = 0; omega_count < ang_res * ang_res; omega_count++){
for ( r_ind_self = 0; r_ind_self < pow(geom->self_sub_vox,3); r_ind_self++){
sub_v_sum_self = sub_v_sum_self + ~(rt_self[omega_count]) * rtp_self[omega_count] * fact_self_vox_host[omega_count * (int)pow(geom->self_sub_vox,3) + r_ind_self];
}
}
diag_terms_host[cnt*MAX_TISS_NUM*(MAX_NL)*(MAX_NL) + cntp * MAX_TISS_NUM + tiss_num] = sub_v_sum_self * del_theta * del_phi / (cm * pow((double)geom->self_sub_vox,3) * (phan->mu_abs[tiss_num] + phan->mu_sc[tiss_num]) * geom->delX * geom->delY * geom->delZ) ;
if(cnt == cntp){
printf("The diagonal term is %e +%e i for tiss = %d, cnt = %d and cntp = %d \n", diag_terms_host[cnt*MAX_TISS_NUM*(MAX_NL)*(MAX_NL) + cntp * MAX_TISS_NUM + tiss_num].real(), diag_terms_host[cnt*MAX_TISS_NUM*(MAX_NL)*(MAX_NL) + cntp * MAX_TISS_NUM + tiss_num].imag(), tiss_num, cnt, cntp);
}
cntp++;
}
}
cnt++;
}
}
}
hipFree(sph_x_dev);
hipFree(sph_y_dev);
hipFree(sph_z_dev);
hipFree(theta_self_dev);
hipFree(phi_self_dev);
exit(0);
}
#endif
void checkCUDAError(const char *msg)
{
hipError_t err = hipGetLastError();
if( hipSuccess != err)
{
fprintf(stderr, "Cuda error: %s: %s.\n", msg,
hipGetErrorString( err) );
exit(EXIT_FAILURE);
}
}
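// Per-device worker entry point: binds the thread to its device, uploads the
// source distribution, tissue map and spherical-harmonic tables, then runs the
// diagonal (self-voxel) and propagation kernels one BLK_SIZE_Z slab of layers
// at a time.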
void * prop_abs(void *arg){
#if 1
SHORT cnt;
SHORT block_dep;
//COR subvox_src, subvox_dest;
//SIGNED_SHORT i,j,k,ip,jp,kp;
//flt_doub dx_sub, dy_sub, dz_sub;
dim3 dim_block(BLK_SIZE, BLK_SIZE, BLK_SIZE_Z);
dim3 dim_grid(geom->nX/dim_block.x,geom->nY/dim_block.y);
// printf("% d and %d are no of blocks per grid \n", dim_grid.y, dim_grid.x);
// printf("% d %d and %d are no of threads per block \n", dim_block.x, dim_block.y, dim_block.z);
const THREAD_PARAMETERS parameters = *((THREAD_PARAMETERS *) arg);
int size_layer = ( geom->nX + 2*geom->bounX ) * ( geom->nY + 2*geom->bounY ) * ( nL+1) * (nL+1) ;
int size = ( geom->nX + 2*geom->bounX ) * ( geom->nY + 2*geom->bounY ) * (geom->nZ + 2*geom->bounZ) * ( nL+1) * (nL+1);
complex_double *src_dev, *out_dev;
if(hipSetDevice(parameters.device_index) != hipSuccess){
printf("Error in setting up device %d \n", parameters.device_index);
exit(0);
}
MY_SAFE_CALL(hipMalloc(&src_dev, sizeof(complex_double)*size));
MY_SAFE_CALL(hipMalloc(&out_dev, sizeof(complex_double)*size_layer*(parameters.num_layers)));
MY_SAFE_CALL(hipMemset(out_dev, 0, sizeof(complex_double)*size_layer*(parameters.num_layers)));
MY_SAFE_CALL(hipMemcpy(src_dev, parameters.src_host,sizeof(complex_double)*size, hipMemcpyHostToDevice));
Info_Dyn info_dyn_dev;
MY_SAFE_CALL(hipMalloc(&(info_dyn_dev.tiss_type), sizeof(byte)*geom->no_vox));
MY_SAFE_CALL(hipMemcpy(info_dyn_dev.tiss_type, phan->tiss_type,sizeof(byte)*geom->no_vox, hipMemcpyHostToDevice));
MY_SAFE_CALL(hipMemcpyToSymbol(info_stat,info_stat_host,sizeof(Info_Stat) ));
MY_SAFE_CALL(hipMemcpyToSymbol(nL_dev,&nL,sizeof(SHORT) ));
MY_SAFE_CALL(hipMemcpyToSymbol(diag_terms_dev,diag_terms_host, sizeof(complex_double)*MAX_TISS_NUM*MAX_NL*MAX_NL*MAX_NL*MAX_NL));
complex_double *sph_harm_dev;
MY_SAFE_CALL(hipMalloc(&sph_harm_dev, sizeof(complex_double)*(nL+1)*(nL+1) * THETA_ANG_RES * PHI_ANG_RES));
MY_SAFE_CALL(hipMemcpy(sph_harm_dev,sph_harm, sizeof(complex_double)*(nL+1)*(nL+1) * THETA_ANG_RES * PHI_ANG_RES,hipMemcpyHostToDevice));
#if 0
for(cnt=0; cnt < (nL+1)*(nL+1); cnt++){
printf("Invoking compute_diagonal_abs with cnt = %d \n", cnt);
for(block_dep = 0; block_dep < parameters.num_layers; block_dep = block_dep + BLK_SIZE_Z){
hipLaunchKernelGGL(( write_dist_dev), dim3(dim_grid), dim3(dim_block), 0, 0, src_dev,1+0.0*I,cnt,block_dep, parameters.layer_start);
}
}
#endif
#if 1
hipFuncSetCacheConfig(compute_diagonal_abs, hipFuncCachePreferL1);
for(block_dep = 0; block_dep < parameters.num_layers; block_dep = block_dep + BLK_SIZE_Z){
hipLaunchKernelGGL(( compute_diagonal_abs), dim3(dim_grid), dim3(dim_block), 0, 0, src_dev,out_dev,info_dyn_dev,cnt,block_dep, parameters.layer_start, parameters.flag);
checkCUDAError("Kernel invocation in compute_diagonal_abs\n");
}
#endif
/* The prop_thresh condition. Again run thread for all the voxels */
#if 1
// printf("Invoking compute_propabs with cnt = %d \n", cnt);
hipFuncSetCacheConfig(compute_propabs, hipFuncCachePreferL1);
for(block_dep = 0; block_dep < parameters.num_layers; block_dep = block_dep + BLK_SIZE_Z){
hipLaunchKernelGGL(( compute_propabs), dim3(dim_grid), dim3(dim_block), 0, 0, src_dev, out_dev, info_dyn_dev, sph_harm_dev, cnt, block_dep,parameters.layer_start,parameters.flag);
//printf("%d operation complete \n", block_dep/parameters.num_layers);
checkCUDAError("Kernel invocation in compute_propabs\n");
}
#endif
hipFuncSetCacheConfig(scale_dist_dev, hipFuncCachePreferL1);
for(block_dep = 0; block_dep < parameters.num_layers; block_dep = block_dep + BLK_SIZE_Z){
hipLaunchKernelGGL(( scale_dist_dev), dim3(dim_grid), dim3(dim_block), 0, 0, out_dev, geom->delX * geom->delY * geom->delZ,cnt,block_dep);
checkCUDAError("Kernel invocation in scale_dist_dev\n");
}
#if 1
if(parameters.flag_scat){
//hipFuncSetCacheConfig(prop_scat_dev, hipFuncCachePreferL1);
for(block_dep = 0; block_dep < parameters.num_layers; block_dep = block_dep + BLK_SIZE_Z){
hipLaunchKernelGGL(( prop_scat_dev), dim3(dim_grid), dim3(dim_block), 0, 0, out_dev, info_dyn_dev,parameters.layer_start,block_dep);
checkCUDAError("Kernel invocation in prop_dscat_dev\n");
}
}
#endif
hipDeviceSynchronize();
MY_SAFE_CALL(hipMemcpy(parameters.out_host, out_dev, sizeof(complex_double)*size_layer*(parameters.num_layers), hipMemcpyDeviceToHost));
MY_SAFE_CALL(hipMemset(out_dev, 0, sizeof(complex_double)*size_layer*(parameters.num_layers)));
MY_SAFE_CALL(hipMemset(src_dev, 0, sizeof(complex_double)*size));
MY_SAFE_CALL(hipMemset(sph_harm_dev, 0, sizeof(complex_double)*(nL+1)*(nL+1) * THETA_ANG_RES * PHI_ANG_RES));
hipFree(src_dev);
hipFree(out_dev);
hipFree(sph_harm_dev);
hipDeviceReset();
pthread_exit(NULL);
#endif
}
|
e13c9aea692988a9dd0ccb4ce963cdedb2e6520f.cu
|
//#include <cuda.h>
//#include <stdio.h>
//#include <stdlib.h>
//#include <errno.h>
//#include <math.h>
#include "rte.h"
//#include <pthread.h>
#include "complex_arith.cu"
extern Geometry *geom;
extern Phantom *phan;
extern Source *beam_src;
extern complex_double *diag_terms_host;
extern complex_double *sph_harm;
extern Info_Stat *info_stat_host;
extern SHORT nL;
extern int nTerms;
__constant__ Info_Stat info_stat;
__constant__ SHORT nL_dev;
__constant__ complex_double diag_terms_dev[MAX_TISS_NUM*MAX_NL*MAX_NL*MAX_NL*MAX_NL];
/* Just think of the vox indices as (blk_dep*BLK_SIZE + dep + info_stat.bounZ, blk_row*BLK_SIZE + row + info_stat.bounY, blk_col * BLK_SIZE + col + info_stat.nX) */
__device__ int get_voxind_spind(int blk_dep, int blk_row,int blk_col, int dep, int row, int col, int cnt){
return VOX_TO_SPIND( ((blk_dep + dep + info_stat.bounZ)*(2*info_stat.bounX + info_stat.nX) * (2*info_stat.bounY + info_stat.nY) + (blk_row*BLK_SIZE + row + info_stat.bounY)* (2*info_stat.bounX + info_stat.nX) + (blk_col * BLK_SIZE + col + info_stat.bounX)), cnt, (nL_dev+1)*(nL_dev+1));
}
__device__ int get_vind_phanind(int dep, int row, int col){
return ((info_stat.bounZ + dep) * (info_stat.nX + 2 * info_stat.bounX ) * (info_stat.nY + 2 * info_stat.bounY ) /* reached the correct layer */ + ( info_stat.bounY + row)* (info_stat.nX + 2 * info_stat.bounX ) + (info_stat.bounX + col));
}
__device__ int get_voxind_phanind(int blk_dep, int blk_row,int blk_col,int blk_offset_i, int blk_offset_j, int blk_offset_k, int dep, int row, int col){
return (info_stat.bounZ + blk_dep + blk_offset_i * BLK_SIZE+ dep) * (info_stat.nX + 2 * info_stat.bounX ) * (info_stat.nY + 2 * info_stat.bounY ) /* reached the correct layer */ + (blk_row * BLK_SIZE + info_stat.bounY + blk_offset_j * BLK_SIZE + row)* (info_stat.nX + 2 * info_stat.bounX ) + (info_stat.bounX + blk_col * BLK_SIZE + blk_offset_k * BLK_SIZE + col);
}
__device__ int get_ind_phanind(int dep, int row, int col){
return dep* (2*info_stat.boun_blk_sizeX + BLK_SIZE)* (BLK_SIZE + 2*info_stat.boun_blk_sizeY) + row* (BLK_SIZE + 2*info_stat.boun_blk_sizeX) + col;
}
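/* Illustrative example (values chosen only for demonstration): get_vind_phanind
 * maps a logical voxel (dep,row,col) into the boundary-padded volume. With
 * nX = nY = 4 and bounX = bounY = bounZ = 1 the padded slice is 6 x 6, so
 * get_vind_phanind(0,0,0) = 1*(6*6) + 1*6 + 1 = 43, i.e. the first interior
 * voxel after the one-voxel padding in each dimension. */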
__device__ flt_doub
mu_int (COR *cor_src, COR *cor_dest, float r, Info_Dyn info_dyn)
{
flt_doub mu_tot_path;
mu_tot_path = 0.0;
float alpha_x_curr, alpha_y_curr, alpha_z_curr;
float alpha_xinc, alpha_yinc, alpha_zinc;
SHORT i_curr, j_curr, k_curr;
float alpha_prev;
byte flag = 1;
alpha_x_curr = 0;
alpha_y_curr = 0;
alpha_z_curr = 0;
alpha_prev = 0;
SHORT x_inc_end,y_inc_end,z_inc_end;
i_curr = cor_src->i;
j_curr = cor_src->j;
k_curr = cor_src->k;
if (cor_dest->i != cor_src->i)
alpha_zinc = fabs (0.5 / (cor_dest->i - cor_src->i)); // The initial increment along the z axis.
else
alpha_zinc = INF;
if (cor_dest->j != cor_src->j)
alpha_yinc = fabs (0.5 / (cor_dest->j - cor_src->j));
else
alpha_yinc = INF;
if (cor_dest->k != cor_src->k)
alpha_xinc = fabs (0.5 / (cor_dest->k - cor_src->k));
else
alpha_xinc = INF;
#if 1
while (flag == 1 && alpha_prev < 1) // Hack for now to avoid infinite loops
{
if (alpha_z_curr + alpha_zinc <= alpha_x_curr + alpha_xinc && alpha_z_curr + alpha_zinc <= alpha_y_curr + alpha_yinc)
{
alpha_z_curr += alpha_zinc;
if ( i_curr == cor_src->i)
alpha_zinc *=2; // We have taken the first step along the z axis, which was half a voxel. Now every step will be one voxel.
mu_tot_path = mu_tot_path + info_stat.mu_tot[info_dyn.tiss_type[get_vind_phanind(i_curr,j_curr,k_curr)]] * r * (alpha_z_curr - alpha_prev);
i_curr = (cor_src->i < cor_dest->i) ? i_curr + 1 : i_curr - 1;
alpha_prev = alpha_z_curr;
if (i_curr == cor_dest->i && j_curr == cor_dest->j && k_curr == cor_dest->k)
{
mu_tot_path = mu_tot_path + info_stat.mu_tot[info_dyn.tiss_type[get_vind_phanind(i_curr,j_curr,k_curr)]] * r * alpha_zinc/2.0;
flag = 0;
return mu_tot_path;
}
}
else if (alpha_y_curr + alpha_yinc < alpha_z_curr + alpha_zinc && alpha_y_curr + alpha_yinc <= alpha_x_curr + alpha_xinc )
{
alpha_y_curr += alpha_yinc;
if ( j_curr == cor_src->j)
alpha_yinc *=2;
mu_tot_path = mu_tot_path + info_stat.mu_tot[info_dyn.tiss_type[get_vind_phanind(i_curr,j_curr,k_curr)]] * r * (alpha_y_curr - alpha_prev);
j_curr = (cor_src->j < cor_dest->j) ? j_curr + 1 : j_curr - 1;
alpha_prev = alpha_y_curr;
if (i_curr == cor_dest->i && j_curr == cor_dest->j && k_curr == cor_dest->k)
{
mu_tot_path = mu_tot_path + info_stat.mu_tot[info_dyn.tiss_type[get_vind_phanind(i_curr,j_curr,k_curr)]] * r * alpha_yinc/2.0;
flag = 0;
return mu_tot_path;
}
}
else if (alpha_x_curr + alpha_xinc < alpha_y_curr + alpha_yinc && alpha_x_curr + alpha_xinc < alpha_z_curr + alpha_zinc )
{
alpha_x_curr += alpha_xinc;
if ( k_curr == cor_src->k)
alpha_xinc *=2;
mu_tot_path = mu_tot_path + (info_stat.mu_tot[info_dyn.tiss_type[get_vind_phanind(i_curr,j_curr,k_curr)]]) * r * (alpha_x_curr - alpha_prev);
k_curr = (cor_src->k < cor_dest->k) ? k_curr + 1 : k_curr - 1;
alpha_prev = alpha_x_curr;
if (i_curr == cor_dest->i && j_curr == cor_dest->j && k_curr == cor_dest->k)
{
mu_tot_path = mu_tot_path + info_stat.mu_tot[info_dyn.tiss_type[get_vind_phanind(i_curr,j_curr,k_curr)]] * r * alpha_xinc/2.0;
flag = 0;
return mu_tot_path;
}
}
}
#endif
return mu_tot_path;
}
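/* Note: mu_int effectively performs an incremental Siddon-style traversal of the
 * voxel grid between cor_src and cor_dest. The alpha_* variables track the
 * parametric position (in [0,1]) of the next x/y/z voxel-face crossing, and at
 * every crossing the attenuation of the voxel just traversed is accumulated with
 * weight r*(alpha - alpha_prev). Callers use the result as a Beer-Lambert factor,
 * e.g. exp(-mu_int(cor_src, cor_dest, dist, info_dyn)). */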
__global__ void compute_diagonal_abs (doublecomplex *src_dist,doublecomplex *out_dist,Info_Dyn info_dyn, int cnt,SHORT block_dep, SHORT layer_start, SHORT flag)
{
int block_row = blockIdx.y;
int block_col = blockIdx.x;
int i = threadIdx.z;
int j = threadIdx.y;
int k = threadIdx.x;
int sp_ind, sp_ind_src, r_ind;
SHORT cntp;
SHORT nL_tmp;
flt_doub cm_tmp = 1.0;
nL_tmp = (flag == 0) ? 0 : nL_dev;
// cm_tmp = (flag == 0) ? 1 : info_stat.cm
r_ind = get_vind_phanind(block_dep + layer_start + i, block_row * BLK_SIZE + j, block_col * BLK_SIZE+ k);
for(cnt=0; cnt < (nL_tmp+1)*(nL_tmp+1); cnt++){
sp_ind = get_voxind_spind(block_dep - info_stat.bounZ, block_row, block_col ,i,j,k,cnt);
out_dist[sp_ind] = 0 + 0*I;
for(cntp=0; cntp<(nL_tmp+1)*(nL_tmp+1) ; cntp++){
sp_ind_src = get_voxind_spind(block_dep + layer_start, block_row, block_col,i,j,k,cntp);
out_dist[sp_ind] = out_dist[sp_ind] + (1.0/cm_tmp)*diag_terms_dev[cnt * (MAX_NL) * MAX_NL * MAX_TISS_NUM + cntp * MAX_TISS_NUM + info_dyn.tiss_type[r_ind]] * src_dist[sp_ind_src];
}
}
}
#if 0
//extern __shared__ char array_tisstype[];
__global__ void compute_subvox_2(complex_double* src_dist_dev,complex_double* out_dist_dev, Info_Dyn info_dyn, complex_double* sph_harm_dev, SHORT cnt, COR subvox_src,COR subvox_dest, flt_doub dz_sub, flt_doub dy_sub, flt_doub dx_sub, SHORT blk_dep, SHORT start_layer){
SHORT block_row = blockIdx.y;
SHORT block_col = blockIdx.x;
SHORT i = threadIdx.z;
SHORT j = threadIdx.y;
SHORT k = threadIdx.x;
SHORT cntp;
complex_double out_tmp, tmp;
COR cor_src, cor_dest;
SHORT ip, jp, kp;
int sp_ind;
flt_doub theta,phi,dist,dx,dy,dz;
int sp_ind_src;
#if 0
cor_src.i = i + info_stat.boun_blk_sizeZ;
cor_src.j = j + info_stat.boun_blk_sizeY;
cor_src.k = k + info_stat.boun_blk_sizeX;
#else
cor_src.i = i + blk_dep + start_layer;
cor_src.j = j + block_row*BLK_SIZE;
cor_src.k = k + block_col*BLK_SIZE;
#endif
// __shared__ complex_double src_tmp[BLK_SRC_SIZE];
#if 0
int blk_offset_i, blk_offset_j,blk_offset_k;
byte *tisstype_tmp = (byte *) array_tisstype;
for(blk_offset_i=0; blk_offset_i< 1 + 2*info_stat.boun_blkZ; blk_offset_i++){
for(blk_offset_j=0; blk_offset_j< 1 + 2*info_stat.boun_blkY; blk_offset_j++){
for(blk_offset_k=0; blk_offset_k< 1 + 2*info_stat.boun_blkX; blk_offset_k++){
tisstype_tmp[get_ind_phanind(blk_offset_i*BLK_SIZE + i, blk_offset_j*BLK_SIZE + j, blk_offset_k*BLK_SIZE + k)] = info_dyn.tiss_type[get_voxind_phanind(blk_dep, block_row, block_col,blk_offset_i - info_stat.boun_blkZ,blk_offset_j - info_stat.boun_blkY,blk_offset_k - info_stat.boun_blkX,i,j,k)];
// src_tmp[get_ind_phanind(blk_offset_i*BLK_SIZE + i, blk_offset_j*BLK_SIZE + j, blk_offset_k*BLK_SIZE + k)] = src_dist_dev[VOX_TO_SPIND(get_voxind_phanind(blk_dep, block_row, block_col,(blk_offset_i - info_stat.bounZ/BLK_SIZE),(blk_offset_j - info_stat.bounY/BLK_SIZE),(blk_offset_k - info_stat.bounX/BLK_SIZE),i,j,k),cntp,info_stat.no_vox)];
}
}
}
__syncthreads();
#endif
out_tmp = 0 + 0*I;
flt_doub sub_dist;
for(ip= i - info_stat.subbounZ; ip <= i + info_stat.subbounZ; ip++){
dz = -(ip-i)*info_stat.delZ + dz_sub;
for(jp= j - info_stat.subbounY; jp <= j + info_stat.subbounY; jp++){
dy = -(jp-j)*info_stat.delY + dy_sub;
for(kp= k - info_stat.subbounX; kp <= k + info_stat.subbounX; kp++){
dx = -(kp-k)*info_stat.delX + dx_sub;
dist = sqrt((i-ip)*(i-ip)*info_stat.delZ*info_stat.delZ + (j-jp)*(j-jp)*info_stat.delY*info_stat.delY + (k-kp)*(k-kp)*info_stat.delX*info_stat.delX);
if( dist <= info_stat.sub_thresh && ( i != ip || j != jp || k != kp)){
sub_dist = sqrt(dx*dx + dy*dy + dz*dz);
#if 0
cor_dest.i = ip + info_stat.boun_blk_sizeZ;
cor_dest.j = jp + info_stat.boun_blk_sizeY;
cor_dest.k = kp + info_stat.boun_blk_sizeX;
#else
cor_dest.i = ip +blk_dep + start_layer;
cor_dest.j = jp + block_row*BLK_SIZE;
cor_dest.k = kp + block_col*BLK_SIZE;
#endif
theta = atan(sqrt(dx*dx + dy*dy)/dz );
phi = atan2(dy,dx);
for(cnt=0; cnt < (nL_dev+1)*(nL_dev+1); cnt++){
sp_ind = get_voxind_spind(blk_dep - info_stat.bounZ, block_row, block_col,i,j,k,cnt);
tmp = 0 + 0*I;
for(cntp=0; cntp< (nL_dev+1)*(nL_dev+1); cntp++){
sp_ind_src = get_voxind_spind(blk_dep + start_layer, block_row, block_col,ip,jp,kp,cntp);
tmp = tmp + src_dist_dev[sp_ind_src] * sph_harm_dev[SPH_HARM_IND(cntp,theta,phi)];
}
tmp = tmp * ~sph_harm_dev[SPH_HARM_IND(cnt,theta,phi)];
tmp = tmp* exp(-mu_int(cor_src, cor_dest, subvox_src, subvox_dest,sub_dist, info_dyn )); ;//cexp_dev(-(1.01+0.0*I)*sub_dist);// cexp_dev(-mu_int(cor_src, cor_dest, subvox_src, subvox_dest,sub_dist, tisstype_tmp,info_dyn )); //cexp_dev(-(1.01+0.0*I)*dist)
tmp = tmp * (1.0/( info_stat.cm * sub_dist*sub_dist * __powf(__int2float_ru(info_stat.sub_vox),6.0)));
out_dist_dev[sp_ind] = out_dist_dev[sp_ind] + tmp;
//out_tmp = out_tmp + tmp;
}
}
}
}
}
__syncthreads();
}
#endif
__global__ void compute_propabs(complex_double *src_dist_dev, complex_double *out_dist_dev, Info_Dyn info_dyn,complex_double *sph_harm_dev,SHORT cnt, SHORT blk_dep, SHORT start_layer, SHORT flag){ // If flag =1, only then evaluate all the other spherical harmonics, else nl = 0.
SHORT block_row = blockIdx.y;
SHORT block_col = blockIdx.x;
SHORT i = threadIdx.z;
SHORT j = threadIdx.y;
SHORT k = threadIdx.x;
int sp_ind;
COR *cor_src, *cor_dest;
SHORT cntp;
flt_doub theta,phi,dist,dx,dy,dz;
SHORT ip,jp,kp;
int sp_ind_src;
int nL_tmp;
flt_doub cm_tmp = 1.0;
cor_src = (COR*)malloc(sizeof(COR));
cor_dest = (COR*)malloc(sizeof(COR));
complex_double tmp;
nL_tmp = (flag == 0) ? 0 : nL_dev;
cor_src->i = i + blk_dep + start_layer;
cor_src->j = j + block_row*BLK_SIZE;
cor_src->k = k + block_col*BLK_SIZE;
//int sp_ind2;
for (ip = 0; ip < info_stat.nZ ; ip++){
dz = -(ip-cor_src->i)*info_stat.delZ;
for (jp = 0; jp < info_stat.nY ; jp++){
dy = -(jp-cor_src->j)*info_stat.delY;
for (kp = 0; kp < info_stat.nX; kp++){
dx = -(kp-cor_src->k)*info_stat.delX;
dist = sqrt(dx*dx + dy*dy + dz*dz);
if((ip != cor_src->i || jp != cor_src->j || kp != cor_src->k) && dist < info_stat.prop_thresh){
theta = acos(dz/dist );
if(theta < 0)
theta = theta + M_PI;
phi = atan2(dy,dx);
// if(phi < 0)
// phi = phi + 2*M_PI;
#if 1
cor_dest->i = ip;
cor_dest->j = jp;
cor_dest->k = kp;
#endif
for(cnt = 0; cnt < (nL_tmp+1)*(nL_tmp+1); cnt++){
sp_ind = get_voxind_spind(blk_dep - info_stat.bounZ, block_row, block_col,i,j,k,cnt);
tmp = 0 + 0*I;
for(cntp=0; cntp< (nL_tmp+1)*(nL_tmp+1); cntp++){
sp_ind_src = get_voxind_spind(0, 0, 0,ip,jp,kp,cntp);
tmp = tmp + src_dist_dev[sp_ind_src]*sph_harm_dev[SPH_HARM_IND(cntp,theta,phi)];
}
tmp = tmp * ~sph_harm_dev[SPH_HARM_IND(cnt,theta,phi)];
tmp = tmp * exp(-mu_int(cor_src, cor_dest, dist, info_dyn )); //cexp_dev(-1*(1.01 + 0*I)*dist); // have commented this line
// tmp = tmp * cexp_dev(-1*(1.01 + 0*I)*dist);
tmp = tmp * (1.0/(cm_tmp*dist*dist));
out_dist_dev[sp_ind] = out_dist_dev[sp_ind] + tmp;
}
}
}
}
}
free(cor_src);
free(cor_dest);
__syncthreads();
}
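/* Note (an interpretation of the code, not original documentation): for each
 * destination voxel, compute_propabs sums the contribution of every source voxel
 * closer than prop_thresh, weighting the source's spherical-harmonic expansion by
 * the harmonic evaluated along the propagation direction, the attenuation factor
 * exp(-mu_int(...)) along the connecting ray, and the geometric 1/(dist*dist)
 * falloff. */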
__global__ void scale_dist_dev (doublecomplex *W,double scale_fac,SHORT cnt,SHORT block_dep )
{
int sp_ind;
int block_row = blockIdx.y;
int block_col = blockIdx.x;
int i = threadIdx.z;
int j = threadIdx.y;
int k = threadIdx.x;
for(cnt=0; cnt < (nL_dev+1)*(nL_dev+1); cnt++){
sp_ind = get_voxind_spind(block_dep , block_row , block_col ,i,j,k,cnt);
W[sp_ind] = W[sp_ind]*scale_fac;
}
}
__global__ void prop_scat_dev (doublecomplex *out_dist, Info_Dyn info_dyn, short layer_start, SHORT block_dep)
{
int sp_ind,cnt,l,r_ind;
int block_row = blockIdx.y;
int block_col = blockIdx.x;
int i = threadIdx.z;
int j = threadIdx.y;
int k = threadIdx.x;
r_ind = get_vind_phanind(block_dep + layer_start + i, block_row * BLK_SIZE + j, block_col * BLK_SIZE+ k);
for (cnt = 0; cnt < (nL_dev+1)*(nL_dev+1); cnt++){
sp_ind = get_voxind_spind(block_dep, block_row, block_col,i,j,k,cnt);
l = (int) sqrtf(cnt);
out_dist[sp_ind] = pow (__int2double_rn(info_stat.g),__int2double_rn(l)) * out_dist[sp_ind] * info_stat.mu_sc[info_dyn.tiss_type[r_ind]] ;
}
__syncthreads();
}
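/* Note (an interpretation of the code): the harmonic coefficients appear to be
 * packed as cnt = l*l + l + m, so l = floor(sqrt(cnt)) recovers the order
 * (cnt 0 -> l = 0, cnt 1..3 -> l = 1, cnt 4..8 -> l = 2). Each order is scaled by
 * g^l times the local scattering coefficient, i.e. the usual anisotropic-scattering
 * expansion in a spherical-harmonic basis. */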
__global__ void write_dist_dev (doublecomplex *W,doublecomplex val,SHORT cnt,SHORT block_dep, SHORT layer_start)
{
int sp_ind;
int block_row = blockIdx.y;
int block_col = blockIdx.x;
int i = threadIdx.z;
int j = threadIdx.y;
int k = threadIdx.x;
for(cnt=0; cnt < (nL_dev+1)*(nL_dev+1); cnt++){
sp_ind = get_voxind_spind(block_dep - info_stat.bounZ + layer_start, block_row, block_col,i,j,k,cnt);
W[sp_ind] = val;
}
}
__global__ void copy_dist_dev (doublecomplex *W1,doublecomplex *W2)
{
int sp_ind;
int cnt=0;
int block_dep=0;
int block_row = blockIdx.y;
int block_col = blockIdx.x;
int i = threadIdx.z;
int j = threadIdx.y;
int k = threadIdx.x;
for(block_dep = 0; block_dep < info_stat.nZ; block_dep = block_dep + BLK_SIZE){
for (cnt = 0; cnt < (nL_dev+1)*(nL_dev+1); cnt++){
sp_ind = get_voxind_spind(block_dep - info_stat.bounZ, block_row, block_col,i,j,k,cnt);
W2[sp_ind] = W1[sp_ind];
__syncthreads();
}
}
}
__global__ void add_dist_dev (doublecomplex *W1,doublecomplex *W2, doublecomplex *out )
{
int sp_ind,cnt,block_dep;
int block_row = blockIdx.y;
int block_col = blockIdx.x;
int i = threadIdx.z;
int j = threadIdx.y;
int k = threadIdx.x;
for(block_dep = 0; block_dep < info_stat.nZ; block_dep = block_dep + BLK_SIZE){
for (cnt = 0; cnt < (nL_dev+1)*(nL_dev+1); cnt++){
sp_ind = get_voxind_spind(block_dep -info_stat.bounZ, block_row, block_col,i,j,k,cnt);
out[sp_ind] = W1[sp_ind] + W2[sp_ind];
}
}
}
__global__ void compute_sph_coord(flt_doub* theta_self, flt_doub* phi_self, flt_doub* sph_x, flt_doub* sph_y, flt_doub* sph_z, int theta_blk, int phi_blk)
{
int theta_count = threadIdx.x + theta_blk;
int phi_count = threadIdx.y + phi_blk;
int omega_count;
omega_count = theta_count*ANG_RES + phi_count;
theta_self[omega_count] = theta_count * M_PI / ANG_RES ;
phi_self[omega_count] = phi_count * 2.0*M_PI / ANG_RES ;
sph_x[omega_count] = cos(phi_count * 2.0*M_PI / ANG_RES) * sin(theta_count * M_PI / ANG_RES);
sph_y[omega_count] = sin(phi_count * 2.0*M_PI / ANG_RES) * sin(theta_count * M_PI / ANG_RES) ;
sph_z[omega_count] = cos(theta_count * M_PI / ANG_RES) ;
}
#if 0
__global__ void compute_diag_selfsub(complex_double *fact_self_vox, flt_doub *sph_x, flt_doub *sph_y, flt_doub *sph_z, flt_doub *theta_self, int omega_count, SHORT tiss_num, int blk_dep)
{
int blk_row = blockIdx.y;
int blk_col = blockIdx.x;
int z_ind = threadIdx.z + blk_dep;
int y_ind = threadIdx.y +blk_row* BLK_SELF_SUB_VOX;
int x_ind = threadIdx.x + blk_col* BLK_SELF_SUB_VOX;
int face_calc;
int face = 1;
flt_doub face_x, face_y, face_z, cube_x, cube_y, cube_z, dist_self ;
//int r_ind_self = (threadIdx.z + blk_dep) * (info_stat.self_sub_vox)*(info_stat.self_sub_vox) + (threadIdx.y +blk_row* BLK_SELF_SUB_VOX) * info_stat.self_sub_vox + (threadIdx.x + blk_col* BLK_SELF_SUB_VOX);
int r_ind_self = (z_ind) * (info_stat.self_sub_vox)*(info_stat.self_sub_vox) + (y_ind) * info_stat.self_sub_vox + (x_ind);
flt_doub ii_self = -info_stat.self_sub_vox/2.0 +0.5 + z_ind;
flt_doub jj_self = -info_stat.self_sub_vox/2.0 +0.5 + y_ind;
flt_doub kk_self = -info_stat.self_sub_vox/2.0 +0.5 + x_ind;
face_x = 0;
face_y = 0;
face_z = 0;
cube_x = 0;
cube_y = 0;
cube_z = 0;
if (sph_x[omega_count] != 0.0){
for ( face_calc = 0; face_calc <2; face_calc++){
face_x = face_calc ==0 ? face:-face;
face_y = (face_x - ii_self*2.0/info_stat.self_sub_vox) * sph_y[omega_count]/ sph_x[omega_count] + jj_self*2.0/info_stat.self_sub_vox;
face_z = (face_x - ii_self*2.0/info_stat.self_sub_vox) * sph_z[omega_count]/ sph_x[omega_count] + kk_self*2.0/info_stat.self_sub_vox;
if (face_x <= face && face_x >= -face && face_y <=face && face_y >= -face && face_z <= face && face_z >= -face && sph_x[omega_count] * face_x >=0){
cube_x = face_x;
cube_y = face_y;
cube_z = face_z;
}
}
}
#if 1
if(sph_y[omega_count] != 0.0){
for ( face_calc = 0; face_calc <2; face_calc++){
face_y = face_calc ==0 ? face:-face;
face_z = (face_y - jj_self*2.0/info_stat.self_sub_vox) * sph_z[omega_count]/ sph_y[omega_count] + kk_self*2.0/info_stat.self_sub_vox;
face_x = (face_y - jj_self*2.0/info_stat.self_sub_vox) * sph_x[omega_count]/ sph_y[omega_count] + ii_self*2.0/info_stat.self_sub_vox;
if (face_x <= face && face_x >= -face && face_y <= face && face_y >= -face && face_z <= face && face_z >= -face && sph_y[omega_count] * face_y >= 0){
cube_x = face_x;
cube_y = face_y;
cube_z = face_z;
}
}
}
if(sph_z[omega_count] != 0.0){
for ( face_calc = 0; face_calc <2; face_calc++){
face_z = face_calc ==0 ? face:-face;
face_x = (face_z - kk_self*2.0/info_stat.self_sub_vox) * sph_x[omega_count]/ sph_z[omega_count] + ii_self*2.0/info_stat.self_sub_vox;
face_y = (face_z - kk_self*2.0/info_stat.self_sub_vox) * sph_y[omega_count]/ sph_z[omega_count] + jj_self*2.0/info_stat.self_sub_vox;
if (face_x <= face && face_x >= -face && face_y <= face && face_y >= -face && face_z <= face && face_z >= -face && sph_z[omega_count] * face_z >=0){
cube_x = face_x;
cube_y = face_y;
cube_z = face_z;
}
}
}
#endif
dist_self = sqrt( (ii_self*2.0/info_stat.self_sub_vox - cube_x)*(ii_self*2.0/info_stat.self_sub_vox - cube_x) + (jj_self*2.0/info_stat.self_sub_vox- cube_y)*(jj_self*2.0/info_stat.self_sub_vox- cube_y) + (kk_self*2.0/info_stat.self_sub_vox - cube_z)*(kk_self*2.0/info_stat.self_sub_vox - cube_z)) * info_stat.delX/2.0; //square voxel approx.
fact_self_vox[omega_count * info_stat.self_sub_vox * info_stat.self_sub_vox * info_stat.self_sub_vox + r_ind_self ] = ( 1 - exp( -(info_stat.mu_tot[tiss_num]) * dist_self)) * sin(theta_self[omega_count]);
}
#endif
#if 0
void generate_diag_terms_dev() {
int ang_res = ANG_RES;
int omega_count,ang_ind;
int r_ind_self;
complex_double *rt_self, *rtp_self;
int l,m,lp,mp,cnt,cntp;
flt_doub *theta_self, *phi_self, *sph_x_self, *sph_y_self, *sph_z_self;
flt_doub cm, del_theta, del_phi;
complex_double sub_v_sum_self;
int i;
cm = C / phan->n;
diag_terms_host = (complex_double *)malloc(sizeof(complex_double )* MAX_TISS_NUM*MAX_NL*MAX_NL*MAX_NL*MAX_NL);
theta_self = (flt_doub *)malloc(sizeof(flt_doub) * pow( ang_res,2));
phi_self = (flt_doub *)malloc(sizeof(flt_doub) * pow(ang_res,2));
sph_x_self = (flt_doub *)malloc(sizeof(flt_doub) * pow(ang_res,2));
sph_y_self = (flt_doub *)malloc(sizeof(flt_doub) * pow(ang_res,2));
sph_z_self = (flt_doub *)malloc(sizeof(flt_doub) * pow(ang_res,2));
rt_self = (complex_double *)malloc(sizeof(complex_double) * pow(ang_res,2));
rtp_self = (complex_double *)malloc(sizeof(complex_double) * pow(ang_res,2));
flt_doub *theta_self_dev, *phi_self_dev,*sph_x_dev,*sph_y_dev,*sph_z_dev;
complex_double *fact_self_vox_dev, *fact_self_vox_host;
cudaMalloc(&theta_self_dev, sizeof(flt_doub)*pow( ang_res,2));
MY_SAFE_CALL(cudaMalloc(&phi_self_dev, sizeof(flt_doub)*pow( ang_res,2)));
MY_SAFE_CALL(cudaMalloc(&sph_x_dev,sizeof(flt_doub)*pow( ang_res,2)));
MY_SAFE_CALL(cudaMalloc(&sph_y_dev,sizeof(flt_doub)*pow( ang_res,2)));
MY_SAFE_CALL(cudaMalloc(&sph_z_dev,sizeof(flt_doub)*pow( ang_res,2)));
MY_SAFE_CALL(cudaMalloc(&fact_self_vox_dev, sizeof(complex_double ) * pow(geom->self_sub_vox,3) * pow( ang_res,2)));
fact_self_vox_host = (complex_double *) malloc (sizeof(complex_double ) * pow(geom->self_sub_vox,3) * pow( ang_res,2));
if(fact_self_vox_host == NULL){
printf("error in memory allocation \n");
exit(0);
}
dim3 dim_block_1(BLK_ANG_SIZE,BLK_ANG_SIZE,1);
dim3 dim_grid_1(1,1);
dim3 dim_block_2(BLK_SELF_SUB_VOX,BLK_SELF_SUB_VOX,BLK_SELF_SUB_VOX);
dim3 dim_grid_2(geom->self_sub_vox/BLK_SELF_SUB_VOX,geom->self_sub_vox/BLK_SELF_SUB_VOX);
int theta_count, phi_count;
for(theta_count = 0; theta_count < ANG_RES; theta_count = theta_count + BLK_ANG_SIZE){
for( phi_count=0; phi_count < ANG_RES; phi_count = phi_count + BLK_ANG_SIZE){
compute_sph_coord<<<dim_grid_1, dim_block_1>>>(theta_self_dev, phi_self_dev, sph_x_dev, sph_y_dev, sph_z_dev, theta_count, phi_count);
checkCUDAError("Kernel invocation in compute_sph_coord\n");
}
}
cudaMemcpy(theta_self, theta_self_dev, sizeof(flt_doub)*pow( ang_res,2), cudaMemcpyDeviceToHost);
cudaMemcpy(phi_self, phi_self_dev, sizeof(flt_doub)*pow( ang_res,2), cudaMemcpyDeviceToHost);
omega_count = 0;
/*
for(theta_count = 0; theta_count < ANG_RES; theta_count = theta_count + BLK_ANG_SIZE){
for( phi_count=0; phi_count < ANG_RES; phi_count = phi_count + BLK_ANG_SIZE){
omega_count = theta_count * ANG_RES + phi_count;
// printf("%f %f %f \n", sph_x_self[omega_count], sph_y_self[omega_count],sph_z_self[omega_count], omega_count);
}
}
*/
del_theta = M_PI / ANG_RES;
del_phi = 2*M_PI / ANG_RES;
int tiss_num;
int blk_dep;
omega_count = 0;
for (tiss_num = 1; tiss_num < phan->no_tiss; tiss_num++){
for ( omega_count = 0; omega_count < pow(ang_res,2); omega_count++){
for(blk_dep=0; blk_dep < geom->self_sub_vox; blk_dep = blk_dep + BLK_SELF_SUB_VOX){
compute_diag_selfsub<<<dim_grid_2, dim_block_2>>>(fact_self_vox_dev, sph_x_dev, sph_y_dev,sph_z_dev, theta_self_dev, omega_count, tiss_num,blk_dep);
checkCUDAError("Kernel invocation in compute_diag_selfsub\n");
}
}
cudaMemcpy(fact_self_vox_host, fact_self_vox_dev, sizeof(complex_double) * pow(geom->self_sub_vox,3) * pow( ang_res,2), cudaMemcpyDeviceToHost);
cnt = 0;
for (l = 0; l <= nL; l++) {
for (m = -l; m <= l; m++) {
cntp = 0;
SpherHarmonicArray(l, m, powf(ang_res,2), theta_self, phi_self, rt_self);
for (lp = 0; lp <= nL; lp++) {
for (mp = -lp; mp <= lp; mp++) {
sub_v_sum_self = 0.0 + 0.0*I;
SpherHarmonicArray(lp, mp, pow(ang_res,2), theta_self, phi_self, rtp_self);
for ( omega_count = 0; omega_count < ang_res * ang_res; omega_count++){
for ( r_ind_self = 0; r_ind_self < pow(geom->self_sub_vox,3); r_ind_self++){
sub_v_sum_self = sub_v_sum_self + ~(rt_self[omega_count]) * rtp_self[omega_count] * fact_self_vox_host[omega_count * (int)pow(geom->self_sub_vox,3) + r_ind_self];
}
}
diag_terms_host[cnt*MAX_TISS_NUM*(MAX_NL)*(MAX_NL) + cntp * MAX_TISS_NUM + tiss_num] = sub_v_sum_self * del_theta * del_phi / (cm * pow((double)geom->self_sub_vox,3) * (phan->mu_abs[tiss_num] + phan->mu_sc[tiss_num]) * geom->delX * geom->delY * geom->delZ) ;
if(cnt == cntp){
printf("The diagonal term is %e +%e i for tiss = %d, cnt = %d and cntp = %d \n", diag_terms_host[cnt*MAX_TISS_NUM*(MAX_NL)*(MAX_NL) + cntp * MAX_TISS_NUM + tiss_num].real(), diag_terms_host[cnt*MAX_TISS_NUM*(MAX_NL)*(MAX_NL) + cntp * MAX_TISS_NUM + tiss_num].imag(), tiss_num, cnt, cntp);
}
cntp++;
}
}
cnt++;
}
}
}
cudaFree(sph_x_dev);
cudaFree(sph_y_dev);
cudaFree(sph_z_dev);
cudaFree(theta_self_dev);
cudaFree(phi_self_dev);
exit(0);
}
#endif
void checkCUDAError(const char *msg)
{
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err)
{
fprintf(stderr, "Cuda error: %s: %s.\n", msg,
cudaGetErrorString( err) );
exit(EXIT_FAILURE);
}
}
void * prop_abs(void *arg){
#if 1
SHORT cnt;
SHORT block_dep;
//COR subvox_src, subvox_dest;
//SIGNED_SHORT i,j,k,ip,jp,kp;
//flt_doub dx_sub, dy_sub, dz_sub;
dim3 dim_block(BLK_SIZE, BLK_SIZE, BLK_SIZE_Z);
dim3 dim_grid(geom->nX/dim_block.x,geom->nY/dim_block.y);
// printf("% d and %d are no of blocks per grid \n", dim_grid.y, dim_grid.x);
// printf("% d %d and %d are no of threads per block \n", dim_block.x, dim_block.y, dim_block.z);
const THREAD_PARAMETERS parameters = *((THREAD_PARAMETERS *) arg);
int size_layer = ( geom->nX + 2*geom->bounX ) * ( geom->nY + 2*geom->bounY ) * ( nL+1) * (nL+1) ;
int size = ( geom->nX + 2*geom->bounX ) * ( geom->nY + 2*geom->bounY ) * (geom->nZ + 2*geom->bounZ) * ( nL+1) * (nL+1);
complex_double *src_dev, *out_dev;
if(cudaSetDevice(parameters.device_index) != cudaSuccess){
printf("Error in setting up device %d \n", parameters.device_index);
exit(0);
}
MY_SAFE_CALL(cudaMalloc(&src_dev, sizeof(complex_double)*size));
MY_SAFE_CALL(cudaMalloc(&out_dev, sizeof(complex_double)*size_layer*(parameters.num_layers)));
MY_SAFE_CALL(cudaMemset(out_dev, 0, sizeof(complex_double)*size_layer*(parameters.num_layers)));
MY_SAFE_CALL(cudaMemcpy(src_dev, parameters.src_host,sizeof(complex_double)*size, cudaMemcpyHostToDevice));
Info_Dyn info_dyn_dev;
MY_SAFE_CALL(cudaMalloc(&(info_dyn_dev.tiss_type), sizeof(byte)*geom->no_vox));
MY_SAFE_CALL(cudaMemcpy(info_dyn_dev.tiss_type, phan->tiss_type,sizeof(byte)*geom->no_vox, cudaMemcpyHostToDevice));
MY_SAFE_CALL(cudaMemcpyToSymbol(info_stat,info_stat_host,sizeof(Info_Stat) ));
MY_SAFE_CALL(cudaMemcpyToSymbol(nL_dev,&nL,sizeof(SHORT) ));
MY_SAFE_CALL(cudaMemcpyToSymbol(diag_terms_dev,diag_terms_host, sizeof(complex_double)*MAX_TISS_NUM*MAX_NL*MAX_NL*MAX_NL*MAX_NL));
complex_double *sph_harm_dev;
MY_SAFE_CALL(cudaMalloc(&sph_harm_dev, sizeof(complex_double)*(nL+1)*(nL+1) * THETA_ANG_RES * PHI_ANG_RES));
MY_SAFE_CALL(cudaMemcpy(sph_harm_dev,sph_harm, sizeof(complex_double)*(nL+1)*(nL+1) * THETA_ANG_RES * PHI_ANG_RES,cudaMemcpyHostToDevice));
#if 0
for(cnt=0; cnt < (nL+1)*(nL+1); cnt++){
printf("Invoking compute_diagonal_abs with cnt = %d \n", cnt);
for(block_dep = 0; block_dep < parameters.num_layers; block_dep = block_dep + BLK_SIZE_Z){
write_dist_dev<<<dim_grid, dim_block>>>(src_dev,1+0.0*I,cnt,block_dep, parameters.layer_start);
}
}
#endif
#if 1
cudaFuncSetCacheConfig(compute_diagonal_abs, cudaFuncCachePreferL1);
for(block_dep = 0; block_dep < parameters.num_layers; block_dep = block_dep + BLK_SIZE_Z){
compute_diagonal_abs<<<dim_grid, dim_block>>>(src_dev,out_dev,info_dyn_dev,cnt,block_dep, parameters.layer_start, parameters.flag);
checkCUDAError("Kernel invocation in compute_diagonal_abs\n");
}
#endif
/* The prop_thresh condition. Again run thread for all the voxels */
#if 1
// printf("Invoking compute_propabs with cnt = %d \n", cnt);
cudaFuncSetCacheConfig(compute_propabs, cudaFuncCachePreferL1);
for(block_dep = 0; block_dep < parameters.num_layers; block_dep = block_dep + BLK_SIZE_Z){
compute_propabs<<<dim_grid, dim_block>>>(src_dev, out_dev, info_dyn_dev, sph_harm_dev, cnt, block_dep,parameters.layer_start,parameters.flag);
//printf("%d operation complete \n", block_dep/parameters.num_layers);
checkCUDAError("Kernel invocation in compute_propabs\n");
}
#endif
cudaFuncSetCacheConfig(scale_dist_dev, cudaFuncCachePreferL1);
for(block_dep = 0; block_dep < parameters.num_layers; block_dep = block_dep + BLK_SIZE_Z){
scale_dist_dev<<<dim_grid, dim_block>>>(out_dev, geom->delX * geom->delY * geom->delZ,cnt,block_dep);
checkCUDAError("Kernel invocation in scale_dist_dev\n");
}
#if 1
if(parameters.flag_scat){
//cudaFuncSetCacheConfig(prop_scat_dev, cudaFuncCachePreferL1);
for(block_dep = 0; block_dep < parameters.num_layers; block_dep = block_dep + BLK_SIZE_Z){
prop_scat_dev<<<dim_grid, dim_block>>>(out_dev, info_dyn_dev,parameters.layer_start,block_dep);
checkCUDAError("Kernel invocation in prop_dscat_dev\n");
}
}
#endif
cudaThreadSynchronize();
MY_SAFE_CALL(cudaMemcpy(parameters.out_host, out_dev, sizeof(complex_double)*size_layer*(parameters.num_layers), cudaMemcpyDeviceToHost));
MY_SAFE_CALL(cudaMemset(out_dev, 0, sizeof(complex_double)*size_layer*(parameters.num_layers)));
MY_SAFE_CALL(cudaMemset(src_dev, 0, sizeof(complex_double)*size));
MY_SAFE_CALL(cudaMemset(sph_harm_dev, 0, sizeof(complex_double)*(nL+1)*(nL+1) * THETA_ANG_RES * PHI_ANG_RES));
cudaFree(src_dev);
cudaFree(out_dev);
cudaFree(sph_harm_dev);
cudaThreadExit();
pthread_exit(NULL);
#endif
}
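/* Note: prop_abs is the per-device worker executed by one pthread per GPU. Each
 * thread binds its device with cudaSetDevice, uploads the full source distribution,
 * and computes only its own slab of num_layers z-layers starting at layer_start, so
 * several GPUs can process disjoint layer ranges of the same volume concurrently. */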
|
100991be592c9465c21e6730dc07ce90d770cada.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "yoloPlugin.h"
#include "kernel.h"
#include <cstring>
#include <rocblas.h>
#include <cudnn.h>
#include <iostream>
#include <sstream>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
inline __device__ float sigmoidGPU(const float& x) { return 1.0f / (1.0f + __expf(-x)); }
__global__ void gpuYoloLayerV3(const float* input, float* output, const uint gridSize, const uint numOutputClasses,
const uint numBBoxes)
{
uint x_id = blockIdx.x * blockDim.x + threadIdx.x;
uint y_id = blockIdx.y * blockDim.y + threadIdx.y;
uint z_id = blockIdx.z * blockDim.z + threadIdx.z;
if ((x_id >= gridSize) || (y_id >= gridSize) || (z_id >= numBBoxes))
{
return;
}
const int numGridCells = gridSize * gridSize;
const int bbindex = y_id * gridSize + x_id;
output[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 0)]
= sigmoidGPU(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 0)]);
output[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 1)]
= sigmoidGPU(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 1)]);
output[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 2)]
= __expf(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 2)]);
output[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 3)]
= __expf(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 3)]);
output[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 4)]
= sigmoidGPU(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 4)]);
for (uint i = 0; i < numOutputClasses; ++i)
{
output[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + (5 + i))]
= sigmoidGPU(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + (5 + i))]);
}
}
hipError_t cudaYoloLayerV3(const void* input, void* output, const uint& batchSize, const uint& gridSize,
const uint& numOutputClasses, const uint& numBBoxes,
uint64_t outputSize, hipStream_t stream)
{
dim3 threads_per_block(16, 16, 4);
dim3 number_of_blocks((gridSize / threads_per_block.x) + 1,
(gridSize / threads_per_block.y) + 1,
(numBBoxes / threads_per_block.z) + 1);
for (int batch = 0; batch < batchSize; ++batch)
{
hipLaunchKernelGGL(( gpuYoloLayerV3), dim3(number_of_blocks), dim3(threads_per_block), 0, stream,
reinterpret_cast<const float*>(input) + (batch * outputSize),
reinterpret_cast<float*>(output) + (batch * outputSize), gridSize, numOutputClasses,
numBBoxes);
}
return hipGetLastError();
}
|
100991be592c9465c21e6730dc07ce90d770cada.cu
|
#include "yoloPlugin.h"
#include "kernel.h"
#include <cstring>
#include <cublas_v2.h>
#include <cudnn.h>
#include <iostream>
#include <sstream>
#include <cuda.h>
#include <cuda_runtime.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
inline __device__ float sigmoidGPU(const float& x) { return 1.0f / (1.0f + __expf(-x)); }
__global__ void gpuYoloLayerV3(const float* input, float* output, const uint gridSize, const uint numOutputClasses,
const uint numBBoxes)
{
uint x_id = blockIdx.x * blockDim.x + threadIdx.x;
uint y_id = blockIdx.y * blockDim.y + threadIdx.y;
uint z_id = blockIdx.z * blockDim.z + threadIdx.z;
if ((x_id >= gridSize) || (y_id >= gridSize) || (z_id >= numBBoxes))
{
return;
}
const int numGridCells = gridSize * gridSize;
const int bbindex = y_id * gridSize + x_id;
output[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 0)]
= sigmoidGPU(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 0)]);
output[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 1)]
= sigmoidGPU(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 1)]);
output[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 2)]
= __expf(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 2)]);
output[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 3)]
= __expf(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 3)]);
output[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 4)]
= sigmoidGPU(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 4)]);
for (uint i = 0; i < numOutputClasses; ++i)
{
output[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + (5 + i))]
= sigmoidGPU(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + (5 + i))]);
}
}
cudaError_t cudaYoloLayerV3(const void* input, void* output, const uint& batchSize, const uint& gridSize,
const uint& numOutputClasses, const uint& numBBoxes,
uint64_t outputSize, cudaStream_t stream)
{
dim3 threads_per_block(16, 16, 4);
dim3 number_of_blocks((gridSize / threads_per_block.x) + 1,
(gridSize / threads_per_block.y) + 1,
(numBBoxes / threads_per_block.z) + 1);
for (int batch = 0; batch < batchSize; ++batch)
{
gpuYoloLayerV3<<<number_of_blocks, threads_per_block, 0, stream>>>(
reinterpret_cast<const float*>(input) + (batch * outputSize),
reinterpret_cast<float*>(output) + (batch * outputSize), gridSize, numOutputClasses,
numBBoxes);
}
return cudaGetLastError();
}
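// Illustrative usage sketch (names and sizes below are assumptions for the example,
// not part of this plugin). The launch configuration uses ceiling-style division,
// e.g. a 19x19 grid with 16x16x4 threads gives (19/16)+1 = 2 blocks per spatial axis
// and (3/4)+1 = 1 block for 3 anchor boxes. A call for an 80-class YOLOv3 head could
// look like:
//
//   const uint gridSize = 19, numBBoxes = 3, numClasses = 80;
//   const uint64_t outputSize = gridSize * gridSize * numBBoxes * (5 + numClasses);
//   cudaYoloLayerV3(d_input, d_output, batchSize, gridSize, numClasses, numBBoxes,
//                   outputSize, stream);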
|
e85bad48cfbeec074ce1d12d2805358756e0d83b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "header.h"
// ================== Function: gpu_setUpGraph ====================
// initializes the distance to every unvisited vertex as infinity
// marks all vertices as not visited
// sets the distance from each vertex to itself to 0
// all done in multiple cores / threads
__global__ void gpu_setUpGraph(float *result, bool *visited) {
// Initialize all distances as INFINITE and visited[] as false
int index = threadIdx.x + blockIdx.x * blockDim.x;
visited[index] = false;
if(index == ((blockDim.x * blockIdx.x) + blockIdx.x))
result[index] = 0; // distance to itself is always 0
else result[index] = INT_MAX;
}
// ================== Function: gpu_dijkstra ====================
// performs Dijkstra's algorithm for every vertex in the graph in separate cores
__global__ void gpu_dijkstra(float *graph, float *result, bool* visited, int V) {
// Find shortest path for all vertices
for (int count = 0; count < V-1; count++)
{
// Pick the minimum distance vertex from the set of vertices not
// yet processed.
int min = INT_MAX, u;
for (int v = 0; v < V; v++)
if (visited[(V * blockIdx.x) + v] == false && result[(V *blockIdx.x) + v] <= min)
min = result[(V * blockIdx.x) + v], u = v;
// Mark the picked vertex as processed
visited[(V * blockIdx.x) + u] = true;
// Update the weight value
for (int v = 0; v < V; v++) {
// Update only if v is not yet visited, there is an edge from
// u to v, and the total weight of the path from src to v through u is
// smaller than the current value
if (!visited[(V * blockIdx.x) + v] && graph[(u*V) + v] && result[(V * blockIdx.x) + u] != INT_MAX
&& result[(V * blockIdx.x) + u] + graph[(u*V) + v] < result[(V * blockIdx.x) + v])
result[(V * blockIdx.x) + v] = result[(V*blockIdx.x) + u] + graph[(u*V) + v];
}
}
}
// // ================== Function: gpu_dijkstra_multi_threaded ==================== (NOT WORKING)
// // performs Dijkstra's algorithm for every vertex in the graph in separate cores
// __global__ void gpu_dijkstra_multi_threaded(float *graph, float *result, bool* visited, int V) {
// // Find shortest path for all vertices
// for (int count = 0; count < V-1; count++)
// {
// // Pick the minimum distance vertex from the set of vertices not
// // yet processed.
// int min = INT_MAX, u;
// if (visited[(V * blockIdx.x) + threadIdx.x] == false && result[(V *blockIdx.x) + threadIdx.x] <= min)
// min = result[(V * blockIdx.x) + threadIdx.x], u = threadIdx.x;
// // Mark the picked vertex as processed
// visited[(V * blockIdx.x) + u] = true;
// __syncthreads();
// // Update the weight value
// // Update only if is not in visited, there is an edge from
// // u to v, and total weight of path from src to v through u is
// // smaller than current value
// if (!visited[(V * blockIdx.x) + threadIdx.x] && graph[(u*V) + threadIdx.x] && result[(V * blockIdx.x) + u] != INT_MAX
// && result[(V * blockIdx.x) + u] + graph[(u*V) + threadIdx.x] < result[(V * blockIdx.x) + threadIdx.x])
// result[(V * blockIdx.x) + threadIdx.x] = result[(V*blockIdx.x) + u] + graph[(u*V) + threadIdx.x];
// __syncthreads();
// }
// }
|
e85bad48cfbeec074ce1d12d2805358756e0d83b.cu
|
#include "header.h"
// ================== Function: gpu_setUpGraph ====================
// initializes the distance to every unvisited vertex as infinity
// marks all vertices as not visited
// sets the distance from each vertex to itself to 0
// all done in multiple cores / threads
__global__ void gpu_setUpGraph(float *result, bool *visited) {
// Initialize all distances as INFINITE and visited[] as false
int index = threadIdx.x + blockIdx.x * blockDim.x;
visited[index] = false;
if(index == ((blockDim.x * blockIdx.x) + blockIdx.x))
result[index] = 0; // distance to itself is always 0
else result[index] = INT_MAX;
}
// ================== Function: gpu_dijkstra ====================
// performs Dijkstra's algorithm for every vertex in the graph in separate cores
__global__ void gpu_dijkstra(float *graph, float *result, bool* visited, int V) {
// Find shortest path for all vertices
for (int count = 0; count < V-1; count++)
{
// Pick the minimum distance vertex from the set of vertices not
// yet processed.
int min = INT_MAX, u;
for (int v = 0; v < V; v++)
if (visited[(V * blockIdx.x) + v] == false && result[(V *blockIdx.x) + v] <= min)
min = result[(V * blockIdx.x) + v], u = v;
// Mark the picked vertex as processed
visited[(V * blockIdx.x) + u] = true;
// Update the weight value
for (int v = 0; v < V; v++) {
// Update only if v is not yet visited, there is an edge from
// u to v, and the total weight of the path from src to v through u is
// smaller than the current value
if (!visited[(V * blockIdx.x) + v] && graph[(u*V) + v] && result[(V * blockIdx.x) + u] != INT_MAX
&& result[(V * blockIdx.x) + u] + graph[(u*V) + v] < result[(V * blockIdx.x) + v])
result[(V * blockIdx.x) + v] = result[(V*blockIdx.x) + u] + graph[(u*V) + v];
}
}
}
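// Illustrative host-side launch sketch (d_graph, d_result, d_visited and V are
// assumptions for the example: a V*V adjacency matrix and V*V work buffers already
// allocated with cudaMalloc, with V <= 1024 so one block can hold V threads):
//
//   gpu_setUpGraph<<<V, V>>>(d_result, d_visited);            // one block per source vertex
//   gpu_dijkstra<<<V, 1>>>(d_graph, d_result, d_visited, V);  // V single-thread blocks
//   cudaMemcpy(h_result, d_result, V * V * sizeof(float), cudaMemcpyDeviceToHost);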
// // ================== Function: gpu_dijkstra_multi_threaded ==================== (NOT WORKING)
// // performs Dijkstra's algorithm for every vertex in the graph in separate cores
// __global__ void gpu_dijkstra_multi_threaded(float *graph, float *result, bool* visited, int V) {
// // Find shortest path for all vertices
// for (int count = 0; count < V-1; count++)
// {
// // Pick the minimum distance vertex from the set of vertices not
// // yet processed.
// int min = INT_MAX, u;
// if (visited[(V * blockIdx.x) + threadIdx.x] == false && result[(V *blockIdx.x) + threadIdx.x] <= min)
// min = result[(V * blockIdx.x) + threadIdx.x], u = threadIdx.x;
// // Mark the picked vertex as processed
// visited[(V * blockIdx.x) + u] = true;
// __syncthreads();
// // Update the weight value
// // Update only if is not in visited, there is an edge from
// // u to v, and total weight of path from src to v through u is
// // smaller than current value
// if (!visited[(V * blockIdx.x) + threadIdx.x] && graph[(u*V) + threadIdx.x] && result[(V * blockIdx.x) + u] != INT_MAX
// && result[(V * blockIdx.x) + u] + graph[(u*V) + threadIdx.x] < result[(V * blockIdx.x) + threadIdx.x])
// result[(V * blockIdx.x) + threadIdx.x] = result[(V*blockIdx.x) + u] + graph[(u*V) + threadIdx.x];
// __syncthreads();
// }
// }
|
e5859c7b2dad80ec4fead91fb71a8660ee5ccb64.hip
|
// !!! This is a file automatically generated by hipify!!!
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 1993-2011, NVIDIA Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "row_filter.h"
namespace filter
{
template void linearRow<ushort4, float4>(PtrStepSzb src, PtrStepSzb dst, const float* kernel, int ksize, int anchor, int brd_type, int cc, hipStream_t stream);
}
#endif /* CUDA_DISABLER */
|
e5859c7b2dad80ec4fead91fb71a8660ee5ccb64.cu
|
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 1993-2011, NVIDIA Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "row_filter.h"
namespace filter
{
template void linearRow<ushort4, float4>(PtrStepSzb src, PtrStepSzb dst, const float* kernel, int ksize, int anchor, int brd_type, int cc, cudaStream_t stream);
}
#endif /* CUDA_DISABLER */
|
09c0a7ad6f1cd7e87cc9ebba6df8ec98d738246a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*$Id: MarsSort.cu 721 2009-11-10 10:23:55Z wenbinor $*/
/**
*This is the source code for Mars, a MapReduce framework on graphics
*processors.
*Developers: Wenbin Fang (HKUST), Bingsheng He (Microsoft Research Asia)
*Naga K. Govindaraju (Microsoft Corp.), Qiong Luo (HKUST), Tuyong Wang (Sina.com).
*If you have any question on the code, please contact us at
* [email protected] or [email protected]
*
*The license is a free non-exclusive, non-transferable license to reproduce,
*use, modify and display the source code version of the Software, with or
*without modifications solely for non-commercial research, educational or
*evaluation purposes. The license does not entitle Licensee to technical support,
*telephone assistance, enhancements or updates to the Software. All rights, title
*to and ownership interest in Mars, including all intellectual property rights
*therein shall remain in HKUST.
*/
#ifndef _SORT_CU_
#define _SORT_CU_
#include "MarsInc.h"
#include "compare.cu"
#define NUM_BLOCK_PER_CHUNK_BITONIC_SORT 512//b256
#define SHARED_MEM_INT2 256
#define NUM_BLOCKS_CHUNK 256//(512)
#define NUM_THREADS_CHUNK 256//(256)
#define CHUNK_SIZE (NUM_BLOCKS_CHUNK*NUM_THREADS_CHUNK)
#define NUM_CHUNKS_R (NUM_RECORDS_R/CHUNK_SIZE)
__device__ int getCompareValue(void *d_rawData, cmp_type_t value1, cmp_type_t value2)
{
int compareValue=0;
int v1=value1.x;
int v2=value2.x;
if((v1==-1) || (v2==-1))
{
if(v1==v2)
compareValue=0;
else
if(v1==-1)
compareValue=-1;
else
compareValue=1;
}
else
compareValue=compare((void*)(((char*)d_rawData)+v1), value1.y, (void*)(((char*)d_rawData)+v2), value2.y);
return compareValue;
}
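/* Note (an interpretation of the code): each cmp_type_t carries the byte offset of a
 * record inside d_rawData in .x and the value passed to compare() (apparently the
 * record size) in .y. An offset of -1 is a padding sentinel that getCompareValue
 * orders before every real record, which is how the bitonic kernels below pad
 * partially filled chunks up to a power-of-two size. */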
void * s_qsRawData=NULL;
__global__ void
partBitonicSortKernel( void* d_rawData, int totalLenInBytes,cmp_type_t* d_R, unsigned int numRecords, int chunkIdx, int unitSize)
{
__shared__ cmp_type_t shared[NUM_THREADS_CHUNK];
int tx = threadIdx.x;
int bx = blockIdx.x;
//load the data
int dataIdx = chunkIdx*CHUNK_SIZE+bx*blockDim.x+tx;
int unitIdx = ((NUM_BLOCKS_CHUNK*chunkIdx + bx)/unitSize)&1;
shared[tx] = d_R[dataIdx];
__syncthreads();
int ixj=0;
int a=0;
cmp_type_t temp1;
cmp_type_t temp2;
int k = NUM_THREADS_CHUNK;
if(unitIdx == 0)
{
for (int j = (k>>1); j>0; j =(j>>1))
{
ixj = tx ^ j;
//a = (shared[tx].y - shared[ixj].y);
temp1=shared[tx];
temp2= shared[ixj];
if (ixj > tx) {
//a=temp1.y-temp2.y;
//a=compareString((void*)(((char4*)d_rawData)+temp1.x),(void*)(((char4*)d_rawData)+temp2.x));
a=getCompareValue(d_rawData, temp1, temp2);
if ((tx & k) == 0) {
if ( (a>0)) {
shared[tx]=temp2;
shared[ixj]=temp1;
}
}
else {
if ( (a<0)) {
shared[tx]=temp2;
shared[ixj]=temp1;
}
}
}
__syncthreads();
}
}
else
{
for (int j = (k>>1); j>0; j =(j>>1))
{
ixj = tx ^ j;
temp1=shared[tx];
temp2= shared[ixj];
if (ixj > tx) {
//a=temp1.y-temp2.y;
//a=compareString((void*)(((char4*)d_rawData)+temp1.x),(void*)(((char4*)d_rawData)+temp2.x));
a=getCompareValue(d_rawData, temp1, temp2);
if ((tx & k) == 0) {
if( (a<0))
{
shared[tx]=temp2;
shared[ixj]=temp1;
}
}
else {
if( (a>0))
{
shared[tx]=temp2;
shared[ixj]=temp1;
}
}
}
__syncthreads();
}
}
d_R[dataIdx] = shared[tx];
}
__global__ void
unitBitonicSortKernel(void* d_rawData, int totalLenInBytes, cmp_type_t* d_R, unsigned int numRecords, int chunkIdx )
{
__shared__ cmp_type_t shared[NUM_THREADS_CHUNK];
int tx = threadIdx.x;
int bx = blockIdx.x;
int unitIdx = (NUM_BLOCKS_CHUNK*chunkIdx + bx)&1;
//load the data
int dataIdx = chunkIdx*CHUNK_SIZE+bx*blockDim.x+tx;
shared[tx] = d_R[dataIdx];
__syncthreads();
cmp_type_t temp1;
cmp_type_t temp2;
int ixj=0;
int a=0;
if(unitIdx == 0)
{
for (int k = 2; k <= NUM_THREADS_CHUNK; (k =k<<1))
{
// bitonic merge:
for (int j = (k>>1); j>0; (j=j>>1))
{
ixj = tx ^ j;
temp1=shared[tx];
temp2= shared[ixj];
if (ixj > tx) {
//a=temp1.y-temp2.y;
//a=compareString((void*)(((char4*)d_rawData)+temp1.x),(void*)(((char4*)d_rawData)+temp2.x));
a=getCompareValue(d_rawData, temp1, temp2);
if ((tx & k) == 0) {
if ( (a>0)) {
shared[tx]=temp2;
shared[ixj]=temp1;
}
}
else {
if ( (a<0)) {
shared[tx]=temp2;
shared[ixj]=temp1;
}
}
}
__syncthreads();
}
}
}
else
{
for (int k = 2; k <= NUM_THREADS_CHUNK; (k =k<<1))
{
// bitonic merge:
for (int j = (k>>1); j>0; (j=j>>1))
{
ixj = tx ^ j;
temp1=shared[tx];
temp2= shared[ixj];
if (ixj > tx) {
//a=temp1.y-temp2.y;
//a=compareString((void*)(((char4*)d_rawData)+temp1.x),(void*)(((char4*)d_rawData)+temp2.x));
a=getCompareValue(d_rawData, temp1, temp2);
if ((tx & k) == 0) {
if( (a<0))
{
shared[tx]=temp2;
shared[ixj]=temp1;
}
}
else {
if( (a>0))
{
shared[tx]=temp2;
shared[ixj]=temp1;
}
}
}
__syncthreads();
}
}
}
d_R[dataIdx] = shared[tx];
}
__global__ void
bitonicKernel( void* d_rawData, int totalLenInBytes, cmp_type_t* d_R, unsigned int numRecords, int k, int j)
{
int bx = blockIdx.x;
int by = blockIdx.y;
int tid = threadIdx.x;
int dataIdx = by*gridDim.x*blockDim.x + bx*blockDim.x + tid;
int ixj = dataIdx^j;
if( ixj > dataIdx )
{
cmp_type_t tmpR = d_R[dataIdx];
cmp_type_t tmpIxj = d_R[ixj];
if( (dataIdx&k) == 0 )
{
//if( tmpR.y > tmpIxj.y )
//if(compareString((void*)(((char4*)d_rawData)+tmpR.x),(void*)(((char4*)d_rawData)+tmpIxj.x))==1)
if(getCompareValue(d_rawData, tmpR, tmpIxj)==1)
{
d_R[dataIdx] = tmpIxj;
d_R[ixj] = tmpR;
}
}
else
{
//if( tmpR.y < tmpIxj.y )
//if(compareString((void*)(((char4*)d_rawData)+tmpR.x),(void*)(((char4*)d_rawData)+tmpIxj.x))==-1)
if(getCompareValue(d_rawData, tmpR, tmpIxj)==-1)
{
d_R[dataIdx] = tmpIxj;
d_R[ixj] = tmpR;
}
}
}
}
__device__ inline void swap(cmp_type_t & a, cmp_type_t & b)
{
// Alternative swap doesn't use a temporary register:
// a ^= b;
// b ^= a;
// a ^= b;
cmp_type_t tmp = a;
a = b;
b = tmp;
}
__global__ void bitonicSortSingleBlock_kernel(void* d_rawData, int totalLenInBytes, cmp_type_t * d_values, int rLen, cmp_type_t* d_output)
{
__shared__ cmp_type_t bs_cmpbuf[SHARED_MEM_INT2];
//const int by = blockIdx.y;
//const int bx = blockIdx.x;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int tid=tx+ty*blockDim.x;
//const int bid=bx+by*gridDim.x;
//const int numThread=blockDim.x;
//const int resultID=(bx)*numThread+tid;
if(tid<rLen)
{
bs_cmpbuf[tid] = d_values[tid];
}
else
{
bs_cmpbuf[tid].x =-1;
}
__syncthreads();
// Parallel bitonic sort.
int compareValue=0;
for (int k = 2; k <= SHARED_MEM_INT2; k *= 2)
{
// Bitonic merge:
for (int j = k / 2; j>0; j /= 2)
{
int ixj = tid ^ j;
if (ixj > tid)
{
if ((tid & k) == 0)
{
compareValue=getCompareValue(d_rawData, bs_cmpbuf[tid], bs_cmpbuf[ixj]);
//if (shared[tid] > shared[ixj])
if(compareValue>0)
{
swap(bs_cmpbuf[tid], bs_cmpbuf[ixj]);
}
}
else
{
compareValue=getCompareValue(d_rawData, bs_cmpbuf[tid], bs_cmpbuf[ixj]);
//if (shared[tid] < shared[ixj])
if(compareValue<0)
{
swap(bs_cmpbuf[tid], bs_cmpbuf[ixj]);
}
}
}
__syncthreads();
}
}
// Write result.
/*if(tid<rLen)
{
d_output[tid] = bs_cmpbuf[tid+SHARED_MEM_INT2-rLen];
}*/
int startCopy=SHARED_MEM_INT2-rLen;
if(tid>=startCopy)
{
d_output[tid-startCopy]=bs_cmpbuf[tid];
}
}
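/* Note: the block sorts exactly SHARED_MEM_INT2 (256) keys, so a chunk with
 * rLen < 256 is padded with x = -1 sentinels. Because the sentinels compare as
 * smaller than any real key, the ascending sort pushes them to the front, and only
 * the last rLen entries (tid >= SHARED_MEM_INT2 - rLen) are copied back. For
 * example, with rLen = 100 the real records end up in slots 156..255 and are
 * written to d_output[0..99]. */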
__global__ void bitonicSortMultipleBlocks_kernel(void* d_rawData, int totalLenInBytes, cmp_type_t * d_values, int* d_bound, int startBlock, int numBlock, cmp_type_t *d_output)
{
__shared__ int bs_pStart;
__shared__ int bs_pEnd;
__shared__ int bs_numElement;
__shared__ cmp_type_t bs_shared[SHARED_MEM_INT2];
const int by = blockIdx.y;
const int bx = blockIdx.x;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int tid=tx+ty*blockDim.x;
const int bid=bx+by*gridDim.x;
//const int numThread=blockDim.x;
//const int resultID=(bx)*numThread+tid;
if(bid>=numBlock) return;
if(tid==0)
{
bs_pStart=d_bound[(bid+startBlock)<<1];
bs_pEnd=d_bound[((bid+startBlock)<<1)+1];
bs_numElement=bs_pEnd-bs_pStart;
//if(bid==82&& bs_pStart==6339)
// printf("%d, %d, %d\n", bs_pStart, bs_pEnd, bs_numElement);
}
__syncthreads();
// Copy input to shared mem.
if(tid<bs_numElement)
{
bs_shared[tid] = d_values[tid+bs_pStart];
//if(bid==82 && bs_pStart==6339)
// printf("tid %d, pos, %d, %d, %d, %d\n", tid,tid+bs_pStart, bs_pStart,bs_pEnd, d_values[tid+bs_pStart].x);
//if(6342==tid+bs_pStart)
// printf(")))tid %d, pos, %d, %d, %d, %d\n", tid,tid+bs_pStart, bs_pStart,bs_pEnd, d_values[tid+bs_pStart].x);
}
else
{
bs_shared[tid].x =-1;
}
__syncthreads();
// Parallel bitonic sort.
int compareValue=0;
for (int k = 2; k <= SHARED_MEM_INT2; k *= 2)
{
// Bitonic merge:
for (int j = k / 2; j>0; j /= 2)
{
int ixj = tid ^ j;
if (ixj > tid)
{
if ((tid & k) == 0)
{
compareValue=getCompareValue(d_rawData, bs_shared[tid], bs_shared[ixj]);
//if (shared[tid] > shared[ixj])
if(compareValue>0)
{
swap(bs_shared[tid], bs_shared[ixj]);
}
}
else
{
compareValue=getCompareValue(d_rawData, bs_shared[tid], bs_shared[ixj]);
//if (shared[tid] < shared[ixj])
if(compareValue<0)
{
swap(bs_shared[tid], bs_shared[ixj]);
}
}
}
__syncthreads();
}
}
// Write result.
//if(tid<bs_numElement)
//{
// d_output[tid+bs_pStart] = bs_shared[tid+SHARED_MEM_INT2-bs_numElement];
//}
//int startCopy=SHARED_MEM_INT2-bs_numElement;
if(tid>=bs_numElement)
{
d_output[tid-bs_numElement]=bs_shared[tid];
}
}
__global__ void initialize_kernel(cmp_type_t* d_data, int startPos, int rLen, cmp_type_t value)
{
const int by = blockIdx.y;
const int bx = blockIdx.x;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int tid=tx+ty*blockDim.x;
const int bid=bx+by*gridDim.x;
const int numThread=blockDim.x;
const int resultID=(bid)*numThread+tid;
int pos=startPos+resultID;
if(pos<rLen)
d_data[pos]=value;
}
void bitonicSortMultipleBlocks(void* d_rawData, int totalLenInBytes, cmp_type_t * d_values, int* d_bound, int numBlock, cmp_type_t * d_output)
{
int numThreadsPerBlock_x=SHARED_MEM_INT2;
int numThreadsPerBlock_y=1;
int numBlock_x=NUM_BLOCK_PER_CHUNK_BITONIC_SORT;
int numBlock_y=1;
int numChunk=numBlock/numBlock_x;
if(numBlock%numBlock_x!=0)
numChunk++;
dim3 thread( numThreadsPerBlock_x, numThreadsPerBlock_y, 1);
dim3 grid( numBlock_x, numBlock_y , 1);
int i=0;
int start=0;
int end=0;
for(i=0;i<numChunk;i++)
{
start=i*numBlock_x;
end=start+numBlock_x;
if(end>numBlock)
end=numBlock;
//printf("bitonicSortMultipleBlocks_kernel: %d, range, %d, %d\n", i, start, end);
hipLaunchKernelGGL(( bitonicSortMultipleBlocks_kernel), dim3(grid),dim3(thread), 0, 0, d_rawData, totalLenInBytes, d_values, d_bound, start, end-start, d_output);
hipDeviceSynchronize();
}
// hipDeviceSynchronize();
}
void bitonicSortSingleBlock(void* d_rawData, int totalLenInBytes, cmp_type_t * d_values, int rLen, cmp_type_t * d_output)
{
int numThreadsPerBlock_x=SHARED_MEM_INT2;
int numThreadsPerBlock_y=1;
int numBlock_x=1;
int numBlock_y=1;
dim3 thread( numThreadsPerBlock_x, numThreadsPerBlock_y, 1);
dim3 grid( numBlock_x, numBlock_y , 1);
hipLaunchKernelGGL(( bitonicSortSingleBlock_kernel), dim3(grid),dim3(thread), 0, 0, d_rawData, totalLenInBytes, d_values, rLen, d_output);
hipDeviceSynchronize();
}
void initialize(cmp_type_t *d_data, int rLen, cmp_type_t value)
{
int numThreadsPerBlock_x=512;
int numThreadsPerBlock_y=1;
int numBlock_x=512;
int numBlock_y=1;
int chunkSize=numBlock_x*numThreadsPerBlock_x;
int numChunk=rLen/chunkSize;
if(rLen%chunkSize!=0)
numChunk++;
dim3 thread( numThreadsPerBlock_x, numThreadsPerBlock_y, 1);
dim3 grid( numBlock_x, numBlock_y , 1);
int i=0;
int start=0;
int end=0;
for(i=0;i<numChunk;i++)
{
start=i*chunkSize;
end=start+chunkSize;
if(end>rLen)
end=rLen;
hipLaunchKernelGGL(( initialize_kernel), dim3(grid),dim3(thread), 0, 0, d_data, start, rLen, value);
}
hipDeviceSynchronize();
}
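/* Worked example of the chunked launch pattern used by initialize() and the other
 * host wrappers in this file: with numBlock_x = 512 and numThreadsPerBlock_x = 512,
 * each launch covers chunkSize = 512 * 512 = 262144 positions, so rLen = 1,000,000
 * needs numChunk = 4 launches; the final, partially filled chunk is handled by the
 * pos < rLen guard inside the kernel.
 */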
void bitonicSortGPU(void* d_rawData, int totalLenInBytes, cmp_type_t* d_Rin, int rLen, void *d_Rout)
{
unsigned int numRecordsR;
unsigned int size = rLen;
unsigned int level = 0;
while( size != 1 )
{
size = size/2;
level++;
}
if( (1<<level) < rLen )
{
level++;
}
numRecordsR = (1<<level);
if(rLen<=NUM_THREADS_CHUNK)
{
bitonicSortSingleBlock((void*)d_rawData, totalLenInBytes, d_Rin, rLen, (cmp_type_t*)d_Rout);
}
else
if( rLen <= 256*1024 )
{
//unsigned int numRecordsR = rLen;
unsigned int numThreadsSort = NUM_THREADS_CHUNK;
if(numRecordsR<NUM_THREADS_CHUNK)
numRecordsR=NUM_THREADS_CHUNK;
unsigned int numBlocksXSort = numRecordsR/numThreadsSort;
unsigned int numBlocksYSort = 1;
dim3 gridSort( numBlocksXSort, numBlocksYSort );
unsigned int memSizeRecordsR = sizeof( cmp_type_t ) * numRecordsR;
//copy the <offset, length> pairs.
cmp_type_t* d_R;
CUDA_SAFE_CALL( hipMalloc( (void**) &d_R, memSizeRecordsR) );
cmp_type_t tempValue;
tempValue.x=tempValue.y=-1;
initialize(d_R, numRecordsR, tempValue);
CUDA_SAFE_CALL( hipMemcpy( d_R, d_Rin, rLen*sizeof(cmp_type_t), hipMemcpyDeviceToDevice) );
for( int k = 2; k <= numRecordsR; k *= 2 )
{
for( int j = k/2; j > 0; j /= 2 )
{
hipLaunchKernelGGL(( bitonicKernel), dim3(gridSort), dim3(numThreadsSort), 0, 0, (void*)d_rawData, totalLenInBytes, d_R, numRecordsR, k, j);
}
}
CUDA_SAFE_CALL( hipMemcpy( d_Rout, d_R+(numRecordsR-rLen), sizeof(cmp_type_t)*rLen, hipMemcpyDeviceToDevice) );
hipFree( d_R );
hipDeviceSynchronize();
}
else
{
unsigned int numThreadsSort = NUM_THREADS_CHUNK;
unsigned int numBlocksYSort = 1;
unsigned int numBlocksXSort = (numRecordsR/numThreadsSort)/numBlocksYSort;
if(numBlocksXSort>=(1<<16))
{
numBlocksXSort=(1<<15);
numBlocksYSort=(numRecordsR/numThreadsSort)/numBlocksXSort;
}
unsigned int numBlocksChunk = NUM_BLOCKS_CHUNK;
unsigned int numThreadsChunk = NUM_THREADS_CHUNK;
unsigned int chunkSize = numBlocksChunk*numThreadsChunk;
unsigned int numChunksR = numRecordsR/chunkSize;
dim3 gridSort( numBlocksXSort, numBlocksYSort );
unsigned int memSizeRecordsR = sizeof( cmp_type_t ) * numRecordsR;
cmp_type_t* d_R;
CUDA_SAFE_CALL( hipMalloc( (void**) &d_R, memSizeRecordsR) );
cmp_type_t tempValue;
tempValue.x=tempValue.y=-1;
initialize(d_R, numRecordsR, tempValue);
CUDA_SAFE_CALL( hipMemcpy( d_R, d_Rin, rLen*sizeof(cmp_type_t), hipMemcpyDeviceToDevice) );
for( int chunkIdx = 0; chunkIdx < numChunksR; chunkIdx++ )
{
hipLaunchKernelGGL(( unitBitonicSortKernel), dim3(numBlocksChunk), dim3(numThreadsChunk), 0, 0, (void*)d_rawData, totalLenInBytes, d_R, numRecordsR, chunkIdx );
}
int j;
for( int k = numThreadsChunk*2; k <= numRecordsR; k *= 2 )
{
for( j = k/2; j > numThreadsChunk/2; j /= 2 )
{
hipLaunchKernelGGL(( bitonicKernel), dim3(gridSort), dim3(numThreadsSort), 0, 0, (void*)d_rawData, totalLenInBytes, d_R, numRecordsR, k, j);
}
for( int chunkIdx = 0; chunkIdx < numChunksR; chunkIdx++ )
{
hipLaunchKernelGGL(( partBitonicSortKernel), dim3(numBlocksChunk), dim3(numThreadsChunk), 0, 0, (void*)d_rawData, totalLenInBytes, d_R, numRecordsR, chunkIdx, k/numThreadsSort );
}
}
CUDA_SAFE_CALL( hipMemcpy( d_Rout, d_R+(numRecordsR-rLen), sizeof(cmp_type_t)*rLen, hipMemcpyDeviceToDevice) );
hipFree( d_R );
hipDeviceSynchronize();
}
}
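/* A minimal sketch of the rounding performed at the top of bitonicSortGPU, written as a
 * stand-alone helper for illustration (nextPowerOfTwo is a hypothetical name; the function
 * above computes the same value inline via the level counter and does not call this).
 * For example, nextPowerOfTwo(1797) == 2048, so 2048 - 1797 = 251 padding slots are filled
 * with x = y = -1 sentinels by initialize(); those sentinels sort to the front and are
 * skipped by copying the result from d_R + (numRecordsR - rLen).
 */
static unsigned int nextPowerOfTwo(unsigned int n)
{
	unsigned int p = 1;
	while (p < n)
		p = p << 1;
	return p;
}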
__global__ void getIntYArray_kernel(int2* d_input, int startPos, int rLen, int* d_output)
{
const int by = blockIdx.y;
const int bx = blockIdx.x;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int tid=tx+ty*blockDim.x;
const int bid=bx+by*gridDim.x;
const int numThread=blockDim.x;
const int resultID=(bid)*numThread+tid;
int pos=startPos+resultID;
if(pos<rLen)
{
int2 value=d_input[pos];
d_output[pos]=value.y;
}
}
__global__ void getXYArray_kernel(cmp_type_t* d_input, int startPos, int rLen, int2* d_output)
{
const int by = blockIdx.y;
const int bx = blockIdx.x;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int tid=tx+ty*blockDim.x;
const int bid=bx+by*gridDim.x;
const int numThread=blockDim.x;
const int resultID=(bid)*numThread+tid;
int pos=startPos+resultID;
if(pos<rLen)
{
cmp_type_t value=d_input[pos];
d_output[pos].x=value.x;
d_output[pos].y=value.y;
}
}
__global__ void getZWArray_kernel(cmp_type_t* d_input, int startPos, int rLen, int2* d_output)
{
const int by = blockIdx.y;
const int bx = blockIdx.x;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int tid=tx+ty*blockDim.x;
const int bid=bx+by*gridDim.x;
const int numThread=blockDim.x;
const int resultID=(bid)*numThread+tid;
int pos=startPos+resultID;
if(pos<rLen)
{
cmp_type_t value=d_input[pos];
d_output[pos].x=value.z;
d_output[pos].y=value.w;
}
}
__global__ void setXYArray_kernel(cmp_type_t* d_input, int startPos, int rLen, int2* d_value)
{
const int by = blockIdx.y;
const int bx = blockIdx.x;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int tid=tx+ty*blockDim.x;
const int bid=bx+by*gridDim.x;
const int numThread=blockDim.x;
const int resultID=(bid)*numThread+tid;
int pos=startPos+resultID;
if(pos<rLen)
{
cmp_type_t value=d_input[pos];
value.x=d_value[pos].x;
value.y=d_value[pos].y;
d_input[pos]=value;
}
}
__global__ void setZWArray_kernel(cmp_type_t* d_input, int startPos, int rLen, int2* d_value)
{
const int by = blockIdx.y;
const int bx = blockIdx.x;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int tid=tx+ty*blockDim.x;
const int bid=bx+by*gridDim.x;
const int numThread=blockDim.x;
const int resultID=(bid)*numThread+tid;
int pos=startPos+resultID;
if(pos<rLen)
{
cmp_type_t value=d_input[pos];
value.z=d_value[pos].x;
value.w=d_value[pos].y;
d_input[pos]=value;
}
}
void getIntYArray(int2 *d_data, int rLen, int* d_output)
{
int numThreadsPerBlock_x=512;
int numThreadsPerBlock_y=1;
int numBlock_x=512;
int numBlock_y=1;
int chunkSize=numBlock_x*numThreadsPerBlock_x;
int numChunk=rLen/chunkSize;
if(rLen%chunkSize!=0)
numChunk++;
dim3 thread( numThreadsPerBlock_x, numThreadsPerBlock_y, 1);
dim3 grid( numBlock_x, numBlock_y , 1);
int i=0;
int start=0;
int end=0;
for(i=0;i<numChunk;i++)
{
start=i*chunkSize;
end=start+chunkSize;
if(end>rLen)
end=rLen;
hipLaunchKernelGGL(( getIntYArray_kernel), dim3(grid),dim3(thread), 0, 0, d_data, start, rLen, d_output);
}
hipDeviceSynchronize();
}
void getXYArray(cmp_type_t *d_data, int rLen, int2* d_output)
{
int numThreadsPerBlock_x=512;
int numThreadsPerBlock_y=1;
int numBlock_x=512;
int numBlock_y=1;
int chunkSize=numBlock_x*numThreadsPerBlock_x;
int numChunk=rLen/chunkSize;
if(rLen%chunkSize!=0)
numChunk++;
dim3 thread( numThreadsPerBlock_x, numThreadsPerBlock_y, 1);
dim3 grid( numBlock_x, numBlock_y , 1);
int i=0;
int start=0;
int end=0;
for(i=0;i<numChunk;i++)
{
start=i*chunkSize;
end=start+chunkSize;
if(end>rLen)
end=rLen;
hipLaunchKernelGGL(( getXYArray_kernel), dim3(grid),dim3(thread), 0, 0, d_data, start, rLen, d_output);
}
hipDeviceSynchronize();
}
void getZWArray(cmp_type_t *d_data, int rLen, int2* d_output)
{
int numThreadsPerBlock_x=512;
int numThreadsPerBlock_y=1;
int numBlock_x=512;
int numBlock_y=1;
int chunkSize=numBlock_x*numThreadsPerBlock_x;
int numChunk=rLen/chunkSize;
if(rLen%chunkSize!=0)
numChunk++;
dim3 thread( numThreadsPerBlock_x, numThreadsPerBlock_y, 1);
dim3 grid( numBlock_x, numBlock_y , 1);
int i=0;
int start=0;
int end=0;
for(i=0;i<numChunk;i++)
{
start=i*chunkSize;
end=start+chunkSize;
if(end>rLen)
end=rLen;
hipLaunchKernelGGL(( getZWArray_kernel), dim3(grid),dim3(thread), 0, 0, d_data, start, rLen, d_output);
}
hipDeviceSynchronize();
}
void setXYArray(cmp_type_t *d_data, int rLen, int2* d_value)
{
int numThreadsPerBlock_x=512;
int numThreadsPerBlock_y=1;
int numBlock_x=512;
int numBlock_y=1;
int chunkSize=numBlock_x*numThreadsPerBlock_x;
int numChunk=rLen/chunkSize;
if(rLen%chunkSize!=0)
numChunk++;
dim3 thread( numThreadsPerBlock_x, numThreadsPerBlock_y, 1);
dim3 grid( numBlock_x, numBlock_y , 1);
int i=0;
int start=0;
int end=0;
for(i=0;i<numChunk;i++)
{
start=i*chunkSize;
end=start+chunkSize;
if(end>rLen)
end=rLen;
hipLaunchKernelGGL(( setXYArray_kernel), dim3(grid),dim3(thread), 0, 0, d_data, start, rLen, d_value);
}
hipDeviceSynchronize();
}
void setZWArray(cmp_type_t *d_data, int rLen, int2* d_value)
{
int numThreadsPerBlock_x=512;
int numThreadsPerBlock_y=1;
int numBlock_x=512;
int numBlock_y=1;
int chunkSize=numBlock_x*numThreadsPerBlock_x;
int numChunk=rLen/chunkSize;
if(rLen%chunkSize!=0)
numChunk++;
dim3 thread( numThreadsPerBlock_x, numThreadsPerBlock_y, 1);
dim3 grid( numBlock_x, numBlock_y , 1);
int i=0;
int start=0;
int end=0;
for(i=0;i<numChunk;i++)
{
start=i*chunkSize;
end=start+chunkSize;
if(end>rLen)
end=rLen;
hipLaunchKernelGGL(( setZWArray_kernel), dim3(grid),dim3(thread), 0, 0, d_data, start, rLen, d_value);
}
hipDeviceSynchronize();
}
__global__ void copyChunks_kernel(void *d_source, int startPos, int2* d_Rin, int rLen, int *d_sum, void *d_dest)
{
const int by = blockIdx.y;
const int bx = blockIdx.x;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int tid=tx+ty*blockDim.x;
const int bid=bx+by*gridDim.x;
const int numThread=blockDim.x;
const int resultID=(bid)*numThread+tid;
int pos=startPos+resultID;
if(pos<rLen)
{
int2 value=d_Rin[pos];
int offset=value.x;
int size=value.y;
int startWritePos=d_sum[pos];
int i=0;
char *source=(char*)d_source;
char *dest=(char*)d_dest;
for(i=0;i<size;i++)
{
dest[i+startWritePos]=source[i+offset];
}
value.x=startWritePos;
d_Rin[pos]=value;
}
}
__global__ void getChunkBoundary_kernel(void* d_rawData, int startPos, cmp_type_t *d_Rin,
int rLen, int* d_startArray)
{
const int by = blockIdx.y;
const int bx = blockIdx.x;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int tid=tx+ty*blockDim.x;
const int bid=bx+by*gridDim.x;
const int numThread=blockDim.x;
const int resultID=(bid)*numThread+tid;
int pos=startPos+resultID;
if(pos<rLen)
{
int result=0;
if(pos==0)//the start position
{
result=1;
}
else
{
cmp_type_t cur=d_Rin[pos];
cmp_type_t left=d_Rin[pos-1];
if(getCompareValue(d_rawData, cur, left)!=0)
{
result=1;
}
}
d_startArray[pos]=result;
}
}
__global__ void setBoundaryInt2_kernel(int* d_boundary, int startPos, int numKey, int rLen,
int2* d_boundaryRange)
{
const int by = blockIdx.y;
const int bx = blockIdx.x;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int tid=tx+ty*blockDim.x;
const int bid=bx+by*gridDim.x;
const int numThread=blockDim.x;
const int resultID=(bid)*numThread+tid;
int pos=startPos+resultID;
if(pos<numKey)
{
int2 flag;
flag.x=d_boundary[pos];
if((pos+1)!=numKey)
flag.y=d_boundary[pos+1];
else
flag.y=rLen;
d_boundaryRange[pos]=flag;
}
}
__global__ void writeBoundary_kernel(int startPos, int rLen, int* d_startArray,
int* d_startSumArray, int* d_bounary)
{
const int by = blockIdx.y;
const int bx = blockIdx.x;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int tid=tx+ty*blockDim.x;
const int bid=bx+by*gridDim.x;
const int numThread=blockDim.x;
const int resultID=(bid)*numThread+tid;
int pos=startPos+resultID;
if(pos<rLen)
{
int flag=d_startArray[pos];
int writePos=d_startSumArray[pos];
if(flag==1)
d_bounary[writePos]=pos;
}
}
void copyChunks(void *d_source, int2* d_Rin, int rLen, void *d_dest)
{
//extract the size information for each chunk
int* d_size;
CUDA_SAFE_CALL( hipMalloc( (void**) (&d_size), sizeof(int)*rLen) );
getIntYArray(d_Rin, rLen, d_size);
//compute the prefix sum for the output positions.
int* d_sum;
CUDA_SAFE_CALL( hipMalloc( (void**) (&d_sum), sizeof(int)*rLen) );
saven_initialPrefixSum(rLen);
prescanArray(d_sum,d_size,rLen);
hipFree(d_size);
//output
int numThreadsPerBlock_x=128;
int numThreadsPerBlock_y=1;
int numBlock_x=512;
int numBlock_y=1;
int chunkSize=numBlock_x*numThreadsPerBlock_x;
int numChunk=rLen/chunkSize;
if(rLen%chunkSize!=0)
numChunk++;
dim3 thread( numThreadsPerBlock_x, numThreadsPerBlock_y, 1);
dim3 grid( numBlock_x, numBlock_y , 1);
int i=0;
int start=0;
int end=0;
for(i=0;i<numChunk;i++)
{
start=i*chunkSize;
end=start+chunkSize;
if(end>rLen)
end=rLen;
hipLaunchKernelGGL(( copyChunks_kernel), dim3(grid),dim3(thread), 0, 0, d_source, start, d_Rin, rLen, d_sum, d_dest);
}
hipDeviceSynchronize();
hipFree(d_sum);
}
//return the number of chunks.
int getChunkBoundary(void *d_source, cmp_type_t* d_Rin, int rLen, int2 ** h_outputKeyListRange)
{
int resultNumChunks=0;
//get the chunk boundary[start of chunk0, start of chunk 1, ...]
int* d_startArray;
CUDA_SAFE_CALL( hipMalloc( (void**) (&d_startArray), sizeof(int)*rLen) );
int numThreadsPerBlock_x=512;
int numThreadsPerBlock_y=1;
int numBlock_x=512;
int numBlock_y=1;
int chunkSize=numBlock_x*numThreadsPerBlock_x;
int numChunk=rLen/chunkSize;
if(rLen%chunkSize!=0)
numChunk++;
dim3 thread( numThreadsPerBlock_x, numThreadsPerBlock_y, 1);
dim3 grid( numBlock_x, numBlock_y , 1);
int i=0;
int start=0;
int end=0;
for(i=0;i<numChunk;i++)
{
start=i*chunkSize;
end=start+chunkSize;
if(end>rLen)
end=rLen;
hipLaunchKernelGGL(( getChunkBoundary_kernel), dim3(grid),dim3(thread), 0, 0, d_source, start, d_Rin, rLen, d_startArray);
}
hipDeviceSynchronize();
//prefix sum for write positions.
int* d_startSumArray;
CUDA_SAFE_CALL( hipMalloc( (void**) (&d_startSumArray), sizeof(int)*rLen) );
saven_initialPrefixSum(rLen);
prescanArray(d_startSumArray,d_startArray,rLen);
//gpuPrint(d_startSumArray, rLen, "d_startSumArray");
int lastValue=0;
int partialSum=0;
CUDA_SAFE_CALL( hipMemcpy( &lastValue, d_startArray+(rLen-1), sizeof(int), hipMemcpyDeviceToHost) );
//gpuPrint(d_startArray, rLen, "d_startArray");
CUDA_SAFE_CALL( hipMemcpy( &partialSum, d_startSumArray+(rLen-1), sizeof(int), hipMemcpyDeviceToHost) );
//gpuPrint(d_startSumArray, rLen, "d_startSumArray");
resultNumChunks=lastValue+partialSum;
int* d_boundary;//[start of chunk0, start of chunk 1, ...]
CUDA_SAFE_CALL( hipMalloc( (void**) (&d_boundary), sizeof(int)*resultNumChunks) );
for(i=0;i<numChunk;i++)
{
start=i*chunkSize;
end=start+chunkSize;
if(end>rLen)
end=rLen;
hipLaunchKernelGGL(( writeBoundary_kernel), dim3(grid),dim3(thread), 0, 0, start, rLen, d_startArray,
d_startSumArray, d_boundary);
}
hipFree(d_startArray);
hipFree(d_startSumArray);
//set the int2 boundary.
int2 *d_outputKeyListRange;
CUDA_SAFE_CALL( hipMalloc( (void**) (&d_outputKeyListRange), sizeof(int2)*resultNumChunks) );
numChunk=resultNumChunks/chunkSize;
if(resultNumChunks%chunkSize!=0)
numChunk++;
for(i=0;i<numChunk;i++)
{
start=i*chunkSize;
end=start+chunkSize;
if(end>resultNumChunks)
end=resultNumChunks;
hipLaunchKernelGGL(( setBoundaryInt2_kernel), dim3(grid),dim3(thread), 0, 0, d_boundary, start, resultNumChunks, rLen, d_outputKeyListRange);
}
hipDeviceSynchronize();
*h_outputKeyListRange=(int2*)malloc(sizeof(int2)*resultNumChunks);
CUDA_SAFE_CALL( hipMemcpy( *h_outputKeyListRange, d_outputKeyListRange, sizeof(int2)*resultNumChunks, hipMemcpyDeviceToHost) );
hipFree(d_boundary);
hipFree(d_outputKeyListRange);
return resultNumChunks;
}
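/* Worked example of the boundary computation above: for six sorted records with keys
 * [a, a, b, b, b, c], getChunkBoundary_kernel produces the start flags
 * d_startArray = [1, 0, 1, 0, 0, 1]; the exclusive prefix sum is
 * d_startSumArray = [0, 1, 1, 2, 2, 2], so resultNumChunks = 1 + 2 = 3;
 * writeBoundary_kernel then stores d_boundary = [0, 2, 5], and setBoundaryInt2_kernel
 * turns that into the (start, end) ranges (0,2), (2,5), (5,6) returned through
 * h_outputKeyListRange.
 */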
int sort_GPU (void * d_inputKeyArray, int totalKeySize, void * d_inputValArray, int totalValueSize,
cmp_type_t * d_inputPointerArray, int rLen,
void * d_outputKeyArray, void * d_outputValArray,
cmp_type_t * d_outputPointerArray, int2 ** h_outputKeyListRange
)
{
//array_startTime(1);
int numDistinctKey=0;
int totalLenInBytes=-1;
bitonicSortGPU(d_inputKeyArray, totalLenInBytes, d_inputPointerArray, rLen, d_outputPointerArray);
//array_endTime("sort", 1);
//!we first scatter the values and then the keys. so that we can reuse d_PA.
int2 *d_PA;
CUDA_SAFE_CALL( hipMalloc( (void**) (&d_PA), sizeof(int2)*rLen) );
//scatter the values.
if(d_inputValArray!=NULL)
{
getZWArray(d_outputPointerArray, rLen, d_PA);
copyChunks(d_inputValArray, d_PA, rLen, d_outputValArray);
setZWArray(d_outputPointerArray, rLen, d_PA);
}
//scatter the keys.
if(d_inputKeyArray!=NULL)
{
getXYArray(d_outputPointerArray, rLen, d_PA);
copyChunks(d_inputKeyArray, d_PA, rLen, d_outputKeyArray);
setXYArray(d_outputPointerArray, rLen, d_PA);
}
	//find the boundary for each key.
numDistinctKey=getChunkBoundary(d_outputKeyArray, d_outputPointerArray, rLen, h_outputKeyListRange);
return numDistinctKey;
}
#endif
|
09c0a7ad6f1cd7e87cc9ebba6df8ec98d738246a.cu
|
/*$Id: MarsSort.cu 721 2009-11-10 10:23:55Z wenbinor $*/
/**
*This is the source code for Mars, a MapReduce framework on graphics
*processors.
*Developers: Wenbin Fang (HKUST), Bingsheng He (Microsoft Research Asia)
*Naga K. Govindaraju (Microsoft Corp.), Qiong Luo (HKUST), Tuyong Wang (Sina.com).
*If you have any question on the code, please contact us at
* [email protected] or [email protected]
*
*The license is a free non-exclusive, non-transferable license to reproduce,
*use, modify and display the source code version of the Software, with or
*without modifications solely for non-commercial research, educational or
*evaluation purposes. The license does not entitle Licensee to technical support,
*telephone assistance, enhancements or updates to the Software. All rights, title
*to and ownership interest in Mars, including all intellectual property rights
*therein shall remain in HKUST.
*/
#ifndef _SORT_CU_
#define _SORT_CU_
#include "MarsInc.h"
#include "compare.cu"
#define NUM_BLOCK_PER_CHUNK_BITONIC_SORT 512//b256
#define SHARED_MEM_INT2 256
#define NUM_BLOCKS_CHUNK 256//(512)
#define NUM_THREADS_CHUNK 256//(256)
#define CHUNK_SIZE (NUM_BLOCKS_CHUNK*NUM_THREADS_CHUNK)
#define NUM_CHUNKS_R (NUM_RECORDS_R/CHUNK_SIZE)
__device__ int getCompareValue(void *d_rawData, cmp_type_t value1, cmp_type_t value2)
{
int compareValue=0;
int v1=value1.x;
int v2=value2.x;
if((v1==-1) || (v2==-1))
{
if(v1==v2)
compareValue=0;
else
if(v1==-1)
compareValue=-1;
else
compareValue=1;
}
else
compareValue=compare((void*)(((char*)d_rawData)+v1), value1.y, (void*)(((char*)d_rawData)+v2), value2.y);
return compareValue;
}
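/* Note on the (v1 == -1) || (v2 == -1) branch above: records padded in by initialize()
 * carry x == -1 and therefore compare as smaller than every real record (the function
 * returns -1 when only value1 is padding). After the ascending bitonic passes the
 * padding occupies the first (numRecordsR - rLen) slots, which is why bitonicSortGPU
 * copies the sorted result starting at d_R + (numRecordsR - rLen).
 */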
void * s_qsRawData=NULL;
__global__ void
partBitonicSortKernel( void* d_rawData, int totalLenInBytes,cmp_type_t* d_R, unsigned int numRecords, int chunkIdx, int unitSize)
{
__shared__ cmp_type_t shared[NUM_THREADS_CHUNK];
int tx = threadIdx.x;
int bx = blockIdx.x;
//load the data
int dataIdx = chunkIdx*CHUNK_SIZE+bx*blockDim.x+tx;
int unitIdx = ((NUM_BLOCKS_CHUNK*chunkIdx + bx)/unitSize)&1;
shared[tx] = d_R[dataIdx];
__syncthreads();
int ixj=0;
int a=0;
cmp_type_t temp1;
cmp_type_t temp2;
int k = NUM_THREADS_CHUNK;
if(unitIdx == 0)
{
for (int j = (k>>1); j>0; j =(j>>1))
{
ixj = tx ^ j;
//a = (shared[tx].y - shared[ixj].y);
temp1=shared[tx];
temp2= shared[ixj];
if (ixj > tx) {
//a=temp1.y-temp2.y;
//a=compareString((void*)(((char4*)d_rawData)+temp1.x),(void*)(((char4*)d_rawData)+temp2.x));
a=getCompareValue(d_rawData, temp1, temp2);
if ((tx & k) == 0) {
if ( (a>0)) {
shared[tx]=temp2;
shared[ixj]=temp1;
}
}
else {
if ( (a<0)) {
shared[tx]=temp2;
shared[ixj]=temp1;
}
}
}
__syncthreads();
}
}
else
{
for (int j = (k>>1); j>0; j =(j>>1))
{
ixj = tx ^ j;
temp1=shared[tx];
temp2= shared[ixj];
if (ixj > tx) {
//a=temp1.y-temp2.y;
//a=compareString((void*)(((char4*)d_rawData)+temp1.x),(void*)(((char4*)d_rawData)+temp2.x));
a=getCompareValue(d_rawData, temp1, temp2);
if ((tx & k) == 0) {
if( (a<0))
{
shared[tx]=temp2;
shared[ixj]=temp1;
}
}
else {
if( (a>0))
{
shared[tx]=temp2;
shared[ixj]=temp1;
}
}
}
__syncthreads();
}
}
d_R[dataIdx] = shared[tx];
}
__global__ void
unitBitonicSortKernel(void* d_rawData, int totalLenInBytes, cmp_type_t* d_R, unsigned int numRecords, int chunkIdx )
{
__shared__ cmp_type_t shared[NUM_THREADS_CHUNK];
int tx = threadIdx.x;
int bx = blockIdx.x;
int unitIdx = (NUM_BLOCKS_CHUNK*chunkIdx + bx)&1;
//load the data
int dataIdx = chunkIdx*CHUNK_SIZE+bx*blockDim.x+tx;
shared[tx] = d_R[dataIdx];
__syncthreads();
cmp_type_t temp1;
cmp_type_t temp2;
int ixj=0;
int a=0;
if(unitIdx == 0)
{
for (int k = 2; k <= NUM_THREADS_CHUNK; (k =k<<1))
{
// bitonic merge:
for (int j = (k>>1); j>0; (j=j>>1))
{
ixj = tx ^ j;
temp1=shared[tx];
temp2= shared[ixj];
if (ixj > tx) {
//a=temp1.y-temp2.y;
//a=compareString((void*)(((char4*)d_rawData)+temp1.x),(void*)(((char4*)d_rawData)+temp2.x));
a=getCompareValue(d_rawData, temp1, temp2);
if ((tx & k) == 0) {
if ( (a>0)) {
shared[tx]=temp2;
shared[ixj]=temp1;
}
}
else {
if ( (a<0)) {
shared[tx]=temp2;
shared[ixj]=temp1;
}
}
}
__syncthreads();
}
}
}
else
{
for (int k = 2; k <= NUM_THREADS_CHUNK; (k =k<<1))
{
// bitonic merge:
for (int j = (k>>1); j>0; (j=j>>1))
{
ixj = tx ^ j;
temp1=shared[tx];
temp2= shared[ixj];
if (ixj > tx) {
//a=temp1.y-temp2.y;
//a=compareString((void*)(((char4*)d_rawData)+temp1.x),(void*)(((char4*)d_rawData)+temp2.x));
a=getCompareValue(d_rawData, temp1, temp2);
if ((tx & k) == 0) {
if( (a<0))
{
shared[tx]=temp2;
shared[ixj]=temp1;
}
}
else {
if( (a>0))
{
shared[tx]=temp2;
shared[ixj]=temp1;
}
}
}
__syncthreads();
}
}
}
d_R[dataIdx] = shared[tx];
}
__global__ void
bitonicKernel( void* d_rawData, int totalLenInBytes, cmp_type_t* d_R, unsigned int numRecords, int k, int j)
{
int bx = blockIdx.x;
int by = blockIdx.y;
int tid = threadIdx.x;
int dataIdx = by*gridDim.x*blockDim.x + bx*blockDim.x + tid;
int ixj = dataIdx^j;
if( ixj > dataIdx )
{
cmp_type_t tmpR = d_R[dataIdx];
cmp_type_t tmpIxj = d_R[ixj];
if( (dataIdx&k) == 0 )
{
//if( tmpR.y > tmpIxj.y )
//if(compareString((void*)(((char4*)d_rawData)+tmpR.x),(void*)(((char4*)d_rawData)+tmpIxj.x))==1)
if(getCompareValue(d_rawData, tmpR, tmpIxj)==1)
{
d_R[dataIdx] = tmpIxj;
d_R[ixj] = tmpR;
}
}
else
{
//if( tmpR.y < tmpIxj.y )
//if(compareString((void*)(((char4*)d_rawData)+tmpR.x),(void*)(((char4*)d_rawData)+tmpIxj.x))==-1)
if(getCompareValue(d_rawData, tmpR, tmpIxj)==-1)
{
d_R[dataIdx] = tmpIxj;
d_R[ixj] = tmpR;
}
}
}
}
__device__ inline void swap(cmp_type_t & a, cmp_type_t & b)
{
// Alternative swap doesn't use a temporary register:
// a ^= b;
// b ^= a;
// a ^= b;
cmp_type_t tmp = a;
a = b;
b = tmp;
}
__global__ void bitonicSortSingleBlock_kernel(void* d_rawData, int totalLenInBytes, cmp_type_t * d_values, int rLen, cmp_type_t* d_output)
{
__shared__ cmp_type_t bs_cmpbuf[SHARED_MEM_INT2];
//const int by = blockIdx.y;
//const int bx = blockIdx.x;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int tid=tx+ty*blockDim.x;
//const int bid=bx+by*gridDim.x;
//const int numThread=blockDim.x;
//const int resultID=(bx)*numThread+tid;
if(tid<rLen)
{
bs_cmpbuf[tid] = d_values[tid];
}
else
{
bs_cmpbuf[tid].x =-1;
}
__syncthreads();
// Parallel bitonic sort.
int compareValue=0;
for (int k = 2; k <= SHARED_MEM_INT2; k *= 2)
{
// Bitonic merge:
for (int j = k / 2; j>0; j /= 2)
{
int ixj = tid ^ j;
if (ixj > tid)
{
if ((tid & k) == 0)
{
compareValue=getCompareValue(d_rawData, bs_cmpbuf[tid], bs_cmpbuf[ixj]);
//if (shared[tid] > shared[ixj])
if(compareValue>0)
{
swap(bs_cmpbuf[tid], bs_cmpbuf[ixj]);
}
}
else
{
compareValue=getCompareValue(d_rawData, bs_cmpbuf[tid], bs_cmpbuf[ixj]);
//if (shared[tid] < shared[ixj])
if(compareValue<0)
{
swap(bs_cmpbuf[tid], bs_cmpbuf[ixj]);
}
}
}
__syncthreads();
}
}
// Write result.
/*if(tid<rLen)
{
d_output[tid] = bs_cmpbuf[tid+SHARED_MEM_INT2-rLen];
}*/
int startCopy=SHARED_MEM_INT2-rLen;
if(tid>=startCopy)
{
d_output[tid-startCopy]=bs_cmpbuf[tid];
}
}
__global__ void bitonicSortMultipleBlocks_kernel(void* d_rawData, int totalLenInBytes, cmp_type_t * d_values, int* d_bound, int startBlock, int numBlock, cmp_type_t *d_output)
{
__shared__ int bs_pStart;
__shared__ int bs_pEnd;
__shared__ int bs_numElement;
__shared__ cmp_type_t bs_shared[SHARED_MEM_INT2];
const int by = blockIdx.y;
const int bx = blockIdx.x;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int tid=tx+ty*blockDim.x;
const int bid=bx+by*gridDim.x;
//const int numThread=blockDim.x;
//const int resultID=(bx)*numThread+tid;
if(bid>=numBlock) return;
if(tid==0)
{
bs_pStart=d_bound[(bid+startBlock)<<1];
bs_pEnd=d_bound[((bid+startBlock)<<1)+1];
bs_numElement=bs_pEnd-bs_pStart;
//if(bid==82&& bs_pStart==6339)
// printf("%d, %d, %d\n", bs_pStart, bs_pEnd, bs_numElement);
}
__syncthreads();
// Copy input to shared mem.
if(tid<bs_numElement)
{
bs_shared[tid] = d_values[tid+bs_pStart];
//if(bid==82 && bs_pStart==6339)
// printf("tid %d, pos, %d, %d, %d, %d\n", tid,tid+bs_pStart, bs_pStart,bs_pEnd, d_values[tid+bs_pStart].x);
//if(6342==tid+bs_pStart)
// printf(")))tid %d, pos, %d, %d, %d, %d\n", tid,tid+bs_pStart, bs_pStart,bs_pEnd, d_values[tid+bs_pStart].x);
}
else
{
bs_shared[tid].x =-1;
}
__syncthreads();
// Parallel bitonic sort.
int compareValue=0;
for (int k = 2; k <= SHARED_MEM_INT2; k *= 2)
{
// Bitonic merge:
for (int j = k / 2; j>0; j /= 2)
{
int ixj = tid ^ j;
if (ixj > tid)
{
if ((tid & k) == 0)
{
compareValue=getCompareValue(d_rawData, bs_shared[tid], bs_shared[ixj]);
//if (shared[tid] > shared[ixj])
if(compareValue>0)
{
swap(bs_shared[tid], bs_shared[ixj]);
}
}
else
{
compareValue=getCompareValue(d_rawData, bs_shared[tid], bs_shared[ixj]);
//if (shared[tid] < shared[ixj])
if(compareValue<0)
{
swap(bs_shared[tid], bs_shared[ixj]);
}
}
}
__syncthreads();
}
}
// Write result.
//if(tid<bs_numElement)
//{
// d_output[tid+bs_pStart] = bs_shared[tid+SHARED_MEM_INT2-bs_numElement];
//}
//int startCopy=SHARED_MEM_INT2-bs_numElement;
if(tid>=bs_numElement)
{
d_output[tid-bs_numElement]=bs_shared[tid];
}
}
__global__ void initialize_kernel(cmp_type_t* d_data, int startPos, int rLen, cmp_type_t value)
{
const int by = blockIdx.y;
const int bx = blockIdx.x;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int tid=tx+ty*blockDim.x;
const int bid=bx+by*gridDim.x;
const int numThread=blockDim.x;
const int resultID=(bid)*numThread+tid;
int pos=startPos+resultID;
if(pos<rLen)
d_data[pos]=value;
}
void bitonicSortMultipleBlocks(void* d_rawData, int totalLenInBytes, cmp_type_t * d_values, int* d_bound, int numBlock, cmp_type_t * d_output)
{
int numThreadsPerBlock_x=SHARED_MEM_INT2;
int numThreadsPerBlock_y=1;
int numBlock_x=NUM_BLOCK_PER_CHUNK_BITONIC_SORT;
int numBlock_y=1;
int numChunk=numBlock/numBlock_x;
if(numBlock%numBlock_x!=0)
numChunk++;
dim3 thread( numThreadsPerBlock_x, numThreadsPerBlock_y, 1);
dim3 grid( numBlock_x, numBlock_y , 1);
int i=0;
int start=0;
int end=0;
for(i=0;i<numChunk;i++)
{
start=i*numBlock_x;
end=start+numBlock_x;
if(end>numBlock)
end=numBlock;
//printf("bitonicSortMultipleBlocks_kernel: %d, range, %d, %d\n", i, start, end);
bitonicSortMultipleBlocks_kernel<<<grid,thread>>>(d_rawData, totalLenInBytes, d_values, d_bound, start, end-start, d_output);
cudaThreadSynchronize();
}
// cudaThreadSynchronize();
}
void bitonicSortSingleBlock(void* d_rawData, int totalLenInBytes, cmp_type_t * d_values, int rLen, cmp_type_t * d_output)
{
int numThreadsPerBlock_x=SHARED_MEM_INT2;
int numThreadsPerBlock_y=1;
int numBlock_x=1;
int numBlock_y=1;
dim3 thread( numThreadsPerBlock_x, numThreadsPerBlock_y, 1);
dim3 grid( numBlock_x, numBlock_y , 1);
bitonicSortSingleBlock_kernel<<<grid,thread>>>(d_rawData, totalLenInBytes, d_values, rLen, d_output);
cudaThreadSynchronize();
}
void initialize(cmp_type_t *d_data, int rLen, cmp_type_t value)
{
int numThreadsPerBlock_x=512;
int numThreadsPerBlock_y=1;
int numBlock_x=512;
int numBlock_y=1;
int chunkSize=numBlock_x*numThreadsPerBlock_x;
int numChunk=rLen/chunkSize;
if(rLen%chunkSize!=0)
numChunk++;
dim3 thread( numThreadsPerBlock_x, numThreadsPerBlock_y, 1);
dim3 grid( numBlock_x, numBlock_y , 1);
int i=0;
int start=0;
int end=0;
for(i=0;i<numChunk;i++)
{
start=i*chunkSize;
end=start+chunkSize;
if(end>rLen)
end=rLen;
initialize_kernel<<<grid,thread>>>(d_data, start, rLen, value);
}
cudaThreadSynchronize();
}
void bitonicSortGPU(void* d_rawData, int totalLenInBytes, cmp_type_t* d_Rin, int rLen, void *d_Rout)
{
unsigned int numRecordsR;
unsigned int size = rLen;
unsigned int level = 0;
while( size != 1 )
{
size = size/2;
level++;
}
if( (1<<level) < rLen )
{
level++;
}
numRecordsR = (1<<level);
if(rLen<=NUM_THREADS_CHUNK)
{
bitonicSortSingleBlock((void*)d_rawData, totalLenInBytes, d_Rin, rLen, (cmp_type_t*)d_Rout);
}
else
if( rLen <= 256*1024 )
{
//unsigned int numRecordsR = rLen;
unsigned int numThreadsSort = NUM_THREADS_CHUNK;
if(numRecordsR<NUM_THREADS_CHUNK)
numRecordsR=NUM_THREADS_CHUNK;
unsigned int numBlocksXSort = numRecordsR/numThreadsSort;
unsigned int numBlocksYSort = 1;
dim3 gridSort( numBlocksXSort, numBlocksYSort );
unsigned int memSizeRecordsR = sizeof( cmp_type_t ) * numRecordsR;
//copy the <offset, length> pairs.
cmp_type_t* d_R;
CUDA_SAFE_CALL( cudaMalloc( (void**) &d_R, memSizeRecordsR) );
cmp_type_t tempValue;
tempValue.x=tempValue.y=-1;
initialize(d_R, numRecordsR, tempValue);
CUDA_SAFE_CALL( cudaMemcpy( d_R, d_Rin, rLen*sizeof(cmp_type_t), cudaMemcpyDeviceToDevice) );
for( int k = 2; k <= numRecordsR; k *= 2 )
{
for( int j = k/2; j > 0; j /= 2 )
{
bitonicKernel<<<gridSort, numThreadsSort>>>((void*)d_rawData, totalLenInBytes, d_R, numRecordsR, k, j);
}
}
CUDA_SAFE_CALL( cudaMemcpy( d_Rout, d_R+(numRecordsR-rLen), sizeof(cmp_type_t)*rLen, cudaMemcpyDeviceToDevice) );
cudaFree( d_R );
cudaThreadSynchronize();
}
else
{
unsigned int numThreadsSort = NUM_THREADS_CHUNK;
unsigned int numBlocksYSort = 1;
unsigned int numBlocksXSort = (numRecordsR/numThreadsSort)/numBlocksYSort;
if(numBlocksXSort>=(1<<16))
{
numBlocksXSort=(1<<15);
numBlocksYSort=(numRecordsR/numThreadsSort)/numBlocksXSort;
}
unsigned int numBlocksChunk = NUM_BLOCKS_CHUNK;
unsigned int numThreadsChunk = NUM_THREADS_CHUNK;
unsigned int chunkSize = numBlocksChunk*numThreadsChunk;
unsigned int numChunksR = numRecordsR/chunkSize;
dim3 gridSort( numBlocksXSort, numBlocksYSort );
unsigned int memSizeRecordsR = sizeof( cmp_type_t ) * numRecordsR;
cmp_type_t* d_R;
CUDA_SAFE_CALL( cudaMalloc( (void**) &d_R, memSizeRecordsR) );
cmp_type_t tempValue;
tempValue.x=tempValue.y=-1;
initialize(d_R, numRecordsR, tempValue);
CUDA_SAFE_CALL( cudaMemcpy( d_R, d_Rin, rLen*sizeof(cmp_type_t), cudaMemcpyDeviceToDevice) );
for( int chunkIdx = 0; chunkIdx < numChunksR; chunkIdx++ )
{
unitBitonicSortKernel<<< numBlocksChunk, numThreadsChunk>>>( (void*)d_rawData, totalLenInBytes, d_R, numRecordsR, chunkIdx );
}
int j;
for( int k = numThreadsChunk*2; k <= numRecordsR; k *= 2 )
{
for( j = k/2; j > numThreadsChunk/2; j /= 2 )
{
bitonicKernel<<<gridSort, numThreadsSort>>>( (void*)d_rawData, totalLenInBytes, d_R, numRecordsR, k, j);
}
for( int chunkIdx = 0; chunkIdx < numChunksR; chunkIdx++ )
{
partBitonicSortKernel<<< numBlocksChunk, numThreadsChunk>>>((void*)d_rawData, totalLenInBytes, d_R, numRecordsR, chunkIdx, k/numThreadsSort );
}
}
CUDA_SAFE_CALL( cudaMemcpy( d_Rout, d_R+(numRecordsR-rLen), sizeof(cmp_type_t)*rLen, cudaMemcpyDeviceToDevice) );
cudaFree( d_R );
cudaThreadSynchronize();
}
}
__global__ void getIntYArray_kernel(int2* d_input, int startPos, int rLen, int* d_output)
{
const int by = blockIdx.y;
const int bx = blockIdx.x;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int tid=tx+ty*blockDim.x;
const int bid=bx+by*gridDim.x;
const int numThread=blockDim.x;
const int resultID=(bid)*numThread+tid;
int pos=startPos+resultID;
if(pos<rLen)
{
int2 value=d_input[pos];
d_output[pos]=value.y;
}
}
__global__ void getXYArray_kernel(cmp_type_t* d_input, int startPos, int rLen, int2* d_output)
{
const int by = blockIdx.y;
const int bx = blockIdx.x;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int tid=tx+ty*blockDim.x;
const int bid=bx+by*gridDim.x;
const int numThread=blockDim.x;
const int resultID=(bid)*numThread+tid;
int pos=startPos+resultID;
if(pos<rLen)
{
cmp_type_t value=d_input[pos];
d_output[pos].x=value.x;
d_output[pos].y=value.y;
}
}
__global__ void getZWArray_kernel(cmp_type_t* d_input, int startPos, int rLen, int2* d_output)
{
const int by = blockIdx.y;
const int bx = blockIdx.x;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int tid=tx+ty*blockDim.x;
const int bid=bx+by*gridDim.x;
const int numThread=blockDim.x;
const int resultID=(bid)*numThread+tid;
int pos=startPos+resultID;
if(pos<rLen)
{
cmp_type_t value=d_input[pos];
d_output[pos].x=value.z;
d_output[pos].y=value.w;
}
}
__global__ void setXYArray_kernel(cmp_type_t* d_input, int startPos, int rLen, int2* d_value)
{
const int by = blockIdx.y;
const int bx = blockIdx.x;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int tid=tx+ty*blockDim.x;
const int bid=bx+by*gridDim.x;
const int numThread=blockDim.x;
const int resultID=(bid)*numThread+tid;
int pos=startPos+resultID;
if(pos<rLen)
{
cmp_type_t value=d_input[pos];
value.x=d_value[pos].x;
value.y=d_value[pos].y;
d_input[pos]=value;
}
}
__global__ void setZWArray_kernel(cmp_type_t* d_input, int startPos, int rLen, int2* d_value)
{
const int by = blockIdx.y;
const int bx = blockIdx.x;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int tid=tx+ty*blockDim.x;
const int bid=bx+by*gridDim.x;
const int numThread=blockDim.x;
const int resultID=(bid)*numThread+tid;
int pos=startPos+resultID;
if(pos<rLen)
{
cmp_type_t value=d_input[pos];
value.z=d_value[pos].x;
value.w=d_value[pos].y;
d_input[pos]=value;
}
}
void getIntYArray(int2 *d_data, int rLen, int* d_output)
{
int numThreadsPerBlock_x=512;
int numThreadsPerBlock_y=1;
int numBlock_x=512;
int numBlock_y=1;
int chunkSize=numBlock_x*numThreadsPerBlock_x;
int numChunk=rLen/chunkSize;
if(rLen%chunkSize!=0)
numChunk++;
dim3 thread( numThreadsPerBlock_x, numThreadsPerBlock_y, 1);
dim3 grid( numBlock_x, numBlock_y , 1);
int i=0;
int start=0;
int end=0;
for(i=0;i<numChunk;i++)
{
start=i*chunkSize;
end=start+chunkSize;
if(end>rLen)
end=rLen;
getIntYArray_kernel<<<grid,thread>>>(d_data, start, rLen, d_output);
}
cudaThreadSynchronize();
}
void getXYArray(cmp_type_t *d_data, int rLen, int2* d_output)
{
int numThreadsPerBlock_x=512;
int numThreadsPerBlock_y=1;
int numBlock_x=512;
int numBlock_y=1;
int chunkSize=numBlock_x*numThreadsPerBlock_x;
int numChunk=rLen/chunkSize;
if(rLen%chunkSize!=0)
numChunk++;
dim3 thread( numThreadsPerBlock_x, numThreadsPerBlock_y, 1);
dim3 grid( numBlock_x, numBlock_y , 1);
int i=0;
int start=0;
int end=0;
for(i=0;i<numChunk;i++)
{
start=i*chunkSize;
end=start+chunkSize;
if(end>rLen)
end=rLen;
getXYArray_kernel<<<grid,thread>>>(d_data, start, rLen, d_output);
}
cudaThreadSynchronize();
}
void getZWArray(cmp_type_t *d_data, int rLen, int2* d_output)
{
int numThreadsPerBlock_x=512;
int numThreadsPerBlock_y=1;
int numBlock_x=512;
int numBlock_y=1;
int chunkSize=numBlock_x*numThreadsPerBlock_x;
int numChunk=rLen/chunkSize;
if(rLen%chunkSize!=0)
numChunk++;
dim3 thread( numThreadsPerBlock_x, numThreadsPerBlock_y, 1);
dim3 grid( numBlock_x, numBlock_y , 1);
int i=0;
int start=0;
int end=0;
for(i=0;i<numChunk;i++)
{
start=i*chunkSize;
end=start+chunkSize;
if(end>rLen)
end=rLen;
getZWArray_kernel<<<grid,thread>>>(d_data, start, rLen, d_output);
}
cudaThreadSynchronize();
}
void setXYArray(cmp_type_t *d_data, int rLen, int2* d_value)
{
int numThreadsPerBlock_x=512;
int numThreadsPerBlock_y=1;
int numBlock_x=512;
int numBlock_y=1;
int chunkSize=numBlock_x*numThreadsPerBlock_x;
int numChunk=rLen/chunkSize;
if(rLen%chunkSize!=0)
numChunk++;
dim3 thread( numThreadsPerBlock_x, numThreadsPerBlock_y, 1);
dim3 grid( numBlock_x, numBlock_y , 1);
int i=0;
int start=0;
int end=0;
for(i=0;i<numChunk;i++)
{
start=i*chunkSize;
end=start+chunkSize;
if(end>rLen)
end=rLen;
setXYArray_kernel<<<grid,thread>>>(d_data, start, rLen, d_value);
}
cudaThreadSynchronize();
}
void setZWArray(cmp_type_t *d_data, int rLen, int2* d_value)
{
int numThreadsPerBlock_x=512;
int numThreadsPerBlock_y=1;
int numBlock_x=512;
int numBlock_y=1;
int chunkSize=numBlock_x*numThreadsPerBlock_x;
int numChunk=rLen/chunkSize;
if(rLen%chunkSize!=0)
numChunk++;
dim3 thread( numThreadsPerBlock_x, numThreadsPerBlock_y, 1);
dim3 grid( numBlock_x, numBlock_y , 1);
int i=0;
int start=0;
int end=0;
for(i=0;i<numChunk;i++)
{
start=i*chunkSize;
end=start+chunkSize;
if(end>rLen)
end=rLen;
setZWArray_kernel<<<grid,thread>>>(d_data, start, rLen, d_value);
}
cudaThreadSynchronize();
}
__global__ void copyChunks_kernel(void *d_source, int startPos, int2* d_Rin, int rLen, int *d_sum, void *d_dest)
{
const int by = blockIdx.y;
const int bx = blockIdx.x;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int tid=tx+ty*blockDim.x;
const int bid=bx+by*gridDim.x;
const int numThread=blockDim.x;
const int resultID=(bid)*numThread+tid;
int pos=startPos+resultID;
if(pos<rLen)
{
int2 value=d_Rin[pos];
int offset=value.x;
int size=value.y;
int startWritePos=d_sum[pos];
int i=0;
char *source=(char*)d_source;
char *dest=(char*)d_dest;
for(i=0;i<size;i++)
{
dest[i+startWritePos]=source[i+offset];
}
value.x=startWritePos;
d_Rin[pos]=value;
}
}
__global__ void getChunkBoundary_kernel(void* d_rawData, int startPos, cmp_type_t *d_Rin,
int rLen, int* d_startArray)
{
const int by = blockIdx.y;
const int bx = blockIdx.x;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int tid=tx+ty*blockDim.x;
const int bid=bx+by*gridDim.x;
const int numThread=blockDim.x;
const int resultID=(bid)*numThread+tid;
int pos=startPos+resultID;
if(pos<rLen)
{
int result=0;
if(pos==0)//the start position
{
result=1;
}
else
{
cmp_type_t cur=d_Rin[pos];
cmp_type_t left=d_Rin[pos-1];
if(getCompareValue(d_rawData, cur, left)!=0)
{
result=1;
}
}
d_startArray[pos]=result;
}
}
__global__ void setBoundaryInt2_kernel(int* d_boundary, int startPos, int numKey, int rLen,
int2* d_boundaryRange)
{
const int by = blockIdx.y;
const int bx = blockIdx.x;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int tid=tx+ty*blockDim.x;
const int bid=bx+by*gridDim.x;
const int numThread=blockDim.x;
const int resultID=(bid)*numThread+tid;
int pos=startPos+resultID;
if(pos<numKey)
{
int2 flag;
flag.x=d_boundary[pos];
if((pos+1)!=numKey)
flag.y=d_boundary[pos+1];
else
flag.y=rLen;
d_boundaryRange[pos]=flag;
}
}
__global__ void writeBoundary_kernel(int startPos, int rLen, int* d_startArray,
int* d_startSumArray, int* d_bounary)
{
const int by = blockIdx.y;
const int bx = blockIdx.x;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int tid=tx+ty*blockDim.x;
const int bid=bx+by*gridDim.x;
const int numThread=blockDim.x;
const int resultID=(bid)*numThread+tid;
int pos=startPos+resultID;
if(pos<rLen)
{
int flag=d_startArray[pos];
int writePos=d_startSumArray[pos];
if(flag==1)
d_bounary[writePos]=pos;
}
}
void copyChunks(void *d_source, int2* d_Rin, int rLen, void *d_dest)
{
//extract the size information for each chunk
int* d_size;
CUDA_SAFE_CALL( cudaMalloc( (void**) (&d_size), sizeof(int)*rLen) );
getIntYArray(d_Rin, rLen, d_size);
//compute the prefix sum for the output positions.
int* d_sum;
CUDA_SAFE_CALL( cudaMalloc( (void**) (&d_sum), sizeof(int)*rLen) );
saven_initialPrefixSum(rLen);
prescanArray(d_sum,d_size,rLen);
cudaFree(d_size);
//output
int numThreadsPerBlock_x=128;
int numThreadsPerBlock_y=1;
int numBlock_x=512;
int numBlock_y=1;
int chunkSize=numBlock_x*numThreadsPerBlock_x;
int numChunk=rLen/chunkSize;
if(rLen%chunkSize!=0)
numChunk++;
dim3 thread( numThreadsPerBlock_x, numThreadsPerBlock_y, 1);
dim3 grid( numBlock_x, numBlock_y , 1);
int i=0;
int start=0;
int end=0;
for(i=0;i<numChunk;i++)
{
start=i*chunkSize;
end=start+chunkSize;
if(end>rLen)
end=rLen;
copyChunks_kernel<<<grid,thread>>>(d_source, start, d_Rin, rLen, d_sum, d_dest);
}
cudaThreadSynchronize();
cudaFree(d_sum);
}
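/* Worked example of the gather above: for three records with (offset, size) pairs
 * (100, 3), (40, 5) and (7, 2), getIntYArray extracts the sizes [3, 5, 2],
 * prescanArray produces the exclusive prefix sum d_sum = [0, 3, 8], and
 * copyChunks_kernel copies 3 bytes to d_dest + 0, 5 bytes to d_dest + 3 and
 * 2 bytes to d_dest + 8, rewriting each record's x field to its new offset.
 */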
//return the number of chunks.
int getChunkBoundary(void *d_source, cmp_type_t* d_Rin, int rLen, int2 ** h_outputKeyListRange)
{
int resultNumChunks=0;
//get the chunk boundary[start of chunk0, start of chunk 1, ...]
int* d_startArray;
CUDA_SAFE_CALL( cudaMalloc( (void**) (&d_startArray), sizeof(int)*rLen) );
int numThreadsPerBlock_x=512;
int numThreadsPerBlock_y=1;
int numBlock_x=512;
int numBlock_y=1;
int chunkSize=numBlock_x*numThreadsPerBlock_x;
int numChunk=rLen/chunkSize;
if(rLen%chunkSize!=0)
numChunk++;
dim3 thread( numThreadsPerBlock_x, numThreadsPerBlock_y, 1);
dim3 grid( numBlock_x, numBlock_y , 1);
int i=0;
int start=0;
int end=0;
for(i=0;i<numChunk;i++)
{
start=i*chunkSize;
end=start+chunkSize;
if(end>rLen)
end=rLen;
getChunkBoundary_kernel<<<grid,thread>>>(d_source, start, d_Rin, rLen, d_startArray);
}
cudaThreadSynchronize();
//prefix sum for write positions.
int* d_startSumArray;
CUDA_SAFE_CALL( cudaMalloc( (void**) (&d_startSumArray), sizeof(int)*rLen) );
saven_initialPrefixSum(rLen);
prescanArray(d_startSumArray,d_startArray,rLen);
//gpuPrint(d_startSumArray, rLen, "d_startSumArray");
int lastValue=0;
int partialSum=0;
CUDA_SAFE_CALL( cudaMemcpy( &lastValue, d_startArray+(rLen-1), sizeof(int), cudaMemcpyDeviceToHost) );
//gpuPrint(d_startArray, rLen, "d_startArray");
CUDA_SAFE_CALL( cudaMemcpy( &partialSum, d_startSumArray+(rLen-1), sizeof(int), cudaMemcpyDeviceToHost) );
//gpuPrint(d_startSumArray, rLen, "d_startSumArray");
resultNumChunks=lastValue+partialSum;
int* d_boundary;//[start of chunk0, start of chunk 1, ...]
CUDA_SAFE_CALL( cudaMalloc( (void**) (&d_boundary), sizeof(int)*resultNumChunks) );
for(i=0;i<numChunk;i++)
{
start=i*chunkSize;
end=start+chunkSize;
if(end>rLen)
end=rLen;
writeBoundary_kernel<<<grid,thread>>>(start, rLen, d_startArray,
d_startSumArray, d_boundary);
}
cudaFree(d_startArray);
cudaFree(d_startSumArray);
//set the int2 boundary.
int2 *d_outputKeyListRange;
CUDA_SAFE_CALL( cudaMalloc( (void**) (&d_outputKeyListRange), sizeof(int2)*resultNumChunks) );
numChunk=resultNumChunks/chunkSize;
if(resultNumChunks%chunkSize!=0)
numChunk++;
for(i=0;i<numChunk;i++)
{
start=i*chunkSize;
end=start+chunkSize;
if(end>resultNumChunks)
end=resultNumChunks;
setBoundaryInt2_kernel<<<grid,thread>>>(d_boundary, start, resultNumChunks, rLen, d_outputKeyListRange);
}
cudaThreadSynchronize();
*h_outputKeyListRange=(int2*)malloc(sizeof(int2)*resultNumChunks);
CUDA_SAFE_CALL( cudaMemcpy( *h_outputKeyListRange, d_outputKeyListRange, sizeof(int2)*resultNumChunks, cudaMemcpyDeviceToHost) );
cudaFree(d_boundary);
cudaFree(d_outputKeyListRange);
return resultNumChunks;
}
int sort_GPU (void * d_inputKeyArray, int totalKeySize, void * d_inputValArray, int totalValueSize,
cmp_type_t * d_inputPointerArray, int rLen,
void * d_outputKeyArray, void * d_outputValArray,
cmp_type_t * d_outputPointerArray, int2 ** h_outputKeyListRange
)
{
//array_startTime(1);
int numDistinctKey=0;
int totalLenInBytes=-1;
bitonicSortGPU(d_inputKeyArray, totalLenInBytes, d_inputPointerArray, rLen, d_outputPointerArray);
//array_endTime("sort", 1);
//!we first scatter the values and then the keys. so that we can reuse d_PA.
int2 *d_PA;
CUDA_SAFE_CALL( cudaMalloc( (void**) (&d_PA), sizeof(int2)*rLen) );
//scatter the values.
if(d_inputValArray!=NULL)
{
getZWArray(d_outputPointerArray, rLen, d_PA);
copyChunks(d_inputValArray, d_PA, rLen, d_outputValArray);
setZWArray(d_outputPointerArray, rLen, d_PA);
}
//scatter the keys.
if(d_inputKeyArray!=NULL)
{
getXYArray(d_outputPointerArray, rLen, d_PA);
copyChunks(d_inputKeyArray, d_PA, rLen, d_outputKeyArray);
setXYArray(d_outputPointerArray, rLen, d_PA);
}
	//find the boundary for each key.
numDistinctKey=getChunkBoundary(d_outputKeyArray, d_outputPointerArray, rLen, h_outputKeyListRange);
return numDistinctKey;
}
#endif
|
c3178a979cc35fd9eb6503aa826c6419855f716e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* NEURAL NETWORK CUDA GPU 1.cu
* by Lut99
*
* Created:
* 5/25/2020, 9:30:27 PM
* Last edited:
* 5/25/2020, 9:42:54 PM
* Auto updated?
* Yes
*
* Description:
* This version implements the CUDA parts of the
* NeuralNetwork_CUDA_GPU1.c variation on the NeuralNetwork
* implementation. Specifically, it provides code for the NeuralNetwork's
* train function, and the kernels used there.
**/
#include "NeuralNetwork.h"
/***** CUDA KERNELS *****/
/* Kernel that computes the forward pass for a single layer. This version implements a sigmoid activation function.
* Parameters:
* @param outputs: a 2D, pitched array which will store the output of this layer (columns) for every sample (rows)
* @param outputs_pitch: the pitch of the outputs array
* @param biases: a list of biases for this layer
* @param weights: a pitched matrix of weights for this layer to the next
* @param weights_pitch: the pitch for this weights matrix
* @param inputs: a 2D, pitched array with inputs from the previous layer (columns) for every sample (rows)
* @param inputs_pitch: the pitch of the inputs array
* @param prev_nodes: number of nodes in the layer before this one
* @param this_nodes: number of nodes in this layer
* @param n_samples: total number of samples to process
*/
__global__ void FwdPassKernel(double* outputs, size_t outputs_pitch,
double* biases,
double* weights, size_t weights_pitch,
double* inputs, size_t inputs_pitch,
                              size_t prev_nodes, size_t this_nodes, size_t n_samples) {
// Get the index of this particular thread
int i = blockDim.x * blockIdx.x + threadIdx.x;
int s = i / this_nodes;
int n = i % this_nodes;
// Only do work if still within range
if (s < n_samples && n < this_nodes) {
// Sum the weighted inputs for this node (64 first iteration of l, 20 for second iteration)
double z = biases[n];
for (size_t prev_n = 0; prev_n < prev_nodes; prev_n++) {
double input_val = *((double*) ((char*) inputs + s * inputs_pitch) + prev_n);
double weight_val = *((double*) ((char*) weights + prev_n * weights_pitch) + n);
z += input_val * weight_val;
}
// Run the activation function over this input and store it in the output (using sigmoid)
double* output_ptr = (double*) ((char*) outputs + s * outputs_pitch) + n;
*output_ptr = 1 / (1 + exp(-z));
}
}
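/* All kernels in this file address their pitched 2D buffers with the pattern
 * (double*) ((char*) base + row * pitch) + col. A minimal sketch of that pattern as a
 * helper (pitched_row is a hypothetical name; the kernels above and below inline the
 * expression directly and do not call it):
 */
__device__ static inline double* pitched_row(double* base, size_t pitch, size_t row) {
    // The pitch is given in bytes, so the row offset must be applied to a char pointer
    // before reindexing as double.
    return (double*) ((char*) base + row * pitch);
}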
/* Kernel that computes the output layer-backward pass. This version implements Mean Square Error, and assumes
* a sigmoid activation function. Also note that the reduction of delta_weights should be done using a later
* kernel. Finally, note that the deltas are not explicitly returned, as they are equal to the delta_bias for this node.
* Parameters:
* @param delta_biases: a 2D, pitched matrix containing the delta_biases computed for each node in the output layer and each sample.
* @param delta_biases_pitch: the pitch of the delta_biases matrix.
* @param delta_weights: a 3D, pitched tensor containing the weight updates for the last layer across all samples.
* @param delta_weights_pitch: the pitch for the delta_weights 3D-array.
* @param layer_inputs: a 2D, pitched matrix containing the inputs for the last layer.
* @param layer_inputs_pitch: pitch for the layer_inputs.
* @param layer_outputs: a 2D, pitched matrix containing the outputs for the last layer.
* @param layer_outputs_pitch: pitch for the layer_inputs.
* @param expected: a 2D, pitched matrix containing the expected outputs for the output layer.
* @param expected_pitch: the pitch of the expected matrix.
* @param prev_nodes: number of nodes in the layer before this one
* @param this_nodes: number of nodes in this layer
* @param n_samples: total number of samples to process
*/
__global__ void BckPassOutputKernel(double* delta_biases, size_t delta_biases_pitch,
double* delta_weights, size_t delta_weights_pitch,
double* layer_inputs, size_t layer_inputs_pitch,
double* layer_outputs, size_t layer_outputs_pitch,
double* expected, size_t expected_pitch,
size_t prev_nodes, size_t this_nodes, size_t n_samples) {
// Get the index of this particular thread
int i = blockDim.x * blockIdx.x + threadIdx.x;
int s = i / this_nodes;
int n = i % this_nodes;
// Only do work if still within range
if (s < n_samples && n < this_nodes) {
// First, compute the delta for this specific node and sample pair
double output_val = *((double*) ((char*) layer_outputs + s * layer_outputs_pitch) + n);
double expected_val = *((double*) ((char*) expected + s * expected_pitch) + n);
double delta = (expected_val - output_val) * output_val * (1 - output_val);
// Compute the change in biases (aka, store the deltas for the next node)
double* delta_biases_ptr = (double*) ((char*) delta_biases + s * delta_biases_pitch) + n;
*delta_biases_ptr = delta;
// Compute the weight updates
for (size_t prev_n = 0; prev_n < prev_nodes; prev_n++) {
            double* delta_weight_ptr = (double*) ((char*) delta_weights + (s * prev_nodes + prev_n) * delta_weights_pitch) + n;
            double input_val = *((double*) ((char*) layer_inputs + s * layer_inputs_pitch) + prev_n);
*delta_weight_ptr = input_val * delta;
}
}
}
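/* Derivation of the delta used above, assuming the usual mean-square-error / sigmoid
 * conventions this file states: with E = 1/2 * (expected - output)^2 and
 * output = 1 / (1 + exp(-z)), we have dE/d(output) = -(expected - output) and
 * d(output)/dz = output * (1 - output), so dE/dz = -(expected - output) * output * (1 - output).
 * The kernel stores delta = (expected - output) * output * (1 - output), i.e. the negative
 * gradient, presumably so the later update step can add it scaled by the learning rate.
 */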
/* Kernel that computes a hidden layer-backward pass. This version implements Mean Square Error, and assumes
* a sigmoid activation function. Also note that the reduction of delta_weights should be done using a later
* kernel. Finally, note that the deltas are not explicitly returned, as they are equal to the delta_bias for this node.
* Parameters:
* @param delta_biases: a 2D, pitched matrix containing the delta_biases computed for each node in the output layer and each sample.
* @param delta_biases_pitch: the pitch of the delta_biases matrix.
* @param delta_weights: a 3D, pitched tensor containing the weight updates for the last layer across all samples.
* @param delta_weights_pitch: the pitch for the delta_weights 3D-array.
* @param deltas: a 2D, pitched matrix containing the deltas computed for each node / sample pair in the previous layer.
* @param deltas_pitch: the pitch of the deltas matrix.
* @param weights: a 2D, pitched matrix containing the weights from this layer to the next one.
* @param weights_pitch: pitch for the weights array.
* @param layer_inputs: a 2D, pitched matrix containing the inputs for the last layer.
* @param layer_inputs_pitch: pitch for the layer_inputs.
* @param layer_outputs: a 2D, pitched matrix containing the outputs for the last layer.
* @param layer_outputs_pitch: pitch for the layer_inputs.
* @param prev_nodes: number of nodes in the layer before this one
* @param this_nodes: number of nodes in this layer
* @param next_nodes: number of nodes in the layer after this one
* @param n_samples: total number of samples to process
*/
__global__ void BckPassHiddenKernel(double* delta_biases, size_t delta_biases_pitch,
double* delta_weights, size_t delta_weights_pitch,
double* deltas, size_t deltas_pitch,
double* weights, size_t weights_pitch,
double* layer_inputs, size_t layer_inputs_pitch,
double* layer_outputs, size_t layer_outputs_pitch,
size_t prev_nodes, size_t this_nodes, size_t next_nodes,
size_t n_samples) {
// Get the index of this particular thread
int i = blockDim.x * blockIdx.x + threadIdx.x;
int s = i / this_nodes;
int n = i % this_nodes;
// Only do work if still within range
if (s < n_samples && n < this_nodes) {
// Take the weighted sum of all connection of that node with this layer (10 iterations)
double error = 0;
for (size_t next_n = 0; next_n < next_nodes; next_n++) {
            double deltas_val = *((double*) ((char*) deltas + s * deltas_pitch) + next_n);
double weight_val = *((double*) ((char*) weights + n * weights_pitch) + next_n);
error += deltas_val * weight_val;
}
// Multiply the error with the derivative of the activation function to find the result
double output_val = *((double*) ((char*) layer_outputs + s * layer_outputs_pitch) + n);
double delta = error * output_val * (1 - output_val);
// Compute the change in biases (aka, store the deltas for the next node)
double* delta_biases_ptr = (double*) ((char*) delta_biases + s * delta_biases_pitch) + n;
*delta_biases_ptr = delta;
// Compute the weight updates
for (size_t prev_n = 0; prev_n < prev_nodes; prev_n++) {
            double* delta_weight_ptr = (double*) ((char*) delta_weights + (s * prev_nodes + prev_n) * delta_weights_pitch) + n;
            double input_val = *((double*) ((char*) layer_inputs + s * layer_inputs_pitch) + prev_n);
*delta_weight_ptr = input_val * delta;
}
}
}
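/* For the hidden layer above, the chain rule through the next layer's weights gives
 * dE/dz_n = (sum over next-layer nodes k of delta_k * w[n][k]) * output_n * (1 - output_n),
 * where delta_k are the (negative-gradient) deltas described in the parameter list.
 * This is exactly the error * output_val * (1 - output_val) expression computed in the kernel.
 */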
/***** NEURAL NETWORK OPERATIONS *****/
// Cuda memory help from https://stackoverflow.com/questions/16119943/how-and-when-should-i-use-pitched-pointer-with-the-cuda-api
void nn_train(neural_net* nn, size_t n_samples, array* inputs[n_samples], array* expected[n_samples], double learning_rate, size_t n_iterations, double (*act)(double), double (*dydx_act)(double)) {
/***** GPU MEMORY INITIALISATION *****/
/* BIASES */
// Copy the biases of the network to the GPU. Since the lists have different lengths, it is important to not make it a 2D-array.
double* biases[nn->n_weights];
for (size_t l = 0; l < nn->n_layers - 1; l++) {
hipMalloc((void**) (biases + l), sizeof(double) * nn->nodes_per_layer[l + 1]);
hipMemcpy((void*) biases[l], nn->biases[l]->d, sizeof(double) * nn->nodes_per_layer[l + 1], hipMemcpyHostToDevice);
}
/* WEIGHTS */
// Also copy the weights in practically the same way, except that now the inner values are pitched arrays.
// We store the pitches in a similarly lengthy weights_pitches list.
double* weights[nn->n_weights];
size_t weights_pitches[nn->n_weights];
for (size_t l = 0; l < nn->n_weights; l++) {
size_t w = sizeof(double) * nn->nodes_per_layer[l];
size_t h = nn->nodes_per_layer[l + 1];
hipMallocPitch((void**) (weights + l), weights_pitches + l, w, h);
hipMemcpy2D((void*) weights[l], weights_pitches[l], (void*) nn->weights[l]->data, w, w, h, hipMemcpyHostToDevice);
}
/* LAYER OUTPUTS */
// The layer outputs is for every layer a matrix of samples by nodes_for_that_layer elements.
// Just as with the weights do we use pitches, and create a list to store those. Note that
// only the inputs need to be copied, as the rest serves as a buffer.
double* layer_outputs[nn->n_layers];
size_t layer_outputs_pitches[nn->n_layers];
for (size_t l = 0; l < nn->n_layers; l++) {
size_t w = sizeof(double) * nn->nodes_per_layer[l];
size_t h = n_samples;
hipMallocPitch((void**) layer_outputs + l, layer_outputs_pitches + l, w, h);
}
    // Copy all sample inputs. Because of the unhappy alignment of inputs, we have to do this manually row-by-row.
for (size_t s = 0; s < n_samples; s++) {
double* ptr = (double*) ((char*) layer_outputs[0] + s * layer_outputs_pitches[0]);
hipMemcpy((void*) ptr, (void*) inputs[s]->d, sizeof(double) * inputs[s]->size, hipMemcpyHostToDevice);
}
/* DELTA BIASES */
// We also have to declare the delta biases. Simultaneously, we allocate a host-side, zero-filled counterpart,
// so that resetting the deltas is nothing more than copying 0 over the old values.
double* delta_biases[nn->n_weights];
size_t delta_biases_pitches[nn->n_weights];
double* delta_biases_zero[nn->n_weights];
for (size_t l = 0; l < nn->n_weights; l++) {
// First, we allocate all memory
size_t this_nodes = nn->nodes_per_layer[l + 1];
hipMallocPitch((void**) (delta_biases + l), delta_biases_pitches + l, sizeof(double) * this_nodes, n_samples);
delta_biases_zero[l] = (double*) malloc(sizeof(double) * this_nodes * n_samples);
// Set the host-side array to 0
for (size_t s = 0; s < n_samples; s++) {
for (size_t n = 0; n < this_nodes; n++) {
delta_biases_zero[l][s * this_nodes + n] = 0;
}
}
}
/* DELTA WEIGHTS */
// Declare the delta weights. Note that we pitch a 3D-array here. Not pretty, but better than
// the endless structs 3D require from us. Just as with the delta biases, create a host-side
// zero-filled one that is used for resetting.
double* delta_weights[nn->n_weights];
size_t delta_weights_pitches[nn->n_weights];
double* delta_weights_zero[nn->n_weights];
for (size_t l = 0; l < nn->n_weights; l++) {
// Prepare CUDA structs for allocation
size_t w = nn->nodes_per_layer[l + 1];
size_t h = nn->nodes_per_layer[l];
size_t d = n_samples;
// First, we allocate all memory
hipMallocPitch((void**) (delta_weights + l), delta_weights_pitches + l, sizeof(double) * w, h * d);
delta_weights_zero[l] = (double*) malloc(sizeof(double) * w * h * d);
// Set the host-side array to 0
for (size_t z = 0; z < d; z++) {
for (size_t y = 0; y < h; y++) {
for (size_t x = 0; x < w; x++) {
delta_weights_zero[l][z * (w * h) + y * w + x] = 0;
}
}
}
}
/* EXPECTED */
// The expected values are formatted as a 2D, n_samples x nodes_in_output_layer pitched matrix.
double* expected_gpu;
size_t expected_gpu_pitch;
hipMallocPitch((void**) &expected_gpu, &expected_gpu_pitch, sizeof(double) * nn->nodes_per_layer[nn->n_layers - 1], n_samples);
// Copy all expected values for each sample, which we have to do row-by-row due to unfortunate formatting of expected
for (size_t s = 0; s < n_samples; s++) {
double* ptr = (double*) ((char*) expected_gpu + s * expected_gpu_pitch);
hipMemcpy((void*) ptr, (void*) expected[s]->d, sizeof(double) * expected[s]->size, hipMemcpyHostToDevice);
}
/***** ITERATIONS *****/
// Choose block and grid size: one thread per (sample, node) pair, sized for the largest layer
int threadsPerBlock = 256;
size_t max_nodes = 1;
for (size_t l = 1; l < nn->n_layers; l++) { if (nn->nodes_per_layer[l] > max_nodes) { max_nodes = nn->nodes_per_layer[l]; } }
int blocksPerGrid = (n_samples * max_nodes + threadsPerBlock - 1) / threadsPerBlock;
// Perform the training for n_iterations (always) (20,000 iterations, non-parallelizable)
for (size_t i = 0; i < n_iterations; i++) {
/***** FORWARD PASS *****/
// Loop through all layers forwardly so that we can compute errors later (2 iterations, non-parallelizable)
for (size_t l = 1; l < nn->n_layers; l++) {
// Call upon the activation kernel (should do 1797 x 20 elements for first iteration, 1797 x 10 elements for second)
hipLaunchKernelGGL(( FwdPassKernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0,
layer_outputs[l], layer_outputs_pitches[l],
biases[l - 1],
weights[l - 1], weights_pitches[l - 1],
layer_outputs[l - 1], layer_outputs_pitches[l - 1],
nn->nodes_per_layer[l - 1], nn->nodes_per_layer[l], n_samples
);
}
/***** BACKWARD PASS *****/
// Reset the delta biases and delta weights by copying the host-side, 0-filled ones over them
for (size_t l = 0; l < nn->n_weights; l++) {
size_t w = sizeof(double) * nn->nodes_per_layer[l + 1];
hipMemcpy2D((void*) delta_biases[l], delta_biases_pitches[l], (void*) delta_biases_zero[l], w, w, n_samples, hipMemcpyHostToDevice);
hipMemcpy2D((void*) delta_weights[l], delta_weights_pitches[l], (void*) delta_weights_zero[l], w, w, nn->nodes_per_layer[l] * n_samples, hipMemcpyHostToDevice);
}
// Then, compute the error at the output layer (1797 x 10 iterations)
size_t l = nn->n_layers - 1;
hipLaunchKernelGGL(( BckPassOutputKernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0,
delta_biases[l - 1], delta_biases_pitches[l - 1],
delta_weights[l - 1], delta_weights_pitches[l - 1],
layer_outputs[l - 1], layer_outputs_pitches[l - 1],
layer_outputs[l], layer_outputs_pitches[l],
expected_gpu, expected_gpu_pitch,
nn->nodes_per_layer[nn->n_layers - 2], nn->nodes_per_layer[nn->n_layers - 1], n_samples
);
// Loop through all hidden layers in the other direction so that we can compute their weight updates (1 iteration, non-parallelizable)
for (l = nn->n_layers - 2; l > 0; l--) {
hipLaunchKernelGGL(( BckPassHiddenKernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0,
delta_biases[l - 1], delta_biases_pitches[l - 1],
delta_weights[l - 1], delta_weights_pitches[l - 1],
delta_biases[l], delta_biases_pitches[l],
weights[l - 1], weights_pitches[l - 1],
layer_outputs[l - 1], layer_outputs_pitches[l - 1],
layer_outputs[l], layer_outputs_pitches[l],
nn->nodes_per_layer[l - 1], nn->nodes_per_layer[l], nn->nodes_per_layer[l + 1],
n_samples
);
}
/***** WEIGHT UPDATES *****/
// Actually update the weights, and reset the delta updates to 0 for next iteration (2 iterations)
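// NOTE: delta_biases / delta_weights are per-sample device buffers; the update below
// assumes they have already been reduced over the samples and made available on the
// host (that reduction/copy step is not shown in this listing).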
for (size_t l = 1; l < nn->n_layers; l++) {
// 20 for first iteration of l, 10 for second iteration of l
for (size_t n = 0; n < nn->nodes_per_layer[l]; n++) {
nn->biases[l - 1]->d[n] += delta_biases[l - 1]->d[n] * learning_rate;
// 64 for first iteration of l, 20 for second iteration of l
for (size_t prev_n = 0; prev_n < nn->nodes_per_layer[l - 1]; prev_n++) {
INDEX(nn->weights[l - 1], prev_n, n) += INDEX(delta_weights[l - 1], prev_n, n) * learning_rate;
}
}
}
}
/***** CLEANUP *****/
/* BIASES & WEIGHTS */
// Simply loop through all layers (except the last one), and clean everything weight & bias related.
for (size_t l = 0; l < nn->n_layers - 1; l++) {
// Free the device-side stuff
hipFree(biases[l]);
hipFree(delta_biases[l]);
hipFree(weights[l]);
hipFree(delta_weights[l]);
// But don't forget the two host-side arrays
free(delta_weights_zero[l]);
free(delta_biases_zero[l]);
}
/* LAYER OUTPUTS */
for (size_t l = 0; l < nn->n_layers; l++) {
hipFree(layer_outputs[l]);
}
/* EXPECTED */
hipFree(expected_gpu);
}
|
c3178a979cc35fd9eb6503aa826c6419855f716e.cu
|
/* NEURAL NETWORK CUDA GPU 1.cu
* by Lut99
*
* Created:
* 5/25/2020, 9:30:27 PM
* Last edited:
* 5/25/2020, 9:42:54 PM
* Auto updated?
* Yes
*
* Description:
* This version implements the CUDA parts of the
* NeuralNetwork_CUDA_GPU1.c variation on the NeuralNetwork
* implementation. Specifically, it provides code for the NeuralNetwork's
* train function, and the kernels used there.
**/
#include "NeuralNetwork.h"
/***** CUDA KERNELS *****/
/* Kernel that computes the forward pass for a single layer. This version implements a sigmoid activation function.
* Parameters:
* @param outputs: a 2D, pitched array which will store the output of this layer (columns) for every sample (rows)
* @param outputs_pitch: the pitch of the outputs array
* @param biases: a list of biases for this layer
* @param weights: a pitched matrix of weights for this layer to the next
* @param weights_pitch: the pitch for this weights matrix
* @param inputs: a 2D, pitched array with inputs from the previous layer (columns) for every sample (rows)
* @param inputs_pitch: the pitch of the inputs array
* @param prev_nodes: number of nodes in the layer before this one
* @param this_nodes: number of nodes in this layer
* @param n_samples: total number of samples to process
*/
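// Launch note: each thread handles one (sample, node) pair, so the launch is expected
// to cover at least n_samples * this_nodes threads in total.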
__global__ void FwdPassKernel(double* outputs, size_t outputs_pitch,
double* biases,
double* weights, size_t weights_pitch,
double* inputs, size_t inputs_pitch,
size_t prev_nodes, size_t this_nodes, size_t n_samples) {
// Get the index of this particular thread
int i = blockDim.x * blockIdx.x + threadIdx.x;
int s = i / this_nodes;
int n = i % this_nodes;
// Only do work if still within range
if (s < n_samples && n < this_nodes) {
// Sum the weighted inputs for this node (64 first iteration of l, 20 for second iteration)
double z = biases[n];
for (size_t prev_n = 0; prev_n < prev_nodes; prev_n++) {
double input_val = *((double*) ((char*) inputs + s * inputs_pitch) + prev_n);
double weight_val = *((double*) ((char*) weights + prev_n * weights_pitch) + n);
z += input_val * weight_val;
}
// Run the activation function over this input and store it in the output (using sigmoid)
double* output_ptr = (double*) ((char*) outputs + s * outputs_pitch) + n;
*output_ptr = 1 / (1 + exp(-z));
}
}
/* Kernel that computes the output layer-backward pass. This version implements Mean Square Error, and assumes
* a sigmoid activation function. Also note that the reduction of delta_weights should be done using a later
* kernel. Finally, note that the deltas are not explicitly returned, as they are equal to the delta_bias for this node.
* Parameters:
* @param delta_biases: a 2D, pitched matrix containing the delta_biases computed for each node in the output layer and each sample.
* @param delta_biases_pitch: the pitch of the delta_biases matrix.
* @param delta_weights: a 3D, pitched tensor containing the weight updates for the last layer across all samples.
* @param delta_weights_pitch: the pitch for the delta_weights 3D-array.
* @param layer_inputs: a 2D, pitched matrix containing the inputs for the last layer.
* @param layer_inputs_pitch: pitch for the layer_inputs.
* @param layer_outputs: a 2D, pitched matrix containing the outputs for the last layer.
* @param layer_outputs_pitch: pitch for the layer_inputs.
* @param expected: a 2D, pitched matrix containing the expected outputs for the output layer.
* @param expected_pitch: the pitch of the expected matrix.
* @param prev_nodes: number of nodes in the layer before this one
* @param this_nodes: number of nodes in this layer
* @param n_samples: total number of samples to process
*/
__global__ void BckPassOutputKernel(double* delta_biases, size_t delta_biases_pitch,
double* delta_weights, size_t delta_weights_pitch,
double* layer_inputs, size_t layer_inputs_pitch,
double* layer_outputs, size_t layer_outputs_pitch,
double* expected, size_t expected_pitch,
size_t prev_nodes, size_t this_nodes, size_t n_samples) {
// Get the index of this particular thread
int i = blockDim.x * blockIdx.x + threadIdx.x;
int s = i / this_nodes;
int n = i % this_nodes;
// Only do work if still within range
if (s < n_samples && n < this_nodes) {
// First, compute the delta for this specific node and sample pair
double output_val = *((double*) ((char*) layer_outputs + s * layer_outputs_pitch) + n);
double expected_val = *((double*) ((char*) expected + s * expected_pitch) + n);
double delta = (expected_val - output_val) * output_val * (1 - output_val);
// Compute the change in biases (aka, store the deltas for the next node)
double* delta_biases_ptr = (double*) ((char*) delta_biases + s * delta_biases_pitch) + n;
*delta_biases_ptr = delta;
// Compute the weight updates
for (size_t prev_n = 0; prev_n < prev_nodes; prev_n++) {
double* delta_weight_ptr = (double*) ((char*) delta_weights + (s * prev_nodes + prev_n) * delta_weights_pitch) + n;
double input_val = *((double*) ((char*) layer_inputs + s * layer_inputs_pitch) + prev_n);
*delta_weight_ptr = input_val * delta;
}
}
}
/* Kernel that computes a hidden layer-backward pass. This version implements Mean Square Error, and assumes
* a sigmoid activation function. Also note that the reduction of delta_weights should be done using a later
* kernel. Finally, note that the deltas are not explicitly returned, as they are equal to the delta_bias for this node.
* Parameters:
* @param delta_biases: a 2D, pitched matrix containing the delta_biases computed for each node in the output layer and each sample.
* @param delta_biases_pitch: the pitch of the delta_biases matrix.
* @param delta_weights: a 3D, pitched tensor containing the weight updates for the last layer across all samples.
* @param delta_weights_pitch: the pitch for the delta_weights 3D-array.
* @param deltas: a 2D, pitched matrix containing the deltas computed for each node / sample pair in the previous layer.
* @param deltas_pitch: the pitch of the deltas matrix.
* @param weights: a 2D, pitched matrix containing the weights from this layer to the next one.
* @param weights_pitch: pitch for the weights array.
* @param layer_inputs: a 2D, pitched matrix containing the inputs for the last layer.
* @param layer_inputs_pitch: pitch for the layer_inputs.
* @param layer_outputs: a 2D, pitched matrix containing the outputs for the last layer.
* @param layer_outputs_pitch: pitch for the layer_inputs.
* @param prev_nodes: number of nodes in the layer before this one
* @param this_nodes: number of nodes in this layer
* @param next_nodes: number of nodes in the layer after this one
* @param n_samples: total number of samples to process
*/
__global__ void BckPassHiddenKernel(double* delta_biases, size_t delta_biases_pitch,
double* delta_weights, size_t delta_weights_pitch,
double* deltas, size_t deltas_pitch,
double* weights, size_t weights_pitch,
double* layer_inputs, size_t layer_inputs_pitch,
double* layer_outputs, size_t layer_outputs_pitch,
size_t prev_nodes, size_t this_nodes, size_t next_nodes,
size_t n_samples) {
// Get the index of this particular thread
int i = blockDim.x * blockIdx.x + threadIdx.x;
int s = i / this_nodes;
int n = i % this_nodes;
// Only do work if still within range
if (s < n_samples && n < this_nodes) {
// Take the weighted sum of all connection of that node with this layer (10 iterations)
double error = 0;
for (size_t next_n = 0; next_n < next_nodes; next_n++) {
double deltas_val = *((double*) ((char*) deltas + s * deltas_pitch) + next_n);
double weight_val = *((double*) ((char*) weights + n * weights_pitch) + next_n);
error += deltas_val * weight_val;
}
// Multiply the error with the derivative of the activation function to find the result
double output_val = *((double*) ((char*) layer_outputs + s * layer_outputs_pitch) + n);
double delta = error * output_val * (1 - output_val);
// Compute the change in biases (aka, store the deltas for the next node)
double* delta_biases_ptr = (double*) ((char*) delta_biases + s * delta_biases_pitch) + n;
*delta_biases_ptr = delta;
// Compute the weight updates
for (size_t prev_n = 0; prev_n < prev_nodes; prev_n++) {
double* delta_weight_ptr = (double*) ((char*) delta_weights + (s * prev_nodes + prev_n) * delta_weights_pitch) + n;
double input_val = *((double*) ((char*) layer_inputs + s * layer_inputs_pitch) + prev_n);
*delta_weight_ptr = input_val * delta;
}
}
}
/***** NEURAL NETWORK OPERATIONS *****/
// Cuda memory help from https://stackoverflow.com/questions/16119943/how-and-when-should-i-use-pitched-pointer-with-the-cuda-api
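// Pitched-memory convention used below: for a 2D array allocated with cudaMallocPitch,
// row r starts at
//   (double*) ((char*) base + r * pitch)
// and element c of that row is then indexed directly; every kernel and 2D memcpy in
// this file follows that access pattern.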
void nn_train(neural_net* nn, size_t n_samples, array* inputs[n_samples], array* expected[n_samples], double learning_rate, size_t n_iterations, double (*act)(double), double (*dydx_act)(double)) {
/***** GPU MEMORY INITIALISATION *****/
/* BIASES */
// Copy the biases of the network to the GPU. Since the lists have different lengths, it is important to not make it a 2D-array.
double* biases[nn->n_weights];
for (size_t l = 0; l < nn->n_layers - 1; l++) {
cudaMalloc((void**) (biases + l), sizeof(double) * nn->nodes_per_layer[l + 1]);
cudaMemcpy((void*) biases[l], nn->biases[l]->d, sizeof(double) * nn->nodes_per_layer[l + 1], cudaMemcpyHostToDevice);
}
/* WEIGHTS */
// Also copy the weights in practically the same way, except that now the inner values are pitched arrays.
// We store the pitches in a similarly lengthy weights_pitches list.
double* weights[nn->n_weights];
size_t weights_pitches[nn->n_weights];
for (size_t l = 0; l < nn->n_weights; l++) {
size_t w = sizeof(double) * nn->nodes_per_layer[l];
size_t h = nn->nodes_per_layer[l + 1];
cudaMallocPitch((void**) (weights + l), weights_pitches + l, w, h);
cudaMemcpy2D((void*) weights[l], weights_pitches[l], (void*) nn->weights[l]->data, w, w, h, cudaMemcpyHostToDevice);
}
/* LAYER OUTPUTS */
// The layer outputs is for every layer a matrix of samples by nodes_for_that_layer elements.
// Just as with the weights do we use pitches, and create a list to store those. Note that
// only the inputs need to be copied, as the rest serves as a buffer.
double* layer_outputs[nn->n_layers];
size_t layer_outputs_pitches[nn->n_layers];
for (size_t l = 0; l < nn->n_layers; l++) {
size_t w = sizeof(double) * nn->nodes_per_layer[l];
size_t h = n_samples;
cudaMallocPitch((void**) layer_outputs + l, layer_outputs_pitches + l, w, h);
}
// Copy all sample inputs. Because of the unhappy alignment of inputs, we have to do this manually row-by-row.
for (size_t s = 0; s < n_samples; s++) {
double* ptr = (double*) ((char*) layer_outputs[0] + s * layer_outputs_pitches[0]);
cudaMemcpy((void*) ptr, (void*) inputs[s]->d, sizeof(double) * inputs[s]->size, cudaMemcpyHostToDevice);
}
/* DELTA BIASES */
// We also have to declare the delta biases. Simultaneously, we allocate a host-side, zero-filled counterpart,
// so that resetting the deltas is nothing more than copying 0 over the old values.
double* delta_biases[nn->n_weights];
size_t delta_biases_pitches[nn->n_weights];
double* delta_biases_zero[nn->n_weights];
for (size_t l = 0; l < nn->n_weights; l++) {
// First, we allocate all memory
size_t this_nodes = nn->nodes_per_layer[l + 1];
cudaMallocPitch((void**) (delta_biases + l), delta_biases_pitches + l, sizeof(double) * this_nodes, n_samples);
delta_biases_zero[l] = (double*) malloc(sizeof(double) * this_nodes * n_samples);
// Set the host-side array to 0
for (size_t s = 0; s < n_samples; s++) {
for (size_t n = 0; n < this_nodes; n++) {
delta_biases_zero[l][s * this_nodes + n] = 0;
}
}
}
/* DELTA WEIGHTS */
// Declare the delta weights. Note that we pitch a 3D-array here. Not pretty, but better than
// the endless structs 3D require from us. Just as with the delta biases, create a host-side
// zero-filled one that is used for resetting.
double* delta_weights[nn->n_weights];
size_t delta_weights_pitches[nn->n_weights];
double* delta_weights_zero[nn->n_weights];
for (size_t l = 0; l < nn->n_weights; l++) {
// Prepare CUDA structs for allocation
size_t w = nn->nodes_per_layer[l + 1];
size_t h = nn->nodes_per_layer[l];
size_t d = n_samples;
// First, we allocate all memory
cudaMallocPitch((void**) (delta_weights + l), delta_weights_pitches + l, sizeof(double) * w, h * d);
delta_weights_zero[l] = (double*) malloc(sizeof(double) * w * h * d);
// Set the host-side array to 0
for (size_t z = 0; z < d; z++) {
for (size_t y = 0; y < h; y++) {
for (size_t x = 0; x < w; x++) {
delta_weights_zero[l][z * (w * h) + y * w + x] = 0;
}
}
}
}
/* EXPECTED */
// The expected values are formatted as a 2D, n_samples x nodes_in_output_layer pitched matrix.
double* expected_gpu;
size_t expected_gpu_pitch;
cudaMallocPitch((void**) &expected_gpu, &expected_gpu_pitch, sizeof(double) * nn->nodes_per_layer[nn->n_layers - 1], n_samples);
// Copy all expected values for each sample, which we have to do row-by-row due to unfortunate formatting of expected
for (size_t s = 0; s < n_samples; s++) {
double* ptr = (double*) ((char*) expected_gpu + s * expected_gpu_pitch);
cudaMemcpy((void*) ptr, (void*) expected[s]->d, sizeof(double) * expected[s]->size, cudaMemcpyHostToDevice);
}
/***** ITERATIONS *****/
// Choose block and grid size: one thread per (sample, node) pair, sized for the largest layer
int threadsPerBlock = 256;
size_t max_nodes = 1;
for (size_t l = 1; l < nn->n_layers; l++) { if (nn->nodes_per_layer[l] > max_nodes) { max_nodes = nn->nodes_per_layer[l]; } }
int blocksPerGrid = (n_samples * max_nodes + threadsPerBlock - 1) / threadsPerBlock;
// Perform the training for n_iterations (always) (20,000 iterations, non-parallelizable)
for (size_t i = 0; i < n_iterations; i++) {
/***** FORWARD PASS *****/
// Loop through all layers forwardly so that we can compute errors later (2 iterations, non-parallelizable)
for (size_t l = 1; l < nn->n_layers; l++) {
// Call upon the activation kernel (should do 1797 x 20 elements for first iteration, 1797 x 10 elements for second)
FwdPassKernel<<<blocksPerGrid, threadsPerBlock>>>(
layer_outputs[l], layer_outputs_pitches[l],
biases[l - 1],
weights[l - 1], weights_pitches[l - 1],
layer_outputs[l - 1], layer_outputs_pitches[l - 1],
nn->nodes_per_layer[l - 1], nn->nodes_per_layer[l], n_samples
);
}
/***** BACKWARD PASS *****/
// Reset the delta biases and delta weights by copying the host-side, 0-filled ones over them
for (size_t l = 0; l < nn->n_weights; l++) {
size_t w = sizeof(double) * nn->nodes_per_layer[l + 1];
cudaMemcpy2D((void*) delta_biases[l], delta_biases_pitches[l], (void*) delta_biases_zero[l], w, w, n_samples, cudaMemcpyHostToDevice);
cudaMemcpy2D((void*) delta_weights[l], delta_weights_pitches[l], (void*) delta_weights_zero[l], w, w, nn->nodes_per_layer[l] * n_samples, cudaMemcpyHostToDevice);
}
// Then, compute the error at the output layer (1797 x 10 iterations)
size_t l = nn->n_layers - 1;
BckPassOutputKernel<<<blocksPerGrid, threadsPerBlock>>>(
delta_biases[l - 1], delta_biases_pitches[l - 1],
delta_weights[l - 1], delta_weights_pitches[l - 1],
layer_outputs[l - 1], layer_outputs_pitches[l - 1],
layer_outputs[l], layer_outputs_pitches[l],
expected_gpu, expected_gpu_pitch,
nn->nodes_per_layer[nn->n_layers - 2], nn->nodes_per_layer[nn->n_layers - 1], n_samples
);
// Loop through all hidden layers in the other direction so that we can compute their weight updates (1 iteration, non-parallelizable)
for (l = nn->n_layers - 2; l > 0; l--) {
BckPassHiddenKernel<<<blocksPerGrid, threadsPerBlock>>>(
delta_biases[l - 1], delta_biases_pitches[l - 1],
delta_weights[l - 1], delta_weights_pitches[l - 1],
delta_biases[l], delta_biases_pitches[l],
weights[l - 1], weights_pitches[l - 1],
layer_outputs[l - 1], layer_outputs_pitches[l - 1],
layer_outputs[l], layer_outputs_pitches[l],
nn->nodes_per_layer[l - 1], nn->nodes_per_layer[l], nn->nodes_per_layer[l + 1],
n_samples
);
}
/***** WEIGHT UPDATES *****/
// Actually update the weights, and reset the delta updates to 0 for next iteration (2 iterations)
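// NOTE: delta_biases / delta_weights are per-sample device buffers; the update below
// assumes they have already been reduced over the samples and made available on the
// host (that reduction/copy step is not shown in this listing).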
for (size_t l = 1; l < nn->n_layers; l++) {
// 20 for first iteration of l, 10 for second iteration of l
for (size_t n = 0; n < nn->nodes_per_layer[l]; n++) {
nn->biases[l - 1]->d[n] += delta_biases[l - 1]->d[n] * learning_rate;
// 64 for first iteration of l, 20 for second iteration of l
for (size_t prev_n = 0; prev_n < nn->nodes_per_layer[l - 1]; prev_n++) {
INDEX(nn->weights[l - 1], prev_n, n) += INDEX(delta_weights[l - 1], prev_n, n) * learning_rate;
}
}
}
}
/***** CLEANUP *****/
/* BIASES & WEIGHTS */
// Simply loop through all layers (except the last one), and clean everything weight & bias related.
for (size_t l = 0; l < nn->n_layers - 1; l++) {
// Free the device-side stuff
cudaFree(biases[l]);
cudaFree(delta_biases[l]);
cudaFree(weights[l]);
cudaFree(delta_weights[l]);
// But don't forget the two host-side arrays
free(delta_weights_zero[l]);
free(delta_biases_zero[l]);
}
/* LAYER OUTPUTS */
for (size_t l = 0; l < nn->n_layers; l++) {
cudaFree(layer_outputs[l]);
}
/* EXPECTED */
cudaFree(expected_gpu);
}
|
bee9f22ac60e5924cac7bf66cbbe90f15a5ad54e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/hip/mem_eff_attention/kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, true, 128, 128, 128>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, true, 128, 128, 128>::kMinBlocksPerSm)
fmha_cutlassB_bf16_aligned_128x128_k128_dropout_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, true, 128, 128, 128>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 900
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, true, 128, 128, 128>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_bf16_aligned_128x128_k128_dropout_sm80` is for sm80-sm90, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, false, 64, 64, 128>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, false, 64, 64, 128>::kMinBlocksPerSm)
fmha_cutlassB_bf16_aligned_64x64_k128_dropout_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, false, 64, 64, 128>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 900
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, false, 64, 64, 128>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_bf16_aligned_64x64_k128_dropout_sm80` is for sm80-sm90, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
|
bee9f22ac60e5924cac7bf66cbbe90f15a5ad54e.cu
|
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/cuda/mem_eff_attention/kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, true, 128, 128, 128>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, true, 128, 128, 128>::kMinBlocksPerSm)
fmha_cutlassB_bf16_aligned_128x128_k128_dropout_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, true, 128, 128, 128>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 900
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, true, 128, 128, 128>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_bf16_aligned_128x128_k128_dropout_sm80` is for sm80-sm90, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, false, 64, 64, 128>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, false, 64, 64, 128>::kMinBlocksPerSm)
fmha_cutlassB_bf16_aligned_64x64_k128_dropout_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, false, 64, 64, 128>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 900
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, false, 64, 64, 128>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_bf16_aligned_64x64_k128_dropout_sm80` is for sm80-sm90, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
|
36dc9fdfa3e6a0b39efcdd2a4bbc830f97a9fe65.hip
|
// !!! This is a file automatically generated by hipify!!!
// Inclusions
#include <stdlib.h>
#include <stdio.h>
#include <assert.h>
#include <math.h>
// CUDA NVIDIA lib
#include <hip/hip_runtime.h>
// Common lib
#include "common_gpu.h"
// Adapted mesh for GPU
//#include <matrixCells_gpu.h>
// Number of threads (as in Vanilla)
#define NUM_THREADS 256
// Auxiliary constant for defining mesh dimensions (for 1 axis)
const double meshDim = 2.0 * cutoff;
// Auxiliary parameters
int xmesh, Nmeshs;
double meshSize;
// Size initialization
extern double size;
//
// benchmarking program
//
//////////////////////////////////////////////////////////////////
//
// Main program
//
//////////////////////////////////////////////////////////////////
int main( int argc, char **argv )
{
//////////////////////////////////////////////////////////////////
//
// Same as Vanilla section
//
//////////////////////////////////////////////////////////////////
// This takes a few seconds to initialize the runtime
hipDeviceSynchronize();
if( find_option( argc, argv, "-h" ) >= 0 )
{
printf( "Options:\n" );
printf( "-h to see this help\n" );
printf( "-n <int> to set the number of particles\n" );
printf( "-o <filename> to specify the output file name\n" );
return 0;
}
// Total number of particles
int n = read_int( argc, argv, "-n", 1000 );
// Name of the output file
char *savename = read_string( argc, argv, "-o", NULL );
FILE *fsave = savename ? fopen( savename, "w" ) : NULL;
particle_t *particles = (particle_t*) malloc( n * sizeof(particle_t) );
// GPU particle data structure
particle_t * d_particles;
hipMalloc((void **) &d_particles, n * sizeof(particle_t));
// Number of particles n
set_size( n );
// Initialize particles (not optimized)
init_particles( n, particles );
// Synchronization
hipDeviceSynchronize();
double copy_time = read_timer( );
// Copy the particles to the GPU
hipMemcpy(d_particles, particles, n * sizeof(particle_t), hipMemcpyHostToDevice);
// Synchronization
hipDeviceSynchronize();
copy_time = read_timer( ) - copy_time;
//////////////////////////////////////////////////////////////////
//
// End same as Vanilla section
//
//////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////
//
// Meshs are initialized
//
//////////////////////////////////////////////////////////////////
// Mesh initialization time stats
double meshInitTime = read_timer();
// Auxiliary parameters initialization
// One direction dimension for the mesh
xmesh = (int) ceil (size * 1.0 / meshDim);
// Total number of grids by multiplying xmesh * xmesh
Nmeshs = xmesh * xmesh;
// Mesh size
meshSize = size / xmesh;
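// Intent of the binning: each cell is roughly 2*cutoff wide, so two particles within
// the interaction cutoff always fall in the same cell or in adjacent cells, which is
// what the mesh-based force computation exploits (the usual cell-list optimisation).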
// Compute the number of blocks based on the number of threads
int blocks = (n + NUM_THREADS - 1) / NUM_THREADS;
// Compute the number of "mesh blocks" based on the number of threads
int meshBlocks = (Nmeshs + NUM_THREADS - 1) / NUM_THREADS;
// Initialize submesh and adj pointers and allocate memory
int * submesh;
hipMalloc((void **) &submesh, Nmeshs * sizeof(int));
int * adj;
hipMalloc((void **) &adj, n * sizeof(int));
// Synchronization step
hipDeviceSynchronize();
// Clear the mesh multi-threaded: Kernel invocation with NUM_THREADS threads
// From NVIDIA: Here, each of the N threads that execute
// clear, do that with the submeshs
hipLaunchKernelGGL(( clear) , dim3(meshBlocks), dim3(NUM_THREADS) , 0, 0, Nmeshs, submesh);
// Assign the particles multi-threaded: particles are assigned to
hipLaunchKernelGGL(( push2mesh_gpu) , dim3(blocks), dim3(NUM_THREADS) , 0, 0, d_particles, n, adj, submesh, meshSize, xmesh);
hipDeviceSynchronize();
// Calculate the total amount of time spent creating the underlying data structure
meshInitTime = read_timer() - meshInitTime;
//////////////////////////////////////////////////////////////////
//
// End Mesh section
//
//////////////////////////////////////////////////////////////////
//
// simulate a number of time steps (Vanilla)
//
// Synchronization step
hipDeviceSynchronize();
// Init simulation timer
double simulation_time = read_timer( );
// Main loop
for( int step = 0; step < NSTEPS; step++ )
{
//
// compute forces
//
// Compute forces multi-threaded: forces are computed using the mesh structure
hipLaunchKernelGGL(( compute_forces_gpu) , dim3(blocks), dim3(NUM_THREADS) , 0, 0, d_particles, n, adj, submesh, meshSize, xmesh);
//
// move particles (Vanilla)
//
hipLaunchKernelGGL(( move_gpu) , dim3(blocks), dim3(NUM_THREADS) , 0, 0, d_particles, n, size);
// Update the particles inside the mesh
// Clear the meshs (multi-threaded)
hipLaunchKernelGGL(( clear) , dim3(meshBlocks), dim3(NUM_THREADS) , 0, 0, Nmeshs, submesh);
// Push particles to meshs: multi-threaded
hipLaunchKernelGGL(( push2mesh_gpu) , dim3(blocks), dim3(NUM_THREADS) , 0, 0, d_particles, n, adj, submesh, meshSize, xmesh);
//
// save if necessary (Vanilla)
//
if( fsave && (step%SAVEFREQ) == 0 ) {
// Copy the particles back to the CPU
hipMemcpy(particles, d_particles, n * sizeof(particle_t), hipMemcpyDeviceToHost);
save( fsave, n, particles);
}
}
// Synchronization step and compute total simulation time (as in Vanilla)
hipDeviceSynchronize();
simulation_time = read_timer( ) - simulation_time;
// Print information about the simulation and GPU (Vanilla + mesh info)
printf( "CPU-GPU copy time = %g seconds\n", copy_time);
printf( "GPU mesh initialization time = %g seconds\n", meshInitTime);
printf( "n = %d, simulation time = %g seconds\n", n, simulation_time );
// Release resources (Vanilla)
free( particles );
hipFree(d_particles);
// Specifics from mesh approach
hipFree(submesh);
hipFree(adj);
// Close file if open
if( fsave )
fclose( fsave );
// End of the main
return 0;
}
|
36dc9fdfa3e6a0b39efcdd2a4bbc830f97a9fe65.cu
|
// Inclusions
#include <stdlib.h>
#include <stdio.h>
#include <assert.h>
#include <math.h>
// CUDA NVIDIA lib
#include <cuda.h>
// Common lib
#include "common_gpu.h"
// Adapted mesh for GPU
//#include <matrixCells_gpu.h>
// Number of threads (as in Vanilla)
#define NUM_THREADS 256
// Auxiliary constant for defining mesh dimensions (for 1 axis)
const double meshDim = 2.0 * cutoff;
// Auxiliary parameters
int xmesh, Nmeshs;
double meshSize;
// Size initialization
extern double size;
//
// benchmarking program
//
//////////////////////////////////////////////////////////////////
//
// Main program
//
//////////////////////////////////////////////////////////////////
int main( int argc, char **argv )
{
//////////////////////////////////////////////////////////////////
//
// Same as Vanilla section
//
//////////////////////////////////////////////////////////////////
// This takes a few seconds to initialize the runtime
cudaThreadSynchronize();
if( find_option( argc, argv, "-h" ) >= 0 )
{
printf( "Options:\n" );
printf( "-h to see this help\n" );
printf( "-n <int> to set the number of particles\n" );
printf( "-o <filename> to specify the output file name\n" );
return 0;
}
// Total number of particles
int n = read_int( argc, argv, "-n", 1000 );
// Name of the output file
char *savename = read_string( argc, argv, "-o", NULL );
FILE *fsave = savename ? fopen( savename, "w" ) : NULL;
particle_t *particles = (particle_t*) malloc( n * sizeof(particle_t) );
// GPU particle data structure
particle_t * d_particles;
cudaMalloc((void **) &d_particles, n * sizeof(particle_t));
// Number of particles n
set_size( n );
// Initialize particles (not optimized)
init_particles( n, particles );
// Synchronization
cudaThreadSynchronize();
double copy_time = read_timer( );
// Copy the particles to the GPU
cudaMemcpy(d_particles, particles, n * sizeof(particle_t), cudaMemcpyHostToDevice);
// Synchronization
cudaThreadSynchronize();
copy_time = read_timer( ) - copy_time;
//////////////////////////////////////////////////////////////////
//
// End same as Vanilla section
//
//////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////
//
// Meshs are initialized
//
//////////////////////////////////////////////////////////////////
// Mesh initialization time stats
double meshInitTime = read_timer();
// Auxiliary parameters initialization
// One direction dimension for the mesh
xmesh = (int) ceil (size * 1.0 / meshDim);
// Total number of grids by multiplying xmesh * xmesh
Nmeshs = xmesh * xmesh;
// Mesh size
meshSize = size / xmesh;
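// Intent of the binning: cells are about 2*cutoff wide, so interacting particle pairs
// always share a cell or sit in adjacent cells, which the mesh-based force kernel exploits.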
// Compute the number of blocks based on the number of threads
int blocks = (n + NUM_THREADS - 1) / NUM_THREADS;
// Compute the number of "mesh blocks" based on the number of threads
int meshBlocks = (Nmeshs + NUM_THREADS - 1) / NUM_THREADS;
// Initialize submesh and adj pointers and allocate memory
int * submesh;
cudaMalloc((void **) &submesh, Nmeshs * sizeof(int));
int * adj;
cudaMalloc((void **) &adj, n * sizeof(int));
// Synchronization step
cudaThreadSynchronize();
// Clear the mesh multi-threaded: Kernel invocation with NUM_THREADS threads
// From NVIDIA: Here, each of the N threads that execute
// clear, do that with the submeshs
clear <<< meshBlocks, NUM_THREADS >>> (Nmeshs, submesh);
// Assign the particles multi-threaded: particles are assigned to
push2mesh_gpu <<< blocks, NUM_THREADS >>> (d_particles, n, adj, submesh, meshSize, xmesh);
cudaThreadSynchronize();
// Calculate the total amount of time spent creating the underlying data structure
meshInitTime = read_timer() - meshInitTime;
//////////////////////////////////////////////////////////////////
//
// End Mesh section
//
//////////////////////////////////////////////////////////////////
//
// simulate a number of time steps (Vanilla)
//
// Synchronization step
cudaThreadSynchronize();
// Init simulation timer
double simulation_time = read_timer( );
// Main loop
for( int step = 0; step < NSTEPS; step++ )
{
//
// compute forces
//
// Compute forces multi-threaded: forces are computed using the mesh structure
compute_forces_gpu <<< blocks, NUM_THREADS >>> (d_particles, n, adj, submesh, meshSize, xmesh);
//
// move particles (Vanilla)
//
move_gpu <<< blocks, NUM_THREADS >>> (d_particles, n, size);
// Update the particles inside the mesh
// Clear the meshs (multi-threaded)
clear <<< meshBlocks, NUM_THREADS >>> (Nmeshs, submesh);
// Push particles to meshs: multi-threaded
push2mesh_gpu <<< blocks, NUM_THREADS >>> (d_particles, n, adj, submesh, meshSize, xmesh);
//
// save if necessary (Vanilla)
//
if( fsave && (step%SAVEFREQ) == 0 ) {
// Copy the particles back to the CPU
cudaMemcpy(particles, d_particles, n * sizeof(particle_t), cudaMemcpyDeviceToHost);
save( fsave, n, particles);
}
}
// Synchronization step and compute total simulation time (as in Vanilla)
cudaThreadSynchronize();
simulation_time = read_timer( ) - simulation_time;
// Print information about the simulation and GPU (Vanilla + mesh info)
printf( "CPU-GPU copy time = %g seconds\n", copy_time);
printf( "GPU mesh initialization time = %g seconds\n", meshInitTime);
printf( "n = %d, simulation time = %g seconds\n", n, simulation_time );
// Release resources (Vanilla)
free( particles );
cudaFree(d_particles);
// Specifics from mesh approach
cudaFree(submesh);
cudaFree(adj);
// Close file if open
if( fsave )
fclose( fsave );
// End of the main
return 0;
}
|
1b6c031e44389e1e7a93a798e7e16d3ceb057cf0.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 2022 Institute of Parallel and Distributed Systems, Shanghai Jiao Tong University
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <algorithm>
#include <cassert>
#include <chrono>
#include <cstdio>
#include <hipcub/hipcub.hpp>
#include "../common.h"
#include "../constant.h"
#include "../device.h"
#include "../logging.h"
#include "../profiler.h"
#include "../timer.h"
#include "cuda_frequency_hashmap.h"
#include "cuda_function.h"
#include "cuda_utils.h"
namespace samgraph {
namespace common {
namespace cuda {
namespace {
__global__ void sample_random_walk(
const IdType *indptr, const IdType *indices, const IdType *input,
const size_t num_input, const size_t random_walk_length,
const double restart_prob, const size_t num_random_walk, IdType *tmp_src,
IdType *tmp_dst, hiprandState_t *random_states, size_t num_random_states) {
size_t thread_id = blockDim.x * blockDim.y * blockIdx.x +
blockDim.y * threadIdx.x + threadIdx.y;
assert(thread_id < num_random_states);
hiprandState_t local_state = random_states[thread_id];
size_t node_idx = blockIdx.x * blockDim.y + threadIdx.y;
const size_t stride = blockDim.y * gridDim.x;
/** SXN: this loop is also useless*/
while (node_idx < num_input) {
IdType start_node = input[node_idx];
size_t random_walk_idx = threadIdx.x;
while (random_walk_idx < num_random_walk) {
IdType node = start_node;
for (size_t step_idx = 0; step_idx < random_walk_length; step_idx++) {
/*
* Get the position on the output position of random walk
* suppose that num_random_walk = 2, num_input = 2
*
* layout:
* [first step of walk 0 of node 0]
* [first step of walk 1 of node 0]
* [second step of walk 0 of node 0]
* [second step of walk 1 of node 0]
* [first step of walk 0 of node 1]
* [first step of walk 1 of node 1]
* [second step of walk 0 of node 1]
* [second step of walk 1 of node 1]
* ......
*/
size_t pos = node_idx * num_random_walk * random_walk_length +
step_idx * num_random_walk + random_walk_idx;
if (node == Constant::kEmptyKey) {
tmp_src[pos] = Constant::kEmptyKey;
} else {
const IdType off = indptr[node];
const IdType len = indptr[node + 1] - indptr[node];
if (len == 0) {
tmp_src[pos] = Constant::kEmptyKey;
node = Constant::kEmptyKey;
} else {
size_t k = hiprand(&local_state) % len;
tmp_src[pos] = start_node;
tmp_dst[pos] = indices[off + k];
node = indices[off + k];
// terminate
if (hiprand_uniform_double(&local_state) < restart_prob) {
node = Constant::kEmptyKey;
}
}
}
}
random_walk_idx += blockDim.x;
}
node_idx += stride;
}
// restore the state
random_states[thread_id] = local_state;
}
} // namespace
void GPUSampleRandomWalk(const IdType *indptr, const IdType *indices,
const IdType *input, const size_t num_input,
const size_t random_walk_length,
const double random_walk_restart_prob,
const size_t num_random_walk, const size_t K,
IdType *out_src, IdType *out_dst, IdType *out_data,
size_t *num_out, FrequencyHashmap *frequency_hashmap,
Context ctx, StreamHandle stream,
GPURandomStates *random_states, uint64_t task_key) {
auto sampler_device = Device::Get(ctx);
auto cu_stream = static_cast<hipStream_t>(stream);
size_t num_samples = num_input * num_random_walk * random_walk_length;
// 1. random walk sampling
Timer t0;
IdType *tmp_src = static_cast<IdType *>(
sampler_device->AllocWorkspace(ctx, sizeof(IdType) * num_samples));
IdType *tmp_dst = static_cast<IdType *>(
sampler_device->AllocWorkspace(ctx, sizeof(IdType) * num_samples));
dim3 block(Constant::kCudaBlockSize, 1);
while (static_cast<size_t>(block.x) >= 2 * num_random_walk) {
block.x /= 2;
block.y *= 2;
}
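// Block shape after the loop above: block.x threads cooperate on the random walks of a
// single input node (threadIdx.x = walk index) while block.y input nodes share a block
// (threadIdx.y = node index), trading walk-parallelism for node-parallelism when
// num_random_walk is small.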
const dim3 grid(RoundUpDiv(num_input, static_cast<size_t>(block.y)));
hipLaunchKernelGGL(( sample_random_walk), dim3(grid), dim3(block), 0, cu_stream,
indptr, indices, input, num_input, random_walk_length,
random_walk_restart_prob, num_random_walk, tmp_src, tmp_dst,
random_states->GetStates(), random_states->NumStates());
sampler_device->StreamSync(ctx, stream);
double random_walk_sampling_time = t0.Passed();
// 2. TopK
Timer t1;
frequency_hashmap->GetTopK(tmp_src, tmp_dst, num_samples, input, num_input, K,
out_src, out_dst, out_data, num_out, stream,
task_key);
sampler_device->FreeWorkspace(ctx, tmp_dst);
sampler_device->FreeWorkspace(ctx, tmp_src);
double topk_time = t1.Passed();
Profiler::Get().LogStepAdd(task_key, kLogL3RandomWalkSampleCooTime,
random_walk_sampling_time);
Profiler::Get().LogStepAdd(task_key, kLogL3RandomWalkTopKTime, topk_time);
}
} // namespace cuda
} // namespace common
} // namespace samgraph
|
1b6c031e44389e1e7a93a798e7e16d3ceb057cf0.cu
|
/*
* Copyright 2022 Institute of Parallel and Distributed Systems, Shanghai Jiao Tong University
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include <curand.h>
#include <curand_kernel.h>
#include <algorithm>
#include <cassert>
#include <chrono>
#include <cstdio>
#include <cub/cub.cuh>
#include "../common.h"
#include "../constant.h"
#include "../device.h"
#include "../logging.h"
#include "../profiler.h"
#include "../timer.h"
#include "cuda_frequency_hashmap.h"
#include "cuda_function.h"
#include "cuda_utils.h"
namespace samgraph {
namespace common {
namespace cuda {
namespace {
__global__ void sample_random_walk(
const IdType *indptr, const IdType *indices, const IdType *input,
const size_t num_input, const size_t random_walk_length,
const double restart_prob, const size_t num_random_walk, IdType *tmp_src,
IdType *tmp_dst, curandState *random_states, size_t num_random_states) {
size_t thread_id = blockDim.x * blockDim.y * blockIdx.x +
blockDim.y * threadIdx.x + threadIdx.y;
assert(thread_id < num_random_states);
curandState local_state = random_states[thread_id];
size_t node_idx = blockIdx.x * blockDim.y + threadIdx.y;
const size_t stride = blockDim.y * gridDim.x;
/** SXN: this loop is also useless*/
while (node_idx < num_input) {
IdType start_node = input[node_idx];
size_t random_walk_idx = threadIdx.x;
while (random_walk_idx < num_random_walk) {
IdType node = start_node;
for (size_t step_idx = 0; step_idx < random_walk_length; step_idx++) {
/*
* Get the position on the output position of random walk
* suppose that num_random_walk = 2, num_input = 2
*
* layout:
* [first step of walk 0 of node 0]
* [first step of walk 1 of node 0]
* [second step of walk 0 of node 0]
* [second step of walk 1 of node 0]
* [first step of walk 0 of node 1]
* [first step of walk 1 of node 1]
* [second step of walk 0 of node 1]
* [second step of walk 1 of node 1]
* ......
*/
size_t pos = node_idx * num_random_walk * random_walk_length +
step_idx * num_random_walk + random_walk_idx;
if (node == Constant::kEmptyKey) {
tmp_src[pos] = Constant::kEmptyKey;
} else {
const IdType off = indptr[node];
const IdType len = indptr[node + 1] - indptr[node];
if (len == 0) {
tmp_src[pos] = Constant::kEmptyKey;
node = Constant::kEmptyKey;
} else {
size_t k = curand(&local_state) % len;
tmp_src[pos] = start_node;
tmp_dst[pos] = indices[off + k];
node = indices[off + k];
// terminate
if (curand_uniform_double(&local_state) < restart_prob) {
node = Constant::kEmptyKey;
}
}
}
}
random_walk_idx += blockDim.x;
}
node_idx += stride;
}
// restore the state
random_states[thread_id] = local_state;
}
} // namespace
void GPUSampleRandomWalk(const IdType *indptr, const IdType *indices,
const IdType *input, const size_t num_input,
const size_t random_walk_length,
const double random_walk_restart_prob,
const size_t num_random_walk, const size_t K,
IdType *out_src, IdType *out_dst, IdType *out_data,
size_t *num_out, FrequencyHashmap *frequency_hashmap,
Context ctx, StreamHandle stream,
GPURandomStates *random_states, uint64_t task_key) {
auto sampler_device = Device::Get(ctx);
auto cu_stream = static_cast<cudaStream_t>(stream);
size_t num_samples = num_input * num_random_walk * random_walk_length;
// 1. random walk sampling
Timer t0;
IdType *tmp_src = static_cast<IdType *>(
sampler_device->AllocWorkspace(ctx, sizeof(IdType) * num_samples));
IdType *tmp_dst = static_cast<IdType *>(
sampler_device->AllocWorkspace(ctx, sizeof(IdType) * num_samples));
dim3 block(Constant::kCudaBlockSize, 1);
while (static_cast<size_t>(block.x) >= 2 * num_random_walk) {
block.x /= 2;
block.y *= 2;
}
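// block.x covers the walks of one input node (threadIdx.x = walk index) and block.y
// packs several input nodes into one block (threadIdx.y = node index).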
const dim3 grid(RoundUpDiv(num_input, static_cast<size_t>(block.y)));
sample_random_walk<<<grid, block, 0, cu_stream>>>(
indptr, indices, input, num_input, random_walk_length,
random_walk_restart_prob, num_random_walk, tmp_src, tmp_dst,
random_states->GetStates(), random_states->NumStates());
sampler_device->StreamSync(ctx, stream);
double random_walk_sampling_time = t0.Passed();
// 2. TopK
Timer t1;
frequency_hashmap->GetTopK(tmp_src, tmp_dst, num_samples, input, num_input, K,
out_src, out_dst, out_data, num_out, stream,
task_key);
sampler_device->FreeWorkspace(ctx, tmp_dst);
sampler_device->FreeWorkspace(ctx, tmp_src);
double topk_time = t1.Passed();
Profiler::Get().LogStepAdd(task_key, kLogL3RandomWalkSampleCooTime,
random_walk_sampling_time);
Profiler::Get().LogStepAdd(task_key, kLogL3RandomWalkTopKTime, topk_time);
}
} // namespace cuda
} // namespace common
} // namespace samgraph
|
08c7ffd8719ba4d4961f0cfd40e9f0505cfb7b8f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.6.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2014
@generated from zsymmetrize.cu normal z -> s, Sat Nov 15 19:53:59 2014
@author Mark Gates
*/
#include "common_magma.h"
#define NB 64
/*
Matrix is m x m, and is divided into block rows, each NB x m.
Each block has NB threads.
Each thread copies one row, iterating across all columns below diagonal.
The bottom block of rows may be partially outside the matrix;
if so, rows outside the matrix (i >= m) are disabled.
*/
__global__ void
ssymmetrize_lower( int m, float *dA, int ldda )
{
// dA iterates across row i and dAT iterates down column i.
int i = blockIdx.x*NB + threadIdx.x;
float *dAT = dA;
if ( i < m ) {
dA += i;
dAT += i*ldda;
float *dAend = dA + i*ldda;
while( dA < dAend ) {
*dAT = (*dA); // upper := lower
dA += ldda;
dAT += 1;
}
}
}
// only difference with _lower version is direction dA=dAT instead of dAT=dA.
__global__ void
ssymmetrize_upper( int m, float *dA, int ldda )
{
// dA iterates across row i and dAT iterates down column i.
int i = blockIdx.x*NB + threadIdx.x;
float *dAT = dA;
if ( i < m ) {
dA += i;
dAT += i*ldda;
float *dAend = dA + i*ldda;
while( dA < dAend ) {
*dA = (*dAT); // lower := upper
dA += ldda;
dAT += 1;
}
}
}
/**
Purpose
-------
SSYMMETRIZE copies lower triangle to upper triangle, or vice-versa,
to make dA a general representation of a symmetric matrix.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies the part of the matrix dA that is valid on input.
- = MagmaUpper: Upper triangular part
- = MagmaLower: Lower triangular part
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in,out]
dA REAL array, dimension (LDDA,N)
The m by m matrix dA.
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_saux2
********************************************************************/
extern "C" void
magmablas_ssymmetrize_q(
magma_uplo_t uplo, magma_int_t m,
magmaFloat_ptr dA, magma_int_t ldda,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( uplo != MagmaLower && uplo != MagmaUpper )
info = -1;
else if ( m < 0 )
info = -2;
else if ( ldda < max(1,m) )
info = -4;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return;
}
if ( m == 0 )
return;
dim3 threads( NB );
dim3 grid( (m + NB - 1)/NB );
if ( uplo == MagmaUpper ) {
hipLaunchKernelGGL(( ssymmetrize_upper), dim3(grid), dim3(threads), 0, queue , m, dA, ldda );
}
else {
hipLaunchKernelGGL(( ssymmetrize_lower), dim3(grid), dim3(threads), 0, queue , m, dA, ldda );
}
}
/**
@see magmablas_ssymmetrize_q
@ingroup magma_saux2
********************************************************************/
extern "C" void
magmablas_ssymmetrize(
magma_uplo_t uplo, magma_int_t m,
magmaFloat_ptr dA, magma_int_t ldda )
{
magmablas_ssymmetrize_q( uplo, m, dA, ldda, magma_stream );
}
|
08c7ffd8719ba4d4961f0cfd40e9f0505cfb7b8f.cu
|
/*
-- MAGMA (version 1.6.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2014
@generated from zsymmetrize.cu normal z -> s, Sat Nov 15 19:53:59 2014
@author Mark Gates
*/
#include "common_magma.h"
#define NB 64
/*
Matrix is m x m, and is divided into block rows, each NB x m.
Each block has NB threads.
Each thread copies one row, iterating across all columns below diagonal.
The bottom block of rows may be partially outside the matrix;
if so, rows outside the matrix (i >= m) are disabled.
*/
__global__ void
ssymmetrize_lower( int m, float *dA, int ldda )
{
// dA iterates across row i and dAT iterates down column i.
int i = blockIdx.x*NB + threadIdx.x;
float *dAT = dA;
if ( i < m ) {
dA += i;
dAT += i*ldda;
float *dAend = dA + i*ldda;
while( dA < dAend ) {
*dAT = (*dA); // upper := lower
dA += ldda;
dAT += 1;
}
}
}
// only difference with _lower version is direction dA=dAT instead of dAT=dA.
__global__ void
ssymmetrize_upper( int m, float *dA, int ldda )
{
// dA iterates across row i and dAT iterates down column i.
int i = blockIdx.x*NB + threadIdx.x;
float *dAT = dA;
if ( i < m ) {
dA += i;
dAT += i*ldda;
float *dAend = dA + i*ldda;
while( dA < dAend ) {
*dA = (*dAT); // lower := upper
dA += ldda;
dAT += 1;
}
}
}
/**
Purpose
-------
SSYMMETRIZE copies lower triangle to upper triangle, or vice-versa,
to make dA a general representation of a symmetric matrix.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies the part of the matrix dA that is valid on input.
- = MagmaUpper: Upper triangular part
- = MagmaLower: Lower triangular part
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in,out]
dA REAL array, dimension (LDDA,N)
The m by m matrix dA.
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_saux2
********************************************************************/
extern "C" void
magmablas_ssymmetrize_q(
magma_uplo_t uplo, magma_int_t m,
magmaFloat_ptr dA, magma_int_t ldda,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( uplo != MagmaLower && uplo != MagmaUpper )
info = -1;
else if ( m < 0 )
info = -2;
else if ( ldda < max(1,m) )
info = -4;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return;
}
if ( m == 0 )
return;
dim3 threads( NB );
dim3 grid( (m + NB - 1)/NB );
if ( uplo == MagmaUpper ) {
ssymmetrize_upper<<< grid, threads, 0, queue >>>( m, dA, ldda );
}
else {
ssymmetrize_lower<<< grid, threads, 0, queue >>>( m, dA, ldda );
}
}
/**
@see magmablas_ssymmetrize_q
@ingroup magma_saux2
********************************************************************/
extern "C" void
magmablas_ssymmetrize(
magma_uplo_t uplo, magma_int_t m,
magmaFloat_ptr dA, magma_int_t ldda )
{
magmablas_ssymmetrize_q( uplo, m, dA, ldda, magma_stream );
}
|
571795c18df4ebce1ca3520ffa51653fb0b7bad6.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
using namespace std;
long long remaining_N2(int , int ,long long );
long long remaining_N(int , int ,int );
__global__ void ker2(float * cormat, float * upper,int n1,int n,long long upper_size,int N,int i_so_far,long long M1)
{
long long idx = blockDim.x;
idx*=blockIdx.x;
idx+=threadIdx.x;
long i = idx/n;
long j = idx%n;
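// Each thread copies one strictly-upper-triangular element (i, j) with j > i:
// tmp_2 below is its 1-based position in a row-major packing of the strict upper
// triangle of an n x n matrix, and indexi is its column-major index into the
// n1 x n cormat block (leading dimension n1).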
if(i<j && i<n1 && j<n)// &&i<N &&j<N && idx<(n1*n))
{
long long tmp=i;
tmp*=(i+1);
tmp/=2;
long long tmp_2=i;
tmp_2*=n;
tmp_2=tmp_2-tmp;
tmp_2+=j;
tmp_2-=i;
long long indexi=n1;
indexi*=j;
indexi=indexi+i;
upper[tmp_2-1]=cormat[indexi];
//if((i==39001 &&j == 69999)||(i==1 && j==2))
// printf("\n\n\n thread: %f ",upper[tmp_2-1]," ",cormat[indexi]);
}
}
|
571795c18df4ebce1ca3520ffa51653fb0b7bad6.cu
|
#include "includes.h"
using namespace std;
long long remaining_N2(int , int ,long long );
long long remaining_N(int , int ,int );
__global__ void ker2(float * cormat, float * upper,int n1,int n,long long upper_size,int N,int i_so_far,long long M1)
{
long long idx = blockDim.x;
idx*=blockIdx.x;
idx+=threadIdx.x;
long i = idx/n;
long j = idx%n;
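// (i, j) with j > i is mapped via tmp_2 to a row-major packing of the strict upper
// triangle; indexi addresses cormat column-major with leading dimension n1.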
if(i<j && i<n1 && j<n)// &&i<N &&j<N && idx<(n1*n))
{
long long tmp=i;
tmp*=(i+1);
tmp/=2;
long long tmp_2=i;
tmp_2*=n;
tmp_2=tmp_2-tmp;
tmp_2+=j;
tmp_2-=i;
long long indexi=n1;
indexi*=j;
indexi=indexi+i;
upper[tmp_2-1]=cormat[indexi];
//if((i==39001 &&j == 69999)||(i==1 && j==2))
// printf("\n\n\n thread: %f ",upper[tmp_2-1]," ",cormat[indexi]);
}
}
|
27abf123c1037c2165d6f47e0590ee5f310ac66b.hip
|
// !!! This is a file automatically generated by hipify!!!
/* Copyright (c) 2018, The University of Oxford. See LICENSE file. */
#include "splines/define_dierckx_bispev_bicubic.h"
#include "utility/oskar_cuda_registrar.h"
#include "utility/oskar_kernel_macros.h"
#include "utility/oskar_vector_types.h"
#include <hip/hip_runtime.h>
/* Kernels */
#define Real float
#include "splines/src/oskar_splines.cl"
#undef Real
#define Real double
#include "splines/src/oskar_splines.cl"
#undef Real
|
27abf123c1037c2165d6f47e0590ee5f310ac66b.cu
|
/* Copyright (c) 2018, The University of Oxford. See LICENSE file. */
#include "splines/define_dierckx_bispev_bicubic.h"
#include "utility/oskar_cuda_registrar.h"
#include "utility/oskar_kernel_macros.h"
#include "utility/oskar_vector_types.h"
#include <cuda_runtime.h>
/* Kernels */
#define Real float
#include "splines/src/oskar_splines.cl"
#undef Real
#define Real double
#include "splines/src/oskar_splines.cl"
#undef Real
|
c287034a500ee0b538cdc634d948b0280a84c485.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
/*
* An example of using a statically declared global variable (devData) to store
* a floating-point value on the device.
*/
inline
void checkCuda(hipError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
if (result != hipSuccess)
{
printf_s("Error: %s : %d", __FILE__, __LINE__);
printf_s("CUDA Runtime Error: %d: %s\n", result, hipGetErrorString(result));
exit(1);
}
#endif
}
__device__ float devData[5];
__global__ void checkGlobalVariable()
{
int tid = threadIdx.x;
// display value before kernel change
printf("The device value: %f and thread id: %d\n", devData[tid], tid);
if (tid < 5)
{
devData[tid] *= tid;
}
}
int main(void)
{
// initialize the global variable
float values[5] = { 3.14f, 3.14f, 3.14f, 3.14f, 3.14f };
checkCuda(hipMemcpyToSymbol(devData, values, 5 * sizeof(float)));
printf("Host: copied %f to the global array\n\n", values[0]);
// invoke the kernel
hipLaunchKernelGGL(( checkGlobalVariable) , dim3(1), dim3(5), 0, 0, );
// copy the global variable back to the host
checkCuda(hipMemcpyFromSymbol(values, devData, 5 * sizeof(float)));
for (int i = 0; i < 5; i++)
{
printf("Host: the value changed by the kernel to %f\n", values[i]);
}
checkCuda(hipDeviceReset());
return EXIT_SUCCESS;
}
|
c287034a500ee0b538cdc634d948b0280a84c485.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
/*
* An example of using a statically declared global variable (devData) to store
* a floating-point value on the device.
*/
inline
void checkCuda(cudaError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
if (result != cudaSuccess)
{
printf_s("Error: %s : %d", __FILE__, __LINE__);
printf_s("CUDA Runtime Error: %d: %s\n", result, cudaGetErrorString(result));
exit(1);
}
#endif
}
__device__ float devData[5];
__global__ void checkGlobalVariable()
{
int tid = threadIdx.x;
// display value before kernel change
printf("The device value: %f and thread id: %d\n", devData[tid], tid);
if (tid < 5)
{
devData[tid] *= tid;
}
}
int main(void)
{
// initialize the global variable
float values[5] = { 3.14f, 3.14f, 3.14f, 3.14f, 3.14f };
checkCuda(cudaMemcpyToSymbol(devData, values, 5 * sizeof(float)));
printf("Host: copied %f to the global array\n\n", values[0]);
// invoke the kernel
checkGlobalVariable <<<1, 5>>>();
// copy the global variable back to the host
checkCuda(cudaMemcpyFromSymbol(values, devData, 5 * sizeof(float)));
for (int i = 0; i < 5; i++)
{
printf("Host: the value changed by the kernel to %f\n", values[i]);
}
checkCuda(cudaDeviceReset());
return EXIT_SUCCESS;
}
|
42c26af90d510e46f6344117f822310ad7e97045.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* particles.c Simulation of an exploding bag of bouncy balls / particles
Particles explode outward, fall under the weight of gravity, and bounce off the floor.
Uses simple kinematic definitions from introductory physics:
velocity = dx / dt
acceleration = dv / dt
Uses the known acceleration from gravity:
g = -9.8 m/s^2 (in the y direction)
And takes advantage of the physics of an elastic collision between a bouncy ball and the floor.
x-velocity is unchanged (velx = velx)
y-velocity reverses direction but keeps magnitude (vely = -vely)
Basically just repeats these simple equations many, many times.
Compile the HIP version with:
hipcc particles.hip -o particles
References:
* C Arrays tutorial: https://beginnersbook.com/2014/01/c-arrays-example/
* How to do multiple outputs in C: https://www.geeksforgeeks.org/how-to-return-multiple-values-from-a-function-in-c-or-cpp/
* Random numbers in C https://c-for-dummies.com/blog/?p=1458
Created by Scott Feister on April 22, 2020
*/
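/* Worked example of the update below (illustrative only, assuming dt = 0.001 s,
   velx = 2.0 m/s, vely = 1.0 m/s, accx = 0, accy = -9.8 m/s^2):
       x    += dt * velx;    // x grows by 0.002 m
       y    += dt * vely;    // y grows by 0.001 m
       velx += dt * accx;    // unchanged, since accx = 0
       vely += dt * accy;    // vely drops by 0.0098 m/s
   The iterate() kernel repeats exactly this update NSUBSTEPS times per recorded step. */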
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <math.h>
#include <omp.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#define NPARTS 1000 // Number of particles
#define NSTEPS 600 // Number of steps to record (60 fps for 10 seconds playback = 600 steps)
#define NSUBSTEPS 5000 // Number of substeps per step (increases physical accuracy; also increases runtime)
__device__
void new_particle(double* x, double* y, double* velx, double* vely, int particleid);
__device__
void new_particle(double* x, double* y, double* velx, double* vely, int particleid) {
hiprandState_t state;
unsigned long seed = 12090923; // Can be anything you want
hiprand_init(seed, particleid*4, 0, &state);
// Note that hiprand_uniform gives a random float number between 0 and 1.
*x = 5.0 * hiprand_uniform(&state); // Initial x value for particles will be between x = 0 meter and x = +5.0 meters
*y = 10.0 + 5.0 * hiprand_uniform(&state); // Initial y value for particles will be between y = +10.0 meters and y = +15.0 meters
*velx = -10.0 + 20.0 * hiprand_uniform(&state); // initial x-velocity will be between -10.0 and +10.0 meters/second
*vely = 20.0 * hiprand_uniform(&state); // initial y-velocity will be between 0 and +20.0 meters/second
}
__global__
void iterate (float* xarr, float* yarr, double dt, double tfinal) {
// Initialize variables relating to each individual particle
double x; // x-value of particle
double y; // y-value of particle
double velx; // x-component particle velocity
double vely; // y-component particle velocity
const double accx = 0.0; // x-component particle acceleration: none, since gravity is in the y direction.
const double accy = -9.8; // y-component particle acceleration: for gravity on Earth, always -9.8 meters/second^2
int i,j,step;
i = blockDim.x * blockIdx.x + threadIdx.x;
// Make particle
new_particle(&x, &y, &velx, &vely,i);
// Iterate over timesteps, for this particle
for (step=0; step<NSTEPS;step++)
{
// Save position values into array
xarr[i*NSTEPS + step] = x;
yarr[i*NSTEPS + step] = y;
for (j=0; j<NSUBSTEPS;j++)
{
// If particle hits the ground (y=0), bounce back up (reverse y-velocity)
if (y < 0 && vely < 0)
{
vely = -vely;
}
// Advance timestep according to simple Newtonian physics equations: v=dx/dt and a=dv/dt.
x += dt * velx; // Compute particle's x-position for next step
y += dt * vely; // Compute particle's y-position for next step
velx += dt * accx; // Compute particle's x-velocity for next step
vely += dt * accy; // Compute particle's y-velocity for next step
}
}
}
int main() {
printf("Assigning / allocating variables and arrays.\n");
// Set final time
const double tfinal = 10.0; // We will simulate 10 seconds of particle time
float* xarr;
float* yarr;
hipMallocManaged(&xarr, NPARTS*NSTEPS*sizeof(float));
hipMallocManaged(&yarr, NPARTS*NSTEPS*sizeof(float));
// Compute timestep
const double dt = tfinal / NSTEPS / NSUBSTEPS; // time step of simulation
// Begin simulation
printf("Running particle simulation...\n");
int i, step;
hipDeviceSynchronize();
// Iterate over particles
hipLaunchKernelGGL(( iterate), dim3(NPARTS),dim3(1), 0, 0, xarr, yarr, dt, tfinal);
hipDeviceSynchronize();
printf("Particle simulation complete!\n");
// Store x and y position arrays to CSV files 'xarr.txt' and 'yarr.txt'
// Each row will be a particle
// Each column will be a step
FILE *fpx, *fpy;
fpx = fopen("xarr.txt","w");
fpy = fopen("yarr.txt","w");
for (i=0; i<NPARTS;i++)
{
for (step=0; step<NSTEPS;step++)
{
fprintf(fpx, "%f,", xarr[i*NSTEPS + step]);
fprintf(fpy, "%f,", yarr[i*NSTEPS + step]);
}
fprintf(fpx,"\n");
fprintf(fpy,"\n");
}
fclose(fpx);
fclose(fpy);
hipFree(xarr);
hipFree(yarr);
printf("Outputs saved to 'xarr.txt' and 'yarr.txt'.\n");
printf("Visualize results by calling 'python particles.py'.\n");
return 0;
}
|
42c26af90d510e46f6344117f822310ad7e97045.cu
|
/* particles.c Simulation of an exploding bag of bouncy balls / particles
Particles explode outward, fall under the weight of gravity, and bounce off the floor.
Uses simple kinematic definitions from introductory physics:
velocity = dx / dt
acceleration = dv / dt
Uses the known acceleration from gravity:
g = -9.8 m/s^2 (in the y direction)
And takes advantage of the physics of an elastic collision between a bouncy ball and the floor.
x-velocity is unchanged (velx = velx)
y-velocity reverses direction but keeps magnitude (vely = -vely)
Basically just repeats these simple equations many, many times.
Compile the CUDA version with:
nvcc particles.cu -o particles
References:
* C Arrays tutorial: https://beginnersbook.com/2014/01/c-arrays-example/
* How to do multiple outputs in C: https://www.geeksforgeeks.org/how-to-return-multiple-values-from-a-function-in-c-or-cpp/
* Random numbers in C https://c-for-dummies.com/blog/?p=1458
Created by Scott Feister on April 22, 2020
*/
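/* Worked example of the update below (illustrative only, assuming dt = 0.001 s,
   velx = 2.0 m/s, vely = 1.0 m/s, accx = 0, accy = -9.8 m/s^2):
       x    += dt * velx;    // x grows by 0.002 m
       y    += dt * vely;    // y grows by 0.001 m
       velx += dt * accx;    // unchanged, since accx = 0
       vely += dt * accy;    // vely drops by 0.0098 m/s
   The iterate() kernel repeats exactly this update NSUBSTEPS times per recorded step. */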
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <math.h>
#include <omp.h>
#include <curand.h>
#include <curand_kernel.h>
#define NPARTS 1000 // Number of particles
#define NSTEPS 600 // Number of steps to record (60 fps for 10 seconds playback = 600 steps)
#define NSUBSTEPS 5000 // Number of substeps per step (increases physical accuracy; also increases runtime)
__device__
void new_particle(double* x, double* y, double* velx, double* vely, int particleid);
__device__
void new_particle(double* x, double* y, double* velx, double* vely, int particleid) {
curandState state;
unsigned long seed = 12090923; // Can be anything you want
curand_init(seed, particleid*4, 0, &state);
// Note that curand_uniform gives a random float number between 0 and 1.
*x = 5.0 * curand_uniform(&state); // Initial x value for particles will be between x = 0 meter and x = +5.0 meters
*y = 10.0 + 5.0 * curand_uniform(&state); // Initial y value for particles will be between y = +10.0 meters and y = +15.0 meters
*velx = -10.0 + 20.0 * curand_uniform(&state); // initial x-velocity will be between -10.0 and +10.0 meters/second
*vely = 20.0 * curand_uniform(&state); // initial y-velocity will be between 0 and +20.0 meters/second
}
__global__
void iterate (float* xarr, float* yarr, double dt, double tfinal) {
// Initialize variables relating to each individual particle
double x; // x-value of particle
double y; // y-value of particle
double velx; // x-component particle velocity
double vely; // y-component particle velocity
const double accx = 0.0; // x-component particle acceleration: none, since gravity is in the y direction.
const double accy = -9.8; // y-component particle acceleration: for gravity on Earth, always -9.8 meters/second^2
int i,j,step;
i = blockDim.x * blockIdx.x + threadIdx.x;
// Make particle
new_particle(&x, &y, &velx, &vely,i);
// Iterate over timesteps, for this particle
for (step=0; step<NSTEPS;step++)
{
// Save position values into array
xarr[i*NSTEPS + step] = x;
yarr[i*NSTEPS + step] = y;
for (j=0; j<NSUBSTEPS;j++)
{
// If particle hits the ground (y=0), bounce back up (reverse y-velocity)
if (y < 0 && vely < 0)
{
vely = -vely;
}
// Advance timestep according to simple Newtonian physics equations: v=dx/dt and a=dv/dt.
x += dt * velx; // Compute particle's x-position for next step
y += dt * vely; // Compute particle's y-position for next step
velx += dt * accx; // Compute particle's x-velocity for next step
vely += dt * accy; // Compute particle's y-velocity for next step
}
}
}
int main() {
printf("Assigning / allocating variables and arrays.\n");
// Set final time
const double tfinal = 10.0; // We will simulate 10 seconds of particle time
float* xarr;
float* yarr;
cudaMallocManaged(&xarr, NPARTS*NSTEPS*sizeof(float));
cudaMallocManaged(&yarr, NPARTS*NSTEPS*sizeof(float));
// Compute timestep
const double dt = tfinal / NSTEPS / NSUBSTEPS; // time step of simulation
// Begin simulation
printf("Running particle simulation...\n");
int i, step;
cudaDeviceSynchronize();
// Iterate over particles
iterate<<<NPARTS,1>>>(xarr, yarr, dt, tfinal);
cudaDeviceSynchronize();
printf("Particle simulation complete!\n");
// Store x and y position arrays to CSV files 'xarr.txt' and 'yarr.txt'
// Each row will be a particle
// Each column will be a step
FILE *fpx, *fpy;
fpx = fopen("xarr.txt","w");
fpy = fopen("yarr.txt","w");
for (i=0; i<NPARTS;i++)
{
for (step=0; step<NSTEPS;step++)
{
fprintf(fpx, "%f,", xarr[i*NSTEPS + step]);
fprintf(fpy, "%f,", yarr[i*NSTEPS + step]);
}
fprintf(fpx,"\n");
fprintf(fpy,"\n");
}
fclose(fpx);
fclose(fpy);
cudaFree(xarr);
cudaFree(yarr);
printf("Outputs saved to 'xarr.txt' and 'yarr.txt'.\n");
printf("Visualize results by calling 'python particles.py'.\n");
return 0;
}
|
625784f50a7ddf53d96fd989a5733d370f466cbd.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <math.h>
#include<stdint.h>
#include<stdlib.h>
#include<cuda.h>
#define WID 1024
#define HEI 1024
#pragma pack(push,1)
typedef struct tagBITMAPFILEHEADER
{
unsigned short bfType;
uint32_t bfSize;
unsigned short bfReserved1;
unsigned short bfReserved2;
uint32_t bf0ffBits;
}BITMAPFILEHEADER;
#pragma pack(pop)
typedef struct tagBITMAPINFOHEADER
{
uint32_t biSize;
int32_t biWidth;
int32_t biHeight;
unsigned short biPlanes;
unsigned short biBitCount;
uint32_t biCompression;
uint32_t biSizeImage;
int32_t biXPelsPerMeter;
int32_t biYPelsPerMeter;
uint32_t biCirUsed;
uint32_t biCirImportant;
}BITMAPINFOHEADER;
typedef struct tagRGBQUAD
{
unsigned char rgbBlue;
unsigned char rgbGreen;
unsigned char rgbRed;
unsigned char rgbReserved;
}RGBQUAD;
typedef struct tagBITMAPINFO
{
BITMAPINFOHEADER bmiHeader;
RGBQUAD bmiColors[1];
}BITMAPINFO;
__global__ void distance_gpu(int *x_d,int *y_d,double *z_d,double *img_buf_d,int *tensuu_d)
{
int i,j,k;
i=blockIdx.x*128+threadIdx.x;
double kankaku,hatyou,goukei;
hatyou=0.633;
kankaku=10.5;
goukei=2.0*M_PI*kankaku/hatyou;
for(j=0;j<WID;j++){
for(k=0;k<*tensuu_d;k++){
img_buf_d[i*WID+j]=img_buf_d[i*WID+j]+cos(goukei*sqrt((j-x_d[k])*(j-x_d[k])+(i-y_d[k])*(i-y_d[k])+z_d[k]*z_d[k]));
}
}
}
int main(){
int tensuu;
BITMAPFILEHEADER BmpFileHeader;
BITMAPINFOHEADER BmpInfoHeader;
RGBQUAD RGBQuad[256];
FILE *fp;
int i,j;
BmpFileHeader.bfType =19778;
BmpFileHeader.bfSize =14+40+1024+(WID*HEI);
BmpFileHeader.bfReserved1 =0;
BmpFileHeader.bfReserved2 =0;
BmpFileHeader.bf0ffBits =14+40+1024;
BmpInfoHeader.biSize =40;
BmpInfoHeader.biWidth =WID;
BmpInfoHeader.biHeight =HEI;
BmpInfoHeader.biPlanes =1;
BmpInfoHeader.biBitCount =8; //256 gray levels
BmpInfoHeader.biCompression =0L;
BmpInfoHeader.biSizeImage =0L;
BmpInfoHeader.biXPelsPerMeter =0L;
BmpInfoHeader.biYPelsPerMeter =0L;
BmpInfoHeader.biCirUsed =0L;
BmpInfoHeader.biCirImportant =0L;
for(i=0;i<256;i++){
RGBQuad[i].rgbBlue =i;
RGBQuad[i].rgbGreen =i;
RGBQuad[i].rgbRed =i;
RGBQuad[i].rgbReserved =0;
}
char filename[20]={};
printf(" : ");
scanf("%s",filename);
fp=fopen(filename,"rb");
if(fp==NULL){
printf("\n");
}
fread(&tensuu,sizeof(int),1,fp);
printf("%d\n",tensuu);
int x[tensuu];
int y[tensuu];
double z[tensuu];
int *tensuu_d;
hipMalloc((void**)&tensuu_d,sizeof(int));
hipMemcpy(tensuu_d,&tensuu,sizeof(int),hipMemcpyHostToDevice);
int *x_d,*y_d;
double *z_d;
double *img_buf_d;
dim3 blocks(8,1,1);
dim3 threads(128,1,1);
int x_buf,y_buf,z_buf;
for(i=0;i<tensuu;i++){
fread(&x_buf,sizeof(int),1,fp);
fread(&y_buf,sizeof(int),1,fp);
fread(&z_buf,sizeof(int),1,fp);
x[i]=x_buf*40+512;
y[i]=y_buf*40+512;
z[i]=((double)z_buf)*40+100000.0;
}
fclose(fp);
hipMalloc((void**)&x_d,tensuu*sizeof(int));
hipMalloc((void**)&y_d,tensuu*sizeof(int));
hipMalloc((void**)&z_d,tensuu*sizeof(double));
hipMalloc((void**)&img_buf_d,WID*HEI*sizeof(double));
double *img_buf;
img_buf=(double *)malloc(sizeof(double)*WID*HEI);
for(i=0;i<WID*HEI;i++){
img_buf[i]=0.0;
}
hipMemcpy(x_d,x,tensuu*sizeof(int),hipMemcpyHostToDevice);
hipMemcpy(y_d,y,tensuu*sizeof(int),hipMemcpyHostToDevice);
hipMemcpy(z_d,z,tensuu*sizeof(double),hipMemcpyHostToDevice);
hipMemcpy(img_buf_d,img_buf,WID*HEI*sizeof(double),hipMemcpyHostToDevice);
hipLaunchKernelGGL(( distance_gpu), dim3(blocks),dim3(threads), 0, 0, x_d,y_d,z_d,img_buf_d,tensuu_d);
hipMemcpy(img_buf,img_buf_d,WID*HEI*sizeof(double),hipMemcpyDeviceToHost);
double min,max,mid;
min=img_buf[0];
max=img_buf[0];
for(i=0;i<HEI;i++){
for(j=0;j<WID;j++){
if(min>img_buf[i*WID+j]){
min=img_buf[i*WID+j];
}
if(max<img_buf[i*WID+j]){
max=img_buf[i*WID+j];
}
}
}
mid=0.5*(min+max);
printf("min = %lf max = %lf mid = %lf\n",min,max,mid);
unsigned char *img;
img=(unsigned char *)malloc(sizeof(unsigned char)*WID*HEI);
for(i=0;i<WID*HEI;i++){
if(img_buf[i]<mid){
img[i]=0;
}
if(img_buf[i]>mid){
img[i]=255;
}
}
FILE *fp1;
fp1=fopen("cgh_root_gpu.bmp","wb");
if(fp1==NULL){
printf("\n");
}
fwrite(&BmpFileHeader, sizeof(BmpFileHeader) , 1 ,fp1);
fwrite(&BmpInfoHeader, sizeof(BmpInfoHeader) , 1 ,fp1);
fwrite(&RGBQuad[0], sizeof(RGBQuad[0]) , 256 ,fp1);
fwrite(img,sizeof(unsigned char),WID*HEI,fp1);
free(img);
free(img_buf);
fclose(fp1);
hipFree(tensuu_d);
hipFree(x_d);
hipFree(y_d);
hipFree(z_d);
hipFree(img_buf_d);
return 0;
}
|
625784f50a7ddf53d96fd989a5733d370f466cbd.cu
|
#include <stdio.h>
#include <math.h>
#include<stdint.h>
#include<stdlib.h>
#include<cuda.h>
#define WID 1024
#define HEI 1024
#pragma pack(push,1)
typedef struct tagBITMAPFILEHEADER
{
unsigned short bfType;
uint32_t bfSize;
unsigned short bfReserved1;
unsigned short bfReserved2;
uint32_t bf0ffBits;
}BITMAPFILEHEADER;
#pragma pack(pop)
typedef struct tagBITMAPINFOHEADER
{
uint32_t biSize;
int32_t biWidth;
int32_t biHeight;
unsigned short biPlanes;
unsigned short biBitCount;
uint32_t biCompression;
uint32_t biSizeImage;
int32_t biXPelsPerMeter;
int32_t biYPelsPerMeter;
uint32_t biCirUsed;
uint32_t biCirImportant;
}BITMAPINFOHEADER;
typedef struct tagRGBQUAD
{
unsigned char rgbBlue;
unsigned char rgbGreen;
unsigned char rgbRed;
unsigned char rgbReserved;
}RGBQUAD;
typedef struct tagBITMAPINFO
{
BITMAPINFOHEADER bmiHeader;
RGBQUAD bmiColors[1];
}BITMAPINFO;
__global__ void distance_gpu(int *x_d,int *y_d,double *z_d,double *img_buf_d,int *tensuu_d)
{
int i,j,k;
i=blockIdx.x*128+threadIdx.x;
double kankaku,hatyou,goukei;
hatyou=0.633;
kankaku=10.5;
goukei=2.0*M_PI*kankaku/hatyou;
for(j=0;j<WID;j++){
for(k=0;k<*tensuu_d;k++){
img_buf_d[i*WID+j]=img_buf_d[i*WID+j]+cos(goukei*sqrt((j-x_d[k])*(j-x_d[k])+(i-y_d[k])*(i-y_d[k])+z_d[k]*z_d[k]));
}
}
}
int main(){
int tensuu;
BITMAPFILEHEADER BmpFileHeader;
BITMAPINFOHEADER BmpInfoHeader;
RGBQUAD RGBQuad[256];
FILE *fp;
int i,j;
BmpFileHeader.bfType =19778;
BmpFileHeader.bfSize =14+40+1024+(WID*HEI);
BmpFileHeader.bfReserved1 =0;
BmpFileHeader.bfReserved2 =0;
BmpFileHeader.bf0ffBits =14+40+1024;
BmpInfoHeader.biSize =40;
BmpInfoHeader.biWidth =WID;
BmpInfoHeader.biHeight =HEI;
BmpInfoHeader.biPlanes =1;
BmpInfoHeader.biBitCount =8; //256 gray levels
BmpInfoHeader.biCompression =0L;
BmpInfoHeader.biSizeImage =0L;
BmpInfoHeader.biXPelsPerMeter =0L;
BmpInfoHeader.biYPelsPerMeter =0L;
BmpInfoHeader.biCirUsed =0L;
BmpInfoHeader.biCirImportant =0L;
for(i=0;i<256;i++){
RGBQuad[i].rgbBlue =i;
RGBQuad[i].rgbGreen =i;
RGBQuad[i].rgbRed =i;
RGBQuad[i].rgbReserved =0;
}
char filename[20]={};
printf("ファイル名を入力してください : ");
scanf("%s",filename);
fp=fopen(filename,"rb");
if(fp==NULL){
printf("ファイルオープンエラー\n");
}
fread(&tensuu,sizeof(int),1,fp);
printf("物体点数は%dです\n",tensuu);
int x[tensuu];
int y[tensuu];
double z[tensuu];
int *tensuu_d;
cudaMalloc((void**)&tensuu_d,sizeof(int));
cudaMemcpy(tensuu_d,&tensuu,sizeof(int),cudaMemcpyHostToDevice);
int *x_d,*y_d;
double *z_d;
double *img_buf_d;
dim3 blocks(8,1,1);
dim3 threads(128,1,1);
int x_buf,y_buf,z_buf;
for(i=0;i<tensuu;i++){
fread(&x_buf,sizeof(int),1,fp);
fread(&y_buf,sizeof(int),1,fp);
fread(&z_buf,sizeof(int),1,fp);
x[i]=x_buf*40+512;
y[i]=y_buf*40+512;
z[i]=((double)z_buf)*40+100000.0;
}
fclose(fp);
cudaMalloc((void**)&x_d,tensuu*sizeof(int));
cudaMalloc((void**)&y_d,tensuu*sizeof(int));
cudaMalloc((void**)&z_d,tensuu*sizeof(double));
cudaMalloc((void**)&img_buf_d,WID*HEI*sizeof(double));
double *img_buf;
img_buf=(double *)malloc(sizeof(double)*WID*HEI);
for(i=0;i<WID*HEI;i++){
img_buf[i]=0.0;
}
cudaMemcpy(x_d,x,tensuu*sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(y_d,y,tensuu*sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(z_d,z,tensuu*sizeof(double),cudaMemcpyHostToDevice);
cudaMemcpy(img_buf_d,img_buf,WID*HEI*sizeof(double),cudaMemcpyHostToDevice);
distance_gpu<<<blocks,threads>>>(x_d,y_d,z_d,img_buf_d,tensuu_d);
cudaMemcpy(img_buf,img_buf_d,WID*HEI*sizeof(double),cudaMemcpyDeviceToHost);
double min,max,mid;
min=img_buf[0];
max=img_buf[0];
for(i=0;i<HEI;i++){
for(j=0;j<WID;j++){
if(min>img_buf[i*WID+j]){
min=img_buf[i*WID+j];
}
if(max<img_buf[i*WID+j]){
max=img_buf[i*WID+j];
}
}
}
mid=0.5*(min+max);
printf("min = %lf max = %lf mid = %lf\n",min,max,mid);
unsigned char *img;
img=(unsigned char *)malloc(sizeof(unsigned char)*WID*HEI);
for(i=0;i<WID*HEI;i++){
if(img_buf[i]<mid){
img[i]=0;
}
if(img_buf[i]>mid){
img[i]=255;
}
}
FILE *fp1;
fp1=fopen("cgh_root_gpu.bmp","wb");
if(fp1==NULL){
printf("ファイルオープンエラー\n");
}
fwrite(&BmpFileHeader, sizeof(BmpFileHeader) , 1 ,fp1);
fwrite(&BmpInfoHeader, sizeof(BmpInfoHeader) , 1 ,fp1);
fwrite(&RGBQuad[0], sizeof(RGBQuad[0]) , 256 ,fp1);
fwrite(img,sizeof(unsigned char),WID*HEI,fp1);
free(img);
free(img_buf);
fclose(fp1);
cudaFree(tensuu_d);
cudaFree(x_d);
cudaFree(y_d);
cudaFree(z_d);
cudaFree(img_buf_d);
return 0;
}
|
43940f893d75f8d5d027d8514ca63e24800b5da6.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void ComputeInternalEnergy_kernel(float *Rho, float *Vx, float *Vy, float *Vz, float *Etot, float *Eneint, float *Bx, float *By, float *Bz, int size)
{
// get thread and block index
const long tx = threadIdx.x;
const long bx = blockIdx.x;
const long by = blockIdx.y;
int igrid = tx + bx*CUDA_BLOCK_SIZE + by*CUDA_BLOCK_SIZE*CUDA_GRID_SIZE;
if (igrid >= size)
return;
// compute internal energy
Eneint[igrid] = Etot[igrid] - 0.5*(Vx[igrid]*Vx[igrid] + Vy[igrid]*Vy[igrid] + Vz[igrid]*Vz[igrid]) -
0.5*(Bx[igrid]*Bx[igrid] + By[igrid]*By[igrid] + Bz[igrid]*Bz[igrid])/Rho[igrid];
}
|
43940f893d75f8d5d027d8514ca63e24800b5da6.cu
|
#include "includes.h"
__global__ void ComputeInternalEnergy_kernel(float *Rho, float *Vx, float *Vy, float *Vz, float *Etot, float *Eneint, float *Bx, float *By, float *Bz, int size)
{
// get thread and block index
const long tx = threadIdx.x;
const long bx = blockIdx.x;
const long by = blockIdx.y;
int igrid = tx + bx*CUDA_BLOCK_SIZE + by*CUDA_BLOCK_SIZE*CUDA_GRID_SIZE;
if (igrid >= size)
return;
// compute internal energy
Eneint[igrid] = Etot[igrid] - 0.5*(Vx[igrid]*Vx[igrid] + Vy[igrid]*Vy[igrid] + Vz[igrid]*Vz[igrid]) -
0.5*(Bx[igrid]*Bx[igrid] + By[igrid]*By[igrid] + Bz[igrid]*Bz[igrid])/Rho[igrid];
}
|
88e76793cb13395216cb5f140c42eba69e88f890.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//scan.cu
//#include "kernel.hip"
#include "comm.h"
#include "wtime.h"
#include "iostream"
#define max_thd 256
#define max_block 256
#define thread_limit 256
#define block_limit 1024
#define GPU_COWORKER 1
graph * mygraph;
__global__ void block_binary_kernel
( vertex_t* head,
vertex_t* adj,
vertex_t* adj_list,
index_t* begin,
index_t Ns,
index_t Ne,
index_t* count
)
{
//phase 1, partition
index_t tid = Ns + (threadIdx.x + blockIdx.x * blockDim.x)/ max_thd;
int i = threadIdx.x% max_thd;
index_t mycount=0;
// __shared__ vertex_t cache[256];
__shared__ index_t local[max_thd];
while(tid<Ne){
vertex_t A = head[tid];
vertex_t B = adj[tid];
index_t m = begin[A+1]-begin[A];//degree[A];
index_t n = begin[B+1]-begin[B];//degree[B];
index_t temp;
if(m<n){
temp = A;
A = B;
B = temp;
temp = m;
m = n;
n = temp;
}
vertex_t* a = &(adj_list[begin[A]]);
vertex_t* b = &(adj_list[begin[B]]);
//initial cache
local[i]=a[i*m/max_thd];
__syncthreads();
//search
int j=i;
while(j<n){
vertex_t X = b[j];
vertex_t Y;
//phase 1: cache
int bot = 0;
int top = max_thd;
int r;
while(top>bot+1){
r = (top+bot)/2;
Y = local[r];
if(X==Y){
mycount++;
bot = top + max_thd;
}
if(X<Y){
top = r;
}
if(X>Y){
bot = r;
}
}
//phase 2
bot = bot*m/max_thd;
top = top*m/max_thd -1;
while(top>=bot){
r = (top+bot)/2;
Y = a[r];
if(X==Y){
mycount++;
}
if(X<=Y){
top = r-1;
}
if(X>=Y){
bot = r+1;
}
}
j += max_thd;
}
tid += GPU_COWORKER * gridDim.x*blockDim.x/ max_thd;
__syncthreads();
}
//reduce
__syncthreads();
local[threadIdx.x] = mycount;
__syncthreads();
if(threadIdx.x==0){
index_t val=0;
for(int i=0; i<blockDim.x; i++){
val+= local[i];
}
count[blockIdx.x]+=val;
// count[blockIdx.x]=val;
}
}
__global__ void warp_binary_kernel
( vertex_t* head,
vertex_t* adj,
vertex_t* adj_list,
index_t* begin,
index_t Ns,
index_t Ne,
index_t* count
)
{
//phase 1, partition
index_t tid = (threadIdx.x + blockIdx.x * blockDim.x)/32 + Ns;
index_t mycount=0;
__shared__ index_t local[max_thd];
int i = threadIdx.x%32;
int p = threadIdx.x/32;
while(tid<Ne){
vertex_t A = head[tid];
vertex_t B = adj[tid];
index_t m = begin[A+1]-begin[A];//degree[A];
index_t n = begin[B+1]-begin[B];//degree[B];
index_t temp;
if(m<n){
temp = A;
A = B;
B = temp;
temp = m;
m = n;
n = temp;
}
vertex_t* a = &(adj_list[begin[A]]);
vertex_t* b = &(adj_list[begin[B]]);
//initial cache
local[p*32+i]=a[i*m/32];
__syncthreads();
//search
int j=i;
while(j<n){
vertex_t X = b[j];
vertex_t Y;
//phase 1: cache
int bot = 0;
int top = 32;
int r;
while(top>bot+1){
r = (top+bot)/2;
Y = local[p*32+r];
if(X==Y){
mycount++;
bot = top + 32;
}
if(X<Y){
top = r;
}
if(X>Y){
bot = r;
}
}
//phase 2
bot = bot*m/32;
top = top*m/32 -1;
while(top>=bot){
r = (top+bot)/2;
Y = a[r];
if(X==Y){
mycount++;
}
if(X<=Y){
top = r-1;
}
if(X>=Y){
bot = r+1;
}
}
j += 32;
}
tid += GPU_COWORKER* blockDim.x*gridDim.x/32;
__syncthreads();
}
__syncthreads();
//reduce
local[threadIdx.x] = mycount;
__syncthreads();
if(threadIdx.x==0){
index_t val=0;
for(int i=0; i<blockDim.x; i++){
val+= local[i];
}
count[blockIdx.x]=val;
}
__syncthreads();
}
//----------------------------------------------------------------------------------------
__global__ void reduce_kernel2(index_t* count)
{
index_t val = 0;
for(int i=0; i<max_block; i++){
val += count[i];
}
count[0] = val;
}
//---------------------------------------- cpu function--------------------
//------------------------------------------------------------------
void* part_scan(void * data){
index_t thd_count=0;
int GPU_id = *(int*)data;
int i = GPU_id;
// cout<<"GPU id = "<<GPU_id<<"\n";
// hipSetDevice(GPU_id);
hipSetDevice(2);
H_ERR(hipDeviceSynchronize() );
vertex_t* dev_adj;
index_t* dev_begin;
index_t* dev_count;
index_t partEdgeCount = mygraph->partEdgeCount[i];
vertex_t vert_count = mygraph->vert_count;
vertex_t* partAdj = mygraph->partAdj[i];
index_t* partBegin = mygraph->partBegin[i];
index_t* count = mygraph->count;
H_ERR(hipMalloc(&dev_adj, partEdgeCount*sizeof(vertex_t)) );
H_ERR(hipMalloc(&dev_begin, (vert_count+1)*sizeof(index_t)) );
H_ERR(hipMalloc(&dev_count, max_block*sizeof(index_t)) );
index_t* block_offset;
H_ERR(hipMalloc(&block_offset, max_block*sizeof(index_t)) );
H_ERR(hipMemcpy(dev_adj, partAdj, partEdgeCount*sizeof(vertex_t), hipMemcpyHostToDevice) );
H_ERR(hipMemcpy(dev_begin, partBegin, (vert_count+1)*sizeof(index_t), hipMemcpyHostToDevice) );
double time2=wtime();
for(int j=0; j<PART_NUM; j++){
index_t totalEdgeCount = mygraph->partEdgeCount[j];
vertex_t* head = mygraph->partHead[j];
vertex_t* adj = mygraph->partAdj[j];
vertex_t* src_head;
vertex_t* src_adj;
H_ERR(hipMalloc(&src_head, totalEdgeCount*sizeof(vertex_t)) );
H_ERR(hipMalloc(&src_adj, totalEdgeCount*sizeof(vertex_t)) );
H_ERR(hipMemcpy(src_adj, adj, totalEdgeCount*sizeof(vertex_t), hipMemcpyHostToDevice) );
H_ERR(hipMemcpy(src_head, head, totalEdgeCount*sizeof(vertex_t), hipMemcpyHostToDevice) );
//
double time1=wtime();
H_ERR(hipDeviceSynchronize() );
hipLaunchKernelGGL(( warp_binary_kernel), dim3(max_block),dim3(max_thd), 0, 0,
src_head,
src_adj,
dev_adj,
dev_begin,
0,
totalEdgeCount,
dev_count
);
H_ERR(hipDeviceSynchronize() );
hipLaunchKernelGGL(( reduce_kernel2) , dim3(1),dim3(1), 0, 0, dev_count);
H_ERR(hipDeviceSynchronize() );
H_ERR(hipMemcpy(&count[i], dev_count, sizeof(index_t), hipMemcpyDeviceToHost));
thd_count += count[i];
H_ERR(hipFree(src_head) );
H_ERR(hipFree(src_adj) );
}
double time4 = wtime();
count[i] = thd_count;
H_ERR(hipFree(dev_adj) );
H_ERR(hipFree(dev_begin) );
H_ERR(hipFree(block_offset) );
H_ERR(hipFree(dev_count) );
return NULL;
}
|
88e76793cb13395216cb5f140c42eba69e88f890.cu
|
//scan.cu
//#include "kernel.cu"
#include "comm.h"
#include "wtime.h"
#include "iostream"
#define max_thd 256
#define max_block 256
#define thread_limit 256
#define block_limit 1024
#define GPU_COWORKER 1
graph * mygraph;
__global__ void block_binary_kernel
( vertex_t* head,
vertex_t* adj,
vertex_t* adj_list,
index_t* begin,
index_t Ns,
index_t Ne,
index_t* count
)
{
//phase 1, partition
index_t tid = Ns + (threadIdx.x + blockIdx.x * blockDim.x)/ max_thd;
int i = threadIdx.x% max_thd;
index_t mycount=0;
// __shared__ vertex_t cache[256];
__shared__ index_t local[max_thd];
while(tid<Ne){
vertex_t A = head[tid];
vertex_t B = adj[tid];
index_t m = begin[A+1]-begin[A];//degree[A];
index_t n = begin[B+1]-begin[B];//degree[B];
index_t temp;
if(m<n){
temp = A;
A = B;
B = temp;
temp = m;
m = n;
n = temp;
}
vertex_t* a = &(adj_list[begin[A]]);
vertex_t* b = &(adj_list[begin[B]]);
//initial cache
local[i]=a[i*m/max_thd];
__syncthreads();
//search
int j=i;
while(j<n){
vertex_t X = b[j];
vertex_t Y;
//phase 1: cache
int bot = 0;
int top = max_thd;
int r;
while(top>bot+1){
r = (top+bot)/2;
Y = local[r];
if(X==Y){
mycount++;
bot = top + max_thd;
}
if(X<Y){
top = r;
}
if(X>Y){
bot = r;
}
}
//phase 2
bot = bot*m/max_thd;
top = top*m/max_thd -1;
while(top>=bot){
r = (top+bot)/2;
Y = a[r];
if(X==Y){
mycount++;
}
if(X<=Y){
top = r-1;
}
if(X>=Y){
bot = r+1;
}
}
j += max_thd;
}
tid += GPU_COWORKER * gridDim.x*blockDim.x/ max_thd;
__syncthreads();
}
//reduce
__syncthreads();
local[threadIdx.x] = mycount;
__syncthreads();
if(threadIdx.x==0){
index_t val=0;
for(int i=0; i<blockDim.x; i++){
val+= local[i];
}
count[blockIdx.x]+=val;
// count[blockIdx.x]=val;
}
}
__global__ void warp_binary_kernel
( vertex_t* head,
vertex_t* adj,
vertex_t* adj_list,
index_t* begin,
index_t Ns,
index_t Ne,
index_t* count
)
{
//phase 1, partition
index_t tid = (threadIdx.x + blockIdx.x * blockDim.x)/32 + Ns;
index_t mycount=0;
__shared__ index_t local[max_thd];
int i = threadIdx.x%32;
int p = threadIdx.x/32;
while(tid<Ne){
vertex_t A = head[tid];
vertex_t B = adj[tid];
index_t m = begin[A+1]-begin[A];//degree[A];
index_t n = begin[B+1]-begin[B];//degree[B];
index_t temp;
if(m<n){
temp = A;
A = B;
B = temp;
temp = m;
m = n;
n = temp;
}
vertex_t* a = &(adj_list[begin[A]]);
vertex_t* b = &(adj_list[begin[B]]);
//initial cache
local[p*32+i]=a[i*m/32];
__syncthreads();
//search
int j=i;
while(j<n){
vertex_t X = b[j];
vertex_t Y;
//phase 1: cache
int bot = 0;
int top = 32;
int r;
while(top>bot+1){
r = (top+bot)/2;
Y = local[p*32+r];
if(X==Y){
mycount++;
bot = top + 32;
}
if(X<Y){
top = r;
}
if(X>Y){
bot = r;
}
}
//phase 2
bot = bot*m/32;
top = top*m/32 -1;
while(top>=bot){
r = (top+bot)/2;
Y = a[r];
if(X==Y){
mycount++;
}
if(X<=Y){
top = r-1;
}
if(X>=Y){
bot = r+1;
}
}
j += 32;
}
tid += GPU_COWORKER* blockDim.x*gridDim.x/32;
__syncthreads();
}
__syncthreads();
//reduce
local[threadIdx.x] = mycount;
__syncthreads();
if(threadIdx.x==0){
index_t val=0;
for(int i=0; i<blockDim.x; i++){
val+= local[i];
}
count[blockIdx.x]=val;
}
__syncthreads();
}
//----------------------------------------------------------------------------------------
__global__ void reduce_kernel2(index_t* count)
{
index_t val = 0;
for(int i=0; i<max_block; i++){
val += count[i];
}
count[0] = val;
}
//---------------------------------------- cpu function--------------------
//------------------------------------------------------------------
void* part_scan(void * data){
index_t thd_count=0;
int GPU_id = *(int*)data;
int i = GPU_id;
// cout<<"GPU id = "<<GPU_id<<"\n";
// cudaSetDevice(GPU_id);
cudaSetDevice(2);
H_ERR(cudaDeviceSynchronize() );
vertex_t* dev_adj;
index_t* dev_begin;
index_t* dev_count;
index_t partEdgeCount = mygraph->partEdgeCount[i];
vertex_t vert_count = mygraph->vert_count;
vertex_t* partAdj = mygraph->partAdj[i];
index_t* partBegin = mygraph->partBegin[i];
index_t* count = mygraph->count;
H_ERR(cudaMalloc(&dev_adj, partEdgeCount*sizeof(vertex_t)) );
H_ERR(cudaMalloc(&dev_begin, (vert_count+1)*sizeof(index_t)) );
H_ERR(cudaMalloc(&dev_count, max_block*sizeof(index_t)) );
index_t* block_offset;
H_ERR(cudaMalloc(&block_offset, max_block*sizeof(index_t)) );
H_ERR(cudaMemcpy(dev_adj, partAdj, partEdgeCount*sizeof(vertex_t), cudaMemcpyHostToDevice) );
H_ERR(cudaMemcpy(dev_begin, partBegin, (vert_count+1)*sizeof(index_t), cudaMemcpyHostToDevice) );
double time2=wtime();
for(int j=0; j<PART_NUM; j++){
index_t totalEdgeCount = mygraph->partEdgeCount[j];
vertex_t* head = mygraph->partHead[j];
vertex_t* adj = mygraph->partAdj[j];
vertex_t* src_head;
vertex_t* src_adj;
H_ERR(cudaMalloc(&src_head, totalEdgeCount*sizeof(vertex_t)) );
H_ERR(cudaMalloc(&src_adj, totalEdgeCount*sizeof(vertex_t)) );
H_ERR(cudaMemcpy(src_adj, adj, totalEdgeCount*sizeof(vertex_t), cudaMemcpyHostToDevice) );
H_ERR(cudaMemcpy(src_head, head, totalEdgeCount*sizeof(vertex_t), cudaMemcpyHostToDevice) );
//
double time1=wtime();
H_ERR(cudaDeviceSynchronize() );
warp_binary_kernel<<<max_block,max_thd>>>
( src_head,
src_adj,
dev_adj,
dev_begin,
0,
totalEdgeCount,
dev_count
);
H_ERR(cudaDeviceSynchronize() );
reduce_kernel2 <<<1,1>>>(dev_count);
H_ERR(cudaDeviceSynchronize() );
H_ERR(cudaMemcpy(&count[i], dev_count, sizeof(index_t), cudaMemcpyDeviceToHost));
thd_count += count[i];
H_ERR(cudaFree(src_head) );
H_ERR(cudaFree(src_adj) );
}
double time4 = wtime();
count[i] = thd_count;
H_ERR(cudaFree(dev_adj) );
H_ERR(cudaFree(dev_begin) );
H_ERR(cudaFree(block_offset) );
H_ERR(cudaFree(dev_count) );
return NULL;
}
|
f6de372fa4c8e58393ea742ffdb69ca6de347a0f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* MaxpoolingLayerKernel.cu
*
* Created on: Jun 6, 2017
* Author: carol
*/
#include "cudaUtil.h"
#include "MaxpoolingLayerKernel.h"
#define MAXPOOL_SIZE 2
__device__ inline size_t get_out_index(size_t out_width, size_t out_height,
size_t out, size_t h_, size_t w_) {
return out * out_width * out_height + h_ / 2 * out_width + (w_ / 2);
}
__device__ inline Pair get_max_loc_pair(size_t first, size_t second) {
Pair ret;
ret.first = first;
ret.second = second;
return ret;
}
__device__ inline float max_in_(float_t *input_, Pair *max_loc,
size_t in_width_, size_t in_height_, size_t in_index, size_t h_,
size_t w_, size_t out_index) {
float_t max_pixel = 0;
size_t tmp;
#pragma unroll
for (size_t x = 0; x < MAXPOOL_SIZE; x++) {
#pragma unroll
for (size_t y = 0; y < MAXPOOL_SIZE; y++) {
tmp = (in_index * in_width_ * in_height_) + ((h_ + y) * in_width_)
+ (w_ + x);
if (max_pixel < input_[tmp]) {
max_pixel = input_[tmp];
max_loc[out_index] = get_max_loc_pair(out_index, tmp);
}
}
}
return max_pixel;
}
/**
* void MaxpoolingLayer::forward_cpu() {
for (size_t out = 0; out < out_depth_; out++) {
for (size_t h_ = 0; h_ < in_height_; h_ += 2) {
for (size_t w_ = 0; w_ < in_width_; w_ += 2) {
output_[getOutIndex(out, h_, w_)] = max_In_(out, h_, w_,
getOutIndex(out, h_, w_));
}
}
}
}
*/
__global__ void forward_maxpool_layer_kernel(float_t *input_, Pair *max_loc,
float_t *output_, size_t out_width, size_t out_height,
size_t out_depth_, size_t in_height, size_t in_width) {
int h_ = blockIdx.y * blockDim.y + threadIdx.y;
int w_ = (blockIdx.x * blockDim.x + threadIdx.x) / out_depth_;
int out = (blockIdx.x * blockDim.x + threadIdx.x) % out_depth_;
// for (size_t out = 0; out < out_depth_; out++) {
// for (size_t h_ = 0; h_ < in_height_; h_ += 2) {
// for (size_t w_ = 0; w_ < in_width_; w_ += 2) {
if ((out < out_depth_) && (h_ < in_height) && (w_ < in_width) && !(h_ % 2)
&& !(w_ % 2)) {
size_t index = get_out_index(out_width, out_height, out, h_, w_);
output_[index] = max_in_(input_, max_loc, in_width, in_height, out, h_,
w_, index);
}
}
void call_forward_maxpool_layer_gpu(float_t *input, float_t *output,
Pair *max_loc, size_t out_width, size_t out_height, size_t out_depth,
size_t in_height, size_t in_width) {
dim3 blocks, threads;
cuda_gridsize(&threads, &blocks, in_width * out_depth, in_height);
//printf("in_height %d in_width * out_depth %d threads x %d threads y %d\n", in_height, in_width * out_depth, threads.x, threads.y);
hipLaunchKernelGGL(( forward_maxpool_layer_kernel), dim3(blocks), dim3(threads), 0, 0, input, max_loc, output,
out_width, out_height, out_depth, in_height, in_width);
CudaCheckError();
}
__global__ void backpropagation_maxpool(Pair *max_loc, float *g_, float *g_next,
size_t max_size, size_t g_max_size) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x >= max_size)
return;
Pair p = max_loc[x];
if (p.first != MAX && p.second != MAX && p.second < g_max_size && p.first < g_max_size) {
g_[p.second] = g_next[p.first];
}
}
void call_backpropagation_maxpool(Pair *max_loc, float *g_, float *g_next, size_t max_size, size_t g_max_size) {
dim3 blocks, threads;
cuda_gridsize(&threads, &blocks, max_size);
// for(int i = 0; i < max_size; i++){
// auto p = max_loc[i];
// if(p.first != MAX && p.second == MAX){
// std::cout << p.first << " " << p.second << " " << i << "\n";
// }
// }
assert(g_max_size != 0);
hipLaunchKernelGGL(( backpropagation_maxpool), dim3(blocks), dim3(threads), 0, 0, max_loc, g_, g_next, max_size, g_max_size);
CudaCheckError();
}
|
f6de372fa4c8e58393ea742ffdb69ca6de347a0f.cu
|
/*
* MaxpoolingLayerKernel.cu
*
* Created on: Jun 6, 2017
* Author: carol
*/
#include "cudaUtil.h"
#include "MaxpoolingLayerKernel.h"
#define MAXPOOL_SIZE 2
__device__ inline size_t get_out_index(size_t out_width, size_t out_height,
size_t out, size_t h_, size_t w_) {
return out * out_width * out_height + h_ / 2 * out_width + (w_ / 2);
}
__device__ inline Pair get_max_loc_pair(size_t first, size_t second) {
Pair ret;
ret.first = first;
ret.second = second;
return ret;
}
__device__ inline float max_in_(float_t *input_, Pair *max_loc,
size_t in_width_, size_t in_height_, size_t in_index, size_t h_,
size_t w_, size_t out_index) {
float_t max_pixel = 0;
size_t tmp;
#pragma unroll
for (size_t x = 0; x < MAXPOOL_SIZE; x++) {
#pragma unroll
for (size_t y = 0; y < MAXPOOL_SIZE; y++) {
tmp = (in_index * in_width_ * in_height_) + ((h_ + y) * in_width_)
+ (w_ + x);
if (max_pixel < input_[tmp]) {
max_pixel = input_[tmp];
max_loc[out_index] = get_max_loc_pair(out_index, tmp);
}
}
}
return max_pixel;
}
/**
* void MaxpoolingLayer::forward_cpu() {
for (size_t out = 0; out < out_depth_; out++) {
for (size_t h_ = 0; h_ < in_height_; h_ += 2) {
for (size_t w_ = 0; w_ < in_width_; w_ += 2) {
output_[getOutIndex(out, h_, w_)] = max_In_(out, h_, w_,
getOutIndex(out, h_, w_));
}
}
}
}
*/
__global__ void forward_maxpool_layer_kernel(float_t *input_, Pair *max_loc,
float_t *output_, size_t out_width, size_t out_height,
size_t out_depth_, size_t in_height, size_t in_width) {
int h_ = blockIdx.y * blockDim.y + threadIdx.y;
int w_ = (blockIdx.x * blockDim.x + threadIdx.x) / out_depth_;
int out = (blockIdx.x * blockDim.x + threadIdx.x) % out_depth_;
// for (size_t out = 0; out < out_depth_; out++) {
// for (size_t h_ = 0; h_ < in_height_; h_ += 2) {
// for (size_t w_ = 0; w_ < in_width_; w_ += 2) {
if ((out < out_depth_) && (h_ < in_height) && (w_ < in_width) && !(h_ % 2)
&& !(w_ % 2)) {
size_t index = get_out_index(out_width, out_height, out, h_, w_);
output_[index] = max_in_(input_, max_loc, in_width, in_height, out, h_,
w_, index);
}
}
void call_forward_maxpool_layer_gpu(float_t *input, float_t *output,
Pair *max_loc, size_t out_width, size_t out_height, size_t out_depth,
size_t in_height, size_t in_width) {
dim3 blocks, threads;
cuda_gridsize(&threads, &blocks, in_width * out_depth, in_height);
//printf("in_height %d in_width * out_depth %d threads x %d threads y %d\n", in_height, in_width * out_depth, threads.x, threads.y);
forward_maxpool_layer_kernel<<<blocks, threads>>>(input, max_loc, output,
out_width, out_height, out_depth, in_height, in_width);
CudaCheckError();
}
__global__ void backpropagation_maxpool(Pair *max_loc, float *g_, float *g_next,
size_t max_size, size_t g_max_size) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x >= max_size)
return;
Pair p = max_loc[x];
if (p.first != MAX && p.second != MAX && p.second < g_max_size && p.first < g_max_size) {
g_[p.second] = g_next[p.first];
}
}
void call_backpropagation_maxpool(Pair *max_loc, float *g_, float *g_next, size_t max_size, size_t g_max_size) {
dim3 blocks, threads;
cuda_gridsize(&threads, &blocks, max_size);
// for(int i = 0; i < max_size; i++){
// auto p = max_loc[i];
// if(p.first != MAX && p.second == MAX){
// std::cout << p.first << " " << p.second << " " << i << "\n";
// }
// }
assert(g_max_size != 0);
backpropagation_maxpool<<<blocks, threads>>>(max_loc, g_, g_next, max_size, g_max_size);
CudaCheckError();
}
|
aa788623fd14e2306f7f4fcf27d5828cb5bd1d06.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
count the number of matching tuples in each partition for each thread
*/
#include <stdio.h>
#include <stdint.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "tuple.h"
texture <int2, hipTextureType1D, hipReadModeElementType> lt_tex;
texture <int2, hipTextureType1D, hipReadModeElementType> rt_tex;
extern "C" {
__global__
void count_partitioning_lt(
//TUPLE *t,
int *L,
int p_num,
int t_num,
int rows_num
)
{
int rows_n = rows_num;
int p_n = p_num;
int t_n = t_num;
int DEF = blockIdx.x * blockDim.x * PER_TH;
int x = blockIdx.x * blockDim.x + threadIdx.x;
int Dim = 0;
if(gridDim.x-1 == blockIdx.x){
Dim = t_n - blockIdx.x*blockDim.x;
}else{
Dim = blockDim.x;
}
// Matching phase
int hash = 0;
int2 fetched_val;
if(x < t_n){
for(int i = 0; i<PER_TH&&(DEF+threadIdx.x+i*Dim)<rows_n;i++){
fetched_val = tex1Dfetch(lt_tex, DEF + threadIdx.x + i*Dim);
hash = fetched_val.y % p_n;
//hash = t[DEF + threadIdx.x + i*Dim].val % p_n;
L[hash*t_n + x]++;
}
}
}
__global__
void count_partitioning_rt(
//TUPLE *t,
int *L,
int p_num,
int t_num,
int rows_num
)
{
int rows_n = rows_num;
int p_n = p_num;
int t_n = t_num;
int DEF = blockIdx.x * blockDim.x * PER_TH;
int x = blockIdx.x * blockDim.x + threadIdx.x;
int Dim = 0;
if(gridDim.x-1 == blockIdx.x){
Dim = t_n - blockIdx.x*blockDim.x;
}else{
Dim = blockDim.x;
}
// Matching phase
int hash = 0;
int2 fetched_val;
if(x < t_n){
for(int i = 0; i<PER_TH&&(DEF+threadIdx.x+i*Dim)<rows_n;i++){
fetched_val = tex1Dfetch(rt_tex, DEF + threadIdx.x + i*Dim);
hash = fetched_val.y % p_n;
//hash = t[DEF + threadIdx.x + i*Dim].val % p_n;
L[hash*t_n + x]++;
}
}
}
__global__
void partitioning_lt(
TUPLE *t,
TUPLE *pt,
int *L,
int p_num,
int t_num,
int rows_num
)
{
int p_n = p_num;
int t_n = t_num;
int rows_n = rows_num;
int DEF = blockIdx.x * blockDim.x * PER_TH;
int x = blockIdx.x * blockDim.x + threadIdx.x;
int Dim = 0;
if(gridDim.x-1 == blockIdx.x){
Dim = t_n - blockIdx.x*blockDim.x;
}else{
Dim = blockDim.x;
}
// Matching phase
int hash = 0;
int temp = 0;
//int2 fetched_val;
if(x < t_n){
for(int i = 0; i<PER_TH&&(DEF+threadIdx.x+i*Dim)<rows_n;i++){
//executed up to this point
//fetched_val = tex1Dfetch(lt_tex, DEF + threadIdx.x + i*Dim);
//hash = fetched_val.y % p_n;
hash = t[DEF + threadIdx.x + i*Dim].val%p_n;
temp = L[hash*t_n + x];
/*
pt[temp].key = fetched_val.x;
pt[temp].val = fetched_val.y;
*/
pt[temp].key = t[DEF + threadIdx.x + i*Dim].key;
pt[temp].val = t[DEF + threadIdx.x + i*Dim].val;
L[hash*t_n + x] = temp + 1;
//printf("i = %d\tloc = %d\tt = %d\n",hash*t_num + x,L[hash*t_num + x],t[x*PER_TH + i].val);
}
}
}
__global__
void partitioning_rt(
//TUPLE *t,
TUPLE *pt,
int *L,
int p_num,
int t_num,
int rows_num
)
{
int p_n = p_num;
int t_n = t_num;
int rows_n = rows_num;
int DEF = blockIdx.x * blockDim.x * PER_TH;
int x = blockIdx.x * blockDim.x + threadIdx.x;
int Dim = 0;
if(gridDim.x-1 == blockIdx.x){
Dim = t_n - blockIdx.x*blockDim.x;
}else{
Dim = blockDim.x;
}
// Matching phase
int hash = 0;
int temp = 0;
int2 fetched_val;
if(x < t_n){
for(int i = 0; i<PER_TH&&(DEF+threadIdx.x+i*Dim)<rows_n;i++){
fetched_val = tex1Dfetch(rt_tex, DEF + threadIdx.x + i*Dim);
hash = fetched_val.y % p_n;
//hash = t[DEF + threadIdx.x + i*Dim].val%p_n;
temp = L[hash*t_n + x];
pt[temp].key = fetched_val.x;
pt[temp].val = fetched_val.y;
/*
pt[temp].key = t[DEF + threadIdx.x + i*Dim].key;
pt[temp].val = t[DEF + threadIdx.x + i*Dim].val;
*/
L[hash*t_n + x] = temp + 1;
//printf("i = %d\tloc = %d\tt = %d\n",hash*t_num + x,L[hash*t_num + x],t[x*PER_TH + i].val);
}
}
}
}
|
aa788623fd14e2306f7f4fcf27d5828cb5bd1d06.cu
|
/*
count the number of matching tuples in each partition for each thread
*/
#include <stdio.h>
#include <stdint.h>
#include <cuda.h>
#include <sys/time.h>
#include "tuple.h"
texture <int2, cudaTextureType1D, cudaReadModeElementType> lt_tex;
texture <int2, cudaTextureType1D, cudaReadModeElementType> rt_tex;
extern "C" {
__global__
void count_partitioning_lt(
//TUPLE *t,
int *L,
int p_num,
int t_num,
int rows_num
)
{
int rows_n = rows_num;
int p_n = p_num;
int t_n = t_num;
int DEF = blockIdx.x * blockDim.x * PER_TH;
int x = blockIdx.x * blockDim.x + threadIdx.x;
int Dim = 0;
if(gridDim.x-1 == blockIdx.x){
Dim = t_n - blockIdx.x*blockDim.x;
}else{
Dim = blockDim.x;
}
// Matching phase
int hash = 0;
int2 fetched_val;
if(x < t_n){
for(int i = 0; i<PER_TH&&(DEF+threadIdx.x+i*Dim)<rows_n;i++){
fetched_val = tex1Dfetch(lt_tex, DEF + threadIdx.x + i*Dim);
hash = fetched_val.y % p_n;
//hash = t[DEF + threadIdx.x + i*Dim].val % p_n;
L[hash*t_n + x]++;
}
}
}
__global__
void count_partitioning_rt(
//TUPLE *t,
int *L,
int p_num,
int t_num,
int rows_num
)
{
int rows_n = rows_num;
int p_n = p_num;
int t_n = t_num;
int DEF = blockIdx.x * blockDim.x * PER_TH;
int x = blockIdx.x * blockDim.x + threadIdx.x;
int Dim = 0;
if(gridDim.x-1 == blockIdx.x){
Dim = t_n - blockIdx.x*blockDim.x;
}else{
Dim = blockDim.x;
}
// Matching phase
int hash = 0;
int2 fetched_val;
if(x < t_n){
for(int i = 0; i<PER_TH&&(DEF+threadIdx.x+i*Dim)<rows_n;i++){
fetched_val = tex1Dfetch(rt_tex, DEF + threadIdx.x + i*Dim);
hash = fetched_val.y % p_n;
//hash = t[DEF + threadIdx.x + i*Dim].val % p_n;
L[hash*t_n + x]++;
}
}
}
__global__
void partitioning_lt(
TUPLE *t,
TUPLE *pt,
int *L,
int p_num,
int t_num,
int rows_num
)
{
int p_n = p_num;
int t_n = t_num;
int rows_n = rows_num;
int DEF = blockIdx.x * blockDim.x * PER_TH;
int x = blockIdx.x * blockDim.x + threadIdx.x;
int Dim = 0;
if(gridDim.x-1 == blockIdx.x){
Dim = t_n - blockIdx.x*blockDim.x;
}else{
Dim = blockDim.x;
}
// Matching phase
int hash = 0;
int temp = 0;
//int2 fetched_val;
if(x < t_n){
for(int i = 0; i<PER_TH&&(DEF+threadIdx.x+i*Dim)<rows_n;i++){
//executed up to this point
//fetched_val = tex1Dfetch(lt_tex, DEF + threadIdx.x + i*Dim);
//hash = fetched_val.y % p_n;
hash = t[DEF + threadIdx.x + i*Dim].val%p_n;
temp = L[hash*t_n + x];
/*
pt[temp].key = fetched_val.x;
pt[temp].val = fetched_val.y;
*/
pt[temp].key = t[DEF + threadIdx.x + i*Dim].key;
pt[temp].val = t[DEF + threadIdx.x + i*Dim].val;
L[hash*t_n + x] = temp + 1;
//printf("i = %d\tloc = %d\tt = %d\n",hash*t_num + x,L[hash*t_num + x],t[x*PER_TH + i].val);
}
}
}
__global__
void partitioning_rt(
//TUPLE *t,
TUPLE *pt,
int *L,
int p_num,
int t_num,
int rows_num
)
{
int p_n = p_num;
int t_n = t_num;
int rows_n = rows_num;
int DEF = blockIdx.x * blockDim.x * PER_TH;
int x = blockIdx.x * blockDim.x + threadIdx.x;
int Dim = 0;
if(gridDim.x-1 == blockIdx.x){
Dim = t_n - blockIdx.x*blockDim.x;
}else{
Dim = blockDim.x;
}
// Matching phase
int hash = 0;
int temp = 0;
int2 fetched_val;
if(x < t_n){
for(int i = 0; i<PER_TH&&(DEF+threadIdx.x+i*Dim)<rows_n;i++){
fetched_val = tex1Dfetch(rt_tex, DEF + threadIdx.x + i*Dim);
hash = fetched_val.y % p_n;
//hash = t[DEF + threadIdx.x + i*Dim].val%p_n;
temp = L[hash*t_n + x];
pt[temp].key = fetched_val.x;
pt[temp].val = fetched_val.y;
/*
pt[temp].key = t[DEF + threadIdx.x + i*Dim].key;
pt[temp].val = t[DEF + threadIdx.x + i*Dim].val;
*/
L[hash*t_n + x] = temp + 1;
//printf("i = %d\tloc = %d\tt = %d\n",hash*t_num + x,L[hash*t_num + x],t[x*PER_TH + i].val);
}
}
}
}
|
31f74c0765d1528aa5f51a6be7560c993e7e663e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2011, Alex Krizhevsky ([email protected])
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <iostream>
#include <assert.h>
#include <nvmatrix_kernels.cuh>
#include <nvmatrix.cuh>
#include <conv_util.cuh>
using namespace std;
__device__ inline float square(const float a) {
return a * a;
}
/*
* blockIdx.y determines module in batches of B_Y
* blockIdx.x determines filter in batches of B_X * filtersPerThread
*
* weights: (numModules, numColors, filterPixels, numFilters)
* Not fully coalesced if B_X < 32, so use cache.
*/
template <int B_Y, int B_X, int filtersPerThread>
__global__ void kNormalizeLCWeights(float* weights, const uint numFilters, const int numModules, const uint weightsPerFilter, const float norm) {
const uint moduleIdx = B_Y * blockIdx.y + threadIdx.y;
const uint filterIdx = B_X * blockIdx.x + threadIdx.x;
float prod[filtersPerThread];
#pragma unroll
for (uint i = 0; i < filtersPerThread; ++i) {
prod[i] = 0;
}
if (moduleIdx < numModules) {
weights += moduleIdx * weightsPerFilter * numFilters + filterIdx;
for (uint p = 0; p < weightsPerFilter; ++p) {
#pragma unroll
for (uint i = 0; i < filtersPerThread; ++i) {
prod[i] += square(weights[p * numFilters + i * B_X]);
}
}
#pragma unroll
for (uint i = 0; i < filtersPerThread; ++i) {
prod[i] = sqrtf(prod[i]);
prod[i] = prod[i] > norm ? __fdividef(norm, prod[i]) : 1.0f;
}
for (uint p = 0; p < weightsPerFilter; ++p) {
#pragma unroll
for (uint i = 0; i < filtersPerThread; ++i) {
weights[p * numFilters + i * B_X] *= prod[i];
}
}
}
}
/*
* weights: (numModules, numColors, filterPixels, numFilters)
*/
void normalizeLocalWeights(NVMatrix& weights, int numModules, float norm) {
int numFilters = weights.getNumCols();
int weightsPerFilter = weights.getNumRows() / numModules;
assert(numModules * weightsPerFilter == weights.getNumRows());
assert(!weights.isTrans());
assert(weights.isContiguous());
assert(numFilters % 16 == 0);
int bx = numFilters % 32 == 0 ? 32 : 16;
int by = bx == 32 ? 4 : 8;
int filtersPerThread = numFilters % 128 == 0 ? 4 : numFilters % 64 == 0 ? 2 : 1;
dim3 blocks(numFilters / (bx * filtersPerThread), DIVUP(numModules, by));
dim3 threads(bx, by);
if (filtersPerThread == 4) {
hipFuncSetCacheConfig(kNormalizeLCWeights<4, 32, 4>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kNormalizeLCWeights<4, 32, 4>), dim3(blocks), dim3(threads), 0, 0, weights.getDevData(), numFilters, numModules, weightsPerFilter, norm);
} else if (filtersPerThread == 2) {
hipFuncSetCacheConfig(kNormalizeLCWeights<4, 32, 2>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kNormalizeLCWeights<4, 32, 2>), dim3(blocks), dim3(threads), 0, 0, weights.getDevData(), numFilters, numModules, weightsPerFilter, norm);
} else {
if (numFilters % 32 == 0) {
hipFuncSetCacheConfig(kNormalizeLCWeights<4, 32, 1>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kNormalizeLCWeights<4, 32, 1>), dim3(blocks), dim3(threads), 0, 0, weights.getDevData(), numFilters, numModules, weightsPerFilter, norm);
} else {
hipFuncSetCacheConfig(kNormalizeLCWeights<8, 16, 1>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kNormalizeLCWeights<8, 16, 1>), dim3(blocks), dim3(threads), 0, 0, weights.getDevData(), numFilters, numModules, weightsPerFilter, norm);
}
}
}
/*
* Block size 4x32
* blockIdx.x determines img idx in batches of 32*imgsPerThread
* blockIdx.y determines channel idx, pixel idx in batches of 4
*
* threadIdx.x determines case idx
* threadIdx.y determines pixel idx
*
* imgs: (numChannels, imgPixels, numImages) with given imgStride
* target: (numChannels, tgtPixels, numImages)
*/
template <int imgsPerThread, bool checkCaseBounds>
__global__ void kCrop(float* imgs, float* target, const uint numImages, const int imgStride,
const uint imgSize, const uint tgtSize, const uint startY, const uint startX) {
const uint imgPixels = imgSize * imgSize;
const uint tgtPixels = tgtSize * tgtSize;
const uint caseIdx = blockIdx.x * 32 * imgsPerThread + threadIdx.x;
const uint blockChanIdx = blockIdx.y / DIVUP(tgtPixels, 4);
const uint tgtPixelIdx = 4*(blockIdx.y % DIVUP(tgtPixels, 4)) + threadIdx.y;
const uint tgtPxY = tgtPixelIdx / tgtSize;
const uint tgtPxX = tgtPixelIdx % tgtSize;
const uint srcPixelIdx = (startY + tgtPxY) * imgSize + startX + tgtPxX;
if (tgtPixelIdx < tgtPixels) {
imgs += (blockChanIdx * imgPixels + srcPixelIdx) * imgStride + caseIdx;
target += (blockChanIdx * tgtPixels + tgtPixelIdx) * numImages + caseIdx;
#pragma unroll
for (uint i = 0; i < imgsPerThread; ++i) {
if (!checkCaseBounds || (caseIdx + 32 * i < numImages)) {
target[i * 32] = imgs[i * 32];
}
}
}
}
/*
* Block size 4x32
* blockIdx.y determines pixel idx in batches of 4
* blockIdx.x determines case idx in batches of 32*imgsPerThread
* threadIdx.y determines pixel idx
* threadIdx.x determines case idx
*
* imgs: (3, imgPixels, numImages) with given imgStride
* target: (3, imgPixels, numImages)
*
* Each thread produces (y,u,v) values for a particular (r,g,b) pixel
*
* The RGB --> YUV transform is (http://en.wikipedia.org/wiki/YUV):
*
* [Y] [0.2126 0.7152 0.0722 ][R]
* [U] = [-0.09991 -0.33609 0.436 ][G]
* [V] [0.615 -0.55861 -0.05639][B]
*/
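/* Sanity check of the matrix above for a white pixel, R = G = B = 1.0:
       Y =  0.2126  + 0.7152  + 0.0722  = 1.0
       U = -0.09991 - 0.33609 + 0.436   = 0.0
       V =  0.615   - 0.55861 - 0.05639 = 0.0
   i.e. full luma and zero chroma, as expected for a grey/white input. */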
template <int imgsPerThread, bool checkCaseBounds>
__global__ void kRGBToYUV(float* imgs, float* target, const int imgPixels, const int numImages, const int imgStride) {
const int caseIdx = blockIdx.x * 32 * imgsPerThread + threadIdx.x;
const int pxIdx = blockIdx.y * 4 + threadIdx.y;
if (pxIdx < imgPixels) {
const int imgChannelStride = imgPixels * imgStride;
const int tgtChannelStride = imgPixels * numImages;
imgs += pxIdx * imgStride + caseIdx;
target += pxIdx * numImages + caseIdx;
#pragma unroll
for (int i = 0; i < imgsPerThread; ++i) {
if (!checkCaseBounds || caseIdx + i * 32 < numImages) {
const float R = imgs[0 * imgChannelStride + i * 32];
const float G = imgs[1 * imgChannelStride + i * 32];
const float B = imgs[2 * imgChannelStride + i * 32];
target[0 * tgtChannelStride + i * 32] = 0.2126f * R + 0.7152f * G + 0.0722f * B; // Y
target[1 * tgtChannelStride + i * 32] = -0.09991f * R + -0.33609f * G + 0.436f * B; // U
target[2 * tgtChannelStride + i * 32] = 0.615f * R + -0.55861f * G + -0.05639f * B; // V
}
}
}
}
__device__ inline float labf(const float x) {
if (x > 0.0088564517f) {
return __powf(x, 0.3333f);
}
return 7.787037f * x + 0.13793103f;
}
/*
* Block size 4x32
* blockIdx.y determines pixel idx in batches of 4
* blockIdx.x determines case idx in batches of 32*imgsPerThread
* threadIdx.y determines pixel idx
* threadIdx.x determines case idx
*
* imgs: (3, imgPixels, numImages) with given imgStride
* target: (3, imgPixels, numImages)
*
* This proceeds in two steps.
*
* - First, RGB values are linearly transformed to XYZ as per
* http://en.wikipedia.org/wiki/CIE_XYZ_color_space
* - Second, XYZ values are nonlinearly transformed to L*a*b* as per
* http://en.wikipedia.org/wiki/Lab_color_space#The_forward_transformation
*
* Each thread produces (L*,a*,b*) values for a particular (r,g,b) pixel
*
* The RGB --> XYZ transform is:
*
* [X] [0.49 0.31 0.2 ][R]
* [Y] = 5.6506753 * [0.17697 0.8124 0.01063 ][G]
* [Z] [0 0.01 0.99 ][B]
*
* NOTE: The input should be in the range 0-1. Don't do mean-subtraction beforehand.
*
* Then X_max, Y_max, Z_max = 5.6506753.
*
* The range of the L* values is [0, 100].
* If the center flag is given, the range will be [-50, 50].
*
*/
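/*
 * Worked example (a sanity check, not part of the kernel): each row of the RGB -> XYZ
 * matrix as used in the code below sums to 1, so a grey input R = G = B = t gives
 * X = Y = Z = t and therefore a* = 500*(labf(X) - labf(Y)) = 0,
 * b* = 200*(labf(Y) - labf(Z)) = 0 and L* = 116*labf(t) - 16, which is 0 at t = 0
 * and 100 at t = 1.
 */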
template <int imgsPerThread, bool checkCaseBounds, bool center>
__global__ void kRGBToLAB(float* imgs, float* target, const int imgPixels, const int numImages, const int imgStride) {
const int caseIdx = blockIdx.x * 32 * imgsPerThread + threadIdx.x;
const int pxIdx = blockIdx.y * 4 + threadIdx.y;
if (pxIdx < imgPixels) {
const int imgChannelStride = imgPixels * imgStride;
const int tgtChannelStride = imgPixels * numImages;
imgs += pxIdx * imgStride + caseIdx;
target += pxIdx * numImages + caseIdx;
#pragma unroll
for (int i = 0; i < imgsPerThread; ++i) {
if (!checkCaseBounds || caseIdx + i * 32 < numImages) {
const float R = imgs[0 * imgChannelStride + i * 32];
const float G = imgs[1 * imgChannelStride + i * 32];
const float B = imgs[2 * imgChannelStride + i * 32];
const float X = (0.49f * R + 0.31f * G + 0.2f * B);
const float Y = (0.17697f * R + 0.8124f * G + 0.01063f * B);
const float Z = (0.01f * G + 0.99f * B);
const float labX = labf(X);
const float labY = labf(Y);
const float labZ = labf(Z);
target[0 * tgtChannelStride + i * 32] = 116.0f * labY - 16.0f - (center ? 50.0f : 0); // L*
target[1 * tgtChannelStride + i * 32] = 500.0f * (labX - labY); // a*
target[2 * tgtChannelStride + i * 32] = 200.0f * (labY - labZ); // b*
}
}
}
}
/*
* Block size 16x32.
* Each block produces a 4x4 chunk of the output image.
* threadIdx.y determines pixel idx in 4x4 chunk.
* threadIdx.x determines case idx.
* blockIdx.x determines case idx in batches of 32*imgsPerThread.
* blockIdx.y determines 4x4 chunk idx, channel idx.
*
* imgs: (numChannels, imgPixels, numImages) with given imgStride
* target: (numChannels, tgtPixels, numImages)
*
* imgSize = scale * tgtSize (roughly)
*
* This is a rather naive kernel that relies on cache for speed. But all it's doing
* is basic texture manipulation, which is very local in nature, so it should be ok.
* Also, it will in practice be a tiny fraction of the runtime of a large convnet.
*
* So that is my justification for being lazy here.
*/
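/*
 * Interpolation detail (a restatement of the code below, for reference): target pixel
 * (pxX, pxY) maps to source coordinate (pxX*scale + centerScale, pxY*scale + centerScale),
 * clamped to [0, imgSize - 1.01]. With u = 1 - frac(srcPxX) and w = frac(srcPxY), the
 * output is w*(c1 - c0) + c0, where
 *    c0 = u*topLeft    + (1 - u)*topRight
 *    c1 = u*bottomLeft + (1 - u)*bottomRight
 * i.e. ordinary bilinear interpolation between the four neighbouring source pixels.
 */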
template <int imgsPerThread, bool checkCaseBounds>
__global__ void kResizeBilinear(float* imgs, float* target, const int imgSize, const int tgtSize,
const int numImages, const int imgStride, const float scale,
const float centerScale) {
const int numChunksX = DIVUP(tgtSize, 4);
const int numChunks = numChunksX * numChunksX;
const int channelIdx = blockIdx.y / numChunks;
const int chunkIdx = blockIdx.y % numChunks;
const int chunkIdxX = chunkIdx % numChunksX;
const int chunkIdxY = chunkIdx / numChunksX;
const int caseIdx = blockIdx.x * 32 * imgsPerThread + threadIdx.x;
const int imgPixels = imgSize * imgSize;
const int tgtPixels = tgtSize * tgtSize;
const int pxX = 4 * chunkIdxX + threadIdx.y % 4;
const int pxY = 4 * chunkIdxY + threadIdx.y / 4;
if (pxY < tgtSize && pxX < tgtSize) {
const int pxIdx = pxY * tgtSize + pxX;
imgs += channelIdx * imgPixels * imgStride + caseIdx;
target += channelIdx * tgtPixels * numImages + pxIdx * numImages + caseIdx;
// This will cause slight distortions at the edges when upsampling in some cases.
// But I think that's not a big deal.
const float srcPxX = fmaxf(0.0f, fminf(__int2float_rn(imgSize) - 1.01f, __int2float_rn(pxX) * scale + centerScale));
const float srcPxY = fmaxf(0.0f, fminf(__int2float_rn(imgSize) - 1.01f, __int2float_rn(pxY) * scale + centerScale));
const float u = floorf(srcPxX + 1) - srcPxX;
const float w = srcPxY - floorf(srcPxY);
// Consider doing max(0, min(imgSize, x)) here
const int srcPx0 = (__float2int_rd(srcPxY) * imgSize + __float2int_rd(srcPxX)); // top-left
const int srcPx1 = srcPx0 + 1; // top-right
const int srcPx2 = srcPx0 + imgSize; // bottom-left
const int srcPx3 = srcPx2 + 1; // bottom-right
#pragma unroll
for (int c = 0; c < imgsPerThread; ++c) {
if (!checkCaseBounds || caseIdx + c * 32 < numImages) {
const float val0 = imgs[srcPx0 * imgStride + c * 32];
const float val1 = imgs[srcPx1 * imgStride + c * 32];
const float val2 = imgs[srcPx2 * imgStride + c * 32];
const float val3 = imgs[srcPx3 * imgStride + c * 32];
const float c0 = u * (val0 - val1) + val1;
const float c1 = u * (val2 - val3) + val3;
target[32 * c] = w * (c1 - c0) + c0;
}
}
}
}
/*
* Block size B_YxB_X.
* B_X*imgsPerThread*blockIdx.x + threadIdx.x determines img idx
* B_Y*blockIdx.y + threadIdx.y determines img row (col if !horiz), channel idx
*
* imgs: (numChannels, imgPixels, numImages) with given imgStride
* filter: (1, 2*radius + 1)
* target: (numChannels, imgPixels, numImages)
*
* target can be the same matrix as imgs.
 * radius must be one of 1, 2, 3, 4 (i.e. a filter width 2*radius + 1 of 3, 5, 7 or 9).
*
* Tried imgsPerThread, slower.
*/
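/*
 * Implementation note (describing the loops below): the kernel slides along one row
 * (or column when !horiz) of the image keeping 2*radius partially accumulated outputs
 * in registers. Each newly read pixel is added, weighted by the appropriate filter tap,
 * to every output whose window contains it: one output is completed and written, the
 * remaining partial sums are shifted down, and a fresh partial sum is started. Every
 * input element is therefore read from global memory exactly once.
 */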
template<int B_Y, int B_X, int radius>
__global__ void kGaussianBlur(float* imgs, float* filter, float* target, const int imgSize,
const int numImages, const int imgStride,
const bool horiz,
const float scaleTargets, const float scaleOutputs) {
    __shared__ float shFilter[2*radius]; // holds the filterWidth-1 = 2*radius taps loaded below
const int imgPixels = imgSize * imgSize;
const int ty = B_Y * blockIdx.y + threadIdx.y;
const int channelIdx = ty / imgSize;
const int rowIdx = ty % imgSize;
const int imgIdx = B_X*blockIdx.x + threadIdx.x;
const int filterWidth = 2*radius+1;
// const int tidx = B_Y * threadIdx.y + threadIdx.x;
if (horiz) {
imgs += channelIdx * imgPixels * imgStride + rowIdx * imgSize * imgStride + imgIdx;
target += channelIdx * imgPixels * numImages + rowIdx * imgSize * numImages + imgIdx;
} else {
imgs += channelIdx * imgPixels * imgStride + rowIdx * imgStride + imgIdx;
target += channelIdx * imgPixels * numImages + rowIdx * numImages + imgIdx;
}
float outputs[filterWidth-1];
#pragma unroll
for (int r = 0; r < filterWidth-1; r++) {
outputs[r] = 0;
}
if (threadIdx.x < filterWidth-1) {
shFilter[threadIdx.x] = filter[threadIdx.x];
}
__syncthreads();
if (imgIdx < numImages) {
// This writes radius*2 = filterWidth - 1 values to outputs
#pragma unroll
for (int col = 0; col < radius; col++) {
float px = imgs[0];
#pragma unroll
for (int r = 0; r < radius + 1 + col; r++) {
outputs[r] += px * shFilter[radius + col - r];
}
imgs += horiz ? imgStride : imgStride * imgSize;
}
// Unfortunately this has to be at this level of granularity
if (scaleTargets != 0) {
for (int col = radius; col < imgSize ; col++) { // loop over img columns
float px = imgs[0];
target[0] = scaleTargets * target[0] + scaleOutputs * (outputs[0] + px * shFilter[0]);
#pragma unroll
for (int r = 1; r < radius*2; r++) {
outputs[r-1] = outputs[r] + px * shFilter[r];
}
outputs[filterWidth - 2] = px * shFilter[0];
imgs += horiz ? imgStride : imgStride * imgSize;
target += horiz ? numImages : numImages * imgSize;
}
#pragma unroll
for (int r = 0; r < radius; r++) {
float* t = &target[0];
t[0] = scaleTargets * t[0] + scaleOutputs * outputs[r];
target += horiz ? numImages : numImages * imgSize;
}
} else {
for (int col = radius; col < imgSize ; col++) { // loop over img columns
float px = imgs[0];
target[0] = scaleOutputs * (outputs[0] + px * shFilter[0]);
#pragma unroll
for (int r = 1; r < radius*2; r++) {
outputs[r-1] = outputs[r] + px * shFilter[r];
}
outputs[filterWidth - 2] = px * shFilter[0];
imgs += horiz ? imgStride : imgStride * imgSize;
target += horiz ? numImages : numImages * imgSize;
}
#pragma unroll
for (int r = 0; r < radius; r++) {
target[0] = scaleOutputs * outputs[r];
target += horiz ? numImages : numImages * imgSize;
}
}
}
}
/*
* Block size B_YxB_X
* blockIdx.x determines output.x, image idx in batches of B_X*imgsPerThread
 * blockIdx.y determines output.y, channel idx in batches of B_Y*chansPerThread
 *
 * So each block does one output for some number of images/channels.
 *
 * threadIdx.x determines img idx
 * threadIdx.y determines channel idx
 *
 * imgs:        (numChannels, imgPixels, numImages)
 * target:      (numChannels, numOutputs, numImages)
 *
 * numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false
 * numChannels must be divisible by chansPerThread
 */
template<int B_Y, int B_X, int imgsPerThread, int chansPerThread, bool checkCaseBounds>
__global__ void kBedOfNails(float* imgs, float* target, const int imgSize, const int numChannels,
const int numImages, const int startX, const int strideX, const int outputsX,
const bool reverse, const float scaleTargets, const float scaleOutput) {
const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread);
const int numChanBlocks = DIVUP(numChannels, B_Y*chansPerThread);
const int outputIdxX = blockIdx.x / numImgBlocks;
const int outputIdxY = blockIdx.y / numChanBlocks;
const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread;
const int blockChanIdx = (blockIdx.y % numChanBlocks) * B_Y * chansPerThread;
const int myChanIdx = (blockChanIdx + threadIdx.y*chansPerThread);
if (myChanIdx >= numChannels) {
return;
}
// if (blockIdx.x != 0 || blockIdx.y != 0) {
// return;
// }
const int outputIdx = outputIdxY * outputsX + outputIdxX;
const int numOutputs = outputsX * outputsX;
const int imgPixels = imgSize * imgSize;
const int startImgPxX = startX + outputIdxX * strideX;
const int startImgPxY = startX + outputIdxY * strideX;
const int imgIdx = blockImgIdx + threadIdx.x;
const int imgPx = startImgPxY * imgSize + startImgPxX;
imgs += myChanIdx * imgPixels * numImages + imgPx * numImages + imgIdx;
target += (myChanIdx * numOutputs + outputIdx) * numImages + imgIdx;
if (scaleTargets != 0) {
if (!reverse) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int c = 0; c < chansPerThread; c++) {
target[c * numOutputs * numImages + i * B_X] = scaleTargets * target[c * numOutputs * numImages + i * B_X] + scaleOutput * imgs[c * imgPixels * numImages + i * B_X];
}
}
}
} else {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int c = 0; c < chansPerThread; c++) {
imgs[c * imgPixels * numImages + i * B_X] = scaleTargets * imgs[c * imgPixels * numImages + i * B_X] + scaleOutput * target[c * numOutputs * numImages + i * B_X];
}
}
}
}
} else {
if (!reverse) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int c = 0; c < chansPerThread; c++) {
target[c * numOutputs * numImages + i * B_X] = scaleOutput * imgs[c * imgPixels * numImages + i * B_X];
}
}
}
} else {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int c = 0; c < chansPerThread; c++) {
imgs[c * imgPixels * numImages + i * B_X] = scaleOutput * target[c * numOutputs * numImages + i * B_X];
}
}
}
}
}
}
/*
* imgs: (numChannels, imgPixels, numImages)
* target: (numChannels, outputs, numImages)
*/
void _convBedOfNails(NVMatrix& images, NVMatrix& target, int numChannels, int imgSize, int startX, int strideX,
bool reverse, float scaleTargets, float scaleOutput) {
int numImages = reverse ? target.getNumCols() : images.getNumCols();
int imgPixels = imgSize * imgSize;
assert(!images.isTrans());
assert(!target.isTrans());
assert(images.isContiguous());
assert(target.isContiguous());
assert(strideX > 1);
int outputsX = DIVUP(imgSize, strideX);
int outputs = outputsX * outputsX;
if (reverse) {
assert(target.getNumRows() == numChannels * outputs);
} else {
assert(images.getNumRows() == numChannels * imgPixels);
}
if (scaleTargets == 0) {
if (reverse) {
images.resize(numChannels * imgPixels, numImages);
images.apply(NVMatrixOps::Zero());
} else {
target.resize(numChannels*outputs, numImages);
}
} else {
if (reverse) {
assert(images.getNumRows() == numChannels * outputs);
assert(images.getNumCols() == numImages);
} else {
assert(target.getNumRows() == numChannels * outputs);
assert(target.getNumCols() == numImages);
}
}
int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1;
bool checkCaseBounds = numImages % (32*imgsPerThread) != 0;
int chansPerThread = numChannels % 8 == 0 ? 2 : 1;
dim3 threads(32, 4);
dim3 blocks(DIVUP(numImages,32*imgsPerThread) * outputsX, DIVUP(numChannels, 4 * chansPerThread) * outputsX);
if (imgsPerThread == 4) {
if (chansPerThread == 1) {
if (checkCaseBounds) {
hipFuncSetCacheConfig(kBedOfNails<4, 32, 4, 1, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kBedOfNails<4, 32, 4, 1, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(),
imgSize, numChannels, numImages, startX, strideX, outputsX,
reverse, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(kBedOfNails<4, 32, 4, 1, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kBedOfNails<4, 32, 4, 1, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(),
imgSize, numChannels, numImages, startX, strideX, outputsX,
reverse, scaleTargets, scaleOutput);
}
} else {
if (checkCaseBounds) {
hipFuncSetCacheConfig(kBedOfNails<4, 32, 4, 2, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kBedOfNails<4, 32, 4, 2, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(),
imgSize, numChannels, numImages, startX, strideX, outputsX,
reverse, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(kBedOfNails<4, 32, 4, 2, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kBedOfNails<4, 32, 4, 2, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(),
imgSize, numChannels, numImages, startX, strideX, outputsX,
reverse, scaleTargets, scaleOutput);
}
}
} else if (imgsPerThread == 2) {
if (chansPerThread == 1) {
if (checkCaseBounds) {
hipFuncSetCacheConfig(kBedOfNails<4, 32, 2, 1, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kBedOfNails<4, 32, 2, 1, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(),
imgSize, numChannels, numImages, startX, strideX, outputsX,
reverse, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(kBedOfNails<4, 32, 2, 1, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kBedOfNails<4, 32, 2, 1, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(),
imgSize, numChannels, numImages, startX, strideX, outputsX,
reverse, scaleTargets, scaleOutput);
}
} else {
if (checkCaseBounds) {
hipFuncSetCacheConfig(kBedOfNails<4, 32, 2, 2, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kBedOfNails<4, 32, 2, 2, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(),
imgSize, numChannels, numImages, startX, strideX, outputsX,
reverse, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(kBedOfNails<4, 32, 2, 2, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kBedOfNails<4, 32, 2, 2, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(),
imgSize, numChannels, numImages, startX, strideX, outputsX,
reverse, scaleTargets, scaleOutput);
}
}
} else {
if (chansPerThread == 1) {
if (checkCaseBounds) {
hipFuncSetCacheConfig(kBedOfNails<4, 32, 1, 1, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kBedOfNails<4, 32, 1, 1, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(),
imgSize, numChannels, numImages, startX, strideX, outputsX,
reverse, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(kBedOfNails<4, 32, 1, 1, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kBedOfNails<4, 32, 1, 1, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(),
imgSize, numChannels, numImages, startX, strideX, outputsX,
reverse, scaleTargets, scaleOutput);
}
} else {
if (checkCaseBounds) {
hipFuncSetCacheConfig(kBedOfNails<4, 32, 1, 2, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kBedOfNails<4, 32, 1, 2, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(),
imgSize, numChannels, numImages, startX, strideX, outputsX,
reverse, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(kBedOfNails<4, 32, 1, 2, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kBedOfNails<4, 32, 1, 2, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(),
imgSize, numChannels, numImages, startX, strideX, outputsX,
reverse, scaleTargets, scaleOutput);
}
}
}
}
void convBedOfNails(NVMatrix& images, NVMatrix& target, int numChannels, int imgSize, int startX,
int strideX, float scaleTargets, float scaleOutput) {
_convBedOfNails(images, target, numChannels, imgSize, startX, strideX, false, scaleTargets, scaleOutput);
}
void convBedOfNailsUndo(NVMatrix& actsGrad, NVMatrix& target, int numChannels, int imgSize,
int startX, int strideX, float scaleTargets, float scaleOutput) {
_convBedOfNails(target, actsGrad, numChannels, imgSize, startX, strideX, true, scaleTargets, scaleOutput);
}
/*
* imgs: (numChannels, imgPixels, numImages) with given imgStride
* filter: (1, 2*radius + 1)
* target: (numChannels, imgPixels, numImages)
*/
void convGaussianBlur(NVMatrix& images, NVMatrix& filter, NVMatrix& target, bool horiz, int numChannels,
float scaleTargets, float scaleOutputs) {
int numImages = images.getNumCols();
int radius = filter.getNumCols() / 2;
int imgPixels = images.getNumRows() / numChannels;
int imgSize = int(sqrt(imgPixels));
assert(imgPixels == imgSize * imgSize);
assert(radius >= 1 && radius <= 4);
assert(imgSize >= 2 * radius + 1);
assert(filter.getNumRows() == 1);
assert(images.getNumRows() == numChannels * imgPixels);
assert(!images.isTrans());
assert(!filter.isTrans());
assert(!target.isTrans());
assert(target.isContiguous());
if (scaleTargets == 0) {
target.resize(images);
} else {
assert(target.isSameDims(images));
}
dim3 threads(32, 4);
dim3 blocks(DIVUP(numImages, threads.x), DIVUP(numChannels*imgSize, threads.y));
if (radius == 1) {
hipFuncSetCacheConfig(kGaussianBlur<4, 32, 1>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kGaussianBlur<4, 32, 1>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filter.getDevData(), target.getDevData(),
imgSize, numImages, images.getStride(), horiz, scaleTargets, scaleOutputs);
} else if (radius == 2) {
hipFuncSetCacheConfig(kGaussianBlur<4, 32, 2>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kGaussianBlur<4, 32, 2>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filter.getDevData(), target.getDevData(),
imgSize, numImages, images.getStride(), horiz, scaleTargets, scaleOutputs);
} else if (radius == 3) {
hipFuncSetCacheConfig(kGaussianBlur<4, 32, 3>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kGaussianBlur<4, 32, 3>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filter.getDevData(), target.getDevData(),
imgSize, numImages, images.getStride(), horiz, scaleTargets, scaleOutputs);
} else if (radius == 4) {
hipFuncSetCacheConfig(kGaussianBlur<4, 32, 4>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kGaussianBlur<4, 32, 4>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filter.getDevData(), target.getDevData(),
imgSize, numImages, images.getStride(), horiz, scaleTargets, scaleOutputs);
}
}
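/*
 * Minimal usage sketch (an assumption about typical use, not code from this file):
 * the blur is 1-D, so a full 2-D Gaussian blur takes two passes. Assuming a host-side
 * NVMatrix `filter` of shape 1 x (2*radius + 1) filled with normalized Gaussian weights:
 *
 *     NVMatrix tmp, blurred;
 *     convGaussianBlur(images, filter, tmp,     true,  numChannels, 0, 1); // horizontal pass
 *     convGaussianBlur(tmp,    filter, blurred, false, numChannels, 0, 1); // vertical pass
 */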
/*
* Block size 1x128
* blockIdx.x determines pixel.x, image idx in batches of 128*imgsPerThread
* blockIdx.y determines pixel.y
*
 * So each block does one output for some number of images and all the filters.
*
* threadIdx.x determines img idx
*
* imgs: (numFilters, imgPixels, numImages)
* meanDiffs: (numFilters, imgPixels, numImages)
* denoms: (numFilters, imgPixels, numImages) (out)
* target: (numFilters, imgPixels, numImages) (out)
*
 * numImages must be divisible by 128*imgsPerThread if checkCaseBounds is false
 * numFilters is a template parameter here and must equal the number of filters in imgs
*/
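/*
 * In equation form, the kernel below computes, for each filter f, pixel p and image:
 *    denom(f, p)  = 2 + addScale * sum over the sizeX x sizeX window around p of meanDiffs(f, q)^2
 *    target(f, p) = imgs(f, p) * denom(f, p)^(-powScale)
 * The denominators are written out as well so that the backward pass can reuse them.
 */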
template<int imgsPerThread, int numFilters, bool checkCaseBounds>
__global__ void kCNorm_fewfilter(float* imgs, float* meanDiffs, float* denoms, float* target, const int imgSize,
const int numImages, const int sizeX, const float addScale, const float powScale) {
const int imgPixels = imgSize * imgSize;
const int numImgBlocks = DIVUP(numImages, 128*imgsPerThread);
const int pxIdxX = blockIdx.x / numImgBlocks;
const int pxIdxY = blockIdx.y;
const int blockImgIdx = (blockIdx.x % numImgBlocks) * 128 * imgsPerThread;
const int pxIdx = pxIdxY * imgSize + pxIdxX;
const int startPxX = -sizeX/2 + pxIdxX;
const int startPxY = -sizeX/2 + pxIdxY;
const int imgIdx = blockImgIdx + threadIdx.x;
imgs += pxIdx * numImages + imgIdx;
denoms += pxIdx * numImages + imgIdx;
meanDiffs += imgIdx;
target += pxIdx * numImages + imgIdx;
float prod[numFilters][imgsPerThread];
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * 128 < numImages) {
#pragma unroll
for (int f = 0; f < numFilters; f++) {
prod[f][i] = 0;
}
}
}
const int loopStartY = MAX(0, startPxY);
const int loopStartX = MAX(0, startPxX);
const int loopEndY = MIN(imgSize, startPxY + sizeX);
const int loopEndX = MIN(imgSize, startPxX + sizeX);
for (int y = loopStartY; y < loopEndY; y++) {
for (int x = loopStartX; x < loopEndX; x++) {
const int imgPx = y * imgSize + x;
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * 128 < numImages) {
#pragma unroll
for (int f = 0; f < numFilters; f++) {
prod[f][i] += square(meanDiffs[(f * imgPixels + imgPx) * numImages + i * 128]);
}
}
}
}
}
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * 128 < numImages) {
#pragma unroll
for (int f = 0; f < numFilters; f++) {
//prod[f][i] = 1 + addScale * prod[f][i];
prod[f][i] = 2 + addScale * prod[f][i];
denoms[f * imgPixels * numImages + i * 128] = prod[f][i];
target[f * imgPixels * numImages + i * 128] = imgs[f * imgPixels * numImages + i * 128] * __powf(prod[f][i], -powScale);
}
}
}
}
/*
* Block size B_YxB_X
* blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread
* blockIdx.y determines pixel.y, filter idx in batches of B_Y*filtersPerThread
*
* So each block does one pixel for some number of images/filters.
*
* threadIdx.x determines img idx
* threadIdx.y determines filter idx
*
* imgs: (numFilters, imgPixels, numImages)
* means: (numFilters, imgPixels, numImages)
* denoms: (numFilters, imgPixels, numImages) (out)
* target: (numFilters, imgPixels, numImages) (out)
*
* numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false
* numFilters must be divisible by B_Y*filtersPerThread
*/
template<int B_Y, int B_X, int imgsPerThread, int filtersPerThread, bool checkCaseBounds>
__global__ void kCNorm_manyfilter(float* imgs, float* meanDiffs, float* denoms, float* target, const int imgSize,
const int numFilters, const int numImages, const int sizeX,
const float addScale, const float powScale) {
const int imgPixels = imgSize * imgSize;
const int numImgBlocks = DIVUP(numImages, B_X*imgsPerThread);
const int numFilterBlocks = numFilters/(B_Y*filtersPerThread);
const int pxIdxX = blockIdx.x / numImgBlocks;
const int pxIdxY = blockIdx.y / numFilterBlocks;
const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread;
const int blockFilterIdx = (blockIdx.y % numFilterBlocks) * B_Y * filtersPerThread;
const int pxIdx = pxIdxY * imgSize + pxIdxX;
const int startPxX = -sizeX/2 + pxIdxX;
const int startPxY = -sizeX/2 + pxIdxY;
const int imgIdx = blockImgIdx + threadIdx.x;
imgs += ((blockFilterIdx + threadIdx.y) * imgPixels + pxIdx) * numImages + imgIdx;
meanDiffs += (blockFilterIdx + threadIdx.y) * imgPixels * numImages + imgIdx;
denoms += ((blockFilterIdx + threadIdx.y) * imgPixels + pxIdx) * numImages + imgIdx;
target += ((blockFilterIdx + threadIdx.y) * imgPixels + pxIdx) * numImages + imgIdx;
float prod[filtersPerThread][imgsPerThread];
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
prod[f][i] = 0;
}
}
}
const int loopStartY = MAX(0, startPxY);
const int loopStartX = MAX(0, startPxX);
const int loopEndY = MIN(imgSize, startPxY + sizeX);
const int loopEndX = MIN(imgSize, startPxX + sizeX);
for (int y = loopStartY; y < loopEndY; y++) {
for (int x = loopStartX; x < loopEndX; x++) {
const int imgPx = y * imgSize + x;
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
prod[f][i] += square(meanDiffs[(f * B_Y * imgPixels + imgPx) * numImages + i * B_X]);
}
}
}
}
}
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
//prod[f][i] = 1 + addScale * prod[f][i];
prod[f][i] = 2 + addScale * prod[f][i];
denoms[f * B_Y * imgPixels * numImages + i * B_X] = prod[f][i];
target[f * B_Y * imgPixels * numImages + i * B_X] = imgs[f * B_Y * imgPixels * numImages + i * B_X] * __powf(prod[f][i], -powScale);
}
}
}
}
/*
* Block size 16xB_X
* blockIdx.x determines 4x4 pixel.x region, image idx in batches of B_X*imgsPerThread
* blockIdx.y determines 4x4 pixel.y region, filter idx in batches of filtersPerThread
*
* So each block does 4x4 region of pixels for some number of images/filters.
*
* threadIdx.x determines img idx
* threadIdx.y determines pixel idx
*
* imgs: (numFilters, imgPixels, numImages)
* means: (numFilters, imgPixels, numImages)
* denoms: (numFilters, imgPixels, numImages) (out)
* target: (numFilters, imgPixels, numImages) (out)
*
* B_X one of 8, 16, 32
* imgsPerThread one of 1, 2, 4, 8, 16
*
 * B_X*imgsPerThread MUST be divisible by 32.
* Number of filters MUST be divisible by filtersPerThread.
*
* numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false
* numFilters must be divisible by filtersPerThread
*
* Final write-out will not be fully coalesced unless B_X is 32. But there's a lot more
* reading than writing here, and the reading is all coalesced, so it should be OK.
*/
template<int B_X, int imgsPerThread, int filtersPerThread, bool checkCaseBounds>
__global__ void kCNorm2(float* imgs, float* meanDiffs, float* denoms, float* target, const int imgSize,
const int numFilters, const int numImages, const int sizeX, const float addScale, const float powScale) {
__shared__ float shDiffs[filtersPerThread][B_X*imgsPerThread];
const int imgPixels = imgSize * imgSize;
const int numImgBlocks = DIVUP(numImages, B_X*imgsPerThread);
const int numFilterBlocks = numFilters/(filtersPerThread);
const int blockPxX = 4*(blockIdx.x / numImgBlocks);
const int blockPxY = 4*(blockIdx.y / numFilterBlocks);
const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread;
const int blockFilterIdx = (blockIdx.y % numFilterBlocks) * filtersPerThread;
const int tidx = threadIdx.y * B_X + threadIdx.x;
const int loadY = tidx / 32, loadX = tidx % 32;
const int startPxX = MAX(0, -sizeX/2 + blockPxX);
const int startPxY = MAX(0, -sizeX/2 + blockPxY);
const int endPxX = MIN(imgSize, blockPxX + DIVUP(sizeX, 2) + 3);
const int endPxY = MIN(imgSize, blockPxY + DIVUP(sizeX, 2) + 3);
const int myPxX = blockPxX + threadIdx.y % 4;
const int myPxY = blockPxY + threadIdx.y / 4;
const int myPxIdx = myPxY * imgSize + myPxX;
// const bool doWork = myPxX < imgSize && myPxY < imgSize;
const int myStartPxY = -sizeX/2 + myPxY;
const int myStartPxX = -sizeX/2 + myPxX;
const int myEndPxY = myPxY + DIVUP(sizeX, 2);
const int myEndPxX = myPxX + DIVUP(sizeX, 2);
const int imgIdx = blockImgIdx + threadIdx.x;
imgs += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx;
meanDiffs += (blockFilterIdx + loadY) * imgPixels * numImages + blockImgIdx + loadX;
denoms += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx;
target += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx;
float prod[filtersPerThread][imgsPerThread];
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
prod[f][i] = 0;
}
}
}
for (int y = startPxY; y < endPxY; y++) {
const bool isInY = y >= myStartPxY && y < myEndPxY;
for (int x = startPxX; x < endPxX; x++) {
const int px = y * imgSize + x;
// All the threads load a pixel from memory
#pragma unroll
for (int ly = 0; ly < filtersPerThread; ly += B_X/2) {
if (filtersPerThread % (B_X/2) == 0 || ly + loadY < filtersPerThread) {
#pragma unroll
for (int lx = 0; lx < B_X*imgsPerThread; lx += 32) {
if (!checkCaseBounds || lx + loadX + blockImgIdx < numImages) {
shDiffs[ly + loadY][lx + loadX] = meanDiffs[(ly * imgPixels + px) * numImages + lx];
}
}
}
}
__syncthreads();
// Each row of threads decides if it's interested in this pixel
if (isInY && x >= myStartPxX && x < myEndPxX) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
prod[f][i] += square(shDiffs[f][threadIdx.x + i * B_X]);
}
}
}
}
__syncthreads();
}
}
// imgs -= (loadY * imgPixels - myPxIdx) * numImages + loadX;
// imgs += threadIdx.x;
if (myPxX < imgSize && myPxY < imgSize) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
prod[f][i] = 1 + addScale * prod[f][i];
denoms[f * imgPixels * numImages + i * B_X] = prod[f][i];
target[f * imgPixels * numImages + i * B_X] = imgs[f * imgPixels * numImages + i * B_X] * __powf(prod[f][i], -powScale);
}
}
}
}
}
/*
* Block size B_YxB_X
* blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread
* blockIdx.y determines pixel.y, filter idx in batches of B_Y
*
* So each block does one pixel for some number of images/filters.
*
* threadIdx.x determines img idx
* threadIdx.y determines filter idx
*
* imgs: (numFilters, imgPixels, numImages)
* meanDiffs: (numFilters, imgPixels, numImages)
* denoms: (numFilters, imgPixels, numImages) (out)
* target: (numFilters, imgPixels, numImages) (out)
*
* numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false
* numFilters must be divisible by B_Y
*/
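/*
 * In equation form, the kernel below computes, for filter f at pixel p:
 *    denom(f, p)  = 2 + addScale * sum over f' in F(f) of meanDiffs(f', p)^2
 *    target(f, p) = imgs(f, p) * denom(f, p)^(-powScale)
 * where the filter window F(f) is [(f/sizeF)*sizeF, (f/sizeF)*sizeF + sizeF) when
 * blocked is true, and the clipped sliding window starting at f - sizeF/2 otherwise.
 * That is, normalization runs across filters (feature maps) rather than across pixels.
 */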
template<int B_Y, int B_X, int imgsPerThread, bool checkCaseBounds, bool blocked>
__global__ void kFCNorm(float* imgs, float* meanDiffs, float* denoms, float* target, const int imgSize,
const int numFilters, const int numImages, const int sizeF,
const float addScale, const float powScale) {
const int imgPixels = imgSize * imgSize;
const int numImgBlocks = DIVUP(numImages, B_X*imgsPerThread);
const int numFilterBlocks = numFilters/B_Y;
const int pxIdxX = blockIdx.x / numImgBlocks;
const int pxIdxY = blockIdx.y / numFilterBlocks;
const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread;
const int filterIdx = (blockIdx.y % numFilterBlocks) * B_Y + threadIdx.y;
const int pxIdx = pxIdxY * imgSize + pxIdxX;
const int imgIdx = blockImgIdx + threadIdx.x;
imgs += ((filterIdx) * imgPixels + pxIdx) * numImages + imgIdx;
meanDiffs += pxIdx * numImages + imgIdx;
denoms += ((filterIdx) * imgPixels + pxIdx) * numImages + imgIdx;
target += ((filterIdx) * imgPixels + pxIdx) * numImages + imgIdx;
float prod[imgsPerThread];
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
prod[i] = 0;
}
}
const int startF = blocked ? (filterIdx / sizeF) * sizeF : -sizeF/2 + filterIdx;
const int loopStartF = blocked ? startF : MAX(0, startF);
const int loopEndF = MIN(numFilters, startF + sizeF);
for (int f = loopStartF; f < loopEndF; ++f) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
prod[i] += square(meanDiffs[f * imgPixels * numImages + i * B_X]);
}
}
}
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
// prod[i] = 1 + addScale * prod[i];
prod[i] = 2 + addScale * prod[i];
denoms[i * B_X] = prod[i];
target[i * B_X] = imgs[i * B_X] * __powf(prod[i], -powScale);
}
}
}
/*
* Block size B_YxB_X
* blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread
* blockIdx.y determines pixel.y, filter idx in batches of B_Y
*
* So each block does one output pixel for some number of images/filters.
*
* threadIdx.x determines img idx
* threadIdx.y determines filter idx
*
* outGrads: (numFilters, imgPixels, numImages)
* denoms: (numFilters, imgPixels, numImages)
* inputs: (numFilters, imgPixels, numImages)
* acts: (numFilters, imgPixels, numImages)
* target: (numFilters, imgPixels, numImages)
*
* numImages must be divisible by B_X*imgsPerThread
* numFilters must be divisible by B_Y
*
* TODO: this isn't really ideal
*/
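/*
 * In equation form, the kernel below computes, per filter f, pixel p and image:
 *    target(f, p) = inputs(f, p) * sum over f' in F(f) of acts(f', p)
 *                 + outGrads(f, p) * denoms(f, p)^(-powScale)
 * where F(f) is the (blocked or sliding) set of filters whose normalization window
 * contained f in the forward pass, and acts is assumed to have been pre-scaled by
 * kRNormUndoPrelims to hold scale * outGrads * acts / denoms. This is the chain-rule
 * gradient of the cross-map normalization computed by kFCNorm.
 */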
template<int B_Y, int B_X, int imgsPerThread, bool add, bool checkCaseBounds, bool blocked>
__global__ void kFRNormUndo(float* outGrads, float* denoms, float* inputs, float* acts, float* target, const int imgSize, const int numFilters,
const int numImages, const int sizeF, const float powScale, const float scaleTargets, const float scaleOutputs) {
const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread);
const int numFilterBlocks = numFilters/B_Y;
const int pxIdxX = blockIdx.x / numImgBlocks;
const int pxIdxY = blockIdx.y / numFilterBlocks;
const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread;
const int filterIdx = (blockIdx.y % numFilterBlocks) * B_Y + threadIdx.y;
const int imgPixels = imgSize * imgSize;
const int pxIdx = pxIdxY * imgSize + pxIdxX;
const int imgIdx = blockImgIdx + threadIdx.x;
acts += pxIdx * numImages + imgIdx;
inputs += ((filterIdx) * imgPixels + pxIdx) * numImages + imgIdx;
denoms += ((filterIdx) * imgPixels + pxIdx) * numImages + imgIdx;
outGrads += ((filterIdx) * imgPixels + pxIdx) * numImages + imgIdx;
target += ((filterIdx) * imgPixels + pxIdx) * numImages + imgIdx;
float prod[imgsPerThread];
// if (imgIdx != 0 || pxIdx != 0 || filterIdx != 0) {
// return;
// }
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
prod[i] = 0;
}
const int startF = blocked ? (filterIdx / sizeF) * sizeF : -sizeF + sizeF/2 + 1 + filterIdx;
const int loopStartF = blocked ? startF : MAX(0, startF);
const int loopEndF = MIN(numFilters, startF + sizeF);
for (int f = loopStartF; f < loopEndF; ++f) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
prod[i] += acts[f * imgPixels * numImages + i * B_X];
}
}
}
// printf("gpu f start: %d, end: %d\n", loopStartF, loopEndF);
if (!add) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
const float inp = inputs[i * B_X];
const float out = outGrads[i * B_X];
const float den = denoms[i * B_X];
prod[i] = inp * prod[i] + out * __powf(den, -powScale);
target[i * B_X] = prod[i];
}
}
} else {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
const float inp = inputs[i * B_X];
const float out = outGrads[i * B_X];
const float den = denoms[i * B_X];
prod[i] = inp * prod[i] + out * __powf(den, -powScale);
target[i * B_X] = scaleTargets * target[i * B_X] + scaleOutputs * prod[i];
}
}
}
}
/*
* Block size B_YxB_X
* blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread
* blockIdx.y determines pixel.y, filter idx in batches of B_Y*filtersPerThread
*
* So each block does one pixel for some number of images/filters.
*
* threadIdx.x determines img idx
* threadIdx.y determines filter idx
*
* imgs: (numFilters, imgPixels, numImages)
* target: (numFilters, imgPixels, numImages)
*
* numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false
* numFilters must be divisible by B_Y*filtersPerThread
*
* sizeX should be something like 3 or 5 for this function. Not much more.
* TODO: write variant where each block does 4x4 region or so (this'll be based on kCNorm2).
*/
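/*
 * In equation form, the kernel below writes, for each filter f and pixel p:
 *    target(f, p) = scaleOutput / (0.001 + sqrt( sum over the sizeX x sizeX window of imgs(f, q)^2 ))
 * (plus scaleTarget * target when accumulating), i.e. the reciprocal of the locally
 * L2-pooled activation -- the "1/S" values consumed by kTICAGrad_manyfilter below.
 */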
template<int B_Y, int B_X, int imgsPerThread, int filtersPerThread, bool checkCaseBounds>
__global__ void kTICA_manyfilter(float* imgs, float* target, const int imgSize,
const int numFilters, const int numImages, const int sizeX,
const float scaleTarget, const float scaleOutput) {
const int imgPixels = imgSize * imgSize;
const int numImgBlocks = DIVUP(numImages, B_X*imgsPerThread);
const int numFilterBlocks = numFilters/(B_Y*filtersPerThread);
const int pxIdxX = blockIdx.x / numImgBlocks;
const int pxIdxY = blockIdx.y / numFilterBlocks;
const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread;
const int blockFilterIdx = (blockIdx.y % numFilterBlocks) * B_Y * filtersPerThread;
const int pxIdx = pxIdxY * imgSize + pxIdxX;
const int startPxX = -sizeX/2 + pxIdxX;
const int startPxY = -sizeX/2 + pxIdxY;
const int imgIdx = blockImgIdx + threadIdx.x;
imgs += ((blockFilterIdx + threadIdx.y) * imgPixels) * numImages + imgIdx;
target += ((blockFilterIdx + threadIdx.y) * imgPixels + pxIdx) * numImages + imgIdx;
float prod[filtersPerThread][imgsPerThread];
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
prod[f][i] = 0;
}
}
}
const int loopStartY = MAX(0, startPxY);
const int loopStartX = MAX(0, startPxX);
const int loopEndY = MIN(imgSize, startPxY + sizeX);
const int loopEndX = MIN(imgSize, startPxX + sizeX);
for (int y = loopStartY; y < loopEndY; y++) {
for (int x = loopStartX; x < loopEndX; x++) {
const int imgPx = y * imgSize + x;
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
prod[f][i] += square(imgs[(f * B_Y * imgPixels + imgPx) * numImages + i * B_X]);
}
}
}
}
}
imgs += pxIdx * numImages;
if (scaleTarget == 0) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
                    target[f * B_Y * imgPixels * numImages + i * B_X] = scaleOutput * __fdividef(1.0f, 0.001f + sqrtf(prod[f][i]));
}
}
}
} else {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
                    target[f * B_Y * imgPixels * numImages + i * B_X] = scaleTarget * target[f * B_Y * imgPixels * numImages + i * B_X] + scaleOutput * __fdividef(1.0f, 0.001f + sqrtf(prod[f][i]));
}
}
}
}
}
/*
* Block size B_YxB_X
* blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread
* blockIdx.y determines pixel.y, filter idx in batches of B_Y*filtersPerThread
*
* So each block does one pixel for some number of images/filters.
*
* threadIdx.x determines img idx
* threadIdx.y determines filter idx
*
* imgs: (numFilters, imgPixels, numImages)
* ticas: (numFilters, imgPixels, numImages)
* target: (numFilters, imgPixels, numImages)
*
* numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false
* numFilters must be divisible by B_Y*filtersPerThread
*
* sizeX should be something like 3 or 5 for this function. Not much more.
* TODO: write variant where each block does 4x4 region or so (this'll be based on kCNorm2).
*/
template<int B_Y, int B_X, int imgsPerThread, int filtersPerThread, bool checkCaseBounds>
__global__ void kTICAGrad_manyfilter(float* imgs, float* ticas, float* target, const int imgSize,
const int numFilters, const int numImages, const int sizeX,
const float scaleTarget, const float scaleOutput) {
const int imgPixels = imgSize * imgSize;
const int numImgBlocks = DIVUP(numImages, B_X*imgsPerThread);
const int numFilterBlocks = numFilters/(B_Y*filtersPerThread);
const int pxIdxX = blockIdx.x / numImgBlocks;
const int pxIdxY = blockIdx.y / numFilterBlocks;
const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread;
const int blockFilterIdx = (blockIdx.y % numFilterBlocks) * B_Y * filtersPerThread;
const int pxIdx = pxIdxY * imgSize + pxIdxX;
const int startPxX = -sizeX/2 + pxIdxX;
const int startPxY = -sizeX/2 + pxIdxY;
const int imgIdx = blockImgIdx + threadIdx.x;
imgs += ((blockFilterIdx + threadIdx.y) * imgPixels + pxIdx) * numImages + imgIdx;
ticas += ((blockFilterIdx + threadIdx.y) * imgPixels) * numImages + imgIdx;
target += ((blockFilterIdx + threadIdx.y) * imgPixels + pxIdx) * numImages + imgIdx;
float prod[filtersPerThread][imgsPerThread];
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
prod[f][i] = 0;
}
}
}
const int loopStartY = MAX(0, startPxY);
const int loopStartX = MAX(0, startPxX);
const int loopEndY = MIN(imgSize, startPxY + sizeX);
const int loopEndX = MIN(imgSize, startPxX + sizeX);
for (int y = loopStartY; y < loopEndY; y++) {
for (int x = loopStartX; x < loopEndX; x++) {
const int imgPx = y * imgSize + x;
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
// adding 1/S values
prod[f][i] += ticas[(f * B_Y * imgPixels + imgPx) * numImages + i * B_X];
}
}
}
}
}
if (scaleTarget == 0) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
target[f * B_Y * imgPixels * numImages + i * B_X] = scaleOutput * -imgs[f * B_Y * imgPixels * numImages + i * B_X] * prod[f][i];
}
}
}
} else {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
                    target[f * B_Y * imgPixels * numImages + i * B_X] = scaleTarget * target[f * B_Y * imgPixels * numImages + i * B_X] + scaleOutput * -imgs[f * B_Y * imgPixels * numImages + i * B_X] * prod[f][i];
}
}
}
}
}
/*
* Block size B_YxB_X
* blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread
* blockIdx.y determines pixel.y, filter idx in batches of B_Y*filtersPerThread
*
* So each block does one output pixel for some number of images/filters.
*
* threadIdx.x determines img idx
* threadIdx.y determines filter idx
*
 * avgGrads:    (numFilters, numOutputs, numImages)
 * target:      (numFilters, imgPixels, numImages)
*
* numImages must be divisible by B_X*imgsPerThread
* numFilters must be divisible by B_Y*filtersPerThread
*/
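/*
 * In other words: each image pixel accumulates, from every subsX x subsX pooling window
 * that covered it, the corresponding average-pooling gradient divided by that window's
 * (boundary-clipped) area. For example, with subsX = 2, strideX = 2, startX = 0 an
 * interior pixel belongs to exactly one window and receives avgGrad / 4.
 */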
template<int B_Y, int B_X, int imgsPerThread, int filtersPerThread, bool add, bool checkCaseBounds>
__global__ void kLocalAvgUndo(float* avgGrads, float* target, const int imgSize, const int numFilters,
const int numImages, const int subsX, const int startX, const int strideX, const int outputsX,
const float scaleTargets, const float scaleOutputs) {
const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread);
const int blockPxX = blockIdx.x / numImgBlocks;
const int blockPxY = blockIdx.y / (numFilters/(B_Y*filtersPerThread));
const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread;
const int blockFilterIdx = (blockIdx.y % (numFilters/(B_Y*filtersPerThread))) * B_Y * filtersPerThread;
const int blockPx = blockPxY * imgSize + blockPxX;
const int numOutputs = outputsX * outputsX;
const int imgPixels = imgSize * imgSize;
const int startOutputY = blockPxY - startX < subsX ? 0 : 1 + (blockPxY - startX - subsX) / strideX;
const int endOutputY = MIN(outputsX, 1 + (blockPxY - startX) / strideX);
const int startOutputX = blockPxX - startX < subsX ? 0 : 1 + (blockPxX - startX - subsX) / strideX;
const int endOutputX = MIN(outputsX, 1 + (blockPxX - startX) / strideX);
const int imgIdx = blockImgIdx + threadIdx.x;
avgGrads += ((blockFilterIdx + threadIdx.y) * numOutputs) * numImages + imgIdx;
target += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx;
float prod[filtersPerThread][imgsPerThread];
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
prod[f][i] = 0;
}
}
if (blockPxX >= startX && blockPxX < startX + strideX * (outputsX-1) + subsX
&& blockPxY >= startX && blockPxY < startX + strideX * (outputsX-1) + subsX) {
for (int my = startOutputY; my < endOutputY; my++) {
const float regionStartY = fmaxf(0, startX + my * strideX);
const float regionEndY = fminf(imgSize, startX + my * strideX + subsX);
const float regionSizeY = regionEndY - regionStartY;
for (int mx = startOutputX; mx < endOutputX; mx++) {
const int outputIdx = my * outputsX + mx;
const float regionStartX = fmaxf(0, startX + mx * strideX);
const float regionEndX = fminf(imgSize, startX + mx * strideX + subsX);
const float regionSizeX = regionEndX - regionStartX;
// It's important to do the division here, because pushing division into the below
// loops makes the code 4x slower.
const float regionSizeInv = 1.0f / (regionSizeX * regionSizeY);
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
prod[f][i] += avgGrads[(f * B_Y * numOutputs + outputIdx) * numImages + i * B_X] * regionSizeInv;
}
}
}
}
}
}
if (!add) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
target[f * B_Y * imgPixels * numImages + i * B_X] = prod[f][i];
}
}
}
} else {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
target[f * B_Y * imgPixels * numImages + i * B_X] = scaleTargets * target[f * B_Y * imgPixels * numImages + i * B_X] + scaleOutputs * prod[f][i];
}
}
}
}
}
/*
* Block size B_YxB_X
* blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread
* blockIdx.y determines pixel.y, filter idx in batches of B_Y*filtersPerThread
*
* So each block does one output pixel for some number of images/filters.
*
* threadIdx.x determines img idx
* threadIdx.y determines filter idx
*
* imgs: (numFilters, imgPixels, numImages)
* maxGrads: (numFilters, numOutputs, numImages)
* maxActs: (numFilters, numOutputs, numImages)
* target: (numFilters, imgPixels, numImages)
*
* numImages must be divisible by B_X*imgsPerThread
* numFilters must be divisible by B_Y*filtersPerThread
*/
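/*
 * In other words: for every pooling window that covered this pixel, the max-pooling
 * gradient is routed here only if this pixel's activation equals the stored maximum
 * (prod += (img == maxAct) * maxGrad below). Ties therefore propagate the gradient to
 * every tied pixel rather than to a single argmax.
 */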
template<int B_Y, int B_X, int imgsPerThread, int filtersPerThread, bool add, bool checkCaseBounds>
__global__ void kLocalMaxUndo(float* imgs, float* maxGrads, float* maxActs, float* target, const int imgSize, const int numFilters,
const int numImages, const int subsX, const int startX, const int strideX, const int outputsX,
const float scaleTargets, const float scaleOutputs) {
__shared__ float shImgs[B_Y*filtersPerThread][B_X*imgsPerThread];
const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread);
const int blockPxX = blockIdx.x / numImgBlocks;
const int blockPxY = blockIdx.y / (numFilters/(B_Y*filtersPerThread));
const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread;
const int blockFilterIdx = (blockIdx.y % (numFilters/(B_Y*filtersPerThread))) * B_Y * filtersPerThread;
const int blockPx = blockPxY * imgSize + blockPxX;
const int numOutputs = outputsX * outputsX;
const int imgPixels = imgSize * imgSize;
const int startOutputY = blockPxY - startX < subsX ? 0 : 1 + (blockPxY - startX - subsX) / strideX;
const int endOutputY = MIN(outputsX, 1 + (blockPxY - startX) / strideX);
const int startOutputX = blockPxX - startX < subsX ? 0 : 1 + (blockPxX - startX - subsX) / strideX;
const int endOutputX = MIN(outputsX, 1 + (blockPxX - startX) / strideX);
const int imgIdx = blockImgIdx + threadIdx.x;
imgs += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx;
maxGrads += ((blockFilterIdx + threadIdx.y) * numOutputs) * numImages
+ imgIdx;
maxActs += ((blockFilterIdx + threadIdx.y) * numOutputs) * numImages
+ imgIdx;
target += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx;
float prod[filtersPerThread][imgsPerThread];
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
prod[f][i] = 0;
}
}
if (blockPxX >= startX && blockPxX < startX + strideX * (outputsX-1) + subsX
&& blockPxY >= startX && blockPxY < startX + strideX * (outputsX-1) + subsX) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
shImgs[threadIdx.y + B_Y * f][threadIdx.x + B_X * i] = imgs[f * B_Y * imgPixels * numImages + i * B_X];
}
}
}
for (int my = startOutputY; my < endOutputY; my++) {
for (int mx = startOutputX; mx < endOutputX; mx++) {
const int outputIdx = my * outputsX + mx;
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
const float ma = maxActs[(f * B_Y * numOutputs + outputIdx) * numImages + i * B_X];
const float mg = maxGrads[(f * B_Y * numOutputs + outputIdx) * numImages + i * B_X];
const float img = shImgs[threadIdx.y + B_Y * f][threadIdx.x + B_X * i];
prod[f][i] += (img == ma) * mg;
}
}
}
}
}
}
if (!add) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
target[f * B_Y * imgPixels * numImages + i * B_X] = prod[f][i];
}
}
}
} else {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
target[f * B_Y * imgPixels * numImages + i * B_X] = scaleTargets * target[f * B_Y * imgPixels * numImages + i * B_X] + scaleOutputs * prod[f][i];
}
}
}
}
}
/*
* acts := -2 x scale x acts x outGrads / denoms
*/
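/*
 * Note: the kernel itself computes acts = scale * outGrads * acts / denoms; the factor
 * of -2 in the identity above is presumably folded into the `scale` argument by the caller.
 */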
template<int B_X, int eltsPerThread>
__global__ void kRNormUndoPrelims(float* acts, float* denoms, float* outGrads,
const uint numElements, const float scale) {
const uint e = B_X * blockIdx.x * eltsPerThread + threadIdx.x;
const uint numThreads = B_X * gridDim.x;
for (uint i = e; i < numElements; i += numThreads*eltsPerThread) {
#pragma unroll
for (uint k = 0; k < eltsPerThread; k++) {
if (i + k * B_X < numElements) {
acts[i + k * B_X] = __fdividef(scale*outGrads[i + k * B_X] * acts[i + k * B_X], denoms[i + k * B_X]);
}
}
}
}
/*
* Block size B_YxB_X
* blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread
* blockIdx.y determines pixel.y, filter idx in batches of B_Y*filtersPerThread
*
* So each block does one output pixel for some number of images/filters.
*
* threadIdx.x determines img idx
* threadIdx.y determines filter idx
*
* outGrads: (numFilters, imgPixels, numImages)
* denoms: (numFilters, imgPixels, numImages)
* inputs: (numFilters, imgPixels, numImages)
* acts: (numFilters, imgPixels, numImages)
* target: (numFilters, imgPixels, numImages)
*
* numImages must be divisible by B_X*imgsPerThread
* numFilters must be divisible by B_Y*filtersPerThread
*
* TODO: this isn't really ideal
*/
template<int B_Y, int B_X, int imgsPerThread, int filtersPerThread, bool add, bool checkCaseBounds>
__global__ void kRNormUndo(float* outGrads, float* denoms, float* inputs, float* acts, float* target, const int imgSize, const int numFilters,
const int numImages, const int sizeX, const float powScale, const float scaleTargets, const float scaleOutputs) {
const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread);
const int numFilterBlocks = numFilters/(B_Y*filtersPerThread);
const int blockPxX = blockIdx.x / numImgBlocks;
const int blockPxY = blockIdx.y / numFilterBlocks;
const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread;
const int blockFilterIdx = (blockIdx.y % numFilterBlocks) * B_Y * filtersPerThread;
const int blockPx = blockPxY * imgSize + blockPxX;
const int imgPixels = imgSize * imgSize;
const int startY = MAX(0, blockPxY + sizeX/2 - sizeX + 1);
const int startX = MAX(0, blockPxX + sizeX/2 - sizeX + 1);
const int endY = MIN(imgSize, blockPxY + sizeX/2 + 1);
const int endX = MIN(imgSize, blockPxX + sizeX/2 + 1);
const int imgIdx = blockImgIdx + threadIdx.x;
acts += ((blockFilterIdx + threadIdx.y) * imgPixels) * numImages + imgIdx;
inputs += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx;
denoms += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx;
outGrads += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx;
target += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx;
float prod[filtersPerThread][imgsPerThread];
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
prod[f][i] = 0;
}
}
for (int sy = startY; sy < endY; sy++) {
for (int sx = startX; sx < endX; sx++) {
const int outPx = sy * imgSize + sx;
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
prod[f][i] += acts[(f * B_Y * imgPixels + outPx) * numImages + i * B_X];
}
}
}
}
}
// outGrads += blockPx * numImages;
if (!add) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
const float inp = inputs[(f * B_Y * imgPixels) * numImages + i * B_X];
const float out = outGrads[(f * B_Y * imgPixels) * numImages + i * B_X];
const float den = denoms[(f * B_Y * imgPixels) * numImages + i * B_X];
prod[f][i] = inp * prod[f][i] + out * __powf(den, -powScale);
target[f * B_Y * imgPixels * numImages + i * B_X] = prod[f][i];
}
}
}
} else {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
const float inp = inputs[(f * B_Y * imgPixels) * numImages + i * B_X];
const float out = outGrads[(f * B_Y * imgPixels) * numImages + i * B_X];
const float den = denoms[(f * B_Y * imgPixels) * numImages + i * B_X];
prod[f][i] = inp * prod[f][i] + out * __powf(den, -powScale);
target[f * B_Y * imgPixels * numImages + i * B_X] =
scaleTargets * target[f * B_Y * imgPixels * numImages + i * B_X]
+ scaleOutputs * prod[f][i];
}
}
}
}
}
/*
* Block size 16xB_X
* blockIdx.x determines 4x4 pixel.x region, image idx in batches of B_X*imgsPerThread
* blockIdx.y determines 4x4 pixel.y region, filter idx in batches of filtersPerThread
*
* So each block does 4x4 region for some number of images/filters.
*
* threadIdx.x determines img idx
* threadIdx.y determines pixel idx
*
* outGrads: (numFilters, imgPixels, numImages)
* denoms: (numFilters, imgPixels, numImages)
* inputs: (numFilters, imgPixels, numImages)
* acts: (numFilters, imgPixels, numImages)
* target: (numFilters, imgPixels, numImages)
*
* B_X one of 8, 16, 32
* imgsPerThread one of 1, 2, 4, 8, 16
*
 * B_X*imgsPerThread MUST be divisible by 32.
* Number of filters MUST be divisible by filtersPerThread.
*
* numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false
* numFilters must be divisible by filtersPerThread
*
* Final write-out will not be fully coalesced unless B_X is 32. But there's a lot more
* reading than writing here, and the reading is all coalesced, so it should be OK.
*/
template<int B_X, int imgsPerThread, int filtersPerThread, bool add, bool checkCaseBounds>
__global__ void kRNormUndo2(float* outGrads, float* denoms, float* inputs, float* acts, float* target, const int imgSize, const int numFilters,
const int numImages, const int sizeX, const float powScale, const float scaleTargets, const float scaleOutputs) {
__shared__ float shActs[filtersPerThread][B_X*imgsPerThread];
const int imgPixels = imgSize * imgSize;
const int numImgBlocks = DIVUP(numImages, B_X*imgsPerThread);
const int numFilterBlocks = numFilters/(filtersPerThread);
const int blockPxX = 4*(blockIdx.x / numImgBlocks);
const int blockPxY = 4*(blockIdx.y / numFilterBlocks);
const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread;
const int blockFilterIdx = (blockIdx.y % numFilterBlocks) * filtersPerThread;
const int tidx = threadIdx.y * B_X + threadIdx.x;
const int loadY = tidx / 32, loadX = tidx % 32;
const int startPxX = MAX(0, -DIVUP(sizeX,2) + blockPxX + 1);
const int startPxY = MAX(0, -DIVUP(sizeX,2) + blockPxY + 1);
const int endPxX = MIN(imgSize, blockPxX + sizeX/2 + 4);
const int endPxY = MIN(imgSize, blockPxY + sizeX/2 + 4);
const int myPxX = blockPxX + threadIdx.y % 4;
const int myPxY = blockPxY + threadIdx.y / 4;
const int myPxIdx = myPxY * imgSize + myPxX;
// const bool doWork = myPxX < imgSize && myPxY < imgSize;
const int myStartPxY = -DIVUP(sizeX,2) + myPxY + 1;
const int myStartPxX = -DIVUP(sizeX,2) + myPxX + 1;
const int myEndPxY = myPxY + sizeX/2 + 1;
const int myEndPxX = myPxX + sizeX/2 + 1;
const int imgIdx = blockImgIdx + threadIdx.x;
acts += (blockFilterIdx + loadY) * imgPixels * numImages + blockImgIdx + loadX;
denoms += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx;
inputs += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx;
outGrads += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx;
target += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx;
float prod[filtersPerThread][imgsPerThread];
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
prod[f][i] = 0;
}
}
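// Sweep that bounding region once; each (y, x) iteration stages one source pixel's acts for
// all of this block's filters/images in shared memory, then every thread accumulates it if
// the pixel falls inside its own normalization window.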
for (int y = startPxY; y < endPxY; y++) {
const bool isInY = y >= myStartPxY && y < myEndPxY;
for (int x = startPxX; x < endPxX; x++) {
const int px = y * imgSize + x;
// All the threads load a pixel from memory
#pragma unroll
for (int ly = 0; ly < filtersPerThread; ly += B_X/2) {
if (filtersPerThread % (B_X/2) == 0 || ly + loadY < filtersPerThread) {
#pragma unroll
for (int lx = 0; lx < B_X*imgsPerThread; lx += 32) {
if (!checkCaseBounds || lx + loadX + blockImgIdx < numImages) {
shActs[ly + loadY][lx + loadX] = acts[(ly * imgPixels + px) * numImages + lx];
}
}
}
}
__syncthreads();
// Each row of threads decides if it's interested in this pixel
if (isInY && x >= myStartPxX && x < myEndPxX) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
prod[f][i] += shActs[f][threadIdx.x + i * B_X];
}
}
}
}
__syncthreads();
}
}
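// Re-base the acts pointer from the cooperative-load offsets (loadY/loadX) to this thread's
// own (filter, pixel, image) location, matching the other per-thread pointers.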
acts -= (loadY * imgPixels - myPxIdx) * numImages + loadX;
acts += threadIdx.x;
if (myPxX < imgSize && myPxY < imgSize) {
if (!add) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
const float out = outGrads[f * imgPixels * numImages + i * B_X];
const float den = denoms[f * imgPixels * numImages + i * B_X];
const float inp = inputs[f * imgPixels * numImages + i * B_X];
prod[f][i] = inp * prod[f][i] + out * __powf(den, -powScale);
target[f * imgPixels * numImages + i * B_X] = prod[f][i];
}
}
}
} else {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
const float out = outGrads[f * imgPixels * numImages + i * B_X];
const float den = denoms[f * imgPixels * numImages + i * B_X];
const float inp = inputs[f * imgPixels * numImages + i * B_X];
prod[f][i] = inp * prod[f][i] + out * __powf(den, -powScale);
target[f * imgPixels * numImages + i * B_X] = scaleTargets * target[f * imgPixels * numImages + i * B_X] + scaleOutputs * prod[f][i];
}
}
}
}
}
}
void convLocalMaxUndo(NVMatrix& images, NVMatrix& maxGrads, NVMatrix& maxActs, NVMatrix& target,
int subsX, int startX, int strideX, int outputsX) {
convLocalMaxUndo(images, maxGrads, maxActs, target, subsX, startX, strideX, outputsX, 0, 1);
}
/*
* imgs: (numFilters, imgPixels, numImages)
* maxGrads: (numFilters, numOutputs, numImages)
 * maxActs: (numFilters, numOutputs, numImages)
* target: (numFilters, imgPixels, numImages)
*/
void convLocalMaxUndo(NVMatrix& images, NVMatrix& maxGrads, NVMatrix& maxActs, NVMatrix& target,
int subsX, int startX, int strideX, int outputsX, float scaleTargets, float scaleOutput) {
int outputs = outputsX * outputsX;
int numImages = images.getNumCols();
int numFilters = maxGrads.getNumRows() / outputs;
int imgPixels = images.getNumRows() / numFilters;
assert(images.getNumRows() == numFilters * imgPixels);
int imgSize = int(sqrt(imgPixels));
assert(imgSize * imgSize == imgPixels);
assert(maxGrads.getNumRows() == numFilters * outputs);
assert(maxGrads.getNumCols() == numImages);
assert(!images.isTrans());
assert(!target.isTrans());
assert(!maxGrads.isTrans());
assert(!maxActs.isTrans());
assert(images.isContiguous());
assert(maxGrads.isContiguous());
assert(maxActs.isContiguous());
assert(maxGrads.isSameDims(maxActs));
assert(numFilters % 16 == 0);
// assert(numImages % 128 == 0);
assert(strideX <= subsX);
target.resize(images);
assert(target.isContiguous());
int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1;
int checkCaseBounds = numImages % (32*imgsPerThread) != 0;
dim3 threads(32, 4);
dim3 blocks(DIVUP(numImages,32*imgsPerThread) * imgSize, (numFilters / (4 * 2)) * imgSize);
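// Launch geometry: grid.x enumerates (image batch of 32*imgsPerThread) x (one image axis) and
// grid.y enumerates (filter batch of 4*2 = 8) x (the other image axis), so each 32x4 block
// handles one output pixel for its batch of images/filters (see kLocalMaxUndo, defined earlier).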
if (imgsPerThread == 4) {
if (checkCaseBounds) {
if (scaleTargets == 0 && scaleOutput == 1) {
hipLaunchKernelGGL(( kLocalMaxUndo<4, 32, 4, 2, false, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(),
imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput);
} else {
hipLaunchKernelGGL(( kLocalMaxUndo<4, 32, 4, 2, true, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(),
imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput);
}
} else {
if (scaleTargets == 0 && scaleOutput == 1) {
hipLaunchKernelGGL(( kLocalMaxUndo<4, 32, 4, 2, false, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(),
imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput);
} else {
hipLaunchKernelGGL(( kLocalMaxUndo<4, 32, 4, 2, true, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(),
imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput);
}
}
} else if (imgsPerThread == 2) {
if (checkCaseBounds) {
if (scaleTargets == 0 && scaleOutput == 1) {
hipLaunchKernelGGL(( kLocalMaxUndo<4, 32, 2, 2, false, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(),
imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput);
} else {
hipLaunchKernelGGL(( kLocalMaxUndo<4, 32, 2, 2, true, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(),
imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput);
}
} else {
if (scaleTargets == 0 && scaleOutput == 1) {
hipLaunchKernelGGL(( kLocalMaxUndo<4, 32, 2, 2, false, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(),
imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput);
} else {
hipLaunchKernelGGL(( kLocalMaxUndo<4, 32, 2, 2, true, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(),
imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput);
}
}
} else {
if (checkCaseBounds) {
if (scaleTargets == 0 && scaleOutput == 1) {
hipLaunchKernelGGL(( kLocalMaxUndo<4, 32, 1, 2, false, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(),
imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput);
} else {
hipLaunchKernelGGL(( kLocalMaxUndo<4, 32, 1, 2, true, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(),
imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput);
}
} else {
if (scaleTargets == 0 && scaleOutput == 1) {
hipLaunchKernelGGL(( kLocalMaxUndo<4, 32, 1, 2, false, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(),
imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput);
} else {
hipLaunchKernelGGL(( kLocalMaxUndo<4, 32, 1, 2, true, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(),
imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput);
}
}
}
getLastCudaError("convLocalMaxUndo: kernel execution failed");
}
void convLocalAvgUndo(NVMatrix& avgGrads, NVMatrix& target, int subsX, int startX, int strideX, int outputsX, int imgSize) {
convLocalAvgUndo(avgGrads, target, subsX, startX, strideX, outputsX, imgSize, 0, 1);
}
/*
* avgGrads: (numFilters, numOutputs, numImages)
* target: (numFilters, imgPixels, numImages)
*/
void convLocalAvgUndo(NVMatrix& avgGrads, NVMatrix& target,
int subsX, int startX, int strideX, int outputsX, int imgSize,
float scaleTargets, float scaleOutput) {
int numImages = avgGrads.getNumCols();
int outputs = outputsX * outputsX;
int imgPixels = imgSize * imgSize;
int numFilters = avgGrads.getNumRows() / outputs;
assert(avgGrads.getNumRows() == numFilters * outputs);
assert(!target.isTrans());
assert(!avgGrads.isTrans());
assert(avgGrads.isContiguous());
assert(numFilters % 16 == 0);
// assert(numImages % 128 == 0);
assert(strideX <= subsX);
target.resize(numFilters * imgPixels, numImages);
assert(target.isContiguous());
int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1;
int checkCaseBounds = numImages % (32*imgsPerThread) != 0;
dim3 threads(32, 4);
dim3 blocks(DIVUP(numImages,32*imgsPerThread) * imgSize, (numFilters / (4 * 4)) * imgSize);
if (imgsPerThread == 4) {
if (checkCaseBounds) {
if (scaleTargets == 0 && scaleOutput == 1) {
hipLaunchKernelGGL(( kLocalAvgUndo<4, 32, 4, 4, false, true>), dim3(blocks), dim3(threads), 0, 0, avgGrads.getDevData(), target.getDevData(),
imgSize, numFilters, numImages, subsX, startX, strideX,
outputsX, scaleTargets, scaleOutput);
} else {
hipLaunchKernelGGL(( kLocalAvgUndo<4, 32, 4, 4, true, true>), dim3(blocks), dim3(threads), 0, 0, avgGrads.getDevData(), target.getDevData(),
imgSize, numFilters, numImages, subsX, startX, strideX,
outputsX, scaleTargets, scaleOutput);
}
} else {
if (scaleTargets == 0 && scaleOutput == 1) {
hipLaunchKernelGGL(( kLocalAvgUndo<4, 32, 4, 4, false, false>), dim3(blocks), dim3(threads), 0, 0, avgGrads.getDevData(), target.getDevData(),
imgSize, numFilters, numImages, subsX, startX, strideX,
outputsX, scaleTargets, scaleOutput);
} else {
hipLaunchKernelGGL(( kLocalAvgUndo<4, 32, 4, 4, true, false>), dim3(blocks), dim3(threads), 0, 0, avgGrads.getDevData(), target.getDevData(),
imgSize, numFilters, numImages, subsX, startX, strideX,
outputsX, scaleTargets, scaleOutput);
}
}
} else if (imgsPerThread == 2) {
if (checkCaseBounds) {
if (scaleTargets == 0 && scaleOutput == 1) {
hipLaunchKernelGGL(( kLocalAvgUndo<4, 32, 2, 4, false, true>), dim3(blocks), dim3(threads), 0, 0, avgGrads.getDevData(), target.getDevData(),
imgSize, numFilters, numImages, subsX, startX, strideX,
outputsX, scaleTargets, scaleOutput);
} else {
hipLaunchKernelGGL(( kLocalAvgUndo<4, 32, 2, 4, true, true>), dim3(blocks), dim3(threads), 0, 0, avgGrads.getDevData(), target.getDevData(),
imgSize, numFilters, numImages, subsX, startX, strideX,
outputsX, scaleTargets, scaleOutput);
}
} else {
if (scaleTargets == 0 && scaleOutput == 1) {
hipLaunchKernelGGL(( kLocalAvgUndo<4, 32, 2, 4, false, false>), dim3(blocks), dim3(threads), 0, 0, avgGrads.getDevData(), target.getDevData(),
imgSize, numFilters, numImages, subsX, startX, strideX,
outputsX, scaleTargets, scaleOutput);
} else {
hipLaunchKernelGGL(( kLocalAvgUndo<4, 32, 2, 4, true, false>), dim3(blocks), dim3(threads), 0, 0, avgGrads.getDevData(), target.getDevData(),
imgSize, numFilters, numImages, subsX, startX, strideX,
outputsX, scaleTargets, scaleOutput);
}
}
} else {
if (checkCaseBounds) {
if (scaleTargets == 0 && scaleOutput == 1) {
hipLaunchKernelGGL(( kLocalAvgUndo<4, 32, 1, 4, false, true>), dim3(blocks), dim3(threads), 0, 0, avgGrads.getDevData(), target.getDevData(),
imgSize, numFilters, numImages, subsX, startX, strideX,
outputsX, scaleTargets, scaleOutput);
} else {
hipLaunchKernelGGL(( kLocalAvgUndo<4, 32, 1, 4, true, true>), dim3(blocks), dim3(threads), 0, 0, avgGrads.getDevData(), target.getDevData(),
imgSize, numFilters, numImages, subsX, startX, strideX,
outputsX, scaleTargets, scaleOutput);
}
} else {
if (scaleTargets == 0 && scaleOutput == 1) {
hipLaunchKernelGGL(( kLocalAvgUndo<4, 32, 1, 4, false, false>), dim3(blocks), dim3(threads), 0, 0, avgGrads.getDevData(), target.getDevData(),
imgSize, numFilters, numImages, subsX, startX, strideX,
outputsX, scaleTargets, scaleOutput);
} else {
hipLaunchKernelGGL(( kLocalAvgUndo<4, 32, 1, 4, true, false>), dim3(blocks), dim3(threads), 0, 0, avgGrads.getDevData(), target.getDevData(),
imgSize, numFilters, numImages, subsX, startX, strideX,
outputsX, scaleTargets, scaleOutput);
}
}
}
getLastCudaError("convLocalAvgUndo: kernel execution failed");
}
void convResponseNorm(NVMatrix& images, NVMatrix& denoms, NVMatrix& target, int numFilters, int sizeX, float addScale, float powScale) {
convContrastNorm(images, images, denoms, target, numFilters, sizeX, addScale, powScale);
}
/*
* images: (numFilters, imgPixels, numImages)
* meanDiffs: (numFilters, imgPixels, numImages)
* denoms: (numFilters, imgPixels, numImages) (out)
* target: (numFilters, imgPixels, numImages) (out)
*/
void convContrastNorm(NVMatrix& images, NVMatrix& meanDiffs, NVMatrix& denoms, NVMatrix& target, int numFilters, int sizeX, float addScale, float powScale) {
int numImages = images.getNumCols();
int imgPixels = images.getNumRows() / numFilters;
assert(images.getNumRows() == numFilters * imgPixels);
int imgSize = int(sqrt(imgPixels));
assert(imgSize * imgSize == imgPixels);
assert(meanDiffs.isSameDims(images));
assert(!meanDiffs.isTrans());
assert(!images.isTrans());
assert(images.isContiguous());
assert(meanDiffs.isContiguous());
assert(numFilters % 16 == 0 || numFilters <= 8);
target.resize(images);
denoms.resize(images);
assert(target.isContiguous());
if (sizeX >= 6 && numFilters % 4 == 0) {
// This one is faster for large regions (my tests show regions >= 6...)
int imgsPerThread = 8;
int filtersPerThread = 4;
int bx = 8;
bool checkCaseBounds = numImages % (bx*imgsPerThread) != 0;
assert((imgsPerThread * bx) % 32 == 0);
assert(numFilters % filtersPerThread == 0);
dim3 threads(bx, 16);
dim3 blocks(DIVUP(imgSize, 4) * DIVUP(numImages, bx*imgsPerThread), DIVUP(imgSize, 4) * numFilters / filtersPerThread);
if (checkCaseBounds) {
hipFuncSetCacheConfig(kCNorm2<8, 8, 4, true>, hipFuncCachePreferL1); // L1 faster here
hipLaunchKernelGGL(( kCNorm2<8, 8, 4, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
imgSize, numFilters, numImages, sizeX, addScale, powScale);
} else {
hipFuncSetCacheConfig(kCNorm2<8, 8, 4, false>, hipFuncCachePreferL1); // L1 faster here
hipLaunchKernelGGL(( kCNorm2<8, 8, 4, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
imgSize, numFilters, numImages, sizeX, addScale, powScale);
}
} else {
bool checkCaseBounds = numImages % 128 != 0;
if (numFilters <= 8) {
dim3 threads(128);
dim3 blocks(DIVUP(numImages,128) * imgSize, imgSize);
if (numFilters == 1) {
if (checkCaseBounds) {
hipFuncSetCacheConfig(kCNorm_fewfilter<1, 1, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kCNorm_fewfilter<1, 1, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
imgSize, numImages, sizeX, addScale, powScale);
} else {
hipFuncSetCacheConfig(kCNorm_fewfilter<1, 1, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kCNorm_fewfilter<1, 1, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
imgSize, numImages, sizeX, addScale, powScale);
}
} else if (numFilters == 2) {
if (checkCaseBounds) {
hipFuncSetCacheConfig(kCNorm_fewfilter<1, 2, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kCNorm_fewfilter<1, 2, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
imgSize, numImages, sizeX, addScale, powScale);
} else {
hipFuncSetCacheConfig(kCNorm_fewfilter<1, 2, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kCNorm_fewfilter<1, 2, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
imgSize, numImages, sizeX, addScale, powScale);
}
} else if (numFilters == 3) {
if (checkCaseBounds) {
hipFuncSetCacheConfig(kCNorm_fewfilter<1, 3, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kCNorm_fewfilter<1, 3, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
imgSize, numImages, sizeX, addScale, powScale);
} else {
hipFuncSetCacheConfig(kCNorm_fewfilter<1, 3, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kCNorm_fewfilter<1, 3, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
imgSize, numImages, sizeX, addScale, powScale);
}
} else if (numFilters == 4) {
if (checkCaseBounds) {
hipFuncSetCacheConfig(kCNorm_fewfilter<1, 4, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kCNorm_fewfilter<1, 4, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
imgSize, numImages, sizeX, addScale, powScale);
} else {
hipFuncSetCacheConfig(kCNorm_fewfilter<1, 4, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kCNorm_fewfilter<1, 4, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
imgSize, numImages, sizeX, addScale, powScale);
}
} else if (numFilters == 5) {
if (checkCaseBounds) {
hipFuncSetCacheConfig(kCNorm_fewfilter<1, 5, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kCNorm_fewfilter<1, 5, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
imgSize, numImages, sizeX, addScale, powScale);
} else {
hipFuncSetCacheConfig(kCNorm_fewfilter<1, 5, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kCNorm_fewfilter<1, 5, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
imgSize, numImages, sizeX, addScale, powScale);
}
} else if (numFilters == 6) {
if (checkCaseBounds) {
hipFuncSetCacheConfig(kCNorm_fewfilter<1, 6, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kCNorm_fewfilter<1, 6, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
imgSize, numImages, sizeX, addScale, powScale);
} else {
hipFuncSetCacheConfig(kCNorm_fewfilter<1, 6, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kCNorm_fewfilter<1, 6, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
imgSize, numImages, sizeX, addScale, powScale);
}
} else if (numFilters == 7) {
if (checkCaseBounds) {
hipFuncSetCacheConfig(kCNorm_fewfilter<1, 7, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kCNorm_fewfilter<1, 7, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
imgSize, numImages, sizeX, addScale, powScale);
} else {
hipFuncSetCacheConfig(kCNorm_fewfilter<1, 7, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kCNorm_fewfilter<1, 7, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
imgSize, numImages, sizeX, addScale, powScale);
}
} else if (numFilters == 8) {
if (checkCaseBounds) {
hipFuncSetCacheConfig(kCNorm_fewfilter<1, 8, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kCNorm_fewfilter<1, 8, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
imgSize, numImages, sizeX, addScale, powScale);
} else {
hipFuncSetCacheConfig(kCNorm_fewfilter<1, 8, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kCNorm_fewfilter<1, 8, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
imgSize, numImages, sizeX, addScale, powScale);
}
}
} else {
dim3 threads(32, 4);
dim3 blocks(DIVUP(numImages,32*4) * imgSize, (numFilters / (4 * 2)) * imgSize);
if (checkCaseBounds) {
hipFuncSetCacheConfig(kCNorm_manyfilter<4, 32, 4, 2, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kCNorm_manyfilter<4, 32, 4, 2, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
imgSize, numFilters, numImages, sizeX, addScale, powScale);
} else {
hipFuncSetCacheConfig(kCNorm_manyfilter<4, 32, 4, 2, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kCNorm_manyfilter<4, 32, 4, 2, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
imgSize, numFilters, numImages, sizeX, addScale, powScale);
}
}
}
getLastCudaError("convResponseNorm: kernel execution failed");
}
void convContrastNormUndo(NVMatrix& outGrads, NVMatrix& denoms, NVMatrix& meanDiffs, NVMatrix& acts, NVMatrix& target, int numFilters,
int sizeX, float addScale, float powScale, float scaleTargets, float scaleOutput) {
convResponseNormUndo(outGrads, denoms, meanDiffs, acts, target, numFilters, sizeX, addScale, powScale, scaleTargets, scaleOutput);
}
/*
* outGrads: (numFilters, imgPixels, numImages)
* denoms: (numFilters, imgPixels, numImages)
* inputs: (numFilters, imgPixels, numImages)
* acts: (numFilters, imgPixels, numImages)
* target: (numFilters, imgPixels, numImages)
*
* THIS WILL OVERWRITE THE ACTS MATRIX.
*/
void convResponseNormUndo(NVMatrix& outGrads, NVMatrix& denoms, NVMatrix& inputs, NVMatrix& acts, NVMatrix& target, int numFilters,
int sizeX, float addScale, float powScale, float scaleTargets, float scaleOutput) {
int numImages = outGrads.getNumCols();
int imgPixels = outGrads.getNumRows() / numFilters;
int imgSize = int(sqrt(imgPixels));
assert(imgSize * imgSize == imgPixels);
assert(outGrads.getNumRows() == numFilters * imgPixels);
assert(denoms.isSameDims(outGrads));
assert(acts.isSameDims(denoms));
assert(!denoms.isTrans());
assert(!outGrads.isTrans());
assert(!acts.isTrans());
assert(!target.isTrans());
assert(outGrads.isContiguous());
assert(numFilters % 16 == 0);
target.resize(outGrads);
assert(target.isContiguous());
// First do acts := -2 x scale x acts x outGrads / denoms
// so that the main routine only has to do an addition in its inner loop.
int prelimEltsPerThread = 4;
dim3 threads(128);
dim3 blocks(MIN(512, DIVUP(outGrads.getNumElements(),(threads.x * prelimEltsPerThread))));
hipLaunchKernelGGL(( kRNormUndoPrelims<128, 4>), dim3(blocks), dim3(threads), 0, 0, acts.getDevData(), denoms.getDevData(), outGrads.getDevData(), outGrads.getNumElements(), -2*addScale*powScale);
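// Editorial sketch (hedged) of why this prelim transform helps, written for the plain
// response-norm case where 'inputs' is the same matrix as the layer input x
// (a = addScale, p = powScale, g = outGrads, y = acts, d = denoms):
//   forward: y_i = x_i * d_i^(-p), with d_i accumulating a * x_j^2 over pixel i's window;
//   backward: dL/dx_k ~= g_k * d_k^(-p) - 2*a*p * x_k * (sum over k's neighborhood of g_i * y_i / d_i).
// The launch above rewrites acts into the per-pixel term -2*a*p * g_i * y_i / d_i, so the main
// kernels below only need a windowed sum of acts plus a pointwise part (the
// "inp * prod + out * __powf(den, -powScale)" line in kRNormUndo/kRNormUndo2).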
// Now the main routine
if (sizeX >= 6 && numFilters % 4 == 0) {
// This one is faster for large regions (my tests show regions >= 6...)
int imgsPerThread = numImages % 128 == 0 ? 8 : numImages % 64 == 0 ? 4 : 2;
int filtersPerThread = 4;
int bx = 16;
bool checkCaseBounds = numImages % (bx*imgsPerThread) != 0;
assert((imgsPerThread * bx) % 32 == 0);
threads = dim3(bx, 16);
blocks = dim3(DIVUP(imgSize, 4) * DIVUP(numImages, bx*imgsPerThread), DIVUP(imgSize, 4) * numFilters / filtersPerThread);
if (imgsPerThread == 8) {
if (checkCaseBounds) {
if (scaleTargets == 0 && scaleOutput == 1) {
hipFuncSetCacheConfig(kRNormUndo2<16, 8, 4, true, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRNormUndo2<16, 8, 4, true, true>), dim3(blocks), dim3(threads), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale,
scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(kRNormUndo2<16, 8, 4, false, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRNormUndo2<16, 8, 4, false, true>), dim3(blocks), dim3(threads), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale,
scaleTargets, scaleOutput);
}
} else {
if (scaleTargets == 0 && scaleOutput == 1) {
hipFuncSetCacheConfig(kRNormUndo2<16, 8, 4, true, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRNormUndo2<16, 8, 4, true, false>), dim3(blocks), dim3(threads), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale,
scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(kRNormUndo2<16, 8, 4, false, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRNormUndo2<16, 8, 4, false, false>), dim3(blocks), dim3(threads), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale,
scaleTargets, scaleOutput);
}
}
} else if (imgsPerThread == 4) {
if (checkCaseBounds) {
if (scaleTargets == 0 && scaleOutput == 1) {
hipFuncSetCacheConfig(kRNormUndo2<16, 4, 4, true, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRNormUndo2<16, 4, 4, true, true>), dim3(blocks), dim3(threads), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale,
scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(kRNormUndo2<16, 4, 4, false, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRNormUndo2<16, 4, 4, false, true>), dim3(blocks), dim3(threads), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale,
scaleTargets, scaleOutput);
}
} else {
if (scaleTargets == 0 && scaleOutput == 1) {
hipFuncSetCacheConfig(kRNormUndo2<16, 4, 4, true, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRNormUndo2<16, 4, 4, true, false>), dim3(blocks), dim3(threads), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale,
scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(kRNormUndo2<16, 4, 4, false, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRNormUndo2<16, 4, 4, false, false>), dim3(blocks), dim3(threads), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale,
scaleTargets, scaleOutput);
}
}
} else {
if (checkCaseBounds) {
if (scaleTargets == 0 && scaleOutput == 1) {
hipFuncSetCacheConfig(kRNormUndo2<16, 2, 4, true, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRNormUndo2<16, 2, 4, true, true>), dim3(blocks), dim3(threads), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale,
scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(kRNormUndo2<16, 2, 4, false, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRNormUndo2<16, 2, 4, false, true>), dim3(blocks), dim3(threads), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale,
scaleTargets, scaleOutput);
}
} else {
if (scaleTargets == 0 && scaleOutput == 1) {
hipFuncSetCacheConfig(kRNormUndo2<16, 2, 4, true, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRNormUndo2<16, 2, 4, true, false>), dim3(blocks), dim3(threads), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale,
scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(kRNormUndo2<16, 2, 4, false, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRNormUndo2<16, 2, 4, false, false>), dim3(blocks), dim3(threads), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale,
scaleTargets, scaleOutput);
}
}
}
} else {
int imgsPerThread = numImages % 64 == 0 ? 2 : 1;
bool checkCaseBounds = numImages % (32*imgsPerThread) != 0;
threads = dim3(32, 4);
blocks = dim3(DIVUP(numImages,32*imgsPerThread) * imgSize, (numFilters / (4 * 2)) * imgSize);
if (imgsPerThread == 2) {
if (checkCaseBounds) {
if (scaleTargets == 0 && scaleOutput == 1) {
hipFuncSetCacheConfig(kRNormUndo<4, 32, 2, 2, false, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRNormUndo<4, 32, 2, 2, false, true>), dim3(blocks), dim3(threads), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale,
scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(kRNormUndo<4, 32, 2, 2, true, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRNormUndo<4, 32, 2, 2, true, true>), dim3(blocks), dim3(threads), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale,
scaleTargets, scaleOutput);
}
} else {
if (scaleTargets == 0 && scaleOutput == 1) {
hipFuncSetCacheConfig(kRNormUndo<4, 32, 2, 2, false, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRNormUndo<4, 32, 2, 2, false, false>), dim3(blocks), dim3(threads), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale,
scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(kRNormUndo<4, 32, 2, 2, true, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRNormUndo<4, 32, 2, 2, true, false>), dim3(blocks), dim3(threads), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale,
scaleTargets, scaleOutput);
}
}
} else {
if (checkCaseBounds) {
if (scaleTargets == 0 && scaleOutput == 1) {
hipFuncSetCacheConfig(kRNormUndo<4, 32, 1, 2, false, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRNormUndo<4, 32, 1, 2, false, true>), dim3(blocks), dim3(threads), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale,
scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(kRNormUndo<4, 32, 1, 2, true, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRNormUndo<4, 32, 1, 2, true, true>), dim3(blocks), dim3(threads), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale,
scaleTargets, scaleOutput);
}
} else {
if (scaleTargets == 0 && scaleOutput == 1) {
hipFuncSetCacheConfig(kRNormUndo<4, 32, 1, 2, false, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRNormUndo<4, 32, 1, 2, false, false>), dim3(blocks), dim3(threads), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale,
scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(kRNormUndo<4, 32, 1, 2, true, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRNormUndo<4, 32, 1, 2, true, false>), dim3(blocks), dim3(threads), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale,
scaleTargets, scaleOutput);
}
}
}
}
getLastCudaError("kRNormUndo: kernel execution failed");
}
/*
* imgs: (numChannels, imgPixels, numImages) with given imgStride
* target: (numChannels, tgtPixels, numImages)
*
* imgSize = scale * tgtSize
*/
void convResizeBilinear(NVMatrix& images, NVMatrix& target, int imgSize, int tgtSize, float scale) {
assert(!images.isTrans());
assert(!target.isTrans());
int imgPixels = imgSize * imgSize;
int tgtPixels = tgtSize * tgtSize;
int numChannels = images.getNumRows() / imgPixels;
int numImages = images.getNumCols();
assert(images.getNumRows() == numChannels * imgPixels);
target.resize(numChannels * tgtPixels, numImages);
assert(target.isContiguous());
int numChunksX = DIVUP(tgtSize, 4);
int numChunks = numChunksX * numChunksX;
double imgCenter = imgSize * 0.5;
double tgtCenter = tgtSize * 0.5;
double centerScale = imgCenter - tgtCenter * scale;
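// Illustrative note (assumes the pixel mapping used by kResizeBilinear, defined earlier in
// this file): a target pixel t samples the source at t * scale + centerScale, which keeps the
// two image centers aligned. E.g. imgSize = 8, tgtSize = 4, scale = 2 gives
// centerScale = 4 - 2*2 = 0, so target pixels 0..3 sample source pixels 0, 2, 4, 6.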
int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1;
bool checkCaseBounds = numImages % (32*imgsPerThread) != 0;
dim3 threads(32, 16);
dim3 blocks(DIVUP(numImages, imgsPerThread * 32), numChannels * numChunks);
if (imgsPerThread == 4) {
if (checkCaseBounds) {
hipFuncSetCacheConfig(kResizeBilinear<4, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kResizeBilinear<4, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgSize, tgtSize, numImages, images.getStride(), scale, centerScale);
} else {
hipFuncSetCacheConfig(kResizeBilinear<4, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kResizeBilinear<4, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgSize, tgtSize, numImages, images.getStride(), scale, centerScale);
}
} else if (imgsPerThread == 2) {
if (checkCaseBounds) {
hipFuncSetCacheConfig(kResizeBilinear<2, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kResizeBilinear<2, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgSize, tgtSize, numImages, images.getStride(), scale, centerScale);
} else {
hipFuncSetCacheConfig(kResizeBilinear<2, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kResizeBilinear<2, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgSize, tgtSize, numImages, images.getStride(), scale, centerScale);
}
} else {
if (checkCaseBounds) {
hipFuncSetCacheConfig(kResizeBilinear<1, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kResizeBilinear<1, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgSize, tgtSize, numImages, images.getStride(), scale, centerScale);
} else {
hipFuncSetCacheConfig(kResizeBilinear<1, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kResizeBilinear<1, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgSize, tgtSize, numImages, images.getStride(), scale, centerScale);
}
}
getLastCudaError("convResizeBilinear: kernel execution failed");
}
/*
* imgs: (3, imgPixels, numImages) with given imgStride
* target: (3, imgPixels, numImages)
*/
void convRGBToYUV(NVMatrix& images, NVMatrix& target) {
assert(!images.isTrans());
assert(!target.isTrans());
int imgPixels = images.getNumRows() / 3;
int numImages = images.getNumCols();
assert(images.getNumRows() == 3 * imgPixels);
target.resize(3 * imgPixels, numImages);
assert(target.isContiguous());
int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1;
bool checkCaseBounds = numImages % (32*imgsPerThread) != 0;
dim3 threads(32, 4);
dim3 blocks(DIVUP(numImages, imgsPerThread * 32), DIVUP(imgPixels, 4));
if (imgsPerThread == 4) {
if (checkCaseBounds) {
hipFuncSetCacheConfig(kRGBToYUV<4, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRGBToYUV<4, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride());
} else {
hipFuncSetCacheConfig(kRGBToYUV<4, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRGBToYUV<4, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride());
}
} else if (imgsPerThread == 2) {
if (checkCaseBounds) {
hipFuncSetCacheConfig(kRGBToYUV<2, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRGBToYUV<2, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride());
} else {
hipFuncSetCacheConfig(kRGBToYUV<2, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRGBToYUV<2, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride());
}
} else {
if (checkCaseBounds) {
hipFuncSetCacheConfig(kRGBToYUV<1, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRGBToYUV<1, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride());
} else {
hipFuncSetCacheConfig(kRGBToYUV<1, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRGBToYUV<1, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride());
}
}
getLastCudaError("convRGBToYUV: kernel execution failed");
}
/*
* imgs: (3, imgPixels, numImages) with given imgStride
* target: (3, imgPixels, numImages)
*/
void convRGBToLAB(NVMatrix& images, NVMatrix& target, bool center) {
assert(!images.isTrans());
assert(!target.isTrans());
int imgPixels = images.getNumRows() / 3;
int numImages = images.getNumCols();
assert(images.getNumRows() == 3 * imgPixels);
target.resize(3 * imgPixels, numImages);
assert(target.isContiguous());
int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1;
bool checkCaseBounds = numImages % (32*imgsPerThread) != 0;
dim3 threads(32, 4);
dim3 blocks(DIVUP(numImages, imgsPerThread * 32), DIVUP(imgPixels, 4));
if (imgsPerThread == 4) {
if (center) {
if (checkCaseBounds) {
hipFuncSetCacheConfig(kRGBToLAB<4, true, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRGBToLAB<4, true, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride());
} else {
hipFuncSetCacheConfig(kRGBToLAB<4, false, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRGBToLAB<4, false, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride());
}
} else {
if (checkCaseBounds) {
hipFuncSetCacheConfig(kRGBToLAB<4, true, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRGBToLAB<4, true, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride());
} else {
hipFuncSetCacheConfig(kRGBToLAB<4, false, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRGBToLAB<4, false, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride());
}
}
} else if (imgsPerThread == 2) {
if (center) {
if (checkCaseBounds) {
hipFuncSetCacheConfig(kRGBToLAB<2, true, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRGBToLAB<2, true, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride());
} else {
hipFuncSetCacheConfig(kRGBToLAB<2, false, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRGBToLAB<2, false, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride());
}
} else {
if (checkCaseBounds) {
hipFuncSetCacheConfig(kRGBToLAB<2, true, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRGBToLAB<2, true, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride());
} else {
hipFuncSetCacheConfig(kRGBToLAB<2, false, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRGBToLAB<2, false, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride());
}
}
} else {
if (center) {
if (checkCaseBounds) {
hipFuncSetCacheConfig(kRGBToLAB<1, true, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRGBToLAB<1, true, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride());
} else {
hipFuncSetCacheConfig(kRGBToLAB<1, false, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRGBToLAB<1, false, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride());
}
} else {
if (checkCaseBounds) {
hipFuncSetCacheConfig(kRGBToLAB<1, true, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRGBToLAB<1, true, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride());
} else {
hipFuncSetCacheConfig(kRGBToLAB<1, false, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRGBToLAB<1, false, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride());
}
}
}
getLastCudaError("convRGBToLAB: kernel execution failed");
}
/*
* imgs: (numChannels, imgPixels, numImages) with given imgStride
* target: (numChannels, tgtPixels, numImages)
*/
void convCrop(NVMatrix& imgs, NVMatrix& target, int imgSize, int tgtSize, int startY, int startX) {
int numImages = imgs.getNumCols();
int imgPixels = imgSize * imgSize;
int tgtPixels = tgtSize * tgtSize;
int numChannels = imgs.getNumRows() / imgPixels;
assert(imgs.getNumRows() == imgPixels * numChannels);
assert(imgPixels == imgSize * imgSize);
assert(imgSize - startY >= tgtSize);
assert(imgSize - startX >= tgtSize);
assert(startY >= 0);
assert(startX >= 0);
target.resize(numChannels * tgtPixels, numImages);
int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1;
bool checkCaseBounds = numImages % (32*imgsPerThread) != 0;
dim3 blocks(DIVUP(numImages, 32 * imgsPerThread), numChannels * DIVUP(tgtPixels, 4));
dim3 threads(32, 4);
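// grid.x tiles the images in batches of 32*imgsPerThread; grid.y packs (channel, group of 4
// target pixels), with threadIdx.y picking the pixel within the group -- matching how kCrop
// decodes blockIdx.y.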
if (imgsPerThread == 4) {
if (checkCaseBounds) {
hipLaunchKernelGGL(( kCrop<4, true>), dim3(blocks), dim3(threads), 0, 0, imgs.getDevData(), target.getDevData(), numImages, imgs.getStride(), imgSize, tgtSize, startY, startX);
} else {
hipLaunchKernelGGL(( kCrop<4, false>), dim3(blocks), dim3(threads), 0, 0, imgs.getDevData(), target.getDevData(), numImages, imgs.getStride(), imgSize, tgtSize, startY, startX);
}
} else if (imgsPerThread == 2) {
if (checkCaseBounds) {
hipLaunchKernelGGL(( kCrop<2, true>), dim3(blocks), dim3(threads), 0, 0, imgs.getDevData(), target.getDevData(), numImages, imgs.getStride(), imgSize, tgtSize, startY, startX);
} else {
hipLaunchKernelGGL(( kCrop<2, false>), dim3(blocks), dim3(threads), 0, 0, imgs.getDevData(), target.getDevData(), numImages, imgs.getStride(), imgSize, tgtSize, startY, startX);
}
} else {
if (checkCaseBounds) {
hipLaunchKernelGGL(( kCrop<1, true>), dim3(blocks), dim3(threads), 0, 0, imgs.getDevData(), target.getDevData(), numImages, imgs.getStride(), imgSize, tgtSize, startY, startX);
} else {
hipLaunchKernelGGL(( kCrop<1, false>), dim3(blocks), dim3(threads), 0, 0, imgs.getDevData(), target.getDevData(), numImages, imgs.getStride(), imgSize, tgtSize, startY, startX);
}
}
getLastCudaError("convCrop: kernel execution failed");
}
/*
* images: (numFilters, imgPixels, numImages)
* ticas: (numFilters, imgPixels, numImages)
* target: (numFilters, imgPixels, numImages) (out)
*
* Computes TICA-style gradient for given feature maps
* f(x) = exp(-(sum_i{x_i^2}^(1/2)))
* dlogf(x)/df(x) = -x_i / (sum_i{x_i^2}^(1/2) + eps)
*
* eps added for numerical stability
*/
void convTICAGrad(NVMatrix& images, NVMatrix& ticas, NVMatrix& target, int numFilters, int sizeX, float scaleTarget, float scaleOutput) {
int numImages = images.getNumCols();
int imgPixels = images.getNumRows() / numFilters;
assert(images.getNumRows() == numFilters * imgPixels);
int imgSize = int(sqrt(imgPixels));
assert(imgSize * imgSize == imgPixels);
assert(!images.isTrans());
assert(images.isContiguous());
assert(numFilters % 16 == 0 || numFilters <= 8);
assert(ticas.isSameDims(images));
assert(ticas.isContiguous());
if (scaleTarget == 0) {
target.resize(images);
} else {
assert(target.isSameDims(images));
}
assert(target.isContiguous());
// TEMPORARY
assert(numFilters > 8);
assert(sizeX < 6);
dim3 threads(32, 4);
dim3 blocks(DIVUP(numImages, 32*4) * imgSize, (numFilters / (4 * 2)) * imgSize);
bool checkCaseBounds = (numImages % 128) != 0;
if (checkCaseBounds) {
hipFuncSetCacheConfig(kTICAGrad_manyfilter<4, 32, 4, 2, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kTICAGrad_manyfilter<4, 32, 4, 2, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), ticas.getDevData(), target.getDevData(),
imgSize, numFilters, numImages, sizeX, scaleTarget, scaleOutput);
} else {
hipFuncSetCacheConfig(kTICAGrad_manyfilter<4, 32, 4, 2, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kTICAGrad_manyfilter<4, 32, 4, 2, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), ticas.getDevData(), target.getDevData(),
imgSize, numFilters, numImages, sizeX, scaleTarget, scaleOutput);
}
getLastCudaError("convTICAGrad: kernel execution failed");
}
/*
* images: (numFilters, imgPixels, numImages)
* target: (numFilters, imgPixels, numImages) (out)
*
* Computes TICA-style gradient for given feature maps
* f(x) = exp(-(sum_i{x_i^2}^(1/2)))
* dlogf(x)/df(x) = -x_i / (sum_i{x_i^2}^(1/2) + eps)
*
* eps added for numerical stability
*/
void convTICA(NVMatrix& images, NVMatrix& target, int numFilters, int sizeX, float scaleTarget, float scaleOutput) {
int numImages = images.getNumCols();
int imgPixels = images.getNumRows() / numFilters;
assert(images.getNumRows() == numFilters * imgPixels);
int imgSize = int(sqrt(imgPixels));
assert(imgSize * imgSize == imgPixels);
assert(!images.isTrans());
assert(images.isContiguous());
assert(numFilters % 16 == 0 || numFilters <= 8);
if (scaleTarget == 0) {
target.resize(images);
} else {
assert(target.isSameDims(images));
}
assert(target.isContiguous());
// TEMPORARY
assert(numFilters > 8);
assert(sizeX < 6);
dim3 threads(32, 4);
dim3 blocks(DIVUP(numImages, 32*4) * imgSize, (numFilters / (4 * 2)) * imgSize);
bool checkCaseBounds = (numImages % 128) != 0;
if (checkCaseBounds) {
hipFuncSetCacheConfig(kTICA_manyfilter<4, 32, 4, 2, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kTICA_manyfilter<4, 32, 4, 2, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(),
imgSize, numFilters, numImages, sizeX, scaleTarget, scaleOutput);
} else {
hipFuncSetCacheConfig(kTICA_manyfilter<4, 32, 4, 2, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kTICA_manyfilter<4, 32, 4, 2, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(),
imgSize, numFilters, numImages, sizeX, scaleTarget, scaleOutput);
}
getLastCudaError("convTICA: kernel execution failed");
}
/*
* images: (numFilters, imgPixels, numImages)
* meanDiffs: (numFilters, imgPixels, numImages)
* denoms: (numFilters, imgPixels, numImages) (out)
* target: (numFilters, imgPixels, numImages) (out)
* Note: at present, I have no code to compute the meanDiffs. So it should be set
* to be equal to images. In other words, this isn't really doing contrast normalization,
* just response normalization.
*/
void convContrastNormCrossMap(NVMatrix& images, NVMatrix& meanDiffs, NVMatrix& denoms, NVMatrix& target,
int numFilters, int sizeF, float addScale, float powScale, bool blocked) {
int numImages = images.getNumCols();
int imgPixels = images.getNumRows() / numFilters;
assert(images.getNumRows() == numFilters * imgPixels);
int imgSize = int(sqrt(imgPixels));
assert(imgSize * imgSize == imgPixels);
assert(meanDiffs.isSameDims(images));
assert(sizeF > 0 && sizeF <= numFilters);
assert(!meanDiffs.isTrans());
assert(!images.isTrans());
assert(images.isContiguous());
assert(meanDiffs.isContiguous());
assert(numFilters % 16 == 0);
target.resize(images);
denoms.resize(images);
assert(target.isContiguous());
bool checkCaseBounds = numImages % 128 != 0;
dim3 threads(32, 4);
dim3 blocks(DIVUP(numImages,32*4) * imgSize, (numFilters / 4) * imgSize);
if (blocked) {
if (checkCaseBounds) {
hipFuncSetCacheConfig(kFCNorm<4, 32, 4, true, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kFCNorm<4, 32, 4, true, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
imgSize, numFilters, numImages, sizeF, addScale, powScale);
} else {
hipFuncSetCacheConfig(kFCNorm<4, 32, 4, false, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kFCNorm<4, 32, 4, false, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
imgSize, numFilters, numImages, sizeF, addScale, powScale);
}
} else {
if (checkCaseBounds) {
hipFuncSetCacheConfig(kFCNorm<4, 32, 4, true, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kFCNorm<4, 32, 4, true, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
imgSize, numFilters, numImages, sizeF, addScale, powScale);
} else {
hipFuncSetCacheConfig(kFCNorm<4, 32, 4, false, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kFCNorm<4, 32, 4, false, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
imgSize, numFilters, numImages, sizeF, addScale, powScale);
}
}
getLastCudaError("convContrastNormCrossMap: kernel execution failed");
}
/*
* outGrads: (numFilters, imgPixels, numImages)
* denoms: (numFilters, imgPixels, numImages)
* inputs: (numFilters, imgPixels, numImages)
* acts: (numFilters, imgPixels, numImages)
* target: (numFilters, imgPixels, numImages)
*
* THIS WILL OVERWRITE THE ACTS MATRIX.
*/
void convResponseNormCrossMapUndo(NVMatrix& outGrads, NVMatrix& denoms, NVMatrix& inputs, NVMatrix& acts, NVMatrix& target, int numFilters,
int sizeF, float addScale, float powScale, bool blocked, float scaleTargets, float scaleOutput) {
int numImages = outGrads.getNumCols();
int imgPixels = outGrads.getNumRows() / numFilters;
int imgSize = int(sqrt(imgPixels));
assert(imgSize * imgSize == imgPixels);
assert(sizeF > 0 && sizeF <= numFilters);
assert(outGrads.getNumRows() == numFilters * imgPixels);
assert(denoms.isSameDims(outGrads));
assert(acts.isSameDims(denoms));
assert(!denoms.isTrans());
assert(!outGrads.isTrans());
assert(!acts.isTrans());
assert(!target.isTrans());
assert(outGrads.isContiguous());
assert(numFilters % 16 == 0);
target.resize(outGrads);
assert(target.isContiguous());
// First do acts := -2 x scale x acts x outGrads / denoms
// so that the main routine only has to do an addition in its inner loop.
int prelimEltsPerThread = 4;
dim3 threads(128);
dim3 blocks(MIN(512, DIVUP(outGrads.getNumElements(),(threads.x * prelimEltsPerThread))));
hipLaunchKernelGGL(( kRNormUndoPrelims<128, 4>), dim3(blocks), dim3(threads), 0, 0, acts.getDevData(), denoms.getDevData(), outGrads.getDevData(), outGrads.getNumElements(), -2*addScale*powScale);
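// Same prelim trick as in convResponseNormUndo above, just applied across the filter
// (cross-map) dimension: acts becomes -2*addScale*powScale * acts * outGrads / denoms, so the
// kFRNormUndo kernels below only need a windowed sum plus a pointwise term.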
// Now the main routine
dim3 threads2 = dim3(32, 4);
dim3 blocks2 = dim3(DIVUP(numImages,32*4) * imgSize, (numFilters / 4) * imgSize);
bool checkCaseBounds = (numImages % 128) != 0;
if (blocked) {
if (scaleTargets == 0 && scaleOutput == 1) {
if (checkCaseBounds) {
hipFuncSetCacheConfig(kFRNormUndo<4, 32, 4, false, true, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kFRNormUndo<4, 32, 4, false, true, true>), dim3(blocks2), dim3(threads2), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
target.getDevData(), imgSize, numFilters, numImages, sizeF, powScale,
scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(kFRNormUndo<4, 32, 4, false, false, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kFRNormUndo<4, 32, 4, false, false, true>), dim3(blocks2), dim3(threads2), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
target.getDevData(), imgSize, numFilters, numImages, sizeF, powScale,
scaleTargets, scaleOutput);
}
} else {
if (checkCaseBounds) {
hipFuncSetCacheConfig(kFRNormUndo<4, 32, 4, true, true, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kFRNormUndo<4, 32, 4, true, true, true>), dim3(blocks2), dim3(threads2), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
target.getDevData(), imgSize, numFilters, numImages, sizeF, powScale,
scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(kFRNormUndo<4, 32, 4, true, false, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kFRNormUndo<4, 32, 4, true, false, true>), dim3(blocks2), dim3(threads2), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
target.getDevData(), imgSize, numFilters, numImages, sizeF, powScale,
scaleTargets, scaleOutput);
}
}
} else {
if (scaleTargets == 0 && scaleOutput == 1) {
if (checkCaseBounds) {
hipFuncSetCacheConfig(kFRNormUndo<4, 32, 4, false, true, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kFRNormUndo<4, 32, 4, false, true, false>), dim3(blocks2), dim3(threads2), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
target.getDevData(), imgSize, numFilters, numImages, sizeF, powScale,
scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(kFRNormUndo<4, 32, 4, false, false, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kFRNormUndo<4, 32, 4, false, false, false>), dim3(blocks2), dim3(threads2), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
target.getDevData(), imgSize, numFilters, numImages, sizeF, powScale,
scaleTargets, scaleOutput);
}
} else {
if (checkCaseBounds) {
hipFuncSetCacheConfig(kFRNormUndo<4, 32, 4, true, true, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kFRNormUndo<4, 32, 4, true, true, false>), dim3(blocks2), dim3(threads2), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
target.getDevData(), imgSize, numFilters, numImages, sizeF, powScale,
scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(kFRNormUndo<4, 32, 4, true, false, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kFRNormUndo<4, 32, 4, true, false, false>), dim3(blocks2), dim3(threads2), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
target.getDevData(), imgSize, numFilters, numImages, sizeF, powScale,
scaleTargets, scaleOutput);
}
}
}
getLastCudaError("convResponseNormCrossMapUndo: kernel execution failed");
}
void convResponseNormCrossMap(NVMatrix& images, NVMatrix& denoms, NVMatrix& target, int numFilters, int sizeF, float addScale, float powScale, bool blocked) {
convContrastNormCrossMap(images, images, denoms, target, numFilters, sizeF, addScale, powScale, blocked);
}
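/*
 * Editorial usage sketch -- not part of the original library. Shows one forward/backward pair
 * for cross-map response normalization using the wrappers above. It assumes NVMatrix has a
 * default constructor (as used elsewhere in cuda-convnet); the function name, the size/scale
 * hyperparameters (5, 0.0001, 0.75) and the zero/one output scaling are illustrative choices,
 * not values taken from this file.
 */
void crossMapLRNExampleSketch(NVMatrix& images, NVMatrix& outGrads, NVMatrix& inGrads, int numFilters) {
    NVMatrix denoms, acts;
    // Forward pass: acts = images scaled by the cross-map denominators; denoms is kept for backprop.
    convResponseNormCrossMap(images, denoms, acts, numFilters, 5, 0.0001f, 0.75f, false);
    // Backward pass: writes the input gradient into inGrads. Note the warning above -- this
    // overwrites 'acts'.
    convResponseNormCrossMapUndo(outGrads, denoms, images, acts, inGrads, numFilters,
                                 5, 0.0001f, 0.75f, false, 0, 1);
}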
|
31f74c0765d1528aa5f51a6be7560c993e7e663e.cu
|
/*
* Copyright (c) 2011, Alex Krizhevsky ([email protected])
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <iostream>
#include <assert.h>
#include <nvmatrix_kernels.cuh>
#include <nvmatrix.cuh>
#include <conv_util.cuh>
using namespace std;
__device__ inline float square(const float a) {
return a * a;
}
/*
* blockIdx.y determines module in batches of B_Y
* blockIdx.x determines filter in batches of B_X * filtersPerThread
*
* weights: (numModules, numColors, filterPixels, numFilters)
* Not fully coalesced if B_X < 32, so use cache.
*/
template <int B_Y, int B_X, int filtersPerThread>
__global__ void kNormalizeLCWeights(float* weights, const uint numFilters, const int numModules, const uint weightsPerFilter, const float norm) {
const uint moduleIdx = B_Y * blockIdx.y + threadIdx.y;
const uint filterIdx = B_X * blockIdx.x + threadIdx.x;
float prod[filtersPerThread];
#pragma unroll
for (uint i = 0; i < filtersPerThread; ++i) {
prod[i] = 0;
}
if (moduleIdx < numModules) {
weights += moduleIdx * weightsPerFilter * numFilters + filterIdx;
for (uint p = 0; p < weightsPerFilter; ++p) {
#pragma unroll
for (uint i = 0; i < filtersPerThread; ++i) {
prod[i] += square(weights[p * numFilters + i * B_X]);
}
}
#pragma unroll
for (uint i = 0; i < filtersPerThread; ++i) {
prod[i] = sqrtf(prod[i]);
prod[i] = prod[i] > norm ? __fdividef(norm, prod[i]) : 1.0f;
}
for (uint p = 0; p < weightsPerFilter; ++p) {
#pragma unroll
for (uint i = 0; i < filtersPerThread; ++i) {
weights[p * numFilters + i * B_X] *= prod[i];
}
}
}
}
/*
* weights: (numModules, numColors, filterPixels, numFilters)
*/
void normalizeLocalWeights(NVMatrix& weights, int numModules, float norm) {
int numFilters = weights.getNumCols();
int weightsPerFilter = weights.getNumRows() / numModules;
assert(numModules * weightsPerFilter == weights.getNumRows());
assert(!weights.isTrans());
assert(weights.isContiguous());
assert(numFilters % 16 == 0);
int bx = numFilters % 32 == 0 ? 32 : 16;
int by = bx == 32 ? 4 : 8;
int filtersPerThread = numFilters % 128 == 0 ? 4 : numFilters % 64 == 0 ? 2 : 1;
dim3 blocks(numFilters / (bx * filtersPerThread), DIVUP(numModules, by));
dim3 threads(bx, by);
if (filtersPerThread == 4) {
cudaFuncSetCacheConfig(kNormalizeLCWeights<4, 32, 4>, cudaFuncCachePreferL1);
kNormalizeLCWeights<4, 32, 4><<<blocks, threads>>>(weights.getDevData(), numFilters, numModules, weightsPerFilter, norm);
} else if (filtersPerThread == 2) {
cudaFuncSetCacheConfig(kNormalizeLCWeights<4, 32, 2>, cudaFuncCachePreferL1);
kNormalizeLCWeights<4, 32, 2><<<blocks, threads>>>(weights.getDevData(), numFilters, numModules, weightsPerFilter, norm);
} else {
if (numFilters % 32 == 0) {
cudaFuncSetCacheConfig(kNormalizeLCWeights<4, 32, 1>, cudaFuncCachePreferL1);
kNormalizeLCWeights<4, 32, 1><<<blocks, threads>>>(weights.getDevData(), numFilters, numModules, weightsPerFilter, norm);
} else {
cudaFuncSetCacheConfig(kNormalizeLCWeights<8, 16, 1>, cudaFuncCachePreferL1);
kNormalizeLCWeights<8, 16, 1><<<blocks, threads>>>(weights.getDevData(), numFilters, numModules, weightsPerFilter, norm);
}
}
}
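/*
 * Minimal host-side sketch of the clamping rule applied by kNormalizeLCWeights above,
 * written against a plain row-major float array instead of NVMatrix (an illustrative
 * reference only). The layout assumption matches the comment above:
 * weights[(m * weightsPerFilter + p) * numFilters + f].
 */
static void normalizeLocalWeightsReferenceCPU(float* weights, int numModules, int numFilters,
                                              int weightsPerFilter, float norm) {
    for (int m = 0; m < numModules; m++) {
        for (int f = 0; f < numFilters; f++) {
            float sumSq = 0;
            for (int p = 0; p < weightsPerFilter; p++) {
                const float w = weights[(m * weightsPerFilter + p) * numFilters + f];
                sumSq += w * w;
            }
            const float len = sqrtf(sumSq);
            if (len > norm) { // only filters whose L2 norm exceeds the limit are rescaled
                const float scale = norm / len;
                for (int p = 0; p < weightsPerFilter; p++) {
                    weights[(m * weightsPerFilter + p) * numFilters + f] *= scale;
                }
            }
        }
    }
}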
/*
* Block size 4x32
* blockIdx.x determines img idx in batches of 32*imgsPerThread
* blockIdx.y determines channel idx, pixel idx in batches of 4
*
 * threadIdx.x determines case idx
* threadIdx.y determines pixel idx
*
* imgs: (numChannels, imgPixels, numImages) with given imgStride
* target: (numChannels, tgtPixels, numImages)
*/
template <int imgsPerThread, bool checkCaseBounds>
__global__ void kCrop(float* imgs, float* target, const uint numImages, const int imgStride,
const uint imgSize, const uint tgtSize, const uint startY, const uint startX) {
const uint imgPixels = imgSize * imgSize;
const uint tgtPixels = tgtSize * tgtSize;
const uint caseIdx = blockIdx.x * 32 * imgsPerThread + threadIdx.x;
const uint blockChanIdx = blockIdx.y / DIVUP(tgtPixels, 4);
const uint tgtPixelIdx = 4*(blockIdx.y % DIVUP(tgtPixels, 4)) + threadIdx.y;
const uint tgtPxY = tgtPixelIdx / tgtSize;
const uint tgtPxX = tgtPixelIdx % tgtSize;
const uint srcPixelIdx = (startY + tgtPxY) * imgSize + startX + tgtPxX;
if (tgtPixelIdx < tgtPixels) {
imgs += (blockChanIdx * imgPixels + srcPixelIdx) * imgStride + caseIdx;
target += (blockChanIdx * tgtPixels + tgtPixelIdx) * numImages + caseIdx;
#pragma unroll
for (uint i = 0; i < imgsPerThread; ++i) {
if (!checkCaseBounds || (caseIdx + 32 * i < numImages)) {
target[i * 32] = imgs[i * 32];
}
}
}
}
/*
* Block size 4x32
* blockIdx.y determines pixel idx in batches of 4
* blockIdx.x determines case idx in batches of 32*imgsPerThread
* threadIdx.y determines pixel idx
* threadIdx.x determines case idx
*
* imgs: (3, imgPixels, numImages) with given imgStride
* target: (3, imgPixels, numImages)
*
* Each thread produces (y,u,v) values for a particular (r,g,b) pixel
*
* The RGB --> YUV transform is (http://en.wikipedia.org/wiki/YUV):
*
* [Y] [0.2126 0.7152 0.0722 ][R]
* [U] = [-0.09991 -0.33609 0.436 ][G]
* [V] [0.615 -0.55861 -0.05639][B]
*/
template <int imgsPerThread, bool checkCaseBounds>
__global__ void kRGBToYUV(float* imgs, float* target, const int imgPixels, const int numImages, const int imgStride) {
const int caseIdx = blockIdx.x * 32 * imgsPerThread + threadIdx.x;
const int pxIdx = blockIdx.y * 4 + threadIdx.y;
if (pxIdx < imgPixels) {
const int imgChannelStride = imgPixels * imgStride;
const int tgtChannelStride = imgPixels * numImages;
imgs += pxIdx * imgStride + caseIdx;
target += pxIdx * numImages + caseIdx;
#pragma unroll
for (int i = 0; i < imgsPerThread; ++i) {
if (!checkCaseBounds || caseIdx + i * 32 < numImages) {
const float R = imgs[0 * imgChannelStride + i * 32];
const float G = imgs[1 * imgChannelStride + i * 32];
const float B = imgs[2 * imgChannelStride + i * 32];
target[0 * tgtChannelStride + i * 32] = 0.2126f * R + 0.7152f * G + 0.0722f * B; // Y
target[1 * tgtChannelStride + i * 32] = -0.09991f * R + -0.33609f * G + 0.436f * B; // U
target[2 * tgtChannelStride + i * 32] = 0.615f * R + -0.55861f * G + -0.05639f * B; // V
}
}
}
}
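/*
 * Single-pixel host reference for the RGB --> YUV transform implemented by kRGBToYUV
 * above; a sketch for clarity only, operating on scalars rather than the
 * (3, imgPixels, numImages) layout.
 */
static inline void rgbToYuvReferenceCPU(float R, float G, float B, float* Y, float* U, float* V) {
    *Y =  0.2126f  * R +  0.7152f  * G +  0.0722f  * B;
    *U = -0.09991f * R + -0.33609f * G +  0.436f   * B;
    *V =  0.615f   * R + -0.55861f * G + -0.05639f * B;
}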
__device__ inline float labf(const float x) {
if (x > 0.0088564517f) {
return __powf(x, 0.3333f);
}
return 7.787037f * x + 0.13793103f;
}
/*
* Block size 4x32
* blockIdx.y determines pixel idx in batches of 4
* blockIdx.x determines case idx in batches of 32*imgsPerThread
* threadIdx.y determines pixel idx
* threadIdx.x determines case idx
*
* imgs: (3, imgPixels, numImages) with given imgStride
* target: (3, imgPixels, numImages)
*
* This proceeds in two steps.
*
* - First, RGB values are linearly transformed to XYZ as per
* http://en.wikipedia.org/wiki/CIE_XYZ_color_space
* - Second, XYZ values are nonlinearly transformed to L*a*b* as per
* http://en.wikipedia.org/wiki/Lab_color_space#The_forward_transformation
*
* Each thread produces (L*,a*,b*) values for a particular (r,g,b) pixel
*
* The RGB --> XYZ transform is:
*
* [X] [0.49 0.31 0.2 ][R]
* [Y] = 5.6506753 * [0.17697 0.8124 0.01063 ][G]
* [Z] [0 0.01 0.99 ][B]
*
* NOTE: The input should be in the range 0-1. Don't do mean-subtraction beforehand.
*
* Then X_max, Y_max, Z_max = 5.6506753.
*
* The range of the L* values is [0, 100].
* If the center flag is given, the range will be [-50, 50].
*
*/
template <int imgsPerThread, bool checkCaseBounds, bool center>
__global__ void kRGBToLAB(float* imgs, float* target, const int imgPixels, const int numImages, const int imgStride) {
const int caseIdx = blockIdx.x * 32 * imgsPerThread + threadIdx.x;
const int pxIdx = blockIdx.y * 4 + threadIdx.y;
if (pxIdx < imgPixels) {
const int imgChannelStride = imgPixels * imgStride;
const int tgtChannelStride = imgPixels * numImages;
imgs += pxIdx * imgStride + caseIdx;
target += pxIdx * numImages + caseIdx;
#pragma unroll
for (int i = 0; i < imgsPerThread; ++i) {
if (!checkCaseBounds || caseIdx + i * 32 < numImages) {
const float R = imgs[0 * imgChannelStride + i * 32];
const float G = imgs[1 * imgChannelStride + i * 32];
const float B = imgs[2 * imgChannelStride + i * 32];
const float X = (0.49f * R + 0.31f * G + 0.2f * B);
const float Y = (0.17697f * R + 0.8124f * G + 0.01063f * B);
const float Z = (0.01f * G + 0.99f * B);
const float labX = labf(X);
const float labY = labf(Y);
const float labZ = labf(Z);
target[0 * tgtChannelStride + i * 32] = 116.0f * labY - 16.0f - (center ? 50.0f : 0); // L*
target[1 * tgtChannelStride + i * 32] = 500.0f * (labX - labY); // a*
target[2 * tgtChannelStride + i * 32] = 200.0f * (labY - labZ); // b*
}
}
}
}
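/*
 * Single-pixel host reference for the RGB --> L*a*b* conversion above (a sketch for
 * clarity only). labfHost mirrors the __device__ labf helper; the input is assumed to
 * be in [0, 1] and 'center' subtracts 50 from L*, exactly as in kRGBToLAB.
 */
static inline float labfHost(float x) {
    return x > 0.0088564517f ? powf(x, 0.3333f) : 7.787037f * x + 0.13793103f;
}
static inline void rgbToLabReferenceCPU(float R, float G, float B, bool center,
                                        float* L, float* a, float* b) {
    const float X = 0.49f    * R + 0.31f   * G + 0.2f     * B;
    const float Y = 0.17697f * R + 0.8124f * G + 0.01063f * B;
    const float Z =                0.01f   * G + 0.99f    * B;
    const float lX = labfHost(X), lY = labfHost(Y), lZ = labfHost(Z);
    *L = 116.0f * lY - 16.0f - (center ? 50.0f : 0.0f);
    *a = 500.0f * (lX - lY);
    *b = 200.0f * (lY - lZ);
}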
/*
* Block size 16x32.
* Each block produces a 4x4 chunk of the output image.
* threadIdx.y determines pixel idx in 4x4 chunk.
* threadIdx.x determines case idx.
* blockIdx.x determines case idx in batches of 32*imgsPerThread.
* blockIdx.y determines 4x4 chunk idx, channel idx.
*
* imgs: (numChannels, imgPixels, numImages) with given imgStride
* target: (numChannels, tgtPixels, numImages)
*
* imgSize = scale * tgtSize (roughly)
*
* This is a rather naive kernel that relies on cache for speed. But all it's doing
* is basic texture manipulation, which is very local in nature, so it should be ok.
* Also, it will in practice be a tiny fraction of the runtime of a large convnet.
*
* So that is my justification for being lazy here.
*/
template <int imgsPerThread, bool checkCaseBounds>
__global__ void kResizeBilinear(float* imgs, float* target, const int imgSize, const int tgtSize,
const int numImages, const int imgStride, const float scale,
const float centerScale) {
const int numChunksX = DIVUP(tgtSize, 4);
const int numChunks = numChunksX * numChunksX;
const int channelIdx = blockIdx.y / numChunks;
const int chunkIdx = blockIdx.y % numChunks;
const int chunkIdxX = chunkIdx % numChunksX;
const int chunkIdxY = chunkIdx / numChunksX;
const int caseIdx = blockIdx.x * 32 * imgsPerThread + threadIdx.x;
const int imgPixels = imgSize * imgSize;
const int tgtPixels = tgtSize * tgtSize;
const int pxX = 4 * chunkIdxX + threadIdx.y % 4;
const int pxY = 4 * chunkIdxY + threadIdx.y / 4;
if (pxY < tgtSize && pxX < tgtSize) {
const int pxIdx = pxY * tgtSize + pxX;
imgs += channelIdx * imgPixels * imgStride + caseIdx;
target += channelIdx * tgtPixels * numImages + pxIdx * numImages + caseIdx;
// This will cause slight distortions at the edges when upsampling in some cases.
// But I think that's not a big deal.
const float srcPxX = fmaxf(0.0f, fminf(__int2float_rn(imgSize) - 1.01f, __int2float_rn(pxX) * scale + centerScale));
const float srcPxY = fmaxf(0.0f, fminf(__int2float_rn(imgSize) - 1.01f, __int2float_rn(pxY) * scale + centerScale));
const float u = floorf(srcPxX + 1) - srcPxX;
const float w = srcPxY - floorf(srcPxY);
// Consider doing max(0, min(imgSize, x)) here
const int srcPx0 = (__float2int_rd(srcPxY) * imgSize + __float2int_rd(srcPxX)); // top-left
const int srcPx1 = srcPx0 + 1; // top-right
const int srcPx2 = srcPx0 + imgSize; // bottom-left
const int srcPx3 = srcPx2 + 1; // bottom-right
#pragma unroll
for (int c = 0; c < imgsPerThread; ++c) {
if (!checkCaseBounds || caseIdx + c * 32 < numImages) {
const float val0 = imgs[srcPx0 * imgStride + c * 32];
const float val1 = imgs[srcPx1 * imgStride + c * 32];
const float val2 = imgs[srcPx2 * imgStride + c * 32];
const float val3 = imgs[srcPx3 * imgStride + c * 32];
const float c0 = u * (val0 - val1) + val1;
const float c1 = u * (val2 - val3) + val3;
target[32 * c] = w * (c1 - c0) + c0;
}
}
}
}
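/*
 * Host reference for the bilinear sample computed per output pixel in kResizeBilinear
 * (a sketch only; img is assumed to be one channel of one image, stored as an
 * imgSize x imgSize row-major array). The source coordinate is clamped the same way the
 * kernel clamps it, then the 2x2 neighborhood is blended with weights u (left column)
 * and w (bottom row).
 */
static inline float resizeBilinearSampleCPU(const float* img, int imgSize, int pxX, int pxY,
                                            float scale, float centerScale) {
    const float srcPxX = fmaxf(0.0f, fminf(float(imgSize) - 1.01f, pxX * scale + centerScale));
    const float srcPxY = fmaxf(0.0f, fminf(float(imgSize) - 1.01f, pxY * scale + centerScale));
    const float u = floorf(srcPxX + 1) - srcPxX; // weight of the left column
    const float w = srcPxY - floorf(srcPxY);     // weight of the bottom row
    const int x0 = int(srcPxX), y0 = int(srcPxY);
    const float val0 = img[y0 * imgSize + x0];           // top-left
    const float val1 = img[y0 * imgSize + x0 + 1];       // top-right
    const float val2 = img[(y0 + 1) * imgSize + x0];     // bottom-left
    const float val3 = img[(y0 + 1) * imgSize + x0 + 1]; // bottom-right
    const float c0 = u * (val0 - val1) + val1;
    const float c1 = u * (val2 - val3) + val3;
    return w * (c1 - c0) + c0;
}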
/*
* Block size B_YxB_X.
* B_X*imgsPerThread*blockIdx.x + threadIdx.x determines img idx
* B_Y*blockIdx.y + threadIdx.y determines img row (col if !horiz), channel idx
*
* imgs: (numChannels, imgPixels, numImages) with given imgStride
* filter: (1, 2*radius + 1)
* target: (numChannels, imgPixels, numImages)
*
* target can be the same matrix as imgs.
* radius must be one of 3, 5, 7, 9.
*
* Tried imgsPerThread, slower.
*/
template<int B_Y, int B_X, int radius>
__global__ void kGaussianBlur(float* imgs, float* filter, float* target, const int imgSize,
const int numImages, const int imgStride,
const bool horiz,
const float scaleTargets, const float scaleOutputs) {
    __shared__ float shFilter[2*radius]; // the loops below read taps 0..2*radius-1 and load filterWidth-1 = 2*radius of them
const int imgPixels = imgSize * imgSize;
const int ty = B_Y * blockIdx.y + threadIdx.y;
const int channelIdx = ty / imgSize;
const int rowIdx = ty % imgSize;
const int imgIdx = B_X*blockIdx.x + threadIdx.x;
const int filterWidth = 2*radius+1;
// const int tidx = B_Y * threadIdx.y + threadIdx.x;
if (horiz) {
imgs += channelIdx * imgPixels * imgStride + rowIdx * imgSize * imgStride + imgIdx;
target += channelIdx * imgPixels * numImages + rowIdx * imgSize * numImages + imgIdx;
} else {
imgs += channelIdx * imgPixels * imgStride + rowIdx * imgStride + imgIdx;
target += channelIdx * imgPixels * numImages + rowIdx * numImages + imgIdx;
}
float outputs[filterWidth-1];
#pragma unroll
for (int r = 0; r < filterWidth-1; r++) {
outputs[r] = 0;
}
if (threadIdx.x < filterWidth-1) {
shFilter[threadIdx.x] = filter[threadIdx.x];
}
__syncthreads();
if (imgIdx < numImages) {
// This writes radius*2 = filterWidth - 1 values to outputs
#pragma unroll
for (int col = 0; col < radius; col++) {
float px = imgs[0];
#pragma unroll
for (int r = 0; r < radius + 1 + col; r++) {
outputs[r] += px * shFilter[radius + col - r];
}
imgs += horiz ? imgStride : imgStride * imgSize;
}
// Unfortunately this has to be at this level of granularity
if (scaleTargets != 0) {
for (int col = radius; col < imgSize ; col++) { // loop over img columns
float px = imgs[0];
target[0] = scaleTargets * target[0] + scaleOutputs * (outputs[0] + px * shFilter[0]);
#pragma unroll
for (int r = 1; r < radius*2; r++) {
outputs[r-1] = outputs[r] + px * shFilter[r];
}
outputs[filterWidth - 2] = px * shFilter[0];
imgs += horiz ? imgStride : imgStride * imgSize;
target += horiz ? numImages : numImages * imgSize;
}
#pragma unroll
for (int r = 0; r < radius; r++) {
float* t = &target[0];
t[0] = scaleTargets * t[0] + scaleOutputs * outputs[r];
target += horiz ? numImages : numImages * imgSize;
}
} else {
for (int col = radius; col < imgSize ; col++) { // loop over img columns
float px = imgs[0];
target[0] = scaleOutputs * (outputs[0] + px * shFilter[0]);
#pragma unroll
for (int r = 1; r < radius*2; r++) {
outputs[r-1] = outputs[r] + px * shFilter[r];
}
outputs[filterWidth - 2] = px * shFilter[0];
imgs += horiz ? imgStride : imgStride * imgSize;
target += horiz ? numImages : numImages * imgSize;
}
#pragma unroll
for (int r = 0; r < radius; r++) {
target[0] = scaleOutputs * outputs[r];
target += horiz ? numImages : numImages * imgSize;
}
}
}
}
/*
* Block size B_YxB_X
* blockIdx.x determines output.x, image idx in batches of B_X*imgsPerThread
* blockIdx.y determines output.y, filter idx in batches of B_Y*filtersPerThread
*
* So each block does one output for some number of images/filters.
*
* threadIdx.x determines img idx
* threadIdx.y determines filter idx
*
* imgs: (numChannels, imgPixels, numImages)
* target: (numChannels, numOutputs, numImages)
*
* numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false
* numFilters must be divisible by filtersPerThread
*/
template<int B_Y, int B_X, int imgsPerThread, int chansPerThread, bool checkCaseBounds>
__global__ void kBedOfNails(float* imgs, float* target, const int imgSize, const int numChannels,
const int numImages, const int startX, const int strideX, const int outputsX,
const bool reverse, const float scaleTargets, const float scaleOutput) {
const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread);
const int numChanBlocks = DIVUP(numChannels, B_Y*chansPerThread);
const int outputIdxX = blockIdx.x / numImgBlocks;
const int outputIdxY = blockIdx.y / numChanBlocks;
const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread;
const int blockChanIdx = (blockIdx.y % numChanBlocks) * B_Y * chansPerThread;
const int myChanIdx = (blockChanIdx + threadIdx.y*chansPerThread);
if (myChanIdx >= numChannels) {
return;
}
// if (blockIdx.x != 0 || blockIdx.y != 0) {
// return;
// }
const int outputIdx = outputIdxY * outputsX + outputIdxX;
const int numOutputs = outputsX * outputsX;
const int imgPixels = imgSize * imgSize;
const int startImgPxX = startX + outputIdxX * strideX;
const int startImgPxY = startX + outputIdxY * strideX;
const int imgIdx = blockImgIdx + threadIdx.x;
const int imgPx = startImgPxY * imgSize + startImgPxX;
imgs += myChanIdx * imgPixels * numImages + imgPx * numImages + imgIdx;
target += (myChanIdx * numOutputs + outputIdx) * numImages + imgIdx;
if (scaleTargets != 0) {
if (!reverse) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int c = 0; c < chansPerThread; c++) {
target[c * numOutputs * numImages + i * B_X] = scaleTargets * target[c * numOutputs * numImages + i * B_X] + scaleOutput * imgs[c * imgPixels * numImages + i * B_X];
}
}
}
} else {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int c = 0; c < chansPerThread; c++) {
imgs[c * imgPixels * numImages + i * B_X] = scaleTargets * imgs[c * imgPixels * numImages + i * B_X] + scaleOutput * target[c * numOutputs * numImages + i * B_X];
}
}
}
}
} else {
if (!reverse) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int c = 0; c < chansPerThread; c++) {
target[c * numOutputs * numImages + i * B_X] = scaleOutput * imgs[c * imgPixels * numImages + i * B_X];
}
}
}
} else {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int c = 0; c < chansPerThread; c++) {
imgs[c * imgPixels * numImages + i * B_X] = scaleOutput * target[c * numOutputs * numImages + i * B_X];
}
}
}
}
}
}
/*
* imgs: (numChannels, imgPixels, numImages)
* target: (numChannels, outputs, numImages)
*/
void _convBedOfNails(NVMatrix& images, NVMatrix& target, int numChannels, int imgSize, int startX, int strideX,
bool reverse, float scaleTargets, float scaleOutput) {
int numImages = reverse ? target.getNumCols() : images.getNumCols();
int imgPixels = imgSize * imgSize;
assert(!images.isTrans());
assert(!target.isTrans());
assert(images.isContiguous());
assert(target.isContiguous());
assert(strideX > 1);
int outputsX = DIVUP(imgSize, strideX);
int outputs = outputsX * outputsX;
if (reverse) {
assert(target.getNumRows() == numChannels * outputs);
} else {
assert(images.getNumRows() == numChannels * imgPixels);
}
if (scaleTargets == 0) {
if (reverse) {
images.resize(numChannels * imgPixels, numImages);
images.apply(NVMatrixOps::Zero());
} else {
target.resize(numChannels*outputs, numImages);
}
} else {
if (reverse) {
assert(images.getNumRows() == numChannels * outputs);
assert(images.getNumCols() == numImages);
} else {
assert(target.getNumRows() == numChannels * outputs);
assert(target.getNumCols() == numImages);
}
}
int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1;
bool checkCaseBounds = numImages % (32*imgsPerThread) != 0;
int chansPerThread = numChannels % 8 == 0 ? 2 : 1;
dim3 threads(32, 4);
dim3 blocks(DIVUP(numImages,32*imgsPerThread) * outputsX, DIVUP(numChannels, 4 * chansPerThread) * outputsX);
if (imgsPerThread == 4) {
if (chansPerThread == 1) {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(kBedOfNails<4, 32, 4, 1, true>, cudaFuncCachePreferL1);
kBedOfNails<4, 32, 4, 1, true><<<blocks, threads>>>(images.getDevData(), target.getDevData(),
imgSize, numChannels, numImages, startX, strideX, outputsX,
reverse, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(kBedOfNails<4, 32, 4, 1, false>, cudaFuncCachePreferL1);
kBedOfNails<4, 32, 4, 1, false><<<blocks, threads>>>(images.getDevData(), target.getDevData(),
imgSize, numChannels, numImages, startX, strideX, outputsX,
reverse, scaleTargets, scaleOutput);
}
} else {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(kBedOfNails<4, 32, 4, 2, true>, cudaFuncCachePreferL1);
kBedOfNails<4, 32, 4, 2, true><<<blocks, threads>>>(images.getDevData(), target.getDevData(),
imgSize, numChannels, numImages, startX, strideX, outputsX,
reverse, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(kBedOfNails<4, 32, 4, 2, false>, cudaFuncCachePreferL1);
kBedOfNails<4, 32, 4, 2, false><<<blocks, threads>>>(images.getDevData(), target.getDevData(),
imgSize, numChannels, numImages, startX, strideX, outputsX,
reverse, scaleTargets, scaleOutput);
}
}
} else if (imgsPerThread == 2) {
if (chansPerThread == 1) {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(kBedOfNails<4, 32, 2, 1, true>, cudaFuncCachePreferL1);
kBedOfNails<4, 32, 2, 1, true><<<blocks, threads>>>(images.getDevData(), target.getDevData(),
imgSize, numChannels, numImages, startX, strideX, outputsX,
reverse, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(kBedOfNails<4, 32, 2, 1, false>, cudaFuncCachePreferL1);
kBedOfNails<4, 32, 2, 1, false><<<blocks, threads>>>(images.getDevData(), target.getDevData(),
imgSize, numChannels, numImages, startX, strideX, outputsX,
reverse, scaleTargets, scaleOutput);
}
} else {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(kBedOfNails<4, 32, 2, 2, true>, cudaFuncCachePreferL1);
kBedOfNails<4, 32, 2, 2, true><<<blocks, threads>>>(images.getDevData(), target.getDevData(),
imgSize, numChannels, numImages, startX, strideX, outputsX,
reverse, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(kBedOfNails<4, 32, 2, 2, false>, cudaFuncCachePreferL1);
kBedOfNails<4, 32, 2, 2, false><<<blocks, threads>>>(images.getDevData(), target.getDevData(),
imgSize, numChannels, numImages, startX, strideX, outputsX,
reverse, scaleTargets, scaleOutput);
}
}
} else {
if (chansPerThread == 1) {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(kBedOfNails<4, 32, 1, 1, true>, cudaFuncCachePreferL1);
kBedOfNails<4, 32, 1, 1, true><<<blocks, threads>>>(images.getDevData(), target.getDevData(),
imgSize, numChannels, numImages, startX, strideX, outputsX,
reverse, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(kBedOfNails<4, 32, 1, 1, false>, cudaFuncCachePreferL1);
kBedOfNails<4, 32, 1, 1, false><<<blocks, threads>>>(images.getDevData(), target.getDevData(),
imgSize, numChannels, numImages, startX, strideX, outputsX,
reverse, scaleTargets, scaleOutput);
}
} else {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(kBedOfNails<4, 32, 1, 2, true>, cudaFuncCachePreferL1);
kBedOfNails<4, 32, 1, 2, true><<<blocks, threads>>>(images.getDevData(), target.getDevData(),
imgSize, numChannels, numImages, startX, strideX, outputsX,
reverse, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(kBedOfNails<4, 32, 1, 2, false>, cudaFuncCachePreferL1);
kBedOfNails<4, 32, 1, 2, false><<<blocks, threads>>>(images.getDevData(), target.getDevData(),
imgSize, numChannels, numImages, startX, strideX, outputsX,
reverse, scaleTargets, scaleOutput);
}
}
}
}
void convBedOfNails(NVMatrix& images, NVMatrix& target, int numChannels, int imgSize, int startX,
int strideX, float scaleTargets, float scaleOutput) {
_convBedOfNails(images, target, numChannels, imgSize, startX, strideX, false, scaleTargets, scaleOutput);
}
void convBedOfNailsUndo(NVMatrix& actsGrad, NVMatrix& target, int numChannels, int imgSize,
int startX, int strideX, float scaleTargets, float scaleOutput) {
_convBedOfNails(target, actsGrad, numChannels, imgSize, startX, strideX, true, scaleTargets, scaleOutput);
}
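/*
 * Host sketch of the forward "bed of nails" subsampling performed by _convBedOfNails
 * with reverse=false, scaleTargets=0 and scaleOutput=1, for a single channel of a
 * single image (plain row-major arrays are assumed instead of NVMatrix, and
 * startX/strideX are assumed to keep every sampled pixel inside the image, which is
 * also what the kernel assumes).
 */
static void bedOfNailsReferenceCPU(const float* img, float* out, int imgSize, int startX, int strideX) {
    const int outputsX = DIVUP(imgSize, strideX);
    for (int oy = 0; oy < outputsX; oy++) {
        for (int ox = 0; ox < outputsX; ox++) {
            const int iy = startX + oy * strideX;
            const int ix = startX + ox * strideX;
            out[oy * outputsX + ox] = img[iy * imgSize + ix]; // copy every strideX-th pixel
        }
    }
}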
/*
* imgs: (numChannels, imgPixels, numImages) with given imgStride
* filter: (1, 2*radius + 1)
* target: (numChannels, imgPixels, numImages)
*/
void convGaussianBlur(NVMatrix& images, NVMatrix& filter, NVMatrix& target, bool horiz, int numChannels,
float scaleTargets, float scaleOutputs) {
int numImages = images.getNumCols();
int radius = filter.getNumCols() / 2;
int imgPixels = images.getNumRows() / numChannels;
int imgSize = int(sqrt(imgPixels));
assert(imgPixels == imgSize * imgSize);
assert(radius >= 1 && radius <= 4);
assert(imgSize >= 2 * radius + 1);
assert(filter.getNumRows() == 1);
assert(images.getNumRows() == numChannels * imgPixels);
assert(!images.isTrans());
assert(!filter.isTrans());
assert(!target.isTrans());
assert(target.isContiguous());
if (scaleTargets == 0) {
target.resize(images);
} else {
assert(target.isSameDims(images));
}
dim3 threads(32, 4);
dim3 blocks(DIVUP(numImages, threads.x), DIVUP(numChannels*imgSize, threads.y));
if (radius == 1) {
cudaFuncSetCacheConfig(kGaussianBlur<4, 32, 1>, cudaFuncCachePreferL1);
kGaussianBlur<4, 32, 1><<<blocks, threads>>>(images.getDevData(), filter.getDevData(), target.getDevData(),
imgSize, numImages, images.getStride(), horiz, scaleTargets, scaleOutputs);
} else if (radius == 2) {
cudaFuncSetCacheConfig(kGaussianBlur<4, 32, 2>, cudaFuncCachePreferL1);
kGaussianBlur<4, 32, 2><<<blocks, threads>>>(images.getDevData(), filter.getDevData(), target.getDevData(),
imgSize, numImages, images.getStride(), horiz, scaleTargets, scaleOutputs);
} else if (radius == 3) {
cudaFuncSetCacheConfig(kGaussianBlur<4, 32, 3>, cudaFuncCachePreferL1);
kGaussianBlur<4, 32, 3><<<blocks, threads>>>(images.getDevData(), filter.getDevData(), target.getDevData(),
imgSize, numImages, images.getStride(), horiz, scaleTargets, scaleOutputs);
} else if (radius == 4) {
cudaFuncSetCacheConfig(kGaussianBlur<4, 32, 4>, cudaFuncCachePreferL1);
kGaussianBlur<4, 32, 4><<<blocks, threads>>>(images.getDevData(), filter.getDevData(), target.getDevData(),
imgSize, numImages, images.getStride(), horiz, scaleTargets, scaleOutputs);
}
}
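/*
 * Sketch of how the (1, 2*radius + 1) separable filter expected by convGaussianBlur
 * might be filled on the host: a normalized Gaussian with the given standard deviation.
 * Copying these taps into an NVMatrix of shape (1, 2*radius + 1) and calling
 * convGaussianBlur once with horiz=true and once with horiz=false is left to the
 * caller; only the tap computation is shown here.
 */
static void makeGaussianFilterCPU(float* filter, int radius, float sigma) {
    float sum = 0;
    for (int i = 0; i < 2 * radius + 1; i++) {
        const float d = float(i - radius);
        filter[i] = expf(-d * d / (2 * sigma * sigma));
        sum += filter[i];
    }
    for (int i = 0; i < 2 * radius + 1; i++) {
        filter[i] /= sum; // normalize so the taps sum to 1
    }
}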
/*
* Block size 1x128
* blockIdx.x determines pixel.x, image idx in batches of 128*imgsPerThread
* blockIdx.y determines pixel.y
*
 * So each block does one output for some number of images and all the filters.
*
* threadIdx.x determines img idx
*
* imgs: (numFilters, imgPixels, numImages)
* meanDiffs: (numFilters, imgPixels, numImages)
* denoms: (numFilters, imgPixels, numImages) (out)
* target: (numFilters, imgPixels, numImages) (out)
*
 * numImages must be divisible by 128*imgsPerThread if checkCaseBounds is false
 * numFilters must equal the numFilters template parameter (the kernel loops over all of them)
*/
template<int imgsPerThread, int numFilters, bool checkCaseBounds>
__global__ void kCNorm_fewfilter(float* imgs, float* meanDiffs, float* denoms, float* target, const int imgSize,
const int numImages, const int sizeX, const float addScale, const float powScale) {
const int imgPixels = imgSize * imgSize;
const int numImgBlocks = DIVUP(numImages, 128*imgsPerThread);
const int pxIdxX = blockIdx.x / numImgBlocks;
const int pxIdxY = blockIdx.y;
const int blockImgIdx = (blockIdx.x % numImgBlocks) * 128 * imgsPerThread;
const int pxIdx = pxIdxY * imgSize + pxIdxX;
const int startPxX = -sizeX/2 + pxIdxX;
const int startPxY = -sizeX/2 + pxIdxY;
const int imgIdx = blockImgIdx + threadIdx.x;
imgs += pxIdx * numImages + imgIdx;
denoms += pxIdx * numImages + imgIdx;
meanDiffs += imgIdx;
target += pxIdx * numImages + imgIdx;
float prod[numFilters][imgsPerThread];
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * 128 < numImages) {
#pragma unroll
for (int f = 0; f < numFilters; f++) {
prod[f][i] = 0;
}
}
}
const int loopStartY = MAX(0, startPxY);
const int loopStartX = MAX(0, startPxX);
const int loopEndY = MIN(imgSize, startPxY + sizeX);
const int loopEndX = MIN(imgSize, startPxX + sizeX);
for (int y = loopStartY; y < loopEndY; y++) {
for (int x = loopStartX; x < loopEndX; x++) {
const int imgPx = y * imgSize + x;
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * 128 < numImages) {
#pragma unroll
for (int f = 0; f < numFilters; f++) {
prod[f][i] += square(meanDiffs[(f * imgPixels + imgPx) * numImages + i * 128]);
}
}
}
}
}
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * 128 < numImages) {
#pragma unroll
for (int f = 0; f < numFilters; f++) {
//prod[f][i] = 1 + addScale * prod[f][i];
prod[f][i] = 2 + addScale * prod[f][i];
denoms[f * imgPixels * numImages + i * 128] = prod[f][i];
target[f * imgPixels * numImages + i * 128] = imgs[f * imgPixels * numImages + i * 128] * __powf(prod[f][i], -powScale);
}
}
}
}
/*
* Block size B_YxB_X
* blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread
* blockIdx.y determines pixel.y, filter idx in batches of B_Y*filtersPerThread
*
* So each block does one pixel for some number of images/filters.
*
* threadIdx.x determines img idx
* threadIdx.y determines filter idx
*
* imgs: (numFilters, imgPixels, numImages)
* means: (numFilters, imgPixels, numImages)
* denoms: (numFilters, imgPixels, numImages) (out)
* target: (numFilters, imgPixels, numImages) (out)
*
* numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false
* numFilters must be divisible by B_Y*filtersPerThread
*/
template<int B_Y, int B_X, int imgsPerThread, int filtersPerThread, bool checkCaseBounds>
__global__ void kCNorm_manyfilter(float* imgs, float* meanDiffs, float* denoms, float* target, const int imgSize,
const int numFilters, const int numImages, const int sizeX,
const float addScale, const float powScale) {
const int imgPixels = imgSize * imgSize;
const int numImgBlocks = DIVUP(numImages, B_X*imgsPerThread);
const int numFilterBlocks = numFilters/(B_Y*filtersPerThread);
const int pxIdxX = blockIdx.x / numImgBlocks;
const int pxIdxY = blockIdx.y / numFilterBlocks;
const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread;
const int blockFilterIdx = (blockIdx.y % numFilterBlocks) * B_Y * filtersPerThread;
const int pxIdx = pxIdxY * imgSize + pxIdxX;
const int startPxX = -sizeX/2 + pxIdxX;
const int startPxY = -sizeX/2 + pxIdxY;
const int imgIdx = blockImgIdx + threadIdx.x;
imgs += ((blockFilterIdx + threadIdx.y) * imgPixels + pxIdx) * numImages + imgIdx;
meanDiffs += (blockFilterIdx + threadIdx.y) * imgPixels * numImages + imgIdx;
denoms += ((blockFilterIdx + threadIdx.y) * imgPixels + pxIdx) * numImages + imgIdx;
target += ((blockFilterIdx + threadIdx.y) * imgPixels + pxIdx) * numImages + imgIdx;
float prod[filtersPerThread][imgsPerThread];
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
prod[f][i] = 0;
}
}
}
const int loopStartY = MAX(0, startPxY);
const int loopStartX = MAX(0, startPxX);
const int loopEndY = MIN(imgSize, startPxY + sizeX);
const int loopEndX = MIN(imgSize, startPxX + sizeX);
for (int y = loopStartY; y < loopEndY; y++) {
for (int x = loopStartX; x < loopEndX; x++) {
const int imgPx = y * imgSize + x;
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
prod[f][i] += square(meanDiffs[(f * B_Y * imgPixels + imgPx) * numImages + i * B_X]);
}
}
}
}
}
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
//prod[f][i] = 1 + addScale * prod[f][i];
prod[f][i] = 2 + addScale * prod[f][i];
denoms[f * B_Y * imgPixels * numImages + i * B_X] = prod[f][i];
target[f * B_Y * imgPixels * numImages + i * B_X] = imgs[f * B_Y * imgPixels * numImages + i * B_X] * __powf(prod[f][i], -powScale);
}
}
}
}
/*
* Block size 16xB_X
* blockIdx.x determines 4x4 pixel.x region, image idx in batches of B_X*imgsPerThread
* blockIdx.y determines 4x4 pixel.y region, filter idx in batches of filtersPerThread
*
* So each block does 4x4 region of pixels for some number of images/filters.
*
* threadIdx.x determines img idx
* threadIdx.y determines pixel idx
*
* imgs: (numFilters, imgPixels, numImages)
* means: (numFilters, imgPixels, numImages)
* denoms: (numFilters, imgPixels, numImages) (out)
* target: (numFilters, imgPixels, numImages) (out)
*
* B_X one of 8, 16, 32
* imgsPerThread one of 1, 2, 4, 8, 16
*
 * B_X*imgsPerThread MUST be divisible by 32.
* Number of filters MUST be divisible by filtersPerThread.
*
* numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false
* numFilters must be divisible by filtersPerThread
*
* Final write-out will not be fully coalesced unless B_X is 32. But there's a lot more
* reading than writing here, and the reading is all coalesced, so it should be OK.
*/
template<int B_X, int imgsPerThread, int filtersPerThread, bool checkCaseBounds>
__global__ void kCNorm2(float* imgs, float* meanDiffs, float* denoms, float* target, const int imgSize,
const int numFilters, const int numImages, const int sizeX, const float addScale, const float powScale) {
__shared__ float shDiffs[filtersPerThread][B_X*imgsPerThread];
const int imgPixels = imgSize * imgSize;
const int numImgBlocks = DIVUP(numImages, B_X*imgsPerThread);
const int numFilterBlocks = numFilters/(filtersPerThread);
const int blockPxX = 4*(blockIdx.x / numImgBlocks);
const int blockPxY = 4*(blockIdx.y / numFilterBlocks);
const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread;
const int blockFilterIdx = (blockIdx.y % numFilterBlocks) * filtersPerThread;
const int tidx = threadIdx.y * B_X + threadIdx.x;
const int loadY = tidx / 32, loadX = tidx % 32;
const int startPxX = MAX(0, -sizeX/2 + blockPxX);
const int startPxY = MAX(0, -sizeX/2 + blockPxY);
const int endPxX = MIN(imgSize, blockPxX + DIVUP(sizeX, 2) + 3);
const int endPxY = MIN(imgSize, blockPxY + DIVUP(sizeX, 2) + 3);
const int myPxX = blockPxX + threadIdx.y % 4;
const int myPxY = blockPxY + threadIdx.y / 4;
const int myPxIdx = myPxY * imgSize + myPxX;
// const bool doWork = myPxX < imgSize && myPxY < imgSize;
const int myStartPxY = -sizeX/2 + myPxY;
const int myStartPxX = -sizeX/2 + myPxX;
const int myEndPxY = myPxY + DIVUP(sizeX, 2);
const int myEndPxX = myPxX + DIVUP(sizeX, 2);
const int imgIdx = blockImgIdx + threadIdx.x;
imgs += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx;
meanDiffs += (blockFilterIdx + loadY) * imgPixels * numImages + blockImgIdx + loadX;
denoms += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx;
target += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx;
float prod[filtersPerThread][imgsPerThread];
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
prod[f][i] = 0;
}
}
}
for (int y = startPxY; y < endPxY; y++) {
const bool isInY = y >= myStartPxY && y < myEndPxY;
for (int x = startPxX; x < endPxX; x++) {
const int px = y * imgSize + x;
// All the threads load a pixel from memory
#pragma unroll
for (int ly = 0; ly < filtersPerThread; ly += B_X/2) {
if (filtersPerThread % (B_X/2) == 0 || ly + loadY < filtersPerThread) {
#pragma unroll
for (int lx = 0; lx < B_X*imgsPerThread; lx += 32) {
if (!checkCaseBounds || lx + loadX + blockImgIdx < numImages) {
shDiffs[ly + loadY][lx + loadX] = meanDiffs[(ly * imgPixels + px) * numImages + lx];
}
}
}
}
__syncthreads();
// Each row of threads decides if it's interested in this pixel
if (isInY && x >= myStartPxX && x < myEndPxX) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
prod[f][i] += square(shDiffs[f][threadIdx.x + i * B_X]);
}
}
}
}
__syncthreads();
}
}
// imgs -= (loadY * imgPixels - myPxIdx) * numImages + loadX;
// imgs += threadIdx.x;
if (myPxX < imgSize && myPxY < imgSize) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
prod[f][i] = 1 + addScale * prod[f][i];
denoms[f * imgPixels * numImages + i * B_X] = prod[f][i];
target[f * imgPixels * numImages + i * B_X] = imgs[f * imgPixels * numImages + i * B_X] * __powf(prod[f][i], -powScale);
}
}
}
}
}
/*
* Block size B_YxB_X
* blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread
* blockIdx.y determines pixel.y, filter idx in batches of B_Y
*
* So each block does one pixel for some number of images/filters.
*
* threadIdx.x determines img idx
* threadIdx.y determines filter idx
*
* imgs: (numFilters, imgPixels, numImages)
* meanDiffs: (numFilters, imgPixels, numImages)
* denoms: (numFilters, imgPixels, numImages) (out)
* target: (numFilters, imgPixels, numImages) (out)
*
* numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false
* numFilters must be divisible by B_Y
*/
template<int B_Y, int B_X, int imgsPerThread, bool checkCaseBounds, bool blocked>
__global__ void kFCNorm(float* imgs, float* meanDiffs, float* denoms, float* target, const int imgSize,
const int numFilters, const int numImages, const int sizeF,
const float addScale, const float powScale) {
const int imgPixels = imgSize * imgSize;
const int numImgBlocks = DIVUP(numImages, B_X*imgsPerThread);
const int numFilterBlocks = numFilters/B_Y;
const int pxIdxX = blockIdx.x / numImgBlocks;
const int pxIdxY = blockIdx.y / numFilterBlocks;
const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread;
const int filterIdx = (blockIdx.y % numFilterBlocks) * B_Y + threadIdx.y;
const int pxIdx = pxIdxY * imgSize + pxIdxX;
const int imgIdx = blockImgIdx + threadIdx.x;
imgs += ((filterIdx) * imgPixels + pxIdx) * numImages + imgIdx;
meanDiffs += pxIdx * numImages + imgIdx;
denoms += ((filterIdx) * imgPixels + pxIdx) * numImages + imgIdx;
target += ((filterIdx) * imgPixels + pxIdx) * numImages + imgIdx;
float prod[imgsPerThread];
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
prod[i] = 0;
}
}
const int startF = blocked ? (filterIdx / sizeF) * sizeF : -sizeF/2 + filterIdx;
const int loopStartF = blocked ? startF : MAX(0, startF);
const int loopEndF = MIN(numFilters, startF + sizeF);
for (int f = loopStartF; f < loopEndF; ++f) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
prod[i] += square(meanDiffs[f * imgPixels * numImages + i * B_X]);
}
}
}
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
// prod[i] = 1 + addScale * prod[i];
prod[i] = 2 + addScale * prod[i];
denoms[i * B_X] = prod[i];
target[i * B_X] = imgs[i * B_X] * __powf(prod[i], -powScale);
}
}
}
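/*
 * Host reference for the cross-map denominator computed per (pixel, filter) by kFCNorm
 * above (a sketch only). meanDiffs holds one value per filter at this pixel; the window
 * of sizeF filters is either centered on filterIdx (blocked=false) or taken as the
 * block containing filterIdx (blocked=true), matching the kernel's startF logic. The
 * kernel then writes imgs * __powf(denom, -powScale) as the normalized output.
 */
static inline float fcNormDenomCPU(const float* meanDiffs, int numFilters, int filterIdx,
                                   int sizeF, bool blocked, float addScale) {
    const int startF = blocked ? (filterIdx / sizeF) * sizeF : -sizeF / 2 + filterIdx;
    const int loopStartF = blocked ? startF : (startF > 0 ? startF : 0);
    const int loopEndF = startF + sizeF < numFilters ? startF + sizeF : numFilters;
    float sumSq = 0;
    for (int f = loopStartF; f < loopEndF; f++) {
        sumSq += meanDiffs[f] * meanDiffs[f];
    }
    return 2 + addScale * sumSq;
}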
/*
* Block size B_YxB_X
* blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread
* blockIdx.y determines pixel.y, filter idx in batches of B_Y
*
* So each block does one output pixel for some number of images/filters.
*
* threadIdx.x determines img idx
* threadIdx.y determines filter idx
*
* outGrads: (numFilters, imgPixels, numImages)
* denoms: (numFilters, imgPixels, numImages)
* inputs: (numFilters, imgPixels, numImages)
* acts: (numFilters, imgPixels, numImages)
* target: (numFilters, imgPixels, numImages)
*
* numImages must be divisible by B_X*imgsPerThread
* numFilters must be divisible by B_Y
*
* TODO: this isn't really ideal
*/
template<int B_Y, int B_X, int imgsPerThread, bool add, bool checkCaseBounds, bool blocked>
__global__ void kFRNormUndo(float* outGrads, float* denoms, float* inputs, float* acts, float* target, const int imgSize, const int numFilters,
const int numImages, const int sizeF, const float powScale, const float scaleTargets, const float scaleOutputs) {
const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread);
const int numFilterBlocks = numFilters/B_Y;
const int pxIdxX = blockIdx.x / numImgBlocks;
const int pxIdxY = blockIdx.y / numFilterBlocks;
const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread;
const int filterIdx = (blockIdx.y % numFilterBlocks) * B_Y + threadIdx.y;
const int imgPixels = imgSize * imgSize;
const int pxIdx = pxIdxY * imgSize + pxIdxX;
const int imgIdx = blockImgIdx + threadIdx.x;
acts += pxIdx * numImages + imgIdx;
inputs += ((filterIdx) * imgPixels + pxIdx) * numImages + imgIdx;
denoms += ((filterIdx) * imgPixels + pxIdx) * numImages + imgIdx;
outGrads += ((filterIdx) * imgPixels + pxIdx) * numImages + imgIdx;
target += ((filterIdx) * imgPixels + pxIdx) * numImages + imgIdx;
float prod[imgsPerThread];
// if (imgIdx != 0 || pxIdx != 0 || filterIdx != 0) {
// return;
// }
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
prod[i] = 0;
}
const int startF = blocked ? (filterIdx / sizeF) * sizeF : -sizeF + sizeF/2 + 1 + filterIdx;
const int loopStartF = blocked ? startF : MAX(0, startF);
const int loopEndF = MIN(numFilters, startF + sizeF);
for (int f = loopStartF; f < loopEndF; ++f) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
prod[i] += acts[f * imgPixels * numImages + i * B_X];
}
}
}
// printf("gpu f start: %d, end: %d\n", loopStartF, loopEndF);
if (!add) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
const float inp = inputs[i * B_X];
const float out = outGrads[i * B_X];
const float den = denoms[i * B_X];
prod[i] = inp * prod[i] + out * __powf(den, -powScale);
target[i * B_X] = prod[i];
}
}
} else {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
const float inp = inputs[i * B_X];
const float out = outGrads[i * B_X];
const float den = denoms[i * B_X];
prod[i] = inp * prod[i] + out * __powf(den, -powScale);
target[i * B_X] = scaleTargets * target[i * B_X] + scaleOutputs * prod[i];
}
}
}
}
/*
* Block size B_YxB_X
* blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread
* blockIdx.y determines pixel.y, filter idx in batches of B_Y*filtersPerThread
*
* So each block does one pixel for some number of images/filters.
*
* threadIdx.x determines img idx
* threadIdx.y determines filter idx
*
* imgs: (numFilters, imgPixels, numImages)
* target: (numFilters, imgPixels, numImages)
*
* numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false
* numFilters must be divisible by B_Y*filtersPerThread
*
* sizeX should be something like 3 or 5 for this function. Not much more.
* TODO: write variant where each block does 4x4 region or so (this'll be based on kCNorm2).
*/
template<int B_Y, int B_X, int imgsPerThread, int filtersPerThread, bool checkCaseBounds>
__global__ void kTICA_manyfilter(float* imgs, float* target, const int imgSize,
const int numFilters, const int numImages, const int sizeX,
const float scaleTarget, const float scaleOutput) {
const int imgPixels = imgSize * imgSize;
const int numImgBlocks = DIVUP(numImages, B_X*imgsPerThread);
const int numFilterBlocks = numFilters/(B_Y*filtersPerThread);
const int pxIdxX = blockIdx.x / numImgBlocks;
const int pxIdxY = blockIdx.y / numFilterBlocks;
const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread;
const int blockFilterIdx = (blockIdx.y % numFilterBlocks) * B_Y * filtersPerThread;
const int pxIdx = pxIdxY * imgSize + pxIdxX;
const int startPxX = -sizeX/2 + pxIdxX;
const int startPxY = -sizeX/2 + pxIdxY;
const int imgIdx = blockImgIdx + threadIdx.x;
imgs += ((blockFilterIdx + threadIdx.y) * imgPixels) * numImages + imgIdx;
target += ((blockFilterIdx + threadIdx.y) * imgPixels + pxIdx) * numImages + imgIdx;
float prod[filtersPerThread][imgsPerThread];
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
prod[f][i] = 0;
}
}
}
const int loopStartY = MAX(0, startPxY);
const int loopStartX = MAX(0, startPxX);
const int loopEndY = MIN(imgSize, startPxY + sizeX);
const int loopEndX = MIN(imgSize, startPxX + sizeX);
for (int y = loopStartY; y < loopEndY; y++) {
for (int x = loopStartX; x < loopEndX; x++) {
const int imgPx = y * imgSize + x;
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
prod[f][i] += square(imgs[(f * B_Y * imgPixels + imgPx) * numImages + i * B_X]);
}
}
}
}
}
imgs += pxIdx * numImages;
if (scaleTarget == 0) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
                    target[f * B_Y * imgPixels * numImages + i * B_X] = scaleOutput * __fdividef(1.0f, 0.001f + sqrtf(prod[f][i]));
}
}
}
} else {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
                    target[f * B_Y * imgPixels * numImages + i * B_X] = scaleTarget * target[f * B_Y * imgPixels * numImages + i * B_X] + scaleOutput * __fdividef(1.0f, 0.001f + sqrtf(prod[f][i]));
}
}
}
}
}
/*
* Block size B_YxB_X
* blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread
* blockIdx.y determines pixel.y, filter idx in batches of B_Y*filtersPerThread
*
* So each block does one pixel for some number of images/filters.
*
* threadIdx.x determines img idx
* threadIdx.y determines filter idx
*
* imgs: (numFilters, imgPixels, numImages)
* ticas: (numFilters, imgPixels, numImages)
* target: (numFilters, imgPixels, numImages)
*
* numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false
* numFilters must be divisible by B_Y*filtersPerThread
*
* sizeX should be something like 3 or 5 for this function. Not much more.
* TODO: write variant where each block does 4x4 region or so (this'll be based on kCNorm2).
*/
template<int B_Y, int B_X, int imgsPerThread, int filtersPerThread, bool checkCaseBounds>
__global__ void kTICAGrad_manyfilter(float* imgs, float* ticas, float* target, const int imgSize,
const int numFilters, const int numImages, const int sizeX,
const float scaleTarget, const float scaleOutput) {
const int imgPixels = imgSize * imgSize;
const int numImgBlocks = DIVUP(numImages, B_X*imgsPerThread);
const int numFilterBlocks = numFilters/(B_Y*filtersPerThread);
const int pxIdxX = blockIdx.x / numImgBlocks;
const int pxIdxY = blockIdx.y / numFilterBlocks;
const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread;
const int blockFilterIdx = (blockIdx.y % numFilterBlocks) * B_Y * filtersPerThread;
const int pxIdx = pxIdxY * imgSize + pxIdxX;
const int startPxX = -sizeX/2 + pxIdxX;
const int startPxY = -sizeX/2 + pxIdxY;
const int imgIdx = blockImgIdx + threadIdx.x;
imgs += ((blockFilterIdx + threadIdx.y) * imgPixels + pxIdx) * numImages + imgIdx;
ticas += ((blockFilterIdx + threadIdx.y) * imgPixels) * numImages + imgIdx;
target += ((blockFilterIdx + threadIdx.y) * imgPixels + pxIdx) * numImages + imgIdx;
float prod[filtersPerThread][imgsPerThread];
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
prod[f][i] = 0;
}
}
}
const int loopStartY = MAX(0, startPxY);
const int loopStartX = MAX(0, startPxX);
const int loopEndY = MIN(imgSize, startPxY + sizeX);
const int loopEndX = MIN(imgSize, startPxX + sizeX);
for (int y = loopStartY; y < loopEndY; y++) {
for (int x = loopStartX; x < loopEndX; x++) {
const int imgPx = y * imgSize + x;
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
// adding 1/S values
prod[f][i] += ticas[(f * B_Y * imgPixels + imgPx) * numImages + i * B_X];
}
}
}
}
}
if (scaleTarget == 0) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
target[f * B_Y * imgPixels * numImages + i * B_X] = scaleOutput * -imgs[f * B_Y * imgPixels * numImages + i * B_X] * prod[f][i];
}
}
}
} else {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
target[f * B_Y * imgPixels * numImages + i * B_X] = scaleTarget * target[f * B_Y * imgPixels * numImages + i * B_X] + scaleOutput * -imgs[f * B_Y * imgPixels * numImages + i * B_X] * sqrtf(prod[f][i]);
}
}
}
}
}
/*
* Block size B_YxB_X
* blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread
* blockIdx.y determines pixel.y, filter idx in batches of B_Y*filtersPerThread
*
* So each block does one output pixel for some number of images/filters.
*
* threadIdx.x determines img idx
* threadIdx.y determines filter idx
*
 * avgGrads:    (numFilters, numOutputs, numImages)
 * target:      (numFilters, imgPixels, numImages)
*
* numImages must be divisible by B_X*imgsPerThread
* numFilters must be divisible by B_Y*filtersPerThread
*/
template<int B_Y, int B_X, int imgsPerThread, int filtersPerThread, bool add, bool checkCaseBounds>
__global__ void kLocalAvgUndo(float* avgGrads, float* target, const int imgSize, const int numFilters,
const int numImages, const int subsX, const int startX, const int strideX, const int outputsX,
const float scaleTargets, const float scaleOutputs) {
const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread);
const int blockPxX = blockIdx.x / numImgBlocks;
const int blockPxY = blockIdx.y / (numFilters/(B_Y*filtersPerThread));
const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread;
const int blockFilterIdx = (blockIdx.y % (numFilters/(B_Y*filtersPerThread))) * B_Y * filtersPerThread;
const int blockPx = blockPxY * imgSize + blockPxX;
const int numOutputs = outputsX * outputsX;
const int imgPixels = imgSize * imgSize;
const int startOutputY = blockPxY - startX < subsX ? 0 : 1 + (blockPxY - startX - subsX) / strideX;
const int endOutputY = MIN(outputsX, 1 + (blockPxY - startX) / strideX);
const int startOutputX = blockPxX - startX < subsX ? 0 : 1 + (blockPxX - startX - subsX) / strideX;
const int endOutputX = MIN(outputsX, 1 + (blockPxX - startX) / strideX);
const int imgIdx = blockImgIdx + threadIdx.x;
avgGrads += ((blockFilterIdx + threadIdx.y) * numOutputs) * numImages + imgIdx;
target += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx;
float prod[filtersPerThread][imgsPerThread];
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
prod[f][i] = 0;
}
}
if (blockPxX >= startX && blockPxX < startX + strideX * (outputsX-1) + subsX
&& blockPxY >= startX && blockPxY < startX + strideX * (outputsX-1) + subsX) {
for (int my = startOutputY; my < endOutputY; my++) {
const float regionStartY = fmaxf(0, startX + my * strideX);
const float regionEndY = fminf(imgSize, startX + my * strideX + subsX);
const float regionSizeY = regionEndY - regionStartY;
for (int mx = startOutputX; mx < endOutputX; mx++) {
const int outputIdx = my * outputsX + mx;
const float regionStartX = fmaxf(0, startX + mx * strideX);
const float regionEndX = fminf(imgSize, startX + mx * strideX + subsX);
const float regionSizeX = regionEndX - regionStartX;
// It's important to do the division here, because pushing division into the below
// loops makes the code 4x slower.
const float regionSizeInv = 1.0f / (regionSizeX * regionSizeY);
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
prod[f][i] += avgGrads[(f * B_Y * numOutputs + outputIdx) * numImages + i * B_X] * regionSizeInv;
}
}
}
}
}
}
if (!add) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
target[f * B_Y * imgPixels * numImages + i * B_X] = prod[f][i];
}
}
}
} else {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
target[f * B_Y * imgPixels * numImages + i * B_X] = scaleTargets * target[f * B_Y * imgPixels * numImages + i * B_X] + scaleOutputs * prod[f][i];
}
}
}
}
}
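/*
 * Host reference for the average-pooling gradient at one input pixel, as computed by
 * kLocalAvgUndo above (a sketch only, for a single filter of a single image). Every
 * pooling window that covers (pxX, pxY) contributes its output gradient divided by the
 * window's clipped area.
 */
static float localAvgUndoReferenceCPU(const float* avgGrads, int imgSize, int pxX, int pxY,
                                      int subsX, int startX, int strideX, int outputsX) {
    float grad = 0;
    for (int my = 0; my < outputsX; my++) {
        const int wy0 = startX + my * strideX;
        if (pxY < wy0 || pxY >= wy0 + subsX) continue;      // window misses this row
        const float regionSizeY = fminf(float(imgSize), float(wy0 + subsX)) - fmaxf(0.0f, float(wy0));
        for (int mx = 0; mx < outputsX; mx++) {
            const int wx0 = startX + mx * strideX;
            if (pxX < wx0 || pxX >= wx0 + subsX) continue;  // window misses this column
            const float regionSizeX = fminf(float(imgSize), float(wx0 + subsX)) - fmaxf(0.0f, float(wx0));
            grad += avgGrads[my * outputsX + mx] / (regionSizeX * regionSizeY);
        }
    }
    return grad;
}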
/*
* Block size B_YxB_X
* blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread
* blockIdx.y determines pixel.y, filter idx in batches of B_Y*filtersPerThread
*
* So each block does one output pixel for some number of images/filters.
*
* threadIdx.x determines img idx
* threadIdx.y determines filter idx
*
* imgs: (numFilters, imgPixels, numImages)
* maxGrads: (numFilters, numOutputs, numImages)
* maxActs: (numFilters, numOutputs, numImages)
* target: (numFilters, imgPixels, numImages)
*
* numImages must be divisible by B_X*imgsPerThread
* numFilters must be divisible by B_Y*filtersPerThread
*/
template<int B_Y, int B_X, int imgsPerThread, int filtersPerThread, bool add, bool checkCaseBounds>
__global__ void kLocalMaxUndo(float* imgs, float* maxGrads, float* maxActs, float* target, const int imgSize, const int numFilters,
const int numImages, const int subsX, const int startX, const int strideX, const int outputsX,
const float scaleTargets, const float scaleOutputs) {
__shared__ float shImgs[B_Y*filtersPerThread][B_X*imgsPerThread];
const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread);
const int blockPxX = blockIdx.x / numImgBlocks;
const int blockPxY = blockIdx.y / (numFilters/(B_Y*filtersPerThread));
const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread;
const int blockFilterIdx = (blockIdx.y % (numFilters/(B_Y*filtersPerThread))) * B_Y * filtersPerThread;
const int blockPx = blockPxY * imgSize + blockPxX;
const int numOutputs = outputsX * outputsX;
const int imgPixels = imgSize * imgSize;
const int startOutputY = blockPxY - startX < subsX ? 0 : 1 + (blockPxY - startX - subsX) / strideX;
const int endOutputY = MIN(outputsX, 1 + (blockPxY - startX) / strideX);
const int startOutputX = blockPxX - startX < subsX ? 0 : 1 + (blockPxX - startX - subsX) / strideX;
const int endOutputX = MIN(outputsX, 1 + (blockPxX - startX) / strideX);
const int imgIdx = blockImgIdx + threadIdx.x;
imgs += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx;
maxGrads += ((blockFilterIdx + threadIdx.y) * numOutputs) * numImages
+ imgIdx;
maxActs += ((blockFilterIdx + threadIdx.y) * numOutputs) * numImages
+ imgIdx;
target += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx;
float prod[filtersPerThread][imgsPerThread];
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
prod[f][i] = 0;
}
}
if (blockPxX >= startX && blockPxX < startX + strideX * (outputsX-1) + subsX
&& blockPxY >= startX && blockPxY < startX + strideX * (outputsX-1) + subsX) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
shImgs[threadIdx.y + B_Y * f][threadIdx.x + B_X * i] = imgs[f * B_Y * imgPixels * numImages + i * B_X];
}
}
}
for (int my = startOutputY; my < endOutputY; my++) {
for (int mx = startOutputX; mx < endOutputX; mx++) {
const int outputIdx = my * outputsX + mx;
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
const float ma = maxActs[(f * B_Y * numOutputs + outputIdx) * numImages + i * B_X];
const float mg = maxGrads[(f * B_Y * numOutputs + outputIdx) * numImages + i * B_X];
const float img = shImgs[threadIdx.y + B_Y * f][threadIdx.x + B_X * i];
prod[f][i] += (img == ma) * mg;
}
}
}
}
}
}
if (!add) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
target[f * B_Y * imgPixels * numImages + i * B_X] = prod[f][i];
}
}
}
} else {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
target[f * B_Y * imgPixels * numImages + i * B_X] = scaleTargets * target[f * B_Y * imgPixels * numImages + i * B_X] + scaleOutputs * prod[f][i];
}
}
}
}
}
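/*
 * Host reference for the max-pooling gradient at one input pixel, as computed by
 * kLocalMaxUndo above (a sketch only, for a single filter of a single image). The
 * gradient of a pooling window is routed to an input pixel only where that pixel's
 * value equals the window's recorded maximum, so tied maxima receive the gradient
 * more than once, just as in the kernel.
 */
static float localMaxUndoReferenceCPU(float imgVal, const float* maxGrads, const float* maxActs,
                                      int pxX, int pxY, int subsX, int startX, int strideX,
                                      int outputsX) {
    float grad = 0;
    for (int my = 0; my < outputsX; my++) {
        const int wy0 = startX + my * strideX;
        if (pxY < wy0 || pxY >= wy0 + subsX) continue; // window does not cover this pixel
        for (int mx = 0; mx < outputsX; mx++) {
            const int wx0 = startX + mx * strideX;
            if (pxX < wx0 || pxX >= wx0 + subsX) continue;
            const int o = my * outputsX + mx;
            grad += (imgVal == maxActs[o]) ? maxGrads[o] : 0.0f;
        }
    }
    return grad;
}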
/*
* acts := -2 x scale x acts x outGrads / denoms
*/
template<int B_X, int eltsPerThread>
__global__ void kRNormUndoPrelims(float* acts, float* denoms, float* outGrads,
const uint numElements, const float scale) {
const uint e = B_X * blockIdx.x * eltsPerThread + threadIdx.x;
const uint numThreads = B_X * gridDim.x;
for (uint i = e; i < numElements; i += numThreads*eltsPerThread) {
#pragma unroll
for (uint k = 0; k < eltsPerThread; k++) {
if (i + k * B_X < numElements) {
acts[i + k * B_X] = __fdividef(scale*outGrads[i + k * B_X] * acts[i + k * B_X], denoms[i + k * B_X]);
}
}
}
}
/*
* Block size B_YxB_X
* blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread
* blockIdx.y determines pixel.y, filter idx in batches of B_Y*filtersPerThread
*
* So each block does one output pixel for some number of images/filters.
*
* threadIdx.x determines img idx
* threadIdx.y determines filter idx
*
* outGrads: (numFilters, imgPixels, numImages)
* denoms: (numFilters, imgPixels, numImages)
* inputs: (numFilters, imgPixels, numImages)
* acts: (numFilters, imgPixels, numImages)
* target: (numFilters, imgPixels, numImages)
*
* numImages must be divisible by B_X*imgsPerThread
* numFilters must be divisible by B_Y*filtersPerThread
*
* TODO: this isn't really ideal
*/
template<int B_Y, int B_X, int imgsPerThread, int filtersPerThread, bool add, bool checkCaseBounds>
__global__ void kRNormUndo(float* outGrads, float* denoms, float* inputs, float* acts, float* target, const int imgSize, const int numFilters,
const int numImages, const int sizeX, const float powScale, const float scaleTargets, const float scaleOutputs) {
const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread);
const int numFilterBlocks = numFilters/(B_Y*filtersPerThread);
const int blockPxX = blockIdx.x / numImgBlocks;
const int blockPxY = blockIdx.y / numFilterBlocks;
const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread;
const int blockFilterIdx = (blockIdx.y % numFilterBlocks) * B_Y * filtersPerThread;
const int blockPx = blockPxY * imgSize + blockPxX;
const int imgPixels = imgSize * imgSize;
const int startY = MAX(0, blockPxY + sizeX/2 - sizeX + 1);
const int startX = MAX(0, blockPxX + sizeX/2 - sizeX + 1);
const int endY = MIN(imgSize, blockPxY + sizeX/2 + 1);
const int endX = MIN(imgSize, blockPxX + sizeX/2 + 1);
const int imgIdx = blockImgIdx + threadIdx.x;
acts += ((blockFilterIdx + threadIdx.y) * imgPixels) * numImages + imgIdx;
inputs += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx;
denoms += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx;
outGrads += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx;
target += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx;
float prod[filtersPerThread][imgsPerThread];
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
prod[f][i] = 0;
}
}
for (int sy = startY; sy < endY; sy++) {
for (int sx = startX; sx < endX; sx++) {
const int outPx = sy * imgSize + sx;
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
prod[f][i] += acts[(f * B_Y * imgPixels + outPx) * numImages + i * B_X];
}
}
}
}
}
// outGrads += blockPx * numImages;
if (!add) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
const float inp = inputs[(f * B_Y * imgPixels) * numImages + i * B_X];
const float out = outGrads[(f * B_Y * imgPixels) * numImages + i * B_X];
const float den = denoms[(f * B_Y * imgPixels) * numImages + i * B_X];
prod[f][i] = inp * prod[f][i] + out * __powf(den, -powScale);
target[f * B_Y * imgPixels * numImages + i * B_X] = prod[f][i];
}
}
}
} else {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
const float inp = inputs[(f * B_Y * imgPixels) * numImages + i * B_X];
const float out = outGrads[(f * B_Y * imgPixels) * numImages + i * B_X];
const float den = denoms[(f * B_Y * imgPixels) * numImages + i * B_X];
prod[f][i] = inp * prod[f][i] + out * __powf(den, -powScale);
target[f * B_Y * imgPixels * numImages + i * B_X] =
scaleTargets * target[f * B_Y * imgPixels * numImages + i * B_X]
+ scaleOutputs * prod[f][i];
}
}
}
}
}
/*
* Block size 16xB_X
* blockIdx.x determines 4x4 pixel.x region, image idx in batches of B_X*imgsPerThread
* blockIdx.y determines 4x4 pixel.y region, filter idx in batches of filtersPerThread
*
* So each block does 4x4 region for some number of images/filters.
*
* threadIdx.x determines img idx
* threadIdx.y determines pixel idx
*
* outGrads: (numFilters, imgPixels, numImages)
* denoms: (numFilters, imgPixels, numImages)
* inputs: (numFilters, imgPixels, numImages)
* acts: (numFilters, imgPixels, numImages)
* target: (numFilters, imgPixels, numImages)
*
* B_X one of 8, 16, 32
* imgsPerThread one of 1, 2, 4, 8, 16
*
 * B_X * imgsPerThread MUST be divisible by 32.
* Number of filters MUST be divisible by filtersPerThread.
*
* numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false
* numFilters must be divisible by filtersPerThread
*
* Final write-out will not be fully coalesced unless B_X is 32. But there's a lot more
* reading than writing here, and the reading is all coalesced, so it should be OK.
*/
template<int B_X, int imgsPerThread, int filtersPerThread, bool add, bool checkCaseBounds>
__global__ void kRNormUndo2(float* outGrads, float* denoms, float* inputs, float* acts, float* target, const int imgSize, const int numFilters,
const int numImages, const int sizeX, const float powScale, const float scaleTargets, const float scaleOutputs) {
__shared__ float shActs[filtersPerThread][B_X*imgsPerThread];
const int imgPixels = imgSize * imgSize;
const int numImgBlocks = DIVUP(numImages, B_X*imgsPerThread);
const int numFilterBlocks = numFilters/(filtersPerThread);
const int blockPxX = 4*(blockIdx.x / numImgBlocks);
const int blockPxY = 4*(blockIdx.y / numFilterBlocks);
const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread;
const int blockFilterIdx = (blockIdx.y % numFilterBlocks) * filtersPerThread;
const int tidx = threadIdx.y * B_X + threadIdx.x;
const int loadY = tidx / 32, loadX = tidx % 32;
const int startPxX = MAX(0, -DIVUP(sizeX,2) + blockPxX + 1);
const int startPxY = MAX(0, -DIVUP(sizeX,2) + blockPxY + 1);
const int endPxX = MIN(imgSize, blockPxX + sizeX/2 + 4);
const int endPxY = MIN(imgSize, blockPxY + sizeX/2 + 4);
const int myPxX = blockPxX + threadIdx.y % 4;
const int myPxY = blockPxY + threadIdx.y / 4;
const int myPxIdx = myPxY * imgSize + myPxX;
// const bool doWork = myPxX < imgSize && myPxY < imgSize;
const int myStartPxY = -DIVUP(sizeX,2) + myPxY + 1;
const int myStartPxX = -DIVUP(sizeX,2) + myPxX + 1;
const int myEndPxY = myPxY + sizeX/2 + 1;
const int myEndPxX = myPxX + sizeX/2 + 1;
const int imgIdx = blockImgIdx + threadIdx.x;
acts += (blockFilterIdx + loadY) * imgPixels * numImages + blockImgIdx + loadX;
denoms += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx;
inputs += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx;
outGrads += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx;
target += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx;
float prod[filtersPerThread][imgsPerThread];
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
prod[f][i] = 0;
}
}
for (int y = startPxY; y < endPxY; y++) {
const bool isInY = y >= myStartPxY && y < myEndPxY;
for (int x = startPxX; x < endPxX; x++) {
const int px = y * imgSize + x;
// All the threads load a pixel from memory
#pragma unroll
for (int ly = 0; ly < filtersPerThread; ly += B_X/2) {
if (filtersPerThread % (B_X/2) == 0 || ly + loadY < filtersPerThread) {
#pragma unroll
for (int lx = 0; lx < B_X*imgsPerThread; lx += 32) {
if (!checkCaseBounds || lx + loadX + blockImgIdx < numImages) {
shActs[ly + loadY][lx + loadX] = acts[(ly * imgPixels + px) * numImages + lx];
}
}
}
}
__syncthreads();
// Each row of threads decides if it's interested in this pixel
if (isInY && x >= myStartPxX && x < myEndPxX) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
prod[f][i] += shActs[f][threadIdx.x + i * B_X];
}
}
}
}
__syncthreads();
}
}
acts -= (loadY * imgPixels - myPxIdx) * numImages + loadX;
acts += threadIdx.x;
if (myPxX < imgSize && myPxY < imgSize) {
if (!add) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
const float out = outGrads[f * imgPixels * numImages + i * B_X];
const float den = denoms[f * imgPixels * numImages + i * B_X];
const float inp = inputs[f * imgPixels * numImages + i * B_X];
prod[f][i] = inp * prod[f][i] + out * __powf(den, -powScale);
target[f * imgPixels * numImages + i * B_X] = prod[f][i];
}
}
}
} else {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
const float out = outGrads[f * imgPixels * numImages + i * B_X];
const float den = denoms[f * imgPixels * numImages + i * B_X];
const float inp = inputs[f * imgPixels * numImages + i * B_X];
prod[f][i] = inp * prod[f][i] + out * __powf(den, -powScale);
target[f * imgPixels * numImages + i * B_X] = scaleTargets * target[f * imgPixels * numImages + i * B_X] + scaleOutputs * prod[f][i];
}
}
}
}
}
}
void convLocalMaxUndo(NVMatrix& images, NVMatrix& maxGrads, NVMatrix& maxActs, NVMatrix& target,
int subsX, int startX, int strideX, int outputsX) {
convLocalMaxUndo(images, maxGrads, maxActs, target, subsX, startX, strideX, outputsX, 0, 1);
}
/*
* imgs: (numFilters, imgPixels, numImages)
* maxGrads: (numFilters, numOutputs, numImages)
 * maxActs:    (numFilters, numOutputs, numImages)
* target: (numFilters, imgPixels, numImages)
*/
void convLocalMaxUndo(NVMatrix& images, NVMatrix& maxGrads, NVMatrix& maxActs, NVMatrix& target,
int subsX, int startX, int strideX, int outputsX, float scaleTargets, float scaleOutput) {
int outputs = outputsX * outputsX;
int numImages = images.getNumCols();
int numFilters = maxGrads.getNumRows() / outputs;
int imgPixels = images.getNumRows() / numFilters;
assert(images.getNumRows() == numFilters * imgPixels);
int imgSize = int(sqrt(imgPixels));
assert(imgSize * imgSize == imgPixels);
assert(maxGrads.getNumRows() == numFilters * outputs);
assert(maxGrads.getNumCols() == numImages);
assert(!images.isTrans());
assert(!target.isTrans());
assert(!maxGrads.isTrans());
assert(!maxActs.isTrans());
assert(images.isContiguous());
assert(maxGrads.isContiguous());
assert(maxActs.isContiguous());
assert(maxGrads.isSameDims(maxActs));
assert(numFilters % 16 == 0);
// assert(numImages % 128 == 0);
assert(strideX <= subsX);
target.resize(images);
assert(target.isContiguous());
int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1;
int checkCaseBounds = numImages % (32*imgsPerThread) != 0;
dim3 threads(32, 4);
dim3 blocks(DIVUP(numImages,32*imgsPerThread) * imgSize, (numFilters / (4 * 2)) * imgSize);
if (imgsPerThread == 4) {
if (checkCaseBounds) {
if (scaleTargets == 0 && scaleOutput == 1) {
kLocalMaxUndo<4, 32, 4, 2, false, true><<<blocks, threads>>>(images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(),
imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput);
} else {
kLocalMaxUndo<4, 32, 4, 2, true, true><<<blocks, threads>>>(images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(),
imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput);
}
} else {
if (scaleTargets == 0 && scaleOutput == 1) {
kLocalMaxUndo<4, 32, 4, 2, false, false><<<blocks, threads>>>(images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(),
imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput);
} else {
kLocalMaxUndo<4, 32, 4, 2, true, false><<<blocks, threads>>>(images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(),
imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput);
}
}
} else if (imgsPerThread == 2) {
if (checkCaseBounds) {
if (scaleTargets == 0 && scaleOutput == 1) {
kLocalMaxUndo<4, 32, 2, 2, false, true><<<blocks, threads>>>(images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(),
imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput);
} else {
kLocalMaxUndo<4, 32, 2, 2, true, true><<<blocks, threads>>>(images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(),
imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput);
}
} else {
if (scaleTargets == 0 && scaleOutput == 1) {
kLocalMaxUndo<4, 32, 2, 2, false, false><<<blocks, threads>>>(images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(),
imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput);
} else {
kLocalMaxUndo<4, 32, 2, 2, true, false><<<blocks, threads>>>(images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(),
imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput);
}
}
} else {
if (checkCaseBounds) {
if (scaleTargets == 0 && scaleOutput == 1) {
kLocalMaxUndo<4, 32, 1, 2, false, true><<<blocks, threads>>>(images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(),
imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput);
} else {
kLocalMaxUndo<4, 32, 1, 2, true, true><<<blocks, threads>>>(images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(),
imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput);
}
} else {
if (scaleTargets == 0 && scaleOutput == 1) {
kLocalMaxUndo<4, 32, 1, 2, false, false><<<blocks, threads>>>(images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(),
imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput);
} else {
kLocalMaxUndo<4, 32, 1, 2, true, false><<<blocks, threads>>>(images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(),
imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput);
}
}
}
getLastCudaError("convLocalMaxUndo: kernel execution failed");
}
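/*
 * Usage sketch (illustrative; the variable names and sizes are hypothetical):
 * undoing a 3x3, stride-2 max pool over 64 filter maps on 32x32 inputs, so
 * outputsX = 16. images is (64*1024, numImages); maxGrads and maxActs are
 * (64*256, numImages), as described in the comment above.
 *
 *   // target := maxUndo(images, maxGrads, maxActs), overwriting target
 *   convLocalMaxUndo(images, maxGrads, maxActs, target, 3, 0, 2, 16);
 *   // accumulate instead: target := 1*target + 1*maxUndo(...)
 *   convLocalMaxUndo(images, maxGrads, maxActs, target, 3, 0, 2, 16, 1, 1);
 */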
void convLocalAvgUndo(NVMatrix& avgGrads, NVMatrix& target, int subsX, int startX, int strideX, int outputsX, int imgSize) {
convLocalAvgUndo(avgGrads, target, subsX, startX, strideX, outputsX, imgSize, 0, 1);
}
/*
* avgGrads: (numFilters, numOutputs, numImages)
* target: (numFilters, imgPixels, numImages)
*/
void convLocalAvgUndo(NVMatrix& avgGrads, NVMatrix& target,
int subsX, int startX, int strideX, int outputsX, int imgSize,
float scaleTargets, float scaleOutput) {
int numImages = avgGrads.getNumCols();
int outputs = outputsX * outputsX;
int imgPixels = imgSize * imgSize;
int numFilters = avgGrads.getNumRows() / outputs;
assert(avgGrads.getNumRows() == numFilters * outputs);
assert(!target.isTrans());
assert(!avgGrads.isTrans());
assert(avgGrads.isContiguous());
assert(numFilters % 16 == 0);
// assert(numImages % 128 == 0);
assert(strideX <= subsX);
target.resize(numFilters * imgPixels, numImages);
assert(target.isContiguous());
int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1;
int checkCaseBounds = numImages % (32*imgsPerThread) != 0;
dim3 threads(32, 4);
dim3 blocks(DIVUP(numImages,32*imgsPerThread) * imgSize, (numFilters / (4 * 4)) * imgSize);
if (imgsPerThread == 4) {
if (checkCaseBounds) {
if (scaleTargets == 0 && scaleOutput == 1) {
kLocalAvgUndo<4, 32, 4, 4, false, true><<<blocks, threads>>>(avgGrads.getDevData(), target.getDevData(),
imgSize, numFilters, numImages, subsX, startX, strideX,
outputsX, scaleTargets, scaleOutput);
} else {
kLocalAvgUndo<4, 32, 4, 4, true, true><<<blocks, threads>>>(avgGrads.getDevData(), target.getDevData(),
imgSize, numFilters, numImages, subsX, startX, strideX,
outputsX, scaleTargets, scaleOutput);
}
} else {
if (scaleTargets == 0 && scaleOutput == 1) {
kLocalAvgUndo<4, 32, 4, 4, false, false><<<blocks, threads>>>(avgGrads.getDevData(), target.getDevData(),
imgSize, numFilters, numImages, subsX, startX, strideX,
outputsX, scaleTargets, scaleOutput);
} else {
kLocalAvgUndo<4, 32, 4, 4, true, false><<<blocks, threads>>>(avgGrads.getDevData(), target.getDevData(),
imgSize, numFilters, numImages, subsX, startX, strideX,
outputsX, scaleTargets, scaleOutput);
}
}
} else if (imgsPerThread == 2) {
if (checkCaseBounds) {
if (scaleTargets == 0 && scaleOutput == 1) {
kLocalAvgUndo<4, 32, 2, 4, false, true><<<blocks, threads>>>(avgGrads.getDevData(), target.getDevData(),
imgSize, numFilters, numImages, subsX, startX, strideX,
outputsX, scaleTargets, scaleOutput);
} else {
kLocalAvgUndo<4, 32, 2, 4, true, true><<<blocks, threads>>>(avgGrads.getDevData(), target.getDevData(),
imgSize, numFilters, numImages, subsX, startX, strideX,
outputsX, scaleTargets, scaleOutput);
}
} else {
if (scaleTargets == 0 && scaleOutput == 1) {
kLocalAvgUndo<4, 32, 2, 4, false, false><<<blocks, threads>>>(avgGrads.getDevData(), target.getDevData(),
imgSize, numFilters, numImages, subsX, startX, strideX,
outputsX, scaleTargets, scaleOutput);
} else {
kLocalAvgUndo<4, 32, 2, 4, true, false><<<blocks, threads>>>(avgGrads.getDevData(), target.getDevData(),
imgSize, numFilters, numImages, subsX, startX, strideX,
outputsX, scaleTargets, scaleOutput);
}
}
} else {
if (checkCaseBounds) {
if (scaleTargets == 0 && scaleOutput == 1) {
kLocalAvgUndo<4, 32, 1, 4, false, true><<<blocks, threads>>>(avgGrads.getDevData(), target.getDevData(),
imgSize, numFilters, numImages, subsX, startX, strideX,
outputsX, scaleTargets, scaleOutput);
} else {
kLocalAvgUndo<4, 32, 1, 4, true, true><<<blocks, threads>>>(avgGrads.getDevData(), target.getDevData(),
imgSize, numFilters, numImages, subsX, startX, strideX,
outputsX, scaleTargets, scaleOutput);
}
} else {
if (scaleTargets == 0 && scaleOutput == 1) {
kLocalAvgUndo<4, 32, 1, 4, false, false><<<blocks, threads>>>(avgGrads.getDevData(), target.getDevData(),
imgSize, numFilters, numImages, subsX, startX, strideX,
outputsX, scaleTargets, scaleOutput);
} else {
kLocalAvgUndo<4, 32, 1, 4, true, false><<<blocks, threads>>>(avgGrads.getDevData(), target.getDevData(),
imgSize, numFilters, numImages, subsX, startX, strideX,
outputsX, scaleTargets, scaleOutput);
}
}
}
getLastCudaError("convLocalAvgUndo: kernel execution failed");
}
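/*
 * Usage sketch (illustrative, hypothetical variables): undoing a 3x3, stride-2
 * average pool back onto 32x32 maps. Unlike convLocalMaxUndo, the forward
 * images are not needed; imgSize must be passed explicitly because only the
 * pooled gradients are available.
 *
 *   convLocalAvgUndo(avgGrads, target, 3, 0, 2, 16, 32);
 */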
void convResponseNorm(NVMatrix& images, NVMatrix& denoms, NVMatrix& target, int numFilters, int sizeX, float addScale, float powScale) {
convContrastNorm(images, images, denoms, target, numFilters, sizeX, addScale, powScale);
}
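/*
 * Usage sketch (illustrative, hypothetical variables): response normalization
 * over a 5x5 spatial neighbourhood, forward and backward, with numFilters
 * assumed divisible by 16. The forward pass writes the normalized maps into
 * acts and the per-pixel denominators into denoms; the backward pass consumes
 * both and, as documented for convResponseNormUndo below, overwrites acts.
 *
 *   convResponseNorm(images, denoms, acts, numFilters, 5, 0.0001f, 0.75f);
 *   // ... use acts ...
 *   convResponseNormUndo(actsGrad, denoms, images, acts, imagesGrad,
 *                        numFilters, 5, 0.0001f, 0.75f, 0, 1);
 */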
/*
* images: (numFilters, imgPixels, numImages)
* meanDiffs: (numFilters, imgPixels, numImages)
* denoms: (numFilters, imgPixels, numImages) (out)
* target: (numFilters, imgPixels, numImages) (out)
*/
void convContrastNorm(NVMatrix& images, NVMatrix& meanDiffs, NVMatrix& denoms, NVMatrix& target, int numFilters, int sizeX, float addScale, float powScale) {
int numImages = images.getNumCols();
int imgPixels = images.getNumRows() / numFilters;
assert(images.getNumRows() == numFilters * imgPixels);
int imgSize = int(sqrt(imgPixels));
assert(imgSize * imgSize == imgPixels);
assert(meanDiffs.isSameDims(images));
assert(!meanDiffs.isTrans());
assert(!images.isTrans());
assert(images.isContiguous());
assert(meanDiffs.isContiguous());
assert(numFilters % 16 == 0 || numFilters <= 8);
target.resize(images);
denoms.resize(images);
assert(target.isContiguous());
if (sizeX >= 6 && numFilters % 4 == 0) {
// This one is faster for large regions (my tests show regions >= 6...)
int imgsPerThread = 8;
int filtersPerThread = 4;
int bx = 8;
bool checkCaseBounds = numImages % (bx*imgsPerThread) != 0;
assert((imgsPerThread * bx) % 32 == 0);
assert(numFilters % filtersPerThread == 0);
dim3 threads(bx, 16);
dim3 blocks(DIVUP(imgSize, 4) * DIVUP(numImages, bx*imgsPerThread), DIVUP(imgSize, 4) * numFilters / filtersPerThread);
if (checkCaseBounds) {
cudaFuncSetCacheConfig(kCNorm2<8, 8, 4, true>, cudaFuncCachePreferL1); // L1 faster here
kCNorm2<8, 8, 4, true><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
imgSize, numFilters, numImages, sizeX, addScale, powScale);
} else {
cudaFuncSetCacheConfig(kCNorm2<8, 8, 4, false>, cudaFuncCachePreferL1); // L1 faster here
kCNorm2<8, 8, 4, false><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
imgSize, numFilters, numImages, sizeX, addScale, powScale);
}
} else {
bool checkCaseBounds = numImages % 128 != 0;
if (numFilters <= 8) {
dim3 threads(128);
dim3 blocks(DIVUP(numImages,128) * imgSize, imgSize);
if (numFilters == 1) {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 1, true>, cudaFuncCachePreferL1);
kCNorm_fewfilter<1, 1, true><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
imgSize, numImages, sizeX, addScale, powScale);
} else {
cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 1, false>, cudaFuncCachePreferL1);
kCNorm_fewfilter<1, 1, false><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
imgSize, numImages, sizeX, addScale, powScale);
}
} else if (numFilters == 2) {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 2, true>, cudaFuncCachePreferL1);
kCNorm_fewfilter<1, 2, true><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
imgSize, numImages, sizeX, addScale, powScale);
} else {
cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 2, false>, cudaFuncCachePreferL1);
kCNorm_fewfilter<1, 2, false><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
imgSize, numImages, sizeX, addScale, powScale);
}
} else if (numFilters == 3) {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 3, true>, cudaFuncCachePreferL1);
kCNorm_fewfilter<1, 3, true><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
imgSize, numImages, sizeX, addScale, powScale);
} else {
cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 3, false>, cudaFuncCachePreferL1);
kCNorm_fewfilter<1, 3, false><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
imgSize, numImages, sizeX, addScale, powScale);
}
} else if (numFilters == 4) {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 4, true>, cudaFuncCachePreferL1);
kCNorm_fewfilter<1, 4, true><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
imgSize, numImages, sizeX, addScale, powScale);
} else {
cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 4, false>, cudaFuncCachePreferL1);
kCNorm_fewfilter<1, 4, false><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
imgSize, numImages, sizeX, addScale, powScale);
}
} else if (numFilters == 5) {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 5, true>, cudaFuncCachePreferL1);
kCNorm_fewfilter<1, 5, true><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
imgSize, numImages, sizeX, addScale, powScale);
} else {
cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 5, false>, cudaFuncCachePreferL1);
kCNorm_fewfilter<1, 5, false><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
imgSize, numImages, sizeX, addScale, powScale);
}
} else if (numFilters == 6) {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 6, true>, cudaFuncCachePreferL1);
kCNorm_fewfilter<1, 6, true><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
imgSize, numImages, sizeX, addScale, powScale);
} else {
cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 6, false>, cudaFuncCachePreferL1);
kCNorm_fewfilter<1, 6, false><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
imgSize, numImages, sizeX, addScale, powScale);
}
} else if (numFilters == 7) {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 7, true>, cudaFuncCachePreferL1);
kCNorm_fewfilter<1, 7, true><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
imgSize, numImages, sizeX, addScale, powScale);
} else {
cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 7, false>, cudaFuncCachePreferL1);
kCNorm_fewfilter<1, 7, false><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
imgSize, numImages, sizeX, addScale, powScale);
}
} else if (numFilters == 8) {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 8, true>, cudaFuncCachePreferL1);
kCNorm_fewfilter<1, 8, true><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
imgSize, numImages, sizeX, addScale, powScale);
} else {
cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 8, false>, cudaFuncCachePreferL1);
kCNorm_fewfilter<1, 8, false><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
imgSize, numImages, sizeX, addScale, powScale);
}
}
} else {
dim3 threads(32, 4);
dim3 blocks(DIVUP(numImages,32*4) * imgSize, (numFilters / (4 * 2)) * imgSize);
if (checkCaseBounds) {
cudaFuncSetCacheConfig(kCNorm_manyfilter<4, 32, 4, 2, true>, cudaFuncCachePreferL1);
kCNorm_manyfilter<4, 32, 4, 2, true><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
imgSize, numFilters, numImages, sizeX, addScale, powScale);
} else {
cudaFuncSetCacheConfig(kCNorm_manyfilter<4, 32, 4, 2, false>, cudaFuncCachePreferL1);
kCNorm_manyfilter<4, 32, 4, 2, false><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
imgSize, numFilters, numImages, sizeX, addScale, powScale);
}
}
}
getLastCudaError("convResponseNorm: kernel execution failed");
}
void convContrastNormUndo(NVMatrix& outGrads, NVMatrix& denoms, NVMatrix& meanDiffs, NVMatrix& acts, NVMatrix& target, int numFilters,
int sizeX, float addScale, float powScale, float scaleTargets, float scaleOutput) {
convResponseNormUndo(outGrads, denoms, meanDiffs, acts, target, numFilters, sizeX, addScale, powScale, scaleTargets, scaleOutput);
}
/*
* outGrads: (numFilters, imgPixels, numImages)
* denoms: (numFilters, imgPixels, numImages)
* inputs: (numFilters, imgPixels, numImages)
* acts: (numFilters, imgPixels, numImages)
* target: (numFilters, imgPixels, numImages)
*
* THIS WILL OVERWRITE THE ACTS MATRIX.
*/
void convResponseNormUndo(NVMatrix& outGrads, NVMatrix& denoms, NVMatrix& inputs, NVMatrix& acts, NVMatrix& target, int numFilters,
int sizeX, float addScale, float powScale, float scaleTargets, float scaleOutput) {
int numImages = outGrads.getNumCols();
int imgPixels = outGrads.getNumRows() / numFilters;
int imgSize = int(sqrt(imgPixels));
assert(imgSize * imgSize == imgPixels);
assert(outGrads.getNumRows() == numFilters * imgPixels);
assert(denoms.isSameDims(outGrads));
assert(acts.isSameDims(denoms));
assert(!denoms.isTrans());
assert(!outGrads.isTrans());
assert(!acts.isTrans());
assert(!target.isTrans());
assert(outGrads.isContiguous());
assert(numFilters % 16 == 0);
target.resize(outGrads);
assert(target.isContiguous());
// First do acts := -2 x scale x acts x outGrads / denoms
// so that the main routine only has to do an addition in its inner loop.
int prelimEltsPerThread = 4;
dim3 threads(128);
dim3 blocks(MIN(512, DIVUP(outGrads.getNumElements(),(threads.x * prelimEltsPerThread))));
kRNormUndoPrelims<128, 4><<<blocks, threads>>>(acts.getDevData(), denoms.getDevData(), outGrads.getDevData(), outGrads.getNumElements(), -2*addScale*powScale);
// Now the main routine
if (sizeX >= 6 && numFilters % 4 == 0) {
// This one is faster for large regions (my tests show regions >= 6...)
int imgsPerThread = numImages % 128 == 0 ? 8 : numImages % 64 == 0 ? 4 : 2;
int filtersPerThread = 4;
int bx = 16;
bool checkCaseBounds = numImages % (bx*imgsPerThread) != 0;
assert((imgsPerThread * bx) % 32 == 0);
threads = dim3(bx, 16);
blocks = dim3(DIVUP(imgSize, 4) * DIVUP(numImages, bx*imgsPerThread), DIVUP(imgSize, 4) * numFilters / filtersPerThread);
if (imgsPerThread == 8) {
if (checkCaseBounds) {
if (scaleTargets == 0 && scaleOutput == 1) {
cudaFuncSetCacheConfig(kRNormUndo2<16, 8, 4, true, true>, cudaFuncCachePreferL1);
kRNormUndo2<16, 8, 4, true, true><<<blocks, threads>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale,
scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(kRNormUndo2<16, 8, 4, false, true>, cudaFuncCachePreferL1);
kRNormUndo2<16, 8, 4, false, true><<<blocks, threads>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale,
scaleTargets, scaleOutput);
}
} else {
if (scaleTargets == 0 && scaleOutput == 1) {
cudaFuncSetCacheConfig(kRNormUndo2<16, 8, 4, true, false>, cudaFuncCachePreferL1);
kRNormUndo2<16, 8, 4, true, false><<<blocks, threads>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale,
scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(kRNormUndo2<16, 8, 4, false, false>, cudaFuncCachePreferL1);
kRNormUndo2<16, 8, 4, false, false><<<blocks, threads>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale,
scaleTargets, scaleOutput);
}
}
} else if (imgsPerThread == 4) {
if (checkCaseBounds) {
if (scaleTargets == 0 && scaleOutput == 1) {
cudaFuncSetCacheConfig(kRNormUndo2<16, 4, 4, true, true>, cudaFuncCachePreferL1);
kRNormUndo2<16, 4, 4, true, true><<<blocks, threads>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale,
scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(kRNormUndo2<16, 4, 4, false, true>, cudaFuncCachePreferL1);
kRNormUndo2<16, 4, 4, false, true><<<blocks, threads>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale,
scaleTargets, scaleOutput);
}
} else {
if (scaleTargets == 0 && scaleOutput == 1) {
cudaFuncSetCacheConfig(kRNormUndo2<16, 4, 4, true, false>, cudaFuncCachePreferL1);
kRNormUndo2<16, 4, 4, true, false><<<blocks, threads>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale,
scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(kRNormUndo2<16, 4, 4, false, false>, cudaFuncCachePreferL1);
kRNormUndo2<16, 4, 4, false, false><<<blocks, threads>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale,
scaleTargets, scaleOutput);
}
}
} else {
if (checkCaseBounds) {
if (scaleTargets == 0 && scaleOutput == 1) {
cudaFuncSetCacheConfig(kRNormUndo2<16, 2, 4, true, true>, cudaFuncCachePreferL1);
kRNormUndo2<16, 2, 4, true, true><<<blocks, threads>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale,
scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(kRNormUndo2<16, 2, 4, false, true>, cudaFuncCachePreferL1);
kRNormUndo2<16, 2, 4, false, true><<<blocks, threads>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale,
scaleTargets, scaleOutput);
}
} else {
if (scaleTargets == 0 && scaleOutput == 1) {
cudaFuncSetCacheConfig(kRNormUndo2<16, 2, 4, true, false>, cudaFuncCachePreferL1);
kRNormUndo2<16, 2, 4, true, false><<<blocks, threads>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale,
scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(kRNormUndo2<16, 2, 4, false, false>, cudaFuncCachePreferL1);
kRNormUndo2<16, 2, 4, false, false><<<blocks, threads>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale,
scaleTargets, scaleOutput);
}
}
}
} else {
int imgsPerThread = numImages % 64 == 0 ? 2 : 1;
bool checkCaseBounds = numImages % (32*imgsPerThread) != 0;
threads = dim3(32, 4);
blocks = dim3(DIVUP(numImages,32*imgsPerThread) * imgSize, (numFilters / (4 * 2)) * imgSize);
if (imgsPerThread == 2) {
if (checkCaseBounds) {
if (scaleTargets == 0 && scaleOutput == 1) {
cudaFuncSetCacheConfig(kRNormUndo<4, 32, 2, 2, false, true>, cudaFuncCachePreferL1);
kRNormUndo<4, 32, 2, 2, false, true><<<blocks, threads>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale,
scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(kRNormUndo<4, 32, 2, 2, true, true>, cudaFuncCachePreferL1);
kRNormUndo<4, 32, 2, 2, true, true><<<blocks, threads>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale,
scaleTargets, scaleOutput);
}
} else {
if (scaleTargets == 0 && scaleOutput == 1) {
cudaFuncSetCacheConfig(kRNormUndo<4, 32, 2, 2, false, false>, cudaFuncCachePreferL1);
kRNormUndo<4, 32, 2, 2, false, false><<<blocks, threads>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale,
scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(kRNormUndo<4, 32, 2, 2, true, false>, cudaFuncCachePreferL1);
kRNormUndo<4, 32, 2, 2, true, false><<<blocks, threads>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale,
scaleTargets, scaleOutput);
}
}
} else {
if (checkCaseBounds) {
if (scaleTargets == 0 && scaleOutput == 1) {
cudaFuncSetCacheConfig(kRNormUndo<4, 32, 1, 2, false, true>, cudaFuncCachePreferL1);
kRNormUndo<4, 32, 1, 2, false, true><<<blocks, threads>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale,
scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(kRNormUndo<4, 32, 1, 2, true, true>, cudaFuncCachePreferL1);
kRNormUndo<4, 32, 1, 2, true, true><<<blocks, threads>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale,
scaleTargets, scaleOutput);
}
} else {
if (scaleTargets == 0 && scaleOutput == 1) {
cudaFuncSetCacheConfig(kRNormUndo<4, 32, 1, 2, false, false>, cudaFuncCachePreferL1);
kRNormUndo<4, 32, 1, 2, false, false><<<blocks, threads>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale,
scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(kRNormUndo<4, 32, 1, 2, true, false>, cudaFuncCachePreferL1);
kRNormUndo<4, 32, 1, 2, true, false><<<blocks, threads>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale,
scaleTargets, scaleOutput);
}
}
}
}
getLastCudaError("kRNormUndo: kernel execution failed");
}
/*
* imgs: (numChannels, imgPixels, numImages) with given imgStride
* target: (numChannels, tgtPixels, numImages)
*
* imgSize = scale * tgtSize
*/
void convResizeBilinear(NVMatrix& images, NVMatrix& target, int imgSize, int tgtSize, float scale) {
assert(!images.isTrans());
assert(!target.isTrans());
int imgPixels = imgSize * imgSize;
int tgtPixels = tgtSize * tgtSize;
int numChannels = images.getNumRows() / imgPixels;
int numImages = images.getNumCols();
assert(images.getNumRows() == numChannels * imgPixels);
target.resize(numChannels * tgtPixels, numImages);
assert(target.isContiguous());
int numChunksX = DIVUP(tgtSize, 4);
int numChunks = numChunksX * numChunksX;
double imgCenter = imgSize * 0.5;
double tgtCenter = tgtSize * 0.5;
double centerScale = imgCenter - tgtCenter * scale;
int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1;
bool checkCaseBounds = numImages % (32*imgsPerThread) != 0;
dim3 threads(32, 16);
dim3 blocks(DIVUP(numImages, imgsPerThread * 32), numChannels * numChunks);
if (imgsPerThread == 4) {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(kResizeBilinear<4, true>, cudaFuncCachePreferL1);
kResizeBilinear<4, true><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgSize, tgtSize, numImages, images.getStride(), scale, centerScale);
} else {
cudaFuncSetCacheConfig(kResizeBilinear<4, false>, cudaFuncCachePreferL1);
kResizeBilinear<4, false><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgSize, tgtSize, numImages, images.getStride(), scale, centerScale);
}
} else if (imgsPerThread == 2) {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(kResizeBilinear<2, true>, cudaFuncCachePreferL1);
kResizeBilinear<2, true><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgSize, tgtSize, numImages, images.getStride(), scale, centerScale);
} else {
cudaFuncSetCacheConfig(kResizeBilinear<2, false>, cudaFuncCachePreferL1);
kResizeBilinear<2, false><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgSize, tgtSize, numImages, images.getStride(), scale, centerScale);
}
} else {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(kResizeBilinear<1, true>, cudaFuncCachePreferL1);
kResizeBilinear<1, true><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgSize, tgtSize, numImages, images.getStride(), scale, centerScale);
} else {
cudaFuncSetCacheConfig(kResizeBilinear<1, false>, cudaFuncCachePreferL1);
kResizeBilinear<1, false><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgSize, tgtSize, numImages, images.getStride(), scale, centerScale);
}
}
getLastCudaError("convResizeBilinear: kernel execution failed");
}
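/*
 * Usage sketch (illustrative, hypothetical variables): bilinear downsampling
 * of 3-channel 64x64 images to 32x32. Per the comment above, scale is
 * imgSize / tgtSize, so here scale = 2.0f; images is (3*4096, numImages) and
 * target is resized to (3*1024, numImages).
 *
 *   convResizeBilinear(images, target, 64, 32, 2.0f);
 */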
/*
* imgs: (3, imgPixels, numImages) with given imgStride
* target: (3, imgPixels, numImages)
*/
void convRGBToYUV(NVMatrix& images, NVMatrix& target) {
assert(!images.isTrans());
assert(!target.isTrans());
int imgPixels = images.getNumRows() / 3;
int numImages = images.getNumCols();
assert(images.getNumRows() == 3 * imgPixels);
target.resize(3 * imgPixels, numImages);
assert(target.isContiguous());
int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1;
bool checkCaseBounds = numImages % (32*imgsPerThread) != 0;
dim3 threads(32, 4);
dim3 blocks(DIVUP(numImages, imgsPerThread * 32), DIVUP(imgPixels, 4));
if (imgsPerThread == 4) {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(kRGBToYUV<4, true>, cudaFuncCachePreferL1);
kRGBToYUV<4, true><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride());
} else {
cudaFuncSetCacheConfig(kRGBToYUV<4, false>, cudaFuncCachePreferL1);
kRGBToYUV<4, false><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride());
}
} else if (imgsPerThread == 2) {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(kRGBToYUV<2, true>, cudaFuncCachePreferL1);
kRGBToYUV<2, true><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride());
} else {
cudaFuncSetCacheConfig(kRGBToYUV<2, false>, cudaFuncCachePreferL1);
kRGBToYUV<2, false><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride());
}
} else {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(kRGBToYUV<1, true>, cudaFuncCachePreferL1);
kRGBToYUV<1, true><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride());
} else {
cudaFuncSetCacheConfig(kRGBToYUV<1, false>, cudaFuncCachePreferL1);
kRGBToYUV<1, false><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride());
}
}
getLastCudaError("convRGBToYUV: kernel execution failed");
}
/*
* imgs: (3, imgPixels, numImages) with given imgStride
* target: (3, imgPixels, numImages)
*/
void convRGBToLAB(NVMatrix& images, NVMatrix& target, bool center) {
assert(!images.isTrans());
assert(!target.isTrans());
int imgPixels = images.getNumRows() / 3;
int numImages = images.getNumCols();
assert(images.getNumRows() == 3 * imgPixels);
target.resize(3 * imgPixels, numImages);
assert(target.isContiguous());
int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1;
bool checkCaseBounds = numImages % (32*imgsPerThread) != 0;
dim3 threads(32, 4);
dim3 blocks(DIVUP(numImages, imgsPerThread * 32), DIVUP(imgPixels, 4));
if (imgsPerThread == 4) {
if (center) {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(kRGBToLAB<4, true, true>, cudaFuncCachePreferL1);
kRGBToLAB<4, true, true><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride());
} else {
cudaFuncSetCacheConfig(kRGBToLAB<4, false, true>, cudaFuncCachePreferL1);
kRGBToLAB<4, false, true><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride());
}
} else {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(kRGBToLAB<4, true, false>, cudaFuncCachePreferL1);
kRGBToLAB<4, true, false><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride());
} else {
cudaFuncSetCacheConfig(kRGBToLAB<4, false, false>, cudaFuncCachePreferL1);
kRGBToLAB<4, false, false><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride());
}
}
} else if (imgsPerThread == 2) {
if (center) {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(kRGBToLAB<2, true, true>, cudaFuncCachePreferL1);
kRGBToLAB<2, true, true><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride());
} else {
cudaFuncSetCacheConfig(kRGBToLAB<2, false, true>, cudaFuncCachePreferL1);
kRGBToLAB<2, false, true><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride());
}
} else {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(kRGBToLAB<2, true, false>, cudaFuncCachePreferL1);
kRGBToLAB<2, true, false><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride());
} else {
cudaFuncSetCacheConfig(kRGBToLAB<2, false, false>, cudaFuncCachePreferL1);
kRGBToLAB<2, false, false><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride());
}
}
} else {
if (center) {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(kRGBToLAB<1, true, true>, cudaFuncCachePreferL1);
kRGBToLAB<1, true, true><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride());
} else {
cudaFuncSetCacheConfig(kRGBToLAB<1, false, true>, cudaFuncCachePreferL1);
kRGBToLAB<1, false, true><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride());
}
} else {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(kRGBToLAB<1, true, false>, cudaFuncCachePreferL1);
kRGBToLAB<1, true, false><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride());
} else {
cudaFuncSetCacheConfig(kRGBToLAB<1, false, false>, cudaFuncCachePreferL1);
kRGBToLAB<1, false, false><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgPixels, numImages, images.getStride());
}
}
}
getLastCudaError("convRGBToLAB: kernel execution failed");
}
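/*
 * Usage sketch (illustrative, hypothetical variables): both colour-space
 * conversions take (3, imgPixels, numImages) matrices and produce outputs of
 * the same shape. The boolean passed to convRGBToLAB is forwarded to the
 * kRGBToLAB kernel as its center template flag.
 *
 *   convRGBToYUV(rgb, yuv);
 *   convRGBToLAB(rgb, lab, true);
 */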
/*
* imgs: (numChannels, imgPixels, numImages) with given imgStride
* target: (numChannels, tgtPixels, numImages)
*/
void convCrop(NVMatrix& imgs, NVMatrix& target, int imgSize, int tgtSize, int startY, int startX) {
int numImages = imgs.getNumCols();
int imgPixels = imgSize * imgSize;
int tgtPixels = tgtSize * tgtSize;
int numChannels = imgs.getNumRows() / imgPixels;
assert(imgs.getNumRows() == imgPixels * numChannels);
assert(imgPixels == imgSize * imgSize);
assert(imgSize - startY >= tgtSize);
assert(imgSize - startX >= tgtSize);
assert(startY >= 0);
assert(startX >= 0);
target.resize(numChannels * tgtPixels, numImages);
int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1;
bool checkCaseBounds = numImages % (32*imgsPerThread) != 0;
dim3 blocks(DIVUP(numImages, 32 * imgsPerThread), numChannels * DIVUP(tgtPixels, 4));
dim3 threads(32, 4);
if (imgsPerThread == 4) {
if (checkCaseBounds) {
kCrop<4, true><<<blocks, threads>>>(imgs.getDevData(), target.getDevData(), numImages, imgs.getStride(), imgSize, tgtSize, startY, startX);
} else {
kCrop<4, false><<<blocks, threads>>>(imgs.getDevData(), target.getDevData(), numImages, imgs.getStride(), imgSize, tgtSize, startY, startX);
}
} else if (imgsPerThread == 2) {
if (checkCaseBounds) {
kCrop<2, true><<<blocks, threads>>>(imgs.getDevData(), target.getDevData(), numImages, imgs.getStride(), imgSize, tgtSize, startY, startX);
} else {
kCrop<2, false><<<blocks, threads>>>(imgs.getDevData(), target.getDevData(), numImages, imgs.getStride(), imgSize, tgtSize, startY, startX);
}
} else {
if (checkCaseBounds) {
kCrop<1, true><<<blocks, threads>>>(imgs.getDevData(), target.getDevData(), numImages, imgs.getStride(), imgSize, tgtSize, startY, startX);
} else {
kCrop<1, false><<<blocks, threads>>>(imgs.getDevData(), target.getDevData(), numImages, imgs.getStride(), imgSize, tgtSize, startY, startX);
}
}
getLastCudaError("convCrop: kernel execution failed");
}
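/*
 * Usage sketch (illustrative, hypothetical variables): taking a centred 24x24
 * crop out of 32x32 images. startY/startX give the top-left corner of the
 * crop, so a centred crop uses (32 - 24) / 2 = 4 for both.
 *
 *   convCrop(images, cropped, 32, 24, 4, 4);
 */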
/*
* images: (numFilters, imgPixels, numImages)
* ticas: (numFilters, imgPixels, numImages)
* target: (numFilters, imgPixels, numImages) (out)
*
* Computes TICA-style gradient for given feature maps
* f(x) = exp(-(sum_i{x_i^2}^(1/2)))
 * dlogf(x)/dx_i = -x_i / (sum_i{x_i^2}^(1/2) + eps)
*
* eps added for numerical stability
*/
void convTICAGrad(NVMatrix& images, NVMatrix& ticas, NVMatrix& target, int numFilters, int sizeX, float scaleTarget, float scaleOutput) {
int numImages = images.getNumCols();
int imgPixels = images.getNumRows() / numFilters;
assert(images.getNumRows() == numFilters * imgPixels);
int imgSize = int(sqrt(imgPixels));
assert(imgSize * imgSize == imgPixels);
assert(!images.isTrans());
assert(images.isContiguous());
assert(numFilters % 16 == 0 || numFilters <= 8);
assert(ticas.isSameDims(images));
assert(ticas.isContiguous());
if (scaleTarget == 0) {
target.resize(images);
} else {
assert(target.isSameDims(images));
}
assert(target.isContiguous());
// TEMPORARY
assert(numFilters > 8);
assert(sizeX < 6);
dim3 threads(32, 4);
dim3 blocks(DIVUP(numImages, 32*4) * imgSize, (numFilters / (4 * 2)) * imgSize);
bool checkCaseBounds = (numImages % 128) != 0;
if (checkCaseBounds) {
cudaFuncSetCacheConfig(kTICAGrad_manyfilter<4, 32, 4, 2, true>, cudaFuncCachePreferL1);
kTICAGrad_manyfilter<4, 32, 4, 2, true><<<blocks, threads>>>(images.getDevData(), ticas.getDevData(), target.getDevData(),
imgSize, numFilters, numImages, sizeX, scaleTarget, scaleOutput);
} else {
cudaFuncSetCacheConfig(kTICAGrad_manyfilter<4, 32, 4, 2, false>, cudaFuncCachePreferL1);
kTICAGrad_manyfilter<4, 32, 4, 2, false><<<blocks, threads>>>(images.getDevData(), ticas.getDevData(), target.getDevData(),
imgSize, numFilters, numImages, sizeX, scaleTarget, scaleOutput);
}
getLastCudaError("convTICAGrad: kernel execution failed");
}
/*
* images: (numFilters, imgPixels, numImages)
* target: (numFilters, imgPixels, numImages) (out)
*
* Computes TICA-style gradient for given feature maps
* f(x) = exp(-(sum_i{x_i^2}^(1/2)))
 * dlogf(x)/dx_i = -x_i / (sum_i{x_i^2}^(1/2) + eps)
*
* eps added for numerical stability
*/
void convTICA(NVMatrix& images, NVMatrix& target, int numFilters, int sizeX, float scaleTarget, float scaleOutput) {
int numImages = images.getNumCols();
int imgPixels = images.getNumRows() / numFilters;
assert(images.getNumRows() == numFilters * imgPixels);
int imgSize = int(sqrt(imgPixels));
assert(imgSize * imgSize == imgPixels);
assert(!images.isTrans());
assert(images.isContiguous());
assert(numFilters % 16 == 0 || numFilters <= 8);
if (scaleTarget == 0) {
target.resize(images);
} else {
assert(target.isSameDims(images));
}
assert(target.isContiguous());
// TEMPORARY
assert(numFilters > 8);
assert(sizeX < 6);
dim3 threads(32, 4);
dim3 blocks(DIVUP(numImages, 32*4) * imgSize, (numFilters / (4 * 2)) * imgSize);
bool checkCaseBounds = (numImages % 128) != 0;
if (checkCaseBounds) {
cudaFuncSetCacheConfig(kTICA_manyfilter<4, 32, 4, 2, true>, cudaFuncCachePreferL1);
kTICA_manyfilter<4, 32, 4, 2, true><<<blocks, threads>>>(images.getDevData(), target.getDevData(),
imgSize, numFilters, numImages, sizeX, scaleTarget, scaleOutput);
} else {
cudaFuncSetCacheConfig(kTICA_manyfilter<4, 32, 4, 2, false>, cudaFuncCachePreferL1);
kTICA_manyfilter<4, 32, 4, 2, false><<<blocks, threads>>>(images.getDevData(), target.getDevData(),
imgSize, numFilters, numImages, sizeX, scaleTarget, scaleOutput);
}
getLastCudaError("convTICA: kernel execution failed");
}
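/*
 * Usage sketch (illustrative, hypothetical variables): TICA penalty gradient
 * over a sizeX=3 pooling neighbourhood, written straight into the target
 * (scaleTarget = 0, scaleOutput = 1). As asserted above, numFilters must be
 * greater than 8 (and divisible by 16) and sizeX less than 6.
 *
 *   convTICA(acts, ticaGrad, numFilters, 3, 0, 1);
 */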
/*
* images: (numFilters, imgPixels, numImages)
* meanDiffs: (numFilters, imgPixels, numImages)
* denoms: (numFilters, imgPixels, numImages) (out)
* target: (numFilters, imgPixels, numImages) (out)
* Note: at present, I have no code to compute the meanDiffs. So it should be set
* to be equal to images. In other words, this isn't really doing contrast normalization,
* just response normalization.
*/
void convContrastNormCrossMap(NVMatrix& images, NVMatrix& meanDiffs, NVMatrix& denoms, NVMatrix& target,
int numFilters, int sizeF, float addScale, float powScale, bool blocked) {
int numImages = images.getNumCols();
int imgPixels = images.getNumRows() / numFilters;
assert(images.getNumRows() == numFilters * imgPixels);
int imgSize = int(sqrt(imgPixels));
assert(imgSize * imgSize == imgPixels);
assert(meanDiffs.isSameDims(images));
assert(sizeF > 0 && sizeF <= numFilters);
assert(!meanDiffs.isTrans());
assert(!images.isTrans());
assert(images.isContiguous());
assert(meanDiffs.isContiguous());
assert(numFilters % 16 == 0);
target.resize(images);
denoms.resize(images);
assert(target.isContiguous());
bool checkCaseBounds = numImages % 128 != 0;
dim3 threads(32, 4);
dim3 blocks(DIVUP(numImages,32*4) * imgSize, (numFilters / 4) * imgSize);
if (blocked) {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(kFCNorm<4, 32, 4, true, true>, cudaFuncCachePreferL1);
kFCNorm<4, 32, 4, true, true><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
imgSize, numFilters, numImages, sizeF, addScale, powScale);
} else {
cudaFuncSetCacheConfig(kFCNorm<4, 32, 4, false, true>, cudaFuncCachePreferL1);
kFCNorm<4, 32, 4, false, true><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
imgSize, numFilters, numImages, sizeF, addScale, powScale);
}
} else {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(kFCNorm<4, 32, 4, true, false>, cudaFuncCachePreferL1);
kFCNorm<4, 32, 4, true, false><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
imgSize, numFilters, numImages, sizeF, addScale, powScale);
} else {
cudaFuncSetCacheConfig(kFCNorm<4, 32, 4, false, false>, cudaFuncCachePreferL1);
kFCNorm<4, 32, 4, false, false><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(),
imgSize, numFilters, numImages, sizeF, addScale, powScale);
}
}
getLastCudaError("convContrastNormCrossMap: kernel execution failed");
}
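/*
 * Usage sketch (illustrative, hypothetical variables): cross-map response
 * normalization over sizeF = 16 neighbouring filter maps (blocked = false).
 * The undo pass below consumes the denoms and acts produced by the forward
 * pass and, as noted in its comment, overwrites acts.
 *
 *   convResponseNormCrossMap(images, denoms, acts, numFilters, 16, 0.001f, 0.75f, false);
 *   // ... use acts ...
 *   convResponseNormCrossMapUndo(actsGrad, denoms, images, acts, imagesGrad,
 *                                numFilters, 16, 0.001f, 0.75f, false, 0, 1);
 */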
/*
* outGrads: (numFilters, imgPixels, numImages)
* denoms: (numFilters, imgPixels, numImages)
* inputs: (numFilters, imgPixels, numImages)
* acts: (numFilters, imgPixels, numImages)
* target: (numFilters, imgPixels, numImages)
*
* THIS WILL OVERWRITE THE ACTS MATRIX.
*/
void convResponseNormCrossMapUndo(NVMatrix& outGrads, NVMatrix& denoms, NVMatrix& inputs, NVMatrix& acts, NVMatrix& target, int numFilters,
int sizeF, float addScale, float powScale, bool blocked, float scaleTargets, float scaleOutput) {
int numImages = outGrads.getNumCols();
int imgPixels = outGrads.getNumRows() / numFilters;
int imgSize = int(sqrt(imgPixels));
assert(imgSize * imgSize == imgPixels);
assert(sizeF > 0 && sizeF <= numFilters);
assert(outGrads.getNumRows() == numFilters * imgPixels);
assert(denoms.isSameDims(outGrads));
assert(acts.isSameDims(denoms));
assert(!denoms.isTrans());
assert(!outGrads.isTrans());
assert(!acts.isTrans());
assert(!target.isTrans());
assert(outGrads.isContiguous());
assert(numFilters % 16 == 0);
target.resize(outGrads);
assert(target.isContiguous());
// First do acts := -2 x scale x acts x outGrads / denoms
// so that the main routine only has to do an addition in its inner loop.
int prelimEltsPerThread = 4;
dim3 threads(128);
dim3 blocks(MIN(512, DIVUP(outGrads.getNumElements(),(threads.x * prelimEltsPerThread))));
kRNormUndoPrelims<128, 4><<<blocks, threads>>>(acts.getDevData(), denoms.getDevData(), outGrads.getDevData(), outGrads.getNumElements(), -2*addScale*powScale);
// Now the main routine
dim3 threads2 = dim3(32, 4);
dim3 blocks2 = dim3(DIVUP(numImages,32*4) * imgSize, (numFilters / 4) * imgSize);
bool checkCaseBounds = (numImages % 128) != 0;
if (blocked) {
if (scaleTargets == 0 && scaleOutput == 1) {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(kFRNormUndo<4, 32, 4, false, true, true>, cudaFuncCachePreferL1);
kFRNormUndo<4, 32, 4, false, true, true><<<blocks2, threads2>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
target.getDevData(), imgSize, numFilters, numImages, sizeF, powScale,
scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(kFRNormUndo<4, 32, 4, false, false, true>, cudaFuncCachePreferL1);
kFRNormUndo<4, 32, 4, false, false, true><<<blocks2, threads2>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
target.getDevData(), imgSize, numFilters, numImages, sizeF, powScale,
scaleTargets, scaleOutput);
}
} else {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(kFRNormUndo<4, 32, 4, true, true, true>, cudaFuncCachePreferL1);
kFRNormUndo<4, 32, 4, true, true, true><<<blocks2, threads2>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
target.getDevData(), imgSize, numFilters, numImages, sizeF, powScale,
scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(kFRNormUndo<4, 32, 4, true, false, true>, cudaFuncCachePreferL1);
kFRNormUndo<4, 32, 4, true, false, true><<<blocks2, threads2>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
target.getDevData(), imgSize, numFilters, numImages, sizeF, powScale,
scaleTargets, scaleOutput);
}
}
} else {
if (scaleTargets == 0 && scaleOutput == 1) {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(kFRNormUndo<4, 32, 4, false, true, false>, cudaFuncCachePreferL1);
kFRNormUndo<4, 32, 4, false, true, false><<<blocks2, threads2>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
target.getDevData(), imgSize, numFilters, numImages, sizeF, powScale,
scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(kFRNormUndo<4, 32, 4, false, false, false>, cudaFuncCachePreferL1);
kFRNormUndo<4, 32, 4, false, false, false><<<blocks2, threads2>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
target.getDevData(), imgSize, numFilters, numImages, sizeF, powScale,
scaleTargets, scaleOutput);
}
} else {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(kFRNormUndo<4, 32, 4, true, true, false>, cudaFuncCachePreferL1);
kFRNormUndo<4, 32, 4, true, true, false><<<blocks2, threads2>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
target.getDevData(), imgSize, numFilters, numImages, sizeF, powScale,
scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(kFRNormUndo<4, 32, 4, true, false, false>, cudaFuncCachePreferL1);
kFRNormUndo<4, 32, 4, true, false, false><<<blocks2, threads2>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(),
target.getDevData(), imgSize, numFilters, numImages, sizeF, powScale,
scaleTargets, scaleOutput);
}
}
}
getLastCudaError("convResponseNormCrossMapUndo: kernel execution failed");
}
void convResponseNormCrossMap(NVMatrix& images, NVMatrix& denoms, NVMatrix& target, int numFilters, int sizeF, float addScale, float powScale, bool blocked) {
convContrastNormCrossMap(images, images, denoms, target, numFilters, sizeF, addScale, powScale, blocked);
}
|
82e88f8daf78aee9f420c8cae859a7dcf944c86e.hip
|
// !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef PADDLE_WITH_HIP
// HIP does not support cusolver
#include "paddle/phi/kernels/matrix_rank_tol_kernel.h"
#include <algorithm>
#include <vector>
#include "paddle/fluid/memory/memory.h"
#include "paddle/phi/backends/dynload/cusolver.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/abs_kernel.h"
#include "paddle/phi/kernels/elementwise_multiply_kernel.h"
#include "paddle/phi/kernels/full_kernel.h"
#include "paddle/phi/kernels/funcs/broadcast_function.h"
#include "paddle/phi/kernels/funcs/compare_functors.h"
#include "paddle/phi/kernels/impl/matrix_rank_kernel_impl.h"
#include "paddle/phi/kernels/reduce_max_kernel.h"
#include "paddle/phi/kernels/reduce_sum_kernel.h"
namespace phi {
template <typename T>
static void GesvdjBatched(const phi::GPUContext& dev_ctx,
int batchSize,
int m,
int n,
int k,
T* A,
T* U,
T* V,
T* S,
int* info,
int thin_UV = 1);
template <typename T>
void SyevjBatched(const phi::GPUContext& dev_ctx,
int batchSize,
int n,
T* A,
T* W,
int* info);
template <>
void GesvdjBatched<float>(const phi::GPUContext& dev_ctx,
int batchSize,
int m,
int n,
int k,
float* A,
float* U,
float* V,
float* S,
int* info,
int thin_UV) {
// do not compute singular vectors
const hipsolverEigMode_t jobz = HIPSOLVER_EIG_MODE_NOVECTOR;
hipsolverGesvdjInfo_t gesvdj_params = NULL;
int lda = m;
int ldu = m;
int ldt = n;
int lwork = 0;
auto handle = dev_ctx.cusolver_dn_handle();
PADDLE_ENFORCE_GPU_SUCCESS(
dynload::hipsolverDnCreateGesvdjInfo(&gesvdj_params));
PADDLE_ENFORCE_GPU_SUCCESS(
dynload::hipsolverDnSgesvdj_bufferSize(handle,
jobz,
thin_UV,
m,
n,
A,
lda,
S,
U,
ldu,
V,
ldt,
&lwork,
gesvdj_params));
auto workspace = paddle::memory::Alloc(dev_ctx, lwork * sizeof(float));
float* workspace_ptr = reinterpret_cast<float*>(workspace->ptr());
int stride_A = lda * n;
int stride_U = ldu * (thin_UV ? k : m);
int stride_V = ldt * (thin_UV ? k : n);
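  // Each matrix in the batch is stored contiguously; with thin_UV set, the economy-size
  // factors are written, so per batch entry U is ldu x k and V is ldt x k (hence the strides).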
for (int i = 0; i < batchSize; i++) {
PADDLE_ENFORCE_GPU_SUCCESS(dynload::hipsolverDnSgesvdj(handle,
jobz,
thin_UV,
m,
n,
A + stride_A * i,
lda,
S + k * i,
U + stride_U * i,
ldu,
V + stride_V * i,
ldt,
workspace_ptr,
lwork,
info,
gesvdj_params));
int error_info;
paddle::memory::Copy(phi::CPUPlace(),
&error_info,
dev_ctx.GetPlace(),
info,
sizeof(int),
dev_ctx.stream());
PADDLE_ENFORCE_EQ(
error_info,
0,
phi::errors::PreconditionNotMet(
"For batch [%d]: CUSolver SVD is not zero. [%d]", i, error_info));
}
PADDLE_ENFORCE_GPU_SUCCESS(
dynload::hipsolverDnDestroyGesvdjInfo(gesvdj_params));
}
template <>
void GesvdjBatched<double>(const phi::GPUContext& dev_ctx,
int batchSize,
int m,
int n,
int k,
double* A,
double* U,
double* V,
double* S,
int* info,
int thin_UV) {
// do not compute singular vectors
const hipsolverEigMode_t jobz = HIPSOLVER_EIG_MODE_NOVECTOR;
hipsolverGesvdjInfo_t gesvdj_params = NULL;
int lda = m;
int ldu = m;
int ldt = n;
int lwork = 0;
auto handle = dev_ctx.cusolver_dn_handle();
PADDLE_ENFORCE_GPU_SUCCESS(
dynload::hipsolverDnCreateGesvdjInfo(&gesvdj_params));
PADDLE_ENFORCE_GPU_SUCCESS(
dynload::hipsolverDnDgesvdj_bufferSize(handle,
jobz,
thin_UV,
m,
n,
A,
lda,
S,
U,
ldu,
V,
ldt,
&lwork,
gesvdj_params));
auto workspace = paddle::memory::Alloc(dev_ctx, lwork * sizeof(double));
double* workspace_ptr = reinterpret_cast<double*>(workspace->ptr());
int stride_A = lda * n;
int stride_U = ldu * (thin_UV ? k : m);
int stride_V = ldt * (thin_UV ? k : n);
for (int i = 0; i < batchSize; ++i) {
PADDLE_ENFORCE_GPU_SUCCESS(dynload::hipsolverDnDgesvdj(handle,
jobz,
thin_UV,
m,
n,
A + stride_A * i,
lda,
S + k * i,
U + stride_U * i,
ldu,
V + stride_V * i,
ldt,
workspace_ptr,
lwork,
info,
gesvdj_params));
// check the error info
int error_info;
paddle::memory::Copy(phi::CPUPlace(),
&error_info,
dev_ctx.GetPlace(),
info,
sizeof(int),
dev_ctx.stream());
PADDLE_ENFORCE_EQ(
error_info,
0,
phi::errors::PreconditionNotMet(
"For batch [%d]: CUSolver SVD is not zero. [%d]", i, error_info));
}
PADDLE_ENFORCE_GPU_SUCCESS(
dynload::hipsolverDnDestroyGesvdjInfo(gesvdj_params));
}
template <>
void SyevjBatched<float>(const phi::GPUContext& dev_ctx,
int batchSize,
int n,
float* A,
float* W,
int* info) {
auto handle = dev_ctx.cusolver_dn_handle();
// Compute eigenvalues only
const hipsolverEigMode_t jobz = HIPSOLVER_EIG_MODE_NOVECTOR;
  // the matrix is stored column-major inside cusolver, so the row-major input is seen
  // as its transpose; numpy and torch use the lower triangle to compute eigenvalues,
  // so here we pass the upper triangle of the transposed view, which reads the same data
hipblasFillMode_t uplo = HIPBLAS_FILL_MODE_UPPER;
int lda = n;
int stride_A = lda * n;
int lwork = 0;
hipsolverSyevjInfo_t params = NULL;
PADDLE_ENFORCE_GPU_SUCCESS(dynload::hipsolverDnCreateSyevjInfo(¶ms));
PADDLE_ENFORCE_GPU_SUCCESS(dynload::hipsolverDnSsyevj_bufferSize(
handle, jobz, uplo, n, A, lda, W, &lwork, params));
auto workspace = paddle::memory::Alloc(dev_ctx, lwork * sizeof(float));
float* workspace_ptr = reinterpret_cast<float*>(workspace->ptr());
for (int i = 0; i < batchSize; i++) {
PADDLE_ENFORCE_GPU_SUCCESS(dynload::hipsolverDnSsyevj(handle,
jobz,
uplo,
n,
A + stride_A * i,
lda,
W + n * i,
workspace_ptr,
lwork,
info,
params));
int error_info;
paddle::memory::Copy(phi::CPUPlace(),
&error_info,
dev_ctx.GetPlace(),
info,
sizeof(int),
dev_ctx.stream());
PADDLE_ENFORCE_EQ(
error_info,
0,
phi::errors::PreconditionNotMet(
"For batch [%d]: CUSolver eigenvalues is not zero. [%d]",
i,
error_info));
}
PADDLE_ENFORCE_GPU_SUCCESS(dynload::hipsolverDnDestroySyevjInfo(params));
}
template <>
void SyevjBatched<double>(const phi::GPUContext& dev_ctx,
int batchSize,
int n,
double* A,
double* W,
int* info) {
auto handle = dev_ctx.cusolver_dn_handle();
// Compute eigenvalues only
const hipsolverEigMode_t jobz = HIPSOLVER_EIG_MODE_NOVECTOR;
// upper triangle of A is stored
hipblasFillMode_t uplo = HIPBLAS_FILL_MODE_UPPER;
int lda = n;
int stride_A = lda * n;
int lwork = 0;
hipsolverSyevjInfo_t params = NULL;
PADDLE_ENFORCE_GPU_SUCCESS(dynload::hipsolverDnCreateSyevjInfo(¶ms));
PADDLE_ENFORCE_GPU_SUCCESS(dynload::hipsolverDnDsyevj_bufferSize(
handle, jobz, uplo, n, A, lda, W, &lwork, params));
auto workspace = paddle::memory::Alloc(dev_ctx, lwork * sizeof(double));
double* workspace_ptr = reinterpret_cast<double*>(workspace->ptr());
for (int i = 0; i < batchSize; i++) {
PADDLE_ENFORCE_GPU_SUCCESS(dynload::hipsolverDnDsyevj(handle,
jobz,
uplo,
n,
A + stride_A * i,
lda,
W + n * i,
workspace_ptr,
lwork,
info,
params));
int error_info;
paddle::memory::Copy(phi::CPUPlace(),
&error_info,
dev_ctx.GetPlace(),
info,
sizeof(int),
dev_ctx.stream());
PADDLE_ENFORCE_EQ(
error_info,
0,
phi::errors::PreconditionNotMet(
"For batch [%d]: CUSolver eigenvalues is not zero. [%d]",
i,
error_info));
}
PADDLE_ENFORCE_GPU_SUCCESS(dynload::hipsolverDnDestroySyevjInfo(params));
}
template <typename T, typename Context>
void MatrixRankTolKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& atol_tensor,
bool use_default_tol,
bool hermitian,
DenseTensor* out) {
auto* x_data = x.data<T>();
dev_ctx.template Alloc<int64_t>(out);
auto dim_x = x.dims();
auto dim_out = out->dims();
int rows = dim_x[dim_x.size() - 2];
int cols = dim_x[dim_x.size() - 1];
  int k = std::min(rows, cols);
auto numel = x.numel();
int batches = numel / (rows * cols);
T rtol_T = 0;
if (use_default_tol) {
    rtol_T = std::numeric_limits<T>::epsilon() * std::max(rows, cols);
}
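  // Sketch of the tolerance logic implemented below (assuming GreaterElementFunctor
  // picks the larger of its two inputs):
  //   tol_j  = max(atol_j, rtol * sigma_max_j)   for matrix j in the batch
  //   rank_j = #{ sigma_i > tol_j }
  // where sigma_i are the singular values (|eigenvalues| in the hermitian case).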
  // Must copy X once, because gesvdj destroys the contents of its input on exit.
DenseTensor x_tmp;
paddle::framework::TensorCopy(x, dev_ctx.GetPlace(), &x_tmp);
auto info = paddle::memory::Alloc(dev_ctx, sizeof(int) * batches);
int* info_ptr = reinterpret_cast<int*>(info->ptr());
DenseTensor eigenvalue_tensor;
eigenvalue_tensor.Resize(detail::GetEigenvalueDim(dim_x, k));
auto* eigenvalue_data = dev_ctx.template Alloc<T>(&eigenvalue_tensor);
if (hermitian) {
SyevjBatched<T>(
dev_ctx, batches, rows, x_tmp.data<T>(), eigenvalue_data, info_ptr);
phi::AbsKernel<T, Context>(dev_ctx, eigenvalue_tensor, &eigenvalue_tensor);
} else {
DenseTensor U, VH;
U.Resize(detail::GetUDDim(dim_x, k));
VH.Resize(detail::GetVHDDim(dim_x, k));
auto* u_data = dev_ctx.template Alloc<T>(&U);
auto* vh_data = dev_ctx.template Alloc<T>(&VH);
GesvdjBatched<T>(dev_ctx,
batches,
cols,
rows,
k,
x_tmp.data<T>(),
vh_data,
u_data,
eigenvalue_data,
info_ptr,
1);
}
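  // Note: cusolver expects column-major storage, so the row-major input is effectively its
  // transpose; swapping rows/cols (and U/VH) in the call above yields the same singular
  // values, which is all the rank computation needs.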
DenseTensor max_eigenvalue_tensor;
dev_ctx.template Alloc<T>(&max_eigenvalue_tensor);
max_eigenvalue_tensor.Resize(detail::RemoveLastDim(eigenvalue_tensor.dims()));
phi::MaxKernel<T, Context>(dev_ctx,
eigenvalue_tensor,
std::vector<int64_t>{-1},
false,
&max_eigenvalue_tensor);
DenseTensor temp_rtol_tensor;
temp_rtol_tensor =
phi::Full<T, Context>(dev_ctx, {1}, static_cast<T>(rtol_T));
DenseTensor rtol_tensor =
phi::Multiply<T>(dev_ctx, temp_rtol_tensor, max_eigenvalue_tensor);
DenseTensor tol_tensor;
tol_tensor.Resize(dim_out);
dev_ctx.template Alloc<T>(&tol_tensor);
funcs::ElementwiseCompute<GreaterElementFunctor<T>, T, T>(
dev_ctx,
atol_tensor,
rtol_tensor,
-1,
GreaterElementFunctor<T>(),
&tol_tensor);
tol_tensor.Resize(detail::NewAxisDim(tol_tensor.dims(), 1));
DenseTensor compare_result;
compare_result.Resize(detail::NewAxisDim(dim_out, k));
dev_ctx.template Alloc<int64_t>(&compare_result);
int axis = -1;
funcs::ElementwiseCompute<funcs::GreaterThanFunctor<T, int64_t>, T, int64_t>(
dev_ctx,
eigenvalue_tensor,
tol_tensor,
axis,
funcs::GreaterThanFunctor<T, int64_t>(),
&compare_result);
phi::SumKernel<int64_t>(dev_ctx,
compare_result,
std::vector<int64_t>{-1},
compare_result.dtype(),
false,
out);
}
} // namespace phi
PD_REGISTER_KERNEL(matrix_rank_tol, // cuda_only
GPU,
ALL_LAYOUT,
phi::MatrixRankTolKernel,
float,
double) {}
#endif // not PADDLE_WITH_HIP
|
82e88f8daf78aee9f420c8cae859a7dcf944c86e.cu
|
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef PADDLE_WITH_HIP
// HIP does not support cusolver
#include "paddle/phi/kernels/matrix_rank_tol_kernel.h"
#include <algorithm>
#include <vector>
#include "paddle/fluid/memory/memory.h"
#include "paddle/phi/backends/dynload/cusolver.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/abs_kernel.h"
#include "paddle/phi/kernels/elementwise_multiply_kernel.h"
#include "paddle/phi/kernels/full_kernel.h"
#include "paddle/phi/kernels/funcs/broadcast_function.h"
#include "paddle/phi/kernels/funcs/compare_functors.h"
#include "paddle/phi/kernels/impl/matrix_rank_kernel_impl.h"
#include "paddle/phi/kernels/reduce_max_kernel.h"
#include "paddle/phi/kernels/reduce_sum_kernel.h"
namespace phi {
template <typename T>
static void GesvdjBatched(const phi::GPUContext& dev_ctx,
int batchSize,
int m,
int n,
int k,
T* A,
T* U,
T* V,
T* S,
int* info,
int thin_UV = 1);
template <typename T>
void SyevjBatched(const phi::GPUContext& dev_ctx,
int batchSize,
int n,
T* A,
T* W,
int* info);
template <>
void GesvdjBatched<float>(const phi::GPUContext& dev_ctx,
int batchSize,
int m,
int n,
int k,
float* A,
float* U,
float* V,
float* S,
int* info,
int thin_UV) {
// do not compute singular vectors
const cusolverEigMode_t jobz = CUSOLVER_EIG_MODE_NOVECTOR;
gesvdjInfo_t gesvdj_params = NULL;
int lda = m;
int ldu = m;
int ldt = n;
int lwork = 0;
auto handle = dev_ctx.cusolver_dn_handle();
PADDLE_ENFORCE_GPU_SUCCESS(
dynload::cusolverDnCreateGesvdjInfo(&gesvdj_params));
PADDLE_ENFORCE_GPU_SUCCESS(
dynload::cusolverDnSgesvdj_bufferSize(handle,
jobz,
thin_UV,
m,
n,
A,
lda,
S,
U,
ldu,
V,
ldt,
&lwork,
gesvdj_params));
auto workspace = paddle::memory::Alloc(dev_ctx, lwork * sizeof(float));
float* workspace_ptr = reinterpret_cast<float*>(workspace->ptr());
int stride_A = lda * n;
int stride_U = ldu * (thin_UV ? k : m);
int stride_V = ldt * (thin_UV ? k : n);
for (int i = 0; i < batchSize; i++) {
PADDLE_ENFORCE_GPU_SUCCESS(dynload::cusolverDnSgesvdj(handle,
jobz,
thin_UV,
m,
n,
A + stride_A * i,
lda,
S + k * i,
U + stride_U * i,
ldu,
V + stride_V * i,
ldt,
workspace_ptr,
lwork,
info,
gesvdj_params));
int error_info;
paddle::memory::Copy(phi::CPUPlace(),
&error_info,
dev_ctx.GetPlace(),
info,
sizeof(int),
dev_ctx.stream());
PADDLE_ENFORCE_EQ(
error_info,
0,
phi::errors::PreconditionNotMet(
"For batch [%d]: CUSolver SVD is not zero. [%d]", i, error_info));
}
PADDLE_ENFORCE_GPU_SUCCESS(
dynload::cusolverDnDestroyGesvdjInfo(gesvdj_params));
}
template <>
void GesvdjBatched<double>(const phi::GPUContext& dev_ctx,
int batchSize,
int m,
int n,
int k,
double* A,
double* U,
double* V,
double* S,
int* info,
int thin_UV) {
// do not compute singular vectors
const cusolverEigMode_t jobz = CUSOLVER_EIG_MODE_NOVECTOR;
gesvdjInfo_t gesvdj_params = NULL;
int lda = m;
int ldu = m;
int ldt = n;
int lwork = 0;
auto handle = dev_ctx.cusolver_dn_handle();
PADDLE_ENFORCE_GPU_SUCCESS(
dynload::cusolverDnCreateGesvdjInfo(&gesvdj_params));
PADDLE_ENFORCE_GPU_SUCCESS(
dynload::cusolverDnDgesvdj_bufferSize(handle,
jobz,
thin_UV,
m,
n,
A,
lda,
S,
U,
ldu,
V,
ldt,
&lwork,
gesvdj_params));
auto workspace = paddle::memory::Alloc(dev_ctx, lwork * sizeof(double));
double* workspace_ptr = reinterpret_cast<double*>(workspace->ptr());
int stride_A = lda * n;
int stride_U = ldu * (thin_UV ? k : m);
int stride_V = ldt * (thin_UV ? k : n);
for (int i = 0; i < batchSize; ++i) {
PADDLE_ENFORCE_GPU_SUCCESS(dynload::cusolverDnDgesvdj(handle,
jobz,
thin_UV,
m,
n,
A + stride_A * i,
lda,
S + k * i,
U + stride_U * i,
ldu,
V + stride_V * i,
ldt,
workspace_ptr,
lwork,
info,
gesvdj_params));
// check the error info
int error_info;
paddle::memory::Copy(phi::CPUPlace(),
&error_info,
dev_ctx.GetPlace(),
info,
sizeof(int),
dev_ctx.stream());
PADDLE_ENFORCE_EQ(
error_info,
0,
phi::errors::PreconditionNotMet(
"For batch [%d]: CUSolver SVD is not zero. [%d]", i, error_info));
}
PADDLE_ENFORCE_GPU_SUCCESS(
dynload::cusolverDnDestroyGesvdjInfo(gesvdj_params));
}
template <>
void SyevjBatched<float>(const phi::GPUContext& dev_ctx,
int batchSize,
int n,
float* A,
float* W,
int* info) {
auto handle = dev_ctx.cusolver_dn_handle();
// Compute eigenvalues only
const cusolverEigMode_t jobz = CUSOLVER_EIG_MODE_NOVECTOR;
  // the matrix is stored column-major inside cusolver, so the row-major input is seen
  // as its transpose; numpy and torch use the lower triangle to compute eigenvalues,
  // so here we pass the upper triangle of the transposed view, which reads the same data
cublasFillMode_t uplo = CUBLAS_FILL_MODE_UPPER;
int lda = n;
int stride_A = lda * n;
int lwork = 0;
syevjInfo_t params = NULL;
PADDLE_ENFORCE_GPU_SUCCESS(dynload::cusolverDnCreateSyevjInfo(¶ms));
PADDLE_ENFORCE_GPU_SUCCESS(dynload::cusolverDnSsyevj_bufferSize(
handle, jobz, uplo, n, A, lda, W, &lwork, params));
auto workspace = paddle::memory::Alloc(dev_ctx, lwork * sizeof(float));
float* workspace_ptr = reinterpret_cast<float*>(workspace->ptr());
for (int i = 0; i < batchSize; i++) {
PADDLE_ENFORCE_GPU_SUCCESS(dynload::cusolverDnSsyevj(handle,
jobz,
uplo,
n,
A + stride_A * i,
lda,
W + n * i,
workspace_ptr,
lwork,
info,
params));
int error_info;
paddle::memory::Copy(phi::CPUPlace(),
&error_info,
dev_ctx.GetPlace(),
info,
sizeof(int),
dev_ctx.stream());
PADDLE_ENFORCE_EQ(
error_info,
0,
phi::errors::PreconditionNotMet(
"For batch [%d]: CUSolver eigenvalues is not zero. [%d]",
i,
error_info));
}
PADDLE_ENFORCE_GPU_SUCCESS(dynload::cusolverDnDestroySyevjInfo(params));
}
template <>
void SyevjBatched<double>(const phi::GPUContext& dev_ctx,
int batchSize,
int n,
double* A,
double* W,
int* info) {
auto handle = dev_ctx.cusolver_dn_handle();
// Compute eigenvalues only
const cusolverEigMode_t jobz = CUSOLVER_EIG_MODE_NOVECTOR;
// upper triangle of A is stored
cublasFillMode_t uplo = CUBLAS_FILL_MODE_UPPER;
int lda = n;
int stride_A = lda * n;
int lwork = 0;
syevjInfo_t params = NULL;
PADDLE_ENFORCE_GPU_SUCCESS(dynload::cusolverDnCreateSyevjInfo(¶ms));
PADDLE_ENFORCE_GPU_SUCCESS(dynload::cusolverDnDsyevj_bufferSize(
handle, jobz, uplo, n, A, lda, W, &lwork, params));
auto workspace = paddle::memory::Alloc(dev_ctx, lwork * sizeof(double));
double* workspace_ptr = reinterpret_cast<double*>(workspace->ptr());
for (int i = 0; i < batchSize; i++) {
PADDLE_ENFORCE_GPU_SUCCESS(dynload::cusolverDnDsyevj(handle,
jobz,
uplo,
n,
A + stride_A * i,
lda,
W + n * i,
workspace_ptr,
lwork,
info,
params));
int error_info;
paddle::memory::Copy(phi::CPUPlace(),
&error_info,
dev_ctx.GetPlace(),
info,
sizeof(int),
dev_ctx.stream());
PADDLE_ENFORCE_EQ(
error_info,
0,
phi::errors::PreconditionNotMet(
"For batch [%d]: CUSolver eigenvalues is not zero. [%d]",
i,
error_info));
}
PADDLE_ENFORCE_GPU_SUCCESS(dynload::cusolverDnDestroySyevjInfo(params));
}
template <typename T, typename Context>
void MatrixRankTolKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& atol_tensor,
bool use_default_tol,
bool hermitian,
DenseTensor* out) {
auto* x_data = x.data<T>();
dev_ctx.template Alloc<int64_t>(out);
auto dim_x = x.dims();
auto dim_out = out->dims();
int rows = dim_x[dim_x.size() - 2];
int cols = dim_x[dim_x.size() - 1];
int k = std::min(rows, cols);
auto numel = x.numel();
int batches = numel / (rows * cols);
T rtol_T = 0;
if (use_default_tol) {
rtol_T = std::numeric_limits<T>::epsilon() * std::max(rows, cols);
}
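  // Sketch of the tolerance logic implemented below (assuming GreaterElementFunctor
  // picks the larger of its two inputs):
  //   tol_j  = max(atol_j, rtol * sigma_max_j)   for matrix j in the batch
  //   rank_j = #{ sigma_i > tol_j }
  // where sigma_i are the singular values (|eigenvalues| in the hermitian case).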
  // Must copy X once, because gesvdj destroys the contents of its input on exit.
DenseTensor x_tmp;
paddle::framework::TensorCopy(x, dev_ctx.GetPlace(), &x_tmp);
auto info = paddle::memory::Alloc(dev_ctx, sizeof(int) * batches);
int* info_ptr = reinterpret_cast<int*>(info->ptr());
DenseTensor eigenvalue_tensor;
eigenvalue_tensor.Resize(detail::GetEigenvalueDim(dim_x, k));
auto* eigenvalue_data = dev_ctx.template Alloc<T>(&eigenvalue_tensor);
if (hermitian) {
SyevjBatched<T>(
dev_ctx, batches, rows, x_tmp.data<T>(), eigenvalue_data, info_ptr);
phi::AbsKernel<T, Context>(dev_ctx, eigenvalue_tensor, &eigenvalue_tensor);
} else {
DenseTensor U, VH;
U.Resize(detail::GetUDDim(dim_x, k));
VH.Resize(detail::GetVHDDim(dim_x, k));
auto* u_data = dev_ctx.template Alloc<T>(&U);
auto* vh_data = dev_ctx.template Alloc<T>(&VH);
GesvdjBatched<T>(dev_ctx,
batches,
cols,
rows,
k,
x_tmp.data<T>(),
vh_data,
u_data,
eigenvalue_data,
info_ptr,
1);
}
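  // Note: cusolver expects column-major storage, so the row-major input is effectively its
  // transpose; swapping rows/cols (and U/VH) in the call above yields the same singular
  // values, which is all the rank computation needs.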
DenseTensor max_eigenvalue_tensor;
dev_ctx.template Alloc<T>(&max_eigenvalue_tensor);
max_eigenvalue_tensor.Resize(detail::RemoveLastDim(eigenvalue_tensor.dims()));
phi::MaxKernel<T, Context>(dev_ctx,
eigenvalue_tensor,
std::vector<int64_t>{-1},
false,
&max_eigenvalue_tensor);
DenseTensor temp_rtol_tensor;
temp_rtol_tensor =
phi::Full<T, Context>(dev_ctx, {1}, static_cast<T>(rtol_T));
DenseTensor rtol_tensor =
phi::Multiply<T>(dev_ctx, temp_rtol_tensor, max_eigenvalue_tensor);
DenseTensor tol_tensor;
tol_tensor.Resize(dim_out);
dev_ctx.template Alloc<T>(&tol_tensor);
funcs::ElementwiseCompute<GreaterElementFunctor<T>, T, T>(
dev_ctx,
atol_tensor,
rtol_tensor,
-1,
GreaterElementFunctor<T>(),
&tol_tensor);
tol_tensor.Resize(detail::NewAxisDim(tol_tensor.dims(), 1));
DenseTensor compare_result;
compare_result.Resize(detail::NewAxisDim(dim_out, k));
dev_ctx.template Alloc<int64_t>(&compare_result);
int axis = -1;
funcs::ElementwiseCompute<funcs::GreaterThanFunctor<T, int64_t>, T, int64_t>(
dev_ctx,
eigenvalue_tensor,
tol_tensor,
axis,
funcs::GreaterThanFunctor<T, int64_t>(),
&compare_result);
phi::SumKernel<int64_t>(dev_ctx,
compare_result,
std::vector<int64_t>{-1},
compare_result.dtype(),
false,
out);
}
} // namespace phi
PD_REGISTER_KERNEL(matrix_rank_tol, // cuda_only
GPU,
ALL_LAYOUT,
phi::MatrixRankTolKernel,
float,
double) {}
#endif // not PADDLE_WITH_HIP
|
8f4fe9d64121cb1ef837cef9a20ced017068e56a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// ----
// ---- Computes the potential field for a volume
// ---- Input: volume file, dimensions: X, Y, Z, output file name
// ---- Output: normalized potential field:
// 1 vector for each point in the volume
//
// Last change: Thu May 15 15:20:38 EDT 2003 by Nicu D. Cornea
//
//
// #define TRACE
#include "potVect.h"
#include <thrust/sort.h>
#define BOUND_SIZE 1200000
struct compareStruct {
__host__ __device__
bool operator()(VoxelPosition a, VoxelPosition b) {
if(a.z != b.z)
return a.z < b.z;
else if(a.y != b.y)
return a.y < b.y;
else
return a.x < b.x;
}
};
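// The comparator above orders boundary voxels by Z, then Y, then X; the kernels below rely
// on this ordering to narrow the range of boundary points examined per voxel
// (the PF_THRESHOLD windows).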
bool GetIndexOfBPInXYZRange(
short sx, short sy, short sz,
short ex, short ey, short ez,
VoxelPosition* Bound, int numBound,
int* startIndex, int* endIndex);
bool GetIndexOfBPInZRange(
short z1, short z2,
VoxelPosition* Bound, int numBound,
int* startIndex, int* endIndex);
bool GetIndexOfBPInYRange(
short y1, short y2,
VoxelPosition* Bound, int numBound,
int startAt, int endAt,
int* startIndex, int* endIndex);
bool GetIndexOfBPInXRange(
short x1, short x2,
VoxelPosition* Bound, int numBound,
int startAt, int endAt,
int* startIndex, int* endIndex);
bool SortBoundaryArray(int numBound, VoxelPosition Bound[]);
bool SortByX(int startAt, int endAt, VoxelPosition Bound[]);
bool SortByY(int startAt, int endAt, VoxelPosition Bound[]);
bool SortByZ(int startAt, int endAt, VoxelPosition Bound[]);
__global__ void normalize_vector(Vector* force,unsigned char* f, bool inOut,int slsz,int L)
{
int k=blockIdx.x;
int j=blockIdx.y;
int i=threadIdx.x;
int idx=k*slsz + j*L + i;
if(!inOut) {
// only for interior voxels we had calculated forces
if(f[idx] == EXTERIOR) return;
}
float r = force[idx].xd*force[idx].xd +
force[idx].yd*force[idx].yd +
force[idx].zd*force[idx].zd;
if(r > 0.00) {
r = sqrtf(r);
force[idx].xd = force[idx].xd / r;
force[idx].yd = force[idx].yd / r;
force[idx].zd = force[idx].zd / r;
}
}
__global__ void compute_potential_field(VoxelPosition *Bound,Vector* force,int numBound,unsigned char* f,bool inOut,int slsz,int sz,int L, int fieldStrenght)
{
int k=blockIdx.x;
int j=blockIdx.y;
int i=threadIdx.x;
int zStartIndex = 0;
int zEndIndex = numBound- 1;
int s;
for (s = 0; s < numBound; s++) {
if((k - Bound[s].z) <= PF_THRESHOLD) {
zStartIndex = s;
break;
}
}
for (s = numBound-1; s >= zStartIndex; s--) {
if((Bound[s].z - k) <= PF_THRESHOLD) {
zEndIndex = s;
break;
}
}
int yStartIndex = zStartIndex;
int yEndIndex = zEndIndex;
for (s = zStartIndex; s <= zEndIndex; s++) {
if((j - Bound[s].y) <= PF_THRESHOLD) {
yStartIndex = s;
break;
}
}
for (s = zEndIndex; s >= yStartIndex; s--) {
if((Bound[s].y - j) <= PF_THRESHOLD) {
yEndIndex = s;
break;
}
}
int idx=k*slsz + j*L + i;
force[idx].xd = 0.00;
force[idx].yd = 0.00;
force[idx].zd = 0.00;
if(!inOut) {
if(f[idx] == 0) {
// outside voxels have null force
return;
}
}
else {
// we don't know where the inside of the object is
// so we compute the vector field everywhere.
// NOTHING
}
if(f[idx] == SURF) return;
if(f[idx] == BOUNDARY) return;
int startIndex = yStartIndex;
int endIndex = yEndIndex;
for (s = yStartIndex; s <= yEndIndex; s++) {
if((i - Bound[s].x) <= PF_THRESHOLD) {
startIndex = s;
break;
}
}
for (s = yEndIndex; s >= startIndex; s--) {
if((Bound[s].x - i) <= PF_THRESHOLD) {
endIndex = s;
break;
}
}
if(endIndex < startIndex) {
// no boundary point is close enough to this point - take all the boundary points
startIndex = 0;
endIndex = numBound - 1;
}
for (s = startIndex; s <= endIndex; s++) {
// printf("%d %d %d\n", i - Bound[s].x, j - Bound[s].y, k - Bound[s].z);
/*
// visibility test - too slow
if(GetIndexOfBPInXYZRange(i, j, k, Bound[s].x, Bound[s].y, Bound[s].z,
Bound, numBound, &v1, &v2))
{
  // check if this boundary point is visible from the current position
if (IsLineCrossingBoundary(i, j, k, Bound[s].x, Bound[s].y, Bound[s].z, L, M, N, f)) {
// not visible
continue;
}
}
*/
float v1 = i - Bound[s].x;
float v2 = j - Bound[s].y;
float v3 = k - Bound[s].z;
float r, t;
#ifdef EUCLIDEAN_METRIC
// euclidean metric
r = sqrtf(v1*v1 + v2*v2 + v3*v3);
#else
// simpler metric
r = abs(v1) + abs(v2) + abs(v3);
#endif
// r CAN BE 0 if we are computing the force
// at boundary voxels too
// if the current point is a BOUNDARY point,
// some r will be 0, and that should be
// ignored
if(r != 0.00) {
// raise r to the fieldStrenght+1 power
// so that the force is
// 1/(dist^fieldStrength)
t = 1.00;
for(int p = 0; p <= fieldStrenght; p++) {
t = t * r;
}
r = t;
force[idx].xd = force[idx].xd + (v1 / r);
force[idx].yd = force[idx].yd + (v2 / r);
force[idx].zd = force[idx].zd + (v3 / r);
}
}
}
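// A sketch of what the kernel above computes for each interior voxel p:
//   F(p) = sum over nearby boundary voxels b of (p - b) / |p - b|^(fieldStrenght + 1)
// with |.| either the Euclidean or the L1 metric depending on EUCLIDEAN_METRIC;
// normalize_vector then rescales F(p) to unit length.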
__global__ void computePotentialFieldForBoundaryVoxels(int *ng, unsigned char* f, Vector* force, int slsz, bool inOut, int L) {
int k=blockIdx.x+1;
int j=blockIdx.y+1;
int i=threadIdx.x+1;
long idx = k*slsz + j*L + i;
if((f[idx] == SURF) ||
(f[idx] == BOUNDARY))
{
force[idx].xd = 0.00;
force[idx].yd = 0.00;
force[idx].zd = 0.00;
float var_xd=0.00;
float var_yd=0.00;
float var_zd=0.00;
// look at the neighbors and average the forces if not 0
//
int v1 = 0;
for(int s=0; s < 26; s++) {
long iidx = idx + ng[s]; // index of neighbor
// take only neighbors that are not SURF or BOUNDARY
// because those neighbors have force = 0
if(f[iidx] == SURF) continue;
if(f[iidx] == BOUNDARY) continue;
// if we know the interior of the object, take only interior
// neighbors
if(!inOut) {
if(f[iidx] == EXTERIOR) continue;
}
var_xd = var_xd + force[iidx].xd;
var_yd = var_yd + force[iidx].yd;
var_zd = var_zd + force[iidx].zd;
v1 = v1 + 1;
}
// average
if(v1 != 0) {
var_xd = var_xd / (double) v1;
var_yd= var_yd / (double) v1;
var_zd = var_zd / (double) v1;
}
else {
printf("Boundary voxel has no interior neighbor !!! - Force = 0\n");
}
// normalize
float r = var_xd*var_xd +
var_yd*var_yd +
var_zd*var_zd;
if(r > 0.00) {
r = sqrtf(r);
force[idx].xd = var_xd / r;
force[idx].yd = var_yd / r;
force[idx].zd = var_zd/ r;
}
}
}
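// SURF/BOUNDARY voxels get no direct field contribution: the kernel above assigns them the
// normalized average of the forces of their interior (non-SURF, non-BOUNDARY) 26-neighbors.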
bool CalculatePotentialField(
int L, int M, int N, // [in] size of volume
unsigned char* f, // [in] volume flags
  int fieldStrenght,          // [in] potential field strength
Vector* force, // [out] force field
bool inOut // [in] flag indicating that we don't
// know what the inside/outside of
// the object is. We have only point
// samples of the boundary.
// DEFAULT: false (only interior)
) {
//hipSetDevice(1);
int Lm1, Mm1, Nm1;
int i,j,k, s, p;
long idx, iidx, slsz, sz;
VoxelPosition* Bound;
int numBound = 0;
bool flagSurf, flagBound;
double r, t;
int v1, v2, v3;
int startIndex, tmpStartIndex, endIndex, tmpEndIndex, zStartIndex, zEndIndex, yStartIndex, yEndIndex;
//
// check volume padding - fast version
//
if(!CheckVolumePadding(f, L, M, N)) {
printf("** Error - Object touches bounding box. Abort.\n");
exit(1);
}
#ifdef _DEBUG
printf("\t************ Potential Field calculation parameters: ******************\n");
#ifdef HALF_BOUNDARY_POINTS
printf("\t** Using only HALF of the boundary points.\n");
#else
printf("\t** Using ALL boundary points.\n");
#endif
#ifdef EUCLIDEAN_METRIC
printf("\t** Using EUCLIDEAN metric.\n");
#else
printf("\t** Using NON EUCLIDEAN metric.\n");
#endif
if(inOut) {
printf("\t** Inside and Outside.\n");
}
else {
printf("\t** Inside ONLY.\n");
}
printf("\t********* Potential Field calculation parameters - end ****************\n");
#endif
if((Bound = new VoxelPosition[BOUND_SIZE]) == NULL) {
printf("\nERROR allocating memory for boundary array! - Abort\n");
exit(1);
}
Lm1 = L - 1;
Mm1 = M - 1;
Nm1 = N - 1;
slsz = L*M; // slice size
sz = slsz*N;
// save all the boundary voxels in array Bound[]
for (k = 1; k < Nm1; k++) {
for (j = 1; j < Mm1; j++) {
for (i = 1; i < Lm1; i++) {
flagSurf = false;
flagBound = true;
idx = k*slsz + j*L + i;
// CASE 1: treat the inner layer
if (f[idx] == 0) continue;
      // consider the six face neighbors; if any one is zero, this is a boundary voxel
iidx = k*slsz + j*L + i-1;
if (f[iidx] == 0) {
flagSurf = true;
}
#ifdef HALF_BOUNDARY_POINTS
// consider only half of the boundary points
else {
if (f[iidx] == BOUNDARY) {
// a neighbour of the point was already selected so we will not select this one as part of the boundary.
flagBound = false;
}
}
#endif
if(!flagSurf || flagBound) {
iidx = k*slsz + j*L + i+1;
if (f[iidx] == 0) {
flagSurf = true;
}
#ifdef HALF_BOUNDARY_POINTS
// consider only half of the boundary points
else {
if (f[iidx] == BOUNDARY) {
// a neighbour of the point was already selected so we will not select it as part of the boundary.
flagBound = false;
}
}
#endif
if(!flagSurf || flagBound) {
iidx = k*slsz + (j-1)*L + i;
if (f[iidx] == 0) {
flagSurf = true;
}
#ifdef HALF_BOUNDARY_POINTS
// consider only half of the boundary points
else {
if (f[iidx] == BOUNDARY) {
// a neighbour of the point was already selected so we will not select it as part of the boundary.
flagBound = false;
}
}
#endif
if(!flagSurf || flagBound) {
iidx = k*slsz + (j+1)*L + i;
if (f[iidx] == 0) {
flagSurf = true;
}
#ifdef HALF_BOUNDARY_POINTS
// consider only half of the boundary points
else {
if (f[iidx] == BOUNDARY) {
// a neighbour of the point was already selected so we will not select it as part of the boundary.
flagBound = false;
}
}
#endif
if(!flagSurf || flagBound) {
iidx = (k-1)*slsz + j*L + i;
if (f[iidx] == 0) {
flagSurf = true;
}
#ifdef HALF_BOUNDARY_POINTS
// consider only half of the boundary points
else {
if (f[iidx] == BOUNDARY) {
// a neighbour of the point was already selected so we will not select it as part of the boundary.
flagBound = false;
}
}
#endif
if(!flagSurf || flagBound) {
iidx = (k+1)*slsz + j*L + i;
if (f[iidx] == 0) {
flagSurf = true;
}
#ifdef HALF_BOUNDARY_POINTS
// consider only half of the boundary points
else {
if (f[iidx] == BOUNDARY) {
// a neighbour of the point was already selected so we will not select it as part of the boundary.
flagBound = false;
}
}
#endif
}
}
}
}
}
// restore idx to the right value
idx = k*slsz + j*L + i;
if (flagSurf) {
f[idx] = SURF;
if(flagBound) {
// if no neighbour of this voxel is already marked as boundary, then mark this one.
// or if we are taking all the boundary voxels
// (in this case flagBound stays true)
f[idx] = BOUNDARY;
Bound[numBound].x = i;
Bound[numBound].y = j;
Bound[numBound].z = k;
numBound++;
if(numBound >= BOUND_SIZE) {
printf("ERROR: too many boundary points detected !! - Abort.\n");
exit(1);
}
}
}
}
}
}
//printf("numBound = %d \n", numBound);
#ifdef _DEBUG
PrintElapsedTime("\tPF-1: finding the boundary voxels.");
printf("\t--Found %d boundary voxels.\n", numBound);
#endif
/*
// output boundary voxels
FILE *ff;
unsigned char a;
long int b;
ff=fopen("bound.vol", "w");
for(idx=0; idx < L*M*N; idx++) {
if(f[idx] == BOUNDARY) {
a = 255;
}
else {
a = 0;
}
b = random();
if(b > RAND_MAX/2) {
a = 0;
}
fwrite(&a, sizeof(unsigned char), 1, ff);
b = 0;
}
fclose(ff);
exit(1);
*/
// sort the boundary array.
SortBoundaryArray(numBound, Bound);
#ifdef _DEBUG
PrintElapsedTime("\tPF-2: sorting the boundary voxels.");
#ifdef TRACE
// print the boundary voxels
for(i=0; i < numBound; i++) {
printf("%d %d %d 0.5\n", Bound[i].x, Bound[i].y, Bound[i].z);
}
exit(1);
#endif
#endif
// Compute the potential field
printf("Computing potential field.\n");
dim3 dimBlock(L,1);
dim3 dimGrid(N,M);
VoxelPosition *d_bound;
unsigned char* d_f;
Vector* d_force;
hipMalloc((void **)&d_f,sizeof(unsigned char)*L*M*N);
hipMalloc((void **)&d_bound,sizeof(VoxelPosition)*BOUND_SIZE);
hipMalloc((void **)&d_force,sizeof(Vector)*L*M*N);
hipMemcpy(d_f,f,sizeof(unsigned char)*L*M*N,hipMemcpyHostToDevice);
hipMemcpy(d_bound,Bound,sizeof(VoxelPosition)*BOUND_SIZE,hipMemcpyHostToDevice);
hipMemcpy(d_force,force,sizeof(Vector)*L*M*N,hipMemcpyHostToDevice);
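  // Launch layout: one block per (z, y) pair, one thread per x position, i.e. one thread per
  // voxel. Note this assumes L does not exceed the per-block thread limit (1024 on current
  // GPUs); larger volumes would need a tiled launch.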
hipLaunchKernelGGL(( compute_potential_field), dim3(dimGrid),dim3(dimBlock), 0, 0, d_bound,d_force,numBound,d_f,inOut,slsz,sz,L, fieldStrenght);
hipLaunchKernelGGL(( normalize_vector), dim3(dimGrid),dim3(dimBlock), 0, 0, d_force,d_f,inOut,slsz,L);
// idx = -1;
// for (k = 0; k < N; k++) {
// printf("\tProcessing plane %d out of %d\r", k, N-1);
// fflush(stdout);
// // find the boundary voxels that will influence this point
// // look at the Z coordinate
// zStartIndex = 0;
// zEndIndex = numBound- 1;
// for (s = 0; s < numBound; s++) {
// if((k - Bound[s].z) <= PF_THRESHOLD) {
// zStartIndex = s;
// break;
// }
// }
// for (s = numBound-1; s >= zStartIndex; s--) {
// if((Bound[s].z - k) <= PF_THRESHOLD) {
// zEndIndex = s;
// break;
// }
// }
// // printf("ZStart: %d\t ZEnd: %d\n", zStartIndex, zEndIndex);
// for (j = 0; j < M; j++) {
// // find the boundary voxels that will influence this point
// // look at the Y coordinate
// yStartIndex = zStartIndex;
// yEndIndex = zEndIndex;
// for (s = zStartIndex; s <= zEndIndex; s++) {
// if((j - Bound[s].y) <= PF_THRESHOLD) {
// yStartIndex = s;
// break;
// }
// }
// for (s = zEndIndex; s >= yStartIndex; s--) {
// if((Bound[s].y - j) <= PF_THRESHOLD) {
// yEndIndex = s;
// break;
// }
// }
// // printf("YStart: %d\t YEnd: %d\n", yStartIndex, yEndIndex);
// for (i = 0; i < L; i++) {
// // printf("Point: %d\t%d\t%d:\n", i, j, k);
// // idx = k*slsz + j*L + i;
// idx = idx + 1;
// force[idx].xd = 0.00;
// force[idx].yd = 0.00;
// force[idx].zd = 0.00;
// if(!inOut) {
// if(f[idx] == 0) {
// // outside voxels have null force
// continue;
// }
// }
// else {
// // we don't know where the inside of the object is
// // so we compute the vector field everywhere.
// // NOTHING
// }
// // surface voxels (including those selected for the
// // field calculation)
// // are ignored for now. The force there will be
// // the average of their neighbors
// // if we are to compute the force at boundary
// // voxels too, the force will point
// // towards the exterior of the object
// // (example: a 30x30x100 box)
// if(f[idx] == SURF) continue;
// if(f[idx] == BOUNDARY) continue;
// // find the boundary voxels that will influence this point
// // look at the X coordinate
// startIndex = yStartIndex;
// endIndex = yEndIndex;
// for (s = yStartIndex; s <= yEndIndex; s++) {
// if((i - Bound[s].x) <= PF_THRESHOLD) {
// startIndex = s;
// break;
// }
// }
// for (s = yEndIndex; s >= startIndex; s--) {
// if((Bound[s].x - i) <= PF_THRESHOLD) {
// endIndex = s;
// break;
// }
// }
// // printf("Start at: %d, end at: %d\n", startIndex, endIndex);
// // exit(-1);
// if(endIndex < startIndex) {
// // no boundary point is close enough to this point - take all the boundary points
// startIndex = 0;
// endIndex = numBound - 1;
// }
// for (s = startIndex; s <= endIndex; s++) {
// // printf("%d %d %d\n", i - Bound[s].x, j - Bound[s].y, k - Bound[s].z);
// /*
// // visibility test - too slow
// if(GetIndexOfBPInXYZRange(i, j, k, Bound[s].x, Bound[s].y, Bound[s].z,
// Bound, numBound, &v1, &v2))
// {
  // // check if this boundary point is visible from the current position
// if (IsLineCrossingBoundary(i, j, k, Bound[s].x, Bound[s].y, Bound[s].z, L, M, N, f)) {
// // not visible
// continue;
// }
// }
// */
// v1 = i - Bound[s].x;
// v2 = j - Bound[s].y;
// v3 = k - Bound[s].z;
// #ifdef EUCLIDEAN_METRIC
// // euclidean metric
// r = sqrt(v1*v1 + v2*v2 + v3*v3);
// #else
// // simpler metric
// r = abs(v1) + abs(v2) + abs(v3);
// #endif
// // r CAN BE 0 if we are computing the force
// // at boundary voxels too
// // if the current point is a BOUNDARY point,
// // some r will be 0, and that should be
// // ignored
// if(r != 0.00) {
// // raise r to the fieldStrenght+1 power
// // so that the force is
// // 1/(dist^fieldStrength)
// t = 1.00;
// for(p = 0; p <= fieldStrenght; p++) {
// t = t * r;
// }
// r = t;
// force[idx].xd = force[idx].xd + (v1 / r);
// force[idx].yd = force[idx].yd + (v2 / r);
// force[idx].zd = force[idx].zd + (v3 / r);
// }
// }
// /*
// printf("First point with force vector != 0\n");
// printf("%f\t%f\t%f: %d, %d, %d\n", force[idx].xd, force[idx].yd, force[idx].zd, i, j, k);
// exit(1);
// */
// }
// }
// }
// delete the Bound array - don't need it anymore
delete [] Bound;
#ifdef _DEBUG
PrintElapsedTime("\tPF-3: computing potential field for inside voxels.");
#endif
// normalize force vectors:
// for(idx=0; idx < L*M*N; idx++) {
// if(!inOut) {
// // only for interior voxels we had calculated forces
// if(f[idx] == EXTERIOR) continue;
// }
// r = force[idx].xd*force[idx].xd +
// force[idx].yd*force[idx].yd +
// force[idx].zd*force[idx].zd;
// if(r > 0.00) {
// r = sqrt(r);
// force[idx].xd = force[idx].xd / r;
// force[idx].yd = force[idx].yd / r;
// force[idx].zd = force[idx].zd / r;
// }
// }
#ifdef _DEBUG
PrintElapsedTime("\tPF-4: normalizing force vectors for inside voxels.");
#endif
// if we know the inside from the outside
// calculate the force at the surface voxels as the average of the
// interior neighbors
if (!inOut) {
//neighbors:
int ng[26];
// face neighbors
ng[0] = + slsz + 0 + 0;
ng[1] = - slsz + 0 + 0;
ng[2] = + 0 + L + 0;
ng[3] = + 0 - L + 0;
ng[4] = + 0 + 0 + 1;
ng[5] = + 0 + 0 - 1;
// v-neighbors
ng[6] = - slsz - L - 1;
ng[7] = - slsz - L + 1;
ng[8] = - slsz + L - 1;
ng[9] = - slsz + L + 1;
ng[10] = + slsz - L - 1;
ng[11] = + slsz - L + 1;
ng[12] = + slsz + L - 1;
ng[13] = + slsz + L + 1;
// e-neighbors
ng[14] = + slsz + L + 0;
ng[15] = + slsz - L + 0;
ng[16] = - slsz + L + 0;
ng[17] = - slsz - L + 0;
ng[18] = + slsz + 0 + 1;
ng[19] = + slsz + 0 - 1;
ng[20] = - slsz + 0 + 1;
ng[21] = - slsz + 0 - 1;
ng[22] = + 0 + L + 1;
ng[23] = + 0 + L - 1;
ng[24] = + 0 - L + 1;
ng[25] = + 0 - L - 1;
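    // The offsets above encode the 26-neighborhood in the linearized index: for a neighbor
    // displaced by (di, dj, dk) from voxel (i, j, k),
    //   offset = dk*slsz + dj*L + di
    // giving the 6 face, 8 vertex and 12 edge neighbors.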
dim3 dimBlock(Lm1-1,1);
dim3 dimGrid(Nm1-1,Mm1-1);
int *d_ng;
hipMalloc((void **)&d_ng,sizeof(int)*26);
hipMemcpy(d_ng,ng,sizeof(int)*26,hipMemcpyHostToDevice);
hipLaunchKernelGGL(( computePotentialFieldForBoundaryVoxels), dim3(dimGrid),dim3(dimBlock), 0, 0, d_ng, d_f, d_force, slsz, inOut, L);
/*for (k = 1; k < Nm1; k++) {
for (j = 1; j < Mm1; j++) {
for (i = 1; i < Lm1; i++) {
idx = k*slsz + j*L + i;
if((f[idx] == SURF) ||
(f[idx] == BOUNDARY))
{
force[idx].xd = 0.00;
force[idx].yd = 0.00;
force[idx].zd = 0.00;
// look at the neighbors and average the forces if not 0
//
v1 = 0;
for(s=0; s < 26; s++) {
iidx = idx + ng[s]; // index of neighbor
// take only neighbors that are not SURF or BOUNDARY
// because those neighbors have force = 0
if(f[iidx] == SURF) continue;
if(f[iidx] == BOUNDARY) continue;
// if we know the interior of the object, take only interior
// neighbors
if(!inOut) {
if(f[iidx] == EXTERIOR) continue;
}
force[idx].xd = force[idx].xd + force[iidx].xd;
force[idx].yd = force[idx].yd + force[iidx].yd;
force[idx].zd = force[idx].zd + force[iidx].zd;
v1 = v1 + 1;
}
// average
if(v1 != 0) {
force[idx].xd = force[idx].xd / (double) v1;
force[idx].yd = force[idx].yd / (double) v1;
force[idx].zd = force[idx].zd / (double) v1;
}
else {
printf("Boundary voxel has no interior neighbor !!! - Force = 0\n");
}
// normalize
r = force[idx].xd*force[idx].xd +
force[idx].yd*force[idx].yd +
force[idx].zd*force[idx].zd;
if(r > 0.00) {
r = sqrt(r);
force[idx].xd = force[idx].xd / r;
force[idx].yd = force[idx].yd / r;
force[idx].zd = force[idx].zd / r;
}
}
}
}
}*/
  }
  else {
    // we don't know the inside from the outside.
    // boundary points remain 0
    // nothing to do
  }
  // copy the computed force field back to the host in both cases; otherwise the [out]
  // force parameter would never be filled when inOut is true
  hipMemcpy(force,d_force,sizeof(Vector)*L*M*N,hipMemcpyDeviceToHost);
#ifdef _DEBUG
PrintElapsedTime("\tPF-5: computing potential field for boundary voxels.");
#endif
return true;
}
// Sort the boundary array so that we can speed up the potential field calculation: ZYX in that order
// selection sort
/*bool SortBoundaryArray(int numBound, VoxelPosition Bound[]) {
int st, i;
short zvst, yvst;
// sort by Z
SortByZ(0, numBound-1, Bound);
// then by Y
st = 0;
zvst = Bound[st].z;
for(i=0; i < numBound; i++) {
if(Bound[i].z != zvst) {
SortByY(st, i-1, Bound);
st = i;
zvst = Bound[st].z;
}
}
SortByY(st, numBound-1, Bound);
// then by X
st = 0;
zvst = Bound[st].z;
yvst = Bound[st].y;
for(i=0; i < numBound; i++) {
if((Bound[i].y != yvst) || (Bound[i].z != zvst)) {
SortByX(st, i-1, Bound);
st = i;
zvst = Bound[st].z;
yvst = Bound[st].y;
}
}
SortByX(st, numBound-1, Bound);
return true;
}*/
compareStruct comp;
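// Bound lives in host memory; thrust::sort called with raw host pointers is expected to
// dispatch to the host backend (wrap the pointers in thrust::device_ptr to sort on the GPU).
// The comparator gives the same Z-Y-X ordering as the selection sorts kept below.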
bool SortBoundaryArray(int numBound, VoxelPosition Bound[]) {
thrust::sort(Bound, Bound+numBound, comp);
return true;
}
bool SortByX(int startAt, int endAt, VoxelPosition Bound[]) {
int i, j, minIndex, crtMin;
short tmp;
for(i=startAt; i <= endAt; i++) {
minIndex = -1;
crtMin = Bound[i].x;
for(j=i+1; j <= endAt; j++) {
if(Bound[j].x < crtMin) {
minIndex = j;
crtMin = Bound[j].x;
}
}
if(minIndex != -1) {
// swap values.
tmp = Bound[i].x;
Bound[i].x = Bound[minIndex].x;
Bound[minIndex].x = tmp;
tmp = Bound[i].y;
Bound[i].y = Bound[minIndex].y;
Bound[minIndex].y = tmp;
tmp = Bound[i].z;
Bound[i].z = Bound[minIndex].z;
Bound[minIndex].z = tmp;
}
}
return true;
}
bool SortByY(int startAt, int endAt, VoxelPosition Bound[]) {
int i, j, minIndex, crtMin;
short tmp;
for(i=startAt; i <= endAt; i++) {
minIndex = -1;
crtMin = Bound[i].y;
for(j=i+1; j <= endAt; j++) {
if(Bound[j].y < crtMin) {
minIndex = j;
crtMin = Bound[j].y;
}
}
if(minIndex != -1) {
// swap values.
tmp = Bound[i].x;
Bound[i].x = Bound[minIndex].x;
Bound[minIndex].x = tmp;
tmp = Bound[i].y;
Bound[i].y = Bound[minIndex].y;
Bound[minIndex].y = tmp;
tmp = Bound[i].z;
Bound[i].z = Bound[minIndex].z;
Bound[minIndex].z = tmp;
}
}
return true;
}
bool SortByZ(int startAt, int endAt, VoxelPosition Bound[]) {
int i, j, minIndex, crtMin;
short tmp;
for(i=startAt; i <= endAt; i++) {
minIndex = -1;
crtMin = Bound[i].z;
for(j=i+1; j <= endAt; j++) {
if(Bound[j].z < crtMin) {
minIndex = j;
crtMin = Bound[j].z;
}
}
if(minIndex != -1) {
// swap values.
tmp = Bound[i].x;
Bound[i].x = Bound[minIndex].x;
Bound[minIndex].x = tmp;
tmp = Bound[i].y;
Bound[i].y = Bound[minIndex].y;
Bound[minIndex].y = tmp;
tmp = Bound[i].z;
Bound[i].z = Bound[minIndex].z;
Bound[minIndex].z = tmp;
}
}
return true;
}
// returns the start and end index of the boundary points found in a region
// of space bounded by the box defined by the 2 points.
// it doesn't change startIndex or endIndex if it returns false;
// returns true if it finds any boundary point in that region, false otherwise.
bool GetIndexOfBPInXYZRange(
short sx, short sy, short sz,
short ex, short ey, short ez,
VoxelPosition* Bound, int numBound,
int* startIndex, int* endIndex)
{
int si1, ei1, si2, ei2; // temporary start and end indexes
//
if(GetIndexOfBPInZRange(sz, ez, Bound, numBound, &si1, &ei1)) {
if(GetIndexOfBPInYRange(sy, ey, Bound, numBound, si1, ei1, &si2, &ei2)) {
if(GetIndexOfBPInXRange(sx, ex, Bound, numBound, si2, ei2, &si1, &ei1)) {
(*startIndex) = si1;
(*endIndex) = ei1;
return true;
}
}
}
return false;
}
bool GetIndexOfBPInZRange(
short z1, short z2,
VoxelPosition* Bound, int numBound,
int* startIndex, int* endIndex)
{
short minz, maxz;
int s;
int si;
// sort the 2 z values;
if(z1 < z2) {
minz = z1; maxz = z2;
}
else {
minz = z2; maxz = z1;
}
si = -1;
for (s = 0; s < numBound; s++) {
if((minz - Bound[s].z) < 0) {
si = s;
break;
}
}
if(si == -1) {
// couldn't find any boundary voxel
return false;
}
(*startIndex) = si;
for (s = numBound-1; s >= (*startIndex); s--) {
if((Bound[s].z - maxz) < 0) {
(*endIndex) = s;
break;
}
}
return true;
}
bool GetIndexOfBPInYRange(
short y1, short y2,
VoxelPosition* Bound, int numBound,
int startAt, int endAt,
int* startIndex, int* endIndex)
{
short miny, maxy;
int s;
int si;
// sort the 2 y values;
if(y1 < y2) {
miny = y1; maxy = y2;
}
else {
miny = y2; maxy = y1;
}
// start the search at startAt and end it endAt
si = -1;
for (s = startAt; s <= endAt; s++) {
if((miny - Bound[s].y) < 0) {
si = s;
break;
}
}
if(si == -1) {
// couldn't find any boundary voxel
return false;
}
(*startIndex) = si;
for (s = endAt; s >= (*startIndex); s--) {
if((Bound[s].y - maxy) < 0) {
(*endIndex) = s;
break;
}
}
return true;
}
bool GetIndexOfBPInXRange(
short x1, short x2,
VoxelPosition* Bound, int numBound,
int startAt, int endAt,
int* startIndex, int* endIndex)
{
short minx, maxx;
int s;
int si;
// sort the 2 x values;
if(x1 < x2) {
minx = x1; maxx = x2;
}
else {
minx = x2; maxx = x1;
}
// start the search at startAt and end it endAt
si = -1;
for (s = startAt; s <= endAt; s++) {
if((minx - Bound[s].x) < 0) {
si = s;
break;
}
}
if(si == -1) {
// couldn't find any boundary voxel
return false;
}
(*startIndex) = si;
for (s = endAt; s >= (*startIndex); s--) {
if((Bound[s].x - maxx) < 0) {
(*endIndex) = s;
break;
}
}
return true;
}
|
8f4fe9d64121cb1ef837cef9a20ced017068e56a.cu
|
// ----
// ---- Computes the potential field for a volume
// ---- Input: volume file, dimensions: X, Y, Z, output file name
// ---- Output: normalized potential field:
// 1 vector for each point in the volume
//
// Last change: Thu May 15 15:20:38 EDT 2003 by Nicu D. Cornea
//
//
// #define TRACE
#include "potVect.h"
#include <thrust/sort.h>
#define BOUND_SIZE 1200000
struct compareStruct {
__host__ __device__
bool operator()(VoxelPosition a, VoxelPosition b) {
if(a.z != b.z)
return a.z < b.z;
else if(a.y != b.y)
return a.y < b.y;
else
return a.x < b.x;
}
};
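// The comparator above orders boundary voxels by Z, then Y, then X; the kernels below rely
// on this ordering to narrow the range of boundary points examined per voxel
// (the PF_THRESHOLD windows).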
bool GetIndexOfBPInXYZRange(
short sx, short sy, short sz,
short ex, short ey, short ez,
VoxelPosition* Bound, int numBound,
int* startIndex, int* endIndex);
bool GetIndexOfBPInZRange(
short z1, short z2,
VoxelPosition* Bound, int numBound,
int* startIndex, int* endIndex);
bool GetIndexOfBPInYRange(
short y1, short y2,
VoxelPosition* Bound, int numBound,
int startAt, int endAt,
int* startIndex, int* endIndex);
bool GetIndexOfBPInXRange(
short x1, short x2,
VoxelPosition* Bound, int numBound,
int startAt, int endAt,
int* startIndex, int* endIndex);
bool SortBoundaryArray(int numBound, VoxelPosition Bound[]);
bool SortByX(int startAt, int endAt, VoxelPosition Bound[]);
bool SortByY(int startAt, int endAt, VoxelPosition Bound[]);
bool SortByZ(int startAt, int endAt, VoxelPosition Bound[]);
__global__ void normalize_vector(Vector* force,unsigned char* f, bool inOut,int slsz,int L)
{
int k=blockIdx.x;
int j=blockIdx.y;
int i=threadIdx.x;
int idx=k*slsz + j*L + i;
if(!inOut) {
// only for interior voxels we had calculated forces
if(f[idx] == EXTERIOR) return;
}
float r = force[idx].xd*force[idx].xd +
force[idx].yd*force[idx].yd +
force[idx].zd*force[idx].zd;
if(r > 0.00) {
r = sqrtf(r);
force[idx].xd = force[idx].xd / r;
force[idx].yd = force[idx].yd / r;
force[idx].zd = force[idx].zd / r;
}
}
__global__ void compute_potential_field(VoxelPosition *Bound,Vector* force,int numBound,unsigned char* f,bool inOut,int slsz,int sz,int L, int fieldStrenght)
{
int k=blockIdx.x;
int j=blockIdx.y;
int i=threadIdx.x;
int zStartIndex = 0;
int zEndIndex = numBound- 1;
int s;
for (s = 0; s < numBound; s++) {
if((k - Bound[s].z) <= PF_THRESHOLD) {
zStartIndex = s;
break;
}
}
for (s = numBound-1; s >= zStartIndex; s--) {
if((Bound[s].z - k) <= PF_THRESHOLD) {
zEndIndex = s;
break;
}
}
int yStartIndex = zStartIndex;
int yEndIndex = zEndIndex;
for (s = zStartIndex; s <= zEndIndex; s++) {
if((j - Bound[s].y) <= PF_THRESHOLD) {
yStartIndex = s;
break;
}
}
for (s = zEndIndex; s >= yStartIndex; s--) {
if((Bound[s].y - j) <= PF_THRESHOLD) {
yEndIndex = s;
break;
}
}
int idx=k*slsz + j*L + i;
force[idx].xd = 0.00;
force[idx].yd = 0.00;
force[idx].zd = 0.00;
if(!inOut) {
if(f[idx] == 0) {
// outside voxels have null force
return;
}
}
else {
// we don't know where the inside of the object is
// so we compute the vector field everywhere.
// NOTHING
}
if(f[idx] == SURF) return;
if(f[idx] == BOUNDARY) return;
int startIndex = yStartIndex;
int endIndex = yEndIndex;
for (s = yStartIndex; s <= yEndIndex; s++) {
if((i - Bound[s].x) <= PF_THRESHOLD) {
startIndex = s;
break;
}
}
for (s = yEndIndex; s >= startIndex; s--) {
if((Bound[s].x - i) <= PF_THRESHOLD) {
endIndex = s;
break;
}
}
if(endIndex < startIndex) {
// no boundary point is close enough to this point - take all the boundary points
startIndex = 0;
endIndex = numBound - 1;
}
for (s = startIndex; s <= endIndex; s++) {
// printf("%d %d %d\n", i - Bound[s].x, j - Bound[s].y, k - Bound[s].z);
/*
// visibility test - too slow
if(GetIndexOfBPInXYZRange(i, j, k, Bound[s].x, Bound[s].y, Bound[s].z,
Bound, numBound, &v1, &v2))
{
  // check if this boundary point is visible from the current position
if (IsLineCrossingBoundary(i, j, k, Bound[s].x, Bound[s].y, Bound[s].z, L, M, N, f)) {
// not visible
continue;
}
}
*/
float v1 = i - Bound[s].x;
float v2 = j - Bound[s].y;
float v3 = k - Bound[s].z;
float r, t;
#ifdef EUCLIDEAN_METRIC
// euclidean metric
r = sqrtf(v1*v1 + v2*v2 + v3*v3);
#else
// simpler metric
r = abs(v1) + abs(v2) + abs(v3);
#endif
// r CAN BE 0 if we are computing the force
// at boundary voxels too
// if the current point is a BOUNDARY point,
// some r will be 0, and that should be
// ignored
if(r != 0.00) {
// raise r to the fieldStrenght+1 power
// so that the force is
// 1/(dist^fieldStrength)
t = 1.00;
for(int p = 0; p <= fieldStrenght; p++) {
t = t * r;
}
r = t;
force[idx].xd = force[idx].xd + (v1 / r);
force[idx].yd = force[idx].yd + (v2 / r);
force[idx].zd = force[idx].zd + (v3 / r);
}
}
}
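// A sketch of what the kernel above computes for each interior voxel p:
//   F(p) = sum over nearby boundary voxels b of (p - b) / |p - b|^(fieldStrenght + 1)
// with |.| either the Euclidean or the L1 metric depending on EUCLIDEAN_METRIC;
// normalize_vector then rescales F(p) to unit length.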
__global__ void computePotentialFieldForBoundaryVoxels(int *ng, unsigned char* f, Vector* force, int slsz, bool inOut, int L) {
int k=blockIdx.x+1;
int j=blockIdx.y+1;
int i=threadIdx.x+1;
long idx = k*slsz + j*L + i;
if((f[idx] == SURF) ||
(f[idx] == BOUNDARY))
{
force[idx].xd = 0.00;
force[idx].yd = 0.00;
force[idx].zd = 0.00;
float var_xd=0.00;
float var_yd=0.00;
float var_zd=0.00;
// look at the neighbors and average the forces if not 0
//
int v1 = 0;
for(int s=0; s < 26; s++) {
long iidx = idx + ng[s]; // index of neighbor
// take only neighbors that are not SURF or BOUNDARY
// because those neighbors have force = 0
if(f[iidx] == SURF) continue;
if(f[iidx] == BOUNDARY) continue;
// if we know the interior of the object, take only interior
// neighbors
if(!inOut) {
if(f[iidx] == EXTERIOR) continue;
}
var_xd = var_xd + force[iidx].xd;
var_yd = var_yd + force[iidx].yd;
var_zd = var_zd + force[iidx].zd;
v1 = v1 + 1;
}
// average
if(v1 != 0) {
var_xd = var_xd / (double) v1;
var_yd= var_yd / (double) v1;
var_zd = var_zd / (double) v1;
}
else {
printf("Boundary voxel has no interior neighbor !!! - Force = 0\n");
}
// normalize
float r = var_xd*var_xd +
var_yd*var_yd +
var_zd*var_zd;
if(r > 0.00) {
r = sqrtf(r);
force[idx].xd = var_xd / r;
force[idx].yd = var_yd / r;
force[idx].zd = var_zd/ r;
}
}
}
bool CalculatePotentialField(
int L, int M, int N, // [in] size of volume
unsigned char* f, // [in] volume flags
  int fieldStrenght,          // [in] potential field strength
Vector* force, // [out] force field
bool inOut // [in] flag indicating that we don't
// know what the inside/outside of
// the object is. We have only point
// samples of the boundary.
// DEFAULT: false (only interior)
) {
//cudaSetDevice(1);
int Lm1, Mm1, Nm1;
int i,j,k, s, p;
long idx, iidx, slsz, sz;
VoxelPosition* Bound;
int numBound = 0;
bool flagSurf, flagBound;
double r, t;
int v1, v2, v3;
int startIndex, tmpStartIndex, endIndex, tmpEndIndex, zStartIndex, zEndIndex, yStartIndex, yEndIndex;
//
// check volume padding - fast version
//
if(!CheckVolumePadding(f, L, M, N)) {
printf("** Error - Object touches bounding box. Abort.\n");
exit(1);
}
#ifdef _DEBUG
printf("\t************ Potential Field calculation parameters: ******************\n");
#ifdef HALF_BOUNDARY_POINTS
printf("\t** Using only HALF of the boundary points.\n");
#else
printf("\t** Using ALL boundary points.\n");
#endif
#ifdef EUCLIDEAN_METRIC
printf("\t** Using EUCLIDEAN metric.\n");
#else
printf("\t** Using NON EUCLIDEAN metric.\n");
#endif
if(inOut) {
printf("\t** Inside and Outside.\n");
}
else {
printf("\t** Inside ONLY.\n");
}
printf("\t********* Potential Field calculation parameters - end ****************\n");
#endif
if((Bound = new VoxelPosition[BOUND_SIZE]) == NULL) {
printf("\nERROR allocating memory for boundary array! - Abort\n");
exit(1);
}
Lm1 = L - 1;
Mm1 = M - 1;
Nm1 = N - 1;
slsz = L*M; // slice size
sz = slsz*N;
// save all the boundary voxels in array Bound[]
for (k = 1; k < Nm1; k++) {
for (j = 1; j < Mm1; j++) {
for (i = 1; i < Lm1; i++) {
flagSurf = false;
flagBound = true;
idx = k*slsz + j*L + i;
// CASE 1: treat the inner layer
if (f[idx] == 0) continue;
      // consider the six face neighbors; if any one is zero, this is a boundary voxel
iidx = k*slsz + j*L + i-1;
if (f[iidx] == 0) {
flagSurf = true;
}
#ifdef HALF_BOUNDARY_POINTS
// consider only half of the boundary points
else {
if (f[iidx] == BOUNDARY) {
// a neighbour of the point was already selected so we will not select this one as part of the boundary.
flagBound = false;
}
}
#endif
if(!flagSurf || flagBound) {
iidx = k*slsz + j*L + i+1;
if (f[iidx] == 0) {
flagSurf = true;
}
#ifdef HALF_BOUNDARY_POINTS
// consider only half of the boundary points
else {
if (f[iidx] == BOUNDARY) {
// a neighbour of the point was already selected so we will not select it as part of the boundary.
flagBound = false;
}
}
#endif
if(!flagSurf || flagBound) {
iidx = k*slsz + (j-1)*L + i;
if (f[iidx] == 0) {
flagSurf = true;
}
#ifdef HALF_BOUNDARY_POINTS
// consider only half of the boundary points
else {
if (f[iidx] == BOUNDARY) {
// a neighbour of the point was already selected so we will not select it as part of the boundary.
flagBound = false;
}
}
#endif
if(!flagSurf || flagBound) {
iidx = k*slsz + (j+1)*L + i;
if (f[iidx] == 0) {
flagSurf = true;
}
#ifdef HALF_BOUNDARY_POINTS
// consider only half of the boundary points
else {
if (f[iidx] == BOUNDARY) {
// a neighbour of the point was already selected so we will not select it as part of the boundary.
flagBound = false;
}
}
#endif
if(!flagSurf || flagBound) {
iidx = (k-1)*slsz + j*L + i;
if (f[iidx] == 0) {
flagSurf = true;
}
#ifdef HALF_BOUNDARY_POINTS
// consider only half of the boundary points
else {
if (f[iidx] == BOUNDARY) {
// a neighbour of the point was already selected so we will not select it as part of the boundary.
flagBound = false;
}
}
#endif
if(!flagSurf || flagBound) {
iidx = (k+1)*slsz + j*L + i;
if (f[iidx] == 0) {
flagSurf = true;
}
#ifdef HALF_BOUNDARY_POINTS
// consider only half of the boundary points
else {
if (f[iidx] == BOUNDARY) {
// a neighbour of the point was already selected so we will not select it as part of the boundary.
flagBound = false;
}
}
#endif
}
}
}
}
}
// restore idx to the right value
idx = k*slsz + j*L + i;
if (flagSurf) {
f[idx] = SURF;
if(flagBound) {
// if no neighbour of this voxel is already marked as boundary, then mark this one.
// or if we are taking all the boundary voxels
// (in this case flagBound stays true)
f[idx] = BOUNDARY;
Bound[numBound].x = i;
Bound[numBound].y = j;
Bound[numBound].z = k;
numBound++;
if(numBound >= BOUND_SIZE) {
printf("ERROR: too many boundary points detected !! - Abort.\n");
exit(1);
}
}
}
}
}
}
//printf("numBound = %d \n", numBound);
#ifdef _DEBUG
PrintElapsedTime("\tPF-1: finding the boundary voxels.");
printf("\t--Found %d boundary voxels.\n", numBound);
#endif
/*
// output boundary voxels
FILE *ff;
unsigned char a;
long int b;
ff=fopen("bound.vol", "w");
for(idx=0; idx < L*M*N; idx++) {
if(f[idx] == BOUNDARY) {
a = 255;
}
else {
a = 0;
}
b = random();
if(b > RAND_MAX/2) {
a = 0;
}
fwrite(&a, sizeof(unsigned char), 1, ff);
b = 0;
}
fclose(ff);
exit(1);
*/
// sort the boundary array.
SortBoundaryArray(numBound, Bound);
#ifdef _DEBUG
PrintElapsedTime("\tPF-2: sorting the boundary voxels.");
#ifdef TRACE
// print the boundary voxels
for(i=0; i < numBound; i++) {
printf("%d %d %d 0.5\n", Bound[i].x, Bound[i].y, Bound[i].z);
}
exit(1);
#endif
#endif
// Compute the potential field
printf("Computing potential field.\n");
dim3 dimBlock(L,1);
dim3 dimGrid(N,M);
VoxelPosition *d_bound;
unsigned char* d_f;
Vector* d_force;
cudaMalloc((void **)&d_f,sizeof(unsigned char)*L*M*N);
cudaMalloc((void **)&d_bound,sizeof(VoxelPosition)*BOUND_SIZE);
cudaMalloc((void **)&d_force,sizeof(Vector)*L*M*N);
cudaMemcpy(d_f,f,sizeof(unsigned char)*L*M*N,cudaMemcpyHostToDevice);
cudaMemcpy(d_bound,Bound,sizeof(VoxelPosition)*BOUND_SIZE,cudaMemcpyHostToDevice);
cudaMemcpy(d_force,force,sizeof(Vector)*L*M*N,cudaMemcpyHostToDevice);
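  // Launch layout: one block per (z, y) pair, one thread per x position, i.e. one thread per
  // voxel. Note this assumes L does not exceed the per-block thread limit (1024 on current
  // GPUs); larger volumes would need a tiled launch.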
compute_potential_field<<<dimGrid,dimBlock>>>(d_bound,d_force,numBound,d_f,inOut,slsz,sz,L, fieldStrenght);
normalize_vector<<<dimGrid,dimBlock>>>(d_force,d_f,inOut,slsz,L);
// idx = -1;
// for (k = 0; k < N; k++) {
// printf("\tProcessing plane %d out of %d\r", k, N-1);
// fflush(stdout);
// // find the boundary voxels that will influence this point
// // look at the Z coordinate
// zStartIndex = 0;
// zEndIndex = numBound- 1;
// for (s = 0; s < numBound; s++) {
// if((k - Bound[s].z) <= PF_THRESHOLD) {
// zStartIndex = s;
// break;
// }
// }
// for (s = numBound-1; s >= zStartIndex; s--) {
// if((Bound[s].z - k) <= PF_THRESHOLD) {
// zEndIndex = s;
// break;
// }
// }
// // printf("ZStart: %d\t ZEnd: %d\n", zStartIndex, zEndIndex);
// for (j = 0; j < M; j++) {
// // find the boundary voxels that will influence this point
// // look at the Y coordinate
// yStartIndex = zStartIndex;
// yEndIndex = zEndIndex;
// for (s = zStartIndex; s <= zEndIndex; s++) {
// if((j - Bound[s].y) <= PF_THRESHOLD) {
// yStartIndex = s;
// break;
// }
// }
// for (s = zEndIndex; s >= yStartIndex; s--) {
// if((Bound[s].y - j) <= PF_THRESHOLD) {
// yEndIndex = s;
// break;
// }
// }
// // printf("YStart: %d\t YEnd: %d\n", yStartIndex, yEndIndex);
// for (i = 0; i < L; i++) {
// // printf("Point: %d\t%d\t%d:\n", i, j, k);
// // idx = k*slsz + j*L + i;
// idx = idx + 1;
// force[idx].xd = 0.00;
// force[idx].yd = 0.00;
// force[idx].zd = 0.00;
// if(!inOut) {
// if(f[idx] == 0) {
// // outside voxels have null force
// continue;
// }
// }
// else {
// // we don't know where the inside of the object is
// // so we compute the vector field everywhere.
// // NOTHING
// }
// // surface voxels (including those selected for the
// // field calculation)
// // are ignored for now. The force there will be
// // the average of their neighbors
// // if we are to compute the force at boundary
// // voxels too, the force will point
// // towards the exterior of the object
// // (example: a 30x30x100 box)
// if(f[idx] == SURF) continue;
// if(f[idx] == BOUNDARY) continue;
// // find the boundary voxels that will influence this point
// // look at the X coordinate
// startIndex = yStartIndex;
// endIndex = yEndIndex;
// for (s = yStartIndex; s <= yEndIndex; s++) {
// if((i - Bound[s].x) <= PF_THRESHOLD) {
// startIndex = s;
// break;
// }
// }
// for (s = yEndIndex; s >= startIndex; s--) {
// if((Bound[s].x - i) <= PF_THRESHOLD) {
// endIndex = s;
// break;
// }
// }
// // printf("Start at: %d, end at: %d\n", startIndex, endIndex);
// // exit(-1);
// if(endIndex < startIndex) {
// // no boundary point is close enough to this point - take all the boundary points
// startIndex = 0;
// endIndex = numBound - 1;
// }
// for (s = startIndex; s <= endIndex; s++) {
// // printf("%d %d %d\n", i - Bound[s].x, j - Bound[s].y, k - Bound[s].z);
// /*
// // visibility test - too slow
// if(GetIndexOfBPInXYZRange(i, j, k, Bound[s].x, Bound[s].y, Bound[s].z,
// Bound, numBound, &v1, &v2))
// {
// // check if this boundary point is visible from the current position
// if (IsLineCrossingBoundary(i, j, k, Bound[s].x, Bound[s].y, Bound[s].z, L, M, N, f)) {
// // not visible
// continue;
// }
// }
// */
// v1 = i - Bound[s].x;
// v2 = j - Bound[s].y;
// v3 = k - Bound[s].z;
// #ifdef EUCLIDEAN_METRIC
// // euclidean metric
// r = sqrt(v1*v1 + v2*v2 + v3*v3);
// #else
// // simpler metric
// r = abs(v1) + abs(v2) + abs(v3);
// #endif
// // r CAN BE 0 if we are computing the force
// // at boundary voxels too
// // if the current point is a BOUNDARY point,
// // some r will be 0, and that should be
// // ignored
// if(r != 0.00) {
// // raise r to the fieldStrenght+1 power
// // so that the force is
// // 1/(dist^fieldStrength)
// t = 1.00;
// for(p = 0; p <= fieldStrenght; p++) {
// t = t * r;
// }
// r = t;
// force[idx].xd = force[idx].xd + (v1 / r);
// force[idx].yd = force[idx].yd + (v2 / r);
// force[idx].zd = force[idx].zd + (v3 / r);
// }
// }
// /*
// printf("First point with force vector != 0\n");
// printf("%f\t%f\t%f: %d, %d, %d\n", force[idx].xd, force[idx].yd, force[idx].zd, i, j, k);
// exit(1);
// */
// }
// }
// }
// delete the Bound array - don't need it anymore
delete [] Bound;
#ifdef _DEBUG
PrintElapsedTime("\tPF-3: computing potential field for inside voxels.");
#endif
// normalize force vectors:
// for(idx=0; idx < L*M*N; idx++) {
// if(!inOut) {
// // only for interior voxels we had calculated forces
// if(f[idx] == EXTERIOR) continue;
// }
// r = force[idx].xd*force[idx].xd +
// force[idx].yd*force[idx].yd +
// force[idx].zd*force[idx].zd;
// if(r > 0.00) {
// r = sqrt(r);
// force[idx].xd = force[idx].xd / r;
// force[idx].yd = force[idx].yd / r;
// force[idx].zd = force[idx].zd / r;
// }
// }
#ifdef _DEBUG
PrintElapsedTime("\tPF-4: normalizing force vectors for inside voxels.");
#endif
// if we know the inside from the outside
// calculate the force at the surface voxels as the average of the
// interior neighbors
if (!inOut) {
//neighbors:
int ng[26];
// face neighbors
ng[0] = + slsz + 0 + 0;
ng[1] = - slsz + 0 + 0;
ng[2] = + 0 + L + 0;
ng[3] = + 0 - L + 0;
ng[4] = + 0 + 0 + 1;
ng[5] = + 0 + 0 - 1;
// v-neighbors
ng[6] = - slsz - L - 1;
ng[7] = - slsz - L + 1;
ng[8] = - slsz + L - 1;
ng[9] = - slsz + L + 1;
ng[10] = + slsz - L - 1;
ng[11] = + slsz - L + 1;
ng[12] = + slsz + L - 1;
ng[13] = + slsz + L + 1;
// e-neighbors
ng[14] = + slsz + L + 0;
ng[15] = + slsz - L + 0;
ng[16] = - slsz + L + 0;
ng[17] = - slsz - L + 0;
ng[18] = + slsz + 0 + 1;
ng[19] = + slsz + 0 - 1;
ng[20] = - slsz + 0 + 1;
ng[21] = - slsz + 0 - 1;
ng[22] = + 0 + L + 1;
ng[23] = + 0 + L - 1;
ng[24] = + 0 - L + 1;
ng[25] = + 0 - L - 1;
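// The offsets above assume the linear index idx = k*slsz + j*L + i, so +/-slsz steps one
// slice in z, +/-L one row in y, and +/-1 one voxel in x; the 6 face, 8 vertex and
// 12 edge offsets together cover the full 26-voxel 3x3x3 neighborhood around idx.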
dim3 dimBlock(Lm1-1,1);
dim3 dimGrid(Nm1-1,Mm1-1);
int *d_ng;
cudaMalloc((void **)&d_ng,sizeof(int)*26);
cudaMemcpy(d_ng,ng,sizeof(int)*26,cudaMemcpyHostToDevice);
computePotentialFieldForBoundaryVoxels<<<dimGrid,dimBlock>>>(d_ng, d_f, d_force, slsz, inOut, L);
/*for (k = 1; k < Nm1; k++) {
for (j = 1; j < Mm1; j++) {
for (i = 1; i < Lm1; i++) {
idx = k*slsz + j*L + i;
if((f[idx] == SURF) ||
(f[idx] == BOUNDARY))
{
force[idx].xd = 0.00;
force[idx].yd = 0.00;
force[idx].zd = 0.00;
// look at the neighbors and average the forces if not 0
//
v1 = 0;
for(s=0; s < 26; s++) {
iidx = idx + ng[s]; // index of neighbor
// take only neighbors that are not SURF or BOUNDARY
// because those neighbors have force = 0
if(f[iidx] == SURF) continue;
if(f[iidx] == BOUNDARY) continue;
// if we know the interior of the object, take only interior
// neighbors
if(!inOut) {
if(f[iidx] == EXTERIOR) continue;
}
force[idx].xd = force[idx].xd + force[iidx].xd;
force[idx].yd = force[idx].yd + force[iidx].yd;
force[idx].zd = force[idx].zd + force[iidx].zd;
v1 = v1 + 1;
}
// average
if(v1 != 0) {
force[idx].xd = force[idx].xd / (double) v1;
force[idx].yd = force[idx].yd / (double) v1;
force[idx].zd = force[idx].zd / (double) v1;
}
else {
printf("Boundary voxel has no interior neighbor !!! - Force = 0\n");
}
// normalize
r = force[idx].xd*force[idx].xd +
force[idx].yd*force[idx].yd +
force[idx].zd*force[idx].zd;
if(r > 0.00) {
r = sqrt(r);
force[idx].xd = force[idx].xd / r;
force[idx].yd = force[idx].yd / r;
force[idx].zd = force[idx].zd / r;
}
}
}
}
}*/
cudaMemcpy(force,d_force,sizeof(Vector)*L*M*N,cudaMemcpyDeviceToHost);
}
else {
// we don't know the inside from the outside.
// boundary points remain 0
// nothing to do
}
#ifdef _DEBUG
PrintElapsedTime("\tPF-5: computing potential field for boundary voxels.");
#endif
return true;
}
// Sort the boundary array so that we can speed up the potential field calculation: ZYX in that order
// selection sort
/*bool SortBoundaryArray(int numBound, VoxelPosition Bound[]) {
int st, i;
short zvst, yvst;
// sort by Z
SortByZ(0, numBound-1, Bound);
// then by Y
st = 0;
zvst = Bound[st].z;
for(i=0; i < numBound; i++) {
if(Bound[i].z != zvst) {
SortByY(st, i-1, Bound);
st = i;
zvst = Bound[st].z;
}
}
SortByY(st, numBound-1, Bound);
// then by X
st = 0;
zvst = Bound[st].z;
yvst = Bound[st].y;
for(i=0; i < numBound; i++) {
if((Bound[i].y != yvst) || (Bound[i].z != zvst)) {
SortByX(st, i-1, Bound);
st = i;
zvst = Bound[st].z;
yvst = Bound[st].y;
}
}
SortByX(st, numBound-1, Bound);
return true;
}*/
compareStruct comp;
bool SortBoundaryArray(int numBound, VoxelPosition Bound[]) {
thrust::sort(Bound, Bound+numBound, comp);
return true;
}
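// thrust::sort above relies on a comparator named compareStruct defined elsewhere in
// this file. As an illustrative sketch only (hypothetical name, not used by the code
// here), a functor implementing the same Z-then-Y-then-X ordering could look like:
struct CompareZYXSketch {
__host__ __device__ bool operator()(const VoxelPosition &a, const VoxelPosition &b) const {
if (a.z != b.z) return a.z < b.z; // primary key: Z
if (a.y != b.y) return a.y < b.y; // secondary key: Y
return a.x < b.x; // tertiary key: X
}
};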
bool SortByX(int startAt, int endAt, VoxelPosition Bound[]) {
int i, j, minIndex, crtMin;
short tmp;
for(i=startAt; i <= endAt; i++) {
minIndex = -1;
crtMin = Bound[i].x;
for(j=i+1; j <= endAt; j++) {
if(Bound[j].x < crtMin) {
minIndex = j;
crtMin = Bound[j].x;
}
}
if(minIndex != -1) {
// swap values.
tmp = Bound[i].x;
Bound[i].x = Bound[minIndex].x;
Bound[minIndex].x = tmp;
tmp = Bound[i].y;
Bound[i].y = Bound[minIndex].y;
Bound[minIndex].y = tmp;
tmp = Bound[i].z;
Bound[i].z = Bound[minIndex].z;
Bound[minIndex].z = tmp;
}
}
return true;
}
bool SortByY(int startAt, int endAt, VoxelPosition Bound[]) {
int i, j, minIndex, crtMin;
short tmp;
for(i=startAt; i <= endAt; i++) {
minIndex = -1;
crtMin = Bound[i].y;
for(j=i+1; j <= endAt; j++) {
if(Bound[j].y < crtMin) {
minIndex = j;
crtMin = Bound[j].y;
}
}
if(minIndex != -1) {
// swap values.
tmp = Bound[i].x;
Bound[i].x = Bound[minIndex].x;
Bound[minIndex].x = tmp;
tmp = Bound[i].y;
Bound[i].y = Bound[minIndex].y;
Bound[minIndex].y = tmp;
tmp = Bound[i].z;
Bound[i].z = Bound[minIndex].z;
Bound[minIndex].z = tmp;
}
}
return true;
}
bool SortByZ(int startAt, int endAt, VoxelPosition Bound[]) {
int i, j, minIndex, crtMin;
short tmp;
for(i=startAt; i <= endAt; i++) {
minIndex = -1;
crtMin = Bound[i].z;
for(j=i+1; j <= endAt; j++) {
if(Bound[j].z < crtMin) {
minIndex = j;
crtMin = Bound[j].z;
}
}
if(minIndex != -1) {
// swap values.
tmp = Bound[i].x;
Bound[i].x = Bound[minIndex].x;
Bound[minIndex].x = tmp;
tmp = Bound[i].y;
Bound[i].y = Bound[minIndex].y;
Bound[minIndex].y = tmp;
tmp = Bound[i].z;
Bound[i].z = Bound[minIndex].z;
Bound[minIndex].z = tmp;
}
}
return true;
}
// returns the start and endindex of boundary point found in a region
// in space bound by a box defined by the 2 points.
// it doesn't change startIndex or endIndex if it returns false;
// returns true if it finds any boundary point in that region, or false otherwise.
bool GetIndexOfBPInXYZRange(
short sx, short sy, short sz,
short ex, short ey, short ez,
VoxelPosition* Bound, int numBound,
int* startIndex, int* endIndex)
{
int si1, ei1, si2, ei2; // temporary start and end indexes
//
if(GetIndexOfBPInZRange(sz, ez, Bound, numBound, &si1, &ei1)) {
if(GetIndexOfBPInYRange(sy, ey, Bound, numBound, si1, ei1, &si2, &ei2)) {
if(GetIndexOfBPInXRange(sx, ex, Bound, numBound, si2, ei2, &si1, &ei1)) {
(*startIndex) = si1;
(*endIndex) = ei1;
return true;
}
}
}
return false;
}
bool GetIndexOfBPInZRange(
short z1, short z2,
VoxelPosition* Bound, int numBound,
int* startIndex, int* endIndex)
{
short minz, maxz;
int s;
int si;
// sort the 2 z values;
if(z1 < z2) {
minz = z1; maxz = z2;
}
else {
minz = z2; maxz = z1;
}
si = -1;
for (s = 0; s < numBound; s++) {
if((minz - Bound[s].z) < 0) {
si = s;
break;
}
}
if(si == -1) {
// couldn't find any boundary voxel
return false;
}
(*startIndex) = si;
for (s = numBound-1; s >= (*startIndex); s--) {
if((Bound[s].z - maxz) < 0) {
(*endIndex) = s;
break;
}
}
return true;
}
bool GetIndexOfBPInYRange(
short y1, short y2,
VoxelPosition* Bound, int numBound,
int startAt, int endAt,
int* startIndex, int* endIndex)
{
short miny, maxy;
int s;
int si;
// sort the 2 y values;
if(y1 < y2) {
miny = y1; maxy = y2;
}
else {
miny = y2; maxy = y1;
}
// start the search at startAt and end it endAt
si = -1;
for (s = startAt; s <= endAt; s++) {
if((miny - Bound[s].y) < 0) {
si = s;
break;
}
}
if(si == -1) {
// couldn't find any boundary voxel
return false;
}
(*startIndex) = si;
for (s = endAt; s >= (*startIndex); s--) {
if((Bound[s].y - maxy) < 0) {
(*endIndex) = s;
break;
}
}
return true;
}
bool GetIndexOfBPInXRange(
short x1, short x2,
VoxelPosition* Bound, int numBound,
int startAt, int endAt,
int* startIndex, int* endIndex)
{
short minx, maxx;
int s;
int si;
// sort the 2 x values;
if(x1 < x2) {
minx = x1; maxx = x2;
}
else {
minx = x2; maxx = x1;
}
// start the search at startAt and end it endAt
si = -1;
for (s = startAt; s <= endAt; s++) {
if((minx - Bound[s].x) < 0) {
si = s;
break;
}
}
if(si == -1) {
// couldn't find any boundary voxel
return false;
}
(*startIndex) = si;
for (s = endAt; s >= (*startIndex); s--) {
if((Bound[s].x - maxx) < 0) {
(*endIndex) = s;
break;
}
}
return true;
}
|
4b60b95c23d9f136c1608af89ce08e3972a0c540.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void elementwise_1D_1D_square_grad(float* in_x, float* in_d, float* out_x, float * out_d, int size) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
for (; tid < size; tid += stride)
if (tid < size) in_d[tid] += out_d[tid] * 2 * in_x[tid];
}
|
4b60b95c23d9f136c1608af89ce08e3972a0c540.cu
|
#include "includes.h"
__global__ void elementwise_1D_1D_square_grad(float* in_x, float* in_d, float* out_x, float * out_d, int size) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
for (; tid < size; tid += stride)
if (tid < size) in_d[tid] += out_d[tid] * 2 * in_x[tid];
}
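// Illustrative sketch (assumed buffer names, not part of the original file): a
// hypothetical host-side wrapper showing a typical launch of the grid-stride kernel
// above, which accumulates the square gradient in_d += out_d * 2 * in_x over `size` elements.
static void launch_square_grad_sketch(float* d_in_x, float* d_in_d,
float* d_out_x, float* d_out_d, int size) {
int threads = 256; // common block size
int blocks = (size + threads - 1) / threads; // enough blocks to cover all elements
elementwise_1D_1D_square_grad<<<blocks, threads>>>(d_in_x, d_in_d, d_out_x, d_out_d, size);
}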
|
030bfa0db1608a4e63a788304bbe92aed03423a0.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime_api.h>
#include <time.h>
// nvcc -o CudaPasswordCracking CudaPasswordCracking.cu
__device__ int passcrack(char *crack){
char pass1[]="SH2973";
char pass2[]="KR3097";
char pass3[]="PK9736";
char pass4[]="BM4397";
char *s1 = crack;
char *s2 = crack;
char *s3 = crack;
char *s4 = crack;
char *p1 = pass1;
char *p2 = pass2;
char *p3 = pass3;
char *p4 = pass4;
while(*s1 == *p1){
if(*s1 == '\0'){
return 1;
}
s1++;
p1++;
}
while(*s2 == *p2){
if(*s2 == '\0'){
return 1;
}
s2++;
p2++;
}
while(*s3 == *p3){
if(*s3 == '\0'){
return 1;
}
s3++;
p3++;
}
while(*s4 == *p4){
if(*s4 == '\0'){
return 1;
}
s4++;
p4++;
}
return 0;
}
__global__ void kernel() {
char alphabet[26] = {'A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z'};
char num[10] = {'0','1','2','3','4','5','6','7','8','9'};
char crack[7];
crack[6] = '\0';
int s, h, k, r;
for(s=0;s<10;s++){
for(h=0; h<10; h++){
for(k=0; k<10; k++){
for(r=0; r<10; r++){
crack[0] = alphabet[blockIdx.x];
crack[1] = alphabet[threadIdx.x];
crack[2] = num[s];
crack[3] = num[h];
crack[4] = num[k];
crack[5] = num[r];
if(passcrack(crack)){
printf("Password successfully cracked: %s\n", crack);
}
}
}
}
}
}
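// Search-space arithmetic for the kernel above: each candidate is two letters followed
// by four digits, so a <<<26, 26>>> launch combined with the four nested digit loops
// enumerates 26 * 26 * 10^4 = 6,760,000 candidates, one letter pair per (blockIdx.x, threadIdx.x).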
int time_difference(struct timespec *start, struct timespec *finish, long long int *difference)
{
long long int ds = finish->tv_sec - start->tv_sec;
long long int dn = finish->tv_nsec - start->tv_nsec;
if(dn < 0 )
{
ds--;
dn += 1000000000;
}
*difference = ds * 1000000000 + dn;
return !(*difference > 0);
}
int main(int argc, char *argv[])
{
struct timespec start, finish;
long long int time_elapsed;
clock_gettime(CLOCK_MONOTONIC, &start);
hipLaunchKernelGGL(( kernel) , dim3(26), dim3(26), 0, 0, );
hipDeviceSynchronize();
clock_gettime(CLOCK_MONOTONIC, &finish);
time_difference(&start, &finish, &time_elapsed);
printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed,
(time_elapsed/1.0e9));
return 0;
}
|
030bfa0db1608a4e63a788304bbe92aed03423a0.cu
|
#include <stdio.h>
#include <cuda_runtime_api.h>
#include <time.h>
// nvcc -o CudaPasswordCracking CudaPasswordCracking.cu
__device__ int passcrack(char *crack){
char pass1[]="SH2973";
char pass2[]="KR3097";
char pass3[]="PK9736";
char pass4[]="BM4397";
char *s1 = crack;
char *s2 = crack;
char *s3 = crack;
char *s4 = crack;
char *p1 = pass1;
char *p2 = pass2;
char *p3 = pass3;
char *p4 = pass4;
while(*s1 == *p1){
if(*s1 == '\0'){
return 1;
}
s1++;
p1++;
}
while(*s2 == *p2){
if(*s2 == '\0'){
return 1;
}
s2++;
p2++;
}
while(*s3 == *p3){
if(*s3 == '\0'){
return 1;
}
s3++;
p3++;
}
while(*s4 == *p4){
if(*s4 == '\0'){
return 1;
}
s4++;
p4++;
}
return 0;
}
__global__ void kernel() {
char alphabet[26] = {'A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z'};
char num[10] = {'0','1','2','3','4','5','6','7','8','9'};
char crack[7];
crack[6] = '\0';
int s, h, k, r;
for(s=0;s<10;s++){
for(h=0; h<10; h++){
for(k=0; k<10; k++){
for(r=0; r<10; r++){
crack[0] = alphabet[blockIdx.x];
crack[1] = alphabet[threadIdx.x];
crack[2] = num[s];
crack[3] = num[h];
crack[4] = num[k];
crack[5] = num[r];
if(passcrack(crack)){
printf("Password successfully cracked: %s\n", crack);
}
}
}
}
}
}
int time_difference(struct timespec *start, struct timespec *finish, long long int *difference)
{
long long int ds = finish->tv_sec - start->tv_sec;
long long int dn = finish->tv_nsec - start->tv_nsec;
if(dn < 0 )
{
ds--;
dn += 1000000000;
}
*difference = ds * 1000000000 + dn;
return !(*difference > 0);
}
int main(int argc, char *argv[])
{
struct timespec start, finish;
long long int time_elapsed;
clock_gettime(CLOCK_MONOTONIC, &start);
kernel <<<26, 26>>>();
cudaThreadSynchronize();
clock_gettime(CLOCK_MONOTONIC, &finish);
time_difference(&start, &finish, &time_elapsed);
printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed,
(time_elapsed/1.0e9));
return 0;
}
|
ce45b0eb5bb71810bdc4944d18fb08916e6e23fa.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "svgf/svgf.h"
#include "kernel/device_scene_context.cuh"
#include "kernel/intersect.cuh"
#include "kernel/accelerator.cuh"
#include "kernel/StreamCompaction.h"
#include "kernel/pt_common.h"
#include "cuda/cudadefs.h"
#include "cuda/helper_math.h"
#include "cuda/cudautil.h"
#include "cuda/cudamemory.h"
#include "aten4idaten.h"
#include "renderer/pt_params.h"
__global__ void fillAOV(
hipSurfaceObject_t dst,
idaten::SVGFPathTracing::AOVMode mode,
int32_t width, int32_t height,
const float4* __restrict__ aovNormalDepth,
const float4* __restrict__ aovTexclrMeshid,
hipSurfaceObject_t motionDetphBuffer,
const aten::CameraParameter camera,
idaten::context ctxt)
{
auto ix = blockIdx.x * blockDim.x + threadIdx.x;
auto iy = blockIdx.y * blockDim.y + threadIdx.y;
if (ix >= width || iy >= height) {
return;
}
const aten::vec3 colors[] = {
aten::vec3(255, 0, 0),
aten::vec3( 0, 255, 0),
aten::vec3( 0, 0, 255),
aten::vec3(255, 255, 0),
aten::vec3(255, 0, 255),
aten::vec3( 0, 255, 255),
aten::vec3(128, 128, 128),
aten::vec3( 86, 99, 143),
aten::vec3( 71, 234, 126),
aten::vec3(124, 83, 53),
};
const auto idx = getIdx(ix, iy, width);
float s = (ix + 0.5f) / (float)(width);
float t = (iy + 0.5f) / (float)(height);
AT_NAME::CameraSampleResult camsample;
AT_NAME::PinholeCamera::sample(&camsample, &camera, s, t);
aten::Intersection isect;
bool isHit = intersectClosest(&ctxt, camsample.r, &isect);
float4 clr = make_float4(1);
if (mode == idaten::SVGFPathTracing::AOVMode::Normal) {
auto n = aovNormalDepth[idx] * 0.5f + 0.5f;
clr = make_float4(n.x, n.y, n.z, 1);
}
else if (mode == idaten::SVGFPathTracing::AOVMode::Depth) {
// TODO
}
else if (mode == idaten::SVGFPathTracing::AOVMode::TexColor) {
clr = aovTexclrMeshid[idx];
}
else if (mode == idaten::SVGFPathTracing::AOVMode::WireFrame) {
bool isHitEdge = (isect.a < 1e-2) || (isect.b < 1e-2) || (1 - isect.a - isect.b < 1e-2);
clr = isHitEdge ? make_float4(0) : make_float4(1);
}
else if (mode == idaten::SVGFPathTracing::AOVMode::BaryCentric) {
auto c = 1 - isect.a - isect.b;
clr = make_float4(isect.a, isect.b, c, 1);
}
else if (mode == idaten::SVGFPathTracing::AOVMode::Motion) {
float4 data;
surf2Dread(&data, motionDetphBuffer, ix * sizeof(float4), iy);
// TODO
float motionX = data.x;
float motionY = data.y;
clr = make_float4(motionX, motionY, 0, 1);
}
else if (mode == idaten::SVGFPathTracing::AOVMode::ObjId) {
#if 0
int32_t objid = isect.meshid;
#else
int32_t objid = isect.mtrlid;
#endif
if (objid >= 0) {
objid %= AT_COUNTOF(colors);
auto c = colors[objid];
clr = make_float4(c.x, c.y, c.z, 1);
clr /= 255.0f;
}
else {
clr = make_float4(0, 0, 0, 1);
}
}
surf2Dwrite(
clr,
dst,
ix * sizeof(float4), iy,
hipBoundaryModeTrap);
}
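// Note on the surf2Dread/surf2Dwrite calls above: surface reads and writes take the
// x-coordinate in bytes, which is why ix is multiplied by sizeof(float4) while iy
// stays in element rows.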
__global__ void pickPixel(
idaten::SVGFPathTracing::PickedInfo* dst,
int32_t ix, int32_t iy,
int32_t width, int32_t height,
const aten::CameraParameter camera,
const idaten::Path paths,
const float4* __restrict__ aovNormalDepth,
const float4* __restrict__ aovTexclrMeshid,
idaten::context ctxt)
{
iy = height - 1 - iy;
float s = (ix + 0.5f) / (float)(camera.width);
float t = (iy + 0.5f) / (float)(camera.height);
AT_NAME::CameraSampleResult camsample;
AT_NAME::PinholeCamera::sample(&camsample, &camera, s, t);
aten::Intersection isect;
bool isHit = intersectClosest(&ctxt, camsample.r, &isect);
if (isHit) {
const auto idx = getIdx(ix, iy, width);
auto normalDepth = aovNormalDepth[idx];
auto texclrMeshid = aovTexclrMeshid[idx];
dst->ix = ix;
dst->iy = iy;
dst->color = aten::vec3(paths.contrib[idx].contrib.x, paths.contrib[idx].contrib.y, paths.contrib[idx].contrib.z);
dst->normal = aten::vec3(normalDepth.x, normalDepth.y, normalDepth.z);
dst->depth = normalDepth.w;
dst->meshid = (int32_t)texclrMeshid.w;
dst->triid = isect.triangle_id;
dst->mtrlid = isect.mtrlid;
}
else {
dst->ix = -1;
dst->iy = -1;
}
}
namespace idaten
{
void SVGFPathTracing::onDisplayAOV(
hipSurfaceObject_t outputSurf,
int32_t width, int32_t height)
{
dim3 block(BLOCK_SIZE, BLOCK_SIZE);
dim3 grid(
(width + block.x - 1) / block.x,
(height + block.y - 1) / block.y);
int32_t curaov_idx = getCurAovs();
auto& curaov = aov_[curaov_idx];
CudaGLResourceMapper<decltype(m_motionDepthBuffer)> rscmap(m_motionDepthBuffer);
auto gbuffer = m_motionDepthBuffer.bind();
fillAOV << <grid, block >> > (
outputSurf,
m_aovMode,
width, height,
curaov.get<AOVBuffer::NormalDepth>().data(),
curaov.get<AOVBuffer::AlbedoMeshId>().data(),
gbuffer,
m_cam,
ctxt_host_.ctxt);
}
void SVGFPathTracing::pick(
int32_t ix, int32_t iy,
int32_t width, int32_t height)
{
if (m_willPicklPixel) {
m_pick.resize(1);
int32_t curaov_idx = getCurAovs();
auto& curaov = aov_[curaov_idx];
pickPixel << <1, 1 >> > (
m_pick.data(),
m_pickedInfo.ix, m_pickedInfo.iy,
width, height,
m_cam,
path_host_->paths,
curaov.get<AOVBuffer::NormalDepth>().data(),
curaov.get<AOVBuffer::AlbedoMeshId>().data(),
ctxt_host_.ctxt);
m_pick.readFromDeviceToHostByNum(&m_pickedInfo);
m_willPicklPixel = false;
}
}
}
|
ce45b0eb5bb71810bdc4944d18fb08916e6e23fa.cu
|
#include "svgf/svgf.h"
#include "kernel/device_scene_context.cuh"
#include "kernel/intersect.cuh"
#include "kernel/accelerator.cuh"
#include "kernel/StreamCompaction.h"
#include "kernel/pt_common.h"
#include "cuda/cudadefs.h"
#include "cuda/helper_math.h"
#include "cuda/cudautil.h"
#include "cuda/cudamemory.h"
#include "aten4idaten.h"
#include "renderer/pt_params.h"
__global__ void fillAOV(
cudaSurfaceObject_t dst,
idaten::SVGFPathTracing::AOVMode mode,
int32_t width, int32_t height,
const float4* __restrict__ aovNormalDepth,
const float4* __restrict__ aovTexclrMeshid,
cudaSurfaceObject_t motionDetphBuffer,
const aten::CameraParameter camera,
idaten::context ctxt)
{
auto ix = blockIdx.x * blockDim.x + threadIdx.x;
auto iy = blockIdx.y * blockDim.y + threadIdx.y;
if (ix >= width || iy >= height) {
return;
}
const aten::vec3 colors[] = {
aten::vec3(255, 0, 0),
aten::vec3( 0, 255, 0),
aten::vec3( 0, 0, 255),
aten::vec3(255, 255, 0),
aten::vec3(255, 0, 255),
aten::vec3( 0, 255, 255),
aten::vec3(128, 128, 128),
aten::vec3( 86, 99, 143),
aten::vec3( 71, 234, 126),
aten::vec3(124, 83, 53),
};
const auto idx = getIdx(ix, iy, width);
float s = (ix + 0.5f) / (float)(width);
float t = (iy + 0.5f) / (float)(height);
AT_NAME::CameraSampleResult camsample;
AT_NAME::PinholeCamera::sample(&camsample, &camera, s, t);
aten::Intersection isect;
bool isHit = intersectClosest(&ctxt, camsample.r, &isect);
float4 clr = make_float4(1);
if (mode == idaten::SVGFPathTracing::AOVMode::Normal) {
auto n = aovNormalDepth[idx] * 0.5f + 0.5f;
clr = make_float4(n.x, n.y, n.z, 1);
}
else if (mode == idaten::SVGFPathTracing::AOVMode::Depth) {
// TODO
}
else if (mode == idaten::SVGFPathTracing::AOVMode::TexColor) {
clr = aovTexclrMeshid[idx];
}
else if (mode == idaten::SVGFPathTracing::AOVMode::WireFrame) {
bool isHitEdge = (isect.a < 1e-2) || (isect.b < 1e-2) || (1 - isect.a - isect.b < 1e-2);
clr = isHitEdge ? make_float4(0) : make_float4(1);
}
else if (mode == idaten::SVGFPathTracing::AOVMode::BaryCentric) {
auto c = 1 - isect.a - isect.b;
clr = make_float4(isect.a, isect.b, c, 1);
}
else if (mode == idaten::SVGFPathTracing::AOVMode::Motion) {
float4 data;
surf2Dread(&data, motionDetphBuffer, ix * sizeof(float4), iy);
// TODO
float motionX = data.x;
float motionY = data.y;
clr = make_float4(motionX, motionY, 0, 1);
}
else if (mode == idaten::SVGFPathTracing::AOVMode::ObjId) {
#if 0
int32_t objid = isect.meshid;
#else
int32_t objid = isect.mtrlid;
#endif
if (objid >= 0) {
objid %= AT_COUNTOF(colors);
auto c = colors[objid];
clr = make_float4(c.x, c.y, c.z, 1);
clr /= 255.0f;
}
else {
clr = make_float4(0, 0, 0, 1);
}
}
surf2Dwrite(
clr,
dst,
ix * sizeof(float4), iy,
cudaBoundaryModeTrap);
}
__global__ void pickPixel(
idaten::SVGFPathTracing::PickedInfo* dst,
int32_t ix, int32_t iy,
int32_t width, int32_t height,
const aten::CameraParameter camera,
const idaten::Path paths,
const float4* __restrict__ aovNormalDepth,
const float4* __restrict__ aovTexclrMeshid,
idaten::context ctxt)
{
iy = height - 1 - iy;
float s = (ix + 0.5f) / (float)(camera.width);
float t = (iy + 0.5f) / (float)(camera.height);
AT_NAME::CameraSampleResult camsample;
AT_NAME::PinholeCamera::sample(&camsample, &camera, s, t);
aten::Intersection isect;
bool isHit = intersectClosest(&ctxt, camsample.r, &isect);
if (isHit) {
const auto idx = getIdx(ix, iy, width);
auto normalDepth = aovNormalDepth[idx];
auto texclrMeshid = aovTexclrMeshid[idx];
dst->ix = ix;
dst->iy = iy;
dst->color = aten::vec3(paths.contrib[idx].contrib.x, paths.contrib[idx].contrib.y, paths.contrib[idx].contrib.z);
dst->normal = aten::vec3(normalDepth.x, normalDepth.y, normalDepth.z);
dst->depth = normalDepth.w;
dst->meshid = (int32_t)texclrMeshid.w;
dst->triid = isect.triangle_id;
dst->mtrlid = isect.mtrlid;
}
else {
dst->ix = -1;
dst->iy = -1;
}
}
namespace idaten
{
void SVGFPathTracing::onDisplayAOV(
cudaSurfaceObject_t outputSurf,
int32_t width, int32_t height)
{
dim3 block(BLOCK_SIZE, BLOCK_SIZE);
dim3 grid(
(width + block.x - 1) / block.x,
(height + block.y - 1) / block.y);
int32_t curaov_idx = getCurAovs();
auto& curaov = aov_[curaov_idx];
CudaGLResourceMapper<decltype(m_motionDepthBuffer)> rscmap(m_motionDepthBuffer);
auto gbuffer = m_motionDepthBuffer.bind();
fillAOV << <grid, block >> > (
outputSurf,
m_aovMode,
width, height,
curaov.get<AOVBuffer::NormalDepth>().data(),
curaov.get<AOVBuffer::AlbedoMeshId>().data(),
gbuffer,
m_cam,
ctxt_host_.ctxt);
}
void SVGFPathTracing::pick(
int32_t ix, int32_t iy,
int32_t width, int32_t height)
{
if (m_willPicklPixel) {
m_pick.resize(1);
int32_t curaov_idx = getCurAovs();
auto& curaov = aov_[curaov_idx];
pickPixel << <1, 1 >> > (
m_pick.data(),
m_pickedInfo.ix, m_pickedInfo.iy,
width, height,
m_cam,
path_host_->paths,
curaov.get<AOVBuffer::NormalDepth>().data(),
curaov.get<AOVBuffer::AlbedoMeshId>().data(),
ctxt_host_.ctxt);
m_pick.readFromDeviceToHostByNum(&m_pickedInfo);
m_willPicklPixel = false;
}
}
}
|
e4406b836801b44de1b6a33e87e091d1bb506fb3.hip
|
// !!! This is a file automatically generated by hipify!!!
#define GLM_FORCE_CUDA
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <cmath>
#include <glm/glm.hpp>
#include "utilityCore.hpp"
#include "kernel.h"
// LOOK-2.1 potentially useful for doing grid-based neighbor search
#ifndef imax
#define imax( a, b ) ( ((a) > (b)) ? (a) : (b) )
#endif
#ifndef imin
#define imin( a, b ) ( ((a) < (b)) ? (a) : (b) )
#endif
#define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__)
/**
* Check for CUDA errors; print and exit if there was a problem.
*/
void checkCUDAError(const char *msg, int line = -1) {
hipError_t err = hipGetLastError();
if (hipSuccess != err) {
if (line >= 0) {
fprintf(stderr, "Line %d: ", line);
}
fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
}
/*****************
* Configuration *
*****************/
/*! Block size used for CUDA kernel launch. */
#define blockSize 128
// LOOK-1.2 Parameters for the boids algorithm.
// These worked well in our reference implementation.
#define rule1Distance 5.0f
#define rule2Distance 3.0f
#define rule3Distance 5.0f
#define rule1Scale 0.01f
#define rule2Scale 0.1f
#define rule3Scale 0.1f
#define maxSpeed 1.0f
/*! Size of the starting area in simulation space. */
#define scene_scale 100.0f
/***********************************************
* Kernel state (pointers are device pointers) *
***********************************************/
int numObjects;
dim3 threadsPerBlock(blockSize);
// LOOK-1.2 - These buffers are here to hold all your boid information.
// These get allocated for you in Boids::initSimulation.
// Consider why you would need two velocity buffers in a simulation where each
// boid cares about its neighbors' velocities.
// These are called ping-pong buffers.
glm::vec3 *dev_pos;
glm::vec3 *dev_vel1;
glm::vec3 *dev_vel2;
// LOOK-2.1 - these are NOT allocated for you. You'll have to set up the thrust
// pointers on your own too. (particleArrays and gridCellIndices)
// For efficient sorting and the uniform grid. These should always be parallel.
int *dev_particleArrayIndices; // - buffer containing a pointer for each boid to
// its data in dev_pos and dev_vel1 and dev_vel2
int *dev_particleGridIndices; // - buffer containing the grid index of each boid
// needed for use with thrust
thrust::device_ptr<int> dev_thrust_particleArrayIndices;
thrust::device_ptr<int> dev_thrust_particleGridIndices;
int *dev_gridCellStartIndices; // - buffer containing a pointer for each cell to
// the beginning of its data in dev_particleArrayIndices
int *dev_gridCellEndIndices; // - buffer containing a pointer for each cell to
// the end of its data in dev_particleArrayIndices
// TODO-2.3 - consider what additional buffers you might need to reshuffle
// the position and velocity data to be coherent within cells.
// HB - same as its corresponding pos,vel1,vel2 buffers except sorted to match
// the current grid cell index locations
glm::vec3* dev_shuffledPos;
glm::vec3* dev_shuffledVel1;
// LOOK-2.1 - Grid parameters based on simulation parameters.
// These are automatically computed for you in Boids::initSimulation
int gridCellCount;
int gridSideCount; //gridResolution
float gridCellWidth;
float gridInverseCellWidth;
glm::vec3 gridMinimum;
/******************
* initSimulation *
******************/
__host__ __device__ unsigned int hash(unsigned int a) {
a = (a + 0x7ed55d16) + (a << 12);
a = (a ^ 0xc761c23c) ^ (a >> 19);
a = (a + 0x165667b1) + (a << 5);
a = (a + 0xd3a2646c) ^ (a << 9);
a = (a + 0xfd7046c5) + (a << 3);
a = (a ^ 0xb55a4f09) ^ (a >> 16);
return a;
}
/**
* LOOK-1.2 - this is a typical helper function for a CUDA kernel.
* Function for generating a random vec3.
*/
__host__ __device__ glm::vec3 generateRandomVec3(float time, int index) {
thrust::default_random_engine rng(hash((int)(index * time)));
thrust::uniform_real_distribution<float> unitDistrib(-1, 1);
return glm::vec3((float)unitDistrib(rng), (float)unitDistrib(rng), (float)unitDistrib(rng));
}
/**
* LOOK-1.2 - This is a basic CUDA kernel.
* CUDA kernel for generating boids with a specified mass randomly around the star.
*/
__global__ void kernGenerateRandomPosArray(int time, int N, glm::vec3 * arr, float scale) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < N) {
glm::vec3 rand = generateRandomVec3(time, index);
arr[index].x = scale * rand.x;
arr[index].y = scale * rand.y;
arr[index].z = scale * rand.z;
}
}
/**
* Initialize memory, update some globals
*/
void Boids::initSimulation(int N) {
numObjects = N;
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
// LOOK-1.2 - This is basic CUDA memory management and error checking.
// Don't forget to hipFree in Boids::endSimulation.
hipMalloc((void**)&dev_pos, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_pos failed!");
hipMalloc((void**)&dev_vel1, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_vel1 failed!");
hipMalloc((void**)&dev_vel2, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_vel2 failed!");
// LOOK-1.2 - This is a typical CUDA kernel invocation.
hipLaunchKernelGGL(( kernGenerateRandomPosArray), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, 1, numObjects,
dev_pos, scene_scale);
checkCUDAErrorWithLine("kernGenerateRandomPosArray failed!");
// LOOK-2.1 computing grid params
gridCellWidth = 2.0f * ::max(::max(rule1Distance, rule2Distance), rule3Distance);
int halfSideCount = (int)(scene_scale / gridCellWidth) + 1;
gridSideCount = 2 * halfSideCount;
gridCellCount = gridSideCount * gridSideCount * gridSideCount;
gridInverseCellWidth = 1.0f / gridCellWidth;
float halfGridWidth = gridCellWidth * halfSideCount;
gridMinimum.x -= halfGridWidth;
gridMinimum.y -= halfGridWidth;
gridMinimum.z -= halfGridWidth;
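// Worked example with the default constants above: gridCellWidth = 2 * max(5, 3, 5) = 10,
// halfSideCount = (int)(100 / 10) + 1 = 11, so gridSideCount = 22 and
// gridCellCount = 22^3 = 10648; halfGridWidth = 110, so gridMinimum ends up at (-110, -110, -110).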
// TODO-2.1 Allocate additional buffers here.
hipMalloc((void**)&dev_particleArrayIndices, N * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_particleArrayIndices failed!");
hipMalloc((void**)&dev_particleGridIndices, N * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_particleGridIndices failed!");
dev_thrust_particleArrayIndices = thrust::device_pointer_cast<int>(dev_particleArrayIndices);
dev_thrust_particleGridIndices = thrust::device_pointer_cast<int>(dev_particleGridIndices);
hipMalloc((void**)&dev_gridCellStartIndices, gridCellCount * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_gridCellStartIndices failed!");
hipMalloc((void**)&dev_gridCellEndIndices, gridCellCount * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_gridCellEndIndices failed!");
// TODO-2.3 Allocate additional buffers here.
hipMalloc((void**)&dev_shuffledPos, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_shuffledPos failed!");
hipMalloc((void**)&dev_shuffledVel1, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_shuffledVel1Indices failed!");
hipDeviceSynchronize();
}
/******************
* copyBoidsToVBO *
******************/
/**
* Copy the boid positions into the VBO so that they can be drawn by OpenGL.
*/
__global__ void kernCopyPositionsToVBO(int N, glm::vec3 *pos, float *vbo, float s_scale) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
float c_scale = -1.0f / s_scale;
if (index < N) {
vbo[4 * index + 0] = pos[index].x * c_scale;
vbo[4 * index + 1] = pos[index].y * c_scale;
vbo[4 * index + 2] = pos[index].z * c_scale;
vbo[4 * index + 3] = 1.0f;
}
}
__global__ void kernCopyVelocitiesToVBO(int N, glm::vec3 *vel, float *vbo, float s_scale) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index < N) {
vbo[4 * index + 0] = vel[index].x + 0.3f;
vbo[4 * index + 1] = vel[index].y + 0.3f;
vbo[4 * index + 2] = vel[index].z + 0.3f;
vbo[4 * index + 3] = 1.0f;
}
}
/**
* Wrapper for call to the kernCopyboidsToVBO CUDA kernel.
*/
void Boids::copyBoidsToVBO(float *vbodptr_positions, float *vbodptr_velocities) {
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
kernCopyPositionsToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_pos, vbodptr_positions, scene_scale);
kernCopyVelocitiesToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_vel1, vbodptr_velocities, scene_scale);
checkCUDAErrorWithLine("copyBoidsToVBO failed!");
hipDeviceSynchronize();
}
/******************
* stepSimulation *
******************/
/**
* TODO-1.2 You can use this as a helper for kernUpdateVelocityBruteForce.
* __device__ code can be called from a __global__ context
* Compute the new velocity on the body with index `iSelf` due to the `N` boids
* in the `pos` and `vel` arrays.
*/
__device__ glm::vec3 computeVelocityChange(int N, int iSelf, const glm::vec3 *pos, const glm::vec3 *vel) {
glm::vec3 perceived_center_rule1(0.f);
glm::vec3 iSelf_position = pos[iSelf];
glm::vec3 adhesion_velocity_rule1(0.f);
glm::vec3 dodging_velocity_rule2(0.f);
glm::vec3 cohesion_velocity_rule3(0.f);
float neighbors_rule1 = 0.f;
float neighbors_rule3 = 0.f;
for (int on_index = 0; on_index < N; ++on_index) {
if (on_index == iSelf) { continue; }
glm::vec3 on_pos = pos[on_index];
float distance = glm::distance(iSelf_position, on_pos);
// Rule 1: Boids try to fly towards the center of mass of neighboring boids
if (distance < rule1Distance) {
perceived_center_rule1 += on_pos;
++neighbors_rule1;
}
// Rule 2: Boids try to keep a small distance away from other objects (including other boids).
if (distance < rule2Distance) {
dodging_velocity_rule2 += (iSelf_position - on_pos);
}
// Rule 3: Boids try to match velocity with near boids.
if (distance < rule3Distance) {
cohesion_velocity_rule3 += vel[on_index];
++neighbors_rule3;
}
}
// final updates before summing
adhesion_velocity_rule1 = (neighbors_rule1 > 0) ? (perceived_center_rule1 / neighbors_rule1 - iSelf_position) * rule1Scale : glm::vec3(0.f);
dodging_velocity_rule2 *= rule2Scale;
cohesion_velocity_rule3 = (neighbors_rule3 > 0) ? cohesion_velocity_rule3 / neighbors_rule3 * rule3Scale : glm::vec3(0.f);
return adhesion_velocity_rule1 + dodging_velocity_rule2 + cohesion_velocity_rule3;
}
/**
* TODO-1.2 implement basic flocking
* For each of the `N` bodies, update its velocity based on its current position.
*/
__global__ void kernUpdateVelocityBruteForce(int N, glm::vec3 *pos,
glm::vec3 *vel1, glm::vec3 *vel2) {
// Compute a new velocity based on pos and vel1
// Clamp the speed
// Record the new velocity into vel2. Question: why NOT vel1?
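// (Answer: other threads are still reading vel1 for their own neighbor sums this step,
// so writing the result into vel1 in place would race; vel2 is the write buffer and the
// two buffers are ping-ponged afterwards.)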
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
glm::vec3 thisVelo = vel1[index] + computeVelocityChange(N, index, pos, vel1);
// clamp speed and reupdate
vel2[index] = glm::length(thisVelo) > maxSpeed ? glm::normalize(thisVelo) * maxSpeed : thisVelo;
}
/**
* LOOK-1.2 Since this is pretty trivial, we implemented it for you.
* For each of the `N` bodies, update its position based on its current velocity.
*/
__global__ void kernUpdatePos(int N, float dt, glm::vec3 *pos, glm::vec3 *vel) {
// Update position by velocity
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
glm::vec3 thisPos = pos[index];
thisPos += vel[index] * dt;
// Wrap the boids around so we don't lose them
thisPos.x = thisPos.x < -scene_scale ? scene_scale : thisPos.x;
thisPos.y = thisPos.y < -scene_scale ? scene_scale : thisPos.y;
thisPos.z = thisPos.z < -scene_scale ? scene_scale : thisPos.z;
thisPos.x = thisPos.x > scene_scale ? -scene_scale : thisPos.x;
thisPos.y = thisPos.y > scene_scale ? -scene_scale : thisPos.y;
thisPos.z = thisPos.z > scene_scale ? -scene_scale : thisPos.z;
pos[index] = thisPos;
}
// LOOK-2.1 Consider this method of computing a 1D index from a 3D grid index.
// LOOK-2.3 Looking at this method, what would be the most memory efficient
// order for iterating over neighboring grid cells?
// for(x)
// for(y)
// for(z)? Or some other order?
__device__ int gridIndex3Dto1D(int x, int y, int z, int gridResolution) {
return x + y * gridResolution + z * gridResolution * gridResolution;
}
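// Worked example: with gridResolution = 10, cell (x=2, y=3, z=4) maps to
// 2 + 3*10 + 4*100 = 432. Consecutive x values are adjacent in memory, so iterating
// z outermost, then y, then x innermost touches the most contiguous cell indices.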
__global__ void kernComputeIndices(int N, int gridResolution,
glm::vec3 gridMin, float inverseCellWidth,
glm::vec3 *pos, int *indices, int *gridIndices) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
// TODO-2.1
// - Label each boid with the index of its grid cell. HB: positions are shifted by gridMin so the grid origin sits at zero
glm::ivec3 cell_index_3D = glm::floor((pos[index] - gridMin) * inverseCellWidth);
gridIndices[index] = gridIndex3Dto1D(
cell_index_3D.x, cell_index_3D.y, cell_index_3D.z, gridResolution);
// Set up a parallel array of integer indices as pointers to the actual
// boid data in pos and vel1/vel2 - HB: fill dev_particleArrayIndices so that
// indices[index] records which boid the matching gridIndices[index] value belongs to;
// at initialization the two arrays are simply in the same order.
indices[index] = index;
}
// LOOK-2.1 Consider how this could be useful for indicating that a cell
// does not enclose any boids
__global__ void kernResetIntBuffer(int N, int *intBuffer, int value) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < N) {
intBuffer[index] = value;
}
}
__global__ void kernIdentifyCellStartEnd(int N, int *particleGridIndices,
int *gridCellStartIndices, int *gridCellEndIndices) {
// TODO-2.1
// Identify the start point of each cell in the gridIndices array.
// This is basically a parallel unrolling of a loop that goes
// "this index doesn't match the one before it, must be a new cell!"
// HB - indexing is only first inclusive [start, end).
int particle_index = threadIdx.x + (blockIdx.x * blockDim.x);
if (particle_index >= N) {
return;
}
int current_grid_index = particleGridIndices[particle_index];
// starting edge case
if (particle_index == 0) {
gridCellStartIndices[current_grid_index] = particle_index;
return;
}
// general case
int previous_grid_index = particleGridIndices[particle_index - 1];
if (current_grid_index != previous_grid_index) {
gridCellStartIndices[current_grid_index] = particle_index;
gridCellEndIndices[previous_grid_index] = particle_index;
}
// ending edge case
if (particle_index == N - 1) {
gridCellEndIndices[current_grid_index] = particle_index + 1;
}
}
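// Worked example: for 6 boids with sorted particleGridIndices = [0, 0, 1, 1, 1, 4]
// this kernel produces start[0]=0, end[0]=2, start[1]=2, end[1]=5, start[4]=5, end[4]=6;
// cells 2 and 3 keep the reset value -1, marking them as empty.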
__global__ void kernUpdateVelNeighborSearchScattered(
int N, int gridResolution, glm::vec3 gridMin,
float inverseCellWidth, float cellWidth,
int *gridCellStartIndices, int *gridCellEndIndices,
int *particleArrayIndices,
glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
// TODO-2.1 - Update a boid's velocity using the uniform grid to reduce
// the number of boids that need to be checked.
int particle_index = threadIdx.x + (blockIdx.x * blockDim.x);
if (particle_index >= N) {
return;
}
glm::vec3 particle_position = pos[particle_index];
glm::ivec3 current_cell_index_3D = (particle_position - gridMin) * inverseCellWidth;
// Identify which cells may contain neighbors.
float max_distance_val = glm::max(rule1Distance, glm::max(rule2Distance, rule3Distance));
glm::vec3 max_distance(max_distance_val);
glm::vec3 zeroed_particle_position = particle_position - gridMin;
glm::vec3 min_cell_index_3D = (zeroed_particle_position - max_distance) * inverseCellWidth;
glm::vec3 max_cell_index_3D = (zeroed_particle_position + max_distance) * inverseCellWidth;
// clamp 3D cell index bounds (not wrapping here)
glm::vec3 grid_min_index(0);
glm::vec3 grid_max_index(gridResolution);
glm::ivec3 grid_min_3D = glm::clamp(min_cell_index_3D, grid_min_index, grid_max_index);
glm::ivec3 grid_max_3D = glm::clamp(max_cell_index_3D, grid_min_index, grid_max_index);
// Update particle velocity based on neighboring boids
glm::vec3 perceived_center_rule1(0.f);
glm::vec3 adhesion_velocity_rule1(0.f);
glm::vec3 dodging_velocity_rule2(0.f);
glm::vec3 cohesion_velocity_rule3(0.f);
float neighbors_rule1 = 0.f;
float neighbors_rule3 = 0.f;
for (int z = grid_min_3D.z; z <= grid_max_3D.z; ++z) {
for (int y = grid_min_3D.y; y <= grid_max_3D.y; ++y) {
for (int x = grid_min_3D.x; x <= grid_max_3D.x; ++x) {
int checking_cell_index_1D = gridIndex3Dto1D(x, y, z, gridResolution);
int start_boid_index = gridCellStartIndices[checking_cell_index_1D];
int end_boid_index = gridCellEndIndices[checking_cell_index_1D];
if (start_boid_index < 0 || start_boid_index >= N || end_boid_index < 0 || end_boid_index >= N) {
continue;
}
for (int b = start_boid_index; b < end_boid_index; ++b) {
int on_boid = particleArrayIndices[b];
if (on_boid == particle_index) { continue; }
glm::vec3 boid_position = pos[on_boid];
float distance = glm::distance(particle_position, boid_position);
// Rule 1: Boids try to fly towards the center of mass of neighboring boids
if (distance < rule1Distance) {
perceived_center_rule1 += boid_position;
++neighbors_rule1;
}
// Rule 2: Boids try to keep a small distance away from other objects (including other boids).
if (distance < rule2Distance) {
dodging_velocity_rule2 += (particle_position - boid_position);
}
// Rule 3: Boids try to match velocity with near boids.
if (distance < rule3Distance) {
cohesion_velocity_rule3 += vel1[on_boid];
++neighbors_rule3;
}
} // end: iterating over all boids in a cell
}
}
}
// final updates before summing
adhesion_velocity_rule1 = (neighbors_rule1 > 0)
? (perceived_center_rule1 / neighbors_rule1 - particle_position) * rule1Scale
: glm::vec3(0.f);
dodging_velocity_rule2 *= rule2Scale;
cohesion_velocity_rule3 = (neighbors_rule3 > 0)
? cohesion_velocity_rule3 / neighbors_rule3 * rule3Scale
: glm::vec3(0.f);
// clamp and update
glm::vec3 updated_velocity = vel1[particle_index]
+ adhesion_velocity_rule1 + dodging_velocity_rule2 + cohesion_velocity_rule3;
vel2[particle_index] = (glm::length(updated_velocity) > maxSpeed)
? glm::normalize(updated_velocity) * maxSpeed
: updated_velocity;
}
__global__ void kernShuffleBuffer(int N, int *particleArrayIndices, glm::vec3* original_ordering, glm::vec3* shuffled_ordering) {
int particle_index = threadIdx.x + (blockIdx.x * blockDim.x);
if (particle_index >= N) {
return;
}
// gather into grid-sorted order: element i of the shuffled buffer comes from the boid at particleArrayIndices[i]
shuffled_ordering[particle_index] = original_ordering[particleArrayIndices[particle_index]];
}
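// Worked example: if particleArrayIndices = [3, 0, 2, 1] after sorting, then
// shuffled_ordering = [orig[3], orig[0], orig[2], orig[1]], i.e. the boid data is laid
// out in the same order as the sorted grid-cell indices.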
__global__ void kernUpdateVelNeighborSearchCoherent(
int N, int gridResolution, glm::vec3 gridMin,
float inverseCellWidth, float cellWidth,
int *gridCellStartIndices, int *gridCellEndIndices,
glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
// TODO-2.3 - This should be very similar to kernUpdateVelNeighborSearchScattered,
// except with one less level of indirection.
// This should expect gridCellStartIndices and gridCellEndIndices to refer
// directly to pos and vel1.
int particle_index = threadIdx.x + (blockIdx.x * blockDim.x);
if (particle_index >= N) {
return;
}
glm::vec3 particle_position = pos[particle_index];
glm::ivec3 current_cell_index_3D = (particle_position - gridMin) * inverseCellWidth;
// Identify which cells may contain neighbors.
float max_distance_val = glm::max(rule1Distance, glm::max(rule2Distance, rule3Distance));
glm::vec3 max_distance(max_distance_val);
glm::vec3 zeroed_particle_position = particle_position - gridMin;
glm::vec3 min_cell_index_3D = (zeroed_particle_position - max_distance) * inverseCellWidth;
glm::vec3 max_cell_index_3D = (zeroed_particle_position + max_distance) * inverseCellWidth;
// clamp 3D cell index bounds (not wrapping here)
glm::vec3 grid_min_index(0);
glm::vec3 grid_max_index(gridResolution);
glm::ivec3 grid_min_3D = glm::clamp(min_cell_index_3D, grid_min_index, grid_max_index);
glm::ivec3 grid_max_3D = glm::clamp(max_cell_index_3D, grid_min_index, grid_max_index);
// Update particle velocity based on neighboring boids
glm::vec3 perceived_center_rule1(0.f);
glm::vec3 adhesion_velocity_rule1(0.f);
glm::vec3 dodging_velocity_rule2(0.f);
glm::vec3 cohesion_velocity_rule3(0.f);
float neighbors_rule1 = 0.f;
float neighbors_rule3 = 0.f;
for (int z = grid_min_3D.z; z <= grid_max_3D.z; ++z) {
for (int y = grid_min_3D.y; y <= grid_max_3D.y; ++y) {
for (int x = grid_min_3D.x; x <= grid_max_3D.x; ++x) {
int checking_cell_index_1D = gridIndex3Dto1D(x, y, z, gridResolution);
int start_boid_index = gridCellStartIndices[checking_cell_index_1D];
int end_boid_index = gridCellEndIndices[checking_cell_index_1D];
if (start_boid_index < 0 || start_boid_index >= N || end_boid_index < 0 || end_boid_index >= N) {
continue;
}
for (int b = start_boid_index; b < end_boid_index; ++b) {
if (b == particle_index) { continue; }
glm::vec3 boid_position = pos[b];
float distance = glm::distance(particle_position, boid_position);
// Rule 1: Boids try to fly towards the center of mass of neighboring boids
if (distance < rule1Distance) {
perceived_center_rule1 += boid_position;
++neighbors_rule1;
}
// Rule 2: Boids try to keep a small distance away from other objects (including other boids).
if (distance < rule2Distance) {
dodging_velocity_rule2 += (particle_position - boid_position);
}
// Rule 3: Boids try to match velocity with near boids.
if (distance < rule3Distance) {
cohesion_velocity_rule3 += vel1[b];
++neighbors_rule3;
}
} // end: iterating over all boids in a cell
}
}
}
// final updates before summing
adhesion_velocity_rule1 = (neighbors_rule1 > 0)
? (perceived_center_rule1 / neighbors_rule1 - particle_position) * rule1Scale
: glm::vec3(0.f);
dodging_velocity_rule2 *= rule2Scale;
cohesion_velocity_rule3 = (neighbors_rule3 > 0)
? cohesion_velocity_rule3 / neighbors_rule3 * rule3Scale
: glm::vec3(0.f);
// clamp and update
glm::vec3 updated_velocity = vel1[particle_index]
+ adhesion_velocity_rule1 + dodging_velocity_rule2 + cohesion_velocity_rule3;
vel2[particle_index] = (glm::length(updated_velocity) > maxSpeed)
? glm::normalize(updated_velocity) * maxSpeed
: updated_velocity;
}
/**
* Step the entire N-body simulation by `dt` seconds.
*/
void Boids::stepSimulationNaive(float dt) {
dim3 blocksPerGrid((numObjects + blockSize - 1) / blockSize);
// Use the kernels to step the simulation forward in time.
hipLaunchKernelGGL(( kernUpdateVelocityBruteForce), dim3(blocksPerGrid), dim3(blockSize), 0, 0, numObjects, dev_pos, dev_vel1, dev_vel2);
checkCUDAErrorWithLine("kernUpdateVelocityBruteForce failed!");
hipLaunchKernelGGL(( kernUpdatePos), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, numObjects, dt, dev_pos, dev_vel1);
checkCUDAErrorWithLine("kernUpdatePos failed!");
// Ping-pong/swap the velocity buffers so the newly computed velocities become the current ones
hipMemcpy(dev_vel1, dev_vel2, sizeof(glm::vec3) * numObjects, hipMemcpyDeviceToDevice);
}
void Boids::stepSimulationScatteredGrid(float dt) {
dim3 blocksPerGrid((numObjects + blockSize - 1) / blockSize);
// Uniform Grid Neighbor search using Thrust sort.
// In Parallel:
// - label each particle with its array index as well as its grid index.
// Use 2x width grids.
hipLaunchKernelGGL(( kernComputeIndices), dim3(blocksPerGrid), dim3(threadsPerBlock) , 0, 0,
numObjects, gridSideCount, gridMinimum, gridInverseCellWidth,
dev_pos, dev_particleArrayIndices, dev_particleGridIndices);
checkCUDAErrorWithLine("kernComputeIndices failed!");
// - Unstable key sort using Thrust. A stable sort isn't necessary, but you
// are welcome to do a performance comparison.
thrust::sort_by_key(dev_thrust_particleGridIndices,
dev_thrust_particleGridIndices + numObjects,
dev_thrust_particleArrayIndices);
// - Naively unroll the loop for finding the start and end indices of each
// cell's data pointers in the array of boid indices
hipLaunchKernelGGL(( kernResetIntBuffer), dim3(blocksPerGrid), dim3(threadsPerBlock) , 0, 0, numObjects, dev_gridCellStartIndices, -1);
checkCUDAErrorWithLine("kernResetIntBuffer failed!");
hipLaunchKernelGGL(( kernResetIntBuffer), dim3(blocksPerGrid), dim3(threadsPerBlock) , 0, 0, numObjects, dev_gridCellEndIndices, -1);
checkCUDAErrorWithLine("kernResetIntBuffer failed!");
hipLaunchKernelGGL(( kernIdentifyCellStartEnd), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0,
numObjects, dev_particleGridIndices, dev_gridCellStartIndices, dev_gridCellEndIndices);
checkCUDAErrorWithLine("kernIdentifyCellStartEnd failed!");
// - Perform velocity updates using neighbor search
hipLaunchKernelGGL(( kernUpdateVelNeighborSearchScattered), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0,
numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, gridCellWidth,
dev_gridCellStartIndices, dev_gridCellEndIndices, dev_particleArrayIndices,
dev_pos, dev_vel1, dev_vel2);
checkCUDAErrorWithLine("kernUpdateVelNeighborSearchScattered failed!");
// - Update positions
hipLaunchKernelGGL(( kernUpdatePos), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, numObjects, dt, dev_pos, dev_vel1);
checkCUDAErrorWithLine("kernUpdatePos brute force failed!");
// - Ping-pong buffers as needed
hipMemcpy(dev_vel1, dev_vel2, sizeof(glm::vec3) * numObjects, hipMemcpyDeviceToDevice);
}
void Boids::stepSimulationCoherentGrid(float dt) {
dim3 blocksPerGrid((numObjects + blockSize - 1) / blockSize);
// TODO-2.3 - start by copying Boids::stepSimulationNaiveGrid
// Uniform Grid Neighbor search using Thrust sort on cell-coherent data.
// In Parallel:
// - Label each particle with its array index as well as its grid index.
// Use 2x width grids
hipLaunchKernelGGL(( kernComputeIndices), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0,
numObjects, gridSideCount, gridMinimum, gridInverseCellWidth,
dev_pos, dev_particleArrayIndices, dev_particleGridIndices);
checkCUDAErrorWithLine("kernComputeIndices failed!");
// - Unstable key sort using Thrust. A stable sort isn't necessary, but you
// are welcome to do a performance comparison.
thrust::sort_by_key(dev_thrust_particleGridIndices,
dev_thrust_particleGridIndices + numObjects,
dev_thrust_particleArrayIndices);
// - Naively unroll the loop for finding the start and end indices of each
// cell's data pointers in the array of boid indices
hipLaunchKernelGGL(( kernResetIntBuffer), dim3(blocksPerGrid), dim3(blockSize), 0, 0, numObjects, dev_gridCellStartIndices, -1);
hipLaunchKernelGGL(( kernResetIntBuffer), dim3(blocksPerGrid), dim3(blockSize), 0, 0, numObjects, dev_gridCellEndIndices, -1);
hipLaunchKernelGGL(( kernIdentifyCellStartEnd), dim3(blocksPerGrid), dim3(blockSize), 0, 0, numObjects, dev_particleGridIndices,
dev_gridCellStartIndices, dev_gridCellEndIndices);
// - BIG DIFFERENCE: use the rearranged array index buffer to reshuffle all
// the particle data in the simulation array.
// CONSIDER WHAT ADDITIONAL BUFFERS YOU NEED
// HB: need separate buffers because the shuffle cannot be done in place - a thread would write into a slot another thread still has to read
hipLaunchKernelGGL(( kernShuffleBuffer), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, numObjects, dev_particleArrayIndices, dev_pos, dev_shuffledPos);
hipLaunchKernelGGL(( kernShuffleBuffer), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, numObjects, dev_particleArrayIndices, dev_vel1, dev_shuffledVel1);
// HB put ordering back in appropriate buffers
hipDeviceSynchronize();
hipMemcpy(dev_pos, dev_shuffledPos, numObjects * sizeof(glm::vec3), hipMemcpyDeviceToDevice);
hipMemcpy(dev_vel1, dev_shuffledVel1, numObjects * sizeof(glm::vec3), hipMemcpyDeviceToDevice);
// - Perform velocity updates using neighbor search
hipLaunchKernelGGL(( kernUpdateVelNeighborSearchCoherent), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0,
numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, gridCellWidth,
dev_gridCellStartIndices, dev_gridCellEndIndices,
dev_pos, dev_vel1, dev_vel2);
checkCUDAErrorWithLine("kernUpdateVelNeighborSearchScattered failed!");
// - Update positions
hipLaunchKernelGGL(( kernUpdatePos), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, numObjects, dt, dev_pos, dev_vel1);
checkCUDAErrorWithLine("kernUpdatePos brute force failed!");
// - Ping-pong buffers as needed. THIS MAY BE DIFFERENT FROM BEFORE.
hipMemcpy(dev_vel1, dev_vel2, sizeof(glm::vec3) * numObjects, hipMemcpyDeviceToDevice);
}
void Boids::endSimulation() {
hipFree(dev_vel1);
hipFree(dev_vel2);
hipFree(dev_pos);
// TODO-2.1 - Free any additional buffers here.
hipFree(dev_particleArrayIndices);
hipFree(dev_particleGridIndices);
hipFree(dev_gridCellStartIndices);
hipFree(dev_gridCellEndIndices);
// TODO-2.3 - Free any additional buffers here.
hipFree(dev_shuffledPos);
hipFree(dev_shuffledVel1);
}
void Boids::unitTest() {
// LOOK-1.2 Feel free to write additional tests here.
// test unstable sort
int *dev_intKeys;
int *dev_intValues;
int N = 10;
std::unique_ptr<int[]>intKeys{ new int[N] };
std::unique_ptr<int[]>intValues{ new int[N] };
intKeys[0] = 0; intValues[0] = 0;
intKeys[1] = 1; intValues[1] = 1;
intKeys[2] = 0; intValues[2] = 2;
intKeys[3] = 3; intValues[3] = 3;
intKeys[4] = 0; intValues[4] = 4;
intKeys[5] = 2; intValues[5] = 5;
intKeys[6] = 2; intValues[6] = 6;
intKeys[7] = 0; intValues[7] = 7;
intKeys[8] = 5; intValues[8] = 8;
intKeys[9] = 6; intValues[9] = 9;
hipMalloc((void**)&dev_intKeys, N * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_intKeys failed!");
hipMalloc((void**)&dev_intValues, N * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_intValues failed!");
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
std::cout << "before unstable sort: " << std::endl;
for (int i = 0; i < N; i++) {
std::cout << " key: " << intKeys[i];
std::cout << " value: " << intValues[i] << std::endl;
}
// How to copy data to the GPU
hipMemcpy(dev_intKeys, intKeys.get(), sizeof(int) * N, hipMemcpyHostToDevice);
hipMemcpy(dev_intValues, intValues.get(), sizeof(int) * N, hipMemcpyHostToDevice);
// Wrap device vectors in thrust iterators for use with thrust.
thrust::device_ptr<int> dev_thrust_keys(dev_intKeys);
thrust::device_ptr<int> dev_thrust_values(dev_intValues);
// LOOK-2.1 Example for using thrust::sort_by_key
thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + N, dev_thrust_values);
// How to copy data back to the CPU side from the GPU
hipMemcpy(intKeys.get(), dev_intKeys, sizeof(int) * N, hipMemcpyDeviceToHost);
hipMemcpy(intValues.get(), dev_intValues, sizeof(int) * N, hipMemcpyDeviceToHost);
checkCUDAErrorWithLine("memcpy back failed!");
std::cout << "after unstable sort: " << std::endl;
for (int i = 0; i < N; i++) {
std::cout << " key: " << intKeys[i];
std::cout << " value: " << intValues[i] << std::endl;
}
// cleanup
hipFree(dev_intKeys);
hipFree(dev_intValues);
checkCUDAErrorWithLine("hipFree failed!");
return;
}
|
e4406b836801b44de1b6a33e87e091d1bb506fb3.cu
|
#define GLM_FORCE_CUDA
#include <stdio.h>
#include <cuda.h>
#include <cmath>
#include <glm/glm.hpp>
#include "utilityCore.hpp"
#include "kernel.h"
// LOOK-2.1 potentially useful for doing grid-based neighbor search
#ifndef imax
#define imax( a, b ) ( ((a) > (b)) ? (a) : (b) )
#endif
#ifndef imin
#define imin( a, b ) ( ((a) < (b)) ? (a) : (b) )
#endif
#define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__)
/**
* Check for CUDA errors; print and exit if there was a problem.
*/
void checkCUDAError(const char *msg, int line = -1) {
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err) {
if (line >= 0) {
fprintf(stderr, "Line %d: ", line);
}
fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
/*****************
* Configuration *
*****************/
/*! Block size used for CUDA kernel launch. */
#define blockSize 128
// LOOK-1.2 Parameters for the boids algorithm.
// These worked well in our reference implementation.
#define rule1Distance 5.0f
#define rule2Distance 3.0f
#define rule3Distance 5.0f
#define rule1Scale 0.01f
#define rule2Scale 0.1f
#define rule3Scale 0.1f
#define maxSpeed 1.0f
/*! Size of the starting area in simulation space. */
#define scene_scale 100.0f
/***********************************************
* Kernel state (pointers are device pointers) *
***********************************************/
int numObjects;
dim3 threadsPerBlock(blockSize);
// LOOK-1.2 - These buffers are here to hold all your boid information.
// These get allocated for you in Boids::initSimulation.
// Consider why you would need two velocity buffers in a simulation where each
// boid cares about its neighbors' velocities.
// These are called ping-pong buffers.
glm::vec3 *dev_pos;
glm::vec3 *dev_vel1;
glm::vec3 *dev_vel2;
// LOOK-2.1 - these are NOT allocated for you. You'll have to set up the thrust
// pointers on your own too. (particleArrays and gridCellIndices)
// For efficient sorting and the uniform grid. These should always be parallel.
int *dev_particleArrayIndices; // - buffer containing a pointer for each boid to
// its data in dev_pos and dev_vel1 and dev_vel2
int *dev_particleGridIndices; // - buffer containing the grid index of each boid
// needed for use with thrust
thrust::device_ptr<int> dev_thrust_particleArrayIndices;
thrust::device_ptr<int> dev_thrust_particleGridIndices;
int *dev_gridCellStartIndices; // - buffer containing a pointer for each cell to
// the beginning of its data in dev_particleArrayIndices
int *dev_gridCellEndIndices; // - buffer containing a pointer for each cell to
// the end of its data in dev_particleArrayIndices
// TODO-2.3 - consider what additional buffers you might need to reshuffle
// the position and velocity data to be coherent within cells.
// HB - same contents as the corresponding pos/vel1 buffers, but reordered to
// match the sorted grid cell ordering
glm::vec3* dev_shuffledPos;
glm::vec3* dev_shuffledVel1;
// LOOK-2.1 - Grid parameters based on simulation parameters.
// These are automatically computed for you in Boids::initSimulation
int gridCellCount;
int gridSideCount; //gridResolution
float gridCellWidth;
float gridInverseCellWidth;
glm::vec3 gridMinimum;
/******************
* initSimulation *
******************/
__host__ __device__ unsigned int hash(unsigned int a) {
a = (a + 0x7ed55d16) + (a << 12);
a = (a ^ 0xc761c23c) ^ (a >> 19);
a = (a + 0x165667b1) + (a << 5);
a = (a + 0xd3a2646c) ^ (a << 9);
a = (a + 0xfd7046c5) + (a << 3);
a = (a ^ 0xb55a4f09) ^ (a >> 16);
return a;
}
/**
* LOOK-1.2 - this is a typical helper function for a CUDA kernel.
* Function for generating a random vec3.
*/
__host__ __device__ glm::vec3 generateRandomVec3(float time, int index) {
thrust::default_random_engine rng(hash((int)(index * time)));
thrust::uniform_real_distribution<float> unitDistrib(-1, 1);
return glm::vec3((float)unitDistrib(rng), (float)unitDistrib(rng), (float)unitDistrib(rng));
}
/**
* LOOK-1.2 - This is a basic CUDA kernel.
* CUDA kernel for generating boids at random positions throughout the simulation space.
*/
__global__ void kernGenerateRandomPosArray(int time, int N, glm::vec3 * arr, float scale) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < N) {
glm::vec3 rand = generateRandomVec3(time, index);
arr[index].x = scale * rand.x;
arr[index].y = scale * rand.y;
arr[index].z = scale * rand.z;
}
}
/**
* Initialize memory, update some globals
*/
void Boids::initSimulation(int N) {
numObjects = N;
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
// LOOK-1.2 - This is basic CUDA memory management and error checking.
// Don't forget to cudaFree in Boids::endSimulation.
cudaMalloc((void**)&dev_pos, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("cudaMalloc dev_pos failed!");
cudaMalloc((void**)&dev_vel1, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("cudaMalloc dev_vel1 failed!");
cudaMalloc((void**)&dev_vel2, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("cudaMalloc dev_vel2 failed!");
// LOOK-1.2 - This is a typical CUDA kernel invocation.
kernGenerateRandomPosArray<<<fullBlocksPerGrid, blockSize>>>(1, numObjects,
dev_pos, scene_scale);
checkCUDAErrorWithLine("kernGenerateRandomPosArray failed!");
// LOOK-2.1 computing grid params
gridCellWidth = 2.0f * std::max(std::max(rule1Distance, rule2Distance), rule3Distance);
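// (Twice the largest rule distance: a boid's neighborhood [pos - d, pos + d]
// then spans at most two cells per axis, so at most 8 cells need checking.)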
int halfSideCount = (int)(scene_scale / gridCellWidth) + 1;
gridSideCount = 2 * halfSideCount;
gridCellCount = gridSideCount * gridSideCount * gridSideCount;
gridInverseCellWidth = 1.0f / gridCellWidth;
float halfGridWidth = gridCellWidth * halfSideCount;
gridMinimum.x -= halfGridWidth;
gridMinimum.y -= halfGridWidth;
gridMinimum.z -= halfGridWidth;
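// With the defaults above (scene_scale = 100, largest rule distance = 5) this gives
// gridCellWidth = 10, gridSideCount = 22, gridCellCount = 22^3 = 10648, and a grid
// minimum corner at (-110, -110, -110).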
// TODO-2.1 Allocate additional buffers here.
cudaMalloc((void**)&dev_particleArrayIndices, N * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_particleArrayIndices failed!");
cudaMalloc((void**)&dev_particleGridIndices, N * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_particleGridIndices failed!");
dev_thrust_particleArrayIndices = thrust::device_pointer_cast<int>(dev_particleArrayIndices);
dev_thrust_particleGridIndices = thrust::device_pointer_cast<int>(dev_particleGridIndices);
cudaMalloc((void**)&dev_gridCellStartIndices, gridCellCount * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_gridCellStartIndices failed!");
cudaMalloc((void**)&dev_gridCellEndIndices, gridCellCount * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_gridCellEndIndices failed!");
// TODO-2.3 Allocate additional buffers here.
cudaMalloc((void**)&dev_shuffledPos, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("cudaMalloc dev_shuffledPos failed!");
cudaMalloc((void**)&dev_shuffledVel1, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("cudaMalloc dev_shuffledVel1 failed!");
cudaDeviceSynchronize();
}
/******************
* copyBoidsToVBO *
******************/
/**
* Copy the boid positions into the VBO so that they can be drawn by OpenGL.
*/
__global__ void kernCopyPositionsToVBO(int N, glm::vec3 *pos, float *vbo, float s_scale) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
float c_scale = -1.0f / s_scale;
if (index < N) {
vbo[4 * index + 0] = pos[index].x * c_scale;
vbo[4 * index + 1] = pos[index].y * c_scale;
vbo[4 * index + 2] = pos[index].z * c_scale;
vbo[4 * index + 3] = 1.0f;
}
}
__global__ void kernCopyVelocitiesToVBO(int N, glm::vec3 *vel, float *vbo, float s_scale) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index < N) {
vbo[4 * index + 0] = vel[index].x + 0.3f;
vbo[4 * index + 1] = vel[index].y + 0.3f;
vbo[4 * index + 2] = vel[index].z + 0.3f;
vbo[4 * index + 3] = 1.0f;
}
}
/**
* Wrapper for call to the kernCopyboidsToVBO CUDA kernel.
*/
void Boids::copyBoidsToVBO(float *vbodptr_positions, float *vbodptr_velocities) {
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
kernCopyPositionsToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_pos, vbodptr_positions, scene_scale);
kernCopyVelocitiesToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_vel1, vbodptr_velocities, scene_scale);
checkCUDAErrorWithLine("copyBoidsToVBO failed!");
cudaDeviceSynchronize();
}
/******************
* stepSimulation *
******************/
/**
* TODO-1.2 You can use this as a helper for kernUpdateVelocityBruteForce.
* __device__ code can be called from a __global__ context
* Compute the new velocity on the body with index `iSelf` due to the `N` boids
* in the `pos` and `vel` arrays.
*/
__device__ glm::vec3 computeVelocityChange(int N, int iSelf, const glm::vec3 *pos, const glm::vec3 *vel) {
glm::vec3 perceived_center_rule1(0.f);
glm::vec3 iSelf_position = pos[iSelf];
glm::vec3 adhesion_velocity_rule1(0.f);
glm::vec3 dodging_velocity_rule2(0.f);
glm::vec3 cohesion_velocity_rule3(0.f);
float neighbors_rule1 = 0.f;
float neighbors_rule3 = 0.f;
for (int on_index = 0; on_index < N; ++on_index) {
if (on_index == iSelf) { continue; }
glm::vec3 on_pos = pos[on_index];
float distance = glm::distance(iSelf_position, on_pos);
// Rule 1: Boids try to fly towards the center of mass of neighboring boids
if (distance < rule1Distance) {
perceived_center_rule1 += on_pos;
++neighbors_rule1;
}
// Rule 2: Boids try to keep a small distance away from other objects (including other boids).
if (distance < rule2Distance) {
dodging_velocity_rule2 += (iSelf_position - on_pos);
}
// Rule 3: Boids try to match velocity with near boids.
if (distance < rule3Distance) {
cohesion_velocity_rule3 += vel[on_index];
++neighbors_rule3;
}
}
// final updates before summing
adhesion_velocity_rule1 = (neighbors_rule1 > 0) ? (perceived_center_rule1 / neighbors_rule1 - iSelf_position) * rule1Scale : glm::vec3(0.f);
dodging_velocity_rule2 *= rule2Scale;
cohesion_velocity_rule3 = (neighbors_rule3 > 0) ? cohesion_velocity_rule3 / neighbors_rule3 * rule3Scale : glm::vec3(0.f);
return adhesion_velocity_rule1 + dodging_velocity_rule2 + cohesion_velocity_rule3;
}
/**
* TODO-1.2 implement basic flocking
* For each of the `N` bodies, update its velocity based on its current position.
*/
__global__ void kernUpdateVelocityBruteForce(int N, glm::vec3 *pos,
glm::vec3 *vel1, glm::vec3 *vel2) {
// Compute a new velocity based on pos and vel1
// Clamp the speed
// Record the new velocity into vel2. Question: why NOT vel1?
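// (Other threads are still reading vel1 for their own neighbor sums this step;
// writing results into vel1 would mix old and new velocities, so we write to
// vel2 and ping-pong the buffers afterwards.)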
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
glm::vec3 thisVelo = vel1[index] + computeVelocityChange(N, index, pos, vel1);
// clamp speed and reupdate
vel2[index] = glm::length(thisVelo) > maxSpeed ? glm::normalize(thisVelo) * maxSpeed : thisVelo;
}
/**
* LOOK-1.2 Since this is pretty trivial, we implemented it for you.
* For each of the `N` bodies, update its position based on its current velocity.
*/
__global__ void kernUpdatePos(int N, float dt, glm::vec3 *pos, glm::vec3 *vel) {
// Update position by velocity
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
glm::vec3 thisPos = pos[index];
thisPos += vel[index] * dt;
// Wrap the boids around so we don't lose them
thisPos.x = thisPos.x < -scene_scale ? scene_scale : thisPos.x;
thisPos.y = thisPos.y < -scene_scale ? scene_scale : thisPos.y;
thisPos.z = thisPos.z < -scene_scale ? scene_scale : thisPos.z;
thisPos.x = thisPos.x > scene_scale ? -scene_scale : thisPos.x;
thisPos.y = thisPos.y > scene_scale ? -scene_scale : thisPos.y;
thisPos.z = thisPos.z > scene_scale ? -scene_scale : thisPos.z;
pos[index] = thisPos;
}
// LOOK-2.1 Consider this method of computing a 1D index from a 3D grid index.
// LOOK-2.3 Looking at this method, what would be the most memory efficient
// order for iterating over neighboring grid cells?
// for(x)
// for(y)
// for(z)? Or some other order?
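// (x varies fastest in the 1D index below, so iterating z in the outer loop,
// then y, then x innermost visits consecutive cell indices and keeps the
// start/end index lookups contiguous in memory.)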
__device__ int gridIndex3Dto1D(int x, int y, int z, int gridResolution) {
return x + y * gridResolution + z * gridResolution * gridResolution;
}
__global__ void kernComputeIndices(int N, int gridResolution,
glm::vec3 gridMin, float inverseCellWidth,
glm::vec3 *pos, int *indices, int *gridIndices) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
// TODO-2.1
// - Label each boid with the index of its grid cell. (HB: subtract gridMin so the grid origin is at zero.)
glm::ivec3 cell_index_3D = glm::floor((pos[index] - gridMin) * inverseCellWidth);
gridIndices[index] = gridIndex3Dto1D(
cell_index_3D.x, cell_index_3D.y, cell_index_3D.z, gridResolution);
// Set up a parallel array of integer indices as pointers to the actual
// boid data in pos and vel1/vel2. (HB: fill dev_particleArrayIndices so that
// indices[i] names the boid whose cell index is stored in gridIndices[i];
// at initialization both arrays are in the same order.)
indices[index] = index;
}
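// After kernComputeIndices, gridIndices[i] holds the cell of boid i and
// indices[i] == i. Sorting (gridIndices, indices) by key then turns indices
// into the permutation that groups boids by cell.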
// LOOK-2.1 Consider how this could be useful for indicating that a cell
// does not enclose any boids
__global__ void kernResetIntBuffer(int N, int *intBuffer, int value) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < N) {
intBuffer[index] = value;
}
}
__global__ void kernIdentifyCellStartEnd(int N, int *particleGridIndices,
int *gridCellStartIndices, int *gridCellEndIndices) {
// TODO-2.1
// Identify the start point of each cell in the gridIndices array.
// This is basically a parallel unrolling of a loop that goes
// "this index doesn't match the one before it, must be a new cell!"
// HB - indexing is only first inclusive [start, end).
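// Example: with sorted particleGridIndices = [2, 2, 2, 5, 5, 9] (N = 6),
// start[2] = 0, end[2] = 3, start[5] = 3, end[5] = 5, start[9] = 5, end[9] = 6,
// and every other cell keeps the -1 sentinel written by kernResetIntBuffer.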
int particle_index = threadIdx.x + (blockIdx.x * blockDim.x);
if (particle_index >= N) {
return;
}
int current_grid_index = particleGridIndices[particle_index];
// starting edge case
if (particle_index == 0) {
gridCellStartIndices[current_grid_index] = particle_index;
return;
}
// general case
int previous_grid_index = particleGridIndices[particle_index - 1];
if (current_grid_index != previous_grid_index) {
gridCellStartIndices[current_grid_index] = particle_index;
gridCellEndIndices[previous_grid_index] = particle_index;
}
// ending edge case
if (particle_index == N - 1) {
gridCellEndIndices[current_grid_index] = particle_index + 1;
}
}
__global__ void kernUpdateVelNeighborSearchScattered(
int N, int gridResolution, glm::vec3 gridMin,
float inverseCellWidth, float cellWidth,
int *gridCellStartIndices, int *gridCellEndIndices,
int *particleArrayIndices,
glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
// TODO-2.1 - Update a boid's velocity using the uniform grid to reduce
// the number of boids that need to be checked.
int particle_index = threadIdx.x + (blockIdx.x * blockDim.x);
if (particle_index >= N) {
return;
}
glm::vec3 particle_position = pos[particle_index];
glm::ivec3 current_cell_index_3D = (particle_position - gridMin) * inverseCellWidth;
// Identify which cells may contain neighbors.
float max_distance_val = glm::max(rule1Distance, glm::max(rule2Distance, rule3Distance));
glm::vec3 max_distance(max_distance_val);
glm::vec3 zeroed_particle_position = particle_position - gridMin;
glm::vec3 min_cell_index_3D = (zeroed_particle_position - max_distance) * inverseCellWidth;
glm::vec3 max_cell_index_3D = (zeroed_particle_position + max_distance) * inverseCellWidth;
// clamp 3D cell index bounds (not wrapping here)
glm::vec3 grid_min_index(0);
glm::vec3 grid_max_index(gridResolution - 1); // highest valid cell index per axis
glm::ivec3 grid_min_3D = glm::clamp(min_cell_index_3D, grid_min_index, grid_max_index);
glm::ivec3 grid_max_3D = glm::clamp(max_cell_index_3D, grid_min_index, grid_max_index);
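// Only cells overlapping the cube [pos - maxDist, pos + maxDist] can contain
// neighbors; with cellWidth = 2 * maxDist that range spans at most two cells
// per axis, i.e. at most 8 cells are visited per boid.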
// Update particle velocity based on neighboring boids
glm::vec3 perceived_center_rule1(0.f);
glm::vec3 adhesion_velocity_rule1(0.f);
glm::vec3 dodging_velocity_rule2(0.f);
glm::vec3 cohesion_velocity_rule3(0.f);
float neighbors_rule1 = 0.f;
float neighbors_rule3 = 0.f;
for (int z = grid_min_3D.z; z <= grid_max_3D.z; ++z) {
for (int y = grid_min_3D.y; y <= grid_max_3D.y; ++y) {
for (int x = grid_min_3D.x; x <= grid_max_3D.x; ++x) {
int checking_cell_index_1D = gridIndex3Dto1D(x, y, z, gridResolution);
int start_boid_index = gridCellStartIndices[checking_cell_index_1D];
int end_boid_index = gridCellEndIndices[checking_cell_index_1D];
if (start_boid_index < 0 || start_boid_index >= N || end_boid_index < 0 || end_boid_index >= N) {
continue;
}
for (int b = start_boid_index; b < end_boid_index; ++b) {
int on_boid = particleArrayIndices[b];
if (on_boid == particle_index) { continue; }
glm::vec3 boid_position = pos[on_boid];
float distance = glm::distance(particle_position, boid_position);
// Rule 1: Boids try to fly towards the center of mass of neighboring boids
if (distance < rule1Distance) {
perceived_center_rule1 += boid_position;
++neighbors_rule1;
}
// Rule 2: Boids try to keep a small distance away from other objects (including other boids).
if (distance < rule2Distance) {
dodging_velocity_rule2 += (particle_position - boid_position);
}
// Rule 3: Boids try to match velocity with near boids.
if (distance < rule3Distance) {
cohesion_velocity_rule3 += vel1[on_boid];
++neighbors_rule3;
}
} // end: iterating over all boids in a cell
}
}
}
// final updates before summing
adhesion_velocity_rule1 = (neighbors_rule1 > 0)
? (perceived_center_rule1 / neighbors_rule1 - particle_position) * rule1Scale
: glm::vec3(0.f);
dodging_velocity_rule2 *= rule2Scale;
cohesion_velocity_rule3 = (neighbors_rule3 > 0)
? cohesion_velocity_rule3 / neighbors_rule3 * rule3Scale
: glm::vec3(0.f);
// clamp and update
glm::vec3 updated_velocity = vel1[particle_index]
+ adhesion_velocity_rule1 + dodging_velocity_rule2 + cohesion_velocity_rule3;
vel2[particle_index] = (glm::length(updated_velocity) > maxSpeed)
? glm::normalize(updated_velocity) * maxSpeed
: updated_velocity;
}
__global__ void kernShuffleBuffer(int N, int *particleArrayIndices, glm::vec3* original_ordering, glm::vec3* shuffled_ordering) {
int particle_index = threadIdx.x + (blockIdx.x * blockDim.x);
if (particle_index >= N) {
return;
}
// swapping v1 and v2 while also sorting appropriately
shuffled_ordering[particle_index] = original_ordering[particleArrayIndices[particle_index]];
}
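// kernShuffleBuffer is a gather: shuffled[i] = original[particleArrayIndices[i]].
// After it runs, the pos/vel data sit in the same order as the sorted cell
// indices, so the coherent search can index them directly without the extra
// particleArrayIndices lookup.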
__global__ void kernUpdateVelNeighborSearchCoherent(
int N, int gridResolution, glm::vec3 gridMin,
float inverseCellWidth, float cellWidth,
int *gridCellStartIndices, int *gridCellEndIndices,
glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
// TODO-2.3 - This should be very similar to kernUpdateVelNeighborSearchScattered,
// except with one less level of indirection.
// This should expect gridCellStartIndices and gridCellEndIndices to refer
// directly to pos and vel1.
int particle_index = threadIdx.x + (blockIdx.x * blockDim.x);
if (particle_index >= N) {
return;
}
glm::vec3 particle_position = pos[particle_index];
glm::ivec3 current_cell_index_3D = (particle_position - gridMin) * inverseCellWidth;
// Identify which cells may contain neighbors.
float max_distance_val = glm::max(rule1Distance, glm::max(rule2Distance, rule3Distance));
glm::vec3 max_distance(max_distance_val);
glm::vec3 zeroed_particle_position = particle_position - gridMin;
glm::vec3 min_cell_index_3D = (zeroed_particle_position - max_distance) * inverseCellWidth;
glm::vec3 max_cell_index_3D = (zeroed_particle_position + max_distance) * inverseCellWidth;
// clamp 3D cell index bounds (not wrapping here)
glm::vec3 grid_min_index(0);
glm::vec3 grid_max_index(gridResolution - 1); // highest valid cell index per axis
glm::ivec3 grid_min_3D = glm::clamp(min_cell_index_3D, grid_min_index, grid_max_index);
glm::ivec3 grid_max_3D = glm::clamp(max_cell_index_3D, grid_min_index, grid_max_index);
// Update particle velocity based on neighboring boids
glm::vec3 perceived_center_rule1(0.f);
glm::vec3 adhesion_velocity_rule1(0.f);
glm::vec3 dodging_velocity_rule2(0.f);
glm::vec3 cohesion_velocity_rule3(0.f);
float neighbors_rule1 = 0.f;
float neighbors_rule3 = 0.f;
for (int z = grid_min_3D.z; z <= grid_max_3D.z; ++z) {
for (int y = grid_min_3D.y; y <= grid_max_3D.y; ++y) {
for (int x = grid_min_3D.x; x <= grid_max_3D.x; ++x) {
int checking_cell_index_1D = gridIndex3Dto1D(x, y, z, gridResolution);
int start_boid_index = gridCellStartIndices[checking_cell_index_1D];
int end_boid_index = gridCellEndIndices[checking_cell_index_1D];
if (start_boid_index < 0 || start_boid_index >= N || end_boid_index < 0 || end_boid_index >= N) {
continue;
}
for (int b = start_boid_index; b < end_boid_index; ++b) {
if (b == particle_index) { continue; }
glm::vec3 boid_position = pos[b];
float distance = glm::distance(particle_position, boid_position);
// Rule 1: Boids try to fly towards the center of mass of neighboring boids
if (distance < rule1Distance) {
perceived_center_rule1 += boid_position;
++neighbors_rule1;
}
// Rule 2: Boids try to keep a small distance away from other objects (including other boids).
if (distance < rule2Distance) {
dodging_velocity_rule2 += (particle_position - boid_position);
}
// Rule 3: Boids try to match velocity with near boids.
if (distance < rule3Distance) {
cohesion_velocity_rule3 += vel1[b];
++neighbors_rule3;
}
} // end: iterating over all boids in a cell
}
}
}
// final updates before summing
adhesion_velocity_rule1 = (neighbors_rule1 > 0)
? (perceived_center_rule1 / neighbors_rule1 - particle_position) * rule1Scale
: glm::vec3(0.f);
dodging_velocity_rule2 *= rule2Scale;
cohesion_velocity_rule3 = (neighbors_rule3 > 0)
? cohesion_velocity_rule3 / neighbors_rule3 * rule3Scale
: glm::vec3(0.f);
// clamp and update
glm::vec3 updated_velocity = vel1[particle_index]
+ adhesion_velocity_rule1 + dodging_velocity_rule2 + cohesion_velocity_rule3;
vel2[particle_index] = (glm::length(updated_velocity) > maxSpeed)
? glm::normalize(updated_velocity) * maxSpeed
: updated_velocity;
}
/**
* Step the entire N-body simulation by `dt` seconds.
*/
void Boids::stepSimulationNaive(float dt) {
dim3 blocksPerGrid((numObjects + blockSize - 1) / blockSize);
// Use the kernels to step the simulation forward in time.
kernUpdateVelocityBruteForce<<<blocksPerGrid, blockSize>>>(numObjects, dev_pos, dev_vel1, dev_vel2);
checkCUDAErrorWithLine("kernUpdateVelocityBruteForce failed!");
kernUpdatePos<<<blocksPerGrid, threadsPerBlock>>>(numObjects, dt, dev_pos, dev_vel1);
checkCUDAErrorWithLine("kernUpdatePos failed!");
// Ping-pong/swap the velocity buffers, so now have calculated updated velocity as current
cudaMemcpy(dev_vel1, dev_vel2, sizeof(glm::vec3) * numObjects, cudaMemcpyDeviceToDevice);
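// (A pointer swap, e.g. std::swap(dev_vel1, dev_vel2), would avoid this
// device-to-device copy; the copy is kept for simplicity.)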
}
void Boids::stepSimulationScatteredGrid(float dt) {
dim3 blocksPerGrid((numObjects + blockSize - 1) / blockSize);
// Uniform Grid Neighbor search using Thrust sort.
// In Parallel:
// - label each particle with its array index as well as its grid index.
// Use 2x width grids.
kernComputeIndices<<<blocksPerGrid, threadsPerBlock >>>(
numObjects, gridSideCount, gridMinimum, gridInverseCellWidth,
dev_pos, dev_particleArrayIndices, dev_particleGridIndices);
checkCUDAErrorWithLine("kernComputeIndices failed!");
// - Unstable key sort using Thrust. A stable sort isn't necessary, but you
// are welcome to do a performance comparison.
thrust::sort_by_key(dev_thrust_particleGridIndices,
dev_thrust_particleGridIndices + numObjects,
dev_thrust_particleArrayIndices);
// - Naively unroll the loop for finding the start and end indices of each
// cell's data pointers in the array of boid indices
// Reset one entry per grid cell (gridCellCount of them), not per boid.
dim3 cellBlocksPerGrid((gridCellCount + blockSize - 1) / blockSize);
kernResetIntBuffer<<<cellBlocksPerGrid, threadsPerBlock >>>(gridCellCount, dev_gridCellStartIndices, -1);
checkCUDAErrorWithLine("kernResetIntBuffer failed!");
kernResetIntBuffer<<<cellBlocksPerGrid, threadsPerBlock >>>(gridCellCount, dev_gridCellEndIndices, -1);
checkCUDAErrorWithLine("kernResetIntBuffer failed!");
kernIdentifyCellStartEnd<<<blocksPerGrid, threadsPerBlock>>>(
numObjects, dev_particleGridIndices, dev_gridCellStartIndices, dev_gridCellEndIndices);
checkCUDAErrorWithLine("kernIdentifyCellStartEnd failed!");
// - Perform velocity updates using neighbor search
kernUpdateVelNeighborSearchScattered<<<blocksPerGrid, threadsPerBlock>>>(
numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, gridCellWidth,
dev_gridCellStartIndices, dev_gridCellEndIndices, dev_particleArrayIndices,
dev_pos, dev_vel1, dev_vel2);
checkCUDAErrorWithLine("kernUpdateVelNeighborSearchScattered failed!");
// - Update positions
kernUpdatePos<<<blocksPerGrid, threadsPerBlock>>>(numObjects, dt, dev_pos, dev_vel1);
checkCUDAErrorWithLine("kernUpdatePos failed!");
// - Ping-pong buffers as needed
cudaMemcpy(dev_vel1, dev_vel2, sizeof(glm::vec3) * numObjects, cudaMemcpyDeviceToDevice);
}
void Boids::stepSimulationCoherentGrid(float dt) {
dim3 blocksPerGrid((numObjects + blockSize - 1) / blockSize);
// TODO-2.3 - start by copying Boids::stepSimulationNaiveGrid
// Uniform Grid Neighbor search using Thrust sort on cell-coherent data.
// In Parallel:
// - Label each particle with its array index as well as its grid index.
// Use 2x width grids
kernComputeIndices<<<blocksPerGrid, threadsPerBlock>>>(
numObjects, gridSideCount, gridMinimum, gridInverseCellWidth,
dev_pos, dev_particleArrayIndices, dev_particleGridIndices);
checkCUDAErrorWithLine("kernComputeIndices failed!");
// - Unstable key sort using Thrust. A stable sort isn't necessary, but you
// are welcome to do a performance comparison.
thrust::sort_by_key(dev_thrust_particleGridIndices,
dev_thrust_particleGridIndices + numObjects,
dev_thrust_particleArrayIndices);
// - Naively unroll the loop for finding the start and end indices of each
// cell's data pointers in the array of boid indices
// Reset one entry per grid cell (gridCellCount of them), not per boid.
dim3 cellBlocksPerGrid((gridCellCount + blockSize - 1) / blockSize);
kernResetIntBuffer<<<cellBlocksPerGrid, blockSize>>>(gridCellCount, dev_gridCellStartIndices, -1);
kernResetIntBuffer<<<cellBlocksPerGrid, blockSize>>>(gridCellCount, dev_gridCellEndIndices, -1);
kernIdentifyCellStartEnd<<<blocksPerGrid, blockSize>>>(numObjects, dev_particleGridIndices,
dev_gridCellStartIndices, dev_gridCellEndIndices);
// - BIG DIFFERENCE: use the rearranged array index buffer to reshuffle all
// the particle data in the simulation array.
// CONSIDER WHAT ADDITIONAL BUFFERS YOU NEED
// HB: separate buffers are needed because a thread cannot safely write to a location another thread may still read
kernShuffleBuffer<<<blocksPerGrid, threadsPerBlock>>>(numObjects, dev_particleArrayIndices, dev_pos, dev_shuffledPos);
kernShuffleBuffer<<<blocksPerGrid, threadsPerBlock>>>(numObjects, dev_particleArrayIndices, dev_vel1, dev_shuffledVel1);
// HB put ordering back in appropriate buffers
cudaDeviceSynchronize();
cudaMemcpy(dev_pos, dev_shuffledPos, numObjects * sizeof(glm::vec3), cudaMemcpyDeviceToDevice);
cudaMemcpy(dev_vel1, dev_shuffledVel1, numObjects * sizeof(glm::vec3), cudaMemcpyDeviceToDevice);
// - Perform velocity updates using neighbor search
kernUpdateVelNeighborSearchCoherent<<<blocksPerGrid, threadsPerBlock>>>(
numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, gridCellWidth,
dev_gridCellStartIndices, dev_gridCellEndIndices,
dev_pos, dev_vel1, dev_vel2);
checkCUDAErrorWithLine("kernUpdateVelNeighborSearchCoherent failed!");
// - Update positions
kernUpdatePos<<<blocksPerGrid, threadsPerBlock>>>(numObjects, dt, dev_pos, dev_vel1);
checkCUDAErrorWithLine("kernUpdatePos failed!");
// - Ping-pong buffers as needed. THIS MAY BE DIFFERENT FROM BEFORE.
cudaMemcpy(dev_vel1, dev_vel2, sizeof(glm::vec3) * numObjects, cudaMemcpyDeviceToDevice);
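// (The same ping-pong as before works here: the cell indices are recomputed from
// dev_pos every step, so dev_vel1 only needs to hold the latest velocities in
// the current shuffled order.)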
}
void Boids::endSimulation() {
cudaFree(dev_vel1);
cudaFree(dev_vel2);
cudaFree(dev_pos);
// TODO-2.1 - Free any additional buffers here.
cudaFree(dev_particleArrayIndices);
cudaFree(dev_particleGridIndices);
cudaFree(dev_gridCellStartIndices);
cudaFree(dev_gridCellEndIndices);
// TODO-2.3 - Free any additional buffers here.
cudaFree(dev_shuffledPos);
cudaFree(dev_shuffledVel1);
}
void Boids::unitTest() {
// LOOK-1.2 Feel free to write additional tests here.
// test unstable sort
int *dev_intKeys;
int *dev_intValues;
int N = 10;
std::unique_ptr<int[]>intKeys{ new int[N] };
std::unique_ptr<int[]>intValues{ new int[N] };
intKeys[0] = 0; intValues[0] = 0;
intKeys[1] = 1; intValues[1] = 1;
intKeys[2] = 0; intValues[2] = 2;
intKeys[3] = 3; intValues[3] = 3;
intKeys[4] = 0; intValues[4] = 4;
intKeys[5] = 2; intValues[5] = 5;
intKeys[6] = 2; intValues[6] = 6;
intKeys[7] = 0; intValues[7] = 7;
intKeys[8] = 5; intValues[8] = 8;
intKeys[9] = 6; intValues[9] = 9;
cudaMalloc((void**)&dev_intKeys, N * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_intKeys failed!");
cudaMalloc((void**)&dev_intValues, N * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_intValues failed!");
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
std::cout << "before unstable sort: " << std::endl;
for (int i = 0; i < N; i++) {
std::cout << " key: " << intKeys[i];
std::cout << " value: " << intValues[i] << std::endl;
}
// How to copy data to the GPU
cudaMemcpy(dev_intKeys, intKeys.get(), sizeof(int) * N, cudaMemcpyHostToDevice);
cudaMemcpy(dev_intValues, intValues.get(), sizeof(int) * N, cudaMemcpyHostToDevice);
// Wrap device vectors in thrust iterators for use with thrust.
thrust::device_ptr<int> dev_thrust_keys(dev_intKeys);
thrust::device_ptr<int> dev_thrust_values(dev_intValues);
// LOOK-2.1 Example for using thrust::sort_by_key
thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + N, dev_thrust_values);
// How to copy data back to the CPU side from the GPU
cudaMemcpy(intKeys.get(), dev_intKeys, sizeof(int) * N, cudaMemcpyDeviceToHost);
cudaMemcpy(intValues.get(), dev_intValues, sizeof(int) * N, cudaMemcpyDeviceToHost);
checkCUDAErrorWithLine("memcpy back failed!");
std::cout << "after unstable sort: " << std::endl;
for (int i = 0; i < N; i++) {
std::cout << " key: " << intKeys[i];
std::cout << " value: " << intValues[i] << std::endl;
}
// cleanup
cudaFree(dev_intKeys);
cudaFree(dev_intValues);
checkCUDAErrorWithLine("cudaFree failed!");
return;
}
|
9ba8f788a4e97470f66956242016d0123ee96b57.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "dali/kernels/imgproc/jpeg/jpeg_distortion_gpu_kernel.h"
#include "dali/kernels/imgproc/jpeg/jpeg_distortion_gpu_impl.cuh"
#include "dali/core/static_switch.h"
namespace dali {
namespace kernels {
namespace jpeg {
KernelRequirements JpegDistortionBaseGPU::Setup(KernelContext &ctx,
const TensorListShape<3> &in_shape,
bool horz_subsample, bool vert_subsample) {
horz_subsample_ = horz_subsample;
vert_subsample_ = vert_subsample;
KernelRequirements req;
ScratchpadEstimator se;
int nsamples = in_shape.num_samples();
se.add<SampleDesc>(AllocType::Host, nsamples);
se.add<SampleDesc>(AllocType::GPU, nsamples);
chroma_shape_.resize(nsamples);
for (int i = 0; i < nsamples; i++) {
auto chroma_sh = chroma_shape_.tensor_shape_span(i);
auto sh = in_shape.tensor_shape_span(i);
// used to generate logical blocks (one thread per chroma pixel)
chroma_sh[0] = div_ceil(sh[0], 1 + vert_subsample_);
chroma_sh[1] = div_ceil(sh[1], 1 + horz_subsample_);
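// e.g. 4:2:0 (horizontal + vertical subsampling) halves both chroma dimensions,
// while 4:2:2 (horizontal only) halves just the width.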
}
block_setup_.SetBlockDim(dim3(32, 16, 1));
int xblock = 64 * (2 - horz_subsample_);
int yblock = 128;
block_setup_.SetDefaultBlockSize({xblock, yblock});
block_setup_.SetupBlocks(chroma_shape_, true);
int nblocks = block_setup_.Blocks().size();
se.add<BlockDesc>(AllocType::GPU, nblocks);
req.scratch_sizes = se.sizes;
req.output_shapes = {in_shape};
return req;
}
void JpegDistortionBaseGPU::SetupSampleDescs(const OutListGPU<uint8_t, 3> &out,
const InListGPU<uint8_t, 3> &in,
span<const int> quality) {
const auto &in_shape = in.shape;
int nsamples = in_shape.num_samples();
sample_descs_.resize(nsamples);
for (int i = 0; i < nsamples; i++) {
auto &sample_desc = sample_descs_[i];
auto in_sh = in_shape.tensor_shape_span(i);
auto width = in_sh[1];
auto height = in_sh[0];
sample_desc.in = in[i].data;
sample_desc.out = out[i].data;
sample_desc.size.x = width;
sample_desc.size.y = height;
sample_desc.strides.x = 3;
sample_desc.strides.y = width * 3;
int q;
if (quality.empty()) {
q = 95;
} else if (quality.size() == 1) {
q = quality[0];
} else {
q = quality[i];
}
sample_desc.luma_Q_table = GetLumaQuantizationTable(q);
sample_desc.chroma_Q_table = GetChromaQuantizationTable(q);
}
}
void JpegCompressionDistortionGPU::Run(KernelContext &ctx, const OutListGPU<uint8_t, 3> &out,
const InListGPU<uint8_t, 3> &in, span<const int> quality) {
const auto &in_shape = in.shape;
int nsamples = in_shape.num_samples();
if (quality.size() > 1 && quality.size() != nsamples) {
throw std::invalid_argument(
make_string("Unexpected number of elements in ``quality`` argument. "
"The argument could contain a single value (used for the whole batch), "
"one value per sample, or no values (a default is used). Received ",
quality.size(), " values but batch size is ", nsamples, "."));
}
SetupSampleDescs(out, in, quality);
SampleDesc *samples_gpu;
BlockDesc *blocks_gpu;
std::tie(samples_gpu, blocks_gpu) = ctx.scratchpad->ToContiguousGPU(
ctx.gpu.stream, make_cspan(sample_descs_), block_setup_.Blocks());
dim3 grid_dim = block_setup_.GridDim();
dim3 block_dim = block_setup_.BlockDim();
VALUE_SWITCH(horz_subsample_ ? 1 : 0, HorzSubsample, (false, true), (
VALUE_SWITCH(vert_subsample_ ? 1 : 0, VertSubsample, (false, true), (
hipLaunchKernelGGL(( JpegCompressionDistortion<HorzSubsample, VertSubsample>)
, dim3(grid_dim), dim3(block_dim), 0, ctx.gpu.stream, samples_gpu, blocks_gpu);
), ()); // NOLINT
), ()); // NOLINT
CUDA_CALL(hipGetLastError());
}
void ChromaSubsampleDistortionGPU::Run(KernelContext &ctx, const OutListGPU<uint8_t, 3> &out,
const InListGPU<uint8_t, 3> &in) {
SetupSampleDescs(out, in);
SampleDesc *samples_gpu;
BlockDesc *blocks_gpu;
std::tie(samples_gpu, blocks_gpu) = ctx.scratchpad->ToContiguousGPU(
ctx.gpu.stream, make_cspan(sample_descs_), block_setup_.Blocks());
dim3 grid_dim = block_setup_.GridDim();
dim3 block_dim = block_setup_.BlockDim();
VALUE_SWITCH(horz_subsample_ ? 1 : 0, HorzSubsample, (false, true), (
VALUE_SWITCH(vert_subsample_ ? 1 : 0, VertSubsample, (false, true), (
hipLaunchKernelGGL(( ChromaSubsampleDistortion<HorzSubsample, VertSubsample>)
, dim3(grid_dim), dim3(block_dim), 0, ctx.gpu.stream, samples_gpu, blocks_gpu);
), ()); // NOLINT
), ()); // NOLINT
CUDA_CALL(hipGetLastError());
}
} // namespace jpeg
} // namespace kernels
} // namespace dali
|
9ba8f788a4e97470f66956242016d0123ee96b57.cu
|
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "dali/kernels/imgproc/jpeg/jpeg_distortion_gpu_kernel.h"
#include "dali/kernels/imgproc/jpeg/jpeg_distortion_gpu_impl.cuh"
#include "dali/core/static_switch.h"
namespace dali {
namespace kernels {
namespace jpeg {
KernelRequirements JpegDistortionBaseGPU::Setup(KernelContext &ctx,
const TensorListShape<3> &in_shape,
bool horz_subsample, bool vert_subsample) {
horz_subsample_ = horz_subsample;
vert_subsample_ = vert_subsample;
KernelRequirements req;
ScratchpadEstimator se;
int nsamples = in_shape.num_samples();
se.add<SampleDesc>(AllocType::Host, nsamples);
se.add<SampleDesc>(AllocType::GPU, nsamples);
chroma_shape_.resize(nsamples);
for (int i = 0; i < nsamples; i++) {
auto chroma_sh = chroma_shape_.tensor_shape_span(i);
auto sh = in_shape.tensor_shape_span(i);
// used to generate logical blocks (one thread per chroma pixel)
chroma_sh[0] = div_ceil(sh[0], 1 + vert_subsample_);
chroma_sh[1] = div_ceil(sh[1], 1 + horz_subsample_);
}
block_setup_.SetBlockDim(dim3(32, 16, 1));
int xblock = 64 * (2 - horz_subsample_);
int yblock = 128;
block_setup_.SetDefaultBlockSize({xblock, yblock});
block_setup_.SetupBlocks(chroma_shape_, true);
int nblocks = block_setup_.Blocks().size();
se.add<BlockDesc>(AllocType::GPU, nblocks);
req.scratch_sizes = se.sizes;
req.output_shapes = {in_shape};
return req;
}
void JpegDistortionBaseGPU::SetupSampleDescs(const OutListGPU<uint8_t, 3> &out,
const InListGPU<uint8_t, 3> &in,
span<const int> quality) {
const auto &in_shape = in.shape;
int nsamples = in_shape.num_samples();
sample_descs_.resize(nsamples);
for (int i = 0; i < nsamples; i++) {
auto &sample_desc = sample_descs_[i];
auto in_sh = in_shape.tensor_shape_span(i);
auto width = in_sh[1];
auto height = in_sh[0];
sample_desc.in = in[i].data;
sample_desc.out = out[i].data;
sample_desc.size.x = width;
sample_desc.size.y = height;
sample_desc.strides.x = 3;
sample_desc.strides.y = width * 3;
int q;
if (quality.empty()) {
q = 95;
} else if (quality.size() == 1) {
q = quality[0];
} else {
q = quality[i];
}
sample_desc.luma_Q_table = GetLumaQuantizationTable(q);
sample_desc.chroma_Q_table = GetChromaQuantizationTable(q);
}
}
void JpegCompressionDistortionGPU::Run(KernelContext &ctx, const OutListGPU<uint8_t, 3> &out,
const InListGPU<uint8_t, 3> &in, span<const int> quality) {
const auto &in_shape = in.shape;
int nsamples = in_shape.num_samples();
if (quality.size() > 1 && quality.size() != nsamples) {
throw std::invalid_argument(
make_string("Unexpected number of elements in ``quality`` argument. "
"The argument could contain a single value (used for the whole batch), "
"one value per sample, or no values (a default is used). Received ",
quality.size(), " values but batch size is ", nsamples, "."));
}
SetupSampleDescs(out, in, quality);
SampleDesc *samples_gpu;
BlockDesc *blocks_gpu;
std::tie(samples_gpu, blocks_gpu) = ctx.scratchpad->ToContiguousGPU(
ctx.gpu.stream, make_cspan(sample_descs_), block_setup_.Blocks());
dim3 grid_dim = block_setup_.GridDim();
dim3 block_dim = block_setup_.BlockDim();
VALUE_SWITCH(horz_subsample_ ? 1 : 0, HorzSubsample, (false, true), (
VALUE_SWITCH(vert_subsample_ ? 1 : 0, VertSubsample, (false, true), (
JpegCompressionDistortion<HorzSubsample, VertSubsample>
<<<grid_dim, block_dim, 0, ctx.gpu.stream>>>(samples_gpu, blocks_gpu);
), ()); // NOLINT
), ()); // NOLINT
CUDA_CALL(cudaGetLastError());
}
void ChromaSubsampleDistortionGPU::Run(KernelContext &ctx, const OutListGPU<uint8_t, 3> &out,
const InListGPU<uint8_t, 3> &in) {
SetupSampleDescs(out, in);
SampleDesc *samples_gpu;
BlockDesc *blocks_gpu;
std::tie(samples_gpu, blocks_gpu) = ctx.scratchpad->ToContiguousGPU(
ctx.gpu.stream, make_cspan(sample_descs_), block_setup_.Blocks());
dim3 grid_dim = block_setup_.GridDim();
dim3 block_dim = block_setup_.BlockDim();
VALUE_SWITCH(horz_subsample_ ? 1 : 0, HorzSubsample, (false, true), (
VALUE_SWITCH(vert_subsample_ ? 1 : 0, VertSubsample, (false, true), (
ChromaSubsampleDistortion<HorzSubsample, VertSubsample>
<<<grid_dim, block_dim, 0, ctx.gpu.stream>>>(samples_gpu, blocks_gpu);
), ()); // NOLINT
), ()); // NOLINT
CUDA_CALL(cudaGetLastError());
}
} // namespace jpeg
} // namespace kernels
} // namespace dali
|
c7d5ea7e6884f6dd0671ef14101c331b47ee98bd.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#undef _GLIBCXX_USE_INT128
#include <thrust/functional.h>
#include <thrust/transform_scan.h>
#include <thrust/count.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include "psc_cuda.h"
#include "particles_cuda.h"
#include <b40c/radixsort_scanscatter_kernel4.h>
using namespace b40c_thrust;
typedef unsigned int K;
typedef unsigned int V;
static const int RADIX_BITS = 4;
struct count_if_equal : public thrust::unary_function<unsigned int, unsigned int> {
const unsigned int value;
__device__ __host__ count_if_equal(unsigned int _value) : value(_value) { }
__device__ __host__ unsigned int operator()(unsigned int value_in) {
return value_in == value;
}
};
EXTERN_C int
cuda_exclusive_scan_2(struct psc_particles *prts, unsigned int *_d_vals,
unsigned int *_d_sums)
{
struct psc_particles_cuda *cuda = psc_particles_cuda(prts);
thrust::device_ptr<unsigned int> d_vals(_d_vals);
thrust::device_ptr<unsigned int> d_sums(_d_sums);
count_if_equal unary_op(cuda->nr_blocks);
thrust::transform_exclusive_scan(d_vals, d_vals + prts->n_part, d_sums, unary_op,
0, thrust::plus<unsigned int>());
// OPT, don't mv to host
int sum = d_sums[prts->n_part - 1] + (d_vals[prts->n_part - 1] == cuda->nr_blocks);
return sum;
}
EXTERN_C int
_cuda_exclusive_scan_2(struct psc_particles *prts, unsigned int *d_bidx,
unsigned int *d_sums)
{
struct psc_particles_cuda *cuda = psc_particles_cuda(prts);
unsigned int *bidx = new unsigned int[prts->n_part];
unsigned int *sums = new unsigned int[prts->n_part];
check(hipMemcpy(bidx, d_bidx, prts->n_part * sizeof(*bidx),
hipMemcpyDeviceToHost));
unsigned int sum = 0;
for (int i = 0; i < prts->n_part; i++) {
sums[i] = sum;
sum += (bidx[i] == cuda->nr_blocks ? 1 : 0);
}
check(hipMemcpy(d_sums, sums, prts->n_part * sizeof(*d_sums),
hipMemcpyHostToDevice));
delete[] sums;
delete[] bidx;
return sum;
}
void
cuda_mprts_find_n_send(struct psc_mparticles *mprts)
{
struct psc_mparticles_cuda *mprts_cuda = psc_mparticles_cuda(mprts);
int nr_total_blocks = mprts_cuda->nr_total_blocks;
thrust::device_ptr<unsigned int> d_spine_sums(mprts_cuda->d_bnd_spine_sums);
thrust::host_vector<unsigned int> h_spine_sums(nr_total_blocks + 1);
thrust::copy(d_spine_sums + nr_total_blocks * 10,
d_spine_sums + nr_total_blocks * 11 + 1,
h_spine_sums.begin());
unsigned int off = 0;
for (int p = 0; p < mprts->nr_patches; p++) {
struct psc_particles *prts = psc_mparticles_get_patch(mprts, p);
struct psc_particles_cuda *cuda = psc_particles_cuda(prts);
unsigned int n_send = h_spine_sums[(p + 1) * mprts_cuda->nr_blocks];
cuda->bnd_n_send = n_send - off;
off = n_send;
}
mprts_cuda->nr_prts_send = off;
}
// ======================================================================
// cuda_mprts_reorder_send_by_id
static void __global__
mprts_reorder_send_by_id(unsigned int nr_prts_send, unsigned int *d_xchg_ids,
float4 *d_xi4, float4 *d_pxi4,
float4 *d_xchg_xi4, float4 *d_xchg_pxi4)
{
int n = threadIdx.x + THREADS_PER_BLOCK * blockIdx.x;
if (n >= nr_prts_send) {
return;
}
unsigned int id = d_xchg_ids[n];
d_xchg_xi4[n] = d_xi4[id];
d_xchg_pxi4[n] = d_pxi4[id];
}
void
cuda_mprts_reorder_send_by_id(struct psc_mparticles *mprts)
{
struct psc_mparticles_cuda *mprts_cuda = psc_mparticles_cuda(mprts);
int dimGrid = (mprts_cuda->nr_prts_send + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
hipLaunchKernelGGL(( mprts_reorder_send_by_id), dim3(dimGrid), dim3(THREADS_PER_BLOCK), 0, 0,
mprts_cuda->nr_prts_send, mprts_cuda->d_ids + mprts_cuda->nr_prts - mprts_cuda->nr_prts_send,
mprts_cuda->d_xi4, mprts_cuda->d_pxi4,
mprts_cuda->d_xi4 + mprts_cuda->nr_prts, mprts_cuda->d_pxi4 + mprts_cuda->nr_prts);
}
void
cuda_mprts_reorder_send_by_id_gold(struct psc_mparticles *mprts)
{
struct psc_mparticles_cuda *mprts_cuda = psc_mparticles_cuda(mprts);
thrust::device_ptr<unsigned int> d_ids(mprts_cuda->d_ids);
thrust::device_ptr<float4> d_xi4(mprts_cuda->d_xi4);
thrust::device_ptr<float4> d_pxi4(mprts_cuda->d_pxi4);
thrust::host_vector<unsigned int> h_ids(d_ids, d_ids + mprts_cuda->nr_prts);
thrust::host_vector<float4> h_xi4(d_xi4, d_xi4 + mprts_cuda->nr_prts + mprts_cuda->nr_prts_send);
thrust::host_vector<float4> h_pxi4(d_pxi4, d_pxi4 + mprts_cuda->nr_prts + mprts_cuda->nr_prts_send);
for (int n = 0; n < mprts_cuda->nr_prts_send; n++) {
unsigned int id = h_ids[mprts_cuda->nr_prts - mprts_cuda->nr_prts_send + n];
h_xi4[mprts_cuda->nr_prts + n] = h_xi4[id];
h_pxi4[mprts_cuda->nr_prts + n] = h_pxi4[id];
}
thrust::copy(h_xi4.begin(), h_xi4.end(), d_xi4);
thrust::copy(h_pxi4.begin(), h_pxi4.end(), d_pxi4);
}
// ======================================================================
// cuda_mprts_scan_send_buf_total
void
cuda_mprts_scan_send_buf_total_gold(struct psc_mparticles *mprts)
{
struct psc_mparticles_cuda *mprts_cuda = psc_mparticles_cuda(mprts);
unsigned int nr_total_blocks = mprts_cuda->nr_total_blocks;
thrust::device_ptr<unsigned int> d_bidx(mprts_cuda->d_bidx);
thrust::device_ptr<unsigned int> d_sums(mprts_cuda->d_sums);
thrust::device_ptr<unsigned int> d_off(mprts_cuda->d_off);
thrust::device_ptr<unsigned int> d_spine_sums(mprts_cuda->d_bnd_spine_sums);
thrust::host_vector<unsigned int> h_off(d_off, d_off + nr_total_blocks + 1);
thrust::host_vector<unsigned int> h_bidx(d_bidx, d_bidx + mprts_cuda->nr_prts);
thrust::host_vector<unsigned int> h_sums(d_sums, d_sums + mprts_cuda->nr_prts);
for (unsigned int bid = 0; bid < nr_total_blocks; bid++) {
unsigned int sum = d_spine_sums[nr_total_blocks * 10 + bid];
for (int n = h_off[bid]; n < h_off[bid+1]; n++) {
if (h_bidx[n] == CUDA_BND_S_OOB) {
h_sums[n] = sum;
sum++;
}
}
}
thrust::copy(h_sums.begin(), h_sums.end(), d_sums);
cuda_mprts_reorder_send_buf_total(mprts);
}
void
cuda_mprts_scan_send_buf_total(struct psc_mparticles *mprts)
{
struct psc_mparticles_cuda *mprts_cuda = psc_mparticles_cuda(mprts);
unsigned int nr_total_blocks = mprts_cuda->nr_total_blocks;
int *b_mx = mprts_cuda->b_mx;
// OPT, we could do this from the beginning and adapt find_n_send()
thrust::device_ptr<unsigned int> d_spine_cnts(mprts_cuda->d_bnd_spine_cnts);
thrust::device_ptr<unsigned int> d_spine_sums(mprts_cuda->d_bnd_spine_sums);
thrust::exclusive_scan(d_spine_cnts + nr_total_blocks * 10,
d_spine_cnts + nr_total_blocks * 11 + 1,
d_spine_sums + nr_total_blocks * 10,
mprts_cuda->nr_prts - mprts_cuda->nr_prts_send);
// OPT, we could somehow not fill in ids for not oob at all
// this should make sure at least those within bounds don't screw anything up
thrust::fill(d_spine_sums, d_spine_sums + nr_total_blocks * 10, 0);
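// The last two ScanScatterDigits4 template arguments are the per-patch block
// counts b_mx[1] and b_mx[2]; they must be known at compile time, so each
// supported grid configuration needs its own instantiation below.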
if (b_mx[0] == 1 && b_mx[1] == 8 && b_mx[2] == 8) {
hipLaunchKernelGGL(( ScanScatterDigits4<K, V, 0, RADIX_BITS, 0,
NopFunctor<K>,
NopFunctor<K>,
8, 8>)
, dim3(nr_total_blocks), dim3(B40C_RADIXSORT_THREADS), 0, 0,
mprts_cuda->d_bnd_spine_sums, mprts_cuda->d_bidx,
mprts_cuda->d_ids, mprts_cuda->d_off, nr_total_blocks);
} else if (b_mx[0] == 1 && b_mx[1] == 16 && b_mx[2] == 16) {
hipLaunchKernelGGL(( ScanScatterDigits4<K, V, 0, RADIX_BITS, 0,
NopFunctor<K>,
NopFunctor<K>,
16, 16>)
, dim3(nr_total_blocks), dim3(B40C_RADIXSORT_THREADS), 0, 0,
mprts_cuda->d_bnd_spine_sums, mprts_cuda->d_bidx,
mprts_cuda->d_ids, mprts_cuda->d_off, nr_total_blocks);
} else if (b_mx[0] == 1 && b_mx[1] == 32 && b_mx[2] == 32) {
hipLaunchKernelGGL(( ScanScatterDigits4<K, V, 0, RADIX_BITS, 0,
NopFunctor<K>,
NopFunctor<K>,
32, 32>)
, dim3(nr_total_blocks), dim3(B40C_RADIXSORT_THREADS), 0, 0,
mprts_cuda->d_bnd_spine_sums, mprts_cuda->d_bidx,
mprts_cuda->d_ids, mprts_cuda->d_off, nr_total_blocks);
} else if (b_mx[0] == 1 && b_mx[1] == 64 && b_mx[2] == 64) {
hipLaunchKernelGGL(( ScanScatterDigits4<K, V, 0, RADIX_BITS, 0,
NopFunctor<K>,
NopFunctor<K>,
64, 64>)
, dim3(nr_total_blocks), dim3(B40C_RADIXSORT_THREADS), 0, 0,
mprts_cuda->d_bnd_spine_sums, mprts_cuda->d_bidx,
mprts_cuda->d_ids, mprts_cuda->d_off, nr_total_blocks);
} else if (b_mx[0] == 1 && b_mx[1] == 128 && b_mx[2] == 128) {
hipLaunchKernelGGL(( ScanScatterDigits4<K, V, 0, RADIX_BITS, 0,
NopFunctor<K>,
NopFunctor<K>,
128, 128>)
, dim3(nr_total_blocks), dim3(B40C_RADIXSORT_THREADS), 0, 0,
mprts_cuda->d_bnd_spine_sums, mprts_cuda->d_bidx,
mprts_cuda->d_ids, mprts_cuda->d_off, nr_total_blocks);
} else {
mprintf("no support for b_mx %d x %d x %d!\n", b_mx[0], b_mx[1], b_mx[2]);
assert(0);
}
cuda_sync_if_enabled();
cuda_mprts_reorder_send_by_id(mprts);
}
|
c7d5ea7e6884f6dd0671ef14101c331b47ee98bd.cu
|
#undef _GLIBCXX_USE_INT128
#include <thrust/functional.h>
#include <thrust/transform_scan.h>
#include <thrust/count.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include "psc_cuda.h"
#include "particles_cuda.h"
#include <b40c/radixsort_scanscatter_kernel4.h>
using namespace b40c_thrust;
typedef unsigned int K;
typedef unsigned int V;
static const int RADIX_BITS = 4;
struct count_if_equal : public thrust::unary_function<unsigned int, unsigned int> {
const unsigned int value;
__device__ __host__ count_if_equal(unsigned int _value) : value(_value) { }
__device__ __host__ unsigned int operator()(unsigned int value_in) {
return value_in == value;
}
};
EXTERN_C int
cuda_exclusive_scan_2(struct psc_particles *prts, unsigned int *_d_vals,
unsigned int *_d_sums)
{
struct psc_particles_cuda *cuda = psc_particles_cuda(prts);
thrust::device_ptr<unsigned int> d_vals(_d_vals);
thrust::device_ptr<unsigned int> d_sums(_d_sums);
count_if_equal unary_op(cuda->nr_blocks);
thrust::transform_exclusive_scan(d_vals, d_vals + prts->n_part, d_sums, unary_op,
0, thrust::plus<unsigned int>());
// OPT, don't mv to host
int sum = d_sums[prts->n_part - 1] + (d_vals[prts->n_part - 1] == cuda->nr_blocks);
return sum;
}
EXTERN_C int
_cuda_exclusive_scan_2(struct psc_particles *prts, unsigned int *d_bidx,
unsigned int *d_sums)
{
struct psc_particles_cuda *cuda = psc_particles_cuda(prts);
unsigned int *bidx = new unsigned int[prts->n_part];
unsigned int *sums = new unsigned int[prts->n_part];
check(cudaMemcpy(bidx, d_bidx, prts->n_part * sizeof(*bidx),
cudaMemcpyDeviceToHost));
unsigned int sum = 0;
for (int i = 0; i < prts->n_part; i++) {
sums[i] = sum;
sum += (bidx[i] == cuda->nr_blocks ? 1 : 0);
}
check(cudaMemcpy(d_sums, sums, prts->n_part * sizeof(*d_sums),
cudaMemcpyHostToDevice));
delete[] sums;
delete[] bidx;
return sum;
}
void
cuda_mprts_find_n_send(struct psc_mparticles *mprts)
{
struct psc_mparticles_cuda *mprts_cuda = psc_mparticles_cuda(mprts);
int nr_total_blocks = mprts_cuda->nr_total_blocks;
thrust::device_ptr<unsigned int> d_spine_sums(mprts_cuda->d_bnd_spine_sums);
thrust::host_vector<unsigned int> h_spine_sums(nr_total_blocks + 1);
thrust::copy(d_spine_sums + nr_total_blocks * 10,
d_spine_sums + nr_total_blocks * 11 + 1,
h_spine_sums.begin());
unsigned int off = 0;
for (int p = 0; p < mprts->nr_patches; p++) {
struct psc_particles *prts = psc_mparticles_get_patch(mprts, p);
struct psc_particles_cuda *cuda = psc_particles_cuda(prts);
unsigned int n_send = h_spine_sums[(p + 1) * mprts_cuda->nr_blocks];
cuda->bnd_n_send = n_send - off;
off = n_send;
}
mprts_cuda->nr_prts_send = off;
}
// ======================================================================
// cuda_mprts_reorder_send_by_id
static void __global__
mprts_reorder_send_by_id(unsigned int nr_prts_send, unsigned int *d_xchg_ids,
float4 *d_xi4, float4 *d_pxi4,
float4 *d_xchg_xi4, float4 *d_xchg_pxi4)
{
int n = threadIdx.x + THREADS_PER_BLOCK * blockIdx.x;
if (n >= nr_prts_send) {
return;
}
unsigned int id = d_xchg_ids[n];
d_xchg_xi4[n] = d_xi4[id];
d_xchg_pxi4[n] = d_pxi4[id];
}
void
cuda_mprts_reorder_send_by_id(struct psc_mparticles *mprts)
{
struct psc_mparticles_cuda *mprts_cuda = psc_mparticles_cuda(mprts);
int dimGrid = (mprts_cuda->nr_prts_send + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
mprts_reorder_send_by_id<<<dimGrid, THREADS_PER_BLOCK>>>
(mprts_cuda->nr_prts_send, mprts_cuda->d_ids + mprts_cuda->nr_prts - mprts_cuda->nr_prts_send,
mprts_cuda->d_xi4, mprts_cuda->d_pxi4,
mprts_cuda->d_xi4 + mprts_cuda->nr_prts, mprts_cuda->d_pxi4 + mprts_cuda->nr_prts);
}
void
cuda_mprts_reorder_send_by_id_gold(struct psc_mparticles *mprts)
{
struct psc_mparticles_cuda *mprts_cuda = psc_mparticles_cuda(mprts);
thrust::device_ptr<unsigned int> d_ids(mprts_cuda->d_ids);
thrust::device_ptr<float4> d_xi4(mprts_cuda->d_xi4);
thrust::device_ptr<float4> d_pxi4(mprts_cuda->d_pxi4);
thrust::host_vector<unsigned int> h_ids(d_ids, d_ids + mprts_cuda->nr_prts);
thrust::host_vector<float4> h_xi4(d_xi4, d_xi4 + mprts_cuda->nr_prts + mprts_cuda->nr_prts_send);
thrust::host_vector<float4> h_pxi4(d_pxi4, d_pxi4 + mprts_cuda->nr_prts + mprts_cuda->nr_prts_send);
for (int n = 0; n < mprts_cuda->nr_prts_send; n++) {
unsigned int id = h_ids[mprts_cuda->nr_prts - mprts_cuda->nr_prts_send + n];
h_xi4[mprts_cuda->nr_prts + n] = h_xi4[id];
h_pxi4[mprts_cuda->nr_prts + n] = h_pxi4[id];
}
thrust::copy(h_xi4.begin(), h_xi4.end(), d_xi4);
thrust::copy(h_pxi4.begin(), h_pxi4.end(), d_pxi4);
}
// ======================================================================
// cuda_mprts_scan_send_buf_total
void
cuda_mprts_scan_send_buf_total_gold(struct psc_mparticles *mprts)
{
struct psc_mparticles_cuda *mprts_cuda = psc_mparticles_cuda(mprts);
unsigned int nr_total_blocks = mprts_cuda->nr_total_blocks;
thrust::device_ptr<unsigned int> d_bidx(mprts_cuda->d_bidx);
thrust::device_ptr<unsigned int> d_sums(mprts_cuda->d_sums);
thrust::device_ptr<unsigned int> d_off(mprts_cuda->d_off);
thrust::device_ptr<unsigned int> d_spine_sums(mprts_cuda->d_bnd_spine_sums);
thrust::host_vector<unsigned int> h_off(d_off, d_off + nr_total_blocks + 1);
thrust::host_vector<unsigned int> h_bidx(d_bidx, d_bidx + mprts_cuda->nr_prts);
thrust::host_vector<unsigned int> h_sums(d_sums, d_sums + mprts_cuda->nr_prts);
for (unsigned int bid = 0; bid < nr_total_blocks; bid++) {
unsigned int sum = d_spine_sums[nr_total_blocks * 10 + bid];
for (int n = h_off[bid]; n < h_off[bid+1]; n++) {
if (h_bidx[n] == CUDA_BND_S_OOB) {
h_sums[n] = sum;
sum++;
}
}
}
thrust::copy(h_sums.begin(), h_sums.end(), d_sums);
cuda_mprts_reorder_send_buf_total(mprts);
}
void
cuda_mprts_scan_send_buf_total(struct psc_mparticles *mprts)
{
struct psc_mparticles_cuda *mprts_cuda = psc_mparticles_cuda(mprts);
unsigned int nr_total_blocks = mprts_cuda->nr_total_blocks;
int *b_mx = mprts_cuda->b_mx;
// OPT, we could do this from the beginning and adapt find_n_send()
thrust::device_ptr<unsigned int> d_spine_cnts(mprts_cuda->d_bnd_spine_cnts);
thrust::device_ptr<unsigned int> d_spine_sums(mprts_cuda->d_bnd_spine_sums);
thrust::exclusive_scan(d_spine_cnts + nr_total_blocks * 10,
d_spine_cnts + nr_total_blocks * 11 + 1,
d_spine_sums + nr_total_blocks * 10,
mprts_cuda->nr_prts - mprts_cuda->nr_prts_send);
// OPT, we could somehow not fill in ids for not oob at all
// this should make sure at least those within bounds don't screw anything up
thrust::fill(d_spine_sums, d_spine_sums + nr_total_blocks * 10, 0);
if (b_mx[0] == 1 && b_mx[1] == 8 && b_mx[2] == 8) {
ScanScatterDigits4<K, V, 0, RADIX_BITS, 0,
NopFunctor<K>,
NopFunctor<K>,
8, 8>
<<<nr_total_blocks, B40C_RADIXSORT_THREADS>>>
(mprts_cuda->d_bnd_spine_sums, mprts_cuda->d_bidx,
mprts_cuda->d_ids, mprts_cuda->d_off, nr_total_blocks);
} else if (b_mx[0] == 1 && b_mx[1] == 16 && b_mx[2] == 16) {
ScanScatterDigits4<K, V, 0, RADIX_BITS, 0,
NopFunctor<K>,
NopFunctor<K>,
16, 16>
<<<nr_total_blocks, B40C_RADIXSORT_THREADS>>>
(mprts_cuda->d_bnd_spine_sums, mprts_cuda->d_bidx,
mprts_cuda->d_ids, mprts_cuda->d_off, nr_total_blocks);
} else if (b_mx[0] == 1 && b_mx[1] == 32 && b_mx[2] == 32) {
ScanScatterDigits4<K, V, 0, RADIX_BITS, 0,
NopFunctor<K>,
NopFunctor<K>,
32, 32>
<<<nr_total_blocks, B40C_RADIXSORT_THREADS>>>
(mprts_cuda->d_bnd_spine_sums, mprts_cuda->d_bidx,
mprts_cuda->d_ids, mprts_cuda->d_off, nr_total_blocks);
} else if (b_mx[0] == 1 && b_mx[1] == 64 && b_mx[2] == 64) {
ScanScatterDigits4<K, V, 0, RADIX_BITS, 0,
NopFunctor<K>,
NopFunctor<K>,
64, 64>
<<<nr_total_blocks, B40C_RADIXSORT_THREADS>>>
(mprts_cuda->d_bnd_spine_sums, mprts_cuda->d_bidx,
mprts_cuda->d_ids, mprts_cuda->d_off, nr_total_blocks);
} else if (b_mx[0] == 1 && b_mx[1] == 128 && b_mx[2] == 128) {
ScanScatterDigits4<K, V, 0, RADIX_BITS, 0,
NopFunctor<K>,
NopFunctor<K>,
128, 128>
<<<nr_total_blocks, B40C_RADIXSORT_THREADS>>>
(mprts_cuda->d_bnd_spine_sums, mprts_cuda->d_bidx,
mprts_cuda->d_ids, mprts_cuda->d_off, nr_total_blocks);
} else {
mprintf("no support for b_mx %d x %d x %d!\n", b_mx[0], b_mx[1], b_mx[2]);
assert(0);
}
cuda_sync_if_enabled();
cuda_mprts_reorder_send_by_id(mprts);
}
|
ac41c4bb44b3605944e9eed144e1fbfcac2dcdfe.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "nmfcpy.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *mat = NULL;
hipMalloc(&mat, XSIZE*YSIZE);
float *matcp = NULL;
hipMalloc(&matcp, XSIZE*YSIZE);
int m = 2;
int n = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
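// hipFree(0) forces runtime/context initialization; one launch plus a 10-iteration
// warm-up loop follows so the timed 1000-launch loop excludes startup costs.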
hipFree(0);
hipLaunchKernelGGL(nmfcpy, dim3(gridBlock), dim3(threadBlock), 0, 0, mat, matcp, m, n);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(nmfcpy, dim3(gridBlock), dim3(threadBlock), 0, 0, mat, matcp, m, n);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(nmfcpy, dim3(gridBlock), dim3(threadBlock), 0, 0, mat, matcp, m, n);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
ac41c4bb44b3605944e9eed144e1fbfcac2dcdfe.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "nmfcpy.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *mat = NULL;
cudaMalloc(&mat, XSIZE*YSIZE);
float *matcp = NULL;
cudaMalloc(&matcp, XSIZE*YSIZE);
int m = 2;
int n = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
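// cudaFree(0) forces runtime/context initialization; one launch plus a 10-iteration
// warm-up loop follows so the timed 1000-launch loop excludes startup costs.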
cudaFree(0);
nmfcpy<<<gridBlock,threadBlock>>>(mat,matcp,m,n);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
nmfcpy<<<gridBlock,threadBlock>>>(mat,matcp,m,n);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
nmfcpy<<<gridBlock,threadBlock>>>(mat,matcp,m,n);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
00f85108f44925dfb0591bdd4ab9855040cd2d89.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <iostream>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <float.h>
#include "helper_cuda.h"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <cuda_occupancy.h> // Occupancy calculator in the CUDA toolkit
// Device code
__global__ void MyKernelEx1(int *d, int *a, int *b) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
d[idx] = a[idx] * b[idx];
}
__global__ void MyKernelEx2(int *array, int arrayCount) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < arrayCount) {
array[idx] *= array[idx];
}
}
// Host code
int launchMyKernel(int *array, int arrayCount) {
int blockSize; // The launch configurator returned block size
int minGridSize; // The minimum grid size needed to achieve the
// maximum occupancy for a full device launch
int gridSize; // The actual grid size needed, based on input size
hipOccupancyMaxPotentialBlockSize( &minGridSize, &blockSize,
(void*)MyKernelEx2, 0, arrayCount);
// Round up according to array size
gridSize = (arrayCount + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( MyKernelEx2), dim3(gridSize), dim3(blockSize), 0, 0, array, arrayCount);
hipDeviceSynchronize();
return 0;
// If interested, the occupancy can be calculated with //
// hipOccupancyMaxActiveBlocksPerMultiprocessor
}
int main() {
int numBlocks; // Occupancy in terms of active blocks
int blockSize = 64;
// These variables are used to convert occupancy to warps
int device;
hipDeviceProp_t prop;
int activeWarps;
int maxWarps;
hipGetDevice(&device);
hipGetDeviceProperties(&prop, device);
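// Query how many blocks of MyKernelEx1 can be resident per SM at this block size,
// then express that occupancy as active warps over the SM's maximum resident warps.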
hipOccupancyMaxActiveBlocksPerMultiprocessor(&numBlocks,
MyKernelEx1, blockSize, 0);
activeWarps = numBlocks * blockSize / prop.warpSize;
maxWarps = prop.maxThreadsPerMultiProcessor /
prop.warpSize;
std::cout << "Num of blocks: " << numBlocks <<
std::endl;
// std::cout << "Max num of warps per SM: " <<
// maxWarps << std::endl;
// std::cout << "Max threads per SM: " <<
// prop.maxThreadsPerMultiProcessor << std::endl;
std::cout << "Occupancy: " << (double)activeWarps
/ maxWarps * 100 << "%" << std::endl;
return 0;
}
|
00f85108f44925dfb0591bdd4ab9855040cd2d89.cu
|
#include <stdlib.h>
#include <iostream>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <float.h>
#include "helper_cuda.h"
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_occupancy.h> // Occupancy calculator in the CUDA toolkit
// Device code
__global__ void MyKernelEx1(int *d, int *a, int *b) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
d[idx] = a[idx] * b[idx];
}
__global__ void MyKernelEx2(int *array, int arrayCount) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < arrayCount) {
array[idx] *= array[idx];
}
}
// Host code
int launchMyKernel(int *array, int arrayCount) {
int blockSize; // The launch configurator returned block size
int minGridSize; // The minimum grid size needed to achieve the
// maximum occupancy for a full device launch
int gridSize; // The actual grid size needed, based on input size
cudaOccupancyMaxPotentialBlockSize( &minGridSize, &blockSize,
(void*)MyKernelEx2, 0, arrayCount);
// Round up according to array size
gridSize = (arrayCount + blockSize - 1) / blockSize;
MyKernelEx2<<<gridSize, blockSize>>>(array, arrayCount);
cudaDeviceSynchronize();
return 0;
// If interested, the occupancy can be calculated with //
// cudaOccupancyMaxActiveBlocksPerMultiprocessor
}
int main() {
int numBlocks; // Occupancy in terms of active blocks
int blockSize = 64;
// These variables are used to convert occupancy to warps
int device;
cudaDeviceProp prop;
int activeWarps;
int maxWarps;
cudaGetDevice(&device);
cudaGetDeviceProperties(&prop, device);
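// Query how many blocks of MyKernelEx1 can be resident per SM at this block size,
// then express that occupancy as active warps over the SM's maximum resident warps.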
cudaOccupancyMaxActiveBlocksPerMultiprocessor(&numBlocks,
MyKernelEx1, blockSize, 0);
activeWarps = numBlocks * blockSize / prop.warpSize;
maxWarps = prop.maxThreadsPerMultiProcessor /
prop.warpSize;
std::cout << "Num of blocks: " << numBlocks <<
std::endl;
// std::cout << "Max num of warps per SM: " <<
// maxWarps << std::endl;
// std::cout << "Max threads per SM: " <<
// prop.maxThreadsPerMultiProcessor << std::endl;
std::cout << "Occupancy: " << (double)activeWarps
/ maxWarps * 100 << "%" << std::endl;
return 0;
}
|
6d200e88e01cfcfbd070d268ee475d625f07d9f6.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <opencv2/opencv.hpp>
#include <opencv2/core.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/highgui.hpp>
#include <iostream>
#define RGBSIZE 3
using namespace cv;
void noiretblanc( unsigned char * in, unsigned char * out, int colonnes, int lignes ) {
for(int ligne=1; ligne<lignes-1; ligne++)
{
for(int colonne=1; colonne<colonnes-1; colonne++)
{
if( colonne < colonnes && ligne < lignes ) {
int pos = ligne*colonnes+colonne;
int posG = pos*RGBSIZE;
unsigned char r = in[posG];
unsigned char b = in[posG+1];
unsigned char g = in[posG+2];
out[posG]=0.21f*r+0.71f*g+0.07f*b;
out[posG+1]=0.21f*r+0.71f*g+0.07f*b;
out[posG+2]=0.21f*r+0.71f*g+0.07f*b;
}
}
}
}
void retourner( unsigned char * in, unsigned char * out, int colonnes, int lignes ) {
for(int ligne=1; ligne<lignes-1; ligne++)
{
for(int colonne=1; colonne<colonnes-1; colonne++)
{
if( colonne < colonnes && ligne < lignes ) {
int pos = RGBSIZE * ( ligne * colonnes + colonne );
int oppose = colonnes*lignes*RGBSIZE - pos;
auto rbis = in[oppose];
auto gbis = in[oppose + 1];
auto bbis = in[oppose + 2];
out[pos] = rbis;
out[pos + 1] = gbis;
out[pos + 2] = bbis;
}
}
}
}
void detectionContours(unsigned char * in, unsigned char * out, int colonnes, int lignes) {
for(int ligne=1; ligne<lignes-1; ligne++)
{
for(int colonne=1; colonne<colonnes-1; colonne++)
{
if (ligne >= 1 && ligne < lignes - 1 && colonne >= 1 && colonne < colonnes - 1)
{
for (int i = 0; i < RGBSIZE; ++i)
{
unsigned char p_h = in[RGBSIZE * ((ligne - 1) * colonnes + colonne) + i];
unsigned char p_g = in[RGBSIZE * (ligne * colonnes + colonne - 1) + i];
unsigned char pixel = in[RGBSIZE * (ligne * colonnes + colonne) + i];
unsigned char p_d = in[RGBSIZE * (ligne * colonnes + colonne + 1) + i];
unsigned char p_b = in[RGBSIZE * ((ligne + 1) * colonnes + colonne) + i];
int resultat = p_h + p_g + (-4*pixel) + p_d + p_b ;
if (resultat > 255)
{
resultat = 255;
}
if (resultat < 0)
{
resultat = 0;
}
out[RGBSIZE * (ligne * colonnes + colonne) + i] = resultat;
}
}
}
}
}
void ameliorationNettete(unsigned char * in, unsigned char * out, int colonnes, int lignes) {
for(int ligne=1; ligne<lignes-1; ligne++)
{
for(int colonne=1; colonne<colonnes-1; colonne++)
{
if (ligne >= 1 && ligne < lignes - 1 && colonne >= 1 && colonne < colonnes - 1)
{
for (int i = 0; i < RGBSIZE; ++i)
{
unsigned char p_h = in[RGBSIZE * ((ligne - 1) * colonnes + colonne) + i];
unsigned char p_g = in[RGBSIZE * (ligne * colonnes + colonne - 1) + i];
unsigned char pixel = in[RGBSIZE * (ligne * colonnes + colonne) + i];
unsigned char p_d = in[RGBSIZE * (ligne * colonnes + colonne + 1) + i];
unsigned char p_b = in[RGBSIZE * ((ligne + 1) * colonnes + colonne) + i];
int resultat = -p_h - p_g + (5*pixel) - p_d - p_b ;
if (resultat > 255)
{
resultat = 255;
}
if (resultat < 0)
{
resultat = 0;
}
out[RGBSIZE * (ligne * colonnes + colonne) + i] = resultat;
}
}
}
}
}
void flou(unsigned char * in, unsigned char * out, int colonnes, int lignes) {
for(int ligne=1; ligne<lignes-1; ligne++)
{
for(int colonne=1; colonne<colonnes-1; colonne++)
{
if (ligne >= 1 && ligne < lignes - 1 && colonne >= 1 && colonne < colonnes - 1)
{
for (int i = 0; i < RGBSIZE; ++i)
{
unsigned char p_hg = in[RGBSIZE * ((ligne - 1) * colonnes + colonne - 1) + i];
unsigned char p_h = in[RGBSIZE * ((ligne - 1) * colonnes + colonne) + i];
unsigned char p_hd = in[RGBSIZE * ((ligne - 1) * colonnes + colonne + 1) + i];
unsigned char p_g = in[RGBSIZE * (ligne * colonnes + colonne - 1) + i];
unsigned char pixel = in[RGBSIZE * (ligne * colonnes + colonne) + i];
unsigned char p_d = in[RGBSIZE * (ligne * colonnes + colonne + 1) + i];
unsigned char p_bg = in[RGBSIZE * ((ligne + 1) * colonnes + colonne - 1) + i];
unsigned char p_b = in[RGBSIZE * ((ligne + 1) * colonnes + colonne) + i];
unsigned char p_bd = in[RGBSIZE * ((ligne + 1) * colonnes + colonne + 1) + i];
int resultat = (p_hg + p_h + p_hd + p_g + pixel + p_d + p_bg + p_b + p_bd)/9;
if (resultat > 255)
{
resultat = 255;
}
if (resultat < 0)
{
resultat = 0;
}
out[RGBSIZE * (ligne * colonnes + colonne) + i] = resultat;
}
}
}
}
}
int main()
{
cv::Mat m_in = cv::imread("image.jpeg", cv::IMREAD_UNCHANGED );
cv::Mat m_out = m_in;
auto lignes = m_in.rows;
auto colonnes = m_in.cols;
unsigned char * matrice_out;
unsigned char * matrice_in;
hipHostMalloc(&matrice_in,RGBSIZE*lignes*colonnes);
hipHostMalloc(&matrice_out,RGBSIZE*lignes*colonnes);
hipMemcpy( matrice_in, m_in.data, RGBSIZE * lignes * colonnes, hipMemcpyHostToDevice );
dim3 t( 32, 32 );
dim3 b( ( colonnes - 1) / t.x + 1 , ( lignes - 1 ) / t.y + 1 );
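// Note: the grid/block dimensions above are never used; the filters below run sequentially on the host.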
int i;
std::cout << "Entrez le filtre que vous voulez appliquer (1: Noir et Blanc // 2: Retourner // 3: Detection contours // 4: Amlioration de la nettet // 5: Flouter) : ";
std::cin >> i;
auto start = std::chrono::system_clock::now();
if( i == 1 )
{
noiretblanc(matrice_in, matrice_out, colonnes, lignes);
hipMemcpy( m_out.data, matrice_out,RGBSIZE*lignes * colonnes, hipMemcpyDeviceToHost );
cv::imwrite( "./resultat/NoirEtBlanc_Sequentiel.jpeg", m_out );
auto end = std::chrono::system_clock::now();
std::chrono::duration<double> tempsexecution = end - start;
std::cout << "Temps_NoirEtBlanc: " << tempsexecution.count() << " secondes" << std::endl;
}
else if( i == 2 )
{
retourner(matrice_in, matrice_out, colonnes, lignes);
hipMemcpy( m_out.data, matrice_out,RGBSIZE*lignes * colonnes, hipMemcpyDeviceToHost );
cv::imwrite( "./resultat/Retourner_Sequentiel.jpeg", m_out );
auto end = std::chrono::system_clock::now();
std::chrono::duration<double> tempsexecution = end - start;
std::cout << "Temps_Retourner: " << tempsexecution.count() << " secondes" << std::endl;
}
else if( i == 3 )
{
detectionContours(matrice_in, matrice_out, colonnes, lignes);
hipMemcpy( m_out.data, matrice_out,RGBSIZE*lignes * colonnes, hipMemcpyDeviceToHost );
cv::imwrite( "./resultat/DetectionContours_Sequentiel.jpeg", m_out );
auto end = std::chrono::system_clock::now();
std::chrono::duration<double> tempsexecution = end - start;
std::cout << "Temps_DetectionContours: " << tempsexecution.count() << " secondes" << std::endl;
}
else if( i == 4 )
{
ameliorationNettete(matrice_in, matrice_out, colonnes, lignes);
hipMemcpy( m_out.data, matrice_out,RGBSIZE*lignes * colonnes, hipMemcpyDeviceToHost );
cv::imwrite( "./resultat/AmeliorationNettete_Sequentiel.jpeg", m_out );
auto end = std::chrono::system_clock::now();
std::chrono::duration<double> tempsexecution = end - start;
std::cout << "Temps_AmeliorationNettete: " << tempsexecution.count() << " secondes" << std::endl;
}
else if( i == 5 )
{
flou(matrice_in, matrice_out, colonnes, lignes);
hipMemcpy( m_out.data, matrice_out,RGBSIZE*lignes * colonnes, hipMemcpyDeviceToHost );
cv::imwrite( "./resultat/Flouter_Sequentiel.jpeg", m_out );
auto end = std::chrono::system_clock::now();
std::chrono::duration<double> tempsexecution = end - start;
std::cout << "Temps_Flouter: " << tempsexecution.count() << " secondes" << std::endl;
}
else
{
std::cout << "Opration impossible" << std::endl;
auto end = std::chrono::system_clock::now();
}
return 0;
}
|
6d200e88e01cfcfbd070d268ee475d625f07d9f6.cu
|
#include <opencv2/opencv.hpp>
#include <opencv2/core.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/highgui.hpp>
#include <iostream>
#define RGBSIZE 3
using namespace cv;
void noiretblanc( unsigned char * in, unsigned char * out, int colonnes, int lignes ) {
for(int ligne=1; ligne<lignes-1; ligne++)
{
for(int colonne=1; colonne<colonnes-1; colonne++)
{
if( colonne < colonnes && ligne < lignes ) {
int pos = ligne*colonnes+colonne;
int posG = pos*RGBSIZE;
unsigned char r = in[posG];
unsigned char b = in[posG+1];
unsigned char g = in[posG+2];
out[posG]=0.21f*r+0.71f*g+0.07f*b;
out[posG+1]=0.21f*r+0.71f*g+0.07f*b;
out[posG+2]=0.21f*r+0.71f*g+0.07f*b;
}
}
}
}
void retourner( unsigned char * in, unsigned char * out, int colonnes, int lignes ) {
for(int ligne=1; ligne<lignes-1; ligne++)
{
for(int colonne=1; colonne<colonnes-1; colonne++)
{
if( colonne < colonnes && ligne < lignes ) {
int pos = RGBSIZE * ( ligne * colonnes + colonne );
int oppose = colonnes*lignes*RGBSIZE - pos;
auto rbis = in[oppose];
auto gbis = in[oppose + 1];
auto bbis = in[oppose + 2];
out[pos] = rbis;
out[pos + 1] = gbis;
out[pos + 2] = bbis;
}
}
}
}
void detectionContours(unsigned char * in, unsigned char * out, int colonnes, int lignes) {
for(int ligne=1; ligne<lignes-1; ligne++)
{
for(int colonne=1; colonne<colonnes-1; colonne++)
{
if (ligne >= 1 && ligne < lignes - 1 && colonne >= 1 && colonne < colonnes - 1)
{
for (int i = 0; i < RGBSIZE; ++i)
{
unsigned char p_h = in[RGBSIZE * ((ligne - 1) * colonnes + colonne) + i];
unsigned char p_g = in[RGBSIZE * (ligne * colonnes + colonne - 1) + i];
unsigned char pixel = in[RGBSIZE * (ligne * colonnes + colonne) + i];
unsigned char p_d = in[RGBSIZE * (ligne * colonnes + colonne + 1) + i];
unsigned char p_b = in[RGBSIZE * ((ligne + 1) * colonnes + colonne) + i];
int resultat = p_h + p_g + (-4*pixel) + p_d + p_b ;
if (resultat > 255)
{
resultat = 255;
}
if (resultat < 0)
{
resultat = 0;
}
out[RGBSIZE * (ligne * colonnes + colonne) + i] = resultat;
}
}
}
}
}
void ameliorationNettete(unsigned char * in, unsigned char * out, int colonnes, int lignes) {
for(int ligne=1; ligne<lignes-1; ligne++)
{
for(int colonne=1; colonne<colonnes-1; colonne++)
{
if (ligne >= 1 && ligne < lignes - 1 && colonne >= 1 && colonne < colonnes - 1)
{
for (int i = 0; i < RGBSIZE; ++i)
{
unsigned char p_h = in[RGBSIZE * ((ligne - 1) * colonnes + colonne) + i];
unsigned char p_g = in[RGBSIZE * (ligne * colonnes + colonne - 1) + i];
unsigned char pixel = in[RGBSIZE * (ligne * colonnes + colonne) + i];
unsigned char p_d = in[RGBSIZE * (ligne * colonnes + colonne + 1) + i];
unsigned char p_b = in[RGBSIZE * ((ligne + 1) * colonnes + colonne) + i];
int resultat = -p_h - p_g + (5*pixel) - p_d - p_b ;
if (resultat > 255)
{
resultat = 255;
}
if (resultat < 0)
{
resultat = 0;
}
out[RGBSIZE * (ligne * colonnes + colonne) + i] = resultat;
}
}
}
}
}
void flou(unsigned char * in, unsigned char * out, int colonnes, int lignes) {
for(int ligne=1; ligne<lignes-1; ligne++)
{
for(int colonne=1; colonne<colonnes-1; colonne++)
{
if (ligne >= 1 && ligne < lignes - 1 && colonne >= 1 && colonne < colonnes - 1)
{
for (int i = 0; i < RGBSIZE; ++i)
{
unsigned char p_hg = in[RGBSIZE * ((ligne - 1) * colonnes + colonne - 1) + i];
unsigned char p_h = in[RGBSIZE * ((ligne - 1) * colonnes + colonne) + i];
unsigned char p_hd = in[RGBSIZE * ((ligne - 1) * colonnes + colonne + 1) + i];
unsigned char p_g = in[RGBSIZE * (ligne * colonnes + colonne - 1) + i];
unsigned char pixel = in[RGBSIZE * (ligne * colonnes + colonne) + i];
unsigned char p_d = in[RGBSIZE * (ligne * colonnes + colonne + 1) + i];
unsigned char p_bg = in[RGBSIZE * ((ligne + 1) * colonnes + colonne - 1) + i];
unsigned char p_b = in[RGBSIZE * ((ligne + 1) * colonnes + colonne) + i];
unsigned char p_bd = in[RGBSIZE * ((ligne + 1) * colonnes + colonne + 1) + i];
int resultat = (p_hg + p_h + p_hd + p_g + pixel + p_d + p_bg + p_b + p_bd)/9;
if (resultat > 255)
{
resultat = 255;
}
if (resultat < 0)
{
resultat = 0;
}
out[RGBSIZE * (ligne * colonnes + colonne) + i] = resultat;
}
}
}
}
}
int main()
{
cv::Mat m_in = cv::imread("image.jpeg", cv::IMREAD_UNCHANGED );
cv::Mat m_out = m_in;
auto lignes = m_in.rows;
auto colonnes = m_in.cols;
unsigned char * matrice_out;
unsigned char * matrice_in;
cudaMallocHost(&matrice_in,RGBSIZE*lignes*colonnes);
cudaMallocHost(&matrice_out,RGBSIZE*lignes*colonnes);
cudaMemcpy( matrice_in, m_in.data, RGBSIZE * lignes * colonnes, cudaMemcpyHostToDevice );
dim3 t( 32, 32 );
dim3 b( ( colonnes - 1) / t.x + 1 , ( lignes - 1 ) / t.y + 1 );
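// Note: the grid/block dimensions above are never used; the filters below run sequentially on the host.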
int i;
std::cout << "Entrez le filtre que vous voulez appliquer (1: Noir et Blanc // 2: Retourner // 3: Detection contours // 4: Amélioration de la netteté // 5: Flouter) : ";
std::cin >> i;
auto start = std::chrono::system_clock::now();
if( i == 1 )
{
noiretblanc(matrice_in, matrice_out, colonnes, lignes);
cudaMemcpy( m_out.data, matrice_out,RGBSIZE*lignes * colonnes, cudaMemcpyDeviceToHost );
cv::imwrite( "./resultat/NoirEtBlanc_Sequentiel.jpeg", m_out );
auto end = std::chrono::system_clock::now();
std::chrono::duration<double> tempsexecution = end - start;
std::cout << "Temps_NoirEtBlanc: " << tempsexecution.count() << " secondes" << std::endl;
}
else if( i == 2 )
{
retourner(matrice_in, matrice_out, colonnes, lignes);
cudaMemcpy( m_out.data, matrice_out,RGBSIZE*lignes * colonnes, cudaMemcpyDeviceToHost );
cv::imwrite( "./resultat/Retourner_Sequentiel.jpeg", m_out );
auto end = std::chrono::system_clock::now();
std::chrono::duration<double> tempsexecution = end - start;
std::cout << "Temps_Retourner: " << tempsexecution.count() << " secondes" << std::endl;
}
else if( i == 3 )
{
detectionContours(matrice_in, matrice_out, colonnes, lignes);
cudaMemcpy( m_out.data, matrice_out,RGBSIZE*lignes * colonnes, cudaMemcpyDeviceToHost );
cv::imwrite( "./resultat/DetectionContours_Sequentiel.jpeg", m_out );
auto end = std::chrono::system_clock::now();
std::chrono::duration<double> tempsexecution = end - start;
std::cout << "Temps_DetectionContours: " << tempsexecution.count() << " secondes" << std::endl;
}
else if( i == 4 )
{
ameliorationNettete(matrice_in, matrice_out, colonnes, lignes);
cudaMemcpy( m_out.data, matrice_out,RGBSIZE*lignes * colonnes, cudaMemcpyDeviceToHost );
cv::imwrite( "./resultat/AmeliorationNettete_Sequentiel.jpeg", m_out );
auto end = std::chrono::system_clock::now();
std::chrono::duration<double> tempsexecution = end - start;
std::cout << "Temps_AmeliorationNettete: " << tempsexecution.count() << " secondes" << std::endl;
}
else if( i == 5 )
{
flou(matrice_in, matrice_out, colonnes, lignes);
cudaMemcpy( m_out.data, matrice_out,RGBSIZE*lignes * colonnes, cudaMemcpyDeviceToHost );
cv::imwrite( "./resultat/Flouter_Sequentiel.jpeg", m_out );
auto end = std::chrono::system_clock::now();
std::chrono::duration<double> tempsexecution = end - start;
std::cout << "Temps_Flouter: " << tempsexecution.count() << " secondes" << std::endl;
}
else
{
std::cout << "Opération impossible" << std::endl;
auto end = std::chrono::system_clock::now();
}
return 0;
}
|
3d8e634510b2ac6e84b5aa1b908e075d0f32440f.hip
|
// !!! This is a file automatically generated by hipify!!!
/* slip.cu
* GPU Benchmark Immersed Boundary Unstructured Grid
* Based on "The Method of Regularized Stokelets" by R.Cortez`
* Elastic force computed using energy-based formulation by Devendran + Peskin
* C.Copos 02/14/2012
*/
/* WHAT VERSION IS THIS? */
/*
*
*/
#include <iostream>
#include <fstream>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <cstdio>
#include <cstdlib>
#include <stdio.h>
#include <hip/hip_runtime.h>
//#include "energy_kernel.cu"
//#include "test_kernel.cu"
#include "test_kernel2.cu"
using namespace std;
const double visc = 1.0f;
const double drag = 1.0f;
const float def = 0.1;
// Lame constants
const double lambda = 1.0;
const double mu = 1.0; // shear modulus (G)
// time
const double TMAX = 0.1f;
const double tstep = 0.0001f;/*0.0001f;*/
//static int tstop = floor(TMAX/tstep);
static int tstop = 1;
// curve constants
const int N = 1024;
const double ri = 1.2f;
const double ds = ri/(N-1);
const double e = 1.2f*ds; // e: parameter determining width of blobs or cutoffs
// vector structure
typedef struct vector{
double x; // x-component
double y; // y-component
} vector;
// 2x2 matrix
typedef struct matrix{
double x1; // term (1,1)
double y1; // term (1,2)
double x2; // term (2,1)
double y2; // term (2,2)
} matrix;
// vertex
typedef struct vertex{
vector ref; // reference coords
vector def; // deformation coords
vector force; // force
int exterior; // 1 if this is a boundary point and 0 if this is an interior point
} vertex;
// triangle
typedef struct triangle{
int A;
int B;
int C;
double def_area; // deformed area
double ref_area; // reference area
matrix f1; // term 1 of the forcing calculation
matrix f2_0_x; // x-component of term 2 of the forcing calculation for vertex 0
matrix f2_0_y; // y-component of term 2 of the forcing calculation for vertex 0
matrix f2_1_x; // x-component of term 2 of the forcing calculation for vertex 1
matrix f2_1_y; // y-component of term 2 of the forcing calculation for vertex 1
matrix f2_2_x; // x-component of term 2 of the forcing calculation for vertex 2
matrix f2_2_y; // y-component of term 2 of the forcing calculation for vertex 2
double f3; // term 3 of the forcing calculation
} triangle;
// gpu timing
double gpuTime = 0.0f;
// Compute time difference in seconds
double diffclock(clock_t s, clock_t e) {
double diffticks = s-e;
double diffms = (diffticks)/CLOCKS_PER_SEC;
return diffms;
}
// Set up preliminary info per triangle (i.e. reference area or term 3 and term 2)
void ref_info(int Npts, triangle &tr, vertex nodes[]) {
// term 3 (otherwise known as reference area)
tr.ref_area = 0.5*fabs( (nodes[tr.B].ref.x-nodes[tr.A].ref.x)*(nodes[tr.C].ref.y-nodes[tr.A].ref.y) - (nodes[tr.C].ref.x-nodes[tr.A].ref.x)*(nodes[tr.B].ref.y-nodes[tr.A].ref.y) );
tr.f3 = tr.ref_area;
// determinant of S
double detS;
detS = (1.0)*((nodes[tr.B].ref.x-nodes[tr.A].ref.x)*(nodes[tr.C].ref.y-nodes[tr.A].ref.y) - (nodes[tr.C].ref.x-nodes[tr.A].ref.x)*(nodes[tr.B].ref.y-nodes[tr.A].ref.y));
// term 2
tr.f2_0_x.x1 = (1.0/detS)*(-1.0*nodes[tr.C].ref.y + nodes[tr.B].ref.y);
tr.f2_0_x.y1 = (1.0/detS)*(nodes[tr.C].ref.x - nodes[tr.B].ref.x);
tr.f2_0_x.x2 = 0.0;
tr.f2_0_x.y2 = 0.0;
tr.f2_0_y.x1 = 0.0;
tr.f2_0_y.y1 = 0.0;
tr.f2_0_y.x2 = (1.0/detS)*(-1.0*nodes[tr.C].ref.y + nodes[tr.B].ref.y);
tr.f2_0_y.y2 = (1.0/detS)*(nodes[tr.C].ref.x - nodes[tr.B].ref.x);
tr.f2_1_x.x1 = (1.0/detS)*(nodes[tr.C].ref.y - nodes[tr.A].ref.y);
tr.f2_1_x.y1 = (1.0/detS)*(nodes[tr.A].ref.x - nodes[tr.C].ref.x);
tr.f2_1_x.x2 = 0.0;
tr.f2_1_x.y2 = 0.0;
tr.f2_1_y.x1 = 0.0;
tr.f2_1_y.y1 = 0.0;
tr.f2_1_y.x2 = (1.0/detS)*(nodes[tr.C].ref.y - nodes[tr.A].ref.y);
tr.f2_1_y.y2 = (1.0/detS)*(nodes[tr.A].ref.x - nodes[tr.C].ref.x);
tr.f2_2_x.x1 = (1.0/detS)*(nodes[tr.A].ref.y - nodes[tr.B].ref.y);
tr.f2_2_x.y1 = (1.0/detS)*(nodes[tr.B].ref.x - nodes[tr.A].ref.x);
tr.f2_2_x.x2 = 0.0;
tr.f2_2_x.y2 = 0.0;
tr.f2_2_y.x1 = 0.0;
tr.f2_2_y.y1 = 0.0;
tr.f2_2_y.x2 = (1.0/detS)*(nodes[tr.A].ref.y - nodes[tr.B].ref.y);
tr.f2_2_y.y2 = (1.0/detS)*(nodes[tr.B].ref.x - nodes[tr.A].ref.x);
}
// Set up deformation specific info per triangle (i.e. deformed area and term 1)
void def_info(int Npts, triangle &tr, vertex nodes[]) {
// deformed area
tr.def_area = 0.5*fabs((nodes[tr.B].def.x-nodes[tr.A].def.x)*(nodes[tr.C].def.y-nodes[tr.A].def.y) - (nodes[tr.B].def.y-nodes[tr.A].def.y)*(nodes[tr.C].def.x-nodes[tr.A].def.x) );
// deformation gradient tensor
matrix a;
double detS;
detS = (1.0)*((nodes[tr.B].ref.x-nodes[tr.A].ref.x)*(nodes[tr.C].ref.y-nodes[tr.A].ref.y) - (nodes[tr.C].ref.x-nodes[tr.A].ref.x)*(nodes[tr.B].ref.y-nodes[tr.A].ref.y));
a.x1 = (1.0/detS)*( (nodes[tr.B].def.x-nodes[tr.A].def.x)*(nodes[tr.C].ref.y-nodes[tr.A].ref.y) + (nodes[tr.C].def.x-nodes[tr.A].def.x)*(nodes[tr.A].ref.y - nodes[tr.B].ref.y) );
a.y1 = (1.0/detS)*( (nodes[tr.B].def.x-nodes[tr.A].def.x)*(nodes[tr.A].ref.x-nodes[tr.C].ref.x) + (nodes[tr.C].def.x-nodes[tr.A].def.x)*(nodes[tr.B].ref.x-nodes[tr.A].ref.x) );
a.x2 = (1.0/detS)*( (nodes[tr.B].def.y-nodes[tr.A].def.y)*(nodes[tr.C].ref.y-nodes[tr.A].ref.y) + (nodes[tr.C].def.y-nodes[tr.A].def.y)*(nodes[tr.A].ref.y-nodes[tr.B].ref.y) );
a.y2 = (1.0/detS)*( (nodes[tr.B].def.y-nodes[tr.A].def.y)*(nodes[tr.A].ref.x - nodes[tr.C].ref.x) + (nodes[tr.C].def.y-nodes[tr.A].def.y)*(nodes[tr.B].ref.x - nodes[tr.A].ref.x) );
// inverse transpose of deformation gradient tensor (w/o outside normalizers i.e. determinants)
matrix ait;
ait.x1 = a.y2;
ait.y1 = (-1.0)*(a.x2);
ait.x2 = (-1.0)*(a.y1);
ait.y2 = a.x1;
// Cauchy stress tensor
matrix sigma;
// Material displacement gradient tensor ( = deformation gradient tensor - I)
matrix d;
d.x1 = a.x1 - 1.0;
d.y1 = a.y1;
d.x2 = a.x2;
d.y2 = a.y2 - 1.0;
sigma.x1 = lambda*(d.x1+d.y2) + 2.0*mu*d.x1;
sigma.y1 = mu*(d.y1+d.x2);
sigma.x2 = mu*(d.x2+d.y1);
sigma.y2 = lambda*(d.x1+d.y2) + 2.0*mu*d.y2;
// term 1 (otherwise known as 1st Piola-Kirchhoff tensor)
tr.f1.x1 = ( sigma.x1*ait.x1 + sigma.y1*ait.x2 );
tr.f1.y1 = ( sigma.x1*ait.y1 + sigma.y1*ait.y2 );
tr.f1.x2 = ( sigma.x2*ait.x1 + sigma.y2*ait.x2 );
tr.f1.y2 = ( sigma.x2*ait.y1 + sigma.y2*ait.y2 );
}
// Compute velocity vector for all points in the grid
void velocity(int Npts, int Ntris, vertex Nodes[], vector f[], vector v[]) {
int CHUNK = 256;
int mem_size = CHUNK;
for(int i=0; i<Npts; i++) {
v[i].x = 0.0;
v[i].y = 0.0;
}
// Allocate host memory for result (velocity)
// THIS IS UNNECESSARY & I SHOULD CHANGE THIS
double *vx = (double*) malloc(mem_size*16*sizeof(double));
double *vy = (double*) malloc(mem_size*16*sizeof(double));
// Allocate and fill host memory for force
double *fxh = (double*) malloc(mem_size*sizeof(double));
double *fyh = (double*) malloc(mem_size*sizeof(double));
// Allocate and fill host memory for position
double *xh_at = (double*) malloc(mem_size*sizeof(double));
double *yh_at = (double*) malloc(mem_size*sizeof(double));
double *xh_dueto = (double*) malloc(mem_size*sizeof(double));
double *yh_dueto = (double*) malloc(mem_size*sizeof(double));
double esq = e*e;
hipEvent_t start, stop;
float elapsedTime;
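// Tile the all-pairs Stokeslet sum into CHUNK x CHUNK blocks: jj selects the chunk of
// evaluation ("at") points, ii the chunk of source ("dueto") points and forces.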
for(int jj=0; jj<Npts/CHUNK; jj++) {
for(int ii=0; ii<Npts/CHUNK; ii++) {
int at_pos_begin = jj*CHUNK;
int dueto_pos_begin = ii*CHUNK;
int dueto_f_begin = (ii)*CHUNK;
//printf("CHUNK: (%d, %d)\n", ii, jj);
//printf("at_pos %d, dueto_pos %d, force %d\n", at_pos_begin, dueto_pos_begin, dueto_f_begin);
// Fill force and velocity vectors
for(int j=0; j<CHUNK; j++) {
fxh[j] = f[j+dueto_f_begin].x;
fyh[j] = f[j+dueto_f_begin].y;
}
for(int j=0; j<CHUNK; j++) {
xh_at[j] = Nodes[j+at_pos_begin].def.x;
yh_at[j] = Nodes[j+at_pos_begin].def.y;
xh_dueto[j] = Nodes[j+dueto_pos_begin].def.x;
yh_dueto[j] = Nodes[j+dueto_pos_begin].def.y;
}
// Allocate device memory for x, y, F, v, and G (where G is the Stokeslet matrix)
double *xd_at, *yd_at, *xd_dueto, *yd_dueto, *Fxd, *Fyd, *Vxd, *Vyd;
hipMalloc((void**) &xd_at, mem_size*sizeof(double));
hipMalloc((void**) &yd_at, mem_size*sizeof(double));
hipMalloc((void**) &xd_dueto, mem_size*sizeof(double));
hipMalloc((void**) &yd_dueto, mem_size*sizeof(double));
hipMalloc((void**) &Fxd, mem_size*sizeof(double));
hipMalloc((void**) &Fyd, mem_size*sizeof(double));
hipMalloc((void**) &Vxd, mem_size*16*sizeof(double));
hipMalloc((void**) &Vyd, mem_size*16*sizeof(double));
// Initialize device memory to zero
hipMemset(Vxd, 0x0, mem_size*16*sizeof(double));
hipMemset(Vyd, 0x0, mem_size*16*sizeof(double));
// Copy position and force arrays to allocated device memory locations
hipMemcpy(xd_at, xh_at, mem_size*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(yd_at, yh_at, mem_size*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(xd_dueto, xh_dueto, mem_size*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(yd_dueto, yh_dueto, mem_size*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(Fxd, fxh, mem_size*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(Fyd, fyh, mem_size*sizeof(double), hipMemcpyHostToDevice);
hipEventCreate(&start);
hipEventRecord(start, 0);
// Perform Stokeslet computation
dim3 threads(THREADS, 1);
dim3 grid(BLOCKS, 1);
//printf("Number of threads per block: %d and number of blocks per grid: %d\n", THREADS, BLOCKS);
//SlipKernel<<< grid, threads >>>(xd_at, yd_at, Fxd, Fyd, Vxd, Vyd, visc, e, esq);
hipLaunchKernelGGL(( SlipKernel), dim3(grid), dim3(threads) , 0, 0, xd_at, yd_at, xd_dueto, yd_dueto, Fxd, Fyd, Vxd, Vyd, visc, e, esq);
// Copy the result from device to host
hipMemcpy(vx, Vxd, mem_size*16*sizeof(double), hipMemcpyDeviceToHost);
hipMemcpy(vy, Vyd, mem_size*16*sizeof(double), hipMemcpyDeviceToHost);
//for(int k=0; k<(mem_size*16); k++) {
// printf("k: %d (vx, vy) = (%.16f,%.16f)\n", k, vx[k], vy[k]);
//}
hipEventCreate(&stop);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
// Set velocity
//for(int j=0; j<Npts; j++) {
for(int j=0; j<CHUNK; j++) {
int next_chunk = floor(j/16)*16;
for(int k=0; k<16; k++) {
v[j+at_pos_begin].x += vx[j+next_chunk*15+k*16];
v[j+at_pos_begin].y += vy[j+next_chunk*15+k*16];
}
//printf("chunk: (%d, %d), (vx, vy) = (%.16f, %.16f)\n", ii, jj, v[j+at_pos_begin].x, v[j+at_pos_begin].y);
//printf("loc: %d, (vx, vy) = (%.16f, %.16f)\n", j+at_pos_begin, v[j+at_pos_begin].x, v[j+at_pos_begin].y);
}
//hipEventCreate(&stop);
//hipEventRecord(stop, 0);
//hipEventSynchronize(stop);
// Report timing
hipEventElapsedTime(&elapsedTime, start, stop);
gpuTime += elapsedTime;
// Clean up
hipFree(xd_at); hipFree(yd_at);
hipFree(xd_dueto); hipFree(yd_dueto);
hipFree(Fxd); hipFree(Fyd);
hipFree(Vxd); hipFree(Vyd);
}
}
for(int k=0; k<Npts; k++) {
printf("(vx, vy) = (%.16f, %.16f)\n", v[k].x, v[k].y);
}
// Clean up host memory
free(xh_at); free(yh_at);
free(xh_dueto); free(yh_dueto);
free(fxh); free(fyh);
free(vx); free(vy);
}
// Progression
void progress(int Npts, int Ntris, vertex Nodes[], triangle Tris[]) {
vector pos_init[Npts];
vector vel[Npts];
vector v[Npts];
vector f[Npts];
double ftime = 0.0f;
// file handling
ofstream f1, f2, f3;
f1.open("initial_pos_conf.txt"); f2.open("final_pos_conf.txt"); f3.open("ref_pos_conf.txt");
// print initial configuration (i.e. with initial deformation as described in startCurve() )
for(int i=0; i<Npts; i++) {
// zero the force
Nodes[i].force.x = 0.0;
Nodes[i].force.y = 0.0;
pos_init[i].x = Nodes[i].def.x;
pos_init[i].y = Nodes[i].def.y;
f1 << pos_init[i].x << " " << pos_init[i].y << endl;
f3 << Nodes[i].ref.x << " " << Nodes[i].ref.y << endl;
}
f1.close();
f3.close();
for(int t=0; t<tstop; t++) {
clock_t fbegin = clock();
float ref_Tarea = 0.0; float def_Tarea = 0.0;
// CYCLE THROUGH TRIANGLES AND COMPUTE FORCES
for(int j=0; j<Ntris; j++) {
//printf("making a call for triangle: (%d, %d, %d)\n", Tris[j].A, Tris[j].B, Tris[j].C);
def_info(Npts, Tris[j], Nodes);
// vertex A
Nodes[Tris[j].A].force.x += -1.0*(Tris[j].ref_area)*(Tris[j].f1.x1*Tris[j].f2_0_x.x1 + Tris[j].f1.y1*Tris[j].f2_0_x.y1 + Tris[j].f1.x2*Tris[j].f2_0_x.x2 + Tris[j].f1.y2*Tris[j].f2_0_x.y2);
Nodes[Tris[j].A].force.y += -1.0*(Tris[j].ref_area)*(Tris[j].f1.x1*Tris[j].f2_0_y.x1 + Tris[j].f1.y1*Tris[j].f2_0_y.y1 + Tris[j].f1.x2*Tris[j].f2_0_y.x2 + Tris[j].f1.y2*Tris[j].f2_0_y.y2);
// vertex B
Nodes[Tris[j].B].force.x += -1.0*(Tris[j].ref_area)*(Tris[j].f1.x1*Tris[j].f2_1_x.x1 + Tris[j].f1.y1*Tris[j].f2_1_x.y1 + Tris[j].f1.x2*Tris[j].f2_1_x.x2 + Tris[j].f1.y2*Tris[j].f2_1_x.y2);
Nodes[Tris[j].B].force.y += -1.0*(Tris[j].ref_area)*(Tris[j].f1.x1*Tris[j].f2_1_y.x1 + Tris[j].f1.y1*Tris[j].f2_1_y.y1 + Tris[j].f1.x2*Tris[j].f2_1_y.x2 + Tris[j].f1.y2*Tris[j].f2_1_y.y2);
// vertex C
Nodes[Tris[j].C].force.x += -1.0*(Tris[j].ref_area)*(Tris[j].f1.x1*Tris[j].f2_2_x.x1 + Tris[j].f1.y1*Tris[j].f2_2_x.y1 + Tris[j].f1.x2*Tris[j].f2_2_x.x2 + Tris[j].f1.y2*Tris[j].f2_2_x.y2);
Nodes[Tris[j].C].force.y += -1.0*(Tris[j].ref_area)*(Tris[j].f1.x1*Tris[j].f2_2_y.x1 + Tris[j].f1.y1*Tris[j].f2_2_y.y1 + Tris[j].f1.x2*Tris[j].f2_2_y.x2 + Tris[j].f1.y2*Tris[j].f2_2_y.y2);
ref_Tarea += Tris[j].ref_area;
def_Tarea += Tris[j].def_area;
}
clock_t fend = clock();
for(int k=0; k<Npts; k++) {
f[k].x = Nodes[k].force.x;
f[k].y = Nodes[k].force.y;
Nodes[k].force.x = 0.0;
Nodes[k].force.y = 0.0;
}
// compute velocity fields
velocity(Npts, Ntris, Nodes, f, vel);
// for each node in unstructured mesh
for(int i=0; i<Npts; i++) {
v[i].x = vel[i].x + f[i].x/drag;
v[i].y = vel[i].y + f[i].y/drag;
//printf("%f %f %f %f %f %f\n", Nodes[i].def.x, Nodes[i].def.y, v[i].x, v[i].y, f[i].x, f[i].y);
Nodes[i].def.x = Nodes[i].def.x + tstep*v[i].x;
Nodes[i].def.y = Nodes[i].def.y + tstep*v[i].y;
if(t==tstop-1) { f2 << Nodes[i].def.x << " " << Nodes[i].def.y << endl; }
}
double fpart = diffclock(fend, fbegin);
ftime = fpart + ftime;
}
f2.close();
// compute final area
printf("Total focing computation time (s): %.10f\n", ftime);
}
// Draw starting configuration
void startCurve() {
// file handling
ifstream f1;
ifstream f2;
ifstream f3;
f1.open("data/UnitCirclePointsN1024.txt");
f2.open("data/UnitCircleTrianglesN1024.txt");
// determine length
int Npoints = -1;
int Ntris = -1;
string c1;
string c2;
while( !f1.eof() ) {
getline(f1, c1);
Npoints++;
}
f1.close();
while( !f2.eof() ) {
getline(f2, c2);
Ntris++;
}
f2.close();
f1.open("data/UnitCirclePointsN1024.txt");
f2.open("data/UnitCircleTrianglesN1024.txt");
f3.open("data/UnitCircleBoundaryN1024.txt");
vector Nodes[Npoints];
triangle Tris[Ntris];
int Boundary[Npoints];
int counter = 0;
double d1, d2;
while(f1 >> d1 >> d2) {
//printf("(%f, %f)\n", d1, d2);
Nodes[counter].x = d1;
Nodes[counter].y = d2;
counter++;
}
f1.close();
counter = 0;
int i1, i2, i3;
while(f2 >> i1 >> i2 >> i3) {
Tris[counter].A = i1-1;
Tris[counter].B = i2-1;
Tris[counter].C = i3-1;
//printf("[%d %d %d]\n", Tris[counter].A, Tris[counter].B, Tris[counter].C);
counter++;
}
f2.close();
counter = 0;
int ext;
// set all points to interior points
for(int k=0; k<Npoints; k++) {
Boundary[k] = 0;
}
while(f3 >> ext) {
Boundary[ext-1] = 1;
counter++;
}
f3.close();
// output to array of vertices and array of triangles
vertex Points[Npoints];
for(int i=0; i<Npoints; i++) {
Points[i].ref.x = Nodes[i].x;
Points[i].ref.y = Nodes[i].y;
Points[i].exterior = Boundary[i];
// SPECIFY DEFORMATION HERE // Step 0: NO deformation
//Points[i].def.x = Nodes[i].x;
//Points[i].def.y = Nodes[i].y;
// SPECIFY DEFORMATION HERE // Step 1: LINEAR deformation
///// expansion /////
Points[i].def.x = (1.0 - def)*Nodes[i].x;
Points[i].def.y = (1.0 - def)*Nodes[i].y;
///// shear /////
//Points[i].def.x = Nodes[i].x + lambda*Nodes[i].y;
//Points[i].def.y = Nodes[i].y;
///// vertical stretch /////
//Points[i].def.x = Nodes[i].x;
//Points[i].def.y = (1.0 + lambda)*Nodes[i].y;
///// uniaxial extension /////
//Points[i].def.x = lambda*Nodes[i].x;
//Points[i].def.y = (1.0/lambda)*Nodes[i].y;
// SPECIFY DEFORMATION HERE // Step 2: NONLINEAR deformation
//Points[i].def.x = lambda*Nodes[i].x*Nodes[i].x;
//Points[i].def.y = Points[i].def.y;
}
for(int j=0; j<Ntris; j++) {
// find vertices
int iA = Tris[j].A; // index of A vertex
int iB = Tris[j].B; // index of B vertex
int iC = Tris[j].C; // index of C vertex
Points[iA].ref.x = Nodes[iA].x;
Points[iA].ref.y = Nodes[iA].y;
Points[iB].ref.x = Nodes[iB].x;
Points[iB].ref.y = Nodes[iB].y;
Points[iC].ref.x = Nodes[iC].x;
Points[iC].ref.y = Nodes[iC].y;
}
for(int k=0; k<Ntris; k++) {
// find forcing terms that remain constant with any deformation and timestep
ref_info(Npoints, Tris[k], Points);
}
progress(Npoints, Ntris, Points, Tris);
}
// Main
int main(int argc, char **argv) {
clock_t begin = clock();
startCurve();
clock_t end = clock();
printf("GPU computation time (ms): %.10f \n", gpuTime);
printf("Total computation time (s): %.10f\n", double(diffclock(end,begin)));
return 0;
}
|
3d8e634510b2ac6e84b5aa1b908e075d0f32440f.cu
|
/* slip.cu
* GPU Benchmark Immersed Boundary Unstructured Grid
* Based on "The Method of Regularized Stokelets" by R.Cortez`
* Elastic force computed using energy-based formulation by Devendran + Peskin
* C.Copos 02/14/2012
*/
/* WHAT VERSION IS THIS? */
/*
*
*/
#include <iostream>
#include <fstream>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <cstdio>
#include <cstdlib>
#include <stdio.h>
#include <cuda.h>
//#include "energy_kernel.cu"
//#include "test_kernel.cu"
#include "test_kernel2.cu"
using namespace std;
const double visc = 1.0f;
const double drag = 1.0f;
const float def = 0.1;
// Lame constants
const double lambda = 1.0;
const double mu = 1.0; // shear modulus (G)
// time
const double TMAX = 0.1f;
const double tstep = 0.0001f;/*0.0001f;*/
//static int tstop = floor(TMAX/tstep);
static int tstop = 1;
// curve constants
const int N = 1024;
const double ri = 1.2f;
const double ds = ri/(N-1);
const double e = 1.2f*ds; // e: parameter determining width of blobs or cutoffs
// vector structure
typedef struct vector{
double x; // x-component
double y; // y-component
} vector;
// 2x2 matrix
typedef struct matrix{
double x1; // term (1,1)
double y1; // term (1,2)
double x2; // term (2,1)
double y2; // term (2,2)
} matrix;
// vertex
typedef struct vertex{
vector ref; // reference coords
vector def; // deformation coords
vector force; // force
int exterior; // 1 if this is a boundary point and 0 if this is an interior point
} vertex;
// triangle
typedef struct triangle{
int A;
int B;
int C;
double def_area; // deformed area
double ref_area; // reference area
matrix f1; // term 1 of the forcing calculation
matrix f2_0_x; // x-component of term 2 of the forcing calculation for vertex 0
matrix f2_0_y; // y-component of term 2 of the forcing calculation for vertex 0
matrix f2_1_x; // x-component of term 2 of the forcing calculation for vertex 1
matrix f2_1_y; // y-component of term 2 of the forcing calculation for vertex 1
matrix f2_2_x; // x-component of term 2 of the forcing calculation for vertex 2
matrix f2_2_y; // y-component of term 2 of the forcing calculation for vertex 2
double f3; // term 3 of the forcing calculation
} triangle;
// gpu timing
double gpuTime = 0.0f;
// Compute time difference in seconds
double diffclock(clock_t s, clock_t e) {
double diffticks = s-e;
double diffms = (diffticks)/CLOCKS_PER_SEC;
return diffms;
}
// Set up preliminary info per triangle (i.e. reference area or term 3 and term 2)
void ref_info(int Npts, triangle &tr, vertex nodes[]) {
// term 3 (otherwise known as reference area)
tr.ref_area = 0.5*fabs( (nodes[tr.B].ref.x-nodes[tr.A].ref.x)*(nodes[tr.C].ref.y-nodes[tr.A].ref.y) - (nodes[tr.C].ref.x-nodes[tr.A].ref.x)*(nodes[tr.B].ref.y-nodes[tr.A].ref.y) );
tr.f3 = tr.ref_area;
// determinant of S
double detS;
detS = (1.0)*((nodes[tr.B].ref.x-nodes[tr.A].ref.x)*(nodes[tr.C].ref.y-nodes[tr.A].ref.y) - (nodes[tr.C].ref.x-nodes[tr.A].ref.x)*(nodes[tr.B].ref.y-nodes[tr.A].ref.y));
// term 2
tr.f2_0_x.x1 = (1.0/detS)*(-1.0*nodes[tr.C].ref.y + nodes[tr.B].ref.y);
tr.f2_0_x.y1 = (1.0/detS)*(nodes[tr.C].ref.x - nodes[tr.B].ref.x);
tr.f2_0_x.x2 = 0.0;
tr.f2_0_x.y2 = 0.0;
tr.f2_0_y.x1 = 0.0;
tr.f2_0_y.y1 = 0.0;
tr.f2_0_y.x2 = (1.0/detS)*(-1.0*nodes[tr.C].ref.y + nodes[tr.B].ref.y);
tr.f2_0_y.y2 = (1.0/detS)*(nodes[tr.C].ref.x - nodes[tr.B].ref.x);
tr.f2_1_x.x1 = (1.0/detS)*(nodes[tr.C].ref.y - nodes[tr.A].ref.y);
tr.f2_1_x.y1 = (1.0/detS)*(nodes[tr.A].ref.x - nodes[tr.C].ref.x);
tr.f2_1_x.x2 = 0.0;
tr.f2_1_x.y2 = 0.0;
tr.f2_1_y.x1 = 0.0;
tr.f2_1_y.y1 = 0.0;
tr.f2_1_y.x2 = (1.0/detS)*(nodes[tr.C].ref.y - nodes[tr.A].ref.y);
tr.f2_1_y.y2 = (1.0/detS)*(nodes[tr.A].ref.x - nodes[tr.C].ref.x);
tr.f2_2_x.x1 = (1.0/detS)*(nodes[tr.A].ref.y - nodes[tr.B].ref.y);
tr.f2_2_x.y1 = (1.0/detS)*(nodes[tr.B].ref.x - nodes[tr.A].ref.x);
tr.f2_2_x.x2 = 0.0;
tr.f2_2_x.y2 = 0.0;
tr.f2_2_y.x1 = 0.0;
tr.f2_2_y.y1 = 0.0;
tr.f2_2_y.x2 = (1.0/detS)*(nodes[tr.A].ref.y - nodes[tr.B].ref.y);
tr.f2_2_y.y2 = (1.0/detS)*(nodes[tr.B].ref.x - nodes[tr.A].ref.x);
}
// Set up deformation specific info per triangle (i.e. deformed area and term 1)
void def_info(int Npts, triangle &tr, vertex nodes[]) {
// deformed area
tr.def_area = 0.5*fabs((nodes[tr.B].def.x-nodes[tr.A].def.x)*(nodes[tr.C].def.y-nodes[tr.A].def.y) - (nodes[tr.B].def.y-nodes[tr.A].def.y)*(nodes[tr.C].def.x-nodes[tr.A].def.x) );
// deformation gradient tensor
matrix a;
double detS;
detS = (1.0)*((nodes[tr.B].ref.x-nodes[tr.A].ref.x)*(nodes[tr.C].ref.y-nodes[tr.A].ref.y) - (nodes[tr.C].ref.x-nodes[tr.A].ref.x)*(nodes[tr.B].ref.y-nodes[tr.A].ref.y));
a.x1 = (1.0/detS)*( (nodes[tr.B].def.x-nodes[tr.A].def.x)*(nodes[tr.C].ref.y-nodes[tr.A].ref.y) + (nodes[tr.C].def.x-nodes[tr.A].def.x)*(nodes[tr.A].ref.y - nodes[tr.B].ref.y) );
a.y1 = (1.0/detS)*( (nodes[tr.B].def.x-nodes[tr.A].def.x)*(nodes[tr.A].ref.x-nodes[tr.C].ref.x) + (nodes[tr.C].def.x-nodes[tr.A].def.x)*(nodes[tr.B].ref.x-nodes[tr.A].ref.x) );
a.x2 = (1.0/detS)*( (nodes[tr.B].def.y-nodes[tr.A].def.y)*(nodes[tr.C].ref.y-nodes[tr.A].ref.y) + (nodes[tr.C].def.y-nodes[tr.A].def.y)*(nodes[tr.A].ref.y-nodes[tr.B].ref.y) );
a.y2 = (1.0/detS)*( (nodes[tr.B].def.y-nodes[tr.A].def.y)*(nodes[tr.A].ref.x - nodes[tr.C].ref.x) + (nodes[tr.C].def.y-nodes[tr.A].def.y)*(nodes[tr.B].ref.x - nodes[tr.A].ref.x) );
// inverse transpose of deformation gradient tensor (w/o outside normalizers i.e. determinants)
matrix ait;
ait.x1 = a.y2;
ait.y1 = (-1.0)*(a.x2);
ait.x2 = (-1.0)*(a.y1);
ait.y2 = a.x1;
// Cauchy stress tensor
matrix sigma;
// Material displacement gradient tensor ( = deformation gradient tensor - I)
matrix d;
d.x1 = a.x1 - 1.0;
d.y1 = a.y1;
d.x2 = a.x2;
d.y2 = a.y2 - 1.0;
sigma.x1 = lambda*(d.x1+d.y2) + 2.0*mu*d.x1;
sigma.y1 = mu*(d.y1+d.x2);
sigma.x2 = mu*(d.x2+d.y1);
sigma.y2 = lambda*(d.x1+d.y2) + 2.0*mu*d.y2;
// term 1 (otherwise known as 1st Piola-Kirchhoff tensor)
tr.f1.x1 = ( sigma.x1*ait.x1 + sigma.y1*ait.x2 );
tr.f1.y1 = ( sigma.x1*ait.y1 + sigma.y1*ait.y2 );
tr.f1.x2 = ( sigma.x2*ait.x1 + sigma.y2*ait.x2 );
tr.f1.y2 = ( sigma.x2*ait.y1 + sigma.y2*ait.y2 );
}
// Compute velocity vector for all points in the grid
void velocity(int Npts, int Ntris, vertex Nodes[], vector f[], vector v[]) {
int CHUNK = 256;
int mem_size = CHUNK;
for(int i=0; i<Npts; i++) {
v[i].x = 0.0;
v[i].y = 0.0;
}
// Allocate host memory for result (velocity)
// THIS IS UNNECESSARY & I SHOULD CHANGE THIS
double *vx = (double*) malloc(mem_size*16*sizeof(double));
double *vy = (double*) malloc(mem_size*16*sizeof(double));
// Allocate and fill host memory for force
double *fxh = (double*) malloc(mem_size*sizeof(double));
double *fyh = (double*) malloc(mem_size*sizeof(double));
// Allocate and fill host memory for position
double *xh_at = (double*) malloc(mem_size*sizeof(double));
double *yh_at = (double*) malloc(mem_size*sizeof(double));
double *xh_dueto = (double*) malloc(mem_size*sizeof(double));
double *yh_dueto = (double*) malloc(mem_size*sizeof(double));
double esq = e*e;
cudaEvent_t start, stop;
float elapsedTime;
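// Tile the all-pairs Stokeslet sum into CHUNK x CHUNK blocks: jj selects the chunk of
// evaluation ("at") points, ii the chunk of source ("dueto") points and forces.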
for(int jj=0; jj<Npts/CHUNK; jj++) {
for(int ii=0; ii<Npts/CHUNK; ii++) {
int at_pos_begin = jj*CHUNK;
int dueto_pos_begin = ii*CHUNK;
int dueto_f_begin = (ii)*CHUNK;
//printf("CHUNK: (%d, %d)\n", ii, jj);
//printf("at_pos %d, dueto_pos %d, force %d\n", at_pos_begin, dueto_pos_begin, dueto_f_begin);
// Fill force and velocity vectors
for(int j=0; j<CHUNK; j++) {
fxh[j] = f[j+dueto_f_begin].x;
fyh[j] = f[j+dueto_f_begin].y;
}
for(int j=0; j<CHUNK; j++) {
xh_at[j] = Nodes[j+at_pos_begin].def.x;
yh_at[j] = Nodes[j+at_pos_begin].def.y;
xh_dueto[j] = Nodes[j+dueto_pos_begin].def.x;
yh_dueto[j] = Nodes[j+dueto_pos_begin].def.y;
}
// Allocate device memory for x, y, F, v, and G (where G is the Stokeslet matrix)
double *xd_at, *yd_at, *xd_dueto, *yd_dueto, *Fxd, *Fyd, *Vxd, *Vyd;
cudaMalloc((void**) &xd_at, mem_size*sizeof(double));
cudaMalloc((void**) &yd_at, mem_size*sizeof(double));
cudaMalloc((void**) &xd_dueto, mem_size*sizeof(double));
cudaMalloc((void**) &yd_dueto, mem_size*sizeof(double));
cudaMalloc((void**) &Fxd, mem_size*sizeof(double));
cudaMalloc((void**) &Fyd, mem_size*sizeof(double));
cudaMalloc((void**) &Vxd, mem_size*16*sizeof(double));
cudaMalloc((void**) &Vyd, mem_size*16*sizeof(double));
// Initialize device memory to zero
cudaMemset(Vxd, 0x0, mem_size*16*sizeof(double));
cudaMemset(Vyd, 0x0, mem_size*16*sizeof(double));
// Copy position and force arrays to allocated device memory locations
cudaMemcpy(xd_at, xh_at, mem_size*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(yd_at, yh_at, mem_size*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(xd_dueto, xh_dueto, mem_size*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(yd_dueto, yh_dueto, mem_size*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(Fxd, fxh, mem_size*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(Fyd, fyh, mem_size*sizeof(double), cudaMemcpyHostToDevice);
cudaEventCreate(&start);
cudaEventRecord(start, 0);
// Perform Stokeslet computation
dim3 threads(THREADS, 1);
dim3 grid(BLOCKS, 1);
//printf("Number of threads per block: %d and number of blocks per grid: %d\n", THREADS, BLOCKS);
//SlipKernel<<< grid, threads >>>(xd_at, yd_at, Fxd, Fyd, Vxd, Vyd, visc, e, esq);
SlipKernel<<< grid, threads >>>(xd_at, yd_at, xd_dueto, yd_dueto, Fxd, Fyd, Vxd, Vyd, visc, e, esq);
// Copy the result from device to host
cudaMemcpy(vx, Vxd, mem_size*16*sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(vy, Vyd, mem_size*16*sizeof(double), cudaMemcpyDeviceToHost);
//for(int k=0; k<(mem_size*16); k++) {
// printf("k: %d (vx, vy) = (%.16f,%.16f)\n", k, vx[k], vy[k]);
//}
cudaEventCreate(&stop);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
// Set velocity
//for(int j=0; j<Npts; j++) {
for(int j=0; j<CHUNK; j++) {
int next_chunk = floor(j/16)*16;
for(int k=0; k<16; k++) {
v[j+at_pos_begin].x += vx[j+next_chunk*15+k*16];
v[j+at_pos_begin].y += vy[j+next_chunk*15+k*16];
}
//printf("chunk: (%d, %d), (vx, vy) = (%.16f, %.16f)\n", ii, jj, v[j+at_pos_begin].x, v[j+at_pos_begin].y);
//printf("loc: %d, (vx, vy) = (%.16f, %.16f)\n", j+at_pos_begin, v[j+at_pos_begin].x, v[j+at_pos_begin].y);
}
//cudaEventCreate(&stop);
//cudaEventRecord(stop, 0);
//cudaEventSynchronize(stop);
// Report timing
cudaEventElapsedTime(&elapsedTime, start, stop);
gpuTime += elapsedTime;
// Clean up
cudaFree(xd_at); cudaFree(yd_at);
cudaFree(xd_dueto); cudaFree(yd_dueto);
cudaFree(Fxd); cudaFree(Fyd);
cudaFree(Vxd); cudaFree(Vyd);
}
}
for(int k=0; k<Npts; k++) {
printf("(vx, vy) = (%.16f, %.16f)\n", v[k].x, v[k].y);
}
// Clean up host memory
free(xh_at); free(yh_at);
free(xh_dueto); free(yh_dueto);
free(fxh); free(fyh);
free(vx); free(vy);
}
// Progression
void progress(int Npts, int Ntris, vertex Nodes[], triangle Tris[]) {
vector pos_init[Npts];
vector vel[Npts];
vector v[Npts];
vector f[Npts];
double ftime = 0.0f;
// file handling
ofstream f1, f2, f3;
f1.open("initial_pos_conf.txt"); f2.open("final_pos_conf.txt"); f3.open("ref_pos_conf.txt");
// print initial configuration (i.e. with initial deformation as described in startCurve() )
for(int i=0; i<Npts; i++) {
// zero the force
Nodes[i].force.x = 0.0;
Nodes[i].force.y = 0.0;
pos_init[i].x = Nodes[i].def.x;
pos_init[i].y = Nodes[i].def.y;
f1 << pos_init[i].x << " " << pos_init[i].y << endl;
f3 << Nodes[i].ref.x << " " << Nodes[i].ref.y << endl;
}
f1.close();
f3.close();
for(int t=0; t<tstop; t++) {
clock_t fbegin = clock();
float ref_Tarea = 0.0; float def_Tarea = 0.0;
// CYCLE THROUGH TRIANGLES AND COMPUTE FORCES
for(int j=0; j<Ntris; j++) {
//printf("making a call for triangle: (%d, %d, %d)\n", Tris[j].A, Tris[j].B, Tris[j].C);
def_info(Npts, Tris[j], Nodes);
// vertex A
Nodes[Tris[j].A].force.x += -1.0*(Tris[j].ref_area)*(Tris[j].f1.x1*Tris[j].f2_0_x.x1 + Tris[j].f1.y1*Tris[j].f2_0_x.y1 + Tris[j].f1.x2*Tris[j].f2_0_x.x2 + Tris[j].f1.y2*Tris[j].f2_0_x.y2);
Nodes[Tris[j].A].force.y += -1.0*(Tris[j].ref_area)*(Tris[j].f1.x1*Tris[j].f2_0_y.x1 + Tris[j].f1.y1*Tris[j].f2_0_y.y1 + Tris[j].f1.x2*Tris[j].f2_0_y.x2 + Tris[j].f1.y2*Tris[j].f2_0_y.y2);
// vertex B
Nodes[Tris[j].B].force.x += -1.0*(Tris[j].ref_area)*(Tris[j].f1.x1*Tris[j].f2_1_x.x1 + Tris[j].f1.y1*Tris[j].f2_1_x.y1 + Tris[j].f1.x2*Tris[j].f2_1_x.x2 + Tris[j].f1.y2*Tris[j].f2_1_x.y2);
Nodes[Tris[j].B].force.y += -1.0*(Tris[j].ref_area)*(Tris[j].f1.x1*Tris[j].f2_1_y.x1 + Tris[j].f1.y1*Tris[j].f2_1_y.y1 + Tris[j].f1.x2*Tris[j].f2_1_y.x2 + Tris[j].f1.y2*Tris[j].f2_1_y.y2);
// vertex C
Nodes[Tris[j].C].force.x += -1.0*(Tris[j].ref_area)*(Tris[j].f1.x1*Tris[j].f2_2_x.x1 + Tris[j].f1.y1*Tris[j].f2_2_x.y1 + Tris[j].f1.x2*Tris[j].f2_2_x.x2 + Tris[j].f1.y2*Tris[j].f2_2_x.y2);
Nodes[Tris[j].C].force.y += -1.0*(Tris[j].ref_area)*(Tris[j].f1.x1*Tris[j].f2_2_y.x1 + Tris[j].f1.y1*Tris[j].f2_2_y.y1 + Tris[j].f1.x2*Tris[j].f2_2_y.x2 + Tris[j].f1.y2*Tris[j].f2_2_y.y2);
ref_Tarea += Tris[j].ref_area;
def_Tarea += Tris[j].def_area;
}
clock_t fend = clock();
for(int k=0; k<Npts; k++) {
f[k].x = Nodes[k].force.x;
f[k].y = Nodes[k].force.y;
Nodes[k].force.x = 0.0;
Nodes[k].force.y = 0.0;
}
// compute velocity fields
velocity(Npts, Ntris, Nodes, f, vel);
// for each node in unstructured mesh
for(int i=0; i<Npts; i++) {
v[i].x = vel[i].x + f[i].x/drag;
v[i].y = vel[i].y + f[i].y/drag;
//printf("%f %f %f %f %f %f\n", Nodes[i].def.x, Nodes[i].def.y, v[i].x, v[i].y, f[i].x, f[i].y);
Nodes[i].def.x = Nodes[i].def.x + tstep*v[i].x;
Nodes[i].def.y = Nodes[i].def.y + tstep*v[i].y;
if(t==tstop-1) { f2 << Nodes[i].def.x << " " << Nodes[i].def.y << endl; }
}
double fpart = diffclock(fend, fbegin);
ftime = fpart + ftime;
}
f2.close();
// compute final area
printf("Total focing computation time (s): %.10f\n", ftime);
}
// Draw starting configuration
void startCurve() {
// file handling
ifstream f1;
ifstream f2;
ifstream f3;
f1.open("data/UnitCirclePointsN1024.txt");
f2.open("data/UnitCircleTrianglesN1024.txt");
// determine length
int Npoints = -1;
int Ntris = -1;
string c1;
string c2;
while( !f1.eof() ) {
getline(f1, c1);
Npoints++;
}
f1.close();
while( !f2.eof() ) {
getline(f2, c2);
Ntris++;
}
f2.close();
f1.open("data/UnitCirclePointsN1024.txt");
f2.open("data/UnitCircleTrianglesN1024.txt");
f3.open("data/UnitCircleBoundaryN1024.txt");
vector Nodes[Npoints];
triangle Tris[Ntris];
int Boundary[Npoints];
int counter = 0;
double d1, d2;
while(f1 >> d1 >> d2) {
//printf("(%f, %f)\n", d1, d2);
Nodes[counter].x = d1;
Nodes[counter].y = d2;
counter++;
}
f1.close();
counter = 0;
int i1, i2, i3;
while(f2 >> i1 >> i2 >> i3) {
Tris[counter].A = i1-1;
Tris[counter].B = i2-1;
Tris[counter].C = i3-1;
//printf("[%d %d %d]\n", Tris[counter].A, Tris[counter].B, Tris[counter].C);
counter++;
}
f2.close();
counter = 0;
int ext;
// set all points to interior points
for(int k=0; k<Npoints; k++) {
Boundary[k] = 0;
}
while(f3 >> ext) {
Boundary[ext-1] = 1;
counter++;
}
f3.close();
// output to array of vertices and array of triangles
vertex Points[Npoints];
for(int i=0; i<Npoints; i++) {
Points[i].ref.x = Nodes[i].x;
Points[i].ref.y = Nodes[i].y;
Points[i].exterior = Boundary[i];
// SPECIFY DEFORMATION HERE // Step 0: NO deformation
//Points[i].def.x = Nodes[i].x;
//Points[i].def.y = Nodes[i].y;
// SPECIFY DEFORMATION HERE // Step 1: LINEAR deformation
///// expansion /////
Points[i].def.x = (1.0 - def)*Nodes[i].x;
Points[i].def.y = (1.0 - def)*Nodes[i].y;
///// shear /////
//Points[i].def.x = Nodes[i].x + lambda*Nodes[i].y;
//Points[i].def.y = Nodes[i].y;
///// vertical stretch /////
//Points[i].def.x = Nodes[i].x;
//Points[i].def.y = (1.0 + lambda)*Nodes[i].y;
///// uniaxial extension /////
//Points[i].def.x = lambda*Nodes[i].x;
//Points[i].def.y = (1.0/lambda)*Nodes[i].y;
// SPECIFY DEFORMATION HERE // Step 2: NONLINEAR deformation
//Points[i].def.x = lambda*Nodes[i].x*Nodes[i].x;
//Points[i].def.y = Points[i].def.y;
}
for(int j=0; j<Ntris; j++) {
// find vertices
int iA = Tris[j].A; // index of A vertex
int iB = Tris[j].B; // index of B vertex
int iC = Tris[j].C; // index of C vertex
Points[iA].ref.x = Nodes[iA].x;
Points[iA].ref.y = Nodes[iA].y;
Points[iB].ref.x = Nodes[iB].x;
Points[iB].ref.y = Nodes[iB].y;
Points[iC].ref.x = Nodes[iC].x;
Points[iC].ref.y = Nodes[iC].y;
}
for(int k=0; k<Ntris; k++) {
// find forcing terms that remain constant with any deformation and timestep
ref_info(Npoints, Tris[k], Points);
}
progress(Npoints, Ntris, Points, Tris);
}
// Main
int main(int argc, char **argv) {
clock_t begin = clock();
startCurve();
clock_t end = clock();
printf("GPU computation time (ms): %.10f \n", gpuTime);
printf("Total computation time (s): %.10f\n", double(diffclock(end,begin)));
return 0;
}
|
bd6d514999a25375dc768302a4369ec8fe6caee6.hip
|
// !!! This is a file automatically generated by hipify!!!
/* slip.cu
* GPU Benchmark Immersed Boundary Structured Grid
* Based on "The Method of Regularized Stokelets" by R.Cortez`
* C.Copos 12/05/2012
*/
#include <iostream>
#include <fstream>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <cstdio>
#include <cstdlib>
#include <stdio.h>
#include <hip/hip_runtime.h>
#include "slip_kernel.cu"
using namespace std;
const double PI = 3.14159265358979323846264338f;
const int N = 128; // # side square partitions
const double visc = 1.0f;
const double flow = 1.0f;
const double drag = 0.0001f;
// time
const double TMAX = 0.1f;/*0.008f;*/
const double tstep = 0.0001f;/*0.0001f;*/
static int tstop = floor(TMAX/tstep);
// curve constants
const double r0 = 1.0f;
const double ri = 1.2f;
const double ds = ri/(N-1); const double e = 1.2f*ds; // e: parameter determining width of blobs or cutoffs
// vector structure
struct vector{
double x; // x-component
double y; // y-component
};
// spring constants
const double kl = 1.0f*ds/*0.1f*ds*/;
const double ks = 0.5f*kl; // from victor camacho's elasticity paper
// gpu timing
double gpuTime = 0.0f;
// Compute time difference in seconds
double diffclock(clock_t s, clock_t e) {
double diffticks = s-e;
double diffms = (diffticks)/CLOCKS_PER_SEC;
return diffms;
}
// Compute average elastic force at point ij due to its nearest neighboring points
vector eforce(int i, int j, double x[N][N], double y[N][N]) {
vector f; // final force
double rl = r0/(N-1); // rest length for longitudinal springs
double rd = sqrtf(2.0f)*rl; // rest length for diagonal springs
int i_0, i_1, i_2, j_0, j_1, j_2;
i_0 = i-1;
i_1 = i;
i_2 = i+1;
j_0 = j-1;
j_1 = j;
j_2 = j+1;
double dlk_00, dlk_01, dlk_02, dlk_10, dlk_12, dlk_20, dlk_21, dlk_22;
vector f_00, f_01, f_02, f_10, f_12, f_20, f_21, f_22;
// distance between point (i,j) = (i_1,j_1) and points ...
// top left corner
if (i_1==0 && j_1==0) {
dlk_00 = 0.0f; dlk_10 = 0.0f; dlk_20 = 0.0f; dlk_01 = 0.0f; dlk_02 = 0.0f;
dlk_21 = sqrtf(powf(x[i_1][j_1]-x[i_2][j_1],2) + powf(y[i_1][j_1]-y[i_2][j_1],2));
dlk_12 = sqrtf(powf(x[i_1][j_1]-x[i_1][j_2],2) + powf(y[i_1][j_1]-y[i_1][j_2],2));
dlk_22 = sqrtf(powf(x[i_1][j_1]-x[i_2][j_2],2) + powf(y[i_1][j_1]-y[i_2][j_2],2));
}
// top right corner
else if (i_1==(N-1) && j_1==0) {
dlk_00 = 0.0f; dlk_10 = 0.0f; dlk_20 = 0.0f; dlk_21 = 0.0f; dlk_22 = 0.0f;
dlk_01 = sqrtf(powf(x[i_1][j_1]-x[i_0][j_1],2) + powf(y[i_1][j_1]-y[i_0][j_1],2));
dlk_02 = sqrtf(powf(x[i_1][j_1]-x[i_0][j_2],2) + powf(y[i_1][j_1]-y[i_0][j_2],2));
dlk_12 = sqrtf(powf(x[i_1][j_1]-x[i_1][j_2],2) + powf(y[i_1][j_1]-y[i_1][j_2],2));
}
// bottom left corner
else if (i_1==0 && j_1==(N-1)) {
dlk_00 = 0.0f; dlk_01 = 0.0f; dlk_02 = 0.0f; dlk_12 = 0.0f; dlk_22 = 0.0f;
dlk_10 = sqrtf(powf(x[i_1][j_1]-x[i_1][j_0],2) + powf(y[i_1][j_1]-y[i_1][j_0],2));
dlk_20 = sqrtf(powf(x[i_1][j_1]-x[i_2][j_0],2) + powf(y[i_1][j_1]-y[i_2][j_0],2));
dlk_21 = sqrtf(powf(x[i_1][j_1]-x[i_2][j_1],2) + powf(y[i_1][j_1]-y[i_2][j_1],2));
}
// bottom right corner
else if (i_1==(N-1) && j_1==(N-1)) {
dlk_20 = 0.0f; dlk_21 = 0.0f; dlk_22 = 0.0f; dlk_12 = 0.0f; dlk_02 = 0.0f;
dlk_00 = sqrtf(powf(x[i_1][j_1]-x[i_0][j_0],2) + powf(y[i_1][j_1]-y[i_0][j_0],2));
dlk_10 = sqrtf(powf(x[i_1][j_1]-x[i_1][j_0],2) + powf(y[i_1][j_1]-y[i_1][j_0],2));
dlk_01 = sqrtf(powf(x[i_1][j_1]-x[i_0][j_1],2) + powf(y[i_1][j_1]-y[i_0][j_1],2));
}
// top edge
else if (j_1==0) {
dlk_00 = 0.0f; dlk_10 = 0.0f; dlk_20 = 0.0f;
dlk_01 = sqrtf(powf(x[i_1][j_1]-x[i_0][j_1],2) + powf(y[i_1][j_1]-y[i_0][j_1],2));
dlk_21 = sqrtf(powf(x[i_1][j_1]-x[i_2][j_1],2) + powf(y[i_1][j_1]-y[i_2][j_1],2));
dlk_02 = sqrtf(powf(x[i_1][j_1]-x[i_0][j_2],2) + powf(y[i_1][j_1]-y[i_0][j_2],2));
dlk_12 = sqrtf(powf(x[i_1][j_1]-x[i_1][j_2],2) + powf(y[i_1][j_1]-y[i_1][j_2],2));
dlk_22 = sqrtf(powf(x[i_1][j_1]-x[i_2][j_2],2) + powf(y[i_1][j_1]-y[i_2][j_2],2));
}
// right edge
else if (i_1==(N-1)) {
dlk_20 = 0.0f; dlk_21 = 0.0f; dlk_22 = 0.0f;
dlk_00 = sqrtf(powf(x[i_1][j_1]-x[i_0][j_0],2) + powf(y[i_1][j_1]-y[i_0][j_0],2));
dlk_10 = sqrtf(powf(x[i_1][j_1]-x[i_1][j_0],2) + powf(y[i_1][j_1]-y[i_1][j_0],2));
dlk_01 = sqrtf(powf(x[i_1][j_1]-x[i_0][j_1],2) + powf(y[i_1][j_1]-y[i_0][j_1],2));
dlk_02 = sqrtf(powf(x[i_1][j_1]-x[i_0][j_2],2) + powf(y[i_1][j_1]-y[i_0][j_2],2));
dlk_12 = sqrtf(powf(x[i_1][j_1]-x[i_1][j_2],2) + powf(y[i_1][j_1]-y[i_1][j_2],2));
}
// bottom edge
else if (j_1==(N-1)) {
dlk_02 = 0.0f; dlk_12 = 0.0f; dlk_22 = 0.0f;
dlk_00 = sqrtf(powf(x[i_1][j_1]-x[i_0][j_0],2) + powf(y[i_1][j_1]-y[i_0][j_0],2));
dlk_10 = sqrtf(powf(x[i_1][j_1]-x[i_1][j_0],2) + powf(y[i_1][j_1]-y[i_1][j_0],2));
dlk_20 = sqrtf(powf(x[i_1][j_1]-x[i_2][j_0],2) + powf(y[i_1][j_1]-y[i_2][j_0],2));
dlk_01 = sqrtf(powf(x[i_1][j_1]-x[i_0][j_1],2) + powf(y[i_1][j_1]-y[i_0][j_1],2));
dlk_21 = sqrtf(powf(x[i_1][j_1]-x[i_2][j_1],2) + powf(y[i_1][j_1]-y[i_2][j_1],2));
}
// left edge
else if (i_1==0) {
dlk_00 = 0.0f; dlk_01 = 0.0f; dlk_02 = 0.0f;
dlk_10 = sqrtf(powf(x[i_1][j_1]-x[i_1][j_0],2) + powf(y[i_1][j_1]-y[i_1][j_0],2));
dlk_20 = sqrtf(powf(x[i_1][j_1]-x[i_2][j_0],2) + powf(y[i_1][j_1]-y[i_2][j_0],2));
dlk_21 = sqrtf(powf(x[i_1][j_1]-x[i_2][j_1],2) + powf(y[i_1][j_1]-y[i_2][j_1],2));
dlk_12 = sqrtf(powf(x[i_1][j_1]-x[i_1][j_2],2) + powf(y[i_1][j_1]-y[i_1][j_2],2));
dlk_22 = sqrtf(powf(x[i_1][j_1]-x[i_2][j_2],2) + powf(y[i_1][j_1]-y[i_2][j_2],2));
}
// interior
else {
dlk_00 = sqrtf(powf(x[i_1][j_1]-x[i_0][j_0],2) + powf(y[i_1][j_1]-y[i_0][j_0],2));
dlk_10 = sqrtf(powf(x[i_1][j_1]-x[i_1][j_0],2) + powf(y[i_1][j_1]-y[i_1][j_0],2));
dlk_20 = sqrtf(powf(x[i_1][j_1]-x[i_2][j_0],2) + powf(y[i_1][j_1]-y[i_2][j_0],2));
dlk_01 = sqrtf(powf(x[i_1][j_1]-x[i_0][j_1],2) + powf(y[i_1][j_1]-y[i_0][j_1],2));
dlk_21 = sqrtf(powf(x[i_1][j_1]-x[i_2][j_1],2) + powf(y[i_1][j_1]-y[i_2][j_1],2));
dlk_02 = sqrtf(powf(x[i_1][j_1]-x[i_0][j_2],2) + powf(y[i_1][j_1]-y[i_0][j_2],2));
dlk_12 = sqrtf(powf(x[i_1][j_1]-x[i_1][j_2],2) + powf(y[i_1][j_1]-y[i_1][j_2],2));
dlk_22 = sqrtf(powf(x[i_1][j_1]-x[i_2][j_2],2) + powf(y[i_1][j_1]-y[i_2][j_2],2));
}
// finally, compute forces
if (dlk_00 == 0.0f) { f_00.x = 0.0f; f_00.y = 0.0f; }
else {
f_00.x = -1.0f*ks*(dlk_00-rd)*((x[i_1][j_1] - x[i_0][j_0])/dlk_00);
f_00.y = -1.0f*ks*(dlk_00-rd)*((y[i_1][j_1] - y[i_0][j_0])/dlk_00);
}
if (dlk_10 == 0.0f) { f_10.x = 0.0f; f_10.y = 0.0f; }
else {
f_10.x = -1.0f*kl*(dlk_10-rl)*((x[i_1][j_1] - x[i_1][j_0])/dlk_10);
f_10.y = 1.0f*kl*(dlk_10-rl)*((-y[i_1][j_1] + y[i_1][j_0])/dlk_10);
}
if (dlk_20 == 0.0f) { f_20.x = 0.0f; f_20.y = 0.0f; }
else {
f_20.x = -1.0f*ks*(dlk_20-rd)*((x[i_1][j_1] - x[i_2][j_0])/dlk_20);
f_20.y = -1.0f*ks*(dlk_20-rd)*((y[i_1][j_1] - y[i_2][j_0])/dlk_20);
}
if (dlk_01 == 0.0f) { f_01.x = 0.0f; f_01.y = 0.0f; }
else {
f_01.x = -1.0f*kl*(dlk_01-rl)*((x[i_1][j_1] - x[i_0][j_1])/dlk_01);
f_01.y = 1.0f*kl*(dlk_01-rl)*((-y[i_1][j_1] + y[i_0][j_1])/dlk_01);
}
if (dlk_21 == 0.0f) { f_21.x = 0.0f; f_21.y = 0.0f; }
else {
f_21.x = -1.0f*kl*(dlk_21-rl)*((x[i_1][j_1] - x[i_2][j_1])/dlk_21);
f_21.y = 1.0f*kl*(dlk_21-rl)*((-y[i_1][j_1] + y[i_2][j_1])/dlk_21);
}
if (dlk_02 == 0.0f) { f_02.x = 0.0f; f_02.y = 0.0f; }
else {
f_02.x = -1.0f*ks*(dlk_02-rd)*((x[i_1][j_1] - x[i_0][j_2])/dlk_02);
f_02.y = -1.0f*ks*(dlk_02-rd)*((y[i_1][j_1] - y[i_0][j_2])/dlk_02);
}
if (dlk_12 == 0.0f) { f_12.x = 0.0f; f_12.y = 0.0f; }
else {
f_12.x = -1.0f*kl*(dlk_12-rl)*((x[i_1][j_1] - x[i_1][j_2])/dlk_12);
f_12.y = 1.0f*kl*(dlk_12-rl)*((-y[i_1][j_1] + y[i_1][j_2])/dlk_12);
}
if (dlk_22 == 0.0f) { f_22.x = 0.0f; f_22.y = 0.0f; }
else {
f_22.x = -1.0f*ks*(dlk_22-rd)*((x[i_1][j_1] - x[i_2][j_2])/dlk_22);
f_22.y = -1.0f*ks*(dlk_22-rd)*((y[i_1][j_1] - y[i_2][j_2])/dlk_22);
}
// evaluate final force components
f.x = (double)(f_00.x + f_10.x + f_20.x + f_01.x + f_21.x + f_02.x + f_12.x + f_22.x);
f.y = (double)(f_00.y + f_10.y + f_20.y + f_01.y + f_21.y + f_02.y + f_12.y + f_22.y);
// what's going on with the forces?
//printf("%f %f %f %f\n", x[i][j], y[i][j], f.x, f.y);
///*
//printf("Force @ position: (%f,%f) is: (%f,%f)\n", x[i][j], y[i][j], f.x, f.y);
//printf("Force due to (0,0) neighbor is: (%f,%f)\n", f_00.x, f_00.y);
//printf("Force due to (1,0) neighbor is: (%f,%f)\n", f_10.x, f_10.y);
//printf("Force due to (2,0) neighbor is: (%f,%f)\n", f_20.x, f_20.y);
//printf("Force due to (0,1) neighbor is: (%f,%f)\n", f_01.x, f_01.y);
//printf("Force due to (2,1) neighbor is: (%f,%f)\n", f_21.x, f_21.y);
//printf("Force due to (0,2) neighbor is: (%f,%f)\n", f_02.x, f_02.y);
//printf("Force due to (1,2) neighbor is: (%f,%f)\n", f_12.x, f_12.y);
//printf("Force due to (2,2) neighbor is: (%f,%f)\n", f_22.x, f_22.y);
//*/
return f;
}
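/*
 * Note on the force law summed above: every neighbor contributes a Hookean term
 *   F = -k * (|d| - L0) * d/|d|,  with d = x_ij - x_neighbor,
 * using k = kl, L0 = rl for the four axial neighbors and k = ks, L0 = rd for the
 * four diagonal ones. Quick check with illustrative numbers: an axial neighbor at
 * distance 1.5*rl gives a stretch of 0.5*rl, so the contribution has magnitude
 * 0.5*kl*rl and points from (i,j) toward that neighbor.
 */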
// Compute pressure at point ij due to all other points in the structured grid
double pressure(int i, int j, double x[N][N], double y[N][N]) {
double p = 0.0f; vector f; // p accumulates contributions below, so it must start at zero
double pp = 0.0f; // partial pressure
double rk = 0.0f; double sq = 0.0f;
for (int jj=0; jj<N; jj++) { // loop over all nodes in the grid
for (int ii=0; ii<N; ii++) {
f.x = eforce(ii, jj, x, y).x;
f.y = eforce(ii, jj, x, y).y;
double r = sqrtf(powf(x[i][j],2) + powf(y[i][j],2));
double pk = sqrtf(powf(x[ii][jj],2)+powf(y[ii][jj],2));
double theta = atan2f(y[i][j], x[i][j]);
double thetaj = atan2f(y[ii][jj], x[ii][jj]);
double dtheta;
if (theta>PI) { dtheta = thetaj + 2*PI - theta; }
else { dtheta = theta - thetaj; }
// dealing with rounding off errors
if (dtheta > -0.000001 && dtheta < 0.000001) { rk = 0.0f; }
else { rk = sqrtf(powf(r,2) + powf(pk,2) - 2*r*pk*cosf(dtheta)); } // rk^2 = |x-xk|^2 = r^2 + pk^2 - 2*r*pk*cos(theta-thetak) where r=|x|, pk=|xk|
sq = sqrtf(powf(rk,2)+powf(e,2));
double h = (1.0f)/(2.0f*PI) * (powf(rk,2)+2.0f*powf(e,2)+e*sq)/((sq+e)*powf(sq,1.5f));
pp = (f.x*(x[i][j]-x[ii][jj]) + f.y*(y[i][j]-y[ii][jj])) * h;
p += pp;
}
}
return p;
}
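/*
 * The double loop above accumulates, in one formula (same symbols as the code):
 *   rk^2 = r^2 + pk^2 - 2*r*pk*cos(dtheta)        (law of cosines, rk = |x_ij - x_k|)
 *   sq   = sqrt(rk^2 + e^2)
 *   h    = (1/(2*PI)) * (rk^2 + 2*e^2 + e*sq) / ((sq + e) * sq^1.5)
 *   p    = sum over k of  h * ( f_k . (x_ij - x_k) )
 * i.e. a regularized pressure contribution from every grid node k, smoothed over the
 * blob width e defined at the top of the file.
 */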
// Compute velocity vector for all points in the grid
void velocity(double x[N][N], double y[N][N], double vx[], double vy[], int size) {
int mem_size = N*N;
// Allocate host memory for flatten xh, yh
double *xh = (double*) malloc(mem_size*sizeof(double));
double *yh = (double*) malloc(mem_size*sizeof(double));
// Flatten 2D arrays on host (going by rows)
for(int j=0; j<N; j++) {
for(int i=0; i<N; i++) {
xh[i+j*N] = x[i][j];
yh[i+j*N] = y[i][j];
}
}
// Allocate host memory for full grid force
double *fxh = (double*) malloc(mem_size*sizeof(double));
double *fyh = (double*) malloc(mem_size*sizeof(double));
for(int j=0; j<N; j++) {
for(int i=0; i<N; i++) {
fxh[i+j*N] = eforce(i, j, x, y).x;
fyh[i+j*N] = eforce(i, j, x, y).y;
}
}
// Allocate host memory for "result" (i.e. velocity array)
if (size != mem_size) { printf("Incorrect velocity array dimensions were declared in progression\n"); }
// Allocate device memory for x, y, F, v, and G (where G is the Stokeslet matrix)
double *xd, *yd, *Fxd, *Fyd, *Vxd, *Vyd, *Gxd, *Gyd;
hipMalloc((void**) &xd, mem_size*sizeof(double));
hipMalloc((void**) &yd, mem_size*sizeof(double));
hipMalloc((void**) &Fxd, mem_size*sizeof(double));
hipMalloc((void**) &Fyd, mem_size*sizeof(double));
hipMalloc((void**) &Vxd, mem_size*sizeof(double));
hipMalloc((void**) &Vyd, mem_size*sizeof(double));
hipMalloc((void**) &Gxd, mem_size*mem_size*sizeof(double));
hipMalloc((void**) &Gyd, mem_size*mem_size*sizeof(double));
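// Rough footprint of the allocations above (N = 128, double precision): each of the six
// vectors holds mem_size = N*N = 16384 entries (128 KiB), while each Stokeslet matrix
// Gxd/Gyd holds mem_size*mem_size = 2^28 entries, i.e. about 2 GiB per matrix and 4 GiB
// for the pair, which dominates the device memory budget.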
// Initialize Stokeslet to zero
hipMemset(Gxd, 0.0f, mem_size*mem_size*sizeof(double));
hipMemset(Gyd, 0.0f, mem_size*mem_size*sizeof(double));
// Copy position and force arrays to allocated device memory locations
hipMemcpy(xd, xh, mem_size*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(yd, yh, mem_size*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(Fxd, fxh, mem_size*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(Fyd, fyh, mem_size*sizeof(double), hipMemcpyHostToDevice);
hipEvent_t start, stop; float elapsedTime;
hipEventCreate(&start);
hipEventRecord(start, 0);
// Perform Stokeslet computation
if(N<128) {
dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
dim3 grid(width/threads.x , height/threads.y);
hipLaunchKernelGGL(( SlipKernel), dim3(grid), dim3(threads) , 0, 0, xd, yd, Fxd, Fyd, Vxd, Vyd, Gxd, Gyd, N, visc, e, PI);
hipEventCreate(&stop);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
// Copy the result from device to host
hipMemcpy(vx, Vxd, mem_size*sizeof(double), hipMemcpyDeviceToHost);
hipMemcpy(vy, Vyd, mem_size*sizeof(double), hipMemcpyDeviceToHost);
//for(int i=0; i<mem_size; i++) {
//printf("(vx, vy) = (%.10f,%.10f)\n", vx[i], vy[i]);
//}
// Report timing
hipEventElapsedTime(&elapsedTime, start, stop);
gpuTime += elapsedTime;
}
else if (N==128) {
double *vx1 = (double*) malloc((mem_size/4)*sizeof(double));
double *vx2 = (double*) malloc((mem_size/4)*sizeof(double));
double *vx3 = (double*) malloc((mem_size/4)*sizeof(double));
double *vx4 = (double*) malloc((mem_size/4)*sizeof(double));
double *vy1 = (double*) malloc((mem_size/4)*sizeof(double));
double *vy2 = (double*) malloc((mem_size/4)*sizeof(double));
double *vy3 = (double*) malloc((mem_size/4)*sizeof(double));
double *vy4 = (double*) malloc((mem_size/4)*sizeof(double));
for(int i=0; i<4; i++) {
dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
dim3 grid((width/4)/threads.x , (height/4)/threads.y);
hipLaunchKernelGGL(( SlipKernel), dim3(grid), dim3(threads) , 0, 0, xd, yd, Fxd, Fyd, Vxd, Vyd, Gxd, Gyd, 64, visc, e, PI);
// Copy the result from device to host
if (i==0) {
hipMemcpy(vx1, Vxd, (mem_size/4)*sizeof(double), hipMemcpyDeviceToHost);
hipMemcpy(vy1, Vyd, (mem_size/4)*sizeof(double), hipMemcpyDeviceToHost);
}
else if (i==1) {
hipMemcpy(vx2, Vxd, (mem_size/4)*sizeof(double), hipMemcpyDeviceToHost);
hipMemcpy(vy2, Vyd, (mem_size/4)*sizeof(double), hipMemcpyDeviceToHost);
}
else if (i==2) {
hipMemcpy(vx3, Vxd, (mem_size/4)*sizeof(double), hipMemcpyDeviceToHost);
hipMemcpy(vy3, Vyd, (mem_size/4)*sizeof(double), hipMemcpyDeviceToHost);
}
else {
hipMemcpy(vx4, Vxd, (mem_size/4)*sizeof(double), hipMemcpyDeviceToHost);
hipMemcpy(vy4, Vyd, (mem_size/4)*sizeof(double), hipMemcpyDeviceToHost);
}
//for(int i=0; i<mem_size; i++) {
//printf("(vx, vy) = (%.10f,%.10f)\n", vx[i], vy[i]);
//}
}
hipEventCreate(&stop);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
// Report timing
hipEventElapsedTime(&elapsedTime, start, stop);
gpuTime += elapsedTime;
// Put together arrays
for(int i=0; i<N*N; i++) {
if(i<=(N*N)/4-1) { vx[i] = vx1[i]; vy[i] = vy1[i]; }
else if(i<=(N*N)/2-1) { vx[i] = vx2[i]; vy[i] = vy2[i]; }
else if(i<=3*(N*N)/4-1) { vx[i] = vx3[i]; vy[i] = vy3[i]; }
else { vx[i] = vx4[i]; vy[i] = vy4[i]; }
}
// release the temporary quarter buffers so repeated calls do not leak host memory
free(vx1); free(vx2); free(vx3); free(vx4);
free(vy1); free(vy2); free(vy3); free(vy4);
}
// Clean up
free(xh); free(yh);
free(fxh); free(fyh);
hipFree(xd); hipFree(yd);
hipFree(Fxd); hipFree(Fyd);
hipFree(Gxd); hipFree(Gyd);
hipFree(Vxd); hipFree(Vyd);
}
// Compute polygonal area
double area(double x[N][N], double y[N][N]) {
double A = 0.0f;
int k1; int k2; // k1 = k; k2 = k+1
double x_ext[4*N-4]; double y_ext[4*N-4]; // exterior points
/* Check that for shear we obtain a perfectly rotated, uniformly discretized grid
// check horizontal sides
for(int j=0; j<N; j++) {
for(int i=0; i<N-1; i++) {
double x_current = x[i][j]; double x_next = x[i+1][j];
double y_current = y[i][j]; double y_next = y[i+1][j];
double horiz_side = sqrtf(powf(x_current-x_next,2) + powf(y_current-y_next,2));
printf("h(%d, %d) = %f\n", i, j, horiz_side);
}
}
// check vertical sides
for(int i=0; i<N; i++) {
for(int j=0; j<N-1; j++) {
double x_current = x[i][j]; double x_next = x[i][j+1];
double y_current = y[i][j]; double y_next = y[i][j+1];
double vert_side = fabs(y_next - y_current);
printf("v(%d, %d) = %f\n", i, j, vert_side);
}
}
*/
// rearrange points in vector that contain exterior points
for(int j=0; j<N; j++) {
for(int i=0; i<N; i++) {
if(j==0) { x_ext[i] = x[i][j]; y_ext[i] = y[i][j]; } // top (j = 0)
else if((i==(N-1)) && (j!=0)) { x_ext[j+i] = x[i][j]; y_ext[j+i] = y[i][j]; } // right
else if((j==(N-1)) && (i!=(N-1))) { x_ext[3*j-i] = x[i][j]; y_ext[3*j-i] = y[i][j]; } // bottom
else if((i==0) && (j!=0) && (j!=(N-1))) { x_ext[4*(N-1)-j] = x[i][j]; y_ext[4*(N-1)-j] = y[i][j]; } // left
}
}
for(int k=0; k<(4*N-4); k++) {
k1 = k;
if(k1 == (4*N-5)) { k2 = 0; }
else k2 = k+1;
A += 0.5f * (x_ext[k1]*y_ext[k2]-x_ext[k2]*y_ext[k1]);
}
return A;
}
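/*
 * area() is the shoelace (Gauss) formula applied to the 4N-4 exterior nodes taken in
 * order around the perimeter:  A = (1/2) * sum_k (x_k*y_{k+1} - x_{k+1}*y_k), indices
 * wrapping modulo 4N-4. Sanity check on a unit square with corners (0,0),(1,0),(1,1),(0,1):
 * the terms are 0 + 1 + 1 + 0 = 2, so |A| = 1. The caller takes fabs(), so the sign
 * introduced by the traversal direction does not matter.
 */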
// Progression
void progress(double x[N][N], double y[N][N]) {
int size = N*N;
double x_i[N][N]; double y_i[N][N];
double velx[size]; double vely[size];// velocity vector array of size N^2
vector ef; // elastic force
double p;
double ftime = 0.0f;
// file handling
ofstream f1, f2;
//ofstream f3;
f1.open("initial_conf.txt"); f2.open("final_conf.txt"); //f3.open("debug_conf.txt");
// saving initial configuration
for(int j=0; j<N; j++) {
for(int i=0; i<N; i++) {
x_i[i][j] = x[i][j]; y_i[i][j] = y[i][j]; // saving initial configuration for initial area calculation
f1 << x_i[i][j] << " " << y_i[i][j] << endl;
}
}
for(int t=0; t<tstop; t++) {
// fill/compute velocity array
velocity(x, y, velx, vely, size);
for(int j=0; j<N; j++) {
for(int i=0; i<N; i++) {
clock_t fbegin = clock();
ef = eforce(i, j, x, y);
clock_t fend = clock();
x[i][j] = x[i][j] + tstep*(velx[i+j*N] + ef.x/drag);
y[i][j] = y[i][j] + tstep*(vely[i+j*N] + ef.y/drag);
double partf = diffclock(fend, fbegin);
ftime = ftime + partf;
if(t==tstop-1) { f2 << x[i][j] << " " << y[i][j] << endl; }
//f3 << "tstep: " << tstep << endl;
//f3 << t << ": vx = " << velx[i+j*N] << ", vy = " << vely[i+j*N] << ", fx = " << ef.x << ", fy = " << ef.y << endl;
//f3 << t << ": x = " << x[i][j] << ", y = " << y[i][j] << endl;
}
}
}
f1.close(); f2.close(); //f3.close();
// compute final area
printf("Total focing computation time (s): %.10f\n", ftime);
printf("Starting area: %.16f, Final area: %.16f\n", fabs(area(x_i,y_i)), fabs(area(x,y)) );
}
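/*
 * The position update inside progress() is a forward (explicit) Euler step of
 *   dx/dt = u(x) + f(x)/drag,   i.e.   x_{n+1} = x_n + tstep * ( velx + ef.x/drag )
 * and likewise for y. Worked example with illustrative values: tstep = 1e-4 and
 * drag = 1e-4 (the constants above), velx = 0.2 and ef.x = 1e-5 give an x-increment
 * of 1e-4 * (0.2 + 0.1) = 3e-5 per step.
 */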
// Draw starting configuration
void startCurve() {
double xf[N][N]; double yf[N][N];
///*
// stretch uniformly in each direction
for(int j=0; j<N; j++) {
for(int i=0; i<N; i++) {
double dx = ri/(N-1); double dy = ri/(N-1);
xf[i][j] = -ri/2 + dx*i;
yf[i][j] = ri/2 - dy*j;
}
}
//*/
/*
// stretch in y-direction only
for(int j=0; j<N; j++) {
for(int i=0; i<N; i++) {
double dx = r0/(N-1); double dy = ri/(N-1);
xf[i][j] = -r0/2 + dx*i;
yf[i][j] = ri/2 - dy*j;
}
}
*/
/*
// shear in both directions
double lambda = 0.5f; // shear element
for(int j=0; j<N; j++) {
for(int i=0; i<N; i++) {
double dx = r0/(N-1); double dy = r0/(N-1);
yf[i][j] = r0/2 - dy*j;
xf[i][j] = -r0/2 + dx*i + lambda*yf[i][j];
}
}
*/
progress(xf, yf);
}
// Main
int main(int argc, char **argv) {
clock_t begin = clock();
startCurve();
clock_t end = clock();
printf("GPU computation time (ms): %.10f \n", gpuTime);
printf("Total computation time (s): %.10f\n", double(diffclock(end,begin)));
return 0;
}
|
bd6d514999a25375dc768302a4369ec8fe6caee6.cu
|
/* slip.cu
* GPU Benchmark Immersed Boundary Structured Grid
* Based on "The Method of Regularized Stokeslets" by R. Cortez
* C.Copos 12/05/2012
*/
#include <iostream>
#include <fstream>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <cstdio>
#include <cstdlib>
#include <stdio.h>
#include <cuda.h>
#include "slip_kernel.cu"
using namespace std;
const double PI = 3.14159265358979323846264338f;
const int N = 128; // # side square partitions
const double visc = 1.0f;
const double flow = 1.0f;
const double drag = 0.0001f;
// time
const double TMAX = 0.1f;/*0.008f;*/
const double tstep = 0.0001f;/*0.0001f;*/
static int tstop = floor(TMAX/tstep);
// curve constants
const double r0 = 1.0f;
const double ri = 1.2f;
const double ds = ri/(N-1); const double e = 1.2f*ds; // e: parameter determining width of blobs or cutoffs
// vector structure
struct vector{
double x; // x-component
double y; // y-component
};
// spring constants
const double kl = 1.0f*ds/*0.1f*ds*/;
const double ks = 0.5f*kl; // from victor camacho's elasticity paper
// gpu timing
double gpuTime = 0.0f;
// Compute time difference in seconds
double diffclock(clock_t s, clock_t e) {
double diffticks = s-e;
double diffsecs = (diffticks)/CLOCKS_PER_SEC; // clock ticks converted to seconds
return diffsecs;
}
// Compute the net elastic force at point ij due to its nearest neighboring points
vector eforce(int i, int j, double x[N][N], double y[N][N]) {
vector f; // final force
double rl = r0/(N-1); // rest length for longitudinal springs
double rd = sqrtf(2.0f)*rl; // rest length for diagonal springs
int i_0, i_1, i_2, j_0, j_1, j_2;
i_0 = i-1;
i_1 = i;
i_2 = i+1;
j_0 = j-1;
j_1 = j;
j_2 = j+1;
double dlk_00, dlk_01, dlk_02, dlk_10, dlk_12, dlk_20, dlk_21, dlk_22;
vector f_00, f_01, f_02, f_10, f_12, f_20, f_21, f_22;
// distance between point (i,j) = (i_1,j_1) and points ...
// top left corner
if (i_1==0 && j_1==0) {
dlk_00 = 0.0f; dlk_10 = 0.0f; dlk_20 = 0.0f; dlk_01 = 0.0f; dlk_02 = 0.0f;
dlk_21 = sqrtf(powf(x[i_1][j_1]-x[i_2][j_1],2) + powf(y[i_1][j_1]-y[i_2][j_1],2));
dlk_12 = sqrtf(powf(x[i_1][j_1]-x[i_1][j_2],2) + powf(y[i_1][j_1]-y[i_1][j_2],2));
dlk_22 = sqrtf(powf(x[i_1][j_1]-x[i_2][j_2],2) + powf(y[i_1][j_1]-y[i_2][j_2],2));
}
// top right corner
else if (i_1==(N-1) && j_1==0) {
dlk_00 = 0.0f; dlk_10 = 0.0f; dlk_20 = 0.0f; dlk_21 = 0.0f; dlk_22 = 0.0f;
dlk_01 = sqrtf(powf(x[i_1][j_1]-x[i_0][j_1],2) + powf(y[i_1][j_1]-y[i_0][j_1],2));
dlk_02 = sqrtf(powf(x[i_1][j_1]-x[i_0][j_2],2) + powf(y[i_1][j_1]-y[i_0][j_2],2));
dlk_12 = sqrtf(powf(x[i_1][j_1]-x[i_1][j_2],2) + powf(y[i_1][j_1]-y[i_1][j_2],2));
}
// bottom left corner
else if (i_1==0 && j_1==(N-1)) {
dlk_00 = 0.0f; dlk_01 = 0.0f; dlk_02 = 0.0f; dlk_12 = 0.0f; dlk_22 = 0.0f;
dlk_10 = sqrtf(powf(x[i_1][j_1]-x[i_1][j_0],2) + powf(y[i_1][j_1]-y[i_1][j_0],2));
dlk_20 = sqrtf(powf(x[i_1][j_1]-x[i_2][j_0],2) + powf(y[i_1][j_1]-y[i_2][j_0],2));
dlk_21 = sqrtf(powf(x[i_1][j_1]-x[i_2][j_1],2) + powf(y[i_1][j_1]-y[i_2][j_1],2));
}
// bottom right corner
else if (i_1==(N-1) && j_1==(N-1)) {
dlk_20 = 0.0f; dlk_21 = 0.0f; dlk_22 = 0.0f; dlk_12 = 0.0f; dlk_02 = 0.0f;
dlk_00 = sqrtf(powf(x[i_1][j_1]-x[i_0][j_0],2) + powf(y[i_1][j_1]-y[i_0][j_0],2));
dlk_10 = sqrtf(powf(x[i_1][j_1]-x[i_1][j_0],2) + powf(y[i_1][j_1]-y[i_1][j_0],2));
dlk_01 = sqrtf(powf(x[i_1][j_1]-x[i_0][j_1],2) + powf(y[i_1][j_1]-y[i_0][j_1],2));
}
// top edge
else if (j_1==0) {
dlk_00 = 0.0f; dlk_10 = 0.0f; dlk_20 = 0.0f;
dlk_01 = sqrtf(powf(x[i_1][j_1]-x[i_0][j_1],2) + powf(y[i_1][j_1]-y[i_0][j_1],2));
dlk_21 = sqrtf(powf(x[i_1][j_1]-x[i_2][j_1],2) + powf(y[i_1][j_1]-y[i_2][j_1],2));
dlk_02 = sqrtf(powf(x[i_1][j_1]-x[i_0][j_2],2) + powf(y[i_1][j_1]-y[i_0][j_2],2));
dlk_12 = sqrtf(powf(x[i_1][j_1]-x[i_1][j_2],2) + powf(y[i_1][j_1]-y[i_1][j_2],2));
dlk_22 = sqrtf(powf(x[i_1][j_1]-x[i_2][j_2],2) + powf(y[i_1][j_1]-y[i_2][j_2],2));
}
// right edge
else if (i_1==(N-1)) {
dlk_20 = 0.0f; dlk_21 = 0.0f; dlk_22 = 0.0f;
dlk_00 = sqrtf(powf(x[i_1][j_1]-x[i_0][j_0],2) + powf(y[i_1][j_1]-y[i_0][j_0],2));
dlk_10 = sqrtf(powf(x[i_1][j_1]-x[i_1][j_0],2) + powf(y[i_1][j_1]-y[i_1][j_0],2));
dlk_01 = sqrtf(powf(x[i_1][j_1]-x[i_0][j_1],2) + powf(y[i_1][j_1]-y[i_0][j_1],2));
dlk_02 = sqrtf(powf(x[i_1][j_1]-x[i_0][j_2],2) + powf(y[i_1][j_1]-y[i_0][j_2],2));
dlk_12 = sqrtf(powf(x[i_1][j_1]-x[i_1][j_2],2) + powf(y[i_1][j_1]-y[i_1][j_2],2));
}
// bottom edge
else if (j_1==(N-1)) {
dlk_02 = 0.0f; dlk_12 = 0.0f; dlk_22 = 0.0f;
dlk_00 = sqrtf(powf(x[i_1][j_1]-x[i_0][j_0],2) + powf(y[i_1][j_1]-y[i_0][j_0],2));
dlk_10 = sqrtf(powf(x[i_1][j_1]-x[i_1][j_0],2) + powf(y[i_1][j_1]-y[i_1][j_0],2));
dlk_20 = sqrtf(powf(x[i_1][j_1]-x[i_2][j_0],2) + powf(y[i_1][j_1]-y[i_2][j_0],2));
dlk_01 = sqrtf(powf(x[i_1][j_1]-x[i_0][j_1],2) + powf(y[i_1][j_1]-y[i_0][j_1],2));
dlk_21 = sqrtf(powf(x[i_1][j_1]-x[i_2][j_1],2) + powf(y[i_1][j_1]-y[i_2][j_1],2));
}
// left edge
else if (i_1==0) {
dlk_00 = 0.0f; dlk_01 = 0.0f; dlk_02 = 0.0f;
dlk_10 = sqrtf(powf(x[i_1][j_1]-x[i_1][j_0],2) + powf(y[i_1][j_1]-y[i_1][j_0],2));
dlk_20 = sqrtf(powf(x[i_1][j_1]-x[i_2][j_0],2) + powf(y[i_1][j_1]-y[i_2][j_0],2));
dlk_21 = sqrtf(powf(x[i_1][j_1]-x[i_2][j_1],2) + powf(y[i_1][j_1]-y[i_2][j_1],2));
dlk_12 = sqrtf(powf(x[i_1][j_1]-x[i_1][j_2],2) + powf(y[i_1][j_1]-y[i_1][j_2],2));
dlk_22 = sqrtf(powf(x[i_1][j_1]-x[i_2][j_2],2) + powf(y[i_1][j_1]-y[i_2][j_2],2));
}
// interior
else {
dlk_00 = sqrtf(powf(x[i_1][j_1]-x[i_0][j_0],2) + powf(y[i_1][j_1]-y[i_0][j_0],2));
dlk_10 = sqrtf(powf(x[i_1][j_1]-x[i_1][j_0],2) + powf(y[i_1][j_1]-y[i_1][j_0],2));
dlk_20 = sqrtf(powf(x[i_1][j_1]-x[i_2][j_0],2) + powf(y[i_1][j_1]-y[i_2][j_0],2));
dlk_01 = sqrtf(powf(x[i_1][j_1]-x[i_0][j_1],2) + powf(y[i_1][j_1]-y[i_0][j_1],2));
dlk_21 = sqrtf(powf(x[i_1][j_1]-x[i_2][j_1],2) + powf(y[i_1][j_1]-y[i_2][j_1],2));
dlk_02 = sqrtf(powf(x[i_1][j_1]-x[i_0][j_2],2) + powf(y[i_1][j_1]-y[i_0][j_2],2));
dlk_12 = sqrtf(powf(x[i_1][j_1]-x[i_1][j_2],2) + powf(y[i_1][j_1]-y[i_1][j_2],2));
dlk_22 = sqrtf(powf(x[i_1][j_1]-x[i_2][j_2],2) + powf(y[i_1][j_1]-y[i_2][j_2],2));
}
// finally, compute forces
if (dlk_00 == 0.0f) { f_00.x = 0.0f; f_00.y = 0.0f; }
else {
f_00.x = -1.0f*ks*(dlk_00-rd)*((x[i_1][j_1] - x[i_0][j_0])/dlk_00);
f_00.y = -1.0f*ks*(dlk_00-rd)*((y[i_1][j_1] - y[i_0][j_0])/dlk_00);
}
if (dlk_10 == 0.0f) { f_10.x = 0.0f; f_10.y = 0.0f; }
else {
f_10.x = -1.0f*kl*(dlk_10-rl)*((x[i_1][j_1] - x[i_1][j_0])/dlk_10);
f_10.y = 1.0f*kl*(dlk_10-rl)*((-y[i_1][j_1] + y[i_1][j_0])/dlk_10);
}
if (dlk_20 == 0.0f) { f_20.x = 0.0f; f_20.y = 0.0f; }
else {
f_20.x = -1.0f*ks*(dlk_20-rd)*((x[i_1][j_1] - x[i_2][j_0])/dlk_20);
f_20.y = -1.0f*ks*(dlk_20-rd)*((y[i_1][j_1] - y[i_2][j_0])/dlk_20);
}
if (dlk_01 == 0.0f) { f_01.x = 0.0f; f_01.y = 0.0f; }
else {
f_01.x = -1.0f*kl*(dlk_01-rl)*((x[i_1][j_1] - x[i_0][j_1])/dlk_01);
f_01.y = 1.0f*kl*(dlk_01-rl)*((-y[i_1][j_1] + y[i_0][j_1])/dlk_01);
}
if (dlk_21 == 0.0f) { f_21.x = 0.0f; f_21.y = 0.0f; }
else {
f_21.x = -1.0f*kl*(dlk_21-rl)*((x[i_1][j_1] - x[i_2][j_1])/dlk_21);
f_21.y = 1.0f*kl*(dlk_21-rl)*((-y[i_1][j_1] + y[i_2][j_1])/dlk_21);
}
if (dlk_02 == 0.0f) { f_02.x = 0.0f; f_02.y = 0.0f; }
else {
f_02.x = -1.0f*ks*(dlk_02-rd)*((x[i_1][j_1] - x[i_0][j_2])/dlk_02);
f_02.y = -1.0f*ks*(dlk_02-rd)*((y[i_1][j_1] - y[i_0][j_2])/dlk_02);
}
if (dlk_12 == 0.0f) { f_12.x = 0.0f; f_12.y = 0.0f; }
else {
f_12.x = -1.0f*kl*(dlk_12-rl)*((x[i_1][j_1] - x[i_1][j_2])/dlk_12);
f_12.y = 1.0f*kl*(dlk_12-rl)*((-y[i_1][j_1] + y[i_1][j_2])/dlk_12);
}
if (dlk_22 == 0.0f) { f_22.x = 0.0f; f_22.y = 0.0f; }
else {
f_22.x = -1.0f*ks*(dlk_22-rd)*((x[i_1][j_1] - x[i_2][j_2])/dlk_22);
f_22.y = -1.0f*ks*(dlk_22-rd)*((y[i_1][j_1] - y[i_2][j_2])/dlk_22);
}
// evaluate final force components
f.x = (double)(f_00.x + f_10.x + f_20.x + f_01.x + f_21.x + f_02.x + f_12.x + f_22.x);
f.y = (double)(f_00.y + f_10.y + f_20.y + f_01.y + f_21.y + f_02.y + f_12.y + f_22.y);
// what's going on with the forces?
//printf("%f %f %f %f\n", x[i][j], y[i][j], f.x, f.y);
///*
//printf("Force @ position: (%f,%f) is: (%f,%f)\n", x[i][j], y[i][j], f.x, f.y);
//printf("Force due to (0,0) neighbor is: (%f,%f)\n", f_00.x, f_00.y);
//printf("Force due to (1,0) neighbor is: (%f,%f)\n", f_10.x, f_10.y);
//printf("Force due to (2,0) neighbor is: (%f,%f)\n", f_20.x, f_20.y);
//printf("Force due to (0,1) neighbor is: (%f,%f)\n", f_01.x, f_01.y);
//printf("Force due to (2,1) neighbor is: (%f,%f)\n", f_21.x, f_21.y);
//printf("Force due to (0,2) neighbor is: (%f,%f)\n", f_02.x, f_02.y);
//printf("Force due to (1,2) neighbor is: (%f,%f)\n", f_12.x, f_12.y);
//printf("Force due to (2,2) neighbor is: (%f,%f)\n", f_22.x, f_22.y);
//*/
return f;
}
// Compute pressure at point ij due to all other points in the structured grid
double pressure(int i, int j, double x[N][N], double y[N][N]) {
double p = 0.0f; vector f; // p accumulates contributions below, so it must start at zero
double pp = 0.0f; // partial pressure
double rk = 0.0f; double sq = 0.0f;
for (int jj=0; jj<N; jj++) { // loop over all nodes in the grid
for (int ii=0; ii<N; ii++) {
f.x = eforce(ii, jj, x, y).x;
f.y = eforce(ii, jj, x, y).y;
double r = sqrtf(powf(x[i][j],2) + powf(y[i][j],2));
double pk = sqrtf(powf(x[ii][jj],2)+powf(y[ii][jj],2));
double theta = atan2f(y[i][j], x[i][j]);
double thetaj = atan2f(y[ii][jj], x[ii][jj]);
double dtheta;
if (theta>PI) { dtheta = thetaj + 2*PI - theta; }
else { dtheta = theta - thetaj; }
// dealing with rounding off errors
if (dtheta > -0.000001 && dtheta < 0.000001) { rk = 0.0f; }
else { rk = sqrtf(powf(r,2) + powf(pk,2) - 2*r*pk*cosf(dtheta)); } // rk^2 = |x-xk|^2 = r^2 + pk^2 - 2*r*pk*cos(theta-thetak) where r=|x|, pk=|xk|
sq = sqrtf(powf(rk,2)+powf(e,2));
double h = (1.0f)/(2.0f*PI) * (powf(rk,2)+2.0f*powf(e,2)+e*sq)/((sq+e)*powf(sq,1.5f));
pp = (f.x*(x[i][j]-x[ii][jj]) + f.y*(y[i][j]-y[ii][jj])) * h;
p += pp;
}
}
return p;
}
// Compute velocity vector for all points in the grid
void velocity(double x[N][N], double y[N][N], double vx[], double vy[], int size) {
int mem_size = N*N;
// Allocate host memory for flatten xh, yh
double *xh = (double*) malloc(mem_size*sizeof(double));
double *yh = (double*) malloc(mem_size*sizeof(double));
// Flatten 2D arrays on host (going by rows)
for(int j=0; j<N; j++) {
for(int i=0; i<N; i++) {
xh[i+j*N] = x[i][j];
yh[i+j*N] = y[i][j];
}
}
// Allocate host memory for full grid force
double *fxh = (double*) malloc(mem_size*sizeof(double));
double *fyh = (double*) malloc(mem_size*sizeof(double));
for(int j=0; j<N; j++) {
for(int i=0; i<N; i++) {
fxh[i+j*N] = eforce(i, j, x, y).x;
fyh[i+j*N] = eforce(i, j, x, y).y;
}
}
// Allocate host memory for "result" (i.e. velocity array)
if (size != mem_size) { printf("Incorrect velocity array dimensions were declared in progression\n"); }
// Allocate device memory for x, y, F, v, and G (where G is the Stokeslet matrix)
double *xd, *yd, *Fxd, *Fyd, *Vxd, *Vyd, *Gxd, *Gyd;
cudaMalloc((void**) &xd, mem_size*sizeof(double));
cudaMalloc((void**) &yd, mem_size*sizeof(double));
cudaMalloc((void**) &Fxd, mem_size*sizeof(double));
cudaMalloc((void**) &Fyd, mem_size*sizeof(double));
cudaMalloc((void**) &Vxd, mem_size*sizeof(double));
cudaMalloc((void**) &Vyd, mem_size*sizeof(double));
cudaMalloc((void**) &Gxd, mem_size*mem_size*sizeof(double));
cudaMalloc((void**) &Gyd, mem_size*mem_size*sizeof(double));
// Initialize Stokeslet to zero
cudaMemset(Gxd, 0.0f, mem_size*mem_size*sizeof(double));
cudaMemset(Gyd, 0.0f, mem_size*mem_size*sizeof(double));
// Copy position and force arrays to allocated device memory locations
cudaMemcpy(xd, xh, mem_size*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(yd, yh, mem_size*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(Fxd, fxh, mem_size*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(Fyd, fyh, mem_size*sizeof(double), cudaMemcpyHostToDevice);
cudaEvent_t start, stop; float elapsedTime;
cudaEventCreate(&start);
cudaEventRecord(start, 0);
// Perform Stokeslet computation
if(N<128) {
dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
dim3 grid(width/threads.x , height/threads.y);
SlipKernel<<< grid, threads >>>(xd, yd, Fxd, Fyd, Vxd, Vyd, Gxd, Gyd, N, visc, e, PI);
cudaEventCreate(&stop);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
// Copy the result from device to host
cudaMemcpy(vx, Vxd, mem_size*sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(vy, Vyd, mem_size*sizeof(double), cudaMemcpyDeviceToHost);
//for(int i=0; i<mem_size; i++) {
//printf("(vx, vy) = (%.10f,%.10f)\n", vx[i], vy[i]);
//}
// Report timing
cudaEventElapsedTime(&elapsedTime, start, stop);
gpuTime += elapsedTime;
}
else if (N==128) {
double *vx1 = (double*) malloc((mem_size/4)*sizeof(double));
double *vx2 = (double*) malloc((mem_size/4)*sizeof(double));
double *vx3 = (double*) malloc((mem_size/4)*sizeof(double));
double *vx4 = (double*) malloc((mem_size/4)*sizeof(double));
double *vy1 = (double*) malloc((mem_size/4)*sizeof(double));
double *vy2 = (double*) malloc((mem_size/4)*sizeof(double));
double *vy3 = (double*) malloc((mem_size/4)*sizeof(double));
double *vy4 = (double*) malloc((mem_size/4)*sizeof(double));
for(int i=0; i<4; i++) {
dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
dim3 grid((width/4)/threads.x , (height/4)/threads.y);
SlipKernel<<< grid, threads >>>(xd, yd, Fxd, Fyd, Vxd, Vyd, Gxd, Gyd, 64, visc, e, PI);
// Copy the result from device to host
if (i==0) {
cudaMemcpy(vx1, Vxd, (mem_size/4)*sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(vy1, Vyd, (mem_size/4)*sizeof(double), cudaMemcpyDeviceToHost);
}
else if (i==1) {
cudaMemcpy(vx2, Vxd, (mem_size/4)*sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(vy2, Vyd, (mem_size/4)*sizeof(double), cudaMemcpyDeviceToHost);
}
else if (i==2) {
cudaMemcpy(vx3, Vxd, (mem_size/4)*sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(vy3, Vyd, (mem_size/4)*sizeof(double), cudaMemcpyDeviceToHost);
}
else {
cudaMemcpy(vx4, Vxd, (mem_size/4)*sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(vy4, Vyd, (mem_size/4)*sizeof(double), cudaMemcpyDeviceToHost);
}
//for(int i=0; i<mem_size; i++) {
//printf("(vx, vy) = (%.10f,%.10f)\n", vx[i], vy[i]);
//}
}
cudaEventCreate(&stop);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
// Report timing
cudaEventElapsedTime(&elapsedTime, start, stop);
gpuTime += elapsedTime;
// Put together arrays
for(int i=0; i<N*N; i++) {
if(i<=(N*N)/4-1) { vx[i] = vx1[i]; vy[i] = vy1[i]; }
else if(i<=(N*N)/2-1) { vx[i] = vx2[i]; vy[i] = vy2[i]; }
else if(i<=3*(N*N)/4-1) { vx[i] = vx3[i]; vy[i] = vy3[i]; }
else { vx[i] = vx4[i]; vy[i] = vy4[i]; }
}
// release the temporary quarter buffers so repeated calls do not leak host memory
free(vx1); free(vx2); free(vx3); free(vx4);
free(vy1); free(vy2); free(vy3); free(vy4);
}
// Clean up
free(xh); free(yh);
free(fxh); free(fyh);
cudaFree(xd); cudaFree(yd);
cudaFree(Fxd); cudaFree(Fyd);
cudaFree(Gxd); cudaFree(Gyd);
cudaFree(Vxd); cudaFree(Vyd);
}
// Compute polygonal area
double area(double x[N][N], double y[N][N]) {
double A = 0.0f;
int k1; int k2; // k1 = k; k2 = k+1
double x_ext[4*N-4]; double y_ext[4*N-4]; // exterior points
/* Check that for shear we obtain a perfectly rotated, uniformly discretized grid
// check horizontal sides
for(int j=0; j<N; j++) {
for(int i=0; i<N-1; i++) {
double x_current = x[i][j]; double x_next = x[i+1][j];
double y_current = y[i][j]; double y_next = y[i+1][j];
double horiz_side = sqrtf(powf(x_current-x_next,2) + powf(y_current-y_next,2));
printf("h(%d, %d) = %f\n", i, j, horiz_side);
}
}
// check vertical sides
for(int i=0; i<N; i++) {
for(int j=0; j<N-1; j++) {
double x_current = x[i][j]; double x_next = x[i][j+1];
double y_current = y[i][j]; double y_next = y[i][j+1];
double vert_side = fabs(y_next - y_current);
printf("v(%d, %d) = %f\n", i, j, vert_side);
}
}
*/
// rearrange points in vector that contain exterior points
for(int j=0; j<N; j++) {
for(int i=0; i<N; i++) {
if(j==0) { x_ext[i] = x[i][j]; y_ext[i] = y[i][j]; } // top (j = 0)
else if((i==(N-1)) && (j!=0)) { x_ext[j+i] = x[i][j]; y_ext[j+i] = y[i][j]; } // right
else if((j==(N-1)) && (i!=(N-1))) { x_ext[3*j-i] = x[i][j]; y_ext[3*j-i] = y[i][j]; } // bottom
else if((i==0) && (j!=0) && (j!=(N-1))) { x_ext[4*(N-1)-j] = x[i][j]; y_ext[4*(N-1)-j] = y[i][j]; } // left
}
}
for(int k=0; k<(4*N-4); k++) {
k1 = k;
if(k1 == (4*N-5)) { k2 = 0; }
else k2 = k+1;
A += 0.5f * (x_ext[k1]*y_ext[k2]-x_ext[k2]*y_ext[k1]);
}
return A;
}
// Progression
void progress(double x[N][N], double y[N][N]) {
int size = N*N;
double x_i[N][N]; double y_i[N][N];
double velx[size]; double vely[size];// velocity vector array of size N^2
vector ef; // elastic force
double p;
double ftime = 0.0f;
// file handling
ofstream f1, f2;
//ofstream f3;
f1.open("initial_conf.txt"); f2.open("final_conf.txt"); //f3.open("debug_conf.txt");
// saving initial configuration
for(int j=0; j<N; j++) {
for(int i=0; i<N; i++) {
x_i[i][j] = x[i][j]; y_i[i][j] = y[i][j]; // saving initial configuration for initial area calculation
f1 << x_i[i][j] << " " << y_i[i][j] << endl;
}
}
for(int t=0; t<tstop; t++) {
// fill/compute velocity array
velocity(x, y, velx, vely, size);
for(int j=0; j<N; j++) {
for(int i=0; i<N; i++) {
clock_t fbegin = clock();
ef = eforce(i, j, x, y);
clock_t fend = clock();
x[i][j] = x[i][j] + tstep*(velx[i+j*N] + ef.x/drag);
y[i][j] = y[i][j] + tstep*(vely[i+j*N] + ef.y/drag);
double partf = diffclock(fend, fbegin);
ftime = ftime + partf;
if(t==tstop-1) { f2 << x[i][j] << " " << y[i][j] << endl; }
//f3 << "tstep: " << tstep << endl;
//f3 << t << ": vx = " << velx[i+j*N] << ", vy = " << vely[i+j*N] << ", fx = " << ef.x << ", fy = " << ef.y << endl;
//f3 << t << ": x = " << x[i][j] << ", y = " << y[i][j] << endl;
}
}
}
f1.close(); f2.close(); //f3.close();
// compute final area
printf("Total focing computation time (s): %.10f\n", ftime);
printf("Starting area: %.16f, Final area: %.16f\n", fabs(area(x_i,y_i)), fabs(area(x,y)) );
}
// Draw starting configuration
void startCurve() {
double xf[N][N]; double yf[N][N];
///*
// stretch uniformly in each direction
for(int j=0; j<N; j++) {
for(int i=0; i<N; i++) {
double dx = ri/(N-1); double dy = ri/(N-1);
xf[i][j] = -ri/2 + dx*i;
yf[i][j] = ri/2 - dy*j;
}
}
//*/
/*
// stretch in y-direction only
for(int j=0; j<N; j++) {
for(int i=0; i<N; i++) {
double dx = r0/(N-1); double dy = ri/(N-1);
xf[i][j] = -r0/2 + dx*i;
yf[i][j] = ri/2 - dy*j;
}
}
*/
/*
// shear in both directions
double lambda = 0.5f; // shear element
for(int j=0; j<N; j++) {
for(int i=0; i<N; i++) {
double dx = r0/(N-1); double dy = r0/(N-1);
yf[i][j] = r0/2 - dy*j;
xf[i][j] = -r0/2 + dx*i + lambda*yf[i][j];
}
}
*/
progress(xf, yf);
}
// Main
int main(int argc, char **argv) {
clock_t begin = clock();
startCurve();
clock_t end = clock();
printf("GPU computation time (ms): %.10f \n", gpuTime);
printf("Total computation time (s): %.10f\n", double(diffclock(end,begin)));
return 0;
}
|
bc63def8e7f23bbdf7cd214035d74e9aeb276109.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#ifdef RD_WG_SIZE_0_0
#define BLOCK_SIZE RD_WG_SIZE_0_0
#elif defined(RD_WG_SIZE_0)
#define BLOCK_SIZE RD_WG_SIZE_0
#elif defined(RD_WG_SIZE)
#define BLOCK_SIZE RD_WG_SIZE
#else
#define BLOCK_SIZE 16
#endif
#define STR_SIZE 256
/* maximum power density possible (say 300W for a 10mm x 10mm chip) */
#define MAX_PD (3.0e6)
/* required precision in degrees */
#define PRECISION 0.001
#define SPEC_HEAT_SI 1.75e6
#define K_SI 100
/* capacitance fitting factor */
#define FACTOR_CHIP 0.5
/* chip parameters */
float t_chip = 0.0005;
float chip_height = 0.016;
float chip_width = 0.016;
/* ambient temperature, assuming no package at all */
float amb_temp = 80.0;
void run(int argc, char **argv);
/* define timer macros */
#define pin_stats_reset() startCycle()
#define pin_stats_pause(cycles) stopCycle(cycles)
#define pin_stats_dump(cycles) printf("timer: %Lu\n", cycles)
void fatal(char *s) { fprintf(stderr, "error: %s\n", s); }
void writeoutput(float *vect, int grid_rows, int grid_cols, char *file) {
int i, j, index = 0;
FILE *fp;
char str[STR_SIZE];
if ((fp = fopen(file, "w")) == 0)
printf("The file was not opened\n");
for (i = 0; i < grid_rows; i++)
for (j = 0; j < grid_cols; j++) {
sprintf(str, "%d\t%g\n", index, vect[i * grid_cols + j]);
fputs(str, fp);
index++;
}
fclose(fp);
}
void readinput(float *vect, int grid_rows, int grid_cols, char *file) {
int i, j;
FILE *fp;
char str[STR_SIZE];
float val;
if ((fp = fopen(file, "r")) == 0)
printf("The file was not opened\n");
for (i = 0; i <= grid_rows - 1; i++)
for (j = 0; j <= grid_cols - 1; j++) {
fgets(str, STR_SIZE, fp);
if (feof(fp))
fatal("not enough lines in file");
// if ((sscanf(str, "%d%f", &index, &val) != 2) || (index !=
// ((i-1)*(grid_cols-2)+j-1)))
if ((sscanf(str, "%f", &val) != 1))
fatal("invalid file format");
vect[i * grid_cols + j] = val;
}
fclose(fp);
}
#define IN_RANGE(x, min, max) ((x) >= (min) && (x) <= (max))
#define CLAMP_RANGE(x, min, max) x = (x < (min)) ? min : ((x > (max)) ? max : x)
#define MIN(a, b) ((a) <= (b) ? (a) : (b))
__global__ void calculate_temp(int iteration, // number of iteration
float *power, // power input
float *temp_src, // temperature input/output
float *temp_dst, // temperature input/output
int grid_cols, // Col of grid
int grid_rows, // Row of grid
int border_cols, // border offset
int border_rows, // border offset
float Cap, // Capacitance
float Rx, float Ry, float Rz, float step,
float time_elapsed) {
__shared__ float temp_on_cuda[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float power_on_cuda[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float temp_t[BLOCK_SIZE]
[BLOCK_SIZE]; // saving temporary temperature result
float amb_temp = 80.0;
float step_div_Cap;
float Rx_1, Ry_1, Rz_1;
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
step_div_Cap = step / Cap;
Rx_1 = 1 / Rx;
Ry_1 = 1 / Ry;
Rz_1 = 1 / Rz;
// each block finally computes result for a small block
// after N iterations.
// it is the non-overlapping small blocks that cover
// all the input data
// calculate the small block size
int small_block_rows = BLOCK_SIZE - iteration * 2; // EXPAND_RATE
int small_block_cols = BLOCK_SIZE - iteration * 2; // EXPAND_RATE
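// Worked example of the pyramid sizing: with BLOCK_SIZE = 16 and iteration = 2, each
// thread block loads a 16x16 tile but only the inner (16 - 2*2) = 12x12 core is valid
// output; the 2-cell halo on every side is consumed by the two iterations performed
// entirely in shared memory.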
// calculate the boundary for the block according to
// the boundary of its small block
int blkY = small_block_rows * by - border_rows;
int blkX = small_block_cols * bx - border_cols;
int blkYmax = blkY + BLOCK_SIZE - 1;
int blkXmax = blkX + BLOCK_SIZE - 1;
// calculate the global thread coordination
int yidx = blkY + ty;
int xidx = blkX + tx;
// load data if it is within the valid input range
int loadYidx = yidx, loadXidx = xidx;
int index = grid_cols * loadYidx + loadXidx;
if (IN_RANGE(loadYidx, 0, grid_rows - 1) &&
IN_RANGE(loadXidx, 0, grid_cols - 1)) {
temp_on_cuda[ty][tx] = temp_src[index]; // Load the temperature data from
// global memory to shared memory
power_on_cuda[ty][tx] =
power[index]; // Load the power data from global memory to shared memory
}
__syncthreads();
// effective range within this block that falls within
// the valid range of the input data
// used to rule out computation outside the boundary.
int validYmin = (blkY < 0) ? -blkY : 0;
int validYmax = (blkYmax > grid_rows - 1)
? BLOCK_SIZE - 1 - (blkYmax - grid_rows + 1)
: BLOCK_SIZE - 1;
int validXmin = (blkX < 0) ? -blkX : 0;
int validXmax = (blkXmax > grid_cols - 1)
? BLOCK_SIZE - 1 - (blkXmax - grid_cols + 1)
: BLOCK_SIZE - 1;
int N = ty - 1;
int S = ty + 1;
int W = tx - 1;
int E = tx + 1;
N = (N < validYmin) ? validYmin : N;
S = (S > validYmax) ? validYmax : S;
W = (W < validXmin) ? validXmin : W;
E = (E > validXmax) ? validXmax : E;
bool computed;
for (int i = 0; i < iteration; i++) {
computed = false;
if (IN_RANGE(tx, i + 1, BLOCK_SIZE - i - 2) &&
IN_RANGE(ty, i + 1, BLOCK_SIZE - i - 2) &&
IN_RANGE(tx, validXmin, validXmax) &&
IN_RANGE(ty, validYmin, validYmax)) {
computed = true;
temp_t[ty][tx] =
temp_on_cuda[ty][tx] +
step_div_Cap * (power_on_cuda[ty][tx] +
(temp_on_cuda[S][tx] + temp_on_cuda[N][tx] -
2.0 * temp_on_cuda[ty][tx]) *
Ry_1 +
(temp_on_cuda[ty][E] + temp_on_cuda[ty][W] -
2.0 * temp_on_cuda[ty][tx]) *
Rx_1 +
(amb_temp - temp_on_cuda[ty][tx]) * Rz_1);
}
__syncthreads();
if (i == iteration - 1)
break;
if (computed) // Assign the computation range
temp_on_cuda[ty][tx] = temp_t[ty][tx];
__syncthreads();
}
// update the global memory
// after the last iteration, only threads coordinated within the
// small block perform the calculation and switch on ``computed''
if (computed) {
temp_dst[index] = temp_t[ty][tx];
}
}
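/*
 * Per-cell update performed by the kernel, written out once (same symbols as above):
 *   T' = T + (step/Cap) * ( P + (T_S + T_N - 2T)/Ry + (T_E + T_W - 2T)/Rx + (amb_temp - T)/Rz )
 * i.e. an explicit time step of the thermal RC network: conduction to the vertical and
 * horizontal neighbors through Ry and Rx, leakage to ambient through Rz, all scaled by
 * the per-cell capacitance Cap.
 */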
/*
compute N time steps
*/
int compute_tran_temp(float *MatrixPower, float *MatrixTemp[2], int col,
int row, int total_iterations, int num_iterations,
int blockCols, int blockRows, int borderCols,
int borderRows) {
#ifdef PREF
hipStream_t stream1;
hipStream_t stream2;
hipStream_t stream3;
hipStream_t stream4;
hipStreamCreate(&stream1);
hipStreamCreate(&stream2);
hipStreamCreate(&stream3);
hipStreamCreate(&stream4);
hipMemPrefetchAsync(MatrixPower,row*col*sizeof(float), 0, stream1 );
hipMemPrefetchAsync(MatrixTemp[0],sizeof(float)*row*col, 0, stream2 );
hipMemPrefetchAsync(MatrixTemp[1],sizeof(float)*row*col, 0, stream3 );
hipStreamSynchronize(stream1);
hipStreamSynchronize(stream2);
hipStreamSynchronize(stream3);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid(blockCols, blockRows);
float grid_height = chip_height / row;
float grid_width = chip_width / col;
float Cap = FACTOR_CHIP * SPEC_HEAT_SI * t_chip * grid_width * grid_height;
float Rx = grid_width / (2.0 * K_SI * t_chip * grid_height);
float Ry = grid_height / (2.0 * K_SI * t_chip * grid_width);
float Rz = t_chip / (K_SI * grid_height * grid_width);
float max_slope = MAX_PD / (FACTOR_CHIP * t_chip * SPEC_HEAT_SI);
float step = PRECISION / max_slope;
float t;
float time_elapsed;
time_elapsed = 0.001;
int src = 1, dst = 0;
for (t = 0; t < total_iterations; t += num_iterations) {
int temp = src;
src = dst;
dst = temp;
hipLaunchKernelGGL(( calculate_temp), dim3(dimGrid), dim3(dimBlock),0, stream4,
MIN(num_iterations, total_iterations - t), MatrixPower, MatrixTemp[src],
MatrixTemp[dst], col, row, borderCols, borderRows, Cap, Rx, Ry, Rz,
step, time_elapsed);
hipDeviceSynchronize();
}
return dst;
#else
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid(blockCols, blockRows);
float grid_height = chip_height / row;
float grid_width = chip_width / col;
float Cap = FACTOR_CHIP * SPEC_HEAT_SI * t_chip * grid_width * grid_height;
float Rx = grid_width / (2.0 * K_SI * t_chip * grid_height);
float Ry = grid_height / (2.0 * K_SI * t_chip * grid_width);
float Rz = t_chip / (K_SI * grid_height * grid_width);
float max_slope = MAX_PD / (FACTOR_CHIP * t_chip * SPEC_HEAT_SI);
float step = PRECISION / max_slope;
float t;
float time_elapsed;
time_elapsed = 0.001;
int src = 1, dst = 0;
for (t = 0; t < total_iterations; t += num_iterations) {
int temp = src;
src = dst;
dst = temp;
hipLaunchKernelGGL(( calculate_temp), dim3(dimGrid), dim3(dimBlock), 0, 0,
MIN(num_iterations, total_iterations - t), MatrixPower, MatrixTemp[src],
MatrixTemp[dst], col, row, borderCols, borderRows, Cap, Rx, Ry, Rz,
step, time_elapsed);
hipDeviceSynchronize();
}
return dst;
#endif
}
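// Usage note: MatrixTemp[0] and MatrixTemp[1] act as ping-pong buffers. With the defaults
// set in run() (total_iterations = 60, pyramid_height = 1, unless overridden on the command
// line) the loop above issues 60 launches of calculate_temp, swapping src and dst before
// each one, and returns the index of the buffer holding the final temperatures.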
void usage(int argc, char **argv) {
fprintf(stderr, "Usage: %s <grid_rows/grid_cols> <pyramid_height> <sim_time> "
"<temp_file> <power_file> <output_file>\n",
argv[0]);
fprintf(stderr, "\t<grid_rows/grid_cols> - number of rows/cols in the grid "
"(positive integer)\n");
fprintf(stderr, "\t<pyramid_height> - pyramid heigh(positive integer)\n");
fprintf(stderr, "\t<sim_time> - number of iterations\n");
fprintf(stderr, "\t<temp_file> - name of the file containing the initial "
"temperature values of each cell\n");
fprintf(stderr, "\t<power_file> - name of the file containing the dissipated "
"power values of each cell\n");
fprintf(stderr, "\t<output_file> - name of the output file\n");
exit(1);
}
int main(int argc, char **argv) {
printf("WG size of kernel = %d X %d\n", BLOCK_SIZE, BLOCK_SIZE);
run(argc, argv);
return EXIT_SUCCESS;
}
void run(int argc, char **argv) {
int size;
int grid_rows, grid_cols;
float *FilesavingTemp, *FilesavingPower, *MatrixOut;
char *tfile, *pfile, *ofile;
int total_iterations = 60;
int pyramid_height = 1; // number of iterations
if (argc != 7)
usage(argc, argv);
if ((grid_rows = atoi(argv[1])) <= 0 || (grid_cols = atoi(argv[1])) <= 0 ||
(pyramid_height = atoi(argv[2])) <= 0 ||
(total_iterations = atoi(argv[3])) <= 0)
usage(argc, argv);
tfile = argv[4];
pfile = argv[5];
ofile = argv[6];
size = grid_rows * grid_cols;
/* --------------- pyramid parameters --------------- */
#define EXPAND_RATE \
2 // add one iteration will extend the pyramid base by 2 per each borderline
int borderCols = (pyramid_height)*EXPAND_RATE / 2;
int borderRows = (pyramid_height)*EXPAND_RATE / 2;
int smallBlockCol = BLOCK_SIZE - (pyramid_height)*EXPAND_RATE;
int smallBlockRow = BLOCK_SIZE - (pyramid_height)*EXPAND_RATE;
int blockCols =
grid_cols / smallBlockCol + ((grid_cols % smallBlockCol == 0) ? 0 : 1);
int blockRows =
grid_rows / smallBlockRow + ((grid_rows % smallBlockRow == 0) ? 0 : 1);
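// Worked example of the pyramid parameters: pyramid_height = 1 and EXPAND_RATE = 2 give
// borderCols = borderRows = 1 and smallBlockCol = smallBlockRow = BLOCK_SIZE - 2 = 14
// (for BLOCK_SIZE = 16); e.g. a 512x512 grid then needs blockCols = blockRows =
// ceil(512/14) = 37 thread blocks per dimension.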
float *MatrixTemp[2], *MatrixPower;
// FilesavingTemp = (float *)malloc(size * sizeof(float));
hipMallocManaged(&MatrixTemp[0], sizeof(float) * size);
hipMallocManaged(&MatrixTemp[1], sizeof(float) * size);
// FilesavingPower = (float *)malloc(size * sizeof(float));
MatrixOut = (float *)calloc(size, sizeof(float));
if (!MatrixTemp[1] || !MatrixTemp[0] || !MatrixOut)
fatal("unable to allocate memory");
printf("pyramidHeight: %d\ngridSize: [%d, %d]\nborder:[%d, "
"%d]\nblockGrid:[%d, %d]\ntargetBlock:[%d, %d]\n",
pyramid_height, grid_cols, grid_rows, borderCols, borderRows,
blockCols, blockRows, smallBlockCol, smallBlockRow);
readinput(MatrixTemp[0], grid_rows, grid_cols, tfile);
readinput(MatrixTemp[1], grid_rows, grid_cols, pfile);
// hipMalloc((void**)&MatrixTemp[0], sizeof(float)*size);
// hipMalloc((void**)&MatrixTemp[1], sizeof(float)*size);
// hipMemcpy(MatrixTemp[0], FilesavingTemp, sizeof(float) * size,
// hipMemcpyHostToDevice);
hipMallocManaged(&MatrixPower, sizeof(float) * size);
// hipMemcpy(MatrixPower, FilesavingPower, sizeof(float) * size,
// hipMemcpyHostToDevice);
printf("Start computing the transient temperature\n");
int ret= 0;
for (int i = 0; i < 1; i++){
ret = compute_tran_temp(MatrixPower, MatrixTemp, grid_cols, grid_rows,
total_iterations, pyramid_height, blockCols,
blockRows, borderCols, borderRows);
printf("Ending simulation\n");
}
// hipMemcpy(MatrixOut, MatrixTemp[ret], sizeof(float) * size,
// hipMemcpyDeviceToHost);
writeoutput(MatrixTemp[ret], grid_rows, grid_cols, ofile);
hipFree(MatrixPower);
hipFree(MatrixTemp[0]);
hipFree(MatrixTemp[1]);
// free(MatrixOut);
}
|
bc63def8e7f23bbdf7cd214035d74e9aeb276109.cu
|
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#ifdef RD_WG_SIZE_0_0
#define BLOCK_SIZE RD_WG_SIZE_0_0
#elif defined(RD_WG_SIZE_0)
#define BLOCK_SIZE RD_WG_SIZE_0
#elif defined(RD_WG_SIZE)
#define BLOCK_SIZE RD_WG_SIZE
#else
#define BLOCK_SIZE 16
#endif
#define STR_SIZE 256
/* maximum power density possible (say 300W for a 10mm x 10mm chip) */
#define MAX_PD (3.0e6)
/* required precision in degrees */
#define PRECISION 0.001
#define SPEC_HEAT_SI 1.75e6
#define K_SI 100
/* capacitance fitting factor */
#define FACTOR_CHIP 0.5
/* chip parameters */
float t_chip = 0.0005;
float chip_height = 0.016;
float chip_width = 0.016;
/* ambient temperature, assuming no package at all */
float amb_temp = 80.0;
void run(int argc, char **argv);
/* define timer macros */
#define pin_stats_reset() startCycle()
#define pin_stats_pause(cycles) stopCycle(cycles)
#define pin_stats_dump(cycles) printf("timer: %Lu\n", cycles)
void fatal(char *s) { fprintf(stderr, "error: %s\n", s); }
void writeoutput(float *vect, int grid_rows, int grid_cols, char *file) {
int i, j, index = 0;
FILE *fp;
char str[STR_SIZE];
if ((fp = fopen(file, "w")) == 0)
printf("The file was not opened\n");
for (i = 0; i < grid_rows; i++)
for (j = 0; j < grid_cols; j++) {
sprintf(str, "%d\t%g\n", index, vect[i * grid_cols + j]);
fputs(str, fp);
index++;
}
fclose(fp);
}
void readinput(float *vect, int grid_rows, int grid_cols, char *file) {
int i, j;
FILE *fp;
char str[STR_SIZE];
float val;
if ((fp = fopen(file, "r")) == 0)
printf("The file was not opened\n");
for (i = 0; i <= grid_rows - 1; i++)
for (j = 0; j <= grid_cols - 1; j++) {
fgets(str, STR_SIZE, fp);
if (feof(fp))
fatal("not enough lines in file");
// if ((sscanf(str, "%d%f", &index, &val) != 2) || (index !=
// ((i-1)*(grid_cols-2)+j-1)))
if ((sscanf(str, "%f", &val) != 1))
fatal("invalid file format");
vect[i * grid_cols + j] = val;
}
fclose(fp);
}
#define IN_RANGE(x, min, max) ((x) >= (min) && (x) <= (max))
#define CLAMP_RANGE(x, min, max) x = (x < (min)) ? min : ((x > (max)) ? max : x)
#define MIN(a, b) ((a) <= (b) ? (a) : (b))
__global__ void calculate_temp(int iteration, // number of iteration
float *power, // power input
float *temp_src, // temperature input/output
float *temp_dst, // temperature input/output
int grid_cols, // Col of grid
int grid_rows, // Row of grid
int border_cols, // border offset
int border_rows, // border offset
float Cap, // Capacitance
float Rx, float Ry, float Rz, float step,
float time_elapsed) {
__shared__ float temp_on_cuda[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float power_on_cuda[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float temp_t[BLOCK_SIZE]
[BLOCK_SIZE]; // saving temporary temperature result
float amb_temp = 80.0;
float step_div_Cap;
float Rx_1, Ry_1, Rz_1;
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
step_div_Cap = step / Cap;
Rx_1 = 1 / Rx;
Ry_1 = 1 / Ry;
Rz_1 = 1 / Rz;
// each block finally computes result for a small block
// after N iterations.
// it is the non-overlapping small blocks that cover
// all the input data
// calculate the small block size
int small_block_rows = BLOCK_SIZE - iteration * 2; // EXPAND_RATE
int small_block_cols = BLOCK_SIZE - iteration * 2; // EXPAND_RATE
// calculate the boundary for the block according to
// the boundary of its small block
int blkY = small_block_rows * by - border_rows;
int blkX = small_block_cols * bx - border_cols;
int blkYmax = blkY + BLOCK_SIZE - 1;
int blkXmax = blkX + BLOCK_SIZE - 1;
// calculate the global thread coordination
int yidx = blkY + ty;
int xidx = blkX + tx;
// load data if it is within the valid input range
int loadYidx = yidx, loadXidx = xidx;
int index = grid_cols * loadYidx + loadXidx;
if (IN_RANGE(loadYidx, 0, grid_rows - 1) &&
IN_RANGE(loadXidx, 0, grid_cols - 1)) {
temp_on_cuda[ty][tx] = temp_src[index]; // Load the temperature data from
// global memory to shared memory
power_on_cuda[ty][tx] =
power[index]; // Load the power data from global memory to shared memory
}
__syncthreads();
// effective range within this block that falls within
// the valid range of the input data
// used to rule out computation outside the boundary.
int validYmin = (blkY < 0) ? -blkY : 0;
int validYmax = (blkYmax > grid_rows - 1)
? BLOCK_SIZE - 1 - (blkYmax - grid_rows + 1)
: BLOCK_SIZE - 1;
int validXmin = (blkX < 0) ? -blkX : 0;
int validXmax = (blkXmax > grid_cols - 1)
? BLOCK_SIZE - 1 - (blkXmax - grid_cols + 1)
: BLOCK_SIZE - 1;
int N = ty - 1;
int S = ty + 1;
int W = tx - 1;
int E = tx + 1;
N = (N < validYmin) ? validYmin : N;
S = (S > validYmax) ? validYmax : S;
W = (W < validXmin) ? validXmin : W;
E = (E > validXmax) ? validXmax : E;
bool computed;
for (int i = 0; i < iteration; i++) {
computed = false;
if (IN_RANGE(tx, i + 1, BLOCK_SIZE - i - 2) &&
IN_RANGE(ty, i + 1, BLOCK_SIZE - i - 2) &&
IN_RANGE(tx, validXmin, validXmax) &&
IN_RANGE(ty, validYmin, validYmax)) {
computed = true;
temp_t[ty][tx] =
temp_on_cuda[ty][tx] +
step_div_Cap * (power_on_cuda[ty][tx] +
(temp_on_cuda[S][tx] + temp_on_cuda[N][tx] -
2.0 * temp_on_cuda[ty][tx]) *
Ry_1 +
(temp_on_cuda[ty][E] + temp_on_cuda[ty][W] -
2.0 * temp_on_cuda[ty][tx]) *
Rx_1 +
(amb_temp - temp_on_cuda[ty][tx]) * Rz_1);
}
__syncthreads();
if (i == iteration - 1)
break;
if (computed) // Assign the computation range
temp_on_cuda[ty][tx] = temp_t[ty][tx];
__syncthreads();
}
// update the global memory
// after the last iteration, only threads coordinated within the
// small block perform the calculation and switch on ``computed''
if (computed) {
temp_dst[index] = temp_t[ty][tx];
}
}
/*
compute N time steps
*/
int compute_tran_temp(float *MatrixPower, float *MatrixTemp[2], int col,
int row, int total_iterations, int num_iterations,
int blockCols, int blockRows, int borderCols,
int borderRows) {
#ifdef PREF
cudaStream_t stream1;
cudaStream_t stream2;
cudaStream_t stream3;
cudaStream_t stream4;
cudaStreamCreate(&stream1);
cudaStreamCreate(&stream2);
cudaStreamCreate(&stream3);
cudaStreamCreate(&stream4);
cudaMemPrefetchAsync(MatrixPower,row*col*sizeof(float), 0, stream1 );
cudaMemPrefetchAsync(MatrixTemp[0],sizeof(float)*row*col, 0, stream2 );
cudaMemPrefetchAsync(MatrixTemp[1],sizeof(float)*row*col, 0, stream3 );
cudaStreamSynchronize(stream1);
cudaStreamSynchronize(stream2);
cudaStreamSynchronize(stream3);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid(blockCols, blockRows);
float grid_height = chip_height / row;
float grid_width = chip_width / col;
float Cap = FACTOR_CHIP * SPEC_HEAT_SI * t_chip * grid_width * grid_height;
float Rx = grid_width / (2.0 * K_SI * t_chip * grid_height);
float Ry = grid_height / (2.0 * K_SI * t_chip * grid_width);
float Rz = t_chip / (K_SI * grid_height * grid_width);
float max_slope = MAX_PD / (FACTOR_CHIP * t_chip * SPEC_HEAT_SI);
float step = PRECISION / max_slope;
float t;
float time_elapsed;
time_elapsed = 0.001;
int src = 1, dst = 0;
for (t = 0; t < total_iterations; t += num_iterations) {
int temp = src;
src = dst;
dst = temp;
calculate_temp<<<dimGrid, dimBlock,0, stream4>>>(
MIN(num_iterations, total_iterations - t), MatrixPower, MatrixTemp[src],
MatrixTemp[dst], col, row, borderCols, borderRows, Cap, Rx, Ry, Rz,
step, time_elapsed);
cudaDeviceSynchronize();
}
return dst;
#else
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid(blockCols, blockRows);
float grid_height = chip_height / row;
float grid_width = chip_width / col;
float Cap = FACTOR_CHIP * SPEC_HEAT_SI * t_chip * grid_width * grid_height;
float Rx = grid_width / (2.0 * K_SI * t_chip * grid_height);
float Ry = grid_height / (2.0 * K_SI * t_chip * grid_width);
float Rz = t_chip / (K_SI * grid_height * grid_width);
float max_slope = MAX_PD / (FACTOR_CHIP * t_chip * SPEC_HEAT_SI);
float step = PRECISION / max_slope;
float t;
float time_elapsed;
time_elapsed = 0.001;
int src = 1, dst = 0;
for (t = 0; t < total_iterations; t += num_iterations) {
int temp = src;
src = dst;
dst = temp;
calculate_temp<<<dimGrid, dimBlock>>>(
MIN(num_iterations, total_iterations - t), MatrixPower, MatrixTemp[src],
MatrixTemp[dst], col, row, borderCols, borderRows, Cap, Rx, Ry, Rz,
step, time_elapsed);
cudaDeviceSynchronize();
}
return dst;
#endif
}
void usage(int argc, char **argv) {
fprintf(stderr, "Usage: %s <grid_rows/grid_cols> <pyramid_height> <sim_time> "
"<temp_file> <power_file> <output_file>\n",
argv[0]);
fprintf(stderr, "\t<grid_rows/grid_cols> - number of rows/cols in the grid "
"(positive integer)\n");
fprintf(stderr, "\t<pyramid_height> - pyramid heigh(positive integer)\n");
fprintf(stderr, "\t<sim_time> - number of iterations\n");
fprintf(stderr, "\t<temp_file> - name of the file containing the initial "
"temperature values of each cell\n");
fprintf(stderr, "\t<power_file> - name of the file containing the dissipated "
"power values of each cell\n");
fprintf(stderr, "\t<output_file> - name of the output file\n");
exit(1);
}
int main(int argc, char **argv) {
printf("WG size of kernel = %d X %d\n", BLOCK_SIZE, BLOCK_SIZE);
run(argc, argv);
return EXIT_SUCCESS;
}
void run(int argc, char **argv) {
int size;
int grid_rows, grid_cols;
float *FilesavingTemp, *FilesavingPower, *MatrixOut;
char *tfile, *pfile, *ofile;
int total_iterations = 60;
int pyramid_height = 1; // number of iterations
if (argc != 7)
usage(argc, argv);
if ((grid_rows = atoi(argv[1])) <= 0 || (grid_cols = atoi(argv[1])) <= 0 ||
(pyramid_height = atoi(argv[2])) <= 0 ||
(total_iterations = atoi(argv[3])) <= 0)
usage(argc, argv);
tfile = argv[4];
pfile = argv[5];
ofile = argv[6];
size = grid_rows * grid_cols;
/* --------------- pyramid parameters --------------- */
#define EXPAND_RATE \
2 // add one iteration will extend the pyramid base by 2 per each borderline
int borderCols = (pyramid_height)*EXPAND_RATE / 2;
int borderRows = (pyramid_height)*EXPAND_RATE / 2;
int smallBlockCol = BLOCK_SIZE - (pyramid_height)*EXPAND_RATE;
int smallBlockRow = BLOCK_SIZE - (pyramid_height)*EXPAND_RATE;
int blockCols =
grid_cols / smallBlockCol + ((grid_cols % smallBlockCol == 0) ? 0 : 1);
int blockRows =
grid_rows / smallBlockRow + ((grid_rows % smallBlockRow == 0) ? 0 : 1);
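// Illustrative sizing (BLOCK_SIZE assumed to be 16 here): with
// pyramid_height = 2 and EXPAND_RATE = 2, the border is 2 cells per side, so
// each 16x16 thread block yields a (16 - 4) x (16 - 4) = 12x12 tile of valid
// results, and blockCols/blockRows count these 12-wide "small blocks".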
float *MatrixTemp[2], *MatrixPower;
// FilesavingTemp = (float *)malloc(size * sizeof(float));
cudaMallocManaged(&MatrixTemp[0], sizeof(float) * size);
cudaMallocManaged(&MatrixTemp[1], sizeof(float) * size);
// FilesavingPower = (float *)malloc(size * sizeof(float));
MatrixOut = (float *)calloc(size, sizeof(float));
if (!MatrixTemp[1] || !MatrixTemp[0] || !MatrixOut)
fatal("unable to allocate memory");
printf("pyramidHeight: %d\ngridSize: [%d, %d]\nborder:[%d, "
"%d]\nblockGrid:[%d, %d]\ntargetBlock:[%d, %d]\n",
pyramid_height, grid_cols, grid_rows, borderCols, borderRows,
blockCols, blockRows, smallBlockCol, smallBlockRow);
readinput(MatrixTemp[0], grid_rows, grid_cols, tfile);
// cudaMalloc((void**)&MatrixTemp[0], sizeof(float)*size);
// cudaMalloc((void**)&MatrixTemp[1], sizeof(float)*size);
// cudaMemcpy(MatrixTemp[0], FilesavingTemp, sizeof(float) * size,
// cudaMemcpyHostToDevice);
cudaMallocManaged(&MatrixPower, sizeof(float) * size);
// Read the dissipated power values directly into the managed power buffer so
// MatrixPower is initialized before it is passed to compute_tran_temp.
readinput(MatrixPower, grid_rows, grid_cols, pfile);
// cudaMemcpy(MatrixPower, FilesavingPower, sizeof(float) * size,
// cudaMemcpyHostToDevice);
printf("Start computing the transient temperature\n");
int ret= 0;
for (int i = 0; i < 1; i++){
ret = compute_tran_temp(MatrixPower, MatrixTemp, grid_cols, grid_rows,
total_iterations, pyramid_height, blockCols,
blockRows, borderCols, borderRows);
printf("Ending simulation\n");
}
// cudaMemcpy(MatrixOut, MatrixTemp[ret], sizeof(float) * size,
// cudaMemcpyDeviceToHost);
writeoutput(MatrixTemp[ret], grid_rows, grid_cols, ofile);
cudaFree(MatrixPower);
cudaFree(MatrixTemp[0]);
cudaFree(MatrixTemp[1]);
// free(MatrixOut);
}
|
efc12103e69d5d4a30893b7c7c1bec713372042f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/THCTensorRandom.cu"
#else
#define NUM_BLOCKS min((int)THCCeilDiv(size, (ptrdiff_t) BLOCK_SIZE), MAX_NUM_BLOCKS)
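// NUM_BLOCKS caps the launch at MAX_NUM_BLOCKS; e.g. (illustrative numbers) a
// tensor of 1,000,000 elements with BLOCK_SIZE = 256 would request
// ceil(1000000 / 256) = 3907 blocks, clamped to MAX_NUM_BLOCKS when that is
// smaller.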
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF)
THC_API void THCTensor_(uniform)(THCState* state, THCTensor *self_, double a, double b)
{
THAssert(THCTensor_(checkGPU)(state, 1, self_));
Generator* gen = THCRandom_getGenerator(state);
THCTensor *self = THCTensor_(newContiguous)(state, self_);
ptrdiff_t size = THCTensor_(nElement)(state, self);
real *data = THCTensor_(data)(state, self);
hipLaunchKernelGGL(( generate_uniform), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state),
gen->gen_states, size, data, a, b);
THCTensor_(freeCopyTo)(state, self, self_);
};
THC_API void THCTensor_(normal)(THCState* state, THCTensor *self_, double mean, double stdv)
{
THAssert(THCTensor_(checkGPU)(state, 1, self_));
Generator* gen = THCRandom_getGenerator(state);
THCTensor *self = THCTensor_(newContiguous)(state, self_);
ptrdiff_t size = THCTensor_(nElement)(state, self);
real *data = THCTensor_(data)(state, self);
hipLaunchKernelGGL(( generate_normal), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state),
gen->gen_states, size, data, mean, stdv);
THCTensor_(freeCopyTo)(state, self, self_);
};
THC_API void THCTensor_(logNormal)(THCState* state, THCTensor *self_, double mean, double stdv)
{
THAssert(THCTensor_(checkGPU)(state, 1, self_));
Generator* gen = THCRandom_getGenerator(state);
THCTensor *self = THCTensor_(newContiguous)(state, self_);
ptrdiff_t size = THCTensor_(nElement)(state, self);
real *data = THCTensor_(data)(state, self);
hipLaunchKernelGGL(( generateLogNormal<real>), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state),
gen->gen_states, size, data, mean, stdv);
THCTensor_(freeCopyTo)(state, self, self_);
};
THC_API void THCTensor_(exponential)(THCState* state, THCTensor *self_, double lambda)
{
THAssert(THCTensor_(checkGPU)(state, 1, self_));
Generator* gen = THCRandom_getGenerator(state);
THCTensor *self = THCTensor_(newContiguous)(state, self_);
ptrdiff_t size = THCTensor_(nElement)(state, self);
real *data = THCTensor_(data)(state, self);
hipLaunchKernelGGL(( generate_exponential), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state),
gen->gen_states, size, data, lambda);
THCTensor_(freeCopyTo)(state, self, self_);
};
THC_API void THCTensor_(cauchy)(THCState* state, THCTensor *self_, double median, double sigma)
{
THAssert(THCTensor_(checkGPU)(state, 1, self_));
Generator* gen = THCRandom_getGenerator(state);
THCTensor *self = THCTensor_(newContiguous)(state, self_);
ptrdiff_t size = THCTensor_(nElement)(state, self);
real *data = THCTensor_(data)(state, self);
hipLaunchKernelGGL(( generate_cauchy), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state),
gen->gen_states, size, data, median, sigma);
THCTensor_(freeCopyTo)(state, self, self_);
};
void THCTensor_(renormRows)(struct THCState* state,
THCTensor* t) {
THAssert(THCTensor_(nDimension)(state, t) == 2);
long rows = THCTensor_(size)(state, t, 0);
long cols = THCTensor_(size)(state, t, 1);
hipDeviceProp_t* props = THCState_getCurrentDeviceProperties(state);
THAssert(props != NULL);
int numSM = props->multiProcessorCount;
int maxThreads = props->maxThreadsPerBlock;
dim3 grid(rows < numSM * 4 ? rows : numSM * 4);
dim3 block(cols < maxThreads ? cols : maxThreads);
hipLaunchKernelGGL(( renormRowsL1<real>)
, dim3(grid), dim3(block), block.x * sizeof(real),
THCState_getCurrentStream(state), THCTensor_(data)(state, t),
rows, cols);
}
THC_API void THCTensor_(multinomial)(struct THCState *state,
THCudaLongTensor *self,
THCTensor *prob_dist,
int n_sample,
int with_replacement)
{
THAssert(THCTensor_(checkGPU)(state, 2, self, prob_dist));
Generator* gen = THCRandom_getGenerator(state);
int inputSize = THCTensor_(nDimension)(state, prob_dist);
THArgCheck(inputSize > 0 && inputSize <= 2, 2,
"prob_dist must be 1 or 2 dim");
// Categories are in the innermost dimension
long numDist =
inputSize == 1 ? 1 : THCTensor_(size)(state, prob_dist, 0);
long numCategoriesLong =
inputSize == 1 ? THCTensor_(size)(state, prob_dist, 0) :
THCTensor_(size)(state, prob_dist, 1);
// Since the index tensor is float, numCategories cannot exceed max
// float integer precision
THArgCheck(numCategoriesLong <= FLOAT32_MAX_CONSECUTIVE_INT, 2,
"number of categories cannot exceed 2^24");
int numCategories = (int) numCategoriesLong;
THArgCheck(n_sample > 0, 3, "cannot sample <= 0 samples");
if (!with_replacement) {
THArgCheck(n_sample <= numCategories, 2,
"cannot sample n_sample > prob_dist:size(1) samples without "
"replacement");
}
// It is possible that prob_dist is non-contiguous
THCTensor* probDistContig =
THCTensor_(newContiguous)(state, prob_dist);
// Restructure data for 2d
if (inputSize == 1) {
THCTensor_(resize2d)(state, probDistContig, 1, numCategories);
}
THCudaLongTensor_resize2d(state, self, numDist, n_sample);
if (n_sample == 1) {
// Optimized allocation-free implementation
// To exploit greater parallelism for the sampling, generate the
// Uniform random samples in a separate kernel launch, into
// temporarily allocated memory. The device RNG is thread-limited
THCTensor *sampled = THCTensor_(newWithSize2d)(state, numDist, n_sample);
THCTensor_(uniform)(state, sampled, 0.0, 1.0);
hipDeviceProp_t* props = THCState_getCurrentDeviceProperties(state);
THAssert(props != NULL);
int numSM = props->multiProcessorCount;
int maxThreads = props->maxThreadsPerBlock;
dim3 block(numCategories < maxThreads ? numCategories : maxThreads);
dim3 grid(numDist < numSM * 4 ? numDist : numSM * 4);
hipLaunchKernelGGL(( sampleMultinomialOnce)
, dim3(grid), dim3(block), block.x * sizeof(real),
THCState_getCurrentStream(state),
THCudaLongTensor_data(state, self),
numDist,
numCategories,
THCTensor_(data)(state, sampled),
THCTensor_(data)(state, probDistContig));
THCTensor_(free)(state, sampled);
} else {
// Generic, slow implementation with memory allocations
// For sampling without replacement, we modify the distribution
// for subsequent samples in this space
THCTensor* origDist = THCTensor_(new)(state);
THCTensor_(resizeAs)(state, origDist, probDistContig);
THCTensor_(copy)(state, origDist, probDistContig);
THCTensor* normDist = THCTensor_(new)(state);
THCTensor_(resizeAs)(state, normDist, probDistContig);
THCTensor* prefixSum = THCTensor_(new)(state);
// Renorm along rows
THCTensor_(copy)(state, normDist, origDist);
THCTensor_(renormRows)(state, normDist);
// Prefix sum along rows
THCTensor_(cumsum)(state, prefixSum, normDist, 1);
if (with_replacement) {
// Sample with replacement
// Binary search is warp divergent (so effectively we're running
// with just a single thread), but for better utilization,
// we need each block to have at least 4 warps.
dim3 block(32, 4);
// Each warp in a block will generate a sample from one
// distribution concurrently.
dim3 grid(numDist < MAX_NUM_BLOCKS ? numDist : MAX_NUM_BLOCKS);
hipLaunchKernelGGL(( sampleMultinomialWithReplacement)
, dim3(grid), dim3(block), 0, THCState_getCurrentStream(state),
gen->gen_states,
n_sample,
THCudaLongTensor_data(state, self),
numDist, numCategories,
THCTensor_(data)(state, prefixSum));
} else {
// Sample without replacement
// Binary search is warp divergent (so effectively we're running
// with just a single thread), but for better utilization,
// we need each block to have at least 4 warps.
dim3 block(32, 4);
// Each warp in a block will generate a sample from a different
// distribution concurrently.
ptrdiff_t numBlocks = THCCeilDiv(numDist, 4L);
dim3 grid(numBlocks < MAX_NUM_BLOCKS ? numBlocks : MAX_NUM_BLOCKS);
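// One warp samples one distribution and each block holds 4 warps (32x4
// threads), so ceil(numDist / 4) blocks cover every distribution, capped at
// MAX_NUM_BLOCKS.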
for (int sample = 0; sample < n_sample; ++sample) {
if (sample > 0) {
// Update probabilities
// Renorm along rows
THCTensor_(copy)(state, normDist, origDist);
THCTensor_(renormRows)(state, normDist);
// Prefix sum along rows
THCTensor_(cumsum)(state, prefixSum, normDist, 1);
}
// The kernel can only draw one sample before we have to
// recalculate our distribution
hipLaunchKernelGGL(( sampleMultinomialWithoutReplacement)
, dim3(grid), dim3(block), 0, THCState_getCurrentStream(state),
gen->gen_states,
n_sample,
sample,
THCudaLongTensor_data(state, self),
numDist, numCategories,
THCTensor_(data)(state, origDist),
THCTensor_(data)(state, prefixSum));
}
}
THCTensor_(free)(state, prefixSum);
THCTensor_(free)(state, normDist);
THCTensor_(free)(state, origDist);
}
// Revert data restructuring based on input sizes
if (inputSize == 1) {
THCudaLongTensor_resize1d(state, self, n_sample);
// Unfortunately, if prob_dist is contiguous already,
// newContiguous is not a private copy, so we have to restructure
// this too, so as to not affect prob_dist
THCTensor_(resize1d)(state, probDistContig, numCategories);
}
THCTensor_(free)(state, probDistContig);
}
THC_API void THCTensor_(rand)(THCState *state, THCTensor *r_, THLongStorage *size)
{
THAssert(THCTensor_(checkGPU)(state, 1, r_));
THCTensor_(resize)(state, r_, size, NULL);
THCTensor_(uniform)(state, r_, 0, 1);
}
void THCTensor_(randn)(THCState *state, THCTensor *r_, THLongStorage *size)
{
THAssert(THCTensor_(checkGPU)(state, 1, r_));
THCTensor_(resize)(state, r_, size, NULL);
THCTensor_(normal)(state, r_, 0, 1);
}
#endif
#if defined(THC_REAL_IS_DOUBLE)
GENERATE_KERNEL1(generate_bernoulli, double, double p, double, hiprand_uniform_double, x <= p)
#else
GENERATE_KERNEL1(generate_bernoulli, real, double p, float, hiprand_uniform, (ScalarConvert<bool, real>::to(x <= p)))
#endif
THC_API void THCTensor_(bernoulli)(THCState* state, THCTensor *self_, double p)
{
THAssert(THCTensor_(checkGPU)(state, 1, self_));
Generator* gen = THCRandom_getGenerator(state);
THCTensor *self = THCTensor_(newContiguous)(state, self_);
ptrdiff_t size = THCTensor_(nElement)(state, self);
real *data = THCTensor_(data)(state, self);
hipLaunchKernelGGL(( generate_bernoulli), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state),
gen->gen_states, size, data, p);
THCTensor_(freeCopyTo)(state, self, self_);
};
#define DEFINE_BERNOULLI_TENSOR(NAME, PROB_TYPE, PROB_DATA_TYPE) \
THC_API void THCTensor_(NAME)(THCState* state, \
THCTensor *self_, PROB_TYPE *probs_) \
{ \
THAssert(THCTensor_(checkGPU)(state, 2, self_, probs_)); \
Generator* gen = THCRandom_getGenerator(state); \
THCTensor *self = THCTensor_(newContiguous)(state, self_); \
PROB_TYPE *probs = PROB_TYPE##_newContiguous(state, probs_); \
ptrdiff_t size = THCTensor_(nElement)(state, self); \
ptrdiff_t prob_size = PROB_TYPE##_nElement(state, probs); \
real *result_data = THCTensor_(data)(state, self); \
PROB_DATA_TYPE *probs_data = PROB_TYPE##_data(state, probs); \
\
THArgCheck(size == prob_size, 3, "inconsistent tensor size"); \
\
hipLaunchKernelGGL(( generate_bernoulli_tensor), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state), \
gen->gen_states, size, result_data, probs_data); \
\
PROB_TYPE##_free(state, probs); \
THCTensor_(freeCopyTo)(state, self, self_); \
}
DEFINE_BERNOULLI_TENSOR(bernoulli_FloatTensor, THCudaTensor, float)
DEFINE_BERNOULLI_TENSOR(bernoulli_DoubleTensor, THCudaDoubleTensor, double)
#if defined(THC_REAL_IS_DOUBLE)
GENERATE_KERNEL1(generate_geometric, double, double p, double, hiprand_uniform_double, ceil(log(x) / log(1-p)))
#else
GENERATE_KERNEL1(generate_geometric, real, double p, float, hiprand_uniform, (ScalarConvert<float, real>::to(ceilf(logf(x) / log(1-p)))))
#endif
THC_API void THCTensor_(geometric)(THCState* state, THCTensor *self_, double p)
{
THAssert(THCTensor_(checkGPU)(state, 1, self_));
Generator* gen = THCRandom_getGenerator(state);
THCTensor *self = THCTensor_(newContiguous)(state, self_);
ptrdiff_t size = THCTensor_(nElement)(state, self);
real *data = THCTensor_(data)(state, self);
hipLaunchKernelGGL(( generate_geometric), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state),
gen->gen_states, size, data, p);
THCTensor_(freeCopyTo)(state, self, self_);
};
#undef NUM_BLOCKS
#endif
|
efc12103e69d5d4a30893b7c7c1bec713372042f.cu
|
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/THCTensorRandom.cu"
#else
#define NUM_BLOCKS min((int)THCCeilDiv(size, (ptrdiff_t) BLOCK_SIZE), MAX_NUM_BLOCKS)
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF)
THC_API void THCTensor_(uniform)(THCState* state, THCTensor *self_, double a, double b)
{
THAssert(THCTensor_(checkGPU)(state, 1, self_));
Generator* gen = THCRandom_getGenerator(state);
THCTensor *self = THCTensor_(newContiguous)(state, self_);
ptrdiff_t size = THCTensor_(nElement)(state, self);
real *data = THCTensor_(data)(state, self);
generate_uniform<<<NUM_BLOCKS, BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>(
gen->gen_states, size, data, a, b);
THCTensor_(freeCopyTo)(state, self, self_);
};
THC_API void THCTensor_(normal)(THCState* state, THCTensor *self_, double mean, double stdv)
{
THAssert(THCTensor_(checkGPU)(state, 1, self_));
Generator* gen = THCRandom_getGenerator(state);
THCTensor *self = THCTensor_(newContiguous)(state, self_);
ptrdiff_t size = THCTensor_(nElement)(state, self);
real *data = THCTensor_(data)(state, self);
generate_normal<<<NUM_BLOCKS, BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>(
gen->gen_states, size, data, mean, stdv);
THCTensor_(freeCopyTo)(state, self, self_);
};
THC_API void THCTensor_(logNormal)(THCState* state, THCTensor *self_, double mean, double stdv)
{
THAssert(THCTensor_(checkGPU)(state, 1, self_));
Generator* gen = THCRandom_getGenerator(state);
THCTensor *self = THCTensor_(newContiguous)(state, self_);
ptrdiff_t size = THCTensor_(nElement)(state, self);
real *data = THCTensor_(data)(state, self);
generateLogNormal<real><<<NUM_BLOCKS, BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>(
gen->gen_states, size, data, mean, stdv);
THCTensor_(freeCopyTo)(state, self, self_);
};
THC_API void THCTensor_(exponential)(THCState* state, THCTensor *self_, double lambda)
{
THAssert(THCTensor_(checkGPU)(state, 1, self_));
Generator* gen = THCRandom_getGenerator(state);
THCTensor *self = THCTensor_(newContiguous)(state, self_);
ptrdiff_t size = THCTensor_(nElement)(state, self);
real *data = THCTensor_(data)(state, self);
generate_exponential<<<NUM_BLOCKS, BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>(
gen->gen_states, size, data, lambda);
THCTensor_(freeCopyTo)(state, self, self_);
};
THC_API void THCTensor_(cauchy)(THCState* state, THCTensor *self_, double median, double sigma)
{
THAssert(THCTensor_(checkGPU)(state, 1, self_));
Generator* gen = THCRandom_getGenerator(state);
THCTensor *self = THCTensor_(newContiguous)(state, self_);
ptrdiff_t size = THCTensor_(nElement)(state, self);
real *data = THCTensor_(data)(state, self);
generate_cauchy<<<NUM_BLOCKS, BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>(
gen->gen_states, size, data, median, sigma);
THCTensor_(freeCopyTo)(state, self, self_);
};
void THCTensor_(renormRows)(struct THCState* state,
THCTensor* t) {
THAssert(THCTensor_(nDimension)(state, t) == 2);
long rows = THCTensor_(size)(state, t, 0);
long cols = THCTensor_(size)(state, t, 1);
cudaDeviceProp* props = THCState_getCurrentDeviceProperties(state);
THAssert(props != NULL);
int numSM = props->multiProcessorCount;
int maxThreads = props->maxThreadsPerBlock;
dim3 grid(rows < numSM * 4 ? rows : numSM * 4);
dim3 block(cols < maxThreads ? cols : maxThreads);
renormRowsL1<real>
<<<grid, block, block.x * sizeof(real),
THCState_getCurrentStream(state)>>>(THCTensor_(data)(state, t),
rows, cols);
}
THC_API void THCTensor_(multinomial)(struct THCState *state,
THCudaLongTensor *self,
THCTensor *prob_dist,
int n_sample,
int with_replacement)
{
THAssert(THCTensor_(checkGPU)(state, 2, self, prob_dist));
Generator* gen = THCRandom_getGenerator(state);
int inputSize = THCTensor_(nDimension)(state, prob_dist);
THArgCheck(inputSize > 0 && inputSize <= 2, 2,
"prob_dist must be 1 or 2 dim");
// Categories are in the innermost dimension
long numDist =
inputSize == 1 ? 1 : THCTensor_(size)(state, prob_dist, 0);
long numCategoriesLong =
inputSize == 1 ? THCTensor_(size)(state, prob_dist, 0) :
THCTensor_(size)(state, prob_dist, 1);
// Since the index tensor is float, numCategories cannot exceed max
// float integer precision
THArgCheck(numCategoriesLong <= FLOAT32_MAX_CONSECUTIVE_INT, 2,
"number of categories cannot exceed 2^24");
int numCategories = (int) numCategoriesLong;
THArgCheck(n_sample > 0, 3, "cannot sample <= 0 samples");
if (!with_replacement) {
THArgCheck(n_sample <= numCategories, 2,
"cannot sample n_sample > prob_dist:size(1) samples without "
"replacement");
}
// It is possible that prob_dist is non-contiguous
THCTensor* probDistContig =
THCTensor_(newContiguous)(state, prob_dist);
// Restructure data for 2d
if (inputSize == 1) {
THCTensor_(resize2d)(state, probDistContig, 1, numCategories);
}
THCudaLongTensor_resize2d(state, self, numDist, n_sample);
if (n_sample == 1) {
// Optimized allocation-free implementation
// To exploit greater parallelism for the sampling, generate the
// Uniform random samples in a separate kernel launch, into
// temporarily allocated memory. The device RNG is thread-limited
THCTensor *sampled = THCTensor_(newWithSize2d)(state, numDist, n_sample);
THCTensor_(uniform)(state, sampled, 0.0, 1.0);
cudaDeviceProp* props = THCState_getCurrentDeviceProperties(state);
THAssert(props != NULL);
int numSM = props->multiProcessorCount;
int maxThreads = props->maxThreadsPerBlock;
dim3 block(numCategories < maxThreads ? numCategories : maxThreads);
dim3 grid(numDist < numSM * 4 ? numDist : numSM * 4);
sampleMultinomialOnce
<<<grid, block, block.x * sizeof(real),
THCState_getCurrentStream(state)>>>(
THCudaLongTensor_data(state, self),
numDist,
numCategories,
THCTensor_(data)(state, sampled),
THCTensor_(data)(state, probDistContig));
THCTensor_(free)(state, sampled);
} else {
// Generic, slow implementation with memory allocations
// For sampling without replacement, we modify the distribution
// for subsequent samples in this space
THCTensor* origDist = THCTensor_(new)(state);
THCTensor_(resizeAs)(state, origDist, probDistContig);
THCTensor_(copy)(state, origDist, probDistContig);
THCTensor* normDist = THCTensor_(new)(state);
THCTensor_(resizeAs)(state, normDist, probDistContig);
THCTensor* prefixSum = THCTensor_(new)(state);
// Renorm along rows
THCTensor_(copy)(state, normDist, origDist);
THCTensor_(renormRows)(state, normDist);
// Prefix sum along rows
THCTensor_(cumsum)(state, prefixSum, normDist, 1);
if (with_replacement) {
// Sample with replacement
// Binary search is warp divergent (so effectively we're running
// with just a single thread), but for better utilization,
// we need each block to have at least 4 warps.
dim3 block(32, 4);
// Each warp in a block will generate a sample from one
// distribution concurrently.
dim3 grid(numDist < MAX_NUM_BLOCKS ? numDist : MAX_NUM_BLOCKS);
sampleMultinomialWithReplacement
<<<grid, block, 0, THCState_getCurrentStream(state)>>>(
gen->gen_states,
n_sample,
THCudaLongTensor_data(state, self),
numDist, numCategories,
THCTensor_(data)(state, prefixSum));
} else {
// Sample without replacement
// Binary search is warp divergent (so effectively we're running
// with just a single thread), but for better utilization,
// we need each block to have at least 4 warps.
dim3 block(32, 4);
// Each warp in a block will generate a sample from a different
// distribution concurrently.
ptrdiff_t numBlocks = THCCeilDiv(numDist, 4L);
dim3 grid(numBlocks < MAX_NUM_BLOCKS ? numBlocks : MAX_NUM_BLOCKS);
for (int sample = 0; sample < n_sample; ++sample) {
if (sample > 0) {
// Update probabilities
// Renorm along rows
THCTensor_(copy)(state, normDist, origDist);
THCTensor_(renormRows)(state, normDist);
// Prefix sum along rows
THCTensor_(cumsum)(state, prefixSum, normDist, 1);
}
// The kernel can only draw one sample before we have to
// recalculate our distribution
sampleMultinomialWithoutReplacement
<<<grid, block, 0, THCState_getCurrentStream(state)>>>(
gen->gen_states,
n_sample,
sample,
THCudaLongTensor_data(state, self),
numDist, numCategories,
THCTensor_(data)(state, origDist),
THCTensor_(data)(state, prefixSum));
}
}
THCTensor_(free)(state, prefixSum);
THCTensor_(free)(state, normDist);
THCTensor_(free)(state, origDist);
}
// Revert data restructuring based on input sizes
if (inputSize == 1) {
THCudaLongTensor_resize1d(state, self, n_sample);
// Unfortunately, if prob_dist is contiguous already,
// newContiguous is not a private copy, so we have to restructure
// this too, so as to not affect prob_dist
THCTensor_(resize1d)(state, probDistContig, numCategories);
}
THCTensor_(free)(state, probDistContig);
}
THC_API void THCTensor_(rand)(THCState *state, THCTensor *r_, THLongStorage *size)
{
THAssert(THCTensor_(checkGPU)(state, 1, r_));
THCTensor_(resize)(state, r_, size, NULL);
THCTensor_(uniform)(state, r_, 0, 1);
}
void THCTensor_(randn)(THCState *state, THCTensor *r_, THLongStorage *size)
{
THAssert(THCTensor_(checkGPU)(state, 1, r_));
THCTensor_(resize)(state, r_, size, NULL);
THCTensor_(normal)(state, r_, 0, 1);
}
#endif
#if defined(THC_REAL_IS_DOUBLE)
GENERATE_KERNEL1(generate_bernoulli, double, double p, double, curand_uniform_double, x <= p)
#else
GENERATE_KERNEL1(generate_bernoulli, real, double p, float, curand_uniform, (ScalarConvert<bool, real>::to(x <= p)))
#endif
THC_API void THCTensor_(bernoulli)(THCState* state, THCTensor *self_, double p)
{
THAssert(THCTensor_(checkGPU)(state, 1, self_));
Generator* gen = THCRandom_getGenerator(state);
THCTensor *self = THCTensor_(newContiguous)(state, self_);
ptrdiff_t size = THCTensor_(nElement)(state, self);
real *data = THCTensor_(data)(state, self);
generate_bernoulli<<<NUM_BLOCKS, BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>(
gen->gen_states, size, data, p);
THCTensor_(freeCopyTo)(state, self, self_);
};
#define DEFINE_BERNOULLI_TENSOR(NAME, PROB_TYPE, PROB_DATA_TYPE) \
THC_API void THCTensor_(NAME)(THCState* state, \
THCTensor *self_, PROB_TYPE *probs_) \
{ \
THAssert(THCTensor_(checkGPU)(state, 2, self_, probs_)); \
Generator* gen = THCRandom_getGenerator(state); \
THCTensor *self = THCTensor_(newContiguous)(state, self_); \
PROB_TYPE *probs = PROB_TYPE##_newContiguous(state, probs_); \
ptrdiff_t size = THCTensor_(nElement)(state, self); \
ptrdiff_t prob_size = PROB_TYPE##_nElement(state, probs); \
real *result_data = THCTensor_(data)(state, self); \
PROB_DATA_TYPE *probs_data = PROB_TYPE##_data(state, probs); \
\
THArgCheck(size == prob_size, 3, "inconsistent tensor size"); \
\
generate_bernoulli_tensor<<<NUM_BLOCKS, BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>( \
gen->gen_states, size, result_data, probs_data); \
\
PROB_TYPE##_free(state, probs); \
THCTensor_(freeCopyTo)(state, self, self_); \
}
DEFINE_BERNOULLI_TENSOR(bernoulli_FloatTensor, THCudaTensor, float)
DEFINE_BERNOULLI_TENSOR(bernoulli_DoubleTensor, THCudaDoubleTensor, double)
#if defined(THC_REAL_IS_DOUBLE)
GENERATE_KERNEL1(generate_geometric, double, double p, double, curand_uniform_double, ceil(log(x) / log(1-p)))
#else
GENERATE_KERNEL1(generate_geometric, real, double p, float, curand_uniform, (ScalarConvert<float, real>::to(ceilf(logf(x) / log(1-p)))))
#endif
THC_API void THCTensor_(geometric)(THCState* state, THCTensor *self_, double p)
{
THAssert(THCTensor_(checkGPU)(state, 1, self_));
Generator* gen = THCRandom_getGenerator(state);
THCTensor *self = THCTensor_(newContiguous)(state, self_);
ptrdiff_t size = THCTensor_(nElement)(state, self);
real *data = THCTensor_(data)(state, self);
generate_geometric<<<NUM_BLOCKS, BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>(
gen->gen_states, size, data, p);
THCTensor_(freeCopyTo)(state, self, self_);
};
#undef NUM_BLOCKS
#endif
|
e4cb7015b4ef9c841bd4346a23c2127305e1ad63.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define EIGEN_USE_GPU
#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/shape_inference.h"
#include <stdio.h>
using namespace tensorflow;
using namespace tensorflow::shape_inference;
#define OP_CHECK_CUDA_ERROR(CTX, CUDA_CALL) do { hipError_t err = CUDA_CALL; OP_REQUIRES(CTX, err == hipSuccess, errors::Internal(hipGetErrorName(err))); } while (false)
//------------------------------------------------------------------------
// CUDA kernel.
template <class T>
struct FusedBiasActKernelParams
{
const T* x; // [sizeX]
const T* b; // [sizeB] or NULL
const T* ref; // [sizeX] or NULL
T* y; // [sizeX]
int grad;
int axis;
int act;
float alpha;
float gain;
int sizeX;
int sizeB;
int stepB;
int loopX;
};
template <class T>
static __global__ void FusedBiasActKernel(const FusedBiasActKernelParams<T> p)
{
const float expRange = 80.0f;
const float halfExpRange = 40.0f;
const float seluScale = 1.0507009873554804934193349852946f;
const float seluAlpha = 1.6732632423543772848170429916717f;
// Loop over elements.
int xi = blockIdx.x * p.loopX * blockDim.x + threadIdx.x;
for (int loopIdx = 0; loopIdx < p.loopX && xi < p.sizeX; loopIdx++, xi += blockDim.x)
{
// Load and apply bias.
float x = (float)p.x[xi];
if (p.b)
x += (float)p.b[(xi / p.stepB) % p.sizeB];
float ref = (p.ref) ? (float)p.ref[xi] : 0.0f;
if (p.gain != 0.0f & p.act != 9)
ref /= p.gain;
// Evaluate activation func.
float y;
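// The selector fuses activation id and gradient order: e.g. act = 3 (lrelu)
// with grad = 1 hits case 31, the first-derivative branch that gates the
// incoming value x by the sign of the saved reference activation.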
switch (p.act * 10 + p.grad)
{
// linear
default:
case 10: y = x; break;
case 11: y = x; break;
case 12: y = 0.0f; break;
// relu
case 20: y = (x > 0.0f) ? x : 0.0f; break;
case 21: y = (ref > 0.0f) ? x : 0.0f; break;
case 22: y = 0.0f; break;
// lrelu
case 30: y = (x > 0.0f) ? x : x * p.alpha; break;
case 31: y = (ref > 0.0f) ? x : x * p.alpha; break;
case 32: y = 0.0f; break;
// tanh
case 40: { float c = expf(x); float d = 1.0f / c; y = (x < -expRange) ? -1.0f : (x > expRange) ? 1.0f : (c - d) / (c + d); } break;
case 41: y = x * (1.0f - ref * ref); break;
case 42: y = x * (1.0f - ref * ref) * (-2.0f * ref); break;
// sigmoid
case 50: y = (x < -expRange) ? 0.0f : 1.0f / (expf(-x) + 1.0f); break;
case 51: y = x * ref * (1.0f - ref); break;
case 52: y = x * ref * (1.0f - ref) * (1.0f - 2.0f * ref); break;
// elu
case 60: y = (x >= 0.0f) ? x : expf(x) - 1.0f; break;
case 61: y = (ref >= 0.0f) ? x : x * (ref + 1.0f); break;
case 62: y = (ref >= 0.0f) ? 0.0f : x * (ref + 1.0f); break;
// selu
case 70: y = (x >= 0.0f) ? seluScale * x : (seluScale * seluAlpha) * (expf(x) - 1.0f); break;
case 71: y = (ref >= 0.0f) ? x * seluScale : x * (ref + seluScale * seluAlpha); break;
case 72: y = (ref >= 0.0f) ? 0.0f : x * (ref + seluScale * seluAlpha); break;
// softplus
case 80: y = (x > expRange) ? x : logf(expf(x) + 1.0f); break;
case 81: y = x * (1.0f - expf(-ref)); break;
case 82: { float c = expf(-ref); y = x * c * (1.0f - c); } break;
// swish
case 90: y = (x < -expRange) ? 0.0f : x / (expf(-x) + 1.0f); break;
case 91: { float c = expf(ref); float d = c + 1.0f; y = (ref > halfExpRange) ? x : x * c * (ref + d) / (d * d); } break;
case 92: { float c = expf(ref); float d = c + 1.0f; y = (ref > halfExpRange) ? 0.0f : x * c * (ref * (2.0f - d) + 2.0f * d) / (d * d * d); } break;
}
// Apply gain and store.
p.y[xi] = (T)(y * p.gain);
}
}
//------------------------------------------------------------------------
// TensorFlow op.
template <class T>
struct FusedBiasActOp : public OpKernel
{
FusedBiasActKernelParams<T> m_attribs;
FusedBiasActOp(OpKernelConstruction* ctx) : OpKernel(ctx)
{
memset(&m_attribs, 0, sizeof(m_attribs));
OP_REQUIRES_OK(ctx, ctx->GetAttr("grad", &m_attribs.grad));
OP_REQUIRES_OK(ctx, ctx->GetAttr("axis", &m_attribs.axis));
OP_REQUIRES_OK(ctx, ctx->GetAttr("act", &m_attribs.act));
OP_REQUIRES_OK(ctx, ctx->GetAttr("alpha", &m_attribs.alpha));
OP_REQUIRES_OK(ctx, ctx->GetAttr("gain", &m_attribs.gain));
OP_REQUIRES(ctx, m_attribs.grad >= 0, errors::InvalidArgument("grad must be non-negative"));
OP_REQUIRES(ctx, m_attribs.axis >= 0, errors::InvalidArgument("axis must be non-negative"));
OP_REQUIRES(ctx, m_attribs.act >= 0, errors::InvalidArgument("act must be non-negative"));
}
void Compute(OpKernelContext* ctx)
{
FusedBiasActKernelParams<T> p = m_attribs;
hipStream_t stream = ctx->eigen_device<Eigen::GpuDevice>().stream();
const Tensor& x = ctx->input(0); // [...]
const Tensor& b = ctx->input(1); // [sizeB] or [0]
const Tensor& ref = ctx->input(2); // x.shape or [0]
p.x = x.flat<T>().data();
p.b = (b.NumElements()) ? b.flat<T>().data() : NULL;
p.ref = (ref.NumElements()) ? ref.flat<T>().data() : NULL;
OP_REQUIRES(ctx, b.NumElements() == 0 || m_attribs.axis < x.dims(), errors::InvalidArgument("axis out of bounds"));
OP_REQUIRES(ctx, b.dims() == 1, errors::InvalidArgument("b must have rank 1"));
OP_REQUIRES(ctx, b.NumElements() == 0 || b.NumElements() == x.dim_size(m_attribs.axis), errors::InvalidArgument("b has wrong number of elements"));
OP_REQUIRES(ctx, ref.NumElements() == ((p.grad == 0) ? 0 : x.NumElements()), errors::InvalidArgument("ref has wrong number of elements"));
OP_REQUIRES(ctx, x.NumElements() <= kint32max, errors::InvalidArgument("x is too large"));
p.sizeX = (int)x.NumElements();
p.sizeB = (int)b.NumElements();
p.stepB = 1;
for (int i = m_attribs.axis + 1; i < x.dims(); i++)
p.stepB *= (int)x.dim_size(i);
Tensor* y = NULL; // x.shape
OP_REQUIRES_OK(ctx, ctx->allocate_output(0, x.shape(), &y));
p.y = y->flat<T>().data();
p.loopX = 4;
int blockSize = 4 * 32;
int gridSize = (p.sizeX - 1) / (p.loopX * blockSize) + 1;
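// Illustrative sizing: blockSize = 4 * 32 = 128 threads with loopX = 4 covers
// 512 elements per block, so sizeX = 1,000,000 gives
// gridSize = (1000000 - 1) / 512 + 1 = 1954 blocks.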
void* args[] = {&p};
OP_CHECK_CUDA_ERROR(ctx, hipLaunchKernel((void*)FusedBiasActKernel<T>, gridSize, blockSize, args, 0, stream));
}
};
REGISTER_OP("FusedBiasAct")
.Input ("x: T")
.Input ("b: T")
.Input ("ref: T")
.Output ("y: T")
.Attr ("T: {float, half}")
.Attr ("grad: int = 0")
.Attr ("axis: int = 1")
.Attr ("act: int = 0")
.Attr ("alpha: float = 0.0")
.Attr ("gain: float = 1.0");
REGISTER_KERNEL_BUILDER(Name("FusedBiasAct").Device(DEVICE_GPU).TypeConstraint<float>("T"), FusedBiasActOp<float>);
REGISTER_KERNEL_BUILDER(Name("FusedBiasAct").Device(DEVICE_GPU).TypeConstraint<Eigen::half>("T"), FusedBiasActOp<Eigen::half>);
//------------------------------------------------------------------------
|
e4cb7015b4ef9c841bd4346a23c2127305e1ad63.cu
|
#define EIGEN_USE_GPU
#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/shape_inference.h"
#include <stdio.h>
using namespace tensorflow;
using namespace tensorflow::shape_inference;
#define OP_CHECK_CUDA_ERROR(CTX, CUDA_CALL) do { cudaError_t err = CUDA_CALL; OP_REQUIRES(CTX, err == cudaSuccess, errors::Internal(cudaGetErrorName(err))); } while (false)
//------------------------------------------------------------------------
// CUDA kernel.
template <class T>
struct FusedBiasActKernelParams
{
const T* x; // [sizeX]
const T* b; // [sizeB] or NULL
const T* ref; // [sizeX] or NULL
T* y; // [sizeX]
int grad;
int axis;
int act;
float alpha;
float gain;
int sizeX;
int sizeB;
int stepB;
int loopX;
};
template <class T>
static __global__ void FusedBiasActKernel(const FusedBiasActKernelParams<T> p)
{
const float expRange = 80.0f;
const float halfExpRange = 40.0f;
const float seluScale = 1.0507009873554804934193349852946f;
const float seluAlpha = 1.6732632423543772848170429916717f;
// Loop over elements.
int xi = blockIdx.x * p.loopX * blockDim.x + threadIdx.x;
for (int loopIdx = 0; loopIdx < p.loopX && xi < p.sizeX; loopIdx++, xi += blockDim.x)
{
// Load and apply bias.
float x = (float)p.x[xi];
if (p.b)
x += (float)p.b[(xi / p.stepB) % p.sizeB];
float ref = (p.ref) ? (float)p.ref[xi] : 0.0f;
if (p.gain != 0.0f & p.act != 9)
ref /= p.gain;
// Evaluate activation func.
float y;
switch (p.act * 10 + p.grad)
{
// linear
default:
case 10: y = x; break;
case 11: y = x; break;
case 12: y = 0.0f; break;
// relu
case 20: y = (x > 0.0f) ? x : 0.0f; break;
case 21: y = (ref > 0.0f) ? x : 0.0f; break;
case 22: y = 0.0f; break;
// lrelu
case 30: y = (x > 0.0f) ? x : x * p.alpha; break;
case 31: y = (ref > 0.0f) ? x : x * p.alpha; break;
case 32: y = 0.0f; break;
// tanh
case 40: { float c = expf(x); float d = 1.0f / c; y = (x < -expRange) ? -1.0f : (x > expRange) ? 1.0f : (c - d) / (c + d); } break;
case 41: y = x * (1.0f - ref * ref); break;
case 42: y = x * (1.0f - ref * ref) * (-2.0f * ref); break;
// sigmoid
case 50: y = (x < -expRange) ? 0.0f : 1.0f / (expf(-x) + 1.0f); break;
case 51: y = x * ref * (1.0f - ref); break;
case 52: y = x * ref * (1.0f - ref) * (1.0f - 2.0f * ref); break;
// elu
case 60: y = (x >= 0.0f) ? x : expf(x) - 1.0f; break;
case 61: y = (ref >= 0.0f) ? x : x * (ref + 1.0f); break;
case 62: y = (ref >= 0.0f) ? 0.0f : x * (ref + 1.0f); break;
// selu
case 70: y = (x >= 0.0f) ? seluScale * x : (seluScale * seluAlpha) * (expf(x) - 1.0f); break;
case 71: y = (ref >= 0.0f) ? x * seluScale : x * (ref + seluScale * seluAlpha); break;
case 72: y = (ref >= 0.0f) ? 0.0f : x * (ref + seluScale * seluAlpha); break;
// softplus
case 80: y = (x > expRange) ? x : logf(expf(x) + 1.0f); break;
case 81: y = x * (1.0f - expf(-ref)); break;
case 82: { float c = expf(-ref); y = x * c * (1.0f - c); } break;
// swish
case 90: y = (x < -expRange) ? 0.0f : x / (expf(-x) + 1.0f); break;
case 91: { float c = expf(ref); float d = c + 1.0f; y = (ref > halfExpRange) ? x : x * c * (ref + d) / (d * d); } break;
case 92: { float c = expf(ref); float d = c + 1.0f; y = (ref > halfExpRange) ? 0.0f : x * c * (ref * (2.0f - d) + 2.0f * d) / (d * d * d); } break;
}
// Apply gain and store.
p.y[xi] = (T)(y * p.gain);
}
}
//------------------------------------------------------------------------
// TensorFlow op.
template <class T>
struct FusedBiasActOp : public OpKernel
{
FusedBiasActKernelParams<T> m_attribs;
FusedBiasActOp(OpKernelConstruction* ctx) : OpKernel(ctx)
{
memset(&m_attribs, 0, sizeof(m_attribs));
OP_REQUIRES_OK(ctx, ctx->GetAttr("grad", &m_attribs.grad));
OP_REQUIRES_OK(ctx, ctx->GetAttr("axis", &m_attribs.axis));
OP_REQUIRES_OK(ctx, ctx->GetAttr("act", &m_attribs.act));
OP_REQUIRES_OK(ctx, ctx->GetAttr("alpha", &m_attribs.alpha));
OP_REQUIRES_OK(ctx, ctx->GetAttr("gain", &m_attribs.gain));
OP_REQUIRES(ctx, m_attribs.grad >= 0, errors::InvalidArgument("grad must be non-negative"));
OP_REQUIRES(ctx, m_attribs.axis >= 0, errors::InvalidArgument("axis must be non-negative"));
OP_REQUIRES(ctx, m_attribs.act >= 0, errors::InvalidArgument("act must be non-negative"));
}
void Compute(OpKernelContext* ctx)
{
FusedBiasActKernelParams<T> p = m_attribs;
cudaStream_t stream = ctx->eigen_device<Eigen::GpuDevice>().stream();
const Tensor& x = ctx->input(0); // [...]
const Tensor& b = ctx->input(1); // [sizeB] or [0]
const Tensor& ref = ctx->input(2); // x.shape or [0]
p.x = x.flat<T>().data();
p.b = (b.NumElements()) ? b.flat<T>().data() : NULL;
p.ref = (ref.NumElements()) ? ref.flat<T>().data() : NULL;
OP_REQUIRES(ctx, b.NumElements() == 0 || m_attribs.axis < x.dims(), errors::InvalidArgument("axis out of bounds"));
OP_REQUIRES(ctx, b.dims() == 1, errors::InvalidArgument("b must have rank 1"));
OP_REQUIRES(ctx, b.NumElements() == 0 || b.NumElements() == x.dim_size(m_attribs.axis), errors::InvalidArgument("b has wrong number of elements"));
OP_REQUIRES(ctx, ref.NumElements() == ((p.grad == 0) ? 0 : x.NumElements()), errors::InvalidArgument("ref has wrong number of elements"));
OP_REQUIRES(ctx, x.NumElements() <= kint32max, errors::InvalidArgument("x is too large"));
p.sizeX = (int)x.NumElements();
p.sizeB = (int)b.NumElements();
p.stepB = 1;
for (int i = m_attribs.axis + 1; i < x.dims(); i++)
p.stepB *= (int)x.dim_size(i);
Tensor* y = NULL; // x.shape
OP_REQUIRES_OK(ctx, ctx->allocate_output(0, x.shape(), &y));
p.y = y->flat<T>().data();
p.loopX = 4;
int blockSize = 4 * 32;
int gridSize = (p.sizeX - 1) / (p.loopX * blockSize) + 1;
void* args[] = {&p};
OP_CHECK_CUDA_ERROR(ctx, cudaLaunchKernel((void*)FusedBiasActKernel<T>, gridSize, blockSize, args, 0, stream));
}
};
REGISTER_OP("FusedBiasAct")
.Input ("x: T")
.Input ("b: T")
.Input ("ref: T")
.Output ("y: T")
.Attr ("T: {float, half}")
.Attr ("grad: int = 0")
.Attr ("axis: int = 1")
.Attr ("act: int = 0")
.Attr ("alpha: float = 0.0")
.Attr ("gain: float = 1.0");
REGISTER_KERNEL_BUILDER(Name("FusedBiasAct").Device(DEVICE_GPU).TypeConstraint<float>("T"), FusedBiasActOp<float>);
REGISTER_KERNEL_BUILDER(Name("FusedBiasAct").Device(DEVICE_GPU).TypeConstraint<Eigen::half>("T"), FusedBiasActOp<Eigen::half>);
//------------------------------------------------------------------------
|
70d803003f9b2918eb91e79fe2567876d6674675.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "CudaComon.cuh"
using namespace Beam;
GLOBAL void bmKernelColorGradient(unsigned int* buffer, u32 size)
{
u32 i = bIdx.x * bDim.x + tIdx.x;
i = i < size ? i : 0;
int blockSize = size/6;
int colorType = i / blockSize;
float f = (float)(i % blockSize) / (float)blockSize;
unsigned int c = (unsigned int)(255*f);
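// The buffer is split into six equal segments, each ramping one channel
// combination from 0 to 255: red, green, blue, red+green (yellow),
// green+blue (cyan), and red+blue (magenta).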
switch ( colorType )
{
case 0:
buffer[i] = c<<16;
break;
case 1:
buffer[i] = c<<8;
break;
case 2:
buffer[i] = c;
break;
case 3:
buffer[i] = (c<<16) | (c<<8);
break;
case 4:
buffer[i] = (c<<8) | c;
break;
case 5:
buffer[i] = (c<<16) | c;
break;
}
}
extern "C"
void bmStartColorGradient(u32* buffer, u32 w, u32 h)
{
#if CUDA
auto size = w*h;
hipLaunchKernelGGL(( bmKernelColorGradient), dim3((size+255)/256), dim3(256) , 0, 0, buffer, size);
#endif
}
|
70d803003f9b2918eb91e79fe2567876d6674675.cu
|
#include "CudaComon.cuh"
using namespace Beam;
GLOBAL void bmKernelColorGradient(unsigned int* buffer, u32 size)
{
u32 i = bIdx.x * bDim.x + tIdx.x;
i = i < size ? i : 0;
int blockSize = size/6;
int colorType = i / blockSize;
float f = (float)(i % blockSize) / (float)blockSize;
unsigned int c = (unsigned int)(255*f);
switch ( colorType )
{
case 0:
buffer[i] = c<<16;
break;
case 1:
buffer[i] = c<<8;
break;
case 2:
buffer[i] = c;
break;
case 3:
buffer[i] = (c<<16) | (c<<8);
break;
case 4:
buffer[i] = (c<<8) | c;
break;
case 5:
buffer[i] = (c<<16) | c;
break;
}
}
extern "C"
void bmStartColorGradient(u32* buffer, u32 w, u32 h)
{
#if CUDA
auto size = w*h;
bmKernelColorGradient<<< (size+255)/256, 256 >>>(buffer, size);
#endif
}
|
872cb21dd2ce87ba684fcdda364ab27f39d1adee.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// ------------------------------------------------------------------
// Deformable Convolutional Networks
// Copyright (c) 2015 Microsoft
// Licensed under The MIT License
// Modified from MATLAB Faster R-CNN (https://github.com/shaoqingren/faster_rcnn)
// ------------------------------------------------------------------
#include <ATen/ATen.h>
#include <chrono>
#include <limits>
#include <spconv/mp_helper.h>
#include <spconv/reordering.h>
#include <spconv/reordering.cu.h>
#include <tensorview/helper_kernel.cu.h>
#include <tensorview/helper_launch.h>
#include <tensorview/tensorview.h>
#include <type_traits>
#include <utility/timer.h>
#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0))
int const threadsPerBlock = sizeof(unsigned long long) * 8;
template <typename DType>
__device__ inline DType devIoU(DType const *const a, DType const *const b)
{
DType left = max(a[0], b[0]), right = min(a[2], b[2]);
DType top = max(a[1], b[1]), bottom = min(a[3], b[3]);
DType width = max(right - left + 1, 0.f), height = max(bottom - top + 1, 0.f);
DType interS = width * height;
DType Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1);
DType Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1);
return interS / (Sa + Sb - interS);
}
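// Each thread owns one candidate box (a row) and encodes its overlap
// decisions against a 64-box column block as bits of an unsigned long long
// (threadsPerBlock = 8 * 8 = 64), so dev_mask stores one 64-bit word per
// (box, column-block) pair.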
template <typename DType, int BLOCK_THREADS>
__global__ void nms_kernel(const int n_boxes, const DType nms_overlap_thresh,
const DType *dev_boxes, unsigned long long *dev_mask)
{
const int row_start = blockIdx.y;
const int col_start = blockIdx.x;
// if (row_start > col_start) return;
const int row_size =
min(n_boxes - row_start * BLOCK_THREADS, BLOCK_THREADS);
const int col_size =
min(n_boxes - col_start * BLOCK_THREADS, BLOCK_THREADS);
__shared__ DType block_boxes[BLOCK_THREADS * 5];
if (threadIdx.x < col_size)
{
#pragma unroll
for (int i = 0; i < 5; ++i)
{
block_boxes[threadIdx.x * 5 + i] =
dev_boxes[(BLOCK_THREADS * col_start + threadIdx.x) * 5 + i];
}
}
__syncthreads();
if (threadIdx.x < row_size)
{
const int cur_box_idx = BLOCK_THREADS * row_start + threadIdx.x;
const DType *cur_box = dev_boxes + cur_box_idx * 5;
unsigned long long t = 0;
int start = 0;
if (row_start == col_start)
{
start = threadIdx.x + 1;
}
for (int i = start; i < col_size; i++)
{
if (devIoU(cur_box, block_boxes + i * 5) > nms_overlap_thresh)
{
t |= 1ULL << i;
}
}
const int col_blocks = DIVUP(n_boxes, BLOCK_THREADS);
dev_mask[cur_box_idx * col_blocks + col_start] = t;
}
}
|
872cb21dd2ce87ba684fcdda364ab27f39d1adee.cu
|
// ------------------------------------------------------------------
// Deformable Convolutional Networks
// Copyright (c) 2015 Microsoft
// Licensed under The MIT License
// Modified from MATLAB Faster R-CNN (https://github.com/shaoqingren/faster_rcnn)
// ------------------------------------------------------------------
#include <ATen/ATen.h>
#include <chrono>
#include <limits>
#include <spconv/mp_helper.h>
#include <spconv/reordering.h>
#include <spconv/reordering.cu.h>
#include <tensorview/helper_kernel.cu.h>
#include <tensorview/helper_launch.h>
#include <tensorview/tensorview.h>
#include <type_traits>
#include <utility/timer.h>
#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0))
int const threadsPerBlock = sizeof(unsigned long long) * 8;
template <typename DType>
__device__ inline DType devIoU(DType const *const a, DType const *const b)
{
DType left = max(a[0], b[0]), right = min(a[2], b[2]);
DType top = max(a[1], b[1]), bottom = min(a[3], b[3]);
DType width = max(right - left + 1, 0.f), height = max(bottom - top + 1, 0.f);
DType interS = width * height;
DType Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1);
DType Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1);
return interS / (Sa + Sb - interS);
}
template <typename DType, int BLOCK_THREADS>
__global__ void nms_kernel(const int n_boxes, const DType nms_overlap_thresh,
const DType *dev_boxes, unsigned long long *dev_mask)
{
const int row_start = blockIdx.y;
const int col_start = blockIdx.x;
// if (row_start > col_start) return;
const int row_size =
min(n_boxes - row_start * BLOCK_THREADS, BLOCK_THREADS);
const int col_size =
min(n_boxes - col_start * BLOCK_THREADS, BLOCK_THREADS);
__shared__ DType block_boxes[BLOCK_THREADS * 5];
if (threadIdx.x < col_size)
{
#pragma unroll
for (int i = 0; i < 5; ++i)
{
block_boxes[threadIdx.x * 5 + i] =
dev_boxes[(BLOCK_THREADS * col_start + threadIdx.x) * 5 + i];
}
}
__syncthreads();
if (threadIdx.x < row_size)
{
const int cur_box_idx = BLOCK_THREADS * row_start + threadIdx.x;
const DType *cur_box = dev_boxes + cur_box_idx * 5;
unsigned long long t = 0;
int start = 0;
if (row_start == col_start)
{
start = threadIdx.x + 1;
}
for (int i = start; i < col_size; i++)
{
if (devIoU(cur_box, block_boxes + i * 5) > nms_overlap_thresh)
{
t |= 1ULL << i;
}
}
const int col_blocks = DIVUP(n_boxes, BLOCK_THREADS);
dev_mask[cur_box_idx * col_blocks + col_start] = t;
}
}
|
d83e56ac044b407732fb1277d79d1d27d61ebe19.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "mm_tiled.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *dA = NULL;
hipMalloc(&dA, XSIZE*YSIZE);
float *dB = NULL;
hipMalloc(&dB, XSIZE*YSIZE);
float *dC = NULL;
hipMalloc(&dC, XSIZE*YSIZE);
int DIM = 2;
int N = XSIZE*YSIZE;
int GPUN = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
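// The grid is sized from iXSIZE/iYSIZE, which were padded above to multiples
// of the block dimensions. Below, one launch plus 10 untimed iterations warm
// the device, then 1000 launches are timed; the timed loop does not
// synchronize with the device, so it measures kernel enqueue rather than
// completed execution.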
hipFree(0);
hipLaunchKernelGGL(mm_tiled, dim3(gridBlock), dim3(threadBlock), 0, 0, dA, dB, dC, DIM, N, GPUN);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(mm_tiled, dim3(gridBlock), dim3(threadBlock), 0, 0, dA, dB, dC, DIM, N, GPUN);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(mm_tiled, dim3(gridBlock), dim3(threadBlock), 0, 0, dA, dB, dC, DIM, N, GPUN);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
d83e56ac044b407732fb1277d79d1d27d61ebe19.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "mm_tiled.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *dA = NULL;
cudaMalloc(&dA, XSIZE*YSIZE);
float *dB = NULL;
cudaMalloc(&dB, XSIZE*YSIZE);
float *dC = NULL;
cudaMalloc(&dC, XSIZE*YSIZE);
int DIM = 2;
int N = XSIZE*YSIZE;
int GPUN = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
mm_tiled<<<gridBlock,threadBlock>>>(dA,dB,dC,DIM,N,GPUN);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
mm_tiled<<<gridBlock,threadBlock>>>(dA,dB,dC,DIM,N,GPUN);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
mm_tiled<<<gridBlock,threadBlock>>>(dA,dB,dC,DIM,N,GPUN);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
322b911ea1463c2f0d6c302a09ae233cd221f7d2.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <wb.h>
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#define MASK_WIDTH 5
#define O_TILE_WIDTH 16
#define clamp(x) (min(max((x), 0.0), 1.0))
#define MASK_RADIUS MASK_WIDTH/2
#define BLOCK_WIDTH (O_TILE_WIDTH + MASK_WIDTH - 1)
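// With O_TILE_WIDTH = 16 and MASK_WIDTH = 5, BLOCK_WIDTH = 20: each 20x20
// thread block stages a 20x20 input tile in shared memory (the 16x16 output
// tile plus a MASK_RADIUS = 2 halo on every side).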
//@@ INSERT CODE HERE
//implement the tiled 2D convolution kernel with adjustments for channels
//use shared memory to reduce the number of global accesses, handle the boundary conditions when loading input list elements into the shared memory
//clamp your output values
__global__ void convolution(float *deviceInputImageData, const float * __restrict__ deviceMaskData, float * deviceOutputImageData, int imageChannels, int imageWidth, int imageHeight) {
__shared__ float Ns[BLOCK_WIDTH][BLOCK_WIDTH];
int i, j, k;
int tx = threadIdx.x;
int ty = threadIdx.y;
int row_o = blockIdx.y*O_TILE_WIDTH + ty;
int col_o = blockIdx.x*O_TILE_WIDTH + tx;
int row_i = row_o - MASK_RADIUS;
int col_i = col_o - MASK_RADIUS;
for (k = 0; k < imageChannels; k++) {
if ((row_i >= 0 && row_i < imageHeight) && (col_i >= 0 && col_i < imageWidth))
Ns[ty][tx] = deviceInputImageData[(row_i * imageWidth + col_i) * imageChannels + k];
else
Ns[ty][tx] = 0.0f;
__syncthreads();
float output = 0.0f;
if (ty < O_TILE_WIDTH && tx < O_TILE_WIDTH) {
for (i = 0; i < MASK_WIDTH; i++) {
for (j = 0; j < MASK_WIDTH; j++) {
output += deviceMaskData[j * MASK_WIDTH + i] * Ns[i + ty][j + tx];
}
}
if (row_o < imageHeight && col_o < imageWidth)
deviceOutputImageData[(row_o * imageWidth + col_o) * imageChannels + k] = clamp(output);
}
__syncthreads();
}
}
int main(int argc, char *argv[]) {
wbArg_t arg;
int maskRows;
int maskColumns;
int imageChannels;
int imageWidth;
int imageHeight;
char *inputImageFile;
char *inputMaskFile;
wbImage_t inputImage;
wbImage_t outputImage;
float *hostInputImageData;
float *hostOutputImageData;
float *hostMaskData;
float *deviceInputImageData;
float *deviceOutputImageData;
float *deviceMaskData;
arg = wbArg_read(argc, argv); /* parse the input arguments */
inputImageFile = wbArg_getInputFile(arg, 0);
inputMaskFile = wbArg_getInputFile(arg, 1);
inputImage = wbImport(inputImageFile);
hostMaskData = (float *)wbImport(inputMaskFile, &maskRows, &maskColumns);
assert(maskRows == MASK_WIDTH); /* mask height is fixed to 5 */
assert(maskColumns == MASK_WIDTH); /* mask width is fixed to 5 */
imageWidth = wbImage_getWidth(inputImage);
imageHeight = wbImage_getHeight(inputImage);
imageChannels = wbImage_getChannels(inputImage);
outputImage = wbImage_new(imageWidth, imageHeight, imageChannels);
hostInputImageData = wbImage_getData(inputImage);
hostOutputImageData = wbImage_getData(outputImage);
wbTime_start(GPU, "Doing GPU Computation (memory + compute)");
wbTime_start(GPU, "Doing GPU memory allocation");
//@@ INSERT CODE HERE
//allocate device memory
hipMalloc((void **)&deviceInputImageData, imageWidth * imageHeight * imageChannels * sizeof(float));
hipMalloc((void **)&deviceOutputImageData, imageWidth * imageHeight * imageChannels * sizeof(float));
hipMalloc((void **)&deviceMaskData, maskRows * maskColumns * sizeof(float));
wbTime_stop(GPU, "Doing GPU memory allocation");
wbTime_start(Copy, "Copying data to the GPU");
//@@ INSERT CODE HERE
//copy host memory to device
hipMemcpy(deviceInputImageData, hostInputImageData, imageWidth * imageHeight * imageChannels * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(deviceMaskData, hostMaskData, maskRows * maskColumns * sizeof(float), hipMemcpyHostToDevice);
wbTime_stop(Copy, "Copying data to the GPU");
wbTime_start(Compute, "Doing the computation on the GPU");
//@@ INSERT CODE HERE
//initialize thread block and kernel grid dimensions
//invoke CUDA kernel
dim3 dimGrid(ceil((float)imageWidth / O_TILE_WIDTH), ceil((float)imageHeight / O_TILE_WIDTH));
dim3 dimBlock(BLOCK_WIDTH, BLOCK_WIDTH, 1);
hipLaunchKernelGGL(( convolution) , dim3(dimGrid), dim3(dimBlock), 0, 0, deviceInputImageData, deviceMaskData, deviceOutputImageData, imageChannels, imageWidth, imageHeight);
hipDeviceSynchronize();
wbTime_stop(Compute, "Doing the computation on the GPU");
wbTime_start(Copy, "Copying data from the GPU");
//@@ INSERT CODE HERE
//copy results from device to host
hipMemcpy(hostOutputImageData, deviceOutputImageData, imageWidth * imageHeight * imageChannels * sizeof(float), hipMemcpyDeviceToHost);
wbTime_stop(Copy, "Copying data from the GPU");
wbTime_stop(GPU, "Doing GPU Computation (memory + compute)");
wbSolution(arg, outputImage);
//@@ INSERT CODE HERE
//deallocate device memory
hipFree(deviceInputImageData);
hipFree(deviceOutputImageData);
hipFree(deviceMaskData);
free(hostMaskData);
wbImage_delete(outputImage);
wbImage_delete(inputImage);
return 0;
}
|
322b911ea1463c2f0d6c302a09ae233cd221f7d2.cu
|
#include <wb.h>
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#define MASK_WIDTH 5
#define O_TILE_WIDTH 16
#define clamp(x) (min(max((x), 0.0), 1.0))
#define MASK_RADIUS MASK_WIDTH/2
#define BLOCK_WIDTH (O_TILE_WIDTH + MASK_WIDTH - 1)
//@@ INSERT CODE HERE
//implement the tiled 2D convolution kernel with adjustments for channels
//use shared memory to reduce the number of global accesses, handle the boundary conditions when loading input list elements into the shared memory
//clamp your output values
__global__ void convolution(float *deviceInputImageData, const float * __restrict__ deviceMaskData, float * deviceOutputImageData, int imageChannels, int imageWidth, int imageHeight) {
__shared__ float Ns[BLOCK_WIDTH][BLOCK_WIDTH];
int i, j, k;
int tx = threadIdx.x;
int ty = threadIdx.y;
int row_o = blockIdx.y*O_TILE_WIDTH + ty;
int col_o = blockIdx.x*O_TILE_WIDTH + tx;
int row_i = row_o - MASK_RADIUS;
int col_i = col_o - MASK_RADIUS;
for (k = 0; k < imageChannels; k++) {
if ((row_i >= 0 && row_i < imageHeight) && (col_i >= 0 && col_i < imageWidth))
Ns[ty][tx] = deviceInputImageData[(row_i * imageWidth + col_i) * imageChannels + k];
else
Ns[ty][tx] = 0.0f;
__syncthreads();
float output = 0.0f;
if (ty < O_TILE_WIDTH && tx < O_TILE_WIDTH) {
for (i = 0; i < MASK_WIDTH; i++) {
for (j = 0; j < MASK_WIDTH; j++) {
output += deviceMaskData[j * MASK_WIDTH + i] * Ns[i + ty][j + tx];
}
}
if (row_o < imageHeight && col_o < imageWidth)
deviceOutputImageData[(row_o * imageWidth + col_o) * imageChannels + k] = clamp(output);
}
__syncthreads();
}
}
int main(int argc, char *argv[]) {
wbArg_t arg;
int maskRows;
int maskColumns;
int imageChannels;
int imageWidth;
int imageHeight;
char *inputImageFile;
char *inputMaskFile;
wbImage_t inputImage;
wbImage_t outputImage;
float *hostInputImageData;
float *hostOutputImageData;
float *hostMaskData;
float *deviceInputImageData;
float *deviceOutputImageData;
float *deviceMaskData;
arg = wbArg_read(argc, argv); /* parse the input arguments */
inputImageFile = wbArg_getInputFile(arg, 0);
inputMaskFile = wbArg_getInputFile(arg, 1);
inputImage = wbImport(inputImageFile);
hostMaskData = (float *)wbImport(inputMaskFile, &maskRows, &maskColumns);
assert(maskRows == MASK_WIDTH); /* mask height is fixed to 5 */
assert(maskColumns == MASK_WIDTH); /* mask width is fixed to 5 */
imageWidth = wbImage_getWidth(inputImage);
imageHeight = wbImage_getHeight(inputImage);
imageChannels = wbImage_getChannels(inputImage);
outputImage = wbImage_new(imageWidth, imageHeight, imageChannels);
hostInputImageData = wbImage_getData(inputImage);
hostOutputImageData = wbImage_getData(outputImage);
wbTime_start(GPU, "Doing GPU Computation (memory + compute)");
wbTime_start(GPU, "Doing GPU memory allocation");
//@@ INSERT CODE HERE
//allocate device memory
cudaMalloc((void **)&deviceInputImageData, imageWidth * imageHeight * imageChannels * sizeof(float));
cudaMalloc((void **)&deviceOutputImageData, imageWidth * imageHeight * imageChannels * sizeof(float));
cudaMalloc((void **)&deviceMaskData, maskRows * maskColumns * sizeof(float));
wbTime_stop(GPU, "Doing GPU memory allocation");
wbTime_start(Copy, "Copying data to the GPU");
//@@ INSERT CODE HERE
//copy host memory to device
cudaMemcpy(deviceInputImageData, hostInputImageData, imageWidth * imageHeight * imageChannels * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(deviceMaskData, hostMaskData, maskRows * maskColumns * sizeof(float), cudaMemcpyHostToDevice);
wbTime_stop(Copy, "Copying data to the GPU");
wbTime_start(Compute, "Doing the computation on the GPU");
//@@ INSERT CODE HERE
//initialize thread block and kernel grid dimensions
//invoke CUDA kernel
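// The grid is sized in output tiles (O_TILE_WIDTH per dimension) while each block is
// BLOCK_WIDTH x BLOCK_WIDTH, so the extra threads can stage the halo cells into shared memory.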
dim3 dimGrid(ceil((float)imageWidth / O_TILE_WIDTH), ceil((float)imageHeight / O_TILE_WIDTH));
dim3 dimBlock(BLOCK_WIDTH, BLOCK_WIDTH, 1);
convolution <<<dimGrid, dimBlock>>> (deviceInputImageData, deviceMaskData, deviceOutputImageData, imageChannels, imageWidth, imageHeight);
cudaDeviceSynchronize();
wbTime_stop(Compute, "Doing the computation on the GPU");
wbTime_start(Copy, "Copying data from the GPU");
//@@ INSERT CODE HERE
//copy results from device to host
cudaMemcpy(hostOutputImageData, deviceOutputImageData, imageWidth * imageHeight * imageChannels * sizeof(float), cudaMemcpyDeviceToHost);
wbTime_stop(Copy, "Copying data from the GPU");
wbTime_stop(GPU, "Doing GPU Computation (memory + compute)");
wbSolution(arg, outputImage);
//@@ INSERT CODE HERE
//deallocate device memory
cudaFree(deviceInputImageData);
cudaFree(deviceOutputImageData);
cudaFree(deviceMaskData);
free(hostMaskData);
wbImage_delete(outputImage);
wbImage_delete(inputImage);
return 0;
}
|
5be5ad7346371e449df21d20096e57b5ea960f73.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <cstdlib>
using namespace std;
#include <sys/time.h>
#include <hip/hip_runtime.h>
#include <unistd.h>
#include <stdlib.h>
union FP32
{
unsigned int i;
float f;
};
__global__ void mma_test(float* d, float* a, float* b, float* c){
float c_array[4];
float d_array[4];
float a_array[4];
float b_array[2];
for (int i = 0; i < 4; ++i) {
a_array[i] = a[i];
c_array[i] = c[i];
d_array[i] = d[i];
if(i<2){
b_array[i] =b[i];
}
}
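// Fragment sizes for mma.sync m16n8k8 with tf32 operands: per warp lane, A contributes
// 4 registers, B contributes 2, and the C/D accumulators use 4, matching the arrays above.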
asm volatile(
"mma.sync.aligned.m16n8k8.row.col.f32.tf32.tf32.f32 {%0, %1, %2, %3}, {%4, %5, %6, %7}, {%8, %9}, {%10, %11, %12, %13};\n\t"
: "+f"(d_array[0]), "+f"(d_array[1]), "+f"(d_array[2]), "+f"(d_array[3]) : "f"(a_array[0]), "f"(a_array[1]), "f"(a_array[2]), "f"(a_array[3]), "f"(b_array[0]), "f"(b_array[1]) , "f"(c_array[0]), "f"(c_array[1]), "f"(c_array[2]), "f"(c_array[3]));
}
//__global__ void mma_test(float* d, float* a, float* b, float* c){
// asm volatile(
// "ld.param.u64 %rd1, [_Z4testPfS_S_S__param_0];\n\t"
// ".reg .b32 a<4>, b<4>, c<8>,d<8>;\n\t"
// "wmma.load.a.sync.aligned.m16n16k8.global.row.tf32 {a0, a1, a2, a3}, [%1];\n\t"
// "wmma.load.b.sync.aligned.m16n16k8.global.col.tf32 {b0, b1, b2, b3}, [%2];\n\t"
// "wmma.load.c.sync.aligned.m16n16k8.global.row.f32 {c0, c1, c2, c3, c4, c5, c6, c7}, [%3];\n\t"
// "mma.sync.aligned.m16n8k8.row.col.f32.tf32.tf32.f32 {%0, %1, %2, %3}, {%4, %5, %6, %7}, {%8, %9}, {%10, %11, %12, %13};\n\t"
// "wmma.mma.sync.aligned.m16n16k8.row.col.f32.tf32.tf32.f32 {d0,d1,d2,d3,d4,d5,d6,d7}, {a0, a1, a2, a3}, {b0, b1, b2, b3}, {c0, c1, c2, c3, c4, c5, c6, c7};\n\t"
// "wmma.store.d.sync.aligned.m16n16k8.global.row.f32 [%0], {d0,d1,d2,d3,d4,d5,d6,d7};" : "=l"(d): "l"(a), "l"(b), "l"(c));
//}
void InitZero(float * a, const int n) {
for ( int i = 0; i < n; i++ ) {
a[i] = 0.0;
}
}
void InitOne(float * a, const int n) {
for ( int i = 0; i < n; i++ ) {
a[i] = 1.0;
}
}
void Init(float * a, const int n) {
for ( int i = 0; i < n; i++ ) {
a[i] = 1.0 * float(i);
}
}
void Init_3f800000(float * a, const int n) {
for ( int i = 0; i < n; i++ ) {
FP32 fp32;
fp32.i = 0x3f800000;
a[i] = fp32.f;
}
}
void show(float * a, const int n) {
std::cout << std::endl;
for ( int i=0; i<n; i++){
std::cout<<a[i] << std::endl;
}
std::cout << std::endl;
}
int main(int argc, char** argv){
int size = 256;
float* host_a=(float*)malloc(sizeof(float) * size/2);
float* host_b=(float*)malloc(sizeof(float) * size/2);
float* host_c=(float*)malloc(sizeof(float) * size);
float* host_d=(float*)malloc(sizeof(float) * size);
float* device_a=NULL;
float* device_b=NULL;
float* device_c=NULL;
float* device_d=NULL;
hipMalloc((void**)(&device_a), sizeof(float) * size/2);
hipMalloc((void**)(&device_b), sizeof(float) * size/2);
hipMalloc((void**)(&device_c), sizeof(float) * size);
hipMalloc((void**)(&device_d), sizeof(float) * size);
InitZero(host_a, size/2);
Init_3f800000(host_b, size/2);
InitZero(host_c, size);
InitZero(host_d, size);
FP32 fp32;
fp32.i = 0x3f800000; host_a[0]=fp32.f;
fp32.i = 0x4b000000; host_a[1]=fp32.f;
hipMemcpy((void*)device_a, (void*)host_a, sizeof(float)* size/2, hipMemcpyHostToDevice);
hipMemcpy((void*)device_b, (void*)host_b, sizeof(float)* size/2, hipMemcpyHostToDevice);
hipMemcpy((void*)device_c, (void*)host_c, sizeof(float)* size, hipMemcpyHostToDevice);
hipMemcpy((void*)device_d, (void*)host_d, sizeof(float)* size, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( mma_test), dim3(1),dim3(32), 0, 0, device_d, device_a, device_b, device_c);
hipDeviceSynchronize();
hipMemcpy((void*)host_d, (void*)device_d, sizeof(float) * size, hipMemcpyDeviceToHost);
hipDeviceSynchronize();
fp32.f=host_d[0];
//std::cout<< host_d[0] << std::endl;
std::cout<< hex << fp32.i << std::endl;
//show(host_d, size);
}
|
5be5ad7346371e449df21d20096e57b5ea960f73.cu
|
#include <iostream>
#include <cstdlib>
using namespace std;
#include <sys/time.h>
#include <cuda.h>
#include <unistd.h>
#include <stdlib.h>
union FP32
{
unsigned int i;
float f;
};
__global__ void mma_test(float* d, float* a, float* b, float* c){
float c_array[4];
float d_array[4];
float a_array[4];
float b_array[2];
for (int i = 0; i < 4; ++i) {
a_array[i] = a[i];
c_array[i] = c[i];
d_array[i] = d[i];
if(i<2){
b_array[i] =b[i];
}
}
asm volatile(
"mma.sync.aligned.m16n8k8.row.col.f32.tf32.tf32.f32 {%0, %1, %2, %3}, {%4, %5, %6, %7}, {%8, %9}, {%10, %11, %12, %13};\n\t"
: "+f"(d_array[0]), "+f"(d_array[1]), "+f"(d_array[2]), "+f"(d_array[3]) : "f"(a_array[0]), "f"(a_array[1]), "f"(a_array[2]), "f"(a_array[3]), "f"(b_array[0]), "f"(b_array[1]) , "f"(c_array[0]), "f"(c_array[1]), "f"(c_array[2]), "f"(c_array[3]));
}
//__global__ void mma_test(float* d, float* a, float* b, float* c){
// asm volatile(
// "ld.param.u64 %rd1, [_Z4testPfS_S_S__param_0];\n\t"
// ".reg .b32 a<4>, b<4>, c<8>,d<8>;\n\t"
// "wmma.load.a.sync.aligned.m16n16k8.global.row.tf32 {a0, a1, a2, a3}, [%1];\n\t"
// "wmma.load.b.sync.aligned.m16n16k8.global.col.tf32 {b0, b1, b2, b3}, [%2];\n\t"
// "wmma.load.c.sync.aligned.m16n16k8.global.row.f32 {c0, c1, c2, c3, c4, c5, c6, c7}, [%3];\n\t"
// "mma.sync.aligned.m16n8k8.row.col.f32.tf32.tf32.f32 {%0, %1, %2, %3}, {%4, %5, %6, %7}, {%8, %9}, {%10, %11, %12, %13};\n\t"
// "wmma.mma.sync.aligned.m16n16k8.row.col.f32.tf32.tf32.f32 {d0,d1,d2,d3,d4,d5,d6,d7}, {a0, a1, a2, a3}, {b0, b1, b2, b3}, {c0, c1, c2, c3, c4, c5, c6, c7};\n\t"
// "wmma.store.d.sync.aligned.m16n16k8.global.row.f32 [%0], {d0,d1,d2,d3,d4,d5,d6,d7};" : "=l"(d): "l"(a), "l"(b), "l"(c));
//}
void InitZero(float * a, const int n) {
for ( int i = 0; i < n; i++ ) {
a[i] = 0.0;
}
}
void InitOne(float * a, const int n) {
for ( int i = 0; i < n; i++ ) {
a[i] = 1.0;
}
}
void Init(float * a, const int n) {
for ( int i = 0; i < n; i++ ) {
a[i] = 1.0 * float(i);
}
}
void Init_3f800000(float * a, const int n) {
for ( int i = 0; i < n; i++ ) {
FP32 fp32;
fp32.i = 0x3f800000;
a[i] = fp32.f;
}
}
void show(float * a, const int n) {
std::cout << std::endl;
for ( int i=0; i<n; i++){
std::cout<<a[i] << std::endl;
}
std::cout << std::endl;
}
int main(int argc, char** argv){
int size = 256;
float* host_a=(float*)malloc(sizeof(float) * size/2);
float* host_b=(float*)malloc(sizeof(float) * size/2);
float* host_c=(float*)malloc(sizeof(float) * size);
float* host_d=(float*)malloc(sizeof(float) * size);
float* device_a=NULL;
float* device_b=NULL;
float* device_c=NULL;
float* device_d=NULL;
cudaMalloc((void**)(&device_a), sizeof(float) * size/2);
cudaMalloc((void**)(&device_b), sizeof(float) * size/2);
cudaMalloc((void**)(&device_c), sizeof(float) * size);
cudaMalloc((void**)(&device_d), sizeof(float) * size);
InitZero(host_a, size/2);
Init_3f800000(host_b, size/2);
InitZero(host_c, size);
InitZero(host_d, size);
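// The first two A operands are seeded with raw bit patterns: 0x3f800000 is 1.0f and
// 0x4b000000 is 2^23 (8388608.0f); the hex print of d[0] at the end presumably serves to
// inspect how the TF32 multiply-accumulate rounds this combination.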
FP32 fp32;
fp32.i = 0x3f800000; host_a[0]=fp32.f;
fp32.i = 0x4b000000; host_a[1]=fp32.f;
cudaMemcpy((void*)device_a, (void*)host_a, sizeof(float)* size/2, cudaMemcpyHostToDevice);
cudaMemcpy((void*)device_b, (void*)host_b, sizeof(float)* size/2, cudaMemcpyHostToDevice);
cudaMemcpy((void*)device_c, (void*)host_c, sizeof(float)* size, cudaMemcpyHostToDevice);
cudaMemcpy((void*)device_d, (void*)host_d, sizeof(float)* size, cudaMemcpyHostToDevice);
mma_test<<<1,32>>>(device_d, device_a, device_b, device_c);
cudaDeviceSynchronize();
cudaMemcpy((void*)host_d, (void*)device_d, sizeof(float) * size, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
fp32.f=host_d[0];
//std::cout<< host_d[0] << std::endl;
std::cout<< hex << fp32.i << std::endl;
//show(host_d, size);
}
|
834fceacafa1cccce36d196f6e667910c7c4ad67.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <functional>
#include <iostream>
#include <random>
#include <vector>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include "parallel_histogram.cuh"
void cpu_histogram(const std::vector<unsigned int> & input, std::vector<unsigned int> & rHistogram, unsigned int binWidth)
{
for (int i = 0; i < input.size(); ++i)
{
int bin_ = input[i] / binWidth;
rHistogram[bin_]++;
}
}
int main(void)
{
std::random_device random_device_;
std::mt19937 generator_(random_device_());
std::uniform_int_distribution<unsigned int> distribution_(0, 255);
const unsigned long kNumElements = 32768;
const unsigned int kNumBytes = kNumElements * sizeof(unsigned int);
const unsigned int kNumBins = 8;
const unsigned int kBinWidth = 32;
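// 8 bins of width 32 exactly cover the generated value range [0, 255].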
std::cout << "Generating random vector in range [0, 255] of " << kNumElements << " elements...\n";
std::vector<unsigned int> h_input_(kNumElements);
for (int i = 0; i < h_input_.size(); ++i)
h_input_[i] = distribution_(generator_);
// --- CPU ---------------------------------------------------------------- //
std::cout << "Executing histogram in CPU...\n";
std::vector<unsigned int> h_histogram_(kNumBins);
cpu_histogram(h_input_, h_histogram_, kBinWidth);
std::cout << "Result is: \n";
for (int i = 0; i < kNumBins; ++i)
std::cout << "[" << i << "]:" << h_histogram_[i] << " ";
std::cout << "\n";
// --- GPU ---------------------------------------------------------------- //
std::cout << "Executing histogram in GPU...\n";
const int threads_per_block_ = 1024;
const int blocks_per_grid_ = kNumElements / threads_per_block_;
hipSetDevice(0);
unsigned int *d_input_;
unsigned int *d_histogram_;
std::vector<int> h_dhistogram_(kNumBins);
hipMalloc((void**)&d_input_, kNumBytes);
hipMalloc((void**)&d_histogram_, sizeof(int) * kNumBins); // Overallocated
hipMemcpy(d_input_, h_input_.data(), kNumBytes, hipMemcpyHostToDevice);
dim3 tpb_(threads_per_block_, 1, 1);
dim3 bpg_(blocks_per_grid_, 1, 1);
std::cout << "Threads Per Block: " << tpb_.x << "\n";
std::cout << "Blocks Per Grid: " << bpg_.x << "\n";
// Naive GPU implementation
//gpu_histogram_naive<<<bpg_, tpb_>>>(d_input_, d_histogram_, kBinWidth);
// Atomic GPU implementation
//gpu_histogram_atomic<<<bpg_, tpb_>>>(d_input_, d_histogram_, kBinWidth);
// Strided GPU implementation
dim3 tpb_strided_(threads_per_block_, 1, 1);
dim3 bpg_strided_(256, 1, 1);
//gpu_histogram_atomic_strided<<<bpg_strided_, tpb_strided_>>>(d_input_, d_histogram_, kBinWidth, kNumElements);
// Strided privatized GPU implementation
hipLaunchKernelGGL(( gpu_histogram_atomic_strided_privatized), dim3(bpg_strided_), dim3(tpb_strided_), kNumBins * sizeof(unsigned int), 0, d_input_, d_histogram_, kNumBins, kBinWidth, kNumElements);
hipMemcpy(h_dhistogram_.data(), d_histogram_, sizeof(int) * kNumBins, hipMemcpyDeviceToHost);
hipFree(d_input_);
hipFree(d_histogram_);
hipDeviceReset();
std::cout << "Result is: \n";
for (int i = 0; i < kNumBins; ++i)
std::cout << "[" << i << "]:" << h_dhistogram_[i] << " ";
std::cout << "\n";
}
|
834fceacafa1cccce36d196f6e667910c7c4ad67.cu
|
#include <functional>
#include <iostream>
#include <random>
#include <vector>
#include <cuda.h>
#include <cuda_runtime_api.h>
#include "parallel_histogram.cuh"
void cpu_histogram(const std::vector<unsigned int> & input, std::vector<unsigned int> & rHistogram, unsigned int binWidth)
{
for (int i = 0; i < input.size(); ++i)
{
int bin_ = input[i] / binWidth;
rHistogram[bin_]++;
}
}
int main(void)
{
std::random_device random_device_;
std::mt19937 generator_(random_device_());
std::uniform_int_distribution<unsigned int> distribution_(0, 255);
const unsigned long kNumElements = 32768;
const unsigned int kNumBytes = kNumElements * sizeof(unsigned int);
const unsigned int kNumBins = 8;
const unsigned int kBinWidth = 32;
std::cout << "Generating random vector in range [0, 255] of " << kNumElements << " elements...\n";
std::vector<unsigned int> h_input_(kNumElements);
for (int i = 0; i < h_input_.size(); ++i)
h_input_[i] = distribution_(generator_);
// --- CPU ---------------------------------------------------------------- //
std::cout << "Executing histogram in CPU...\n";
std::vector<unsigned int> h_histogram_(kNumBins);
cpu_histogram(h_input_, h_histogram_, kBinWidth);
std::cout << "Result is: \n";
for (int i = 0; i < kNumBins; ++i)
std::cout << "[" << i << "]:" << h_histogram_[i] << " ";
std::cout << "\n";
// --- GPU ---------------------------------------------------------------- //
std::cout << "Executing histogram in GPU...\n";
const int threads_per_block_ = 1024;
const int blocks_per_grid_ = kNumElements / threads_per_block_;
cudaSetDevice(0);
unsigned int *d_input_;
unsigned int *d_histogram_;
std::vector<int> h_dhistogram_(kNumBins);
cudaMalloc((void**)&d_input_, kNumBytes);
cudaMalloc((void**)&d_histogram_, sizeof(int) * kNumBins); // Overallocated
cudaMemcpy(d_input_, h_input_.data(), kNumBytes, cudaMemcpyHostToDevice);
dim3 tpb_(threads_per_block_, 1, 1);
dim3 bpg_(blocks_per_grid_, 1, 1);
std::cout << "Threads Per Block: " << tpb_.x << "\n";
std::cout << "Blocks Per Grid: " << bpg_.x << "\n";
// Naive GPU implementation
//gpu_histogram_naive<<<bpg_, tpb_>>>(d_input_, d_histogram_, kBinWidth);
// Atomic GPU implementation
//gpu_histogram_atomic<<<bpg_, tpb_>>>(d_input_, d_histogram_, kBinWidth);
// Strided GPU implementation
dim3 tpb_strided_(threads_per_block_, 1, 1);
dim3 bpg_strided_(256, 1, 1);
//gpu_histogram_atomic_strided<<<bpg_strided_, tpb_strided_>>>(d_input_, d_histogram_, kBinWidth, kNumElements);
// Strided privatized GPU implementation
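// The third launch argument reserves kNumBins counters of dynamic shared memory for the
// per-block private histogram used by the privatized kernel.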
gpu_histogram_atomic_strided_privatized<<<bpg_strided_, tpb_strided_, kNumBins * sizeof(unsigned int)>>>(d_input_, d_histogram_, kNumBins, kBinWidth, kNumElements);
cudaMemcpy(h_dhistogram_.data(), d_histogram_, sizeof(int) * kNumBins, cudaMemcpyDeviceToHost);
cudaFree(d_input_);
cudaFree(d_histogram_);
cudaDeviceReset();
std::cout << "Result is: \n";
for (int i = 0; i < kNumBins; ++i)
std::cout << "[" << i << "]:" << h_dhistogram_[i] << " ";
std::cout << "\n";
}
|
977ba422287748eda85bb5e54479dcdc20ef8c9d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "shadertoy.cuh"
extern "C" __global__ void mainImage()
{
float2 fragCoord = calcFragCoord();
float u = fragCoord.x / iResolution.x;
float v = fragCoord.y / iResolution.y;
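// u and v are the fragment coordinates normalized to [0, 1].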
if ((fragCoord.x < iResolution.x) && (fragCoord.y < iResolution.y))
{
uchar4* fragColor = calcFragColor(fragCoord);
*fragColor = make_uchar4(u * 255, v * 255, 122 + 122 * sin(iGlobalTime), 255);
}
}
|
977ba422287748eda85bb5e54479dcdc20ef8c9d.cu
|
#include "shadertoy.cuh"
extern "C" __global__ void mainImage()
{
float2 fragCoord = calcFragCoord();
float u = fragCoord.x / iResolution.x;
float v = fragCoord.y / iResolution.y;
if ((fragCoord.x < iResolution.x) && (fragCoord.y < iResolution.y))
{
uchar4* fragColor = calcFragColor(fragCoord);
*fragColor = make_uchar4(u * 255, v * 255, 122 + 122 * sin(iGlobalTime), 255);
}
}
|
a26cbc60c75c7f545b95bd54756a7c7edfd678df.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Device.h"
#include <iostream>
#include "MathTools.h"
#include "cudaTools.h"
#include "hiprand/hiprand.h"
#include "hiprand/hiprand_kernel.h"
#include <omp.h>
#include "Indice1D.h"
__host__ static float resultatTheorique();
__host__ static void runGPU(float*, long n);
__global__ static void kernelMonteCarlo(hiprandState_t*, float* ptrDevOut, long n);
__global__ static void setup_kernel_rand(hiprandState_t* tabGeneratorThread, int deviceId);
__device__ static int work(float x, float y, float dx);
__device__ static float f(float x);
__device__ static void reductionIntraBlock(int* tabSM, long n);
__device__ static void reductionInterBlock(int* tabSM, float*);
__device__ __host__ static float getXmin();
__device__ __host__ static float getXmax();
__device__ __host__ static float getYmin();
__device__ __host__ static float getYmax();
__host__ bool useMonteCarloMultiOMP(long n)
{
float resEmpirique = 0;
float resTheorique = resultatTheorique();
runGPU(&resEmpirique, n);
bool resultat = MathTools::isEquals(resTheorique, resEmpirique, (float) 1e-4);
std::cout << "Rsultat thorique : " << resTheorique << std::endl;
std::cout << "Rsultat empirique : " << resEmpirique << std::endl;
std::cout << resultat << std::endl;
return resultat;
}
__host__ void runGPU(float* ptrOut, long n)
{
dim3 dg = dim3(256, 1, 1);
dim3 db = dim3(256, 1, 1);
Device::assertDim(dg, db);
int nbDevice = Device::getDeviceCount();
float result[nbDevice];
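// One OpenMP host thread per GPU: each device processes n/nbDevice samples (the last one
// also takes the remainder) and writes its partial hit count into result[deviceId].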
#pragma omp parallel for
for (int deviceId = 0; deviceId < nbDevice; ++deviceId)
{
HANDLE_ERROR(hipSetDevice(deviceId));
hiprandState_t* ptrDevGenerators;
float* ptrDevOut;
HANDLE_ERROR(hipMalloc((void** )&ptrDevGenerators, db.x * sizeof(hiprandState_t*)));
HANDLE_ERROR(hipMalloc((void** )&ptrDevOut, sizeof(float)));
HANDLE_ERROR(hipMemset(ptrDevOut, 0, sizeof(float)));
hipLaunchKernelGGL(( setup_kernel_rand), dim3(dg), dim3(db), 0, 0, ptrDevGenerators, deviceId);
hipLaunchKernelGGL(( kernelMonteCarlo), dim3(dg), dim3(db), db.x*sizeof(int), 0, ptrDevGenerators, ptrDevOut, deviceId < nbDevice -1 ? n/nbDevice : n/nbDevice+n%nbDevice);
HANDLE_ERROR(hipMemcpy(&result[deviceId], ptrDevOut, sizeof(float), hipMemcpyDeviceToHost));
HANDLE_ERROR(hipFree(ptrDevOut));
HANDLE_ERROR(hipFree(ptrDevGenerators));
}
*ptrOut = 0;
for (int i = 0; i < nbDevice; ++i)
*ptrOut += result[i];
*ptrOut = 2.0 * *ptrOut / (float) n * (getXmax() - getXmin()) * getYmax();
}
__global__ void setup_kernel_rand(hiprandState_t* tabGeneratorThread, int deviceId)
{
int tid = threadIdx.x;
int deltaSeed = deviceId * INT_MAX;
int deltaSequence = deviceId * 100;
int deltaOffset = deviceId * 100;
int seed = 1234 + deltaSeed;
int sequenceNumber = tid + deltaSequence;
int offset = deltaOffset;
hiprand_init(seed, sequenceNumber, offset, &tabGeneratorThread[tid]);
}
__global__ void kernelMonteCarlo(hiprandState_t* ptrDevGenerators, float* ptrDevOut, long n)
{
extern __shared__ int tabForBlock[];
int tid = Indice1D::tid();
const int NB_THREAD = Indice1D::nbThread();
hiprandState_t localState = ptrDevGenerators[threadIdx.x];
long s = tid;
int sum = 0;
float dx = (getXmax() - getXmin()) / (float) (NB_THREAD);
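// Grid-stride loop: each thread tests samples s, s + NB_THREAD, s + 2*NB_THREAD, ...
// and counts how many random points fall under the curve.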
while (s < n)
{
sum += work(hiprand_uniform(&localState), hiprand_uniform(&localState), dx);
s += NB_THREAD;
}
tabForBlock[threadIdx.x] = sum;
__syncthreads();
reductionIntraBlock(tabForBlock, blockDim.x);
reductionInterBlock(tabForBlock, ptrDevOut);
}
__device__ void reductionIntraBlock(int* tabSM, long n)
{
long moitie = n / 2;
while (moitie >= 1)
{
int tid = threadIdx.x;
if (tid < moitie)
tabSM[tid] += tabSM[tid + moitie];
moitie /= 2;
__syncthreads();
}
}
__device__ void reductionInterBlock(int* tabSM, float* ptrDevOut)
{
if (threadIdx.x == 0)
atomicAdd(ptrDevOut, tabSM[0]);
}
__device__ int work(float x, float y, float dx)
{
float finalY = (getYmax() - getYmin()) * y + getYmin();
float minX = getXmin() + dx * (threadIdx.x + gridDim.x * blockIdx.x);
float maxX = minX + dx;
float finalX = (maxX - minX) * x + minX; // place the uniform sample x inside this thread's x-strip
return finalY <= f(finalX) ? 1 : 0;
}
__host__ float resultatTheorique()
{
return 3.1415926535897932384626433832;
}
__device__ __host__ float getXmin()
{
return -1.0;
}
__device__ __host__ float getXmax()
{
return 1.0;
}
__device__ __host__ float getYmin()
{
return 0.0;
}
__device__ __host__ float getYmax()
{
return 1.0;
}
__device__ float f(float x)
{
return sqrt(1 - x * x);
}
|
a26cbc60c75c7f545b95bd54756a7c7edfd678df.cu
|
#include "Device.h"
#include <iostream>
#include "MathTools.h"
#include "cudaTools.h"
#include "curand.h"
#include "curand_kernel.h"
#include <omp.h>
#include "Indice1D.h"
__host__ static float resultatTheorique();
__host__ static void runGPU(float*, long n);
__global__ static void kernelMonteCarlo(curandState*, float* ptrDevOut, long n);
__global__ static void setup_kernel_rand(curandState* tabGeneratorThread, int deviceId);
__device__ static int work(float x, float y, float dx);
__device__ static float f(float x);
__device__ static void reductionIntraBlock(int* tabSM, long n);
__device__ static void reductionInterBlock(int* tabSM, float*);
__device__ __host__ static float getXmin();
__device__ __host__ static float getXmax();
__device__ __host__ static float getYmin();
__device__ __host__ static float getYmax();
__host__ bool useMonteCarloMultiOMP(long n)
{
float resEmpirique = 0;
float resTheorique = resultatTheorique();
runGPU(&resEmpirique, n);
bool resultat = MathTools::isEquals(resTheorique, resEmpirique, (float) 1e-4);
std::cout << "Résultat théorique : " << resTheorique << std::endl;
std::cout << "Résultat empirique : " << resEmpirique << std::endl;
std::cout << resultat << std::endl;
return resultat;
}
__host__ void runGPU(float* ptrOut, long n)
{
dim3 dg = dim3(256, 1, 1);
dim3 db = dim3(256, 1, 1);
Device::assertDim(dg, db);
int nbDevice = Device::getDeviceCount();
float result[nbDevice];
#pragma omp parallel for
for (int deviceId = 0; deviceId < nbDevice; ++deviceId)
{
HANDLE_ERROR(cudaSetDevice(deviceId));
curandState* ptrDevGenerators;
float* ptrDevOut;
HANDLE_ERROR(cudaMalloc((void** )&ptrDevGenerators, db.x * sizeof(curandState*)));
HANDLE_ERROR(cudaMalloc((void** )&ptrDevOut, sizeof(float)));
HANDLE_ERROR(cudaMemset(ptrDevOut, 0, sizeof(float)));
setup_kernel_rand<<<dg, db>>>(ptrDevGenerators, deviceId);
kernelMonteCarlo<<<dg, db, db.x*sizeof(int)>>>(ptrDevGenerators, ptrDevOut, deviceId < nbDevice -1 ? n/nbDevice : n/nbDevice+n%nbDevice);
HANDLE_ERROR(cudaMemcpy(&result[deviceId], ptrDevOut, sizeof(float), cudaMemcpyDeviceToHost));
HANDLE_ERROR(cudaFree(ptrDevOut));
HANDLE_ERROR(cudaFree(ptrDevGenerators));
}
*ptrOut = 0;
for (int i = 0; i < nbDevice; ++i)
*ptrOut += result[i];
*ptrOut = 2.0 * *ptrOut / (float) n * (getXmax() - getXmin()) * getYmax();
}
__global__ void setup_kernel_rand(curandState* tabGeneratorThread, int deviceId)
{
int tid = threadIdx.x;
int deltaSeed = deviceId * INT_MAX;
int deltaSequence = deviceId * 100;
int deltaOffset = deviceId * 100;
int seed = 1234 + deltaSeed;
int sequenceNumber = tid + deltaSequence;
int offset = deltaOffset;
curand_init(seed, sequenceNumber, offset, &tabGeneratorThread[tid]);
}
__global__ void kernelMonteCarlo(curandState* ptrDevGenerators, float* ptrDevOut, long n)
{
extern __shared__ int tabForBlock[];
int tid = Indice1D::tid();
const int NB_THREAD = Indice1D::nbThread();
curandState localState = ptrDevGenerators[threadIdx.x];
long s = tid;
int sum = 0;
float dx = (getXmax() - getXmin()) / (float) (NB_THREAD);
while (s < n)
{
sum += work(curand_uniform(&localState), curand_uniform(&localState), dx);
s += NB_THREAD;
}
tabForBlock[threadIdx.x] = sum;
__syncthreads();
reductionIntraBlock(tabForBlock, blockDim.x);
reductionInterBlock(tabForBlock, ptrDevOut);
}
__device__ void reductionIntraBlock(int* tabSM, long n)
{
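// Shared-memory tree reduction: the lower half of the threads repeatedly folds the upper
// half into itself; this assumes n (blockDim.x) is a power of two.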
long moitie = n / 2;
while (moitie >= 1)
{
int tid = threadIdx.x;
if (tid < moitie)
tabSM[tid] += tabSM[tid + moitie];
moitie /= 2;
__syncthreads();
}
}
__device__ void reductionInterBlock(int* tabSM, float* ptrDevOut)
{
if (threadIdx.x == 0)
atomicAdd(ptrDevOut, tabSM[0]);
}
__device__ int work(float x, float y, float dx)
{
float finalY = (getYmax() - getYmin()) * y + getYmin();
float minX = getXmin() + dx * (threadIdx.x + gridDim.x * blockIdx.x);
float maxX = minX + dx;
float finalX = (maxX - minX) * x + minX; // place the uniform sample x inside this thread's x-strip
return finalY <= f(finalX) ? 1 : 0;
}
__host__ float resultatTheorique()
{
return 3.1415926535897932384626433832;
}
__device__ __host__ float getXmin()
{
return -1.0;
}
__device__ __host__ float getXmax()
{
return 1.0;
}
__device__ __host__ float getYmin()
{
return 0.0;
}
__device__ __host__ float getYmax()
{
return 1.0;
}
__device__ float f(float x)
{
return sqrt(1 - x * x);
}
|
847e302eed277cd6ab29acfea0191a8d0b84c22c.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "pca.cuh"
#include <cuml/decomposition/pca.hpp>
#include <cuml/decomposition/pca_mg.hpp>
#include <cuml/decomposition/sign_flip_mg.hpp>
#include <cumlprims/opg/linalg/qr_based_svd.hpp>
#include <cumlprims/opg/matrix/matrix_utils.hpp>
#include <cumlprims/opg/stats/cov.hpp>
#include <cumlprims/opg/stats/mean.hpp>
#include <cumlprims/opg/stats/mean_center.hpp>
#include <raft/core/comms.hpp>
#include <raft/core/handle.hpp>
#include <raft/linalg/transpose.cuh>
#include <raft/matrix/math.cuh>
#include <raft/stats/mean_center.cuh>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
#include <cstddef>
using namespace MLCommon;
namespace ML {
namespace PCA {
namespace opg {
template <typename T>
void fit_impl(raft::handle_t& handle,
std::vector<Matrix::Data<T>*>& input_data,
Matrix::PartDescriptor& input_desc,
T* components,
T* explained_var,
T* explained_var_ratio,
T* singular_vals,
T* mu,
T* noise_vars,
paramsPCAMG prms,
hipStream_t* streams,
std::uint32_t n_streams,
bool verbose)
{
const auto& comm = handle.get_comms();
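// Covariance-eigen path: compute the per-column means across all partitions, form the
// covariance matrix, eigendecompose it for the leading components, and derive the singular
// values from the explained variances scaled by (n_rows - 1); mean_add at the end restores
// the input data that was centered along the way.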
Matrix::Data<T> mu_data{mu, prms.n_cols};
Stats::opg::mean(handle, mu_data, input_data, input_desc, streams, n_streams);
rmm::device_uvector<T> cov_data(prms.n_cols * prms.n_cols, streams[0]);
auto cov_data_size = cov_data.size();
Matrix::Data<T> cov{cov_data.data(), cov_data_size};
Stats::opg::cov(handle, cov, input_data, input_desc, mu_data, true, streams, n_streams);
ML::truncCompExpVars<T, mg_solver>(
handle, cov.ptr, components, explained_var, explained_var_ratio, prms, streams[0]);
T scalar = (prms.n_rows - 1);
raft::matrix::seqRoot(explained_var, singular_vals, scalar, prms.n_components, streams[0], true);
Stats::opg::mean_add(input_data, input_desc, mu_data, comm, streams, n_streams);
}
/**
* @brief performs MNMG fit operation for the pca
* @input param handle: the internal cuml handle object
 * @input param input_data: input data partitions owned by this rank
 * @input param input_desc: describes how the input data is partitioned across the ranks
* @input param components: principal components of the input data
* @output param explained_var: explained var
* @output param explained_var_ratio: the explained var ratio
* @output param singular_vals: singular values of the data
* @output param mu: mean of every column in input
* @output param noise_vars: variance of the noise
* @input param prms: data structure that includes all the parameters from input size to algorithm
* @input param verbose
*/
template <typename T>
void fit_impl(raft::handle_t& handle,
std::vector<Matrix::Data<T>*>& input_data,
Matrix::PartDescriptor& input_desc,
T* components,
T* explained_var,
T* explained_var_ratio,
T* singular_vals,
T* mu,
T* noise_vars,
paramsPCAMG prms,
bool verbose)
{
int rank = handle.get_comms().get_rank();
// TODO: These streams should come from raft::handle_t
// Reference issue https://github.com/rapidsai/cuml/issues/2470
auto n_streams = input_desc.blocksOwnedBy(rank).size();
hipStream_t streams[n_streams];
for (std::uint32_t i = 0; i < n_streams; i++) {
RAFT_CUDA_TRY(hipStreamCreate(&streams[i]));
}
if (prms.algorithm == mg_solver::COV_EIG_JACOBI || prms.algorithm == mg_solver::COV_EIG_DQ) {
fit_impl(handle,
input_data,
input_desc,
components,
explained_var,
explained_var_ratio,
singular_vals,
mu,
noise_vars,
prms,
streams,
n_streams,
verbose);
for (std::uint32_t i = 0; i < n_streams; i++) {
handle.sync_stream(streams[i]);
}
} else if (prms.algorithm == mg_solver::QR) {
const raft::handle_t& h = handle;
hipStream_t stream = h.get_stream();
const auto& comm = h.get_comms();
// Center the data
Matrix::Data<T> mu_data{mu, prms.n_cols};
Stats::opg::mean(handle, mu_data, input_data, input_desc, streams, n_streams);
Stats::opg::mean_center(input_data, input_desc, mu_data, comm, streams, n_streams);
for (std::uint32_t i = 0; i < n_streams; i++) {
handle.sync_stream(streams[i]);
}
// Allocate Q, S and V and call QR
std::vector<Matrix::Data<T>*> uMatrixParts;
Matrix::opg::allocate(h, uMatrixParts, input_desc, rank, stream);
rmm::device_uvector<T> sVector(prms.n_cols, stream);
rmm::device_uvector<T> vMatrix(prms.n_cols * prms.n_cols, stream);
RAFT_CUDA_TRY(hipMemset(vMatrix.data(), 0, prms.n_cols * prms.n_cols * sizeof(T)));
LinAlg::opg::svdQR(h,
sVector.data(),
uMatrixParts,
vMatrix.data(),
true,
true,
prms.tol,
prms.n_iterations,
input_data,
input_desc,
rank);
// sign flip
sign_flip(handle, uMatrixParts, input_desc, vMatrix.data(), prms.n_cols, streams, n_streams);
// Calculate instance variables
rmm::device_uvector<T> explained_var_all(prms.n_cols, stream);
rmm::device_uvector<T> explained_var_ratio_all(prms.n_cols, stream);
T scalar = 1.0 / (prms.n_rows - 1);
raft::matrix::power(sVector.data(), explained_var_all.data(), scalar, prms.n_cols, stream);
raft::matrix::ratio(
handle, explained_var_all.data(), explained_var_ratio_all.data(), prms.n_cols, stream);
raft::matrix::truncZeroOrigin(
sVector.data(), prms.n_cols, singular_vals, prms.n_components, std::size_t(1), stream);
raft::matrix::truncZeroOrigin(explained_var_all.data(),
prms.n_cols,
explained_var,
prms.n_components,
std::size_t(1),
stream);
raft::matrix::truncZeroOrigin(explained_var_ratio_all.data(),
prms.n_cols,
explained_var_ratio,
prms.n_components,
std::size_t(1),
stream);
raft::linalg::transpose(vMatrix.data(), prms.n_cols, stream);
raft::matrix::truncZeroOrigin(
vMatrix.data(), prms.n_cols, components, prms.n_components, prms.n_cols, stream);
Matrix::opg::deallocate(h, uMatrixParts, input_desc, rank, stream);
// Re-add mean to centered data
Stats::opg::mean_add(input_data, input_desc, mu_data, comm, streams, n_streams);
}
for (std::uint32_t i = 0; i < n_streams; i++) {
handle.sync_stream(streams[i]);
}
for (std::uint32_t i = 0; i < n_streams; i++) {
RAFT_CUDA_TRY(hipStreamDestroy(streams[i]));
}
}
template <typename T>
void transform_impl(raft::handle_t& handle,
std::vector<Matrix::Data<T>*>& input,
const Matrix::PartDescriptor input_desc,
T* components,
std::vector<Matrix::Data<T>*>& trans_input,
T* singular_vals,
T* mu,
const paramsPCAMG prms,
hipStream_t* streams,
std::uint32_t n_streams,
bool verbose)
{
std::vector<Matrix::RankSizePair*> local_blocks = input_desc.partsToRanks;
if (prms.whiten) {
T scalar = T(sqrt(prms.n_rows - 1));
raft::linalg::scalarMultiply(
components, components, scalar, prms.n_cols * prms.n_components, streams[0]);
raft::matrix::matrixVectorBinaryDivSkipZero(
components, singular_vals, prms.n_cols, prms.n_components, true, true, streams[0]);
}
for (std::size_t i = 0; i < input.size(); i++) {
auto si = i % n_streams;
raft::stats::meanCenter(input[i]->ptr,
input[i]->ptr,
mu,
prms.n_cols,
local_blocks[i]->size,
false,
true,
streams[si]);
T alpha = T(1);
T beta = T(0);
raft::linalg::gemm(handle,
input[i]->ptr,
local_blocks[i]->size,
prms.n_cols,
components,
trans_input[i]->ptr,
local_blocks[i]->size,
prms.n_components,
HIPBLAS_OP_N,
HIPBLAS_OP_T,
alpha,
beta,
streams[si]);
raft::stats::meanAdd(input[i]->ptr,
input[i]->ptr,
mu,
prms.n_cols,
local_blocks[i]->size,
false,
true,
streams[si]);
}
if (prms.whiten) {
raft::matrix::matrixVectorBinaryMultSkipZero(
components, singular_vals, prms.n_cols, prms.n_components, true, true, streams[0]);
T scalar = T(1 / sqrt(prms.n_rows - 1));
raft::linalg::scalarMultiply(
components, components, scalar, prms.n_cols * prms.n_components, streams[0]);
}
for (std::uint32_t i = 0; i < n_streams; i++) {
handle.sync_stream(streams[i]);
}
}
/**
* @brief performs MNMG transform operation for the pca.
* @input param handle: the internal cuml handle object
* @input param rank_sizes: includes all the partition size information for the rank
* @input param n_parts: number of partitions
* @input param input: input data
* @input param components: principal components of the input data
* @output param trans_input: transformed input data
* @input param singular_vals: singular values of the data
* @input param mu: mean of every column in input
* @input param prms: data structure that includes all the parameters from input size to algorithm
* @input param verbose
*/
template <typename T>
void transform_impl(raft::handle_t& handle,
Matrix::RankSizePair** rank_sizes,
std::uint32_t n_parts,
Matrix::Data<T>** input,
T* components,
Matrix::Data<T>** trans_input,
T* singular_vals,
T* mu,
paramsPCAMG prms,
bool verbose)
{
// We want to update the API of this function, and other functions with
// regards to https://github.com/rapidsai/cuml/issues/2471
int rank = handle.get_comms().get_rank();
std::vector<Matrix::RankSizePair*> ranksAndSizes(rank_sizes, rank_sizes + n_parts);
std::vector<Matrix::Data<T>*> input_data(input, input + n_parts);
Matrix::PartDescriptor input_desc(prms.n_rows, prms.n_cols, ranksAndSizes, rank);
std::vector<Matrix::Data<T>*> trans_data(trans_input, trans_input + n_parts);
// TODO: These streams should come from raft::handle_t
auto n_streams = n_parts;
hipStream_t streams[n_streams];
for (std::uint32_t i = 0; i < n_streams; i++) {
RAFT_CUDA_TRY(hipStreamCreate(&streams[i]));
}
transform_impl(handle,
input_data,
input_desc,
components,
trans_data,
singular_vals,
mu,
prms,
streams,
n_streams,
verbose);
for (std::uint32_t i = 0; i < n_streams; i++) {
handle.sync_stream(streams[i]);
}
for (std::uint32_t i = 0; i < n_streams; i++) {
RAFT_CUDA_TRY(hipStreamDestroy(streams[i]));
}
}
template <typename T>
void inverse_transform_impl(raft::handle_t& handle,
std::vector<Matrix::Data<T>*>& trans_input,
Matrix::PartDescriptor trans_input_desc,
T* components,
std::vector<Matrix::Data<T>*>& input,
T* singular_vals,
T* mu,
paramsPCAMG prms,
hipStream_t* streams,
std::uint32_t n_streams,
bool verbose)
{
std::vector<Matrix::RankSizePair*> local_blocks = trans_input_desc.partsToRanks;
if (prms.whiten) {
T scalar = T(1 / sqrt(prms.n_rows - 1));
raft::linalg::scalarMultiply(
components, components, scalar, prms.n_rows * prms.n_components, streams[0]);
raft::matrix::matrixVectorBinaryMultSkipZero(
components, singular_vals, prms.n_rows, prms.n_components, true, true, streams[0]);
}
for (std::size_t i = 0; i < local_blocks.size(); i++) {
auto si = i % n_streams;
T alpha = T(1);
T beta = T(0);
raft::linalg::gemm(handle,
trans_input[i]->ptr,
local_blocks[i]->size,
prms.n_components,
components,
input[i]->ptr,
local_blocks[i]->size,
prms.n_cols,
HIPBLAS_OP_N,
HIPBLAS_OP_N,
alpha,
beta,
streams[si]);
raft::stats::meanAdd(input[i]->ptr,
input[i]->ptr,
mu,
prms.n_cols,
local_blocks[i]->size,
false,
true,
streams[si]);
}
if (prms.whiten) {
raft::matrix::matrixVectorBinaryDivSkipZero(
components, singular_vals, prms.n_rows, prms.n_components, true, true, streams[0]);
T scalar = T(sqrt(prms.n_rows - 1));
raft::linalg::scalarMultiply(
components, components, scalar, prms.n_rows * prms.n_components, streams[0]);
}
for (std::uint32_t i = 0; i < n_streams; i++) {
handle.sync_stream(streams[i]);
}
}
/**
* @brief performs MNMG inverse transform operation for the pca.
* @input param handle: the internal cuml handle object
* @input param rank_sizes: includes all the partition size information for the rank
* @input param n_parts: number of partitions
* @input param trans_input: transformed input data
* @input param components: principal components of the input data
* @output param input: input data
* @input param singular_vals: singular values of the data
* @input param mu: mean of every column in input
* @input param prms: data structure that includes all the parameters from input size to algorithm
* @input param verbose
*/
template <typename T>
void inverse_transform_impl(raft::handle_t& handle,
Matrix::RankSizePair** rank_sizes,
std::uint32_t n_parts,
Matrix::Data<T>** trans_input,
T* components,
Matrix::Data<T>** input,
T* singular_vals,
T* mu,
paramsPCAMG prms,
bool verbose)
{
int rank = handle.get_comms().get_rank();
std::vector<Matrix::RankSizePair*> ranksAndSizes(rank_sizes, rank_sizes + n_parts);
Matrix::PartDescriptor trans_desc(prms.n_rows, prms.n_components, ranksAndSizes, rank);
std::vector<Matrix::Data<T>*> trans_data(trans_input, trans_input + n_parts);
std::vector<Matrix::Data<T>*> input_data(input, input + n_parts);
// TODO: These streams should come from raft::handle_t
auto n_streams = n_parts;
hipStream_t streams[n_streams];
for (std::uint32_t i = 0; i < n_streams; i++) {
RAFT_CUDA_TRY(hipStreamCreate(&streams[i]));
}
inverse_transform_impl(handle,
trans_data,
trans_desc,
components,
input_data,
singular_vals,
mu,
prms,
streams,
n_streams,
verbose);
for (std::uint32_t i = 0; i < n_streams; i++) {
handle.sync_stream(streams[i]);
}
for (std::uint32_t i = 0; i < n_streams; i++) {
RAFT_CUDA_TRY(hipStreamDestroy(streams[i]));
}
}
/**
* @brief performs MNMG fit and transform operation for the pca.
* @input param handle: the internal cuml handle object
* @input param rank_sizes: includes all the partition size information for the rank
* @input param n_parts: number of partitions
* @input param input: input data
* @output param trans_input: transformed input data
* @output param components: principal components of the input data
* @output param explained_var: explained var
* @output param explained_var_ratio: the explained var ratio
* @output param singular_vals: singular values of the data
* @output param mu: mean of every column in input
* @output param noise_vars: variance of the noise
* @input param prms: data structure that includes all the parameters from input size to algorithm
* @input param verbose
*/
template <typename T>
void fit_transform_impl(raft::handle_t& handle,
Matrix::RankSizePair** rank_sizes,
std::uint32_t n_parts,
Matrix::Data<T>** input,
Matrix::Data<T>** trans_input,
T* components,
T* explained_var,
T* explained_var_ratio,
T* singular_vals,
T* mu,
T* noise_vars,
paramsPCAMG prms,
bool verbose)
{
int rank = handle.get_comms().get_rank();
std::vector<Matrix::RankSizePair*> ranksAndSizes(rank_sizes, rank_sizes + n_parts);
std::vector<Matrix::Data<T>*> input_data(input, input + n_parts);
Matrix::PartDescriptor input_desc(prms.n_rows, prms.n_cols, ranksAndSizes, rank);
std::vector<Matrix::Data<T>*> trans_data(trans_input, trans_input + n_parts);
// TODO: These streams should come from raft::handle_t
auto n_streams = n_parts;
hipStream_t streams[n_streams];
for (std::uint32_t i = 0; i < n_streams; i++) {
RAFT_CUDA_TRY(hipStreamCreate(&streams[i]));
}
fit_impl(handle,
input_data,
input_desc,
components,
explained_var,
explained_var_ratio,
singular_vals,
mu,
noise_vars,
prms,
streams,
n_streams,
verbose);
transform_impl(handle,
input_data,
input_desc,
components,
trans_data,
singular_vals,
mu,
prms,
streams,
n_streams,
verbose);
sign_flip(handle, trans_data, input_desc, components, prms.n_components, streams, n_streams);
for (std::uint32_t i = 0; i < n_streams; i++) {
handle.sync_stream(streams[i]);
}
for (std::uint32_t i = 0; i < n_streams; i++) {
RAFT_CUDA_TRY(hipStreamDestroy(streams[i]));
}
}
void fit(raft::handle_t& handle,
std::vector<Matrix::Data<float>*>& input_data,
Matrix::PartDescriptor& input_desc,
float* components,
float* explained_var,
float* explained_var_ratio,
float* singular_vals,
float* mu,
float* noise_vars,
paramsPCAMG prms,
bool verbose)
{
fit_impl(handle,
input_data,
input_desc,
components,
explained_var,
explained_var_ratio,
singular_vals,
mu,
noise_vars,
prms,
verbose);
}
void fit(raft::handle_t& handle,
std::vector<Matrix::Data<double>*>& input_data,
Matrix::PartDescriptor& input_desc,
double* components,
double* explained_var,
double* explained_var_ratio,
double* singular_vals,
double* mu,
double* noise_vars,
paramsPCAMG prms,
bool verbose)
{
fit_impl(handle,
input_data,
input_desc,
components,
explained_var,
explained_var_ratio,
singular_vals,
mu,
noise_vars,
prms,
verbose);
}
void fit_transform(raft::handle_t& handle,
Matrix::RankSizePair** rank_sizes,
std::uint32_t n_parts,
Matrix::floatData_t** input,
Matrix::floatData_t** trans_input,
float* components,
float* explained_var,
float* explained_var_ratio,
float* singular_vals,
float* mu,
float* noise_vars,
paramsPCAMG prms,
bool verbose)
{
fit_transform_impl(handle,
rank_sizes,
n_parts,
input,
trans_input,
components,
explained_var,
explained_var_ratio,
singular_vals,
mu,
noise_vars,
prms,
verbose);
}
void fit_transform(raft::handle_t& handle,
Matrix::RankSizePair** rank_sizes,
std::uint32_t n_parts,
Matrix::doubleData_t** input,
Matrix::doubleData_t** trans_input,
double* components,
double* explained_var,
double* explained_var_ratio,
double* singular_vals,
double* mu,
double* noise_vars,
paramsPCAMG prms,
bool verbose)
{
fit_transform_impl(handle,
rank_sizes,
n_parts,
input,
trans_input,
components,
explained_var,
explained_var_ratio,
singular_vals,
mu,
noise_vars,
prms,
verbose);
}
void transform(raft::handle_t& handle,
Matrix::RankSizePair** rank_sizes,
std::uint32_t n_parts,
Matrix::Data<float>** input,
float* components,
Matrix::Data<float>** trans_input,
float* singular_vals,
float* mu,
paramsPCAMG prms,
bool verbose)
{
transform_impl(
handle, rank_sizes, n_parts, input, components, trans_input, singular_vals, mu, prms, verbose);
}
void transform(raft::handle_t& handle,
Matrix::RankSizePair** rank_sizes,
std::uint32_t n_parts,
Matrix::Data<double>** input,
double* components,
Matrix::Data<double>** trans_input,
double* singular_vals,
double* mu,
paramsPCAMG prms,
bool verbose)
{
transform_impl(
handle, rank_sizes, n_parts, input, components, trans_input, singular_vals, mu, prms, verbose);
}
void inverse_transform(raft::handle_t& handle,
Matrix::RankSizePair** rank_sizes,
std::uint32_t n_parts,
Matrix::Data<float>** trans_input,
float* components,
Matrix::Data<float>** input,
float* singular_vals,
float* mu,
paramsPCAMG prms,
bool verbose)
{
inverse_transform_impl(
handle, rank_sizes, n_parts, trans_input, components, input, singular_vals, mu, prms, verbose);
}
void inverse_transform(raft::handle_t& handle,
Matrix::RankSizePair** rank_sizes,
std::uint32_t n_parts,
Matrix::Data<double>** trans_input,
double* components,
Matrix::Data<double>** input,
double* singular_vals,
double* mu,
paramsPCAMG prms,
bool verbose)
{
inverse_transform_impl(
handle, rank_sizes, n_parts, trans_input, components, input, singular_vals, mu, prms, verbose);
}
} // namespace opg
} // namespace PCA
} // namespace ML
|
847e302eed277cd6ab29acfea0191a8d0b84c22c.cu
|
/*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "pca.cuh"
#include <cuml/decomposition/pca.hpp>
#include <cuml/decomposition/pca_mg.hpp>
#include <cuml/decomposition/sign_flip_mg.hpp>
#include <cumlprims/opg/linalg/qr_based_svd.hpp>
#include <cumlprims/opg/matrix/matrix_utils.hpp>
#include <cumlprims/opg/stats/cov.hpp>
#include <cumlprims/opg/stats/mean.hpp>
#include <cumlprims/opg/stats/mean_center.hpp>
#include <raft/core/comms.hpp>
#include <raft/core/handle.hpp>
#include <raft/linalg/transpose.cuh>
#include <raft/matrix/math.cuh>
#include <raft/stats/mean_center.cuh>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
#include <cstddef>
using namespace MLCommon;
namespace ML {
namespace PCA {
namespace opg {
template <typename T>
void fit_impl(raft::handle_t& handle,
std::vector<Matrix::Data<T>*>& input_data,
Matrix::PartDescriptor& input_desc,
T* components,
T* explained_var,
T* explained_var_ratio,
T* singular_vals,
T* mu,
T* noise_vars,
paramsPCAMG prms,
cudaStream_t* streams,
std::uint32_t n_streams,
bool verbose)
{
const auto& comm = handle.get_comms();
Matrix::Data<T> mu_data{mu, prms.n_cols};
Stats::opg::mean(handle, mu_data, input_data, input_desc, streams, n_streams);
rmm::device_uvector<T> cov_data(prms.n_cols * prms.n_cols, streams[0]);
auto cov_data_size = cov_data.size();
Matrix::Data<T> cov{cov_data.data(), cov_data_size};
Stats::opg::cov(handle, cov, input_data, input_desc, mu_data, true, streams, n_streams);
ML::truncCompExpVars<T, mg_solver>(
handle, cov.ptr, components, explained_var, explained_var_ratio, prms, streams[0]);
T scalar = (prms.n_rows - 1);
raft::matrix::seqRoot(explained_var, singular_vals, scalar, prms.n_components, streams[0], true);
Stats::opg::mean_add(input_data, input_desc, mu_data, comm, streams, n_streams);
}
/**
* @brief performs MNMG fit operation for the pca
* @input param handle: the internal cuml handle object
 * @input param input_data: input data partitions owned by this rank
 * @input param input_desc: describes how the input data is partitioned across the ranks
* @input param components: principal components of the input data
* @output param explained_var: explained var
* @output param explained_var_ratio: the explained var ratio
* @output param singular_vals: singular values of the data
* @output param mu: mean of every column in input
* @output param noise_vars: variance of the noise
* @input param prms: data structure that includes all the parameters from input size to algorithm
* @input param verbose
*/
template <typename T>
void fit_impl(raft::handle_t& handle,
std::vector<Matrix::Data<T>*>& input_data,
Matrix::PartDescriptor& input_desc,
T* components,
T* explained_var,
T* explained_var_ratio,
T* singular_vals,
T* mu,
T* noise_vars,
paramsPCAMG prms,
bool verbose)
{
int rank = handle.get_comms().get_rank();
// TODO: These streams should come from raft::handle_t
// Reference issue https://github.com/rapidsai/cuml/issues/2470
auto n_streams = input_desc.blocksOwnedBy(rank).size();
cudaStream_t streams[n_streams];
for (std::uint32_t i = 0; i < n_streams; i++) {
RAFT_CUDA_TRY(cudaStreamCreate(&streams[i]));
}
if (prms.algorithm == mg_solver::COV_EIG_JACOBI || prms.algorithm == mg_solver::COV_EIG_DQ) {
fit_impl(handle,
input_data,
input_desc,
components,
explained_var,
explained_var_ratio,
singular_vals,
mu,
noise_vars,
prms,
streams,
n_streams,
verbose);
for (std::uint32_t i = 0; i < n_streams; i++) {
handle.sync_stream(streams[i]);
}
} else if (prms.algorithm == mg_solver::QR) {
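// QR path: center the distributed data, run the multi-node QR-based SVD, sign-flip for
// deterministic component signs, turn singular values into explained variance
// (s^2 / (n_rows - 1)), truncate everything to n_components, and add the mean back at the end.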
const raft::handle_t& h = handle;
cudaStream_t stream = h.get_stream();
const auto& comm = h.get_comms();
// Center the data
Matrix::Data<T> mu_data{mu, prms.n_cols};
Stats::opg::mean(handle, mu_data, input_data, input_desc, streams, n_streams);
Stats::opg::mean_center(input_data, input_desc, mu_data, comm, streams, n_streams);
for (std::uint32_t i = 0; i < n_streams; i++) {
handle.sync_stream(streams[i]);
}
// Allocate Q, S and V and call QR
std::vector<Matrix::Data<T>*> uMatrixParts;
Matrix::opg::allocate(h, uMatrixParts, input_desc, rank, stream);
rmm::device_uvector<T> sVector(prms.n_cols, stream);
rmm::device_uvector<T> vMatrix(prms.n_cols * prms.n_cols, stream);
RAFT_CUDA_TRY(cudaMemset(vMatrix.data(), 0, prms.n_cols * prms.n_cols * sizeof(T)));
LinAlg::opg::svdQR(h,
sVector.data(),
uMatrixParts,
vMatrix.data(),
true,
true,
prms.tol,
prms.n_iterations,
input_data,
input_desc,
rank);
// sign flip
sign_flip(handle, uMatrixParts, input_desc, vMatrix.data(), prms.n_cols, streams, n_streams);
// Calculate instance variables
rmm::device_uvector<T> explained_var_all(prms.n_cols, stream);
rmm::device_uvector<T> explained_var_ratio_all(prms.n_cols, stream);
T scalar = 1.0 / (prms.n_rows - 1);
raft::matrix::power(sVector.data(), explained_var_all.data(), scalar, prms.n_cols, stream);
raft::matrix::ratio(
handle, explained_var_all.data(), explained_var_ratio_all.data(), prms.n_cols, stream);
raft::matrix::truncZeroOrigin(
sVector.data(), prms.n_cols, singular_vals, prms.n_components, std::size_t(1), stream);
raft::matrix::truncZeroOrigin(explained_var_all.data(),
prms.n_cols,
explained_var,
prms.n_components,
std::size_t(1),
stream);
raft::matrix::truncZeroOrigin(explained_var_ratio_all.data(),
prms.n_cols,
explained_var_ratio,
prms.n_components,
std::size_t(1),
stream);
raft::linalg::transpose(vMatrix.data(), prms.n_cols, stream);
raft::matrix::truncZeroOrigin(
vMatrix.data(), prms.n_cols, components, prms.n_components, prms.n_cols, stream);
Matrix::opg::deallocate(h, uMatrixParts, input_desc, rank, stream);
// Re-add mean to centered data
Stats::opg::mean_add(input_data, input_desc, mu_data, comm, streams, n_streams);
}
for (std::uint32_t i = 0; i < n_streams; i++) {
handle.sync_stream(streams[i]);
}
for (std::uint32_t i = 0; i < n_streams; i++) {
RAFT_CUDA_TRY(cudaStreamDestroy(streams[i]));
}
}
template <typename T>
void transform_impl(raft::handle_t& handle,
std::vector<Matrix::Data<T>*>& input,
const Matrix::PartDescriptor input_desc,
T* components,
std::vector<Matrix::Data<T>*>& trans_input,
T* singular_vals,
T* mu,
const paramsPCAMG prms,
cudaStream_t* streams,
std::uint32_t n_streams,
bool verbose)
{
std::vector<Matrix::RankSizePair*> local_blocks = input_desc.partsToRanks;
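// Whitening temporarily rescales the components by sqrt(n_rows - 1) / singular_values so the
// projected scores come out with unit variance; the scaling is inverted again after the loop.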
if (prms.whiten) {
T scalar = T(sqrt(prms.n_rows - 1));
raft::linalg::scalarMultiply(
components, components, scalar, prms.n_cols * prms.n_components, streams[0]);
raft::matrix::matrixVectorBinaryDivSkipZero(
components, singular_vals, prms.n_cols, prms.n_components, true, true, streams[0]);
}
for (std::size_t i = 0; i < input.size(); i++) {
auto si = i % n_streams;
raft::stats::meanCenter(input[i]->ptr,
input[i]->ptr,
mu,
prms.n_cols,
local_blocks[i]->size,
false,
true,
streams[si]);
T alpha = T(1);
T beta = T(0);
raft::linalg::gemm(handle,
input[i]->ptr,
local_blocks[i]->size,
prms.n_cols,
components,
trans_input[i]->ptr,
local_blocks[i]->size,
prms.n_components,
CUBLAS_OP_N,
CUBLAS_OP_T,
alpha,
beta,
streams[si]);
raft::stats::meanAdd(input[i]->ptr,
input[i]->ptr,
mu,
prms.n_cols,
local_blocks[i]->size,
false,
true,
streams[si]);
}
if (prms.whiten) {
raft::matrix::matrixVectorBinaryMultSkipZero(
components, singular_vals, prms.n_cols, prms.n_components, true, true, streams[0]);
T scalar = T(1 / sqrt(prms.n_rows - 1));
raft::linalg::scalarMultiply(
components, components, scalar, prms.n_cols * prms.n_components, streams[0]);
}
for (std::uint32_t i = 0; i < n_streams; i++) {
handle.sync_stream(streams[i]);
}
}
/**
* @brief performs MNMG transform operation for the pca.
* @input param handle: the internal cuml handle object
* @input param rank_sizes: includes all the partition size information for the rank
* @input param n_parts: number of partitions
* @input param input: input data
* @input param components: principal components of the input data
* @output param trans_input: transformed input data
* @input param singular_vals: singular values of the data
* @input param mu: mean of every column in input
* @input param prms: data structure that includes all the parameters from input size to algorithm
* @input param verbose
*/
template <typename T>
void transform_impl(raft::handle_t& handle,
Matrix::RankSizePair** rank_sizes,
std::uint32_t n_parts,
Matrix::Data<T>** input,
T* components,
Matrix::Data<T>** trans_input,
T* singular_vals,
T* mu,
paramsPCAMG prms,
bool verbose)
{
// We want to update the API of this function, and other functions with
// regards to https://github.com/rapidsai/cuml/issues/2471
int rank = handle.get_comms().get_rank();
std::vector<Matrix::RankSizePair*> ranksAndSizes(rank_sizes, rank_sizes + n_parts);
std::vector<Matrix::Data<T>*> input_data(input, input + n_parts);
Matrix::PartDescriptor input_desc(prms.n_rows, prms.n_cols, ranksAndSizes, rank);
std::vector<Matrix::Data<T>*> trans_data(trans_input, trans_input + n_parts);
// TODO: These streams should come from raft::handle_t
auto n_streams = n_parts;
cudaStream_t streams[n_streams];
for (std::uint32_t i = 0; i < n_streams; i++) {
RAFT_CUDA_TRY(cudaStreamCreate(&streams[i]));
}
transform_impl(handle,
input_data,
input_desc,
components,
trans_data,
singular_vals,
mu,
prms,
streams,
n_streams,
verbose);
for (std::uint32_t i = 0; i < n_streams; i++) {
handle.sync_stream(streams[i]);
}
for (std::uint32_t i = 0; i < n_streams; i++) {
RAFT_CUDA_TRY(cudaStreamDestroy(streams[i]));
}
}
template <typename T>
void inverse_transform_impl(raft::handle_t& handle,
std::vector<Matrix::Data<T>*>& trans_input,
Matrix::PartDescriptor trans_input_desc,
T* components,
std::vector<Matrix::Data<T>*>& input,
T* singular_vals,
T* mu,
paramsPCAMG prms,
cudaStream_t* streams,
std::uint32_t n_streams,
bool verbose)
{
std::vector<Matrix::RankSizePair*> local_blocks = trans_input_desc.partsToRanks;
if (prms.whiten) {
T scalar = T(1 / sqrt(prms.n_rows - 1));
raft::linalg::scalarMultiply(
components, components, scalar, prms.n_rows * prms.n_components, streams[0]);
raft::matrix::matrixVectorBinaryMultSkipZero(
components, singular_vals, prms.n_rows, prms.n_components, true, true, streams[0]);
}
for (std::size_t i = 0; i < local_blocks.size(); i++) {
auto si = i % n_streams;
T alpha = T(1);
T beta = T(0);
raft::linalg::gemm(handle,
trans_input[i]->ptr,
local_blocks[i]->size,
prms.n_components,
components,
input[i]->ptr,
local_blocks[i]->size,
prms.n_cols,
CUBLAS_OP_N,
CUBLAS_OP_N,
alpha,
beta,
streams[si]);
raft::stats::meanAdd(input[i]->ptr,
input[i]->ptr,
mu,
prms.n_cols,
local_blocks[i]->size,
false,
true,
streams[si]);
}
if (prms.whiten) {
raft::matrix::matrixVectorBinaryDivSkipZero(
components, singular_vals, prms.n_rows, prms.n_components, true, true, streams[0]);
T scalar = T(sqrt(prms.n_rows - 1));
raft::linalg::scalarMultiply(
components, components, scalar, prms.n_rows * prms.n_components, streams[0]);
}
for (std::uint32_t i = 0; i < n_streams; i++) {
handle.sync_stream(streams[i]);
}
}
/**
* @brief performs MNMG inverse transform operation for the pca.
* @input param handle: the internal cuml handle object
* @input param rank_sizes: includes all the partition size information for the rank
* @input param n_parts: number of partitions
* @input param trans_input: transformed input data
* @input param components: principal components of the input data
* @output param input: input data
* @input param singular_vals: singular values of the data
* @input param mu: mean of every column in input
* @input param prms: data structure that includes all the parameters from input size to algorithm
* @input param verbose
*/
template <typename T>
void inverse_transform_impl(raft::handle_t& handle,
Matrix::RankSizePair** rank_sizes,
std::uint32_t n_parts,
Matrix::Data<T>** trans_input,
T* components,
Matrix::Data<T>** input,
T* singular_vals,
T* mu,
paramsPCAMG prms,
bool verbose)
{
int rank = handle.get_comms().get_rank();
std::vector<Matrix::RankSizePair*> ranksAndSizes(rank_sizes, rank_sizes + n_parts);
Matrix::PartDescriptor trans_desc(prms.n_rows, prms.n_components, ranksAndSizes, rank);
std::vector<Matrix::Data<T>*> trans_data(trans_input, trans_input + n_parts);
std::vector<Matrix::Data<T>*> input_data(input, input + n_parts);
// TODO: These streams should come from raft::handle_t
auto n_streams = n_parts;
cudaStream_t streams[n_streams];
for (std::uint32_t i = 0; i < n_streams; i++) {
RAFT_CUDA_TRY(cudaStreamCreate(&streams[i]));
}
inverse_transform_impl(handle,
trans_data,
trans_desc,
components,
input_data,
singular_vals,
mu,
prms,
streams,
n_streams,
verbose);
for (std::uint32_t i = 0; i < n_streams; i++) {
handle.sync_stream(streams[i]);
}
for (std::uint32_t i = 0; i < n_streams; i++) {
RAFT_CUDA_TRY(cudaStreamDestroy(streams[i]));
}
}
/**
* @brief performs MNMG fit and transform operation for the pca.
* @input param handle: the internal cuml handle object
* @input param rank_sizes: includes all the partition size information for the rank
* @input param n_parts: number of partitions
* @input param input: input data
* @output param trans_input: transformed input data
* @output param components: principal components of the input data
* @output param explained_var: explained var
* @output param explained_var_ratio: the explained var ratio
* @output param singular_vals: singular values of the data
* @output param mu: mean of every column in input
* @output param noise_vars: variance of the noise
* @input param prms: data structure that includes all the parameters from input size to algorithm
* @input param verbose
*/
template <typename T>
void fit_transform_impl(raft::handle_t& handle,
Matrix::RankSizePair** rank_sizes,
std::uint32_t n_parts,
Matrix::Data<T>** input,
Matrix::Data<T>** trans_input,
T* components,
T* explained_var,
T* explained_var_ratio,
T* singular_vals,
T* mu,
T* noise_vars,
paramsPCAMG prms,
bool verbose)
{
int rank = handle.get_comms().get_rank();
std::vector<Matrix::RankSizePair*> ranksAndSizes(rank_sizes, rank_sizes + n_parts);
std::vector<Matrix::Data<T>*> input_data(input, input + n_parts);
Matrix::PartDescriptor input_desc(prms.n_rows, prms.n_cols, ranksAndSizes, rank);
std::vector<Matrix::Data<T>*> trans_data(trans_input, trans_input + n_parts);
// TODO: These streams should come from raft::handle_t
auto n_streams = n_parts;
cudaStream_t streams[n_streams];
for (std::uint32_t i = 0; i < n_streams; i++) {
RAFT_CUDA_TRY(cudaStreamCreate(&streams[i]));
}
fit_impl(handle,
input_data,
input_desc,
components,
explained_var,
explained_var_ratio,
singular_vals,
mu,
noise_vars,
prms,
streams,
n_streams,
verbose);
transform_impl(handle,
input_data,
input_desc,
components,
trans_data,
singular_vals,
mu,
prms,
streams,
n_streams,
verbose);
sign_flip(handle, trans_data, input_desc, components, prms.n_components, streams, n_streams);
for (std::uint32_t i = 0; i < n_streams; i++) {
handle.sync_stream(streams[i]);
}
for (std::uint32_t i = 0; i < n_streams; i++) {
RAFT_CUDA_TRY(cudaStreamDestroy(streams[i]));
}
}
void fit(raft::handle_t& handle,
std::vector<Matrix::Data<float>*>& input_data,
Matrix::PartDescriptor& input_desc,
float* components,
float* explained_var,
float* explained_var_ratio,
float* singular_vals,
float* mu,
float* noise_vars,
paramsPCAMG prms,
bool verbose)
{
fit_impl(handle,
input_data,
input_desc,
components,
explained_var,
explained_var_ratio,
singular_vals,
mu,
noise_vars,
prms,
verbose);
}
void fit(raft::handle_t& handle,
std::vector<Matrix::Data<double>*>& input_data,
Matrix::PartDescriptor& input_desc,
double* components,
double* explained_var,
double* explained_var_ratio,
double* singular_vals,
double* mu,
double* noise_vars,
paramsPCAMG prms,
bool verbose)
{
fit_impl(handle,
input_data,
input_desc,
components,
explained_var,
explained_var_ratio,
singular_vals,
mu,
noise_vars,
prms,
verbose);
}
void fit_transform(raft::handle_t& handle,
Matrix::RankSizePair** rank_sizes,
std::uint32_t n_parts,
Matrix::floatData_t** input,
Matrix::floatData_t** trans_input,
float* components,
float* explained_var,
float* explained_var_ratio,
float* singular_vals,
float* mu,
float* noise_vars,
paramsPCAMG prms,
bool verbose)
{
fit_transform_impl(handle,
rank_sizes,
n_parts,
input,
trans_input,
components,
explained_var,
explained_var_ratio,
singular_vals,
mu,
noise_vars,
prms,
verbose);
}
void fit_transform(raft::handle_t& handle,
Matrix::RankSizePair** rank_sizes,
std::uint32_t n_parts,
Matrix::doubleData_t** input,
Matrix::doubleData_t** trans_input,
double* components,
double* explained_var,
double* explained_var_ratio,
double* singular_vals,
double* mu,
double* noise_vars,
paramsPCAMG prms,
bool verbose)
{
fit_transform_impl(handle,
rank_sizes,
n_parts,
input,
trans_input,
components,
explained_var,
explained_var_ratio,
singular_vals,
mu,
noise_vars,
prms,
verbose);
}
void transform(raft::handle_t& handle,
Matrix::RankSizePair** rank_sizes,
std::uint32_t n_parts,
Matrix::Data<float>** input,
float* components,
Matrix::Data<float>** trans_input,
float* singular_vals,
float* mu,
paramsPCAMG prms,
bool verbose)
{
transform_impl(
handle, rank_sizes, n_parts, input, components, trans_input, singular_vals, mu, prms, verbose);
}
void transform(raft::handle_t& handle,
Matrix::RankSizePair** rank_sizes,
std::uint32_t n_parts,
Matrix::Data<double>** input,
double* components,
Matrix::Data<double>** trans_input,
double* singular_vals,
double* mu,
paramsPCAMG prms,
bool verbose)
{
transform_impl(
handle, rank_sizes, n_parts, input, components, trans_input, singular_vals, mu, prms, verbose);
}
void inverse_transform(raft::handle_t& handle,
Matrix::RankSizePair** rank_sizes,
std::uint32_t n_parts,
Matrix::Data<float>** trans_input,
float* components,
Matrix::Data<float>** input,
float* singular_vals,
float* mu,
paramsPCAMG prms,
bool verbose)
{
inverse_transform_impl(
handle, rank_sizes, n_parts, trans_input, components, input, singular_vals, mu, prms, verbose);
}
void inverse_transform(raft::handle_t& handle,
Matrix::RankSizePair** rank_sizes,
std::uint32_t n_parts,
Matrix::Data<double>** trans_input,
double* components,
Matrix::Data<double>** input,
double* singular_vals,
double* mu,
paramsPCAMG prms,
bool verbose)
{
inverse_transform_impl(
handle, rank_sizes, n_parts, trans_input, components, input, singular_vals, mu, prms, verbose);
}
} // namespace opg
} // namespace PCA
} // namespace ML
|
2d89fce0d27ba35b9537a65e9e7546d8f017a85b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* This sample evaluates fair call and put prices for a
* given set of European options by Black-Scholes formula.
* See supplied whitepaper for more explanations.
*/
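/*
 * Reference (editorial note, not part of the original sample comment): the
 * Black-Scholes closed form for European options is
 *   d1   = (ln(S/X) + (r + 0.5*v^2)*T) / (v*sqrt(T))
 *   d2   = d1 - v*sqrt(T)
 *   Call = S*CND(d1) - X*exp(-r*T)*CND(d2)
 *   Put  = X*exp(-r*T)*CND(-d2) - S*CND(-d1)
 * with S the stock price, X the strike, T the time to expiry in years, r the
 * risk-free rate, v the volatility, and CND the cumulative normal distribution.
 */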
#include <helper_functions.h> // helper functions for string parsing
#include <helper_cuda.h> // helper functions CUDA error checking and initialization
////////////////////////////////////////////////////////////////////////////////
// Process an array of optN options on CPU
////////////////////////////////////////////////////////////////////////////////
extern "C" void BlackScholesCPU(
float *h_CallResult,
float *h_PutResult,
float *h_StockPrice,
float *h_OptionStrike,
float *h_OptionYears,
float Riskfree,
float Volatility,
int optN
);
////////////////////////////////////////////////////////////////////////////////
// Process an array of OptN options on GPU
////////////////////////////////////////////////////////////////////////////////
#include "BlackScholes_kernel.cuh"
////////////////////////////////////////////////////////////////////////////////
// Helper function, returning uniformly distributed
// random float in [low, high] range
////////////////////////////////////////////////////////////////////////////////
float RandFloat(float low, float high)
{
float t = (float)rand() / (float)RAND_MAX;
return (1.0f - t) * low + t * high;
}
////////////////////////////////////////////////////////////////////////////////
// Data configuration
////////////////////////////////////////////////////////////////////////////////
const int OPT_N = 4000000;
//const int NUM_ITERATIONS = 512;
const int NUM_ITERATIONS = 10;
const int OPT_SZ = OPT_N * sizeof(float);
const float RISKFREE = 0.02f;
const float VOLATILITY = 0.30f;
#define DIV_UP(a, b) ( ((a) + (b) - 1) / (b) )
////////////////////////////////////////////////////////////////////////////////
// Main program
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv)
{
// Start logs
printf("[%s] - Starting...\n", argv[0]);
//'h_' prefix - CPU (host) memory space
float
//Results calculated by CPU for reference
*h_CallResultCPU,
*h_PutResultCPU,
//CPU copy of GPU results
*h_CallResultGPU,
*h_PutResultGPU,
//CPU instance of input data
*h_StockPrice,
*h_OptionStrike,
*h_OptionYears;
//'d_' prefix - GPU (device) memory space
float
//Results calculated by GPU
*d_CallResult,
*d_PutResult,
//GPU instance of input data
*d_StockPrice,
*d_OptionStrike,
*d_OptionYears;
double
delta, ref, sum_delta, sum_ref, max_delta, L1norm, gpuTime;
StopWatchInterface *hTimer = NULL;
int i;
findCudaDevice(argc, (const char **)argv);
sdkCreateTimer(&hTimer);
printf("Initializing data...\n");
printf("...allocating CPU memory for options.\n");
h_CallResultCPU = (float *)malloc(OPT_SZ);
h_PutResultCPU = (float *)malloc(OPT_SZ);
h_CallResultGPU = (float *)malloc(OPT_SZ);
h_PutResultGPU = (float *)malloc(OPT_SZ);
h_StockPrice = (float *)malloc(OPT_SZ);
h_OptionStrike = (float *)malloc(OPT_SZ);
h_OptionYears = (float *)malloc(OPT_SZ);
printf("...allocating GPU memory for options.\n");
checkCudaErrors(hipMalloc((void **)&d_CallResult, OPT_SZ));
checkCudaErrors(hipMalloc((void **)&d_PutResult, OPT_SZ));
checkCudaErrors(hipMalloc((void **)&d_StockPrice, OPT_SZ));
checkCudaErrors(hipMalloc((void **)&d_OptionStrike, OPT_SZ));
checkCudaErrors(hipMalloc((void **)&d_OptionYears, OPT_SZ));
printf("...generating input data in CPU mem.\n");
srand(5347);
//Generate options set
for (i = 0; i < OPT_N; i++)
{
h_CallResultCPU[i] = 0.0f;
h_PutResultCPU[i] = -1.0f;
h_StockPrice[i] = RandFloat(5.0f, 30.0f);
h_OptionStrike[i] = RandFloat(1.0f, 100.0f);
h_OptionYears[i] = RandFloat(0.25f, 10.0f);
}
printf("...copying input data to GPU mem.\n");
//Copy options data to GPU memory for further processing
checkCudaErrors(hipMemcpy(d_StockPrice, h_StockPrice, OPT_SZ, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_OptionStrike, h_OptionStrike, OPT_SZ, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_OptionYears, h_OptionYears, OPT_SZ, hipMemcpyHostToDevice));
printf("Data init done.\n\n");
printf("Executing Black-Scholes GPU kernel (%i iterations)...\n", NUM_ITERATIONS);
checkCudaErrors(hipDeviceSynchronize());
sdkResetTimer(&hTimer);
sdkStartTimer(&hTimer);
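    // The option arrays are handed to the kernel as float2, packing two options
    // per element; the grid therefore covers OPT_N/2 packed elements with
    // 16-thread blocks, DIV_UP rounding the block count up.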
for (i = 0; i < NUM_ITERATIONS; i++)
{
//BlackScholesGPU<<<DIV_UP((OPT_N/2), 16), 16/*480, 128*/>>>(
        hipLaunchKernelGGL(( BlackScholesGPU), dim3(DIV_UP((OPT_N/2), 16)), dim3(16/*480, 128*/), 0, 0,
(float2 *)d_CallResult,
(float2 *)d_PutResult,
(float2 *)d_StockPrice,
(float2 *)d_OptionStrike,
(float2 *)d_OptionYears,
RISKFREE,
VOLATILITY,
OPT_N
);
getLastCudaError("BlackScholesGPU() execution failed\n");
}
checkCudaErrors(hipDeviceSynchronize());
sdkStopTimer(&hTimer);
gpuTime = sdkGetTimerValue(&hTimer) / NUM_ITERATIONS;
    //Both call and put are calculated
printf("Options count : %i \n", 2 * OPT_N);
printf("BlackScholesGPU() time : %f msec\n", gpuTime);
printf("Effective memory bandwidth: %f GB/s\n", ((double)(5 * OPT_N * sizeof(float)) * 1E-9) / (gpuTime * 1E-3));
printf("Gigaoptions per second : %f \n\n", ((double)(2 * OPT_N) * 1E-9) / (gpuTime * 1E-3));
printf("BlackScholes, Throughput = %.4f GOptions/s, Time = %.5f s, Size = %u options, NumDevsUsed = %u, Workgroup = %u\n",
(((double)(2.0 * OPT_N) * 1.0E-9) / (gpuTime * 1.0E-3)), gpuTime*1e-3, (2 * OPT_N), 1, 16);
printf("\nReading back GPU results...\n");
//Read back GPU results to compare them to CPU results
checkCudaErrors(hipMemcpy(h_CallResultGPU, d_CallResult, OPT_SZ, hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(h_PutResultGPU, d_PutResult, OPT_SZ, hipMemcpyDeviceToHost));
printf("Checking the results...\n");
printf("...running CPU calculations.\n\n");
//Calculate options values on CPU
BlackScholesCPU(
h_CallResultCPU,
h_PutResultCPU,
h_StockPrice,
h_OptionStrike,
h_OptionYears,
RISKFREE,
VOLATILITY,
OPT_N
);
printf("Comparing the results...\n");
//Calculate max absolute difference and L1 distance
//between CPU and GPU results
sum_delta = 0;
sum_ref = 0;
max_delta = 0;
for (i = 0; i < OPT_N; i++)
{
ref = h_CallResultCPU[i];
delta = fabs(h_CallResultCPU[i] - h_CallResultGPU[i]);
if (delta > max_delta)
{
max_delta = delta;
}
sum_delta += delta;
sum_ref += fabs(ref);
}
L1norm = sum_delta / sum_ref;
printf("L1 norm: %E\n", L1norm);
printf("Max absolute error: %E\n\n", max_delta);
printf("Shutting down...\n");
printf("...releasing GPU memory.\n");
checkCudaErrors(hipFree(d_OptionYears));
checkCudaErrors(hipFree(d_OptionStrike));
checkCudaErrors(hipFree(d_StockPrice));
checkCudaErrors(hipFree(d_PutResult));
checkCudaErrors(hipFree(d_CallResult));
printf("...releasing CPU memory.\n");
free(h_OptionYears);
free(h_OptionStrike);
free(h_StockPrice);
free(h_PutResultGPU);
free(h_CallResultGPU);
free(h_PutResultCPU);
free(h_CallResultCPU);
sdkDeleteTimer(&hTimer);
printf("Shutdown done.\n");
printf("\n[BlackScholes] - Test Summary\n");
if (L1norm > 1e-6)
{
printf("Test failed!\n");
exit(EXIT_FAILURE);
}
printf("\nNOTE: The CUDA Samples are not meant for performance measurements. Results may vary when GPU Boost is enabled.\n\n");
printf("Test passed\n");
exit(EXIT_SUCCESS);
}
|
2d89fce0d27ba35b9537a65e9e7546d8f017a85b.cu
|
/*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* This sample evaluates fair call and put prices for a
* given set of European options by Black-Scholes formula.
* See supplied whitepaper for more explanations.
*/
#include <helper_functions.h> // helper functions for string parsing
#include <helper_cuda.h> // helper functions CUDA error checking and initialization
////////////////////////////////////////////////////////////////////////////////
// Process an array of optN options on CPU
////////////////////////////////////////////////////////////////////////////////
extern "C" void BlackScholesCPU(
float *h_CallResult,
float *h_PutResult,
float *h_StockPrice,
float *h_OptionStrike,
float *h_OptionYears,
float Riskfree,
float Volatility,
int optN
);
////////////////////////////////////////////////////////////////////////////////
// Process an array of OptN options on GPU
////////////////////////////////////////////////////////////////////////////////
#include "BlackScholes_kernel.cuh"
////////////////////////////////////////////////////////////////////////////////
// Helper function, returning uniformly distributed
// random float in [low, high] range
////////////////////////////////////////////////////////////////////////////////
float RandFloat(float low, float high)
{
float t = (float)rand() / (float)RAND_MAX;
return (1.0f - t) * low + t * high;
}
////////////////////////////////////////////////////////////////////////////////
// Data configuration
////////////////////////////////////////////////////////////////////////////////
const int OPT_N = 4000000;
//const int NUM_ITERATIONS = 512;
const int NUM_ITERATIONS = 10;
const int OPT_SZ = OPT_N * sizeof(float);
const float RISKFREE = 0.02f;
const float VOLATILITY = 0.30f;
#define DIV_UP(a, b) ( ((a) + (b) - 1) / (b) )
////////////////////////////////////////////////////////////////////////////////
// Main program
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv)
{
// Start logs
printf("[%s] - Starting...\n", argv[0]);
//'h_' prefix - CPU (host) memory space
float
//Results calculated by CPU for reference
*h_CallResultCPU,
*h_PutResultCPU,
//CPU copy of GPU results
*h_CallResultGPU,
*h_PutResultGPU,
//CPU instance of input data
*h_StockPrice,
*h_OptionStrike,
*h_OptionYears;
//'d_' prefix - GPU (device) memory space
float
//Results calculated by GPU
*d_CallResult,
*d_PutResult,
//GPU instance of input data
*d_StockPrice,
*d_OptionStrike,
*d_OptionYears;
double
delta, ref, sum_delta, sum_ref, max_delta, L1norm, gpuTime;
StopWatchInterface *hTimer = NULL;
int i;
findCudaDevice(argc, (const char **)argv);
sdkCreateTimer(&hTimer);
printf("Initializing data...\n");
printf("...allocating CPU memory for options.\n");
h_CallResultCPU = (float *)malloc(OPT_SZ);
h_PutResultCPU = (float *)malloc(OPT_SZ);
h_CallResultGPU = (float *)malloc(OPT_SZ);
h_PutResultGPU = (float *)malloc(OPT_SZ);
h_StockPrice = (float *)malloc(OPT_SZ);
h_OptionStrike = (float *)malloc(OPT_SZ);
h_OptionYears = (float *)malloc(OPT_SZ);
printf("...allocating GPU memory for options.\n");
checkCudaErrors(cudaMalloc((void **)&d_CallResult, OPT_SZ));
checkCudaErrors(cudaMalloc((void **)&d_PutResult, OPT_SZ));
checkCudaErrors(cudaMalloc((void **)&d_StockPrice, OPT_SZ));
checkCudaErrors(cudaMalloc((void **)&d_OptionStrike, OPT_SZ));
checkCudaErrors(cudaMalloc((void **)&d_OptionYears, OPT_SZ));
printf("...generating input data in CPU mem.\n");
srand(5347);
//Generate options set
for (i = 0; i < OPT_N; i++)
{
h_CallResultCPU[i] = 0.0f;
h_PutResultCPU[i] = -1.0f;
h_StockPrice[i] = RandFloat(5.0f, 30.0f);
h_OptionStrike[i] = RandFloat(1.0f, 100.0f);
h_OptionYears[i] = RandFloat(0.25f, 10.0f);
}
printf("...copying input data to GPU mem.\n");
//Copy options data to GPU memory for further processing
checkCudaErrors(cudaMemcpy(d_StockPrice, h_StockPrice, OPT_SZ, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_OptionStrike, h_OptionStrike, OPT_SZ, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_OptionYears, h_OptionYears, OPT_SZ, cudaMemcpyHostToDevice));
printf("Data init done.\n\n");
printf("Executing Black-Scholes GPU kernel (%i iterations)...\n", NUM_ITERATIONS);
checkCudaErrors(cudaDeviceSynchronize());
sdkResetTimer(&hTimer);
sdkStartTimer(&hTimer);
for (i = 0; i < NUM_ITERATIONS; i++)
{
//BlackScholesGPU<<<DIV_UP((OPT_N/2), 16), 16/*480, 128*/>>>(
BlackScholesGPU<<<DIV_UP((OPT_N/2), 16), 16/*480, 128*/>>>(
(float2 *)d_CallResult,
(float2 *)d_PutResult,
(float2 *)d_StockPrice,
(float2 *)d_OptionStrike,
(float2 *)d_OptionYears,
RISKFREE,
VOLATILITY,
OPT_N
);
getLastCudaError("BlackScholesGPU() execution failed\n");
}
checkCudaErrors(cudaDeviceSynchronize());
sdkStopTimer(&hTimer);
gpuTime = sdkGetTimerValue(&hTimer) / NUM_ITERATIONS;
    //Both call and put are calculated
printf("Options count : %i \n", 2 * OPT_N);
printf("BlackScholesGPU() time : %f msec\n", gpuTime);
printf("Effective memory bandwidth: %f GB/s\n", ((double)(5 * OPT_N * sizeof(float)) * 1E-9) / (gpuTime * 1E-3));
printf("Gigaoptions per second : %f \n\n", ((double)(2 * OPT_N) * 1E-9) / (gpuTime * 1E-3));
printf("BlackScholes, Throughput = %.4f GOptions/s, Time = %.5f s, Size = %u options, NumDevsUsed = %u, Workgroup = %u\n",
(((double)(2.0 * OPT_N) * 1.0E-9) / (gpuTime * 1.0E-3)), gpuTime*1e-3, (2 * OPT_N), 1, 16);
printf("\nReading back GPU results...\n");
//Read back GPU results to compare them to CPU results
checkCudaErrors(cudaMemcpy(h_CallResultGPU, d_CallResult, OPT_SZ, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(h_PutResultGPU, d_PutResult, OPT_SZ, cudaMemcpyDeviceToHost));
printf("Checking the results...\n");
printf("...running CPU calculations.\n\n");
//Calculate options values on CPU
BlackScholesCPU(
h_CallResultCPU,
h_PutResultCPU,
h_StockPrice,
h_OptionStrike,
h_OptionYears,
RISKFREE,
VOLATILITY,
OPT_N
);
printf("Comparing the results...\n");
//Calculate max absolute difference and L1 distance
//between CPU and GPU results
sum_delta = 0;
sum_ref = 0;
max_delta = 0;
for (i = 0; i < OPT_N; i++)
{
ref = h_CallResultCPU[i];
delta = fabs(h_CallResultCPU[i] - h_CallResultGPU[i]);
if (delta > max_delta)
{
max_delta = delta;
}
sum_delta += delta;
sum_ref += fabs(ref);
}
L1norm = sum_delta / sum_ref;
printf("L1 norm: %E\n", L1norm);
printf("Max absolute error: %E\n\n", max_delta);
printf("Shutting down...\n");
printf("...releasing GPU memory.\n");
checkCudaErrors(cudaFree(d_OptionYears));
checkCudaErrors(cudaFree(d_OptionStrike));
checkCudaErrors(cudaFree(d_StockPrice));
checkCudaErrors(cudaFree(d_PutResult));
checkCudaErrors(cudaFree(d_CallResult));
printf("...releasing CPU memory.\n");
free(h_OptionYears);
free(h_OptionStrike);
free(h_StockPrice);
free(h_PutResultGPU);
free(h_CallResultGPU);
free(h_PutResultCPU);
free(h_CallResultCPU);
sdkDeleteTimer(&hTimer);
printf("Shutdown done.\n");
printf("\n[BlackScholes] - Test Summary\n");
if (L1norm > 1e-6)
{
printf("Test failed!\n");
exit(EXIT_FAILURE);
}
printf("\nNOTE: The CUDA Samples are not meant for performance measurements. Results may vary when GPU Boost is enabled.\n\n");
printf("Test passed\n");
exit(EXIT_SUCCESS);
}
|
matrixLogit.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void matrixLogit(double *a, double *c, int cr, int cc){
int x = blockIdx.x * blockDim.x + threadIdx.x; // col
int y = blockIdx.y * blockDim.y + threadIdx.y; // row
if(x < cc && y < cr){
c[y * cc + x] = log(a[y * cc + x] / (1-a[y * cc + x]));
}
}
|
matrixLogit.cu
|
#include "includes.h"
__global__ void matrixLogit(double *a, double *c, int cr, int cc){
int x = blockIdx.x * blockDim.x + threadIdx.x; // col
int y = blockIdx.y * blockDim.y + threadIdx.y; // row
if(x < cc && y < cr){
c[y * cc + x] = log(a[y * cc + x] / (1-a[y * cc + x]));
}
}
|
eca239435160d29959da47bc877dab6f17240c6e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <torch/extension.h>
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/device_ptr.h>
#include <thrust/generate.h>
#include <thrust/sort.h>
#include "fusion_cuda.h"
#define CUDA_NUM_THREADS 256
#define THREADS_PER_BLOCK 64
#define DIM0(TENSOR) ((TENSOR).x)
#define DIM1(TENSOR) ((TENSOR).y)
#define DIM2(TENSOR) ((TENSOR).z)
#define DIM3(TENSOR) ((TENSOR).w)
#define DIM3_INDEX(TENSOR, xx, yy, zz, ww) ((TENSOR)[((xx) * (TENSOR##_stride.x)) + ((yy) * (TENSOR##_stride.y)) + ((zz) * (TENSOR##_stride.z)) + ((ww) * (TENSOR##_stride.w))])
/*
#ifdef __cplusplus
extern "C" {
#endif
*/
typedef thrust::tuple<int, int, int, int, int> IntTuple;
struct less
{
__host__ __device__
bool operator()(const IntTuple& t1, const IntTuple& t2)
{
if (t1.get<3>() != t2.get<3>())
return t1.get<3>() < t2.get<3>();
if (t1.get<0>() != t2.get<0>())
return t1.get<0>() < t2.get<0>();
if (t1.get<1>() != t2.get<1>())
return t1.get<1>() < t2.get<1>();
return t1.get<2>() < t2.get<2>();
}
};
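// Duplicate-removal pipeline used by merge_cuda_forward: coordinates are sorted
// by (batch, x, y, z) together with their original indices, remove_repeat keeps
// the first index of every run of identical coordinates and encodes the repeats
// as -(idx + 1), and merge_feature then averages each run of repeated points
// into the surviving entry.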
__global__ void remove_repeat(const int n, const int *x, const int*y, const int* z, const int*batch, const int* idx, int* dst) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= n) {
return;
}
if(index==0){
dst[index]=idx[index];
return;
}
int pre = index -1;
if(x[pre]==x[index]&&y[pre]==y[index]&&z[pre]==z[index]&&batch[pre]==batch[index])
dst[index]=-1-idx[index];
else
dst[index]=idx[index];
}
__global__ void merge_feature(const int n, const int * order, const int channel, float* feature) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= n-1) {
return;
}
if(order[index]<0)
return;
int next = index + 1;
// if(next>=n)
// return;
if(order[next]>=0)
return;
while(next<n){
if(order[next]>=0)
break;
int loc_next = -1 - order[next];
for(int i=0;i<channel;i++){
feature[order[index]*channel+i] += feature[loc_next*channel+i];
}
next ++;
}
for(int i=0;i<channel; i++){
feature[order[index]*channel+i] /= (next-index);
}
}
__global__ void merge_feature_max(const int n, const int * order, const int channel, float* feature) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= n-1) {
return;
}
if(order[index]<0)
return;
int next = index + 1;
// if(next>=n)
// return;
int cur = order[index];
while(next<n){
if(order[next]>=0)
break;
int loc_next = -1 - order[next];
for(int i=0;i<channel;i++){
if(feature[cur*channel+i] < feature[loc_next*channel+i])
feature[cur*channel+i] = feature[loc_next*channel+i];
}
next ++;
}
}
/*
order_out: dense mapping order of the output/selected tensor (repeated removed).
*/
__global__ void feature_backward1(const int n, const float* grad_out, const int * order_out, const int channel, float* grad_feature) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= n) {
return;
}
int source = order_out[index];
for(int i=0;i<channel;i++){
grad_feature[source*channel+i]=grad_out[index*channel+i];
}
}
/*
order: sorted mapping order of the input tensor (including repeated points/coords).
Fill the gradient of repeated points/coords.
*/
__global__ void feature_backward2(const int n, const int * order, const int channel, float* grad_feature) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= n-1) {
return;
}
int source = order[index];
if(source<0)
return;
int next = index +1;
// if(next>=n)
// return;
if(order[next]>=0)
return;
while(next<n){
if(order[next]>=0)
break;
int loc_next = -1 - order[next];
for(int i=0;i<channel;i++){
grad_feature[loc_next*channel+i]=grad_feature[source*channel+i];
}
next ++;
}
float norm = next - index;
for(int k =index; k<next; k++){
for(int i=0;i<channel;i++){
grad_feature[order[k]*channel+i] /= norm;
}
}
}
__global__ void feature_backward_max(const int n, const int * order, const float* feature, const int channel, float* grad_feature) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= n-1) {
return;
}
int source = order[index];
if(source<0)
return;
int next = index +1;
// if(next>=n)
// return;
if(order[next]>=0)
return;
while(next<n){
if(order[next]>=0)
break;
next ++;
}
for(int i=0;i<channel;i++){
int cur = order[index];
for(int k =index; k<next; k++){
int loc_next = -1 - order[k];
if(feature[cur*channel+i] < feature[loc_next*channel+i])
cur = loc_next;
}
float temp = grad_feature[order[index]*channel+i];
grad_feature[order[index]*channel+i] = 0;
grad_feature[cur*channel+i] = temp;
}
}
__device__ int compare(const int x, const int y, const int z, const int batch, const int * que) {
if ((batch==que[3])&&(x==que[0])&&(y==que[1])&&(z==que[2]))
return -1;
if (batch != que[3])
return int(batch < que[3]);
if (x != que[0])
return int(x < que[0]);
if (y != que[1])
return int(y < que[1]);
return int(z < que[2]);
}
__global__ void search(const int n, const int length, const int *x, const int*y, const int* z, const int*batch, const int* order, const int* que, int* idx) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= n) {
return;
}
int left = 0;
int right = length-1;
int cur = (left+right)/2;
while(left <= right){
int flag = compare(x[cur], y[cur], z[cur], batch[cur], que + index*4);
if(flag==-1){
idx[index] = order[cur];
return;
}
if(flag==0)
right = cur-1;
if(flag==1)
left = cur +1;
cur = (left + right)/2;
}
idx[index] = -1;
}
void merge_cuda_forward(at::Tensor coords, at::Tensor features, at::Tensor order){
int num = coords.size(1);
int channel = coords.size(0);
int fea_channel = features.size(1);
if(channel!=4){
printf("error in coords shape!\n");
exit(0);
}
float *feas = features.data<float>();
int *locs = coords.data<int>();
int *dst_order = order.data<int>();
thrust::device_ptr<int> dev_ptr(locs);
thrust::device_vector<int> x (dev_ptr, dev_ptr + num);
thrust::device_vector<int> y(dev_ptr+num, dev_ptr +2*num);
thrust::device_vector<int> z(dev_ptr+2*num, dev_ptr+3*num);
thrust::device_vector<int> batch(dev_ptr+3*num, dev_ptr+4*num);
thrust::device_ptr<int> dev_idx(dst_order);
thrust::device_vector<int> idx(dev_idx, dev_idx + num);
sort(thrust::make_zip_iterator(thrust::make_tuple(x.begin(), y.begin(), z.begin(), batch.begin(), idx.begin())), thrust::make_zip_iterator(thrust::make_tuple(x.end(), y.end(), z.end(), batch.end(), idx.end())), less());
const int* ptr_x = thrust::raw_pointer_cast(&x[0]);
const int* ptr_y = thrust::raw_pointer_cast(&y[0]);
const int* ptr_z = thrust::raw_pointer_cast(&z[0]);
const int* ptr_batch = thrust::raw_pointer_cast(&batch[0]);
const int* ptr_idx = thrust::raw_pointer_cast(&idx[0]);
int threads = (num + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
hipLaunchKernelGGL(( remove_repeat), dim3(threads), dim3(CUDA_NUM_THREADS) , 0, 0, num, ptr_x, ptr_y, ptr_z, ptr_batch, ptr_idx, dst_order);
hipLaunchKernelGGL(( merge_feature), dim3(threads), dim3(CUDA_NUM_THREADS) , 0, 0, num, dst_order, fea_channel, feas);
x.clear();
thrust::device_vector<int>().swap(x);
y.clear();
thrust::device_vector<int>().swap(y);
z.clear();
thrust::device_vector<int>().swap(z);
batch.clear();
thrust::device_vector<int>().swap(batch);
idx.clear();
thrust::device_vector<int>().swap(idx);
}
void merge_cuda_backward(at::Tensor grad_output, at::Tensor out_order, at::Tensor order, at::Tensor features, at::Tensor grad_input){
int num1 = order.size(0);
int num2 = out_order.size(0);
int channel = grad_output.size(1);
float *feas = features.data<float>();
int *out_idx = out_order.data<int>();
int *idx = order.data<int>();
float * gradout = grad_output.data<float>();
float * gradin = grad_input.data<float>();
int threads = (num2 + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
hipLaunchKernelGGL(( feature_backward1), dim3(threads), dim3(CUDA_NUM_THREADS) , 0, 0, num2, gradout, out_idx, channel, gradin);
threads = (num1 + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
hipLaunchKernelGGL(( feature_backward2), dim3(threads), dim3(CUDA_NUM_THREADS) , 0, 0, num1, idx, channel, gradin);
}
void get_index_cuda(at::Tensor coords, at::Tensor order, at::Tensor query, at::Tensor index){
int num1 = coords.size(1);
int channel1 = coords.size(0);
int num2 = query.size(0);
int channel2 = query.size(1);
if(channel1!=channel2 && channel1!=4){
printf("%d %d %d %d\n", channel1, channel2, num1, num2);
printf("error in coords shape!\n");
exit(0);
}
int *que = query.data<int>();
int *locs = coords.data<int>();
int *src_order = order.data<int>();
int *dst_idx = index.data<int>();
thrust::device_ptr<int> dev_ptr(locs);
thrust::device_vector<int> x(dev_ptr, dev_ptr + num1);
thrust::device_vector<int> y(dev_ptr+num1, dev_ptr +2*num1);
thrust::device_vector<int> z(dev_ptr+2*num1, dev_ptr+3*num1);
thrust::device_vector<int> batch(dev_ptr+3*num1, dev_ptr+4*num1);
thrust::device_ptr<int> dev_idx(src_order);
thrust::device_vector<int> idx(dev_idx, dev_idx + num1);
sort(thrust::make_zip_iterator(thrust::make_tuple(x.begin(), y.begin(), z.begin(), batch.begin(), idx.begin())), thrust::make_zip_iterator(thrust::make_tuple(x.end(), y.end(), z.end(), batch.end(), idx.end())), less());
const int* ptr_x = thrust::raw_pointer_cast(&x[0]);
const int* ptr_y = thrust::raw_pointer_cast(&y[0]);
const int* ptr_z = thrust::raw_pointer_cast(&z[0]);
const int* ptr_batch = thrust::raw_pointer_cast(&batch[0]);
const int* ptr_idx = thrust::raw_pointer_cast(&idx[0]);
int threads = (num2 + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
hipLaunchKernelGGL(( search), dim3(threads), dim3(CUDA_NUM_THREADS) , 0, 0, num2, num1, ptr_x, ptr_y, ptr_z, ptr_batch, ptr_idx, que, dst_idx);
x.clear();
thrust::device_vector<int>().swap(x);
y.clear();
thrust::device_vector<int>().swap(y);
z.clear();
thrust::device_vector<int>().swap(z);
batch.clear();
thrust::device_vector<int>().swap(batch);
idx.clear();
thrust::device_vector<int>().swap(idx);
}
__global__ void knn_kernel(const int n, const int m, const int k, const int start, const float * known, const float* unknown, float * dist2, int * idx) {
// unknown: (N, 3)
// known: (M, 3)
// output:
// dist2: (N, k)
// idx: (N, k)
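    // Brute force: all m reference points are scanned and a sorted list of the
    // k smallest squared distances (with their indices, offset by `start`) is
    // maintained per query via insertion into dist2/idx.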
int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (pt_idx >= n) return;
unknown += pt_idx * 3;
// known += bs_idx * m * 3;
dist2 += pt_idx * k;
idx += pt_idx * k;
float ux = unknown[0];
float uy = unknown[1];
float uz = unknown[2];
// double best1 = 1e40, best2 = 1e40, best3 = 1e40;
// int besti1 = 0, besti2 = 0, besti3 = 0;
for(int j = 0; j < k; j++)
dist2[j] = 1e10;
for (int i = 0; i < m; ++i) {
float x = known[i * 3 + 0];
float y = known[i * 3 + 1];
float z = known[i * 3 + 2];
float d = (ux - x) * (ux - x) + (uy - y) * (uy - y) + (uz - z) * (uz - z);
for(int j = 0; j < k; j++){
if(d < dist2[j]){
// memcpy(dist2+j+1, dist2+j, (k-j-1)*sizeof(float));
// memcpy(idx+j+1, idx+j, (k-j-1)*sizeof(int));
#if 1
for(int l=k-1;l>j;l--){
dist2[l]=dist2[l-1];
idx[l] = idx[l-1];
}
dist2[j] = d;
idx[j]=i+start;
break;
#else
if(j==k-1){
dist2[j]=d;
idx[j]=i+start;
break;
}
else{
hipMemcpyAsync(dist2+j+1, dist2+j, (k-j-1)*sizeof(float), hipMemcpyDeviceToDevice);
hipMemcpyAsync(idx+j+1, idx+j, (k-j-1)*sizeof(float), hipMemcpyDeviceToDevice);
// memcpy(dist2+j+1, dist2+j, (k-j-1)*sizeof(float));
// memcpy(idx+j+1, idx+j, (k-j-1)*sizeof(int));
dist2[j] = d;
idx[j]=i+start;
break;
}
#endif
}
}
}
}
__global__ void knn_kernel2(const int n, const int k, const int *batches, const int *end, const float * known, const float* unknown, float * dist2, int * idx) {
// unknown: (N, 3)
// known: (M, 3)
// output:
// dist2: (N, k)
// idx: (N, k)
int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (pt_idx >= n) return;
unknown += pt_idx * 3;
// known += bs_idx * m * 3;
dist2 += pt_idx * k;
idx += pt_idx * k;
float ux = unknown[0];
float uy = unknown[1];
float uz = unknown[2];
// double best1 = 1e40, best2 = 1e40, best3 = 1e40;
// int besti1 = 0, besti2 = 0, besti3 = 0;
for(int j = 0; j < k; j++)
dist2[j] = 1e10;
int cur_batch = batches[pt_idx];
int start = 0;
int stop = end[cur_batch];
if(cur_batch>0) start = end[cur_batch-1];
for (int i = start; i < stop; i++) {
float x = known[i * 3 + 0];
float y = known[i * 3 + 1];
float z = known[i * 3 + 2];
float d = (ux - x) * (ux - x) + (uy - y) * (uy - y) + (uz - z) * (uz - z);
for(int j = 0; j < k; j++){
if(d < dist2[j]){
for(int l=k-1;l>j;l--){
dist2[l]=dist2[l-1];
idx[l] = idx[l-1];
}
dist2[j] = d;
idx[j]=i;
break;
}
}
}
}
__global__ void locate_kernel(const int n, const int length, const int*batch, int* locs) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= n) {
return;
}
int left = 0;
int right = length-1;
int cur = (left+right)/2;
while(left <= right){
if(index<batch[cur])
right = cur-1;
if(index>=batch[cur])
left = cur +1;
cur = (left + right)/2;
}
locs[index] = left;
// printf("%d %d\n", index, locs[index]);
}
/*
int locate(const int *batch, const int length, const int index) {
int left = 0;
int right = length-1;
int cur = (left+right)/2;
while(left <= right){
if(index<batch[cur])
right = cur-1;
if(index>=batch[cur])
left = cur +1;
cur = (left + right)/2;
}
return left;
// printf("%d %d\n", index, locs[index]);
}
*/
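// Builds a CSR-style inverse of a neighbor-index table: `end` first accumulates
// per-target counts and their prefix sums, then each query id (i / k) is
// scattered into inv_idx at its target's slot. The helper runs sequentially in
// the calling thread, and after the scatter pass end[j] holds the old end[j+1].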
__device__ void inv_index(const int n, const int k, const int * idx, int * inv_idx, int* end) {
for(int i=0;i<n*k;i++){
int j = idx[i];
end[j+1] ++;
}
for(int i=0;i<n*k;i++)
end[i+1] += end[i];
for(int i=0;i<n*k;i++){
int j = idx[i];
inv_idx[end[j]] = i/k;
end[j]++;
}
}
void knn_cuda(at::Tensor known, at::Tensor unknown, at::Tensor batch, at::Tensor dist, at::Tensor idx, const int k, const int batchsize) {
// unknown: (B, N, 3)
// known: (B, M, 3)
// output:
// dist2: (B, N, 3)
// idx: (B, N, 3)
// int clc = clock();
int N = unknown.size(0);
int M = known.size(0);
// int batchsize = nums.size(0)
const float * ref = known.data<float>();
const float *que = unknown.data<float>();
const int *batch_idx = batch.data<int>();
// int * end = nums.data<int>();
float * dist2 = dist.data<float>();
int * index = idx.data<int>();
hipError_t err;
int *end;
hipMalloc((void **)&end, batchsize * sizeof(int));
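    // Two-step search: locate_kernel binary-searches the (assumed batch-sorted)
    // batch array so that end[b] holds the exclusive end offset of batch b in
    // the point list, and knn_kernel2 then restricts each query's brute-force
    // k-NN scan to its own batch range [end[b-1], end[b]).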
int threads = (batchsize + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
// printf("%d start ...\n", batchsize);
hipLaunchKernelGGL(( locate_kernel), dim3(threads), dim3(CUDA_NUM_THREADS) , 0, 0, batchsize, N, batch_idx, end);
threads = (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
hipLaunchKernelGGL(( knn_kernel2), dim3(threads), dim3(CUDA_NUM_THREADS), 0, 0, N, k, batch_idx, end, ref, que, dist2, index);
// printf("locate done ...\n");
// printf("%dms\n", (clock()-clc) * 1000 / CLOCKS_PER_SEC);
#if 0
int * h_end = (int*)malloc(batchsize * sizeof(int));
hipMemcpy ( h_end, end, sizeof(int)*batchsize, hipMemcpyDeviceToHost);
// for(int i=0;i<batchsize;i++) printf("%d\n", h_end[i]);
// printf("%dms\n", (clock()-clc) * 1000 / CLOCKS_PER_SEC);
// dim3 blocks(DIVUP(n, THREADS_PER_BLOCK), b); // blockIdx.x(col), blockIdx.y(row)
// dim3 threads(THREADS_PER_BLOCK);
for(int i =0;i<batchsize; i++){
// N = h_end[i];
// M = h_end[i];
int start =0;
if(i>0) start=h_end[i-1];
N = h_end[i]-start;
M = N;
threads = (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
hipLaunchKernelGGL(( knn_kernel), dim3(threads), dim3(CUDA_NUM_THREADS), 0, 0, N, M, k, start, ref+start*3, que+start*3, dist2+start*k, index+start*k);
}
free(h_end);
h_end = NULL;
#endif
hipFree(end);
end = NULL;
err = hipGetLastError();
if (hipSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", hipGetErrorString(err));
exit(-1);
}
// printf("%dms\n", (clock()-clc) * 1000 / CLOCKS_PER_SEC);
}
/*
#ifdef __cplusplus
}
#endif
*/
|
eca239435160d29959da47bc877dab6f17240c6e.cu
|
#include <torch/extension.h>
#include <stdio.h>
#include <cuda.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/device_ptr.h>
#include <thrust/generate.h>
#include <thrust/sort.h>
#include "fusion_cuda.h"
#define CUDA_NUM_THREADS 256
#define THREADS_PER_BLOCK 64
#define DIM0(TENSOR) ((TENSOR).x)
#define DIM1(TENSOR) ((TENSOR).y)
#define DIM2(TENSOR) ((TENSOR).z)
#define DIM3(TENSOR) ((TENSOR).w)
#define DIM3_INDEX(TENSOR, xx, yy, zz, ww) ((TENSOR)[((xx) * (TENSOR##_stride.x)) + ((yy) * (TENSOR##_stride.y)) + ((zz) * (TENSOR##_stride.z)) + ((ww) * (TENSOR##_stride.w))])
/*
#ifdef __cplusplus
extern "C" {
#endif
*/
typedef thrust::tuple<int, int, int, int, int> IntTuple;
struct less
{
__host__ __device__
bool operator()(const IntTuple& t1, const IntTuple& t2)
{
if (t1.get<3>() != t2.get<3>())
return t1.get<3>() < t2.get<3>();
if (t1.get<0>() != t2.get<0>())
return t1.get<0>() < t2.get<0>();
if (t1.get<1>() != t2.get<1>())
return t1.get<1>() < t2.get<1>();
return t1.get<2>() < t2.get<2>();
}
};
__global__ void remove_repeat(const int n, const int *x, const int*y, const int* z, const int*batch, const int* idx, int* dst) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= n) {
return;
}
if(index==0){
dst[index]=idx[index];
return;
}
int pre = index -1;
if(x[pre]==x[index]&&y[pre]==y[index]&&z[pre]==z[index]&&batch[pre]==batch[index])
dst[index]=-1-idx[index];
else
dst[index]=idx[index];
}
__global__ void merge_feature(const int n, const int * order, const int channel, float* feature) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= n-1) {
return;
}
if(order[index]<0)
return;
int next = index + 1;
// if(next>=n)
// return;
if(order[next]>=0)
return;
while(next<n){
if(order[next]>=0)
break;
int loc_next = -1 - order[next];
for(int i=0;i<channel;i++){
feature[order[index]*channel+i] += feature[loc_next*channel+i];
}
next ++;
}
for(int i=0;i<channel; i++){
feature[order[index]*channel+i] /= (next-index);
}
}
__global__ void merge_feature_max(const int n, const int * order, const int channel, float* feature) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= n-1) {
return;
}
if(order[index]<0)
return;
int next = index + 1;
// if(next>=n)
// return;
int cur = order[index];
while(next<n){
if(order[next]>=0)
break;
int loc_next = -1 - order[next];
for(int i=0;i<channel;i++){
if(feature[cur*channel+i] < feature[loc_next*channel+i])
feature[cur*channel+i] = feature[loc_next*channel+i];
}
next ++;
}
}
/*
order_out: dense mapping order of the output/selected tensor (repeated removed).
*/
__global__ void feature_backward1(const int n, const float* grad_out, const int * order_out, const int channel, float* grad_feature) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= n) {
return;
}
int source = order_out[index];
for(int i=0;i<channel;i++){
grad_feature[source*channel+i]=grad_out[index*channel+i];
}
}
/*
order: sorted mapping order of the input tensor (including repeated points/coords).
Fill the gradient of repeated points/coords.
*/
__global__ void feature_backward2(const int n, const int * order, const int channel, float* grad_feature) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= n-1) {
return;
}
int source = order[index];
if(source<0)
return;
int next = index +1;
// if(next>=n)
// return;
if(order[next]>=0)
return;
while(next<n){
if(order[next]>=0)
break;
int loc_next = -1 - order[next];
for(int i=0;i<channel;i++){
grad_feature[loc_next*channel+i]=grad_feature[source*channel+i];
}
next ++;
}
float norm = next - index;
for(int k =index; k<next; k++){
for(int i=0;i<channel;i++){
grad_feature[order[k]*channel+i] /= norm;
}
}
}
__global__ void feature_backward_max(const int n, const int * order, const float* feature, const int channel, float* grad_feature) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= n-1) {
return;
}
int source = order[index];
if(source<0)
return;
int next = index +1;
// if(next>=n)
// return;
if(order[next]>=0)
return;
while(next<n){
if(order[next]>=0)
break;
next ++;
}
for(int i=0;i<channel;i++){
int cur = order[index];
for(int k =index; k<next; k++){
int loc_next = -1 - order[k];
if(feature[cur*channel+i] < feature[loc_next*channel+i])
cur = loc_next;
}
float temp = grad_feature[order[index]*channel+i];
grad_feature[order[index]*channel+i] = 0;
grad_feature[cur*channel+i] = temp;
}
}
__device__ int compare(const int x, const int y, const int z, const int batch, const int * que) {
if ((batch==que[3])&&(x==que[0])&&(y==que[1])&&(z==que[2]))
return -1;
if (batch != que[3])
return int(batch < que[3]);
if (x != que[0])
return int(x < que[0]);
if (y != que[1])
return int(y < que[1]);
return int(z < que[2]);
}
__global__ void search(const int n, const int length, const int *x, const int*y, const int* z, const int*batch, const int* order, const int* que, int* idx) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= n) {
return;
}
int left = 0;
int right = length-1;
int cur = (left+right)/2;
while(left <= right){
int flag = compare(x[cur], y[cur], z[cur], batch[cur], que + index*4);
if(flag==-1){
idx[index] = order[cur];
return;
}
if(flag==0)
right = cur-1;
if(flag==1)
left = cur +1;
cur = (left + right)/2;
}
idx[index] = -1;
}
void merge_cuda_forward(at::Tensor coords, at::Tensor features, at::Tensor order){
int num = coords.size(1);
int channel = coords.size(0);
int fea_channel = features.size(1);
if(channel!=4){
printf("error in coords shape!\n");
exit(0);
}
float *feas = features.data<float>();
int *locs = coords.data<int>();
int *dst_order = order.data<int>();
thrust::device_ptr<int> dev_ptr(locs);
thrust::device_vector<int> x (dev_ptr, dev_ptr + num);
thrust::device_vector<int> y(dev_ptr+num, dev_ptr +2*num);
thrust::device_vector<int> z(dev_ptr+2*num, dev_ptr+3*num);
thrust::device_vector<int> batch(dev_ptr+3*num, dev_ptr+4*num);
thrust::device_ptr<int> dev_idx(dst_order);
thrust::device_vector<int> idx(dev_idx, dev_idx + num);
sort(thrust::make_zip_iterator(thrust::make_tuple(x.begin(), y.begin(), z.begin(), batch.begin(), idx.begin())), thrust::make_zip_iterator(thrust::make_tuple(x.end(), y.end(), z.end(), batch.end(), idx.end())), less());
const int* ptr_x = thrust::raw_pointer_cast(&x[0]);
const int* ptr_y = thrust::raw_pointer_cast(&y[0]);
const int* ptr_z = thrust::raw_pointer_cast(&z[0]);
const int* ptr_batch = thrust::raw_pointer_cast(&batch[0]);
const int* ptr_idx = thrust::raw_pointer_cast(&idx[0]);
int threads = (num + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
remove_repeat<<< threads, CUDA_NUM_THREADS >>>(num, ptr_x, ptr_y, ptr_z, ptr_batch, ptr_idx, dst_order);
merge_feature<<< threads, CUDA_NUM_THREADS >>>(num, dst_order, fea_channel, feas);
x.clear();
thrust::device_vector<int>().swap(x);
y.clear();
thrust::device_vector<int>().swap(y);
z.clear();
thrust::device_vector<int>().swap(z);
batch.clear();
thrust::device_vector<int>().swap(batch);
idx.clear();
thrust::device_vector<int>().swap(idx);
}
void merge_cuda_backward(at::Tensor grad_output, at::Tensor out_order, at::Tensor order, at::Tensor features, at::Tensor grad_input){
int num1 = order.size(0);
int num2 = out_order.size(0);
int channel = grad_output.size(1);
float *feas = features.data<float>();
int *out_idx = out_order.data<int>();
int *idx = order.data<int>();
float * gradout = grad_output.data<float>();
float * gradin = grad_input.data<float>();
int threads = (num2 + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
feature_backward1<<< threads, CUDA_NUM_THREADS >>>(num2, gradout, out_idx, channel, gradin);
threads = (num1 + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
feature_backward2<<< threads, CUDA_NUM_THREADS >>>(num1, idx, channel, gradin);
}
void get_index_cuda(at::Tensor coords, at::Tensor order, at::Tensor query, at::Tensor index){
int num1 = coords.size(1);
int channel1 = coords.size(0);
int num2 = query.size(0);
int channel2 = query.size(1);
if(channel1!=channel2 && channel1!=4){
printf("%d %d %d %d\n", channel1, channel2, num1, num2);
printf("error in coords shape!\n");
exit(0);
}
int *que = query.data<int>();
int *locs = coords.data<int>();
int *src_order = order.data<int>();
int *dst_idx = index.data<int>();
thrust::device_ptr<int> dev_ptr(locs);
thrust::device_vector<int> x(dev_ptr, dev_ptr + num1);
thrust::device_vector<int> y(dev_ptr+num1, dev_ptr +2*num1);
thrust::device_vector<int> z(dev_ptr+2*num1, dev_ptr+3*num1);
thrust::device_vector<int> batch(dev_ptr+3*num1, dev_ptr+4*num1);
thrust::device_ptr<int> dev_idx(src_order);
thrust::device_vector<int> idx(dev_idx, dev_idx + num1);
sort(thrust::make_zip_iterator(thrust::make_tuple(x.begin(), y.begin(), z.begin(), batch.begin(), idx.begin())), thrust::make_zip_iterator(thrust::make_tuple(x.end(), y.end(), z.end(), batch.end(), idx.end())), less());
const int* ptr_x = thrust::raw_pointer_cast(&x[0]);
const int* ptr_y = thrust::raw_pointer_cast(&y[0]);
const int* ptr_z = thrust::raw_pointer_cast(&z[0]);
const int* ptr_batch = thrust::raw_pointer_cast(&batch[0]);
const int* ptr_idx = thrust::raw_pointer_cast(&idx[0]);
int threads = (num2 + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
search<<< threads, CUDA_NUM_THREADS >>>(num2, num1, ptr_x, ptr_y, ptr_z, ptr_batch, ptr_idx, que, dst_idx);
x.clear();
thrust::device_vector<int>().swap(x);
y.clear();
thrust::device_vector<int>().swap(y);
z.clear();
thrust::device_vector<int>().swap(z);
batch.clear();
thrust::device_vector<int>().swap(batch);
idx.clear();
thrust::device_vector<int>().swap(idx);
}
__global__ void knn_kernel(const int n, const int m, const int k, const int start, const float * known, const float* unknown, float * dist2, int * idx) {
// unknown: (N, 3)
// known: (M, 3)
// output:
// dist2: (N, k)
// idx: (N, k)
int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (pt_idx >= n) return;
unknown += pt_idx * 3;
// known += bs_idx * m * 3;
dist2 += pt_idx * k;
idx += pt_idx * k;
float ux = unknown[0];
float uy = unknown[1];
float uz = unknown[2];
// double best1 = 1e40, best2 = 1e40, best3 = 1e40;
// int besti1 = 0, besti2 = 0, besti3 = 0;
for(int j = 0; j < k; j++)
dist2[j] = 1e10;
for (int i = 0; i < m; ++i) {
float x = known[i * 3 + 0];
float y = known[i * 3 + 1];
float z = known[i * 3 + 2];
float d = (ux - x) * (ux - x) + (uy - y) * (uy - y) + (uz - z) * (uz - z);
for(int j = 0; j < k; j++){
if(d < dist2[j]){
// memcpy(dist2+j+1, dist2+j, (k-j-1)*sizeof(float));
// memcpy(idx+j+1, idx+j, (k-j-1)*sizeof(int));
#if 1
for(int l=k-1;l>j;l--){
dist2[l]=dist2[l-1];
idx[l] = idx[l-1];
}
dist2[j] = d;
idx[j]=i+start;
break;
#else
if(j==k-1){
dist2[j]=d;
idx[j]=i+start;
break;
}
else{
cudaMemcpyAsync(dist2+j+1, dist2+j, (k-j-1)*sizeof(float), cudaMemcpyDeviceToDevice);
cudaMemcpyAsync(idx+j+1, idx+j, (k-j-1)*sizeof(float), cudaMemcpyDeviceToDevice);
// memcpy(dist2+j+1, dist2+j, (k-j-1)*sizeof(float));
// memcpy(idx+j+1, idx+j, (k-j-1)*sizeof(int));
dist2[j] = d;
idx[j]=i+start;
break;
}
#endif
}
}
}
}
__global__ void knn_kernel2(const int n, const int k, const int *batches, const int *end, const float * known, const float* unknown, float * dist2, int * idx) {
// unknown: (N, 3)
// known: (M, 3)
// output:
// dist2: (N, k)
// idx: (N, k)
int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (pt_idx >= n) return;
unknown += pt_idx * 3;
// known += bs_idx * m * 3;
dist2 += pt_idx * k;
idx += pt_idx * k;
float ux = unknown[0];
float uy = unknown[1];
float uz = unknown[2];
// double best1 = 1e40, best2 = 1e40, best3 = 1e40;
// int besti1 = 0, besti2 = 0, besti3 = 0;
for(int j = 0; j < k; j++)
dist2[j] = 1e10;
int cur_batch = batches[pt_idx];
int start = 0;
int stop = end[cur_batch];
if(cur_batch>0) start = end[cur_batch-1];
for (int i = start; i < stop; i++) {
float x = known[i * 3 + 0];
float y = known[i * 3 + 1];
float z = known[i * 3 + 2];
float d = (ux - x) * (ux - x) + (uy - y) * (uy - y) + (uz - z) * (uz - z);
for(int j = 0; j < k; j++){
if(d < dist2[j]){
for(int l=k-1;l>j;l--){
dist2[l]=dist2[l-1];
idx[l] = idx[l-1];
}
dist2[j] = d;
idx[j]=i;
break;
}
}
}
}
__global__ void locate_kernel(const int n, const int length, const int*batch, int* locs) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= n) {
return;
}
int left = 0;
int right = length-1;
int cur = (left+right)/2;
while(left <= right){
if(index<batch[cur])
right = cur-1;
if(index>=batch[cur])
left = cur +1;
cur = (left + right)/2;
}
locs[index] = left;
// printf("%d %d\n", index, locs[index]);
}
/*
int locate(const int *batch, const int length, const int index) {
int left = 0;
int right = length-1;
int cur = (left+right)/2;
while(left <= right){
if(index<batch[cur])
right = cur-1;
if(index>=batch[cur])
left = cur +1;
cur = (left + right)/2;
}
return left;
// printf("%d %d\n", index, locs[index]);
}
*/
__device__ void inv_index(const int n, const int k, const int * idx, int * inv_idx, int* end) {
for(int i=0;i<n*k;i++){
int j = idx[i];
end[j+1] ++;
}
for(int i=0;i<n*k;i++)
end[i+1] += end[i];
for(int i=0;i<n*k;i++){
int j = idx[i];
inv_idx[end[j]] = i/k;
end[j]++;
}
}
void knn_cuda(at::Tensor known, at::Tensor unknown, at::Tensor batch, at::Tensor dist, at::Tensor idx, const int k, const int batchsize) {
// unknown: (B, N, 3)
// known: (B, M, 3)
// output:
// dist2: (B, N, 3)
// idx: (B, N, 3)
// int clc = clock();
int N = unknown.size(0);
int M = known.size(0);
// int batchsize = nums.size(0)
const float * ref = known.data<float>();
const float *que = unknown.data<float>();
const int *batch_idx = batch.data<int>();
// int * end = nums.data<int>();
float * dist2 = dist.data<float>();
int * index = idx.data<int>();
cudaError_t err;
int *end;
cudaMalloc((void **)&end, batchsize * sizeof(int));
int threads = (batchsize + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
// printf("%d start ...\n", batchsize);
locate_kernel<<<threads, CUDA_NUM_THREADS >>>(batchsize, N, batch_idx, end);
threads = (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
knn_kernel2<<<threads, CUDA_NUM_THREADS>>>(N, k, batch_idx, end, ref, que, dist2, index);
// printf("locate done ...\n");
// printf("%dms\n", (clock()-clc) * 1000 / CLOCKS_PER_SEC);
#if 0
int * h_end = (int*)malloc(batchsize * sizeof(int));
cudaMemcpy ( h_end, end, sizeof(int)*batchsize, cudaMemcpyDeviceToHost);
// for(int i=0;i<batchsize;i++) printf("%d\n", h_end[i]);
// printf("%dms\n", (clock()-clc) * 1000 / CLOCKS_PER_SEC);
// dim3 blocks(DIVUP(n, THREADS_PER_BLOCK), b); // blockIdx.x(col), blockIdx.y(row)
// dim3 threads(THREADS_PER_BLOCK);
for(int i =0;i<batchsize; i++){
// N = h_end[i];
// M = h_end[i];
int start =0;
if(i>0) start=h_end[i-1];
N = h_end[i]-start;
M = N;
threads = (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
knn_kernel<<<threads, CUDA_NUM_THREADS>>>(N, M, k, start, ref+start*3, que+start*3, dist2+start*k, index+start*k);
}
free(h_end);
h_end = NULL;
#endif
cudaFree(end);
end = NULL;
err = cudaGetLastError();
if (cudaSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
exit(-1);
}
// printf("%dms\n", (clock()-clc) * 1000 / CLOCKS_PER_SEC);
}
/*
#ifdef __cplusplus
}
#endif
*/
|
fe6e8c1330f37a4de1a67cfbdab04943aa5b275b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* 2D convolution: P = N convolved with the constant kernel M.
* Host code.
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include "2Dconvolution.h"
// includes, kernels
__constant__ float Mc[KERNEL_SIZE * KERNEL_SIZE];
#include "2Dconvolution_kernel.cu"
////////////////////////////////////////////////////////////////////////////////
// declarations, forward
extern "C"
void computeGold(float*, const float*, const float*, unsigned int, unsigned int);
Matrix AllocateDeviceMatrix(const Matrix M);
Matrix AllocateMatrix(int height, int width, int init);
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost);
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice);
int ReadFile(Matrix* M, char* file_name);
void WriteFile(Matrix M, char* file_name);
void FreeDeviceMatrix(Matrix* M);
void FreeMatrix(Matrix* M);
int ReadParamsFile(int* params, char* file_name, int num_params);
void ConvolutionOnDevice(const Matrix M, const Matrix N, Matrix P);
bool CompareMatrices(Matrix A, Matrix B);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char** argv) {
Matrix M;
Matrix N;
Matrix P;
srand(2012);
if(argc != 5 && argc != 4)
{
// Allocate and initialize the matrices
M = AllocateMatrix(KERNEL_SIZE, KERNEL_SIZE, 1);
N = AllocateMatrix((rand() % 1024) + 1, (rand() % 1024) + 1, 1);
P = AllocateMatrix(N.height, N.width, 0);
}
else
{
// Allocate and read in matrices from disk
int* params = (int*) malloc(2*sizeof(int));
unsigned int data_read = ReadParamsFile(params, argv[1], 2);
if(data_read != 2){
printf("Error reading parameter file\n");
return 1;
}
M = AllocateMatrix(KERNEL_SIZE, KERNEL_SIZE, 0);
N = AllocateMatrix(params[0], params[1], 0);
P = AllocateMatrix(params[0], params[1], 0);
free(params);
(void)ReadFile(&M, argv[2]);
(void)ReadFile(&N, argv[3]);
}
    // Convolve N with the kernel M on the device
ConvolutionOnDevice(M, N, P);
    // compute the convolution on the CPU for comparison
Matrix reference = AllocateMatrix(P.height, P.width, 0);
hipEvent_t start, stop;
float elapsedTime = 0;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
computeGold(reference.elements, M.elements, N.elements, N.height, N.width);
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
printf("Time taken by CPU Gold %lf milliseconds.\n", elapsedTime);
    // in this case check if the result is equivalent to the expected solution
bool res = CompareMatrices(reference, P);
printf("Test %s\n", (res) ? "PASSED" : "FAILED");
if(argc == 5)
{
WriteFile(P, argv[4]);
}
else if(argc == 2)
{
WriteFile(P, argv[1]);
}
// Free matrices
FreeMatrix(&M);
FreeMatrix(&N);
FreeMatrix(&P);
return 0;
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
void ConvolutionOnDevice(const Matrix M, const Matrix N, Matrix P)
{
hipEvent_t startBig, stopBig, start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventCreate(&startBig);
hipEventCreate(&stopBig);
hipEventRecord(startBig);
// Load M and N to the device
Matrix Md = AllocateDeviceMatrix(M);
int size = M.width * M.height * sizeof(float);
Md.height = M.height;
Md.width = M.width;
Md.pitch = M.pitch;
hipMemcpyToSymbol(Mc, M.elements, size);
Matrix Nd = AllocateDeviceMatrix(N);
CopyToDeviceMatrix(Nd, N);
// Allocate P on the device
Matrix Pd = AllocateDeviceMatrix(P);
CopyToDeviceMatrix(Pd, P); // Clear memory
// Setup the execution configuration
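    // BLOCK_SIZE and TILE_SIZE come from 2Dconvolution.h (not shown here); in
    // the usual setup for this kernel BLOCK_SIZE = TILE_SIZE + KERNEL_SIZE - 1,
    // so each BLOCK_SIZE x BLOCK_SIZE block stages an input tile plus halo while
    // producing a TILE_SIZE x TILE_SIZE output tile, hence the grid below is
    // sized by TILE_SIZE.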
dim3 GD, BD;
BD.x = BLOCK_SIZE;
BD.y = BLOCK_SIZE;
BD.z = 1;
GD.x = ceil((float)P.width/TILE_SIZE),
GD.y = ceil((float)P.height/TILE_SIZE),
GD.z = 1;
hipEventRecord(start);
// Launch the device computation threads!
hipLaunchKernelGGL(( ConvolutionKernel), dim3(GD), dim3(BD), 0, 0, Nd, Pd);
hipEventRecord(stop);
hipEventSynchronize(stop);
float kernelElapsedTime = 0;
hipEventElapsedTime(&kernelElapsedTime, start, stop);
printf("Time taken by GPU Kernel %lf milliseconds.\n", kernelElapsedTime);
// Read P from the device
CopyFromDeviceMatrix(P, Pd);
// Free device matrices
FreeDeviceMatrix(&Md);
FreeDeviceMatrix(&Nd);
FreeDeviceMatrix(&Pd);
hipEventRecord(stopBig);
hipEventSynchronize(stopBig);
float elapsedTime = 0;
hipEventElapsedTime(&elapsedTime, startBig, stopBig);
printf("Overhead Time taken by GPU %lf milliseconds.\n", elapsedTime-kernelElapsedTime);
}
// Allocate a device matrix of same size as M.
Matrix AllocateDeviceMatrix(const Matrix M)
{
Matrix Mdevice = M;
int size = M.width * M.height * sizeof(float);
hipMalloc((void**)&Mdevice.elements, size);
return Mdevice;
}
// Allocate a host matrix of dimensions height*width
// If init == 0, initialize to all zeroes.
// If init == 1, perform random initialization.
// If init == 2, initialize matrix parameters, but do not allocate memory
Matrix AllocateMatrix(int height, int width, int init)
{
Matrix M;
M.width = M.pitch = width;
M.height = height;
int size = M.width * M.height;
M.elements = NULL;
// don't allocate memory on option 2
if(init == 2)
return M;
M.elements = (float*) malloc(size*sizeof(float));
for(unsigned int i = 0; i < M.height * M.width; i++)
{
M.elements[i] = (init == 0) ? (0.0f) : (rand() / (float)RAND_MAX);
if(rand() % 2)
M.elements[i] = - M.elements[i];
}
return M;
}
// Copy a host matrix to a device matrix.
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost)
{
int size = Mhost.width * Mhost.height * sizeof(float);
Mdevice.height = Mhost.height;
Mdevice.width = Mhost.width;
Mdevice.pitch = Mhost.pitch;
hipMemcpy(Mdevice.elements, Mhost.elements, size,
hipMemcpyHostToDevice);
}
// Copy a device matrix to a host matrix.
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice)
{
int size = Mdevice.width * Mdevice.height * sizeof(float);
hipMemcpy(Mhost.elements, Mdevice.elements, size,
hipMemcpyDeviceToHost);
}
// Free a device matrix.
void FreeDeviceMatrix(Matrix* M)
{
hipFree(M->elements);
M->elements = NULL;
}
// Free a host Matrix
void FreeMatrix(Matrix* M)
{
free(M->elements);
M->elements = NULL;
}
// Read a floating point matrix in from file
// Returns the number of elements expected to be read,
// i.e. M.height * M.width (the fscanf results are not checked)
int ReadFile(Matrix* M, char* file_name)
{
unsigned int data_read = M->width * M->height;
FILE* input = fopen(file_name, "r");
for (unsigned i = 0; i < data_read; i++)
fscanf(input, "%f", &(M->elements[i]));
return data_read;
}
// Read params of input matrices
int ReadParamsFile(int* params, char* file_name, int num_params)
{
FILE* input = fopen(file_name, "r");
for (unsigned i = 0; i < num_params; i++)
fscanf(input, "%d", &(params[i]));
return num_params;
}
// Write a floating point matrix to file
void WriteFile(Matrix M, char* file_name)
{
unsigned int size = M.width * M.height;
FILE* output = fopen(file_name, "w");
for (unsigned i = 0; i < size; i++) {
fprintf(output, "%f ", M.elements[i]);
}
}
// returns true iff A and B have same elements in same order
bool CompareMatrices(Matrix A, Matrix B) {
unsigned int size = A.width * A.height;
if ( (A.width != B.width) || (A.height != B.height) )
return false;
for (unsigned i = 0; i < size; i++)
if (abs(A.elements[i] - B.elements[i]) > 0.001f)
return false;
return true;
}
|
fe6e8c1330f37a4de1a67cfbdab04943aa5b275b.cu
|
/* Matrix multiplication: C = A * B.
* Host code.
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include "2Dconvolution.h"
// includes, kernels
__constant__ float Mc[KERNEL_SIZE * KERNEL_SIZE];
#include "2Dconvolution_kernel.cu"
////////////////////////////////////////////////////////////////////////////////
// declarations, forward
extern "C"
void computeGold(float*, const float*, const float*, unsigned int, unsigned int);
Matrix AllocateDeviceMatrix(const Matrix M);
Matrix AllocateMatrix(int height, int width, int init);
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost);
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice);
int ReadFile(Matrix* M, char* file_name);
void WriteFile(Matrix M, char* file_name);
void FreeDeviceMatrix(Matrix* M);
void FreeMatrix(Matrix* M);
int ReadParamsFile(int* params, char* file_name, int num_params);
void ConvolutionOnDevice(const Matrix M, const Matrix N, Matrix P);
bool CompareMatrices(Matrix A, Matrix B);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char** argv) {
Matrix M;
Matrix N;
Matrix P;
srand(2012);
if(argc != 5 && argc != 4)
{
// Allocate and initialize the matrices
M = AllocateMatrix(KERNEL_SIZE, KERNEL_SIZE, 1);
N = AllocateMatrix((rand() % 1024) + 1, (rand() % 1024) + 1, 1);
P = AllocateMatrix(N.height, N.width, 0);
}
else
{
// Allocate and read in matrices from disk
int* params = (int*) malloc(2*sizeof(int));
unsigned int data_read = ReadParamsFile(params, argv[1], 2);
if(data_read != 2){
printf("Error reading parameter file\n");
return 1;
}
M = AllocateMatrix(KERNEL_SIZE, KERNEL_SIZE, 0);
N = AllocateMatrix(params[0], params[1], 0);
P = AllocateMatrix(params[0], params[1], 0);
free(params);
(void)ReadFile(&M, argv[2]);
(void)ReadFile(&N, argv[3]);
}
// M * N on the device
ConvolutionOnDevice(M, N, P);
// compute the matrix multiplication on the CPU for comparison
Matrix reference = AllocateMatrix(P.height, P.width, 0);
cudaEvent_t start, stop;
float elapsedTime = 0;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
computeGold(reference.elements, M.elements, N.elements, N.height, N.width);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
printf("Time taken by CPU Gold %lf milliseconds.\n", elapsedTime);
// in this case check if the result is equivalent to the expected solution
bool res = CompareMatrices(reference, P);
printf("Test %s\n", (res) ? "PASSED" : "FAILED");
if(argc == 5)
{
WriteFile(P, argv[4]);
}
else if(argc == 2)
{
WriteFile(P, argv[1]);
}
// Free matrices
FreeMatrix(&M);
FreeMatrix(&N);
FreeMatrix(&P);
return 0;
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
void ConvolutionOnDevice(const Matrix M, const Matrix N, Matrix P)
{
cudaEvent_t startBig, stopBig, start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventCreate(&startBig);
cudaEventCreate(&stopBig);
cudaEventRecord(startBig);
// Load M and N to the device
Matrix Md = AllocateDeviceMatrix(M);
int size = M.width * M.height * sizeof(float);
Md.height = M.height;
Md.width = M.width;
Md.pitch = M.pitch;
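// The convolution mask M is small, so it is copied into the constant-memory array Mc rather than
// passed as a kernel argument; every thread block then reads it through the constant cache.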
cudaMemcpyToSymbol(Mc, M.elements, size);
Matrix Nd = AllocateDeviceMatrix(N);
CopyToDeviceMatrix(Nd, N);
// Allocate P on the device
Matrix Pd = AllocateDeviceMatrix(P);
CopyToDeviceMatrix(Pd, P); // Clear memory
// Setup the execution configuration
dim3 GD, BD;
BD.x = BLOCK_SIZE;
BD.y = BLOCK_SIZE;
BD.z = 1;
GD.x = ceil((float)P.width/TILE_SIZE),
GD.y = ceil((float)P.height/TILE_SIZE),
GD.z = 1;
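// Illustrative sizing only (TILE_SIZE and BLOCK_SIZE are defined in 2Dconvolution.h): if TILE_SIZE
// were 16 and P were 1000x1000, GD would be ceil(1000/16.0) = 63 blocks in each of x and y.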
cudaEventRecord(start);
// Launch the device computation threads!
ConvolutionKernel<<<GD, BD>>>(Nd, Pd);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float kernelElapsedTime = 0;
cudaEventElapsedTime(&kernelElapsedTime, start, stop);
printf("Time taken by GPU Kernel %lf milliseconds.\n", kernelElapsedTime);
// Read P from the device
CopyFromDeviceMatrix(P, Pd);
// Free device matrices
FreeDeviceMatrix(&Md);
FreeDeviceMatrix(&Nd);
FreeDeviceMatrix(&Pd);
cudaEventRecord(stopBig);
cudaEventSynchronize(stopBig);
float elapsedTime = 0;
cudaEventElapsedTime(&elapsedTime, startBig, stopBig);
printf("Overhead Time taken by GPU %lf milliseconds.\n", elapsedTime-kernelElapsedTime);
}
// Allocate a device matrix of same size as M.
Matrix AllocateDeviceMatrix(const Matrix M)
{
Matrix Mdevice = M;
int size = M.width * M.height * sizeof(float);
cudaMalloc((void**)&Mdevice.elements, size);
return Mdevice;
}
// Allocate a host matrix of dimensions height*width
// If init == 0, initialize to all zeroes.
// If init == 1, perform random initialization.
// If init == 2, initialize matrix parameters, but do not allocate memory
Matrix AllocateMatrix(int height, int width, int init)
{
Matrix M;
M.width = M.pitch = width;
M.height = height;
int size = M.width * M.height;
M.elements = NULL;
// don't allocate memory on option 2
if(init == 2)
return M;
M.elements = (float*) malloc(size*sizeof(float));
for(unsigned int i = 0; i < M.height * M.width; i++)
{
M.elements[i] = (init == 0) ? (0.0f) : (rand() / (float)RAND_MAX);
if(rand() % 2)
M.elements[i] = - M.elements[i];
}
return M;
}
// Copy a host matrix to a device matrix.
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost)
{
int size = Mhost.width * Mhost.height * sizeof(float);
Mdevice.height = Mhost.height;
Mdevice.width = Mhost.width;
Mdevice.pitch = Mhost.pitch;
cudaMemcpy(Mdevice.elements, Mhost.elements, size,
cudaMemcpyHostToDevice);
}
// Copy a device matrix to a host matrix.
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice)
{
int size = Mdevice.width * Mdevice.height * sizeof(float);
cudaMemcpy(Mhost.elements, Mdevice.elements, size,
cudaMemcpyDeviceToHost);
}
// Free a device matrix.
void FreeDeviceMatrix(Matrix* M)
{
cudaFree(M->elements);
M->elements = NULL;
}
// Free a host Matrix
void FreeMatrix(Matrix* M)
{
free(M->elements);
M->elements = NULL;
}
// Read a floating point matrix in from file
// Returns the number of elements expected to be read,
// i.e. M.height * M.width (the fscanf results are not checked)
int ReadFile(Matrix* M, char* file_name)
{
unsigned int data_read = M->width * M->height;
FILE* input = fopen(file_name, "r");
for (unsigned i = 0; i < data_read; i++)
fscanf(input, "%f", &(M->elements[i]));
return data_read;
}
// Read params of input matrices
int ReadParamsFile(int* params, char* file_name, int num_params)
{
FILE* input = fopen(file_name, "r");
for (unsigned i = 0; i < num_params; i++)
fscanf(input, "%d", &(params[i]));
return num_params;
}
// Write a floating point matrix to file
void WriteFile(Matrix M, char* file_name)
{
unsigned int size = M.width * M.height;
FILE* output = fopen(file_name, "w");
for (unsigned i = 0; i < size; i++) {
fprintf(output, "%f ", M.elements[i]);
}
}
// returns true iff A and B have same elements in same order
bool CompareMatrices(Matrix A, Matrix B) {
unsigned int size = A.width * A.height;
if ( (A.width != B.width) || (A.height != B.height) )
return false;
for (unsigned i = 0; i < size; i++)
if (abs(A.elements[i] - B.elements[i]) > 0.001f)
return false;
return true;
}
|
573c84ec5ec4c29ae6ffac2be3cb2e3bb369a214.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <petscdevice_cuda.h>
#include "ex18.h"
__global__ void FillValues(PetscInt n, PetscScalar *v)
{
PetscInt i = blockIdx.x * blockDim.x + threadIdx.x;
PetscScalar *s;
if (i < n) {
s = &v[3 * 3 * i];
for (PetscInt vi = 0; vi < 3; vi++) {
for (PetscInt vj = 0; vj < 3; vj++) s[vi * 3 + vj] = vi + 2 * vj;
}
}
}
PetscErrorCode FillMatrixCUDACOO(FEStruct *fe, Mat A)
{
PetscScalar *v;
PetscFunctionBeginUser;
PetscCallCUDA(hipMalloc((void **)&v, 3 * 3 * fe->Ne * sizeof(PetscScalar)));
hipLaunchKernelGGL(( FillValues), dim3((fe->Ne + 255) / 256), dim3(256), 0, 0, fe->Ne, v);
PetscCall(MatSetValuesCOO(A, v, INSERT_VALUES));
PetscCallCUDA(hipFree(v));
PetscFunctionReturn(0);
}
|
573c84ec5ec4c29ae6ffac2be3cb2e3bb369a214.cu
|
#include <petscdevice_cuda.h>
#include "ex18.h"
__global__ void FillValues(PetscInt n, PetscScalar *v)
{
PetscInt i = blockIdx.x * blockDim.x + threadIdx.x;
PetscScalar *s;
if (i < n) {
s = &v[3 * 3 * i];
for (PetscInt vi = 0; vi < 3; vi++) {
for (PetscInt vj = 0; vj < 3; vj++) s[vi * 3 + vj] = vi + 2 * vj;
}
}
}
PetscErrorCode FillMatrixCUDACOO(FEStruct *fe, Mat A)
{
PetscScalar *v;
PetscFunctionBeginUser;
PetscCallCUDA(cudaMalloc((void **)&v, 3 * 3 * fe->Ne * sizeof(PetscScalar)));
FillValues<<<(fe->Ne + 255) / 256, 256>>>(fe->Ne, v);
PetscCall(MatSetValuesCOO(A, v, INSERT_VALUES));
PetscCallCUDA(cudaFree(v));
PetscFunctionReturn(0);
}
|
3a46c498944340b20682dba0af24b858bd19579e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include "../include/config.cuh"
#include "../include/matrixComOpt.cuh"
// ATile: TILE_SIZE * TILE_SIZE
// BTile: TILE_SIZE * (TILE_SIZE * VEC_SIZE)
//Each thread computes one vertical strip of C, so we need to load one value of B and a column of C of length VEC_SIZE into registers
__global__ void gpuMatrixComOpt(int *A, int *B, int *C, int m, int n, int k){
int bx = blockIdx.x, by = blockIdx.y;
int tx = threadIdx.x, ty = threadIdx.y;
__shared__ int ATile[TILE_SIZE * TILE_SIZE];
volatile int cCol[TILE_SIZE];
for(int i = 0; i < TILE_SIZE; i++) cCol[i] = 0;
int aBegin = n * TILE_SIZE * by;
int aEnd = aBegin + n - 1;
int aStride = TILE_SIZE;
int bBegin = TILE_SIZE * VEC_SIZE * bx;
int bStride = TILE_SIZE * k;
for(int a = aBegin, b = bBegin; a <= aEnd; a += aStride, b += bStride){
//Computing one element of C needs a row vector of A and a column vector of B, so the traversal order is unchanged
//Step 1:load A_{(0, 0)} to shared memory.
//
// Shared memory is visible to every thread in the block, and each block has <TILE_SIZE, VEC_SIZE> threads,
// so each thread loads TILE_SIZE / VEC_SIZE values
//
for(int i = 0; i < TILE_SIZE / VEC_SIZE; i++)
ATile[(i * VEC_SIZE + ty) + TILE_SIZE * tx] = A[a + n * (i * VEC_SIZE + ty) + tx];
//In effect, at i == 0 the values of A from columns spaced VEC_SIZE apart land in ATile at rows spaced VEC_SIZE apart
__syncthreads();
int *aPtr = ATile;
int *bPtr = &B[b + TILE_SIZE * ty + tx];
//Fetch the value at B's global coordinates into a register
for(int i = 0; i < TILE_SIZE; i++){
int bVal = * bPtr;
for(int j = 0; j < TILE_SIZE; j++)
cCol[j] += aPtr[j] * bVal;
//ATile is effectively transposed, so the elements can be multiplied directly
aPtr += TILE_SIZE;
bPtr += k;
//The result that used to be accumulated in one pass is spread over several partial accumulations
}
__syncthreads();
}
int cPos = k * TILE_SIZE * by + TILE_SIZE * VEC_SIZE * bx + TILE_SIZE * ty + tx;
//Each thread block computes a <TILE_SIZE, TILE_SIZE * VEC_SIZE> tile of C.
//Each block has <TILE_SIZE, VEC_SIZE> threads, so every thread computes VEC_SIZE values
//
for(int i = 0;i < TILE_SIZE; i++){
C[cPos] = cCol[i];
cPos += k;
}
}
__global__ void gpuMatrixComOpt8(int *A, int *B, int *C, int m, int n, int k){
int bx = blockIdx.x, by = blockIdx.y;
int tx = threadIdx.x, ty = threadIdx.y;
__shared__ int ATile[TILE_SIZE * TILE_SIZE];
volatile int cCol[TILE_SIZE];
for(int i = 0; i < TILE_SIZE; i++) cCol[i] = 0;
int aBegin = n * TILE_SIZE * by;
int aEnd = aBegin + n - 1;
int aStride = TILE_SIZE;
int bBegin = TILE_SIZE * 8 * bx;
int bStride = TILE_SIZE * k;
for(int a = aBegin, b = bBegin; a <= aEnd; a += aStride, b += bStride){
//Computing one element of C needs a row vector of A and a column vector of B, so the traversal order is unchanged
//Step 1:load A_{(0, 0)} to shared memory.
//
// Shared memory is visible to every thread in the block, and each block has <TILE_SIZE, VEC_SIZE> threads,
// so each thread loads TILE_SIZE / VEC_SIZE values
//
for(int i = 0; i < TILE_SIZE / 8; i++)
ATile[(i * 8 + ty) + TILE_SIZE * tx] = A[a + n * (i * 8 + ty) + tx];
//In effect, at i == 0 the values of A from columns spaced VEC_SIZE apart land in ATile at rows spaced VEC_SIZE apart
__syncthreads();
int *aPtr = ATile;
int *bPtr = &B[b + TILE_SIZE * ty + tx];
//Fetch the value at B's global coordinates into a register
for(int i = 0; i < TILE_SIZE; i++){
int bVal = * bPtr;
for(int j = 0; j < TILE_SIZE; j++)
cCol[j] += aPtr[j] * bVal;
//ATile is effectively transposed, so the elements can be multiplied directly
aPtr += TILE_SIZE;
bPtr += k;
//The result that used to be accumulated in one pass is spread over several partial accumulations
}
__syncthreads();
}
int cPos = k * TILE_SIZE * by + TILE_SIZE *8 * bx + TILE_SIZE * ty + tx;
//Each thread block computes a <TILE_SIZE, TILE_SIZE * VEC_SIZE> tile of C.
//Each block has <TILE_SIZE, VEC_SIZE> threads, so every thread computes VEC_SIZE values
//
for(int i = 0;i < TILE_SIZE; i++){
C[cPos] = cCol[i];
cPos += k;
}
}
__global__ void gpuMatrixComOpt16(int *A, int *B, int *C, int m, int n, int k){
int bx = blockIdx.x, by = blockIdx.y;
int tx = threadIdx.x, ty = threadIdx.y;
__shared__ int ATile[TILE_SIZE * TILE_SIZE];
volatile int cCol[TILE_SIZE];
for(int i = 0; i < TILE_SIZE; i++) cCol[i] = 0;
int aBegin = n * TILE_SIZE * by;
int aEnd = aBegin + n - 1;
int aStride = TILE_SIZE;
int bBegin = TILE_SIZE * 16 * bx;
int bStride = TILE_SIZE * k;
for(int a = aBegin, b = bBegin; a <= aEnd; a += aStride, b += bStride){
//Computing one element of C needs a row vector of A and a column vector of B, so the traversal order is unchanged
//Step 1:load A_{(0, 0)} to shared memory.
//
// Shared memory is visible to every thread in the block, and each block has <TILE_SIZE, VEC_SIZE> threads,
// so each thread loads TILE_SIZE / VEC_SIZE values
//
for(int i = 0; i < TILE_SIZE / 16; i++)
ATile[(i * 16 + ty) + TILE_SIZE * tx] = A[a + n * (i * 16 + ty) + tx];
//In effect, at i == 0 the values of A from columns spaced VEC_SIZE apart land in ATile at rows spaced VEC_SIZE apart
__syncthreads();
int *aPtr = ATile;
int *bPtr = &B[b + TILE_SIZE * ty + tx];
//Fetch the value at B's global coordinates into a register
for(int i = 0; i < TILE_SIZE; i++){
int bVal = * bPtr;
for(int j = 0; j < TILE_SIZE; j++)
cCol[j] += aPtr[j] * bVal;
//ATile is effectively transposed, so the elements can be multiplied directly
aPtr += TILE_SIZE;
bPtr += k;
//The result that used to be accumulated in one pass is spread over several partial accumulations
}
__syncthreads();
}
int cPos = k * TILE_SIZE * by + TILE_SIZE *16 * bx + TILE_SIZE * ty + tx;
//Each thread block computes a <TILE_SIZE, TILE_SIZE * VEC_SIZE> tile of C.
//Each block has <TILE_SIZE, VEC_SIZE> threads, so every thread computes VEC_SIZE values
//
for(int i = 0;i < TILE_SIZE; i++){
C[cPos] = cCol[i];
cPos += k;
}
}
|
3a46c498944340b20682dba0af24b858bd19579e.cu
|
#include <stdio.h>
#include <cuda_runtime.h>
#include "../include/config.cuh"
#include "../include/matrixComOpt.cuh"
// ATile: TILE_SIZE * TILE_SIZE
// BTile: TILE_SIZE * (TILE_SIZE * VEC_SIZE)
//Each thread computes one vertical strip of C, so we need to load one value of B and a column of C of length VEC_SIZE into registers
__global__ void gpuMatrixComOpt(int *A, int *B, int *C, int m, int n, int k){
int bx = blockIdx.x, by = blockIdx.y;
int tx = threadIdx.x, ty = threadIdx.y;
__shared__ int ATile[TILE_SIZE * TILE_SIZE];
volatile int cCol[TILE_SIZE];
for(int i = 0; i < TILE_SIZE; i++) cCol[i] = 0;
int aBegin = n * TILE_SIZE * by;
int aEnd = aBegin + n - 1;
int aStride = TILE_SIZE;
int bBegin = TILE_SIZE * VEC_SIZE * bx;
int bStride = TILE_SIZE * k;
for(int a = aBegin, b = bBegin; a <= aEnd; a += aStride, b += bStride){
//Computing one element of C needs a row vector of A and a column vector of B, so the traversal order is unchanged
//Step 1:load A_{(0, 0)} to shared memory.
//
// Shared memory is visible to every thread in the block, and each block has <TILE_SIZE, VEC_SIZE> threads,
// so each thread loads TILE_SIZE / VEC_SIZE values
//
for(int i = 0; i < TILE_SIZE / VEC_SIZE; i++)
ATile[(i * VEC_SIZE + ty) + TILE_SIZE * tx] = A[a + n * (i * VEC_SIZE + ty) + tx];
//In effect, at i == 0 the values of A from columns spaced VEC_SIZE apart land in ATile at rows spaced VEC_SIZE apart
__syncthreads();
int *aPtr = ATile;
int *bPtr = &B[b + TILE_SIZE * ty + tx];
//Fetch the value at B's global coordinates into a register
for(int i = 0; i < TILE_SIZE; i++){
int bVal = * bPtr;
for(int j = 0; j < TILE_SIZE; j++)
cCol[j] += aPtr[j] * bVal;
//ATile is effectively transposed, so the elements can be multiplied directly
aPtr += TILE_SIZE;
bPtr += k;
//The result that used to be accumulated in one pass is spread over several partial accumulations
}
__syncthreads();
}
int cPos = k * TILE_SIZE * by + TILE_SIZE * VEC_SIZE * bx + TILE_SIZE * ty + tx;
//Each thread block computes a <TILE_SIZE, TILE_SIZE * VEC_SIZE> tile of C.
//Each block has <TILE_SIZE, VEC_SIZE> threads, so every thread computes VEC_SIZE values
//
for(int i = 0;i < TILE_SIZE; i++){
C[cPos] = cCol[i];
cPos += k;
}
}
__global__ void gpuMatrixComOpt8(int *A, int *B, int *C, int m, int n, int k){
int bx = blockIdx.x, by = blockIdx.y;
int tx = threadIdx.x, ty = threadIdx.y;
__shared__ int ATile[TILE_SIZE * TILE_SIZE];
volatile int cCol[TILE_SIZE];
for(int i = 0; i < TILE_SIZE; i++) cCol[i] = 0;
int aBegin = n * TILE_SIZE * by;
int aEnd = aBegin + n - 1;
int aStride = TILE_SIZE;
int bBegin = TILE_SIZE * 8 * bx;
int bStride = TILE_SIZE * k;
for(int a = aBegin, b = bBegin; a <= aEnd; a += aStride, b += bStride){
//Computing one element of C needs a row vector of A and a column vector of B, so the traversal order is unchanged
//Step 1:load A_{(0, 0)} to shared memory.
//
// Shared memory is visible to every thread in the block, and each block has <TILE_SIZE, VEC_SIZE> threads,
// so each thread loads TILE_SIZE / VEC_SIZE values
//
for(int i = 0; i < TILE_SIZE / 8; i++)
ATile[(i * 8 + ty) + TILE_SIZE * tx] = A[a + n * (i * 8 + ty) + tx];
//In effect, at i == 0 the values of A from columns spaced VEC_SIZE apart land in ATile at rows spaced VEC_SIZE apart
__syncthreads();
int *aPtr = ATile;
int *bPtr = &B[b + TILE_SIZE * ty + tx];
//Fetch the value at B's global coordinates into a register
for(int i = 0; i < TILE_SIZE; i++){
int bVal = * bPtr;
for(int j = 0; j < TILE_SIZE; j++)
cCol[j] += aPtr[j] * bVal;
//ATile is effectively transposed, so the elements can be multiplied directly
aPtr += TILE_SIZE;
bPtr += k;
//The result that used to be accumulated in one pass is spread over several partial accumulations
}
__syncthreads();
}
int cPos = k * TILE_SIZE * by + TILE_SIZE *8 * bx + TILE_SIZE * ty + tx;
//Each thread block computes a <TILE_SIZE, TILE_SIZE * VEC_SIZE> tile of C.
//Each block has <TILE_SIZE, VEC_SIZE> threads, so every thread computes VEC_SIZE values
//
for(int i = 0;i < TILE_SIZE; i++){
C[cPos] = cCol[i];
cPos += k;
}
}
__global__ void gpuMatrixComOpt16(int *A, int *B, int *C, int m, int n, int k){
int bx = blockIdx.x, by = blockIdx.y;
int tx = threadIdx.x, ty = threadIdx.y;
__shared__ int ATile[TILE_SIZE * TILE_SIZE];
volatile int cCol[TILE_SIZE];
for(int i = 0; i < TILE_SIZE; i++) cCol[i] = 0;
int aBegin = n * TILE_SIZE * by;
int aEnd = aBegin + n - 1;
int aStride = TILE_SIZE;
int bBegin = TILE_SIZE * 16 * bx;
int bStride = TILE_SIZE * k;
for(int a = aBegin, b = bBegin; a <= aEnd; a += aStride, b += bStride){
//Computing one element of C needs a row vector of A and a column vector of B, so the traversal order is unchanged
//Step 1:load A_{(0, 0)} to shared memory.
//
// Shared memory is visible to every thread in the block, and each block has <TILE_SIZE, VEC_SIZE> threads,
// so each thread loads TILE_SIZE / VEC_SIZE values
//
for(int i = 0; i < TILE_SIZE / 16; i++)
ATile[(i * 16 + ty) + TILE_SIZE * tx] = A[a + n * (i * 16 + ty) + tx];
//In effect, at i == 0 the values of A from columns spaced VEC_SIZE apart land in ATile at rows spaced VEC_SIZE apart
__syncthreads();
int *aPtr = ATile;
int *bPtr = &B[b + TILE_SIZE * ty + tx];
//Fetch the value at B's global coordinates into a register
for(int i = 0; i < TILE_SIZE; i++){
int bVal = * bPtr;
for(int j = 0; j < TILE_SIZE; j++)
cCol[j] += aPtr[j] * bVal;
//ATile is effectively transposed, so the elements can be multiplied directly
aPtr += TILE_SIZE;
bPtr += k;
//The result that used to be accumulated in one pass is spread over several partial accumulations
}
__syncthreads();
}
int cPos = k * TILE_SIZE * by + TILE_SIZE *16 * bx + TILE_SIZE * ty + tx;
//Each thread block computes a <TILE_SIZE, TILE_SIZE * VEC_SIZE> tile of C.
//Each block has <TILE_SIZE, VEC_SIZE> threads, so every thread computes VEC_SIZE values
//
for(int i = 0;i < TILE_SIZE; i++){
C[cPos] = cCol[i];
cPos += k;
}
}
|
42776463784609f7a1136fb6c8819b39d16a8e0d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <array>
#include <vector>
#include <bitset>
#include <cstdint>
#include <iostream>
#include "./generate_data.cpp"
#define NW 8 // use bitvectors of d=NW*32 bits, example NW=8
#define THREADS_PER_BLOCK 512 // Number of threads per block
#define NUMBER_OF_THREADS 4096
using std::uint32_t; // 32-bit unsigned integer used inside bitvector
// using std::size_t; // unsigned integer for indices
int total_counter = 0;
// type for bitvector
typedef array<uint32_t, NW> bitvec_t;
typedef array<uint32_t, 2> compound_t;
// type for lists of bitvectors
typedef vector<bitvec_t> list_t;
typedef vector<compound_t> output_t;
// type for any function that takes a list_t by reference
typedef void(*callback_list_t)(output_t *);
// Takes the index of the first vector this launch handles, the packed list of bitvectors, an output
// array for the thresholded XOR weights, the vector size in words, the list size and the threshold
__global__ void nns_kernel(uint32_t *start_vec_id, uint32_t *vecs,
uint32_t *ret_vec, uint32_t *vector_size,
uint32_t *l_size, uint32_t *thres) {
// compute which vector the thread has to do the xor operation on
uint32_t thread_id = threadIdx.x + blockIdx.x * blockDim.x;
uint32_t prim_vec = *start_vec_id + thread_id;
// the variable in which the amount of ones after the xor are added
uint32_t vectorweight, k;
// make sure the vectorindex is within the amount of vectors
if (prim_vec < *l_size) {
for (uint32_t j = 0; j < prim_vec; j++) {
vectorweight = 0;
/* for each word in the vector do the xor operation with the
* corresponding word of the other vector and count the ones
* with popc
*/
for (k = 0; k < *vector_size; k++) {
vectorweight += __popc(vecs[*vector_size * prim_vec + k] ^
vecs[*vector_size * j + k]);
}
// thresholding the weight on the gpu
ret_vec[thread_id * *l_size + j] = (vectorweight < *thres);
}
}
}
// Takes an output list and prints the indices per line
__host__ void print_output(output_t *output) {
for (uint32_t i = 0; i < (*output).size(); i++) {
total_counter += 1;
//printf("1: %d ", output[i][0]);
//printf("2: %d\n", output[i][1]);
}
(*output).clear();
}
// Takes a reference to a vector of bitvec_t, a uint32 threshold and a callback for handling the output;
// compares all the vectors in L pairwise and performs nearest-neighbour search.
void NSS(const list_t& L, uint32_t t, callback_list_t f) {
// allocate space for all the variable pointers needed
output_t output;
bitvec_t *vecs;
uint32_t *vec, *vecd, *vecsd, *ret_vecd, *ret_vec, *vec_size, *vecd_size,
*l_sized, *l_size, *thres, *thresd, n_blocks, n_threads;
n_blocks = (NUMBER_OF_THREADS + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
n_threads = n_blocks * THREADS_PER_BLOCK;
l_size = (uint32_t *)malloc(sizeof(uint32_t));
*l_size = L.size();
// Initialize Host memory for vectors
vec = (uint32_t *)malloc(sizeof(uint32_t));
vecs = (bitvec_t *)malloc(sizeof(bitvec_t) * *l_size);
ret_vec = (uint32_t *)calloc(*l_size *n_threads, sizeof(uint32_t));
vec_size = (uint32_t *)malloc(sizeof(uint32_t));
thres = (uint32_t *)malloc(sizeof(uint32_t));
// Copy location of data in vector
memcpy(vecs, L.data(), *l_size * sizeof(bitvec_t));
// Set vector size
*vec_size = L[0].size();
*thres = t;
// Allocate device memory for needed data
hipMalloc((void **)&vecd, sizeof(bitvec_t));
hipMalloc((void **)&vecsd,*l_size * sizeof(bitvec_t));
hipMalloc((void **)&vecd_size, sizeof(uint32_t));
hipMalloc((void **)&l_sized, sizeof(uint32_t));
hipMalloc((void **)&thresd, sizeof(uint32_t));
hipMalloc((void **)&ret_vecd,
*l_size * n_threads * sizeof(uint32_t));
// Store L in device memory
hipMemcpy(vecsd, vecs, *l_size * sizeof(bitvec_t), hipMemcpyHostToDevice);
// Store vector size in device memory
hipMemcpy(vecd_size, vec_size, sizeof(uint32_t), hipMemcpyHostToDevice);
// Store list size in device memory
hipMemcpy(l_sized, l_size, sizeof(uint32_t), hipMemcpyHostToDevice);
hipMemcpy(thresd, thres, sizeof(uint32_t), hipMemcpyHostToDevice);
// start first iteration at vector with index 1
*vec = 1;
hipMemcpy(vecd, vec, sizeof(uint32_t), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( nns_kernel), dim3(n_blocks), dim3(THREADS_PER_BLOCK) , 0, 0,
vecd, vecsd, ret_vecd, vecd_size, l_sized, thresd);
hipMemcpy(ret_vec, ret_vecd,
*l_size * n_threads * sizeof(uint32_t),
hipMemcpyDeviceToHost);
uint32_t j,prim_vec, sec_vec;
int i;
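// Batched sweep: each launch handles the next n_threads primary vectors, and while it runs on the
// GPU the host converts the previous batch's thresholded results (already copied into ret_vec)
// into index pairs and flushes them through the callback.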
for (i = 1 + n_threads; i < *l_size; i = i + n_threads) {
// Initialize device memory to write found weights to
*vec = i;
hipMemcpy(vecd, vec, sizeof(uint32_t), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( nns_kernel), dim3(n_blocks), dim3(THREADS_PER_BLOCK) , 0, 0,
vecd, vecsd, ret_vecd, vecd_size, l_sized, thresd);
for (j = 0; j < n_threads; j++) {
prim_vec = i - n_threads + j;
if (prim_vec < *l_size) {
for (sec_vec = 0; sec_vec < prim_vec; sec_vec++) {
// check if hit or miss
if(ret_vec[j * *l_size + sec_vec]) {
compound_t callback_pair;
callback_pair[0] = prim_vec;
callback_pair[1] = sec_vec;
output.emplace_back(callback_pair);
}
}
}
}
// Empty output list
f(&output);
// Retrieve found weights from GPU memory
hipMemcpy(ret_vec, ret_vecd,
*l_size * n_threads * sizeof(uint32_t),
hipMemcpyDeviceToHost);
}
for (j = 0; j < n_threads; j++) {
prim_vec = i - n_threads + j;
if (prim_vec < *l_size) {
for (sec_vec = 0; sec_vec < prim_vec; sec_vec++) {
// check if hit or miss
if(ret_vec[j * *l_size + sec_vec]) {
compound_t callback_pair;
callback_pair[0] = prim_vec;
callback_pair[1] = sec_vec;
output.emplace_back(callback_pair);
}
}
}
}
// Empty output list
f(&output);
hipFree(vecd); hipFree(vecsd); hipFree(ret_vecd); hipFree(vecd_size);
hipFree(l_sized); hipFree(thresd);
free(vec); free(ret_vec); free(vecs); free(vec_size); free(l_size);
free(thres);
}
int main() {
list_t test;
uint32_t leng = 5000;
// starting the timer
clock_t start;
double duration;
start = clock();
// generating the dataset
generate_random_list(test, leng);
// setting the threshold
uint32_t t = 110;
NSS(test, t, print_output);
// end the timer
duration = (clock() - start ) / (double) CLOCKS_PER_SEC;
cout<<"execution duration: "<< duration <<'\n';
cout<<"total pairs: " << total_counter << '\n';
cout.flush();
return 0;
}
|
42776463784609f7a1136fb6c8819b39d16a8e0d.cu
|
#include <array>
#include <vector>
#include <bitset>
#include <cstdint>
#include <iostream>
#include "./generate_data.cpp"
#define NW 8 // use bitvectors of d=NW*32 bits, example NW=8
#define THREADS_PER_BLOCK 512 // Number of threads per block
#define NUMBER_OF_THREADS 4096
using std::uint32_t; // 32-bit unsigned integer used inside bitvector
// using std::size_t; // unsigned integer for indices
int total_counter = 0;
// type for bitvector
typedef array<uint32_t, NW> bitvec_t;
typedef array<uint32_t, 2> compound_t;
// type for lists of bitvectors
typedef vector<bitvec_t> list_t;
typedef vector<compound_t> output_t;
// type for any function that takes a list_t by reference
typedef void(*callback_list_t)(output_t *);
// Takes the index of the first vector this launch handles, the packed list of bitvectors, an output
// array for the thresholded XOR weights, the vector size in words, the list size and the threshold
__global__ void nns_kernel(uint32_t *start_vec_id, uint32_t *vecs,
uint32_t *ret_vec, uint32_t *vector_size,
uint32_t *l_size, uint32_t *thres) {
// compute which vector the thread has to do the xor operation on
uint32_t thread_id = threadIdx.x + blockIdx.x * blockDim.x;
uint32_t prim_vec = *start_vec_id + thread_id;
// the variable in which the number of ones after the xor is accumulated
uint32_t vectorweight, k;
// make sure the vector index is within the number of vectors
if (prim_vec < *l_size) {
for (uint32_t j = 0; j < prim_vec; j++) {
vectorweight = 0;
/* for each word in the vector do the xor operation with the
* corresponding word of the other vector and count the ones
* with popc
*/
for (k = 0; k < *vector_size; k++) {
vectorweight += __popc(vecs[*vector_size * prim_vec + k] ^
vecs[*vector_size * j + k]);
}
// thresholding the weight on the gpu
ret_vec[thread_id * *l_size + j] = (vectorweight < *thres);
}
}
}
// Takes an output list and prints the indices per line
__host__ void print_output(output_t *output) {
for (uint32_t i = 0; i < (*output).size(); i++) {
total_counter += 1;
//printf("1: %d ", output[i][0]);
//printf("2: %d\n", output[i][1]);
}
(*output).clear();
}
// Takes a reference to a vector of bitvec_t, a uint32 threshold and a callback for handling the output;
// compares all the vectors in L pairwise and performs nearest-neighbour search.
void NSS(const list_t& L, uint32_t t, callback_list_t f) {
// allocate space for all the variable pointers needed
output_t output;
bitvec_t *vecs;
uint32_t *vec, *vecd, *vecsd, *ret_vecd, *ret_vec, *vec_size, *vecd_size,
*l_sized, *l_size, *thres, *thresd, n_blocks, n_threads;
n_blocks = (NUMBER_OF_THREADS + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
n_threads = n_blocks * THREADS_PER_BLOCK;
l_size = (uint32_t *)malloc(sizeof(uint32_t));
*l_size = L.size();
// Initialize Host memory for vectors
vec = (uint32_t *)malloc(sizeof(uint32_t));
vecs = (bitvec_t *)malloc(sizeof(bitvec_t) * *l_size);
ret_vec = (uint32_t *)calloc(*l_size *n_threads, sizeof(uint32_t));
vec_size = (uint32_t *)malloc(sizeof(uint32_t));
thres = (uint32_t *)malloc(sizeof(uint32_t));
// Copy location of data in vector
memcpy(vecs, L.data(), *l_size * sizeof(bitvec_t));
// Set vector size
*vec_size = L[0].size();
*thres = t;
// Allocate device memory for needed data
cudaMalloc((void **)&vecd, sizeof(bitvec_t));
cudaMalloc((void **)&vecsd,*l_size * sizeof(bitvec_t));
cudaMalloc((void **)&vecd_size, sizeof(uint32_t));
cudaMalloc((void **)&l_sized, sizeof(uint32_t));
cudaMalloc((void **)&thresd, sizeof(uint32_t));
cudaMalloc((void **)&ret_vecd,
*l_size * n_threads * sizeof(uint32_t));
// Store L in device memory
cudaMemcpy(vecsd, vecs, *l_size * sizeof(bitvec_t), cudaMemcpyHostToDevice);
// Store vector size in device memory
cudaMemcpy(vecd_size, vec_size, sizeof(uint32_t), cudaMemcpyHostToDevice);
// Store list size in device memory
cudaMemcpy(l_sized, l_size, sizeof(uint32_t), cudaMemcpyHostToDevice);
cudaMemcpy(thresd, thres, sizeof(uint32_t), cudaMemcpyHostToDevice);
// start first iteration at vector with index 1
*vec = 1;
cudaMemcpy(vecd, vec, sizeof(uint32_t), cudaMemcpyHostToDevice);
nns_kernel<<< n_blocks, THREADS_PER_BLOCK >>>
(vecd, vecsd, ret_vecd, vecd_size, l_sized, thresd);
cudaMemcpy(ret_vec, ret_vecd,
*l_size * n_threads * sizeof(uint32_t),
cudaMemcpyDeviceToHost);
uint32_t j,prim_vec, sec_vec;
int i;
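// Batched sweep: each launch handles the next n_threads primary vectors, and while it runs on the
// GPU the host converts the previous batch's thresholded results (already copied into ret_vec)
// into index pairs and flushes them through the callback.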
for (i = 1 + n_threads; i < *l_size; i = i + n_threads) {
// Initialize device memory to write found weights to
*vec = i;
cudaMemcpy(vecd, vec, sizeof(uint32_t), cudaMemcpyHostToDevice);
nns_kernel<<< n_blocks, THREADS_PER_BLOCK >>>
(vecd, vecsd, ret_vecd, vecd_size, l_sized, thresd);
for (j = 0; j < n_threads; j++) {
prim_vec = i - n_threads + j;
if (prim_vec < *l_size) {
for (sec_vec = 0; sec_vec < prim_vec; sec_vec++) {
// check if hit or miss
if(ret_vec[j * *l_size + sec_vec]) {
compound_t callback_pair;
callback_pair[0] = prim_vec;
callback_pair[1] = sec_vec;
output.emplace_back(callback_pair);
}
}
}
}
// Empty output list
f(&output);
// Retrieve found weights from GPU memory
cudaMemcpy(ret_vec, ret_vecd,
*l_size * n_threads * sizeof(uint32_t),
cudaMemcpyDeviceToHost);
}
for (j = 0; j < n_threads; j++) {
prim_vec = i - n_threads + j;
if (prim_vec < *l_size) {
for (sec_vec = 0; sec_vec < prim_vec; sec_vec++) {
// check if hit or miss
if(ret_vec[j * *l_size + sec_vec]) {
compound_t callback_pair;
callback_pair[0] = prim_vec;
callback_pair[1] = sec_vec;
output.emplace_back(callback_pair);
}
}
}
}
// Empty output list
f(&output);
cudaFree(vecd); cudaFree(vecsd); cudaFree(ret_vecd); cudaFree(vecd_size);
cudaFree(l_sized); cudaFree(thresd);
free(vec); free(ret_vec); free(vecs); free(vec_size); free(l_size);
free(thres);
}
int main() {
list_t test;
uint32_t leng = 5000;
// starting the timer
clock_t start;
double duration;
start = clock();
// generating the dataset
generate_random_list(test, leng);
// setting the threshold
uint32_t t = 110;
NSS(test, t, print_output);
// end the timer
duration = (clock() - start ) / (double) CLOCKS_PER_SEC;
cout<<"execution duration: "<< duration <<'\n';
cout<<"total pairs: " << total_counter << '\n';
cout.flush();
return 0;
}
|
6c51abd9ea9ab64f4c90f169f3960b2c785c812a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__global__ void square(float * d_out, float * d_in){
int idx = threadIdx.x;
float f = d_in[idx];
d_out[idx] = f * f;
}
int main(int argc, char ** argv) {
const int ARRAY_SIZE = 64;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
// generate the input array on the host
float h_in[ARRAY_SIZE];
for (int i = 0; i < ARRAY_SIZE; i++) {
h_in[i] = float(i);
}
float h_out[ARRAY_SIZE];
// declare GPU memory pointers
float * d_in;
float * d_out;
// allocate GPU memory
hipMalloc((void**) &d_in, ARRAY_BYTES);
hipMalloc((void**) &d_out, ARRAY_BYTES);
// transfer the array to the GPU
hipMemcpy(d_in, h_in, ARRAY_BYTES, hipMemcpyHostToDevice);
// launch the kernel
hipLaunchKernelGGL(( square), dim3(1), dim3(ARRAY_SIZE), 0, 0, d_out, d_in);
// copy back the result array to the CPU
hipMemcpy(h_out, d_out, ARRAY_BYTES, hipMemcpyDeviceToHost);
// print out the resulting array
for (int i =0; i < ARRAY_SIZE; i++) {
printf("%f", h_out[i]);
printf(((i % 4) != 3) ? "\t" : "\n");
}
hipFree(d_in);
hipFree(d_out);
return 0;
}
|
6c51abd9ea9ab64f4c90f169f3960b2c785c812a.cu
|
#include <stdio.h>
__global__ void square(float * d_out, float * d_in){
int idx = threadIdx.x;
float f = d_in[idx];
d_out[idx] = f * f;
}
int main(int argc, char ** argv) {
const int ARRAY_SIZE = 64;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
// generate the input array on the host
float h_in[ARRAY_SIZE];
for (int i = 0; i < ARRAY_SIZE; i++) {
h_in[i] = float(i);
}
float h_out[ARRAY_SIZE];
// declare GPU memory pointers
float * d_in;
float * d_out;
// allocate GPU memory
cudaMalloc((void**) &d_in, ARRAY_BYTES);
cudaMalloc((void**) &d_out, ARRAY_BYTES);
// transfer the array to the GPU
cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice);
// launch the kernel
square<<<1, ARRAY_SIZE>>>(d_out, d_in);
// copy back the result array to the CPU
cudaMemcpy(h_out, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost);
// print out the resulting array
for (int i =0; i < ARRAY_SIZE; i++) {
printf("%f", h_out[i]);
printf(((i % 4) != 3) ? "\t" : "\n");
}
cudaFree(d_in);
cudaFree(d_out);
return 0;
}
|
b3f82bb0a062419ccc9b3847b0c0a623b81a5a11.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "vec_divScalarf.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
size_t n = XSIZE*YSIZE;
float *result = NULL;
hipMalloc(&result, XSIZE*YSIZE*sizeof(float)); // allocate enough bytes for XSIZE*YSIZE floats
float *x = NULL;
hipMalloc(&x, XSIZE*YSIZE*sizeof(float));
float y = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
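// iXSIZE and iYSIZE have been rounded up to multiples of the block dimensions, so the divisions
// below produce a grid that covers the whole matrix.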
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
vec_divScalarf), dim3(gridBlock),dim3(threadBlock), 0, 0, n,result,x,y);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
vec_divScalarf), dim3(gridBlock),dim3(threadBlock), 0, 0, n,result,x,y);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
vec_divScalarf), dim3(gridBlock),dim3(threadBlock), 0, 0, n,result,x,y);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
b3f82bb0a062419ccc9b3847b0c0a623b81a5a11.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "vec_divScalarf.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
size_t n = XSIZE*YSIZE;
float *result = NULL;
cudaMalloc(&result, XSIZE*YSIZE*sizeof(float)); // allocate enough bytes for XSIZE*YSIZE floats
float *x = NULL;
cudaMalloc(&x, XSIZE*YSIZE*sizeof(float));
float y = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
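// iXSIZE and iYSIZE have been rounded up to multiples of the block dimensions, so the divisions
// below produce a grid that covers the whole matrix.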
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
vec_divScalarf<<<gridBlock,threadBlock>>>(n,result,x,y);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
vec_divScalarf<<<gridBlock,threadBlock>>>(n,result,x,y);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
vec_divScalarf<<<gridBlock,threadBlock>>>(n,result,x,y);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
757134d3a761d54827f223152f22c70e18da1601.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
#include <errno.h>
#include <time.h>
#include <math.h>
#include <float.h>
#include <hip/hip_runtime.h>
const unsigned long WIDTH = 8192;
const unsigned long HEIGHT = 8192;
#define THREADS 32
__global__ void add(int* a, int* b, int* c)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int idy = threadIdx.y + blockIdx.y * blockDim.y;
if (idx >= WIDTH || idy >= HEIGHT) return;
c[idy * WIDTH + idx] = a[idy * WIDTH + idx] + b[idy * WIDTH + idx];
}
unsigned long get_time()
{
struct timespec ts;
if (clock_gettime(0, &ts) < 0) {
fprintf(stderr, "Error calc time... %s\n", strerror(errno));
exit(1);
}
return ts.tv_sec * 1000000000L + ts.tv_nsec;
}
void init(int* h_v, int numb) {
for (int i = 0; i < HEIGHT; i++) {
for (int j = 0; j < WIDTH; ++j) {
h_v[i * HEIGHT + j] = numb;
}
}
}
void print_results(const int *result)
{
fprintf(stderr, "%s\n", "Result...");
for (int i = 0; i < HEIGHT; ++i) {
for (int j = 0; j < WIDTH; ++j) {
fprintf(stderr, " %d ", result[i * HEIGHT + j]);
}
fprintf(stderr, "%s\n", "");
}
fprintf(stderr, "%s\n", "");
}
int main( void ) {
unsigned long now = get_time();
int *result, *h_a, *h_b;
int *dev_a, *dev_b, *dev_c;
int size = WIDTH * HEIGHT * sizeof(int);
result = (int*) malloc( size );
h_a = (int*) malloc( size );
h_b = (int*) malloc( size );
init(h_a, 7);
init(h_b, 2);
hipMalloc( &dev_a, size );
hipMalloc( &dev_b, size );
hipMalloc( &dev_c, size );
// transfer the data to device memory.
hipMemcpy( dev_a, h_a, size, hipMemcpyHostToDevice);
hipMemcpy( dev_b, h_b, size, hipMemcpyHostToDevice);
hipMemset(dev_c, 0, size);
dim3 th(THREADS, THREADS);
dim3 blocks((WIDTH + th.x - 1) / th.x , (HEIGHT + th.y - 1) / th.y);
hipLaunchKernelGGL(( add), dim3(blocks), dim3(th), 0, 0, dev_a, dev_b, dev_c);
// transfer the results from the device back to host memory.
hipMemcpy(result, dev_c, size, hipMemcpyDeviceToHost);
free(h_a), free(h_b), free(result);
hipFree(dev_a); hipFree(dev_b); hipFree(dev_c);
fprintf(stderr, "Time %lu\n", get_time() - now);
return 0;
}
|
757134d3a761d54827f223152f22c70e18da1601.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
#include <errno.h>
#include <time.h>
#include <math.h>
#include <float.h>
#include <cuda.h>
const unsigned long WIDTH = 8192;
const unsigned long HEIGHT = 8192;
#define THREADS 32
__global__ void add(int* a, int* b, int* c)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int idy = threadIdx.y + blockIdx.y * blockDim.y;
if (idx >= WIDTH || idy >= HEIGHT) return;
c[idy * WIDTH + idx] = a[idy * WIDTH + idx] + b[idy * WIDTH + idx];
}
unsigned long get_time()
{
struct timespec ts;
if (clock_gettime(0, &ts) < 0) {
fprintf(stderr, "Error calc time... %s\n", strerror(errno));
exit(1);
}
return ts.tv_sec * 1000000000L + ts.tv_nsec;
}
void init(int* h_v, int numb) {
for (int i = 0; i < HEIGHT; i++) {
for (int j = 0; j < WIDTH; ++j) {
h_v[i * HEIGHT + j] = numb;
}
}
}
void print_results(const int *result)
{
fprintf(stderr, "%s\n", "Result...");
for (int i = 0; i < HEIGHT; ++i) {
for (int j = 0; j < WIDTH; ++j) {
fprintf(stderr, " %d ", result[i * HEIGHT + j]);
}
fprintf(stderr, "%s\n", "");
}
fprintf(stderr, "%s\n", "");
}
int main( void ) {
unsigned long now = get_time();
int *result, *h_a, *h_b;
int *dev_a, *dev_b, *dev_c;
int size = WIDTH * HEIGHT * sizeof(int);
result = (int*) malloc( size );
h_a = (int*) malloc( size );
h_b = (int*) malloc( size );
init(h_a, 7);
init(h_b, 2);
cudaMalloc( &dev_a, size );
cudaMalloc( &dev_b, size );
cudaMalloc( &dev_c, size );
// transfer the data to device memory.
cudaMemcpy( dev_a, h_a, size, cudaMemcpyHostToDevice);
cudaMemcpy( dev_b, h_b, size, cudaMemcpyHostToDevice);
cudaMemset(dev_c, 0, size);
dim3 th(THREADS, THREADS);
dim3 blocks((WIDTH + th.x - 1) / th.x , (HEIGHT + th.y - 1) / th.y);
add<<<blocks, th>>>(dev_a, dev_b, dev_c);
// transfer the results from the device back to host memory.
cudaMemcpy(result, dev_c, size, cudaMemcpyDeviceToHost);
free(h_a), free(h_b), free(result);
cudaFree(dev_a); cudaFree(dev_b); cudaFree(dev_c);
fprintf(stderr, "Time %lu\n", get_time() - now);
return 0;
}
|
3b44ad042a432edb00a18409ec5c65ff88afd9e1.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Modified from
// https://github.com/LikeLy-Journey/SegmenTron/blob/master/segmentron/modules/csrc/criss_cross_attention/ca_cuda.cu
#ifdef __NVCC__
#include <THH/THH.h>
#include <THH/THHDeviceUtils.cuh>
#endif
#ifdef __HIP_PLATFORM_HCC__
#include <THH/THH.h>
#include <THH/THHDeviceUtils.cuh>
#endif
#include "cc_attention_cuda_kernel.cuh"
#include "pytorch_cuda_helper.hpp"
void CAForwardCUDAKernelLauncher(const Tensor t, const Tensor f,
Tensor weight) {
AT_ASSERTM(t.device().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(f.device().is_cuda(), "input must be a CUDA tensor");
auto n = t.size(0);
auto c = t.size(1);
auto h = t.size(2);
auto w = t.size(3);
#ifdef __NVCC__
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
#endif
#ifdef __HIP_PLATFORM_HCC__
hipStream_t stream = at::cuda::getCurrentHIPStream();
#endif
// Run kernel
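// Grid layout: the (x, y) block tiles cover the spatial output, and there is one z-slice per attended
// position; criss-cross attention attends along each pixel's column (h positions) and row (w positions),
// hence d3 = h + w below.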
dim3 threads(32, 32);
int d1 = (w + threads.x - 1) / threads.x;
int d2 = (h + threads.y - 1) / threads.y;
int d3 = h + w;
dim3 blocks(d1, d2, d3);
AT_DISPATCH_FLOATING_TYPES(t.scalar_type(), "ca_forward", [&] {
hipLaunchKernelGGL(( ca_forward_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, stream,
t.contiguous().data_ptr<scalar_t>(),
f.contiguous().data_ptr<scalar_t>(),
weight.contiguous().data_ptr<scalar_t>(), n, c, h, w);
});
#ifdef __NVCC__
THCudaCheck(hipGetLastError());
#endif
#ifdef __HIP_PLATFORM_HCC__
THCudaCheck(hipGetLastError());
#endif
}
void CABackwardCUDAKernelLauncher(const Tensor dw, const Tensor t,
const Tensor f, Tensor dt, Tensor df) {
AT_ASSERTM(dw.device().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(t.device().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(f.device().is_cuda(), "input must be a CUDA tensor");
auto n = t.size(0);
auto c = t.size(1);
auto h = t.size(2);
auto w = t.size(3);
#ifdef __NVCC__
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
#endif
#ifdef __HIP_PLATFORM_HCC__
hipStream_t stream = at::cuda::getCurrentHIPStream();
#endif
// Run kernel
dim3 threads(32, 32);
int d1 = (w + threads.x - 1) / threads.x;
int d2 = (h + threads.y - 1) / threads.y;
int d3 = c;
dim3 blocks(d1, d2, d3);
AT_DISPATCH_FLOATING_TYPES(t.scalar_type(), "ca_backward_kernel_t", [&] {
hipLaunchKernelGGL(( ca_backward_kernel_t<scalar_t>), dim3(blocks), dim3(threads), 0, stream,
dw.contiguous().data_ptr<scalar_t>(),
t.contiguous().data_ptr<scalar_t>(),
f.contiguous().data_ptr<scalar_t>(),
dt.contiguous().data_ptr<scalar_t>(), n, c, h, w);
});
AT_DISPATCH_FLOATING_TYPES(f.scalar_type(), "ca_backward_kernel_f", [&] {
hipLaunchKernelGGL(( ca_backward_kernel_f<scalar_t>), dim3(blocks), dim3(threads), 0, stream,
dw.contiguous().data_ptr<scalar_t>(),
t.contiguous().data_ptr<scalar_t>(),
f.contiguous().data_ptr<scalar_t>(),
df.contiguous().data_ptr<scalar_t>(), n, c, h, w);
});
#ifdef __NVCC__
THCudaCheck(hipGetLastError());
#endif
#ifdef __HIP_PLATFORM_HCC__
THCudaCheck(hipGetLastError());
#endif
}
void CAMapForwardCUDAKernelLauncher(const Tensor weight, const Tensor g,
Tensor out) {
AT_ASSERTM(weight.device().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(g.device().is_cuda(), "input must be a CUDA tensor");
auto n = g.size(0);
auto c = g.size(1);
auto h = g.size(2);
auto w = g.size(3);
#ifdef __NVCC__
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
#endif
#ifdef __HIP_PLATFORM_HCC__
hipStream_t stream = at::cuda::getCurrentHIPStream();
#endif
// Run kernel
dim3 threads(32, 32);
int d1 = (w + threads.x - 1) / threads.x;
int d2 = (h + threads.y - 1) / threads.y;
int d3 = c;
dim3 blocks(d1, d2, d3);
AT_DISPATCH_FLOATING_TYPES(g.scalar_type(), "ca_map_forward", [&] {
hipLaunchKernelGGL(( ca_map_forward_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, stream,
weight.contiguous().data_ptr<scalar_t>(),
g.contiguous().data_ptr<scalar_t>(),
out.contiguous().data_ptr<scalar_t>(), n, c, h, w);
});
#ifdef __NVCC__
THCudaCheck(hipGetLastError());
#endif
#ifdef __HIP_PLATFORM_HCC__
THCudaCheck(hipGetLastError());
#endif
}
void CAMapBackwardCUDAKernelLauncher(const Tensor dout, const Tensor weight,
const Tensor g, Tensor dw, Tensor dg) {
AT_ASSERTM(dout.device().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(weight.device().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(g.device().is_cuda(), "input must be a CUDA tensor");
auto n = dout.size(0);
auto c = dout.size(1);
auto h = dout.size(2);
auto w = dout.size(3);
#ifdef __NVCC__
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
#endif
#ifdef __HIP_PLATFORM_HCC__
hipStream_t stream = at::cuda::getCurrentHIPStream();
#endif
// Run kernel
dim3 threads(32, 32);
int d1 = (w + threads.x - 1) / threads.x;
int d2 = (h + threads.y - 1) / threads.y;
int d3 = h + w;
dim3 blocks(d1, d2, d3);
AT_DISPATCH_FLOATING_TYPES(
weight.scalar_type(), "ca_map_backward_kernel_w", [&] {
hipLaunchKernelGGL(( ca_map_backward_kernel_w<scalar_t>), dim3(blocks), dim3(threads), 0, stream,
dout.contiguous().data_ptr<scalar_t>(),
weight.contiguous().data_ptr<scalar_t>(),
g.contiguous().data_ptr<scalar_t>(),
dw.contiguous().data_ptr<scalar_t>(), n, c, h, w);
});
AT_DISPATCH_FLOATING_TYPES(g.scalar_type(), "ca_map_backward_kernel_g", [&] {
hipLaunchKernelGGL(( ca_map_backward_kernel_g<scalar_t>), dim3(blocks), dim3(threads), 0, stream,
dout.contiguous().data_ptr<scalar_t>(),
weight.contiguous().data_ptr<scalar_t>(),
g.contiguous().data_ptr<scalar_t>(),
dg.contiguous().data_ptr<scalar_t>(), n, c, h, w);
});
#ifdef __NVCC__
THCudaCheck(hipGetLastError());
#endif
#ifdef __HIP_PLATFORM_HCC__
THCudaCheck(hipGetLastError());
#endif
}
|
3b44ad042a432edb00a18409ec5c65ff88afd9e1.cu
|
// Modified from
// https://github.com/LikeLy-Journey/SegmenTron/blob/master/segmentron/modules/csrc/criss_cross_attention/ca_cuda.cu
#ifdef __NVCC__
#include <THC/THC.h>
#include <THC/THCDeviceUtils.cuh>
#endif
#ifdef __HIP_PLATFORM_HCC__
#include <THH/THH.h>
#include <THH/THHDeviceUtils.cuh>
#endif
#include "cc_attention_cuda_kernel.cuh"
#include "pytorch_cuda_helper.hpp"
void CAForwardCUDAKernelLauncher(const Tensor t, const Tensor f,
Tensor weight) {
AT_ASSERTM(t.device().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(f.device().is_cuda(), "input must be a CUDA tensor");
auto n = t.size(0);
auto c = t.size(1);
auto h = t.size(2);
auto w = t.size(3);
#ifdef __NVCC__
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
#endif
#ifdef __HIP_PLATFORM_HCC__
hipStream_t stream = at::cuda::getCurrentHIPStream();
#endif
// Run kernel
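// Grid layout: the (x, y) block tiles cover the spatial output, and there is one z-slice per attended
// position; criss-cross attention attends along each pixel's column (h positions) and row (w positions),
// hence d3 = h + w below.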
dim3 threads(32, 32);
int d1 = (w + threads.x - 1) / threads.x;
int d2 = (h + threads.y - 1) / threads.y;
int d3 = h + w;
dim3 blocks(d1, d2, d3);
AT_DISPATCH_FLOATING_TYPES(t.scalar_type(), "ca_forward", [&] {
ca_forward_kernel<scalar_t><<<blocks, threads, 0, stream>>>(
t.contiguous().data_ptr<scalar_t>(),
f.contiguous().data_ptr<scalar_t>(),
weight.contiguous().data_ptr<scalar_t>(), n, c, h, w);
});
#ifdef __NVCC__
THCudaCheck(cudaGetLastError());
#endif
#ifdef __HIP_PLATFORM_HCC__
THCudaCheck(hipGetLastError());
#endif
}
void CABackwardCUDAKernelLauncher(const Tensor dw, const Tensor t,
const Tensor f, Tensor dt, Tensor df) {
AT_ASSERTM(dw.device().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(t.device().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(f.device().is_cuda(), "input must be a CUDA tensor");
auto n = t.size(0);
auto c = t.size(1);
auto h = t.size(2);
auto w = t.size(3);
#ifdef __NVCC__
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
#endif
#ifdef __HIP_PLATFORM_HCC__
hipStream_t stream = at::cuda::getCurrentHIPStream();
#endif
// Run kernel
dim3 threads(32, 32);
int d1 = (w + threads.x - 1) / threads.x;
int d2 = (h + threads.y - 1) / threads.y;
int d3 = c;
dim3 blocks(d1, d2, d3);
AT_DISPATCH_FLOATING_TYPES(t.scalar_type(), "ca_backward_kernel_t", [&] {
ca_backward_kernel_t<scalar_t><<<blocks, threads, 0, stream>>>(
dw.contiguous().data_ptr<scalar_t>(),
t.contiguous().data_ptr<scalar_t>(),
f.contiguous().data_ptr<scalar_t>(),
dt.contiguous().data_ptr<scalar_t>(), n, c, h, w);
});
AT_DISPATCH_FLOATING_TYPES(f.scalar_type(), "ca_backward_kernel_f", [&] {
ca_backward_kernel_f<scalar_t><<<blocks, threads, 0, stream>>>(
dw.contiguous().data_ptr<scalar_t>(),
t.contiguous().data_ptr<scalar_t>(),
f.contiguous().data_ptr<scalar_t>(),
df.contiguous().data_ptr<scalar_t>(), n, c, h, w);
});
#ifdef __NVCC__
THCudaCheck(cudaGetLastError());
#endif
#ifdef __HIP_PLATFORM_HCC__
THCudaCheck(hipGetLastError());
#endif
}
void CAMapForwardCUDAKernelLauncher(const Tensor weight, const Tensor g,
Tensor out) {
AT_ASSERTM(weight.device().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(g.device().is_cuda(), "input must be a CUDA tensor");
auto n = g.size(0);
auto c = g.size(1);
auto h = g.size(2);
auto w = g.size(3);
#ifdef __NVCC__
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
#endif
#ifdef __HIP_PLATFORM_HCC__
hipStream_t stream = at::cuda::getCurrentHIPStream();
#endif
// Run kernel
dim3 threads(32, 32);
int d1 = (w + threads.x - 1) / threads.x;
int d2 = (h + threads.y - 1) / threads.y;
int d3 = c;
dim3 blocks(d1, d2, d3);
AT_DISPATCH_FLOATING_TYPES(g.scalar_type(), "ca_map_forward", [&] {
ca_map_forward_kernel<scalar_t><<<blocks, threads, 0, stream>>>(
weight.contiguous().data_ptr<scalar_t>(),
g.contiguous().data_ptr<scalar_t>(),
out.contiguous().data_ptr<scalar_t>(), n, c, h, w);
});
#ifdef __NVCC__
THCudaCheck(cudaGetLastError());
#endif
#ifdef __HIP_PLATFORM_HCC__
THCudaCheck(hipGetLastError());
#endif
}
void CAMapBackwardCUDAKernelLauncher(const Tensor dout, const Tensor weight,
const Tensor g, Tensor dw, Tensor dg) {
AT_ASSERTM(dout.device().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(weight.device().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(g.device().is_cuda(), "input must be a CUDA tensor");
auto n = dout.size(0);
auto c = dout.size(1);
auto h = dout.size(2);
auto w = dout.size(3);
#ifdef __NVCC__
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
#endif
#ifdef __HIP_PLATFORM_HCC__
hipStream_t stream = at::cuda::getCurrentHIPStream();
#endif
// Run kernel
dim3 threads(32, 32);
int d1 = (w + threads.x - 1) / threads.x;
int d2 = (h + threads.y - 1) / threads.y;
int d3 = h + w;
dim3 blocks(d1, d2, d3);
AT_DISPATCH_FLOATING_TYPES(
weight.scalar_type(), "ca_map_backward_kernel_w", [&] {
ca_map_backward_kernel_w<scalar_t><<<blocks, threads, 0, stream>>>(
dout.contiguous().data_ptr<scalar_t>(),
weight.contiguous().data_ptr<scalar_t>(),
g.contiguous().data_ptr<scalar_t>(),
dw.contiguous().data_ptr<scalar_t>(), n, c, h, w);
});
AT_DISPATCH_FLOATING_TYPES(g.scalar_type(), "ca_map_backward_kernel_g", [&] {
ca_map_backward_kernel_g<scalar_t><<<blocks, threads, 0, stream>>>(
dout.contiguous().data_ptr<scalar_t>(),
weight.contiguous().data_ptr<scalar_t>(),
g.contiguous().data_ptr<scalar_t>(),
dg.contiguous().data_ptr<scalar_t>(), n, c, h, w);
});
#ifdef __NVCC__
THCudaCheck(cudaGetLastError());
#endif
#ifdef __HIP_PLATFORM_HCC__
THCudaCheck(hipGetLastError());
#endif
}
|
095a2577470bb5d7880eca43cf8d5b2d6ca1630c.hip
|
// !!! This is a file automatically generated by hipify!!!
#if __CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ >= 2)
// generated by gen_cutlass_matrix_mul_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#pragma GCC diagnostic ignored "-Wuninitialized"
#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
#include "src/cuda/matrix_mul/fp32_simt/matrix_mul_float_simt_cutlass_wrapper.cuinl"
using LayoutA = cutlass::layout::ColumnMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using ThreadBlockShape = cutlass::gemm::GemmShape<32, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<float, 1, float, float>;
using Gemm = cutlass::gemm::device::Gemm<
float, LayoutA,
float, LayoutB,
float, cutlass::layout::RowMajor, float,
cutlass::arch::OpClassSimt, cutlass::arch::Sm50,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
2>;
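// Explicit instantiation of the SIMT FP32 GEMM wrapper: column-major A and B, row-major C,
// 32x64x8 threadblock and warp tiles, targeting SM50-class devices.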
template void megdnn::cuda::cutlass_wrapper::cutlass_matrix_mul_wrapper<Gemm>(
const typename Gemm::ElementA* d_A, size_t lda,
const typename Gemm::ElementB* d_B, size_t ldb,
typename Gemm::ElementC* d_C, size_t ldc,
int* workspace,
cutlass::gemm::GemmCoord const& problem_size,
typename Gemm::EpilogueOutputOp::Params const& epilogue,
hipStream_t stream, int split_k_slices);
#pragma GCC diagnostic pop
#endif
|
095a2577470bb5d7880eca43cf8d5b2d6ca1630c.cu
|
#if __CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ >= 2)
// generated by gen_cutlass_matrix_mul_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#pragma GCC diagnostic ignored "-Wuninitialized"
#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
#include "src/cuda/matrix_mul/fp32_simt/matrix_mul_float_simt_cutlass_wrapper.cuinl"
using LayoutA = cutlass::layout::ColumnMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using ThreadBlockShape = cutlass::gemm::GemmShape<32, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<float, 1, float, float>;
using Gemm = cutlass::gemm::device::Gemm<
float, LayoutA,
float, LayoutB,
float, cutlass::layout::RowMajor, float,
cutlass::arch::OpClassSimt, cutlass::arch::Sm50,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
2>;
template void megdnn::cuda::cutlass_wrapper::cutlass_matrix_mul_wrapper<Gemm>(
const typename Gemm::ElementA* d_A, size_t lda,
const typename Gemm::ElementB* d_B, size_t ldb,
typename Gemm::ElementC* d_C, size_t ldc,
int* workspace,
cutlass::gemm::GemmCoord const& problem_size,
typename Gemm::EpilogueOutputOp::Params const& epilogue,
cudaStream_t stream, int split_k_slices);
#pragma GCC diagnostic pop
#endif
|
3bf90621a03e264c67d375d1a073adcf121fef3a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include "cu-utils.h"
#include "perf-measure.h"
#include <chrono>
#include <thread>
__global__ void shuffle_kernel(int* in, int* out, int nElem)
{
auto laneId = getLaneId();
int value = in[threadIdx.x];
int vs = __shfl_up_sync(0xffffffff, value, 1, 8);
printf("thread %d 1st shuffle value %d\n", threadIdx.x, vs);
if (0 == laneId) printf("\n");
vs = __shfl_up_sync(0xffffffff, value, 2, 8);
printf("thread %d 2nd shuffle value %d\n", threadIdx.x, vs);
if (0 == laneId) printf("\n");
vs = __shfl_up_sync(0xffffffff, value, 4, 8);
printf("thread %d 3rd shuffle value %d\n", threadIdx.x, vs);
out[threadIdx.x] = value;
}
int shuffle(int argc, char**argv)
{
constexpr int nElem = 32;
int a_in[nElem];
int a_out[nElem];
for (int i = 0; i < nElem; ++i) {
a_in[i] = i;
}
// Choose which GPU to run on, change this on a multi-GPU system.
auto cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
throw "hipSetDevice failed! Do you have a CUDA-capable GPU installed?";
}
// device pointers
//auto d_in = cuda_make_unique<int>(nElem);
auto d_in = cu_make_unique_memcpy<int>(a_in, nElem);
auto d_out = cu_make_unique<int>(nElem);
auto& del1 = d_in.get_deleter();
auto& del2 = d_out.get_deleter();
Measurements mm;
mm.start();
std::this_thread::sleep_for(std::chrono::microseconds(10'000));
const auto tm = mm.elapsed();
std::cout << "sleep 100 uses time : " << tm << std::endl;
//std::cout << "sleep 100 uses time : " << mm.elapsed() << std::endl;
mm.start();
shuffle_kernel<<<1, 32>>>(d_in.get(), d_out.get(), nElem);
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
throw std::string("addKernel launch failed: ") + hipGetErrorString(cudaStatus);
}
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
throw std::string("hipDeviceSynchronize returned error code: ") + hipGetErrorString(cudaStatus);
}
std::cout << "kernel exec time : " << mm.elapsed() << std::endl;
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(a_out, d_out.get(), nElem * sizeof(int), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
throw "hipMemcpy failed!";
}
std::cout << "output:";
for (int i = 0; i < nElem; ++i) {
std::cout << a_out[i] << ", ";
}
std::cout << std::endl;
return 0;
}
|
3bf90621a03e264c67d375d1a073adcf121fef3a.cu
|
#include <cuda.h>
#include "cu-utils.h"
#include "perf-measure.h"
#include <chrono>
#include <thread>
__global__ void shuffle_kernel(int* in, int* out, int nElem)
{
auto laneId = getLaneId();
int value = in[threadIdx.x];
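// __shfl_up_sync(mask, v, delta, 8) shifts values up within 8-lane segments:
// lane i reads the value from lane i - delta, and the first delta lanes of each
// segment keep their own value unchanged.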
int vs = __shfl_up_sync(0xffffffff, value, 1, 8);
printf("thread %d 1st shuffle value %d\n", threadIdx.x, vs);
if (0 == laneId) printf("\n");
vs = __shfl_up_sync(0xffffffff, value, 2, 8);
printf("thread %d 2nd shuffle value %d\n", threadIdx.x, vs);
if (0 == laneId) printf("\n");
vs = __shfl_up_sync(0xffffffff, value, 4, 8);
printf("thread %d 3rd shuffle value %d\n", threadIdx.x, vs);
out[threadIdx.x] = value;
}
int shuffle(int argc, char**argv)
{
constexpr int nElem = 32;
int a_in[nElem];
int a_out[nElem];
for (int i = 0; i < nElem; ++i) {
a_in[i] = i;
}
// Choose which GPU to run on, change this on a multi-GPU system.
auto cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
throw "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?";
}
// device pointers
//auto d_in = cuda_make_unique<int>(nElem);
auto d_in = cu_make_unique_memcpy<int>(a_in, nElem);
auto d_out = cu_make_unique<int>(nElem);
auto& del1 = d_in.get_deleter();
auto& del2 = d_out.get_deleter();
Measurements mm;
mm.start();
std::this_thread::sleep_for(std::chrono::microseconds(10'000));
const auto tm = mm.elapsed();
std::cout << "sleep 100 uses time : " << tm << std::endl;
//std::cout << "sleep 100 uses time : " << mm.elapsed() << std::endl;
mm.start();
shuffle_kernel<<<1, 32>>>(d_in.get(), d_out.get(), nElem);
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
throw std::string("addKernel launch failed: ") + cudaGetErrorString(cudaStatus);
}
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
throw std::string("cudaDeviceSynchronize returned error code: ") + cudaGetErrorString(cudaStatus);
}
std::cout << "kernel exec time : " << mm.elapsed() << std::endl;
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(a_out, d_out.get(), nElem * sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
throw "cudaMemcpy failed!";
}
std::cout << "output:";
for (int i = 0; i < nElem; ++i) {
std::cout << a_out[i] << ", ";
}
std::cout << std::endl;
return 0;
}
|
ff16ea3daa33b24b7763558b1f75cb27190f4e87.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/framework/framework.h"
#include "oneflow/core/cuda/atomic.cuh"
#include "oneflow/user/kernels/model_update_kernel_util.h"
namespace oneflow {
namespace {
template<typename T, typename G>
__global__ void SGDUpdateGpu(int64_t n, T scale, float l1, float l2, float weight_decay,
const float* learning_rate, const T* scale_by_ptr,
const int64_t* skip_if, const G* model_diff, T* model) {
if (skip_if != nullptr && *skip_if != 0) { return; }
const T lr = *learning_rate;
if (scale_by_ptr != nullptr) { scale *= *scale_by_ptr; }
CUDA_1D_KERNEL_LOOP(i, n) {
SGDUpdateFunctor<T, G>()(model_diff + i, model + i, scale, l1, l2, weight_decay, lr);
}
}
template<typename T, typename K, typename IDX>
__global__ void IndexedSlicesSGDUpdateGpu(const IDX data_elem_cnt, const K* indices,
const T* values, const float* learning_rate,
const IDX num_features, const IDX feature_size, T* model,
const IDX feature_id_offset) {
const T minus_lr = -*learning_rate;
CUDA_1D_KERNEL_LOOP_T(IDX, i, data_elem_cnt) {
const T val = values[i];
if (val != static_cast<T>(0)) {
const IDX indices_idx = i / feature_size;
const IDX inner_idx = i - indices_idx * feature_size;
const IDX feature_id = indices[indices_idx];
assert(feature_id >= 0);
const IDX local_feature_id = feature_id - feature_id_offset;
if (local_feature_id >= 0 && local_feature_id < num_features) {
const IDX update_offset = local_feature_id * feature_size + inner_idx;
cuda::atomic::Add(model + update_offset, val * minus_lr);
}
}
}
}
} // namespace
template<typename T, typename G>
struct SGDUpdateKernelUtil<DeviceType::kGPU, T, G> {
static void Update(DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float weight_decay,
const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if,
const G* model_diff, T* model);
};
template<typename T, typename G>
void SGDUpdateKernelUtil<DeviceType::kGPU, T, G>::Update(DeviceCtx* ctx, int64_t n, T scale,
float l1, float l2, float weight_decay,
const float* learning_rate,
const T* scale_by_ptr,
const int64_t* skip_if,
const G* model_diff, T* model) {
hipLaunchKernelGGL(( SGDUpdateGpu<T, G>), dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(),
n, scale, l1, l2, weight_decay, learning_rate, scale_by_ptr, skip_if, model_diff, model);
}
template<typename T>
struct SGDUpdateKernelUtil<DeviceType::kGPU, T, float16> {
static void Update(DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float weight_decay,
const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if,
const float16* model_diff, T* model);
};
template<typename T>
void SGDUpdateKernelUtil<DeviceType::kGPU, T, float16>::Update(
DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float weight_decay,
const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if,
const float16* model_diff, T* model) {
SGDUpdateKernelUtil<DeviceType::kGPU, T, half>::Update(
ctx, n, scale, l1, l2, weight_decay, learning_rate, scale_by_ptr, skip_if,
reinterpret_cast<const half*>(model_diff), model);
}
template struct SGDUpdateKernelUtil<DeviceType::kGPU, double, double>;
template struct SGDUpdateKernelUtil<DeviceType::kGPU, float, float>;
template struct SGDUpdateKernelUtil<DeviceType::kGPU, float, float16>;
template<typename T, typename K>
struct IndexedSlicesSGDUpdateKernelUtil<DeviceType::kGPU, T, K> {
static void Update(DeviceCtx* ctx, int64_t num_indices, int64_t num_features,
int64_t feature_size, int64_t feature_id_offset, const float* learning_rate,
const K* indices, const T* values, T* model);
};
template<typename T, typename K>
void IndexedSlicesSGDUpdateKernelUtil<DeviceType::kGPU, T, K>::Update(
DeviceCtx* ctx, int64_t num_indices, int64_t num_features, int64_t feature_size,
int64_t feature_id_offset, const float* learning_rate, const K* indices, const T* values,
T* model) {
const int64_t values_elem_cnt = num_indices * feature_size;
hipLaunchKernelGGL(( IndexedSlicesSGDUpdateGpu<T, K, int64_t>)
, dim3(BlocksNum4ThreadsNum(values_elem_cnt)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(),
values_elem_cnt, indices, values, learning_rate, num_features, feature_size, model,
feature_id_offset);
}
#define INITIATE_INDEXED_SLICES_SGD_UPDATE_KERNEL_UTIL_GPU(in_type_pair, index_type_pair) \
template struct IndexedSlicesSGDUpdateKernelUtil< \
DeviceType::kGPU, OF_PP_PAIR_FIRST(in_type_pair), OF_PP_PAIR_FIRST(index_type_pair)>;
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(INITIATE_INDEXED_SLICES_SGD_UPDATE_KERNEL_UTIL_GPU,
FLOATING_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ);
#undef INITIATE_INDEXED_SLICES_SGD_UPDATE_KERNEL_UTIL_GPU
namespace {
template<typename T, typename G>
__global__ void MomentumUpdateGpu(int64_t n, T scale, float l1, float l2, float beta,
float weight_decay, const float* learning_rate,
const T* scale_by_ptr, const int64_t* skip_if,
const G* model_diff, T* model, T* momentum) {
if (skip_if != nullptr && *skip_if != 0) { return; }
const T lr = *learning_rate;
if (scale_by_ptr != nullptr) { scale *= *scale_by_ptr; }
CUDA_1D_KERNEL_LOOP(i, n) {
MomentumUpdateFunctor<T, G>()(model_diff + i, model + i, momentum + i, scale, l1, l2, beta,
weight_decay, lr);
}
}
template<typename T, typename K, typename IDX>
__global__ void IndexedSlicesMomentumUpdateGpu(T beta, int64_t feature_size, int64_t lower_bound,
int64_t upper_bound, const IDX* num_unique_instance,
const float* learning_rate, const K* indices,
const T* values, T* model, T* momentum) {
const int64_t n = *num_unique_instance * feature_size;
const T lr = *learning_rate;
CUDA_1D_KERNEL_LOOP(i, n) {
const IDX indices_idx = i / feature_size;
const IDX inner_idx = i - indices_idx * feature_size;
const IDX instance_id = indices[indices_idx];
if (instance_id >= lower_bound && instance_id < upper_bound) {
const IDX model_idx = (instance_id - lower_bound) * feature_size + inner_idx;
MomentumUpdateFunctor<T, T>()(values + i, model + model_idx, momentum + model_idx,
static_cast<T>(1), 0.0, 0.0, beta, 0.0, lr);
}
}
}
} // namespace
template<typename T, typename G>
struct MomentumUpdateKernelUtil<DeviceType::kGPU, T, G> {
static void Update(DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float beta,
float weight_decay, const float* learning_rate, const T* scale_by_ptr,
const int64_t* skip_if, const G* model_diff, T* model, T* momentum);
};
template<typename T, typename G>
void MomentumUpdateKernelUtil<DeviceType::kGPU, T, G>::Update(
DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float beta, float weight_decay,
const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if, const G* model_diff,
T* model, T* momentum) {
hipLaunchKernelGGL(( MomentumUpdateGpu<T, G>)
, dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(),
n, scale, l1, l2, beta, weight_decay, learning_rate, scale_by_ptr, skip_if, model_diff,
model, momentum);
}
template<typename T>
struct MomentumUpdateKernelUtil<DeviceType::kGPU, T, float16> {
static void Update(DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float beta,
float weight_decay, const float* learning_rate, const T* scale_by_ptr,
const int64_t* skip_if, const float16* model_diff, T* model, T* momentum);
};
template<typename T>
void MomentumUpdateKernelUtil<DeviceType::kGPU, T, float16>::Update(
DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float beta, float weight_decay,
const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if,
const float16* model_diff, T* model, T* momentum) {
MomentumUpdateKernelUtil<DeviceType::kGPU, T, half>::Update(
ctx, n, scale, l1, l2, beta, weight_decay, learning_rate, scale_by_ptr, skip_if,
reinterpret_cast<const half*>(model_diff), model, momentum);
}
template struct MomentumUpdateKernelUtil<DeviceType::kGPU, double, double>;
template struct MomentumUpdateKernelUtil<DeviceType::kGPU, float, float>;
template struct MomentumUpdateKernelUtil<DeviceType::kGPU, float, float16>;
template<typename T, typename K, typename IDX>
struct IndexedSlicesMomentumMdUpdateKernelUtil<DeviceType::kGPU, T, K, IDX> {
static void Update(DeviceCtx* ctx, T beta, int64_t num_instance, int64_t feature_size,
int64_t lower_bound, int64_t upper_bound, const IDX* num_unique_instance,
const float* learning_rate, const K* indices, const T* values, T* model,
T* momentum);
};
template<typename T, typename K, typename IDX>
void IndexedSlicesMomentumMdUpdateKernelUtil<DeviceType::kGPU, T, K, IDX>::Update(
DeviceCtx* ctx, T beta, int64_t num_instance, int64_t feature_size, int64_t lower_bound,
int64_t upper_bound, const IDX* num_unique_instance, const float* learning_rate,
const K* indices, const T* values, T* model, T* momentum) {
hipLaunchKernelGGL(( IndexedSlicesMomentumUpdateGpu<T, K>)
, dim3(BlocksNum4ThreadsNum(num_instance * feature_size)), dim3(kCudaThreadsNumPerBlock), 0,
ctx->cuda_stream(), beta, feature_size, lower_bound, upper_bound, num_unique_instance,
learning_rate, indices, values, model, momentum);
}
#define INSTANTIATE_INDEXED_SLICES_MOMENTUM_MODEL_UPDATE_KERNEL_UTIL_GPU( \
val_type_pair, key_type_pair, idx_type_pair) \
template struct IndexedSlicesMomentumMdUpdateKernelUtil< \
DeviceType::kGPU, OF_PP_PAIR_FIRST(val_type_pair), OF_PP_PAIR_FIRST(key_type_pair), \
OF_PP_PAIR_FIRST(idx_type_pair)>;
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(INSTANTIATE_INDEXED_SLICES_MOMENTUM_MODEL_UPDATE_KERNEL_UTIL_GPU,
FLOATING_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ, INT_DATA_TYPE_SEQ);
#undef INSTANTIATE_INDEXED_SLICES_MOMENTUM_MODEL_UPDATE_KERNEL_UTIL_GPU
namespace {
__global__ void AdamBiasCorrectionLearningRateGpu(float beta1, float beta2,
const float* learning_rate,
const int64_t* train_step, float* out) {
const auto exponent = static_cast<double>(*train_step + 1);
const float beta1_power = static_cast<float>(pow(beta1, exponent));
const float beta2_power = static_cast<float>(pow(beta2, exponent));
*out = *learning_rate * sqrt(1 - beta2_power) / (1 - beta1_power);
}
template<typename T, typename G>
__global__ void AdamUpdateGpu(int64_t n, T scale, float l1, float l2, float beta1, float beta2,
float epsilon, float weight_decay, const float* learning_rate,
const T* scale_by_ptr, const int64_t* skip_if, const G* model_diff,
T* model, T* m, T* v) {
if (skip_if != nullptr && *skip_if != 0) { return; }
const float lr = *learning_rate;
if (scale_by_ptr != nullptr) { scale *= *scale_by_ptr; }
CUDA_1D_KERNEL_LOOP(i, n) {
AdamUpdateFunctor<T, G>()(model_diff + i, model + i, m + i, v + i, scale, l1, l2, beta1, beta2,
epsilon, weight_decay, lr);
}
}
template<typename T>
__global__ void AdamUpdateBetaTGpu(const T beta1, const T beta2, const int64_t* skip_if, T* beta1_t,
T* beta2_t) {
if (skip_if != nullptr && *skip_if != 0) { return; }
*beta1_t *= beta1;
*beta2_t *= beta2;
}
template<typename T, typename K, typename IDX>
__global__ void IndexedSlicesAdamUpdateGpu(float beta1, float beta2, float epsilon,
int64_t feature_size, int64_t lower_bound,
int64_t upper_bound, const IDX* num_unique_instance,
const float* learning_rate, const K* indices,
const T* values, T* model, T* m, T* v) {
const float lr = *learning_rate;
const int64_t n = *num_unique_instance * feature_size;
CUDA_1D_KERNEL_LOOP(i, n) {
const IDX indices_idx = i / feature_size;
const IDX inner_idx = i - indices_idx * feature_size;
const IDX instance_id = indices[indices_idx];
if (instance_id >= lower_bound && instance_id < upper_bound) {
const IDX model_idx = (instance_id - lower_bound) * feature_size + inner_idx;
AdamUpdateFunctor<T, T>()(values + i, model + model_idx, m + model_idx, v + model_idx,
static_cast<T>(1), 0, 0, beta1, beta2, epsilon, 0, lr);
}
}
}
template<typename T, typename G>
__global__ void LambGradGpu(int64_t n, T scale, float l1, float l2, float beta1, float beta2,
float epsilon, const T* beta1_t, const T* beta2_t,
const T* scale_by_ptr, const int64_t* skip_if, const G* model_diff,
T* adam_diff, T* model, T* m, T* v) {
if (skip_if != nullptr && *skip_if != 0) { return; }
if (scale_by_ptr != nullptr) { scale *= *scale_by_ptr; }
CUDA_1D_KERNEL_LOOP(i, n) {
LambGradFunctor<T, G>()(beta1_t, beta2_t, model_diff + i, adam_diff + i, model + i, m + i,
v + i, scale, l1, l2, beta1, beta2, epsilon);
}
}
template<typename T>
__global__ void LambUpdateGpu(int64_t n, float weight_decay, const float* learning_rate,
const int64_t* skip_if, const T* w_norm, const T* g_norm,
const T* beta1_t, const T* beta2_t, const T* adam_diff, T* model) {
if (skip_if != nullptr && *skip_if != 0) { return; }
const float lr = LambLRFunctor<T>()(*learning_rate, w_norm, g_norm);
CUDA_1D_KERNEL_LOOP(i, n) { LambUpdateFunctor<T>()(lr, weight_decay, adam_diff + i, model + i); }
}
} // namespace
template<typename T, typename G>
struct AdamUpdateKernelUtil<DeviceType::kGPU, T, G> {
static void Update(DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float beta1,
float beta2, float epsilon, float weight_decay, const float* learning_rate,
const T* scale_by_ptr, const int64_t* skip_if, const G* model_diff, T* model,
T* m, T* v);
};
template<typename T, typename G>
void AdamUpdateKernelUtil<DeviceType::kGPU, T, G>::Update(
DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float beta1, float beta2, float epsilon,
float weight_decay, const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if,
const G* model_diff, T* model, T* m, T* v) {
hipLaunchKernelGGL(( AdamUpdateGpu<T, G>), dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(),
n, scale, l1, l2, beta1, beta2, epsilon, weight_decay, learning_rate, scale_by_ptr, skip_if,
model_diff, model, m, v);
}
template<typename T>
struct AdamUpdateKernelUtil<DeviceType::kGPU, T, float16> {
static void Update(DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float beta1,
float beta2, float epsilon, float weight_decay, const float* learning_rate,
const T* scale_by_ptr, const int64_t* skip_if, const float16* model_diff,
T* model, T* m, T* v);
};
template<typename T>
void AdamUpdateKernelUtil<DeviceType::kGPU, T, float16>::Update(
DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float beta1, float beta2, float epsilon,
float weight_decay, const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if,
const float16* model_diff, T* model, T* m, T* v) {
AdamUpdateKernelUtil<DeviceType::kGPU, T, half>::Update(
ctx, n, scale, l1, l2, beta1, beta2, epsilon, weight_decay, learning_rate, scale_by_ptr,
skip_if, reinterpret_cast<const half*>(model_diff), model, m, v);
}
template struct AdamUpdateKernelUtil<DeviceType::kGPU, float, float>;
template struct AdamUpdateKernelUtil<DeviceType::kGPU, double, double>;
template struct AdamUpdateKernelUtil<DeviceType::kGPU, float, float16>;
template<typename T, typename G>
struct LambUpdateKernelUtil<DeviceType::kGPU, T, G> {
static void Update(DeviceCtx* ctx, int64_t n, float scale, float l1, float l2, float beta1,
float beta2, float epsilon, float weight_decay, const float* learning_rate,
const T* scale_by_ptr, const int64_t* skip_if, const G* model_diff,
T* adam_diff, T* model, T* m, T* v, T* norm_buffer, T* beta1_t, T* beta2_t);
};
template<typename T, typename G>
void LambUpdateKernelUtil<DeviceType::kGPU, T, G>::Update(
DeviceCtx* ctx, int64_t n, float scale, float l1, float l2, float beta1, float beta2,
float epsilon, float weight_decay, const float* learning_rate, const T* scale_by_ptr,
const int64_t* skip_if, const G* model_diff, T* adam_diff, T* model, T* m, T* v, T* norm_buffer,
T* beta1_t, T* beta2_t) {
hipLaunchKernelGGL(( AdamUpdateBetaTGpu<T>), dim3(1), dim3(1), 0, ctx->cuda_stream(), beta1, beta2, skip_if, beta1_t, beta2_t);
hipLaunchKernelGGL(( LambGradGpu<T, G>), dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(),
n, scale, l1, l2, beta1, beta2, epsilon, beta1_t, beta2_t, scale_by_ptr, skip_if, model_diff,
adam_diff, model, m, v);
T* w_norm = norm_buffer;
T* g_norm = norm_buffer + 1;
KernelUtil<DeviceType::kGPU, T>::Dot(ctx, n, model, 1, model, 1, w_norm);
KernelUtil<DeviceType::kGPU, T>::Dot(ctx, n, adam_diff, 1, adam_diff, 1, g_norm);
KernelUtil<DeviceType::kGPU, T>::Sqrt(ctx, 2, norm_buffer, norm_buffer);
hipLaunchKernelGGL(( LambUpdateGpu<T>), dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(),
n, weight_decay, learning_rate, skip_if, w_norm, g_norm, beta1_t, beta2_t, adam_diff, model);
}
template<typename T>
struct LambUpdateKernelUtil<DeviceType::kGPU, T, float16> {
static void Update(DeviceCtx* ctx, int64_t n, float scale, float l1, float l2, float beta1,
float beta2, float epsilon, float weight_decay, const float* learning_rate,
const T* scale_by_ptr, const int64_t* skip_if, const float16* model_diff,
T* adam_diff, T* model, T* m, T* v, T* norm_buffer, T* beta1_t, T* beta2_t);
};
template<typename T>
void LambUpdateKernelUtil<DeviceType::kGPU, T, float16>::Update(
DeviceCtx* ctx, int64_t n, float scale, float l1, float l2, float beta1, float beta2,
float epsilon, float weight_decay, const float* learning_rate, const T* scale_by_ptr,
const int64_t* skip_if, const float16* model_diff, T* adam_diff, T* model, T* m, T* v,
T* norm_buffer, T* beta1_t, T* beta2_t) {
LambUpdateKernelUtil<DeviceType::kGPU, T, half>::Update(
ctx, n, scale, l1, l2, beta1, beta2, epsilon, weight_decay, learning_rate, scale_by_ptr,
skip_if, reinterpret_cast<const half*>(model_diff), adam_diff, model, m, v, norm_buffer,
beta1_t, beta2_t);
}
template struct LambUpdateKernelUtil<DeviceType::kGPU, float, float>;
template struct LambUpdateKernelUtil<DeviceType::kGPU, double, double>;
template struct LambUpdateKernelUtil<DeviceType::kGPU, float, float16>;
template<typename T, typename K, typename IDX>
struct IndexedSlicesAdamMdUpdateKernelUtil<DeviceType::kGPU, T, K, IDX> {
static void Update(DeviceCtx* ctx, float beta1, float beta2, float epsilon, int64_t num_instance,
int64_t feature_size, int64_t lower_bound, int64_t upper_bound,
const IDX* num_unique_instance, const float* learning_rate, const K* indices,
const T* values, T* model, T* m, T* v);
};
template<typename T, typename K, typename IDX>
void IndexedSlicesAdamMdUpdateKernelUtil<DeviceType::kGPU, T, K, IDX>::Update(
DeviceCtx* ctx, float beta1, float beta2, float epsilon, int64_t num_instance,
int64_t feature_size, int64_t lower_bound, int64_t upper_bound, const IDX* num_unique_instance,
const float* learning_rate, const K* indices, const T* values, T* model, T* m, T* v) {
hipLaunchKernelGGL(( IndexedSlicesAdamUpdateGpu<T, K>)
, dim3(BlocksNum4ThreadsNum(num_instance * feature_size)), dim3(kCudaThreadsNumPerBlock), 0,
ctx->cuda_stream(), beta1, beta2, epsilon, feature_size, lower_bound, upper_bound,
num_unique_instance, learning_rate, indices, values, model, m, v);
}
#define INSTANTIATE_INDEXED_SLICES_ADAM_MODEL_UPDATE_KERNEL_UTIL_GPU(val_type_pair, key_type_pair, \
idx_type_pair) \
template struct IndexedSlicesAdamMdUpdateKernelUtil< \
DeviceType::kGPU, OF_PP_PAIR_FIRST(val_type_pair), OF_PP_PAIR_FIRST(key_type_pair), \
OF_PP_PAIR_FIRST(idx_type_pair)>;
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(INSTANTIATE_INDEXED_SLICES_ADAM_MODEL_UPDATE_KERNEL_UTIL_GPU,
FLOATING_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ, INT_DATA_TYPE_SEQ);
#undef INSTANTIATE_INDEXED_SLICES_ADAM_MODEL_UPDATE_KERNEL_UTIL_GPU
template<>
struct AdamBiasCorrectionLearningRateKernelUtil<DeviceType::kGPU> {
static void AdamBiasCorrectionLearningRate(DeviceCtx* ctx, float beta1, float beta2,
const float* learning_rate, const int64_t* train_step,
float* out);
};
void AdamBiasCorrectionLearningRateKernelUtil<DeviceType::kGPU>::AdamBiasCorrectionLearningRate(
DeviceCtx* ctx, float beta1, float beta2, const float* learning_rate, const int64_t* train_step,
float* out) {
hipLaunchKernelGGL(( AdamBiasCorrectionLearningRateGpu), dim3(1), dim3(1), 0, ctx->cuda_stream(), beta1, beta2, learning_rate,
train_step, out);
}
namespace {
template<typename T, typename G, bool centered>
__global__ void RmsPropUpdateGpu(int64_t n, T scale, float l1, float l2, T* mean_square,
T* mean_gradient, float epsilon, float weight_decay,
float decay_rate, const float* learning_rate,
const T* scale_by_ptr, const int64_t* skip_if, const G* model_diff,
T* model) {
if (skip_if != nullptr && *skip_if != 0) { return; }
if (scale_by_ptr != nullptr) { scale *= *scale_by_ptr; }
CUDA_1D_KERNEL_LOOP(i, n) {
RmsPropUpdateFunctor<T, G, centered>()(model_diff + i, model + i, n, scale, l1, l2,
mean_square + i,
(centered ? mean_gradient + i : nullptr), epsilon,
weight_decay, decay_rate, *learning_rate);
}
}
} // namespace
template<typename T, typename G>
struct RmsPropUpdateKernelUtil<DeviceType::kGPU, T, G> {
static void Update(DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, bool centered,
float epsilon, float weight_decay, float decay_rate,
const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if,
const G* model_diff, T* model, T* mean_square, T* mean_gradient);
};
template<typename T, typename G>
void RmsPropUpdateKernelUtil<DeviceType::kGPU, T, G>::Update(
DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, bool centered, float epsilon,
float weight_decay, float decay_rate, const float* learning_rate, const T* scale_by_ptr,
const int64_t* skip_if, const G* model_diff, T* model, T* mean_square, T* mean_gradient) {
if (centered) {
hipLaunchKernelGGL(( RmsPropUpdateGpu<T, G, true>)
, dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(),
n, scale, l1, l2, mean_square, mean_gradient, epsilon, weight_decay, decay_rate,
learning_rate, scale_by_ptr, skip_if, model_diff, model);
} else {
hipLaunchKernelGGL(( RmsPropUpdateGpu<T, G, false>)
, dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(),
n, scale, l1, l2, mean_square, mean_gradient, epsilon, weight_decay, decay_rate,
learning_rate, scale_by_ptr, skip_if, model_diff, model);
}
}
template<typename T>
struct RmsPropUpdateKernelUtil<DeviceType::kGPU, T, float16> {
static void Update(DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, bool centered,
float epsilon, float weight_decay, float decay_rate,
const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if,
const float16* model_diff, T* model, T* mean_square, T* mean_gradient);
};
template<typename T>
void RmsPropUpdateKernelUtil<DeviceType::kGPU, T, float16>::Update(
DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, bool centered, float epsilon,
float weight_decay, float decay_rate, const float* learning_rate, const T* scale_by_ptr,
const int64_t* skip_if, const float16* model_diff, T* model, T* mean_square, T* mean_gradient) {
RmsPropUpdateKernelUtil<DeviceType::kGPU, T, half>::Update(
ctx, n, scale, l1, l2, centered, epsilon, weight_decay, decay_rate, learning_rate,
scale_by_ptr, skip_if, reinterpret_cast<const half*>(model_diff), model, mean_square,
mean_gradient);
}
template struct RmsPropUpdateKernelUtil<DeviceType::kGPU, float, float>;
template struct RmsPropUpdateKernelUtil<DeviceType::kGPU, double, double>;
template struct RmsPropUpdateKernelUtil<DeviceType::kGPU, float, float16>;
namespace {
template<typename T, typename G>
__global__ void LarsScaleModelDiffGpu(int64_t n, T scale, float l1, float l2, const T* scale_by_ptr,
const int64_t* skip_if, const G* model_diff, T* model,
T* model_diff_tmp) {
if (skip_if != nullptr && *skip_if != 0) { return; }
if (scale_by_ptr != nullptr) { scale *= *scale_by_ptr; }
CUDA_1D_KERNEL_LOOP(i, n) {
model_diff_tmp[i] =
CastScaleRegularizeGradientFunctor<T, G>()(model_diff[i], model[i], scale, l1, l2);
}
}
template<typename T>
__global__ void LarsGetLocalLearningRateGpu(const float* learning_rate, T weight_decay, T epsilon,
T lars_coefficient, const int64_t* skip_if,
const int64_t* train_step, T* data_tmp) {
if (skip_if != nullptr && *skip_if != 0) { return; }
T* model_norm = &data_tmp[0];
T* model_diff_norm = &data_tmp[1];
T* local_learning_rate = &data_tmp[2];
*model_norm = std::sqrt(*model_norm);
*model_diff_norm = std::sqrt(*model_diff_norm);
if (*train_step == 0) {
*local_learning_rate =
*learning_rate * lars_coefficient * (*model_norm) / (epsilon + (*model_diff_norm));
} else {
*local_learning_rate = *learning_rate * lars_coefficient * (*model_norm)
/ (epsilon + (*model_diff_norm) + weight_decay * (*model_diff_norm));
}
}
template<typename T>
__global__ void LarsUpdateGpu(int64_t n, float momentum_beta, T* momentum, float weight_decay,
const int64_t* skip_if, T* local_learning_rate, T* model_diff_tmp,
T* model) {
if (skip_if != nullptr && *skip_if != 0) { return; }
CUDA_1D_KERNEL_LOOP(i, n) {
LarsUpdateFunctor<T>()(model_diff_tmp + i, model + i, momentum_beta, momentum + i, weight_decay,
*local_learning_rate);
}
}
} // namespace
template<typename T, typename G>
struct LarsUpdateKernelUtil<DeviceType::kGPU, T, G> {
static void Update(DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float momentum_beta,
float epsilon, float lars_coefficient, float weight_decay,
const float* learning_rate, const int64_t* train_step, const T* scale_by_ptr,
const int64_t* skip_if, const G* model_diff, T* model, T* momentum,
T* data_tmp, T* model_diff_tmp);
};
template<typename T, typename G>
void LarsUpdateKernelUtil<DeviceType::kGPU, T, G>::Update(
DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float momentum_beta, float epsilon,
float lars_coefficient, float weight_decay, const float* learning_rate,
const int64_t* train_step, const T* scale_by_ptr, const int64_t* skip_if, const G* model_diff,
T* model, T* momentum, T* data_tmp, T* model_diff_tmp) {
hipLaunchKernelGGL(( LarsScaleModelDiffGpu<T, G>)
, dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(),
n, scale, l1, l2, scale_by_ptr, skip_if, model_diff, model, model_diff_tmp);
T* model_norm = data_tmp;
T* model_diff_norm = data_tmp + 1;
T* local_learning_rate = data_tmp + 2;
KernelUtil<DeviceType::kGPU, T>::Dot(ctx, n, model, 1, model, 1, model_norm);
KernelUtil<DeviceType::kGPU, T>::Dot(ctx, n, model_diff_tmp, 1, model_diff_tmp, 1,
model_diff_norm);
hipLaunchKernelGGL(( LarsGetLocalLearningRateGpu<T>), dim3(1), dim3(1), 0, ctx->cuda_stream(),
learning_rate, weight_decay, epsilon, lars_coefficient, skip_if, train_step, data_tmp);
hipLaunchKernelGGL(( LarsUpdateGpu<T>), dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(),
n, momentum_beta, momentum, weight_decay, skip_if, local_learning_rate, model_diff_tmp,
model);
}
template<typename T>
struct LarsUpdateKernelUtil<DeviceType::kGPU, T, float16> {
static void Update(DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float momentum_beta,
float epsilon, float lars_coefficient, float weight_decay,
const float* learning_rate, const int64_t* train_step, const T* scale_by_ptr,
const int64_t* skip_if, const float16* model_diff, T* model, T* momentum,
T* data_tmp, T* model_diff_tmp);
};
template<typename T>
void LarsUpdateKernelUtil<DeviceType::kGPU, T, float16>::Update(
DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float momentum_beta, float epsilon,
float lars_coefficient, float weight_decay, const float* learning_rate,
const int64_t* train_step, const T* scale_by_ptr, const int64_t* skip_if,
const float16* model_diff, T* model, T* momentum, T* data_tmp, T* model_diff_tmp) {
LarsUpdateKernelUtil<DeviceType::kGPU, T, half>::Update(
ctx, n, scale, l1, l2, momentum_beta, epsilon, lars_coefficient, weight_decay, learning_rate,
train_step, scale_by_ptr, skip_if, reinterpret_cast<const half*>(model_diff), model, momentum,
data_tmp, model_diff_tmp);
}
template struct LarsUpdateKernelUtil<DeviceType::kGPU, float, float>;
template struct LarsUpdateKernelUtil<DeviceType::kGPU, double, double>;
template struct LarsUpdateKernelUtil<DeviceType::kGPU, float, float16>;
} // namespace oneflow
|
ff16ea3daa33b24b7763558b1f75cb27190f4e87.cu
|
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/framework/framework.h"
#include "oneflow/core/cuda/atomic.cuh"
#include "oneflow/user/kernels/model_update_kernel_util.h"
namespace oneflow {
namespace {
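// Dense SGD step. learning_rate, scale_by_ptr and skip_if are device pointers, so the
// update can be rescaled or skipped dynamically without a host-side synchronization.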
template<typename T, typename G>
__global__ void SGDUpdateGpu(int64_t n, T scale, float l1, float l2, float weight_decay,
const float* learning_rate, const T* scale_by_ptr,
const int64_t* skip_if, const G* model_diff, T* model) {
if (skip_if != nullptr && *skip_if != 0) { return; }
const T lr = *learning_rate;
if (scale_by_ptr != nullptr) { scale *= *scale_by_ptr; }
CUDA_1D_KERNEL_LOOP(i, n) {
SGDUpdateFunctor<T, G>()(model_diff + i, model + i, scale, l1, l2, weight_decay, lr);
}
}
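// Sparse SGD for indexed slices: each row index is shifted by feature_id_offset into the
// local shard and, when it falls inside [0, num_features), -lr * value is added atomically.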
template<typename T, typename K, typename IDX>
__global__ void IndexedSlicesSGDUpdateGpu(const IDX data_elem_cnt, const K* indices,
const T* values, const float* learning_rate,
const IDX num_features, const IDX feature_size, T* model,
const IDX feature_id_offset) {
const T minus_lr = -*learning_rate;
CUDA_1D_KERNEL_LOOP_T(IDX, i, data_elem_cnt) {
const T val = values[i];
if (val != static_cast<T>(0)) {
const IDX indices_idx = i / feature_size;
const IDX inner_idx = i - indices_idx * feature_size;
const IDX feature_id = indices[indices_idx];
assert(feature_id >= 0);
const IDX local_feature_id = feature_id - feature_id_offset;
if (local_feature_id >= 0 && local_feature_id < num_features) {
const IDX update_offset = local_feature_id * feature_size + inner_idx;
cuda::atomic::Add(model + update_offset, val * minus_lr);
}
}
}
}
} // namespace
template<typename T, typename G>
struct SGDUpdateKernelUtil<DeviceType::kGPU, T, G> {
static void Update(DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float weight_decay,
const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if,
const G* model_diff, T* model);
};
template<typename T, typename G>
void SGDUpdateKernelUtil<DeviceType::kGPU, T, G>::Update(DeviceCtx* ctx, int64_t n, T scale,
float l1, float l2, float weight_decay,
const float* learning_rate,
const T* scale_by_ptr,
const int64_t* skip_if,
const G* model_diff, T* model) {
SGDUpdateGpu<T, G><<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>(
n, scale, l1, l2, weight_decay, learning_rate, scale_by_ptr, skip_if, model_diff, model);
}
template<typename T>
struct SGDUpdateKernelUtil<DeviceType::kGPU, T, float16> {
static void Update(DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float weight_decay,
const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if,
const float16* model_diff, T* model);
};
template<typename T>
void SGDUpdateKernelUtil<DeviceType::kGPU, T, float16>::Update(
DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float weight_decay,
const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if,
const float16* model_diff, T* model) {
SGDUpdateKernelUtil<DeviceType::kGPU, T, half>::Update(
ctx, n, scale, l1, l2, weight_decay, learning_rate, scale_by_ptr, skip_if,
reinterpret_cast<const half*>(model_diff), model);
}
template struct SGDUpdateKernelUtil<DeviceType::kGPU, double, double>;
template struct SGDUpdateKernelUtil<DeviceType::kGPU, float, float>;
template struct SGDUpdateKernelUtil<DeviceType::kGPU, float, float16>;
template<typename T, typename K>
struct IndexedSlicesSGDUpdateKernelUtil<DeviceType::kGPU, T, K> {
static void Update(DeviceCtx* ctx, int64_t num_indices, int64_t num_features,
int64_t feature_size, int64_t feature_id_offset, const float* learning_rate,
const K* indices, const T* values, T* model);
};
template<typename T, typename K>
void IndexedSlicesSGDUpdateKernelUtil<DeviceType::kGPU, T, K>::Update(
DeviceCtx* ctx, int64_t num_indices, int64_t num_features, int64_t feature_size,
int64_t feature_id_offset, const float* learning_rate, const K* indices, const T* values,
T* model) {
const int64_t values_elem_cnt = num_indices * feature_size;
IndexedSlicesSGDUpdateGpu<T, K, int64_t>
<<<BlocksNum4ThreadsNum(values_elem_cnt), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>(
values_elem_cnt, indices, values, learning_rate, num_features, feature_size, model,
feature_id_offset);
}
#define INITIATE_INDEXED_SLICES_SGD_UPDATE_KERNEL_UTIL_GPU(in_type_pair, index_type_pair) \
template struct IndexedSlicesSGDUpdateKernelUtil< \
DeviceType::kGPU, OF_PP_PAIR_FIRST(in_type_pair), OF_PP_PAIR_FIRST(index_type_pair)>;
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(INITIATE_INDEXED_SLICES_SGD_UPDATE_KERNEL_UTIL_GPU,
FLOATING_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ);
#undef INITIATE_INDEXED_SLICES_SGD_UPDATE_KERNEL_UTIL_GPU
namespace {
template<typename T, typename G>
__global__ void MomentumUpdateGpu(int64_t n, T scale, float l1, float l2, float beta,
float weight_decay, const float* learning_rate,
const T* scale_by_ptr, const int64_t* skip_if,
const G* model_diff, T* model, T* momentum) {
if (skip_if != nullptr && *skip_if != 0) { return; }
const T lr = *learning_rate;
if (scale_by_ptr != nullptr) { scale *= *scale_by_ptr; }
CUDA_1D_KERNEL_LOOP(i, n) {
MomentumUpdateFunctor<T, G>()(model_diff + i, model + i, momentum + i, scale, l1, l2, beta,
weight_decay, lr);
}
}
template<typename T, typename K, typename IDX>
__global__ void IndexedSlicesMomentumUpdateGpu(T beta, int64_t feature_size, int64_t lower_bound,
int64_t upper_bound, const IDX* num_unique_instance,
const float* learning_rate, const K* indices,
const T* values, T* model, T* momentum) {
const int64_t n = *num_unique_instance * feature_size;
const T lr = *learning_rate;
CUDA_1D_KERNEL_LOOP(i, n) {
const IDX indices_idx = i / feature_size;
const IDX inner_idx = i - indices_idx * feature_size;
const IDX instance_id = indices[indices_idx];
if (instance_id >= lower_bound && instance_id < upper_bound) {
const IDX model_idx = (instance_id - lower_bound) * feature_size + inner_idx;
MomentumUpdateFunctor<T, T>()(values + i, model + model_idx, momentum + model_idx,
static_cast<T>(1), 0.0, 0.0, beta, 0.0, lr);
}
}
}
} // namespace
template<typename T, typename G>
struct MomentumUpdateKernelUtil<DeviceType::kGPU, T, G> {
static void Update(DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float beta,
float weight_decay, const float* learning_rate, const T* scale_by_ptr,
const int64_t* skip_if, const G* model_diff, T* model, T* momentum);
};
template<typename T, typename G>
void MomentumUpdateKernelUtil<DeviceType::kGPU, T, G>::Update(
DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float beta, float weight_decay,
const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if, const G* model_diff,
T* model, T* momentum) {
MomentumUpdateGpu<T, G>
<<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>(
n, scale, l1, l2, beta, weight_decay, learning_rate, scale_by_ptr, skip_if, model_diff,
model, momentum);
}
template<typename T>
struct MomentumUpdateKernelUtil<DeviceType::kGPU, T, float16> {
static void Update(DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float beta,
float weight_decay, const float* learning_rate, const T* scale_by_ptr,
const int64_t* skip_if, const float16* model_diff, T* model, T* momentum);
};
template<typename T>
void MomentumUpdateKernelUtil<DeviceType::kGPU, T, float16>::Update(
DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float beta, float weight_decay,
const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if,
const float16* model_diff, T* model, T* momentum) {
MomentumUpdateKernelUtil<DeviceType::kGPU, T, half>::Update(
ctx, n, scale, l1, l2, beta, weight_decay, learning_rate, scale_by_ptr, skip_if,
reinterpret_cast<const half*>(model_diff), model, momentum);
}
template struct MomentumUpdateKernelUtil<DeviceType::kGPU, double, double>;
template struct MomentumUpdateKernelUtil<DeviceType::kGPU, float, float>;
template struct MomentumUpdateKernelUtil<DeviceType::kGPU, float, float16>;
template<typename T, typename K, typename IDX>
struct IndexedSlicesMomentumMdUpdateKernelUtil<DeviceType::kGPU, T, K, IDX> {
static void Update(DeviceCtx* ctx, T beta, int64_t num_instance, int64_t feature_size,
int64_t lower_bound, int64_t upper_bound, const IDX* num_unique_instance,
const float* learning_rate, const K* indices, const T* values, T* model,
T* momentum);
};
template<typename T, typename K, typename IDX>
void IndexedSlicesMomentumMdUpdateKernelUtil<DeviceType::kGPU, T, K, IDX>::Update(
DeviceCtx* ctx, T beta, int64_t num_instance, int64_t feature_size, int64_t lower_bound,
int64_t upper_bound, const IDX* num_unique_instance, const float* learning_rate,
const K* indices, const T* values, T* model, T* momentum) {
IndexedSlicesMomentumUpdateGpu<T, K>
<<<BlocksNum4ThreadsNum(num_instance * feature_size), kCudaThreadsNumPerBlock, 0,
ctx->cuda_stream()>>>(beta, feature_size, lower_bound, upper_bound, num_unique_instance,
learning_rate, indices, values, model, momentum);
}
#define INSTANTIATE_INDEXED_SLICES_MOMENTUM_MODEL_UPDATE_KERNEL_UTIL_GPU( \
val_type_pair, key_type_pair, idx_type_pair) \
template struct IndexedSlicesMomentumMdUpdateKernelUtil< \
DeviceType::kGPU, OF_PP_PAIR_FIRST(val_type_pair), OF_PP_PAIR_FIRST(key_type_pair), \
OF_PP_PAIR_FIRST(idx_type_pair)>;
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(INSTANTIATE_INDEXED_SLICES_MOMENTUM_MODEL_UPDATE_KERNEL_UTIL_GPU,
FLOATING_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ, INT_DATA_TYPE_SEQ);
#undef INSTANTIATE_INDEXED_SLICES_MOMENTUM_MODEL_UPDATE_KERNEL_UTIL_GPU
namespace {
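// Bias-corrected Adam step size computed on the device:
// out = lr * sqrt(1 - beta2^t) / (1 - beta1^t), with t = *train_step + 1.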
__global__ void AdamBiasCorrectionLearningRateGpu(float beta1, float beta2,
const float* learning_rate,
const int64_t* train_step, float* out) {
const auto exponent = static_cast<double>(*train_step + 1);
const float beta1_power = static_cast<float>(pow(beta1, exponent));
const float beta2_power = static_cast<float>(pow(beta2, exponent));
*out = *learning_rate * sqrt(1 - beta2_power) / (1 - beta1_power);
}
template<typename T, typename G>
__global__ void AdamUpdateGpu(int64_t n, T scale, float l1, float l2, float beta1, float beta2,
float epsilon, float weight_decay, const float* learning_rate,
const T* scale_by_ptr, const int64_t* skip_if, const G* model_diff,
T* model, T* m, T* v) {
if (skip_if != nullptr && *skip_if != 0) { return; }
const float lr = *learning_rate;
if (scale_by_ptr != nullptr) { scale *= *scale_by_ptr; }
CUDA_1D_KERNEL_LOOP(i, n) {
AdamUpdateFunctor<T, G>()(model_diff + i, model + i, m + i, v + i, scale, l1, l2, beta1, beta2,
epsilon, weight_decay, lr);
}
}
template<typename T>
__global__ void AdamUpdateBetaTGpu(const T beta1, const T beta2, const int64_t* skip_if, T* beta1_t,
T* beta2_t) {
if (skip_if != nullptr && *skip_if != 0) { return; }
*beta1_t *= beta1;
*beta2_t *= beta2;
}
template<typename T, typename K, typename IDX>
__global__ void IndexedSlicesAdamUpdateGpu(float beta1, float beta2, float epsilon,
int64_t feature_size, int64_t lower_bound,
int64_t upper_bound, const IDX* num_unique_instance,
const float* learning_rate, const K* indices,
const T* values, T* model, T* m, T* v) {
const float lr = *learning_rate;
const int64_t n = *num_unique_instance * feature_size;
CUDA_1D_KERNEL_LOOP(i, n) {
const IDX indices_idx = i / feature_size;
const IDX inner_idx = i - indices_idx * feature_size;
const IDX instance_id = indices[indices_idx];
if (instance_id >= lower_bound && instance_id < upper_bound) {
const IDX model_idx = (instance_id - lower_bound) * feature_size + inner_idx;
AdamUpdateFunctor<T, T>()(values + i, model + model_idx, m + model_idx, v + model_idx,
static_cast<T>(1), 0, 0, beta1, beta2, epsilon, 0, lr);
}
}
}
template<typename T, typename G>
__global__ void LambGradGpu(int64_t n, T scale, float l1, float l2, float beta1, float beta2,
float epsilon, const T* beta1_t, const T* beta2_t,
const T* scale_by_ptr, const int64_t* skip_if, const G* model_diff,
T* adam_diff, T* model, T* m, T* v) {
if (skip_if != nullptr && *skip_if != 0) { return; }
if (scale_by_ptr != nullptr) { scale *= *scale_by_ptr; }
CUDA_1D_KERNEL_LOOP(i, n) {
LambGradFunctor<T, G>()(beta1_t, beta2_t, model_diff + i, adam_diff + i, model + i, m + i,
v + i, scale, l1, l2, beta1, beta2, epsilon);
}
}
template<typename T>
__global__ void LambUpdateGpu(int64_t n, float weight_decay, const float* learning_rate,
const int64_t* skip_if, const T* w_norm, const T* g_norm,
const T* beta1_t, const T* beta2_t, const T* adam_diff, T* model) {
if (skip_if != nullptr && *skip_if != 0) { return; }
const float lr = LambLRFunctor<T>()(*learning_rate, w_norm, g_norm);
CUDA_1D_KERNEL_LOOP(i, n) { LambUpdateFunctor<T>()(lr, weight_decay, adam_diff + i, model + i); }
}
} // namespace
template<typename T, typename G>
struct AdamUpdateKernelUtil<DeviceType::kGPU, T, G> {
static void Update(DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float beta1,
float beta2, float epsilon, float weight_decay, const float* learning_rate,
const T* scale_by_ptr, const int64_t* skip_if, const G* model_diff, T* model,
T* m, T* v);
};
template<typename T, typename G>
void AdamUpdateKernelUtil<DeviceType::kGPU, T, G>::Update(
DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float beta1, float beta2, float epsilon,
float weight_decay, const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if,
const G* model_diff, T* model, T* m, T* v) {
AdamUpdateGpu<T, G><<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>(
n, scale, l1, l2, beta1, beta2, epsilon, weight_decay, learning_rate, scale_by_ptr, skip_if,
model_diff, model, m, v);
}
template<typename T>
struct AdamUpdateKernelUtil<DeviceType::kGPU, T, float16> {
static void Update(DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float beta1,
float beta2, float epsilon, float weight_decay, const float* learning_rate,
const T* scale_by_ptr, const int64_t* skip_if, const float16* model_diff,
T* model, T* m, T* v);
};
template<typename T>
void AdamUpdateKernelUtil<DeviceType::kGPU, T, float16>::Update(
DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float beta1, float beta2, float epsilon,
float weight_decay, const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if,
const float16* model_diff, T* model, T* m, T* v) {
AdamUpdateKernelUtil<DeviceType::kGPU, T, half>::Update(
ctx, n, scale, l1, l2, beta1, beta2, epsilon, weight_decay, learning_rate, scale_by_ptr,
skip_if, reinterpret_cast<const half*>(model_diff), model, m, v);
}
template struct AdamUpdateKernelUtil<DeviceType::kGPU, float, float>;
template struct AdamUpdateKernelUtil<DeviceType::kGPU, double, double>;
template struct AdamUpdateKernelUtil<DeviceType::kGPU, float, float16>;
template<typename T, typename G>
struct LambUpdateKernelUtil<DeviceType::kGPU, T, G> {
static void Update(DeviceCtx* ctx, int64_t n, float scale, float l1, float l2, float beta1,
float beta2, float epsilon, float weight_decay, const float* learning_rate,
const T* scale_by_ptr, const int64_t* skip_if, const G* model_diff,
T* adam_diff, T* model, T* m, T* v, T* norm_buffer, T* beta1_t, T* beta2_t);
};
template<typename T, typename G>
void LambUpdateKernelUtil<DeviceType::kGPU, T, G>::Update(
DeviceCtx* ctx, int64_t n, float scale, float l1, float l2, float beta1, float beta2,
float epsilon, float weight_decay, const float* learning_rate, const T* scale_by_ptr,
const int64_t* skip_if, const G* model_diff, T* adam_diff, T* model, T* m, T* v, T* norm_buffer,
T* beta1_t, T* beta2_t) {
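// LAMB: advance beta1^t / beta2^t, form the Adam-style step in adam_diff, take the norms
// of the weights and of the step with two dot products, then LambUpdateGpu rescales the
// learning rate from those norms (the trust ratio) before applying the update.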
AdamUpdateBetaTGpu<T><<<1, 1, 0, ctx->cuda_stream()>>>(beta1, beta2, skip_if, beta1_t, beta2_t);
LambGradGpu<T, G><<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>(
n, scale, l1, l2, beta1, beta2, epsilon, beta1_t, beta2_t, scale_by_ptr, skip_if, model_diff,
adam_diff, model, m, v);
T* w_norm = norm_buffer;
T* g_norm = norm_buffer + 1;
KernelUtil<DeviceType::kGPU, T>::Dot(ctx, n, model, 1, model, 1, w_norm);
KernelUtil<DeviceType::kGPU, T>::Dot(ctx, n, adam_diff, 1, adam_diff, 1, g_norm);
KernelUtil<DeviceType::kGPU, T>::Sqrt(ctx, 2, norm_buffer, norm_buffer);
LambUpdateGpu<T><<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>(
n, weight_decay, learning_rate, skip_if, w_norm, g_norm, beta1_t, beta2_t, adam_diff, model);
}
template<typename T>
struct LambUpdateKernelUtil<DeviceType::kGPU, T, float16> {
static void Update(DeviceCtx* ctx, int64_t n, float scale, float l1, float l2, float beta1,
float beta2, float epsilon, float weight_decay, const float* learning_rate,
const T* scale_by_ptr, const int64_t* skip_if, const float16* model_diff,
T* adam_diff, T* model, T* m, T* v, T* norm_buffer, T* beta1_t, T* beta2_t);
};
template<typename T>
void LambUpdateKernelUtil<DeviceType::kGPU, T, float16>::Update(
DeviceCtx* ctx, int64_t n, float scale, float l1, float l2, float beta1, float beta2,
float epsilon, float weight_decay, const float* learning_rate, const T* scale_by_ptr,
const int64_t* skip_if, const float16* model_diff, T* adam_diff, T* model, T* m, T* v,
T* norm_buffer, T* beta1_t, T* beta2_t) {
LambUpdateKernelUtil<DeviceType::kGPU, T, half>::Update(
ctx, n, scale, l1, l2, beta1, beta2, epsilon, weight_decay, learning_rate, scale_by_ptr,
skip_if, reinterpret_cast<const half*>(model_diff), adam_diff, model, m, v, norm_buffer,
beta1_t, beta2_t);
}
template struct LambUpdateKernelUtil<DeviceType::kGPU, float, float>;
template struct LambUpdateKernelUtil<DeviceType::kGPU, double, double>;
template struct LambUpdateKernelUtil<DeviceType::kGPU, float, float16>;
template<typename T, typename K, typename IDX>
struct IndexedSlicesAdamMdUpdateKernelUtil<DeviceType::kGPU, T, K, IDX> {
static void Update(DeviceCtx* ctx, float beta1, float beta2, float epsilon, int64_t num_instance,
int64_t feature_size, int64_t lower_bound, int64_t upper_bound,
const IDX* num_unique_instance, const float* learning_rate, const K* indices,
const T* values, T* model, T* m, T* v);
};
template<typename T, typename K, typename IDX>
void IndexedSlicesAdamMdUpdateKernelUtil<DeviceType::kGPU, T, K, IDX>::Update(
DeviceCtx* ctx, float beta1, float beta2, float epsilon, int64_t num_instance,
int64_t feature_size, int64_t lower_bound, int64_t upper_bound, const IDX* num_unique_instance,
const float* learning_rate, const K* indices, const T* values, T* model, T* m, T* v) {
IndexedSlicesAdamUpdateGpu<T, K>
<<<BlocksNum4ThreadsNum(num_instance * feature_size), kCudaThreadsNumPerBlock, 0,
ctx->cuda_stream()>>>(beta1, beta2, epsilon, feature_size, lower_bound, upper_bound,
num_unique_instance, learning_rate, indices, values, model, m, v);
}
#define INSTANTIATE_INDEXED_SLICES_ADAM_MODEL_UPDATE_KERNEL_UTIL_GPU(val_type_pair, key_type_pair, \
idx_type_pair) \
template struct IndexedSlicesAdamMdUpdateKernelUtil< \
DeviceType::kGPU, OF_PP_PAIR_FIRST(val_type_pair), OF_PP_PAIR_FIRST(key_type_pair), \
OF_PP_PAIR_FIRST(idx_type_pair)>;
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(INSTANTIATE_INDEXED_SLICES_ADAM_MODEL_UPDATE_KERNEL_UTIL_GPU,
FLOATING_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ, INT_DATA_TYPE_SEQ);
#undef INSTANTIATE_INDEXED_SLICES_ADAM_MODEL_UPDATE_KERNEL_UTIL_GPU
template<>
struct AdamBiasCorrectionLearningRateKernelUtil<DeviceType::kGPU> {
static void AdamBiasCorrectionLearningRate(DeviceCtx* ctx, float beta1, float beta2,
const float* learning_rate, const int64_t* train_step,
float* out);
};
void AdamBiasCorrectionLearningRateKernelUtil<DeviceType::kGPU>::AdamBiasCorrectionLearningRate(
DeviceCtx* ctx, float beta1, float beta2, const float* learning_rate, const int64_t* train_step,
float* out) {
AdamBiasCorrectionLearningRateGpu<<<1, 1, 0, ctx->cuda_stream()>>>(beta1, beta2, learning_rate,
train_step, out);
}
namespace {
template<typename T, typename G, bool centered>
__global__ void RmsPropUpdateGpu(int64_t n, T scale, float l1, float l2, T* mean_square,
T* mean_gradient, float epsilon, float weight_decay,
float decay_rate, const float* learning_rate,
const T* scale_by_ptr, const int64_t* skip_if, const G* model_diff,
T* model) {
if (skip_if != nullptr && *skip_if != 0) { return; }
if (scale_by_ptr != nullptr) { scale *= *scale_by_ptr; }
CUDA_1D_KERNEL_LOOP(i, n) {
RmsPropUpdateFunctor<T, G, centered>()(model_diff + i, model + i, n, scale, l1, l2,
mean_square + i,
(centered ? mean_gradient + i : nullptr), epsilon,
weight_decay, decay_rate, *learning_rate);
}
}
} // namespace
template<typename T, typename G>
struct RmsPropUpdateKernelUtil<DeviceType::kGPU, T, G> {
static void Update(DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, bool centered,
float epsilon, float weight_decay, float decay_rate,
const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if,
const G* model_diff, T* model, T* mean_square, T* mean_gradient);
};
template<typename T, typename G>
void RmsPropUpdateKernelUtil<DeviceType::kGPU, T, G>::Update(
DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, bool centered, float epsilon,
float weight_decay, float decay_rate, const float* learning_rate, const T* scale_by_ptr,
const int64_t* skip_if, const G* model_diff, T* model, T* mean_square, T* mean_gradient) {
if (centered) {
RmsPropUpdateGpu<T, G, true>
<<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>(
n, scale, l1, l2, mean_square, mean_gradient, epsilon, weight_decay, decay_rate,
learning_rate, scale_by_ptr, skip_if, model_diff, model);
} else {
RmsPropUpdateGpu<T, G, false>
<<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>(
n, scale, l1, l2, mean_square, mean_gradient, epsilon, weight_decay, decay_rate,
learning_rate, scale_by_ptr, skip_if, model_diff, model);
}
}
template<typename T>
struct RmsPropUpdateKernelUtil<DeviceType::kGPU, T, float16> {
static void Update(DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, bool centered,
float epsilon, float weight_decay, float decay_rate,
const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if,
const float16* model_diff, T* model, T* mean_square, T* mean_gradient);
};
template<typename T>
void RmsPropUpdateKernelUtil<DeviceType::kGPU, T, float16>::Update(
DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, bool centered, float epsilon,
float weight_decay, float decay_rate, const float* learning_rate, const T* scale_by_ptr,
const int64_t* skip_if, const float16* model_diff, T* model, T* mean_square, T* mean_gradient) {
RmsPropUpdateKernelUtil<DeviceType::kGPU, T, half>::Update(
ctx, n, scale, l1, l2, centered, epsilon, weight_decay, decay_rate, learning_rate,
scale_by_ptr, skip_if, reinterpret_cast<const half*>(model_diff), model, mean_square,
mean_gradient);
}
template struct RmsPropUpdateKernelUtil<DeviceType::kGPU, float, float>;
template struct RmsPropUpdateKernelUtil<DeviceType::kGPU, double, double>;
template struct RmsPropUpdateKernelUtil<DeviceType::kGPU, float, float16>;
namespace {
template<typename T, typename G>
__global__ void LarsScaleModelDiffGpu(int64_t n, T scale, float l1, float l2, const T* scale_by_ptr,
const int64_t* skip_if, const G* model_diff, T* model,
T* model_diff_tmp) {
if (skip_if != nullptr && *skip_if != 0) { return; }
if (scale_by_ptr != nullptr) { scale *= *scale_by_ptr; }
CUDA_1D_KERNEL_LOOP(i, n) {
model_diff_tmp[i] =
CastScaleRegularizeGradientFunctor<T, G>()(model_diff[i], model[i], scale, l1, l2);
}
}
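// LarsGetLocalLearningRateGpu below expects data_tmp to hold three scalars:
//   data_tmp[0]  sum of model[i]^2           (a squared L2 norm, filled by a Dot call on the host)
//   data_tmp[1]  sum of model_diff_tmp[i]^2  (likewise)
//   data_tmp[2]  output slot for the local learning rate
// The kernel takes the square roots of the first two entries and combines them
// with the global learning rate, lars_coefficient, weight_decay and epsilon to
// form the LARS local learning rate consumed by LarsUpdateGpu.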
template<typename T>
__global__ void LarsGetLocalLearningRateGpu(const float* learning_rate, T weight_decay, T epsilon,
T lars_coefficient, const int64_t* skip_if,
const int64_t* train_step, T* data_tmp) {
if (skip_if != nullptr && *skip_if != 0) { return; }
T* model_norm = &data_tmp[0];
T* model_diff_norm = &data_tmp[1];
T* local_learning_rate = &data_tmp[2];
*model_norm = std::sqrt(*model_norm);
*model_diff_norm = std::sqrt(*model_diff_norm);
if (*train_step == 0) {
*local_learning_rate =
*learning_rate * lars_coefficient * (*model_norm) / (epsilon + (*model_diff_norm));
} else {
*local_learning_rate = *learning_rate * lars_coefficient * (*model_norm)
/ (epsilon + (*model_diff_norm) + weight_decay * (*model_diff_norm));
}
}
template<typename T>
__global__ void LarsUpdateGpu(int64_t n, float momentum_beta, T* momentum, float weight_decay,
const int64_t* skip_if, T* local_learning_rate, T* model_diff_tmp,
T* model) {
if (skip_if != nullptr && *skip_if != 0) { return; }
CUDA_1D_KERNEL_LOOP(i, n) {
LarsUpdateFunctor<T>()(model_diff_tmp + i, model + i, momentum_beta, momentum + i, weight_decay,
*local_learning_rate);
}
}
} // namespace
template<typename T, typename G>
struct LarsUpdateKernelUtil<DeviceType::kGPU, T, G> {
static void Update(DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float momentum_beta,
float epsilon, float lars_coefficient, float weight_decay,
const float* learning_rate, const int64_t* train_step, const T* scale_by_ptr,
const int64_t* skip_if, const G* model_diff, T* model, T* momentum,
T* data_tmp, T* model_diff_tmp);
};
template<typename T, typename G>
void LarsUpdateKernelUtil<DeviceType::kGPU, T, G>::Update(
DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float momentum_beta, float epsilon,
float lars_coefficient, float weight_decay, const float* learning_rate,
const int64_t* train_step, const T* scale_by_ptr, const int64_t* skip_if, const G* model_diff,
T* model, T* momentum, T* data_tmp, T* model_diff_tmp) {
LarsScaleModelDiffGpu<T, G>
<<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>(
n, scale, l1, l2, scale_by_ptr, skip_if, model_diff, model, model_diff_tmp);
T* model_norm = data_tmp;
T* model_diff_norm = data_tmp + 1;
T* local_learning_rate = data_tmp + 2;
KernelUtil<DeviceType::kGPU, T>::Dot(ctx, n, model, 1, model, 1, model_norm);
KernelUtil<DeviceType::kGPU, T>::Dot(ctx, n, model_diff_tmp, 1, model_diff_tmp, 1,
model_diff_norm);
LarsGetLocalLearningRateGpu<T><<<1, 1, 0, ctx->cuda_stream()>>>(
learning_rate, weight_decay, epsilon, lars_coefficient, skip_if, train_step, data_tmp);
LarsUpdateGpu<T><<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>(
n, momentum_beta, momentum, weight_decay, skip_if, local_learning_rate, model_diff_tmp,
model);
}
template<typename T>
struct LarsUpdateKernelUtil<DeviceType::kGPU, T, float16> {
static void Update(DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float momentum_beta,
float epsilon, float lars_coefficient, float weight_decay,
const float* learning_rate, const int64_t* train_step, const T* scale_by_ptr,
const int64_t* skip_if, const float16* model_diff, T* model, T* momentum,
T* data_tmp, T* model_diff_tmp);
};
template<typename T>
void LarsUpdateKernelUtil<DeviceType::kGPU, T, float16>::Update(
DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float momentum_beta, float epsilon,
float lars_coefficient, float weight_decay, const float* learning_rate,
const int64_t* train_step, const T* scale_by_ptr, const int64_t* skip_if,
const float16* model_diff, T* model, T* momentum, T* data_tmp, T* model_diff_tmp) {
LarsUpdateKernelUtil<DeviceType::kGPU, T, half>::Update(
ctx, n, scale, l1, l2, momentum_beta, epsilon, lars_coefficient, weight_decay, learning_rate,
train_step, scale_by_ptr, skip_if, reinterpret_cast<const half*>(model_diff), model, momentum,
data_tmp, model_diff_tmp);
}
template struct LarsUpdateKernelUtil<DeviceType::kGPU, float, float>;
template struct LarsUpdateKernelUtil<DeviceType::kGPU, double, double>;
template struct LarsUpdateKernelUtil<DeviceType::kGPU, float, float16>;
} // namespace oneflow
|
7e20533c12999cdd763c5765191fd2269b5f896b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@generated from zlaswp.cu normal z -> s, Wed Sep 17 15:08:23 2014
@author Stan Tomov
@author Mathieu Faverge
@author Ichitaro Yamazaki
@author Mark Gates
*/
#include "common_magma.h"
// MAX_PIVOTS is maximum number of pivots to apply in each kernel launch
// NTHREADS is number of threads in a block
// 64 and 256 are better on Kepler;
//#define MAX_PIVOTS 64
//#define NTHREADS 256
#define MAX_PIVOTS 32
#define NTHREADS 64
typedef struct {
float *dAT;
int n, lda, j0, npivots;
int ipiv[MAX_PIVOTS];
} slaswp_params_t;
// Matrix A is stored row-wise in dAT.
// Divide matrix A into block-columns of NTHREADS columns each.
// Each GPU block processes one block-column of A.
// Each thread goes down a column of A,
// swapping rows according to pivots stored in params.
__global__ void slaswp_kernel( slaswp_params_t params )
{
unsigned int tid = threadIdx.x + blockDim.x*blockIdx.x;
if( tid < params.n ) {
int lda = params.lda;
float *dAT = params.dAT + tid + params.j0*lda;
float *A1 = dAT;
for( int i1 = 0; i1 < params.npivots; ++i1 ) {
int i2 = params.ipiv[i1];
float *A2 = dAT + i2*lda;
float temp = *A1;
*A1 = *A2;
*A2 = temp;
A1 += lda; // A1 = dA + i1*ldx
}
}
}
// Launch slaswp kernel with ceil( n / NTHREADS ) blocks of NTHREADS threads each.
extern "C" void slaswp_launch( slaswp_params_t ¶ms, magma_queue_t queue )
{
int blocks = (params.n + NTHREADS - 1) / NTHREADS;
hipLaunchKernelGGL(( slaswp_kernel), dim3(blocks), dim3(NTHREADS), 0, queue , params );
}
// @deprecated
// Swap rows of A, stored row-wise.
// This version updates each entry of ipiv by adding ind.
// (In contrast, LAPACK applies laswp, then updates ipiv.)
// It is used in sgetrf, sgetrf_gpu, sgetrf_mgpu, sgetrf_ooc.
extern "C" void
magmablas_spermute_long2( magma_int_t n, float *dAT, magma_int_t lda,
magma_int_t *ipiv, magma_int_t nb, magma_int_t ind )
{
for( int k = 0; k < nb; k += MAX_PIVOTS ) {
int npivots = min( MAX_PIVOTS, nb-k );
// fields are: dAT n lda j0 npivots
slaswp_params_t params = { dAT, n, lda, ind + k, npivots };
for( int j = 0; j < npivots; ++j ) {
params.ipiv[j] = ipiv[ind + k + j] - k - 1;
ipiv[ind + k + j] += ind;
}
slaswp_launch( params, magma_stream );
}
}
// @deprecated
// Swap rows of A, stored row-wise.
// This version assumes ind has already been added to ipiv.
// (In contrast, LAPACK applies laswp, then updates ipiv.)
// It is used in sgetrf_mgpu, sgetrf_ooc.
extern "C" void
magmablas_spermute_long3( float *dAT, magma_int_t lda,
const magma_int_t *ipiv, magma_int_t nb, magma_int_t ind )
{
for( int k = 0; k < nb; k += MAX_PIVOTS ) {
int npivots = min( MAX_PIVOTS, nb-k );
// fields are: dAT n lda j0 npivots
slaswp_params_t params = { dAT, lda, lda, ind + k, npivots };
for( int j = 0; j < MAX_PIVOTS; ++j ) {
params.ipiv[j] = ipiv[ind + k + j] - k - 1 - ind;
}
slaswp_launch( params, magma_stream );
}
}
/**
Purpose:
=============
SLASWP performs a series of row interchanges on the matrix A.
One row interchange is initiated for each of rows K1 through K2 of A.
** Unlike LAPACK, here A is stored row-wise (hence dAT). **
Otherwise, this is identical to LAPACK's interface.
Arguments:
==========
\param[in]
n INTEGER
The number of columns of the matrix A.
\param[in,out]
dAT REAL array on GPU, stored row-wise, dimension (LDA,N)
On entry, the matrix of column dimension N to which the row
interchanges will be applied.
On exit, the permuted matrix.
\param[in]
lda INTEGER
The leading dimension of the array A. lda >= n.
\param[in]
k1 INTEGER
The first element of IPIV for which a row interchange will
be done. (Fortran one-based index: 1 <= k1 <= n.)
\param[in]
k2 INTEGER
The last element of IPIV for which a row interchange will
be done. (Fortran one-based index: 1 <= k2 <= n.)
\param[in]
ipiv INTEGER array, on CPU, dimension (K2*abs(INCI))
The vector of pivot indices. Only the elements in positions
K1 through K2 of IPIV are accessed.
IPIV(K) = L implies rows K and L are to be interchanged.
\param[in]
inci INTEGER
The increment between successive values of IPIV.
            Currently, INCI > 0.
            TODO: If INCI is negative, the pivots are applied in reverse order.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_saux2
********************************************************************/
// It is used in sgessm, sgetrf_incpiv.
extern "C" void
magmablas_slaswp_q(
magma_int_t n, float *dAT, magma_int_t lda,
magma_int_t k1, magma_int_t k2,
const magma_int_t *ipiv, magma_int_t inci,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( n < 0 )
info = -1;
else if ( k1 < 1 || k1 > n )
info = -4;
else if ( k2 < 1 || k2 > n )
info = -5;
else if ( inci <= 0 )
info = -7;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
for( int k = k1-1; k < k2; k += MAX_PIVOTS ) {
int npivots = min( MAX_PIVOTS, k2-k );
// fields are: dAT n lda j0 npivots
slaswp_params_t params = { dAT+k*lda, n, lda, 0, npivots };
for( int j = 0; j < npivots; ++j ) {
params.ipiv[j] = ipiv[(k+j)*inci] - k - 1;
}
slaswp_launch( params, queue );
}
}
/**
@see magmablas_slaswp_q
@ingroup magma_saux2
********************************************************************/
extern "C" void
magmablas_slaswp( magma_int_t n, float *dAT, magma_int_t lda,
magma_int_t k1, magma_int_t k2,
const magma_int_t *ipiv, magma_int_t inci )
{
magmablas_slaswp_q( n, dAT, lda, k1, k2, ipiv, inci, magma_stream );
}
// ------------------------------------------------------------
// Extended version has stride in both directions (ldx, ldy)
// to handle both row-wise and column-wise storage.
typedef struct {
float *dA;
int n, ldx, ldy, j0, npivots;
int ipiv[MAX_PIVOTS];
} slaswpx_params_t;
// Matrix A is stored row or column-wise in dA.
// Divide matrix A into block-columns of NTHREADS columns each.
// Each GPU block processes one block-column of A.
// Each thread goes down a column of A,
// swapping rows according to pivots stored in params.
__global__ void slaswpx_kernel( slaswpx_params_t params )
{
unsigned int tid = threadIdx.x + blockDim.x*blockIdx.x;
if( tid < params.n ) {
int ldx = params.ldx;
float *dA = params.dA + tid*params.ldy + params.j0*ldx;
float *A1 = dA;
for( int i1 = 0; i1 < params.npivots; ++i1 ) {
int i2 = params.ipiv[i1];
float *A2 = dA + i2*ldx;
float temp = *A1;
*A1 = *A2;
*A2 = temp;
A1 += ldx; // A1 = dA + i1*ldx
}
}
}
// Launch slaswpx kernel with ceil( n / NTHREADS ) blocks of NTHREADS threads each.
extern "C" void slaswpx( slaswpx_params_t ¶ms, magma_queue_t queue )
{
int blocks = (params.n + NTHREADS - 1) / NTHREADS;
hipLaunchKernelGGL(( slaswpx_kernel), dim3(blocks), dim3(NTHREADS), 0, queue , params );
}
/**
Purpose:
=============
SLASWPX performs a series of row interchanges on the matrix A.
One row interchange is initiated for each of rows K1 through K2 of A.
** Unlike LAPACK, here A is stored either row-wise or column-wise,
depending on ldx and ldy. **
Otherwise, this is identical to LAPACK's interface.
Arguments:
==========
\param[in]
n INTEGER
The number of columns of the matrix A.
\param[in,out]
dA REAL array on GPU, dimension (*,*)
On entry, the matrix of column dimension N to which the row
interchanges will be applied.
On exit, the permuted matrix.
\param[in]
ldx INTEGER
Stride between elements in same column.
\param[in]
ldy INTEGER
Stride between elements in same row.
For A stored row-wise, set ldx=lda and ldy=1.
For A stored column-wise, set ldx=1 and ldy=lda.
\param[in]
k1 INTEGER
The first element of IPIV for which a row interchange will
be done. (One based index.)
\param[in]
k2 INTEGER
The last element of IPIV for which a row interchange will
be done. (One based index.)
\param[in]
ipiv INTEGER array, on CPU, dimension (K2*abs(INCI))
The vector of pivot indices. Only the elements in positions
K1 through K2 of IPIV are accessed.
IPIV(K) = L implies rows K and L are to be interchanged.
\param[in]
inci INTEGER
The increment between successive values of IPIV.
            Currently, INCI > 0.
            TODO: If INCI is negative, the pivots are applied in reverse order.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_saux2
********************************************************************/
extern "C" void
magmablas_slaswpx_q(
magma_int_t n, float *dA, magma_int_t ldx, magma_int_t ldy,
magma_int_t k1, magma_int_t k2,
const magma_int_t *ipiv, magma_int_t inci,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( n < 0 )
info = -1;
else if ( k1 < 0 )
info = -4;
else if ( k2 < 0 || k2 < k1 )
info = -5;
else if ( inci <= 0 )
info = -7;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
for( int k = k1-1; k < k2; k += MAX_PIVOTS ) {
int npivots = min( MAX_PIVOTS, k2-k );
// fields are: dA n ldx ldy j0 npivots
slaswpx_params_t params = { dA+k*ldx, n, ldx, ldy, 0, npivots };
for( int j = 0; j < npivots; ++j ) {
params.ipiv[j] = ipiv[(k+j)*inci] - k - 1;
}
slaswpx( params, queue );
}
}
/**
@see magmablas_slaswpx_q
@ingroup magma_saux2
********************************************************************/
extern "C" void
magmablas_slaswpx( magma_int_t n, float *dA, magma_int_t ldx, magma_int_t ldy,
magma_int_t k1, magma_int_t k2,
const magma_int_t *ipiv, magma_int_t inci )
{
return magmablas_slaswpx_q( n, dA, ldx, ldy, k1, k2, ipiv, inci, magma_stream );
}
// ------------------------------------------------------------
// This version takes d_ipiv on the GPU. Thus it does not pass pivots
// as an argument using a structure, avoiding all the argument size
// limitations of CUDA and OpenCL. It also needs just one kernel launch
// with all the pivots, instead of multiple kernel launches with small
// batches of pivots. On Fermi, it is faster than magmablas_slaswp
// (including copying pivots to the GPU).
__global__ void slaswp2_kernel(
int n, float *dAT, int lda, int npivots,
const magma_int_t* d_ipiv, magma_int_t inci )
{
unsigned int tid = threadIdx.x + blockDim.x*blockIdx.x;
if( tid < n ) {
dAT += tid;
float *A1 = dAT;
for( int i1 = 0; i1 < npivots; ++i1 ) {
int i2 = d_ipiv[i1*inci] - 1; // Fortran index
float *A2 = dAT + i2*lda;
float temp = *A1;
*A1 = *A2;
*A2 = temp;
A1 += lda; // A1 = dA + i1*ldx
}
}
}
/**
Purpose:
=============
SLASWP2 performs a series of row interchanges on the matrix A.
One row interchange is initiated for each of rows K1 through K2 of A.
** Unlike LAPACK, here A is stored row-wise (hence dAT). **
Otherwise, this is identical to LAPACK's interface.
Here, d_ipiv is passed in GPU memory.
Arguments:
==========
\param[in]
n INTEGER
The number of columns of the matrix A.
\param[in,out]
dAT REAL array on GPU, stored row-wise, dimension (LDA,*)
On entry, the matrix of column dimension N to which the row
interchanges will be applied.
On exit, the permuted matrix.
\param[in]
lda INTEGER
The leading dimension of the array A.
(I.e., stride between elements in a column.)
\param[in]
k1 INTEGER
The first element of IPIV for which a row interchange will
be done. (One based index.)
\param[in]
k2 INTEGER
The last element of IPIV for which a row interchange will
be done. (One based index.)
\param[in]
d_ipiv INTEGER array, on GPU, dimension (K2*abs(INCI))
The vector of pivot indices. Only the elements in positions
K1 through K2 of IPIV are accessed.
IPIV(K) = L implies rows K and L are to be interchanged.
\param[in]
inci INTEGER
The increment between successive values of IPIV.
            Currently, INCI > 0.
            TODO: If INCI is negative, the pivots are applied in reverse order.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_saux2
********************************************************************/
extern "C" void
magmablas_slaswp2_q(
magma_int_t n, float* dAT, magma_int_t lda,
magma_int_t k1, magma_int_t k2,
const magma_int_t *d_ipiv, magma_int_t inci,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( n < 0 )
info = -1;
else if ( k1 < 0 )
info = -4;
else if ( k2 < 0 || k2 < k1 )
info = -5;
else if ( inci <= 0 )
info = -7;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
int blocks = (n + NTHREADS - 1) / NTHREADS;
hipLaunchKernelGGL(( slaswp2_kernel), dim3(blocks), dim3(NTHREADS), 0, queue ,
n, dAT + (k1-1)*lda, lda, k2-(k1-1), d_ipiv, inci );
}
/**
@see magmablas_slaswp2_q
@ingroup magma_saux2
********************************************************************/
extern "C" void
magmablas_slaswp2( magma_int_t n, float* dAT, magma_int_t lda,
magma_int_t k1, magma_int_t k2,
const magma_int_t *d_ipiv, magma_int_t inci )
{
magmablas_slaswp2_q( n, dAT, lda, k1, k2, d_ipiv, inci, magma_stream );
}
|
7e20533c12999cdd763c5765191fd2269b5f896b.cu
|
/*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@generated from zlaswp.cu normal z -> s, Wed Sep 17 15:08:23 2014
@author Stan Tomov
@author Mathieu Faverge
@author Ichitaro Yamazaki
@author Mark Gates
*/
#include "common_magma.h"
// MAX_PIVOTS is maximum number of pivots to apply in each kernel launch
// NTHREADS is number of threads in a block
// 64 and 256 are better on Kepler;
//#define MAX_PIVOTS 64
//#define NTHREADS 256
#define MAX_PIVOTS 32
#define NTHREADS 64
typedef struct {
float *dAT;
int n, lda, j0, npivots;
int ipiv[MAX_PIVOTS];
} slaswp_params_t;
// Matrix A is stored row-wise in dAT.
// Divide matrix A into block-columns of NTHREADS columns each.
// Each GPU block processes one block-column of A.
// Each thread goes down a column of A,
// swapping rows according to pivots stored in params.
__global__ void slaswp_kernel( slaswp_params_t params )
{
unsigned int tid = threadIdx.x + blockDim.x*blockIdx.x;
if( tid < params.n ) {
int lda = params.lda;
float *dAT = params.dAT + tid + params.j0*lda;
float *A1 = dAT;
for( int i1 = 0; i1 < params.npivots; ++i1 ) {
int i2 = params.ipiv[i1];
float *A2 = dAT + i2*lda;
float temp = *A1;
*A1 = *A2;
*A2 = temp;
A1 += lda; // A1 = dA + i1*ldx
}
}
}
// Launch slaswp kernel with ceil( n / NTHREADS ) blocks of NTHREADS threads each.
extern "C" void slaswp_launch( slaswp_params_t ¶ms, magma_queue_t queue )
{
int blocks = (params.n + NTHREADS - 1) / NTHREADS;
slaswp_kernel<<< blocks, NTHREADS, 0, queue >>>( params );
}
// @deprecated
// Swap rows of A, stored row-wise.
// This version updates each entry of ipiv by adding ind.
// (In contrast, LAPACK applies laswp, then updates ipiv.)
// It is used in sgetrf, sgetrf_gpu, sgetrf_mgpu, sgetrf_ooc.
extern "C" void
magmablas_spermute_long2( magma_int_t n, float *dAT, magma_int_t lda,
magma_int_t *ipiv, magma_int_t nb, magma_int_t ind )
{
for( int k = 0; k < nb; k += MAX_PIVOTS ) {
int npivots = min( MAX_PIVOTS, nb-k );
// fields are: dAT n lda j0 npivots
slaswp_params_t params = { dAT, n, lda, ind + k, npivots };
for( int j = 0; j < npivots; ++j ) {
params.ipiv[j] = ipiv[ind + k + j] - k - 1;
ipiv[ind + k + j] += ind;
}
slaswp_launch( params, magma_stream );
}
}
// @deprecated
// Swap rows of A, stored row-wise.
// This version assumes ind has already been added to ipiv.
// (In contrast, LAPACK applies laswp, then updates ipiv.)
// It is used in sgetrf_mgpu, sgetrf_ooc.
extern "C" void
magmablas_spermute_long3( float *dAT, magma_int_t lda,
const magma_int_t *ipiv, magma_int_t nb, magma_int_t ind )
{
for( int k = 0; k < nb; k += MAX_PIVOTS ) {
int npivots = min( MAX_PIVOTS, nb-k );
// fields are: dAT n lda j0 npivots
slaswp_params_t params = { dAT, lda, lda, ind + k, npivots };
for( int j = 0; j < MAX_PIVOTS; ++j ) {
params.ipiv[j] = ipiv[ind + k + j] - k - 1 - ind;
}
slaswp_launch( params, magma_stream );
}
}
/**
Purpose:
=============
SLASWP performs a series of row interchanges on the matrix A.
One row interchange is initiated for each of rows K1 through K2 of A.
** Unlike LAPACK, here A is stored row-wise (hence dAT). **
Otherwise, this is identical to LAPACK's interface.
Arguments:
==========
\param[in]
n INTEGER
The number of columns of the matrix A.
\param[in,out]
dAT REAL array on GPU, stored row-wise, dimension (LDA,N)
On entry, the matrix of column dimension N to which the row
interchanges will be applied.
On exit, the permuted matrix.
\param[in]
lda INTEGER
The leading dimension of the array A. lda >= n.
\param[in]
k1 INTEGER
The first element of IPIV for which a row interchange will
be done. (Fortran one-based index: 1 <= k1 <= n.)
\param[in]
k2 INTEGER
The last element of IPIV for which a row interchange will
be done. (Fortran one-based index: 1 <= k2 <= n.)
\param[in]
ipiv INTEGER array, on CPU, dimension (K2*abs(INCI))
The vector of pivot indices. Only the elements in positions
K1 through K2 of IPIV are accessed.
IPIV(K) = L implies rows K and L are to be interchanged.
\param[in]
inci INTEGER
The increment between successive values of IPIV.
            Currently, INCI > 0.
            TODO: If INCI is negative, the pivots are applied in reverse order.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_saux2
********************************************************************/
// It is used in sgessm, sgetrf_incpiv.
extern "C" void
magmablas_slaswp_q(
magma_int_t n, float *dAT, magma_int_t lda,
magma_int_t k1, magma_int_t k2,
const magma_int_t *ipiv, magma_int_t inci,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( n < 0 )
info = -1;
else if ( k1 < 1 || k1 > n )
info = -4;
else if ( k2 < 1 || k2 > n )
info = -5;
else if ( inci <= 0 )
info = -7;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
for( int k = k1-1; k < k2; k += MAX_PIVOTS ) {
int npivots = min( MAX_PIVOTS, k2-k );
// fields are: dAT n lda j0 npivots
slaswp_params_t params = { dAT+k*lda, n, lda, 0, npivots };
for( int j = 0; j < npivots; ++j ) {
params.ipiv[j] = ipiv[(k+j)*inci] - k - 1;
}
slaswp_launch( params, queue );
}
}
/**
@see magmablas_slaswp_q
@ingroup magma_saux2
********************************************************************/
extern "C" void
magmablas_slaswp( magma_int_t n, float *dAT, magma_int_t lda,
magma_int_t k1, magma_int_t k2,
const magma_int_t *ipiv, magma_int_t inci )
{
magmablas_slaswp_q( n, dAT, lda, k1, k2, ipiv, inci, magma_stream );
}
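// Illustrative usage sketch (not part of the MAGMA library): a caller that has
// factored an nb-wide panel, with 1-based pivots in ipiv as returned by LAPACK,
// could permute all n columns of the row-stored matrix dAT in one call on the
// default stream. The arguments n, dAT, lda, nb and ipiv are assumed to be
// prepared by the caller.
static inline void
example_slaswp_usage( magma_int_t n, float *dAT, magma_int_t lda,
                      magma_int_t nb, const magma_int_t *ipiv )
{
    // apply pivots 1..nb with unit increment
    magmablas_slaswp( n, dAT, lda, 1, nb, ipiv, 1 );
}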
// ------------------------------------------------------------
// Extended version has stride in both directions (ldx, ldy)
// to handle both row-wise and column-wise storage.
typedef struct {
float *dA;
int n, ldx, ldy, j0, npivots;
int ipiv[MAX_PIVOTS];
} slaswpx_params_t;
// Matrix A is stored row or column-wise in dA.
// Divide matrix A into block-columns of NTHREADS columns each.
// Each GPU block processes one block-column of A.
// Each thread goes down a column of A,
// swapping rows according to pivots stored in params.
__global__ void slaswpx_kernel( slaswpx_params_t params )
{
unsigned int tid = threadIdx.x + blockDim.x*blockIdx.x;
if( tid < params.n ) {
int ldx = params.ldx;
float *dA = params.dA + tid*params.ldy + params.j0*ldx;
float *A1 = dA;
for( int i1 = 0; i1 < params.npivots; ++i1 ) {
int i2 = params.ipiv[i1];
float *A2 = dA + i2*ldx;
float temp = *A1;
*A1 = *A2;
*A2 = temp;
A1 += ldx; // A1 = dA + i1*ldx
}
}
}
// Launch slaswpx kernel with ceil( n / NTHREADS ) blocks of NTHREADS threads each.
extern "C" void slaswpx( slaswpx_params_t ¶ms, magma_queue_t queue )
{
int blocks = (params.n + NTHREADS - 1) / NTHREADS;
slaswpx_kernel<<< blocks, NTHREADS, 0, queue >>>( params );
}
/**
Purpose:
=============
SLASWPX performs a series of row interchanges on the matrix A.
One row interchange is initiated for each of rows K1 through K2 of A.
** Unlike LAPACK, here A is stored either row-wise or column-wise,
depending on ldx and ldy. **
Otherwise, this is identical to LAPACK's interface.
Arguments:
==========
\param[in]
n INTEGER
The number of columns of the matrix A.
\param[in,out]
dA REAL array on GPU, dimension (*,*)
On entry, the matrix of column dimension N to which the row
interchanges will be applied.
On exit, the permuted matrix.
\param[in]
ldx INTEGER
Stride between elements in same column.
\param[in]
ldy INTEGER
Stride between elements in same row.
For A stored row-wise, set ldx=lda and ldy=1.
For A stored column-wise, set ldx=1 and ldy=lda.
\param[in]
k1 INTEGER
The first element of IPIV for which a row interchange will
be done. (One based index.)
\param[in]
k2 INTEGER
The last element of IPIV for which a row interchange will
be done. (One based index.)
\param[in]
ipiv INTEGER array, on CPU, dimension (K2*abs(INCI))
The vector of pivot indices. Only the elements in positions
K1 through K2 of IPIV are accessed.
IPIV(K) = L implies rows K and L are to be interchanged.
\param[in]
inci INTEGER
The increment between successive values of IPIV.
            Currently, INCI > 0.
            TODO: If INCI is negative, the pivots are applied in reverse order.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_saux2
********************************************************************/
extern "C" void
magmablas_slaswpx_q(
magma_int_t n, float *dA, magma_int_t ldx, magma_int_t ldy,
magma_int_t k1, magma_int_t k2,
const magma_int_t *ipiv, magma_int_t inci,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( n < 0 )
info = -1;
else if ( k1 < 0 )
info = -4;
else if ( k2 < 0 || k2 < k1 )
info = -5;
else if ( inci <= 0 )
info = -7;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
for( int k = k1-1; k < k2; k += MAX_PIVOTS ) {
int npivots = min( MAX_PIVOTS, k2-k );
// fields are: dA n ldx ldy j0 npivots
slaswpx_params_t params = { dA+k*ldx, n, ldx, ldy, 0, npivots };
for( int j = 0; j < npivots; ++j ) {
params.ipiv[j] = ipiv[(k+j)*inci] - k - 1;
}
slaswpx( params, queue );
}
}
/**
@see magmablas_slaswpx_q
@ingroup magma_saux2
********************************************************************/
extern "C" void
magmablas_slaswpx( magma_int_t n, float *dA, magma_int_t ldx, magma_int_t ldy,
magma_int_t k1, magma_int_t k2,
const magma_int_t *ipiv, magma_int_t inci )
{
return magmablas_slaswpx_q( n, dA, ldx, ldy, k1, k2, ipiv, inci, magma_stream );
}
// ------------------------------------------------------------
// This version takes d_ipiv on the GPU. Thus it does not pass pivots
// as an argument using a structure, avoiding all the argument size
// limitations of CUDA and OpenCL. It also needs just one kernel launch
// with all the pivots, instead of multiple kernel launches with small
// batches of pivots. On Fermi, it is faster than magmablas_slaswp
// (including copying pivots to the GPU).
__global__ void slaswp2_kernel(
int n, float *dAT, int lda, int npivots,
const magma_int_t* d_ipiv, magma_int_t inci )
{
unsigned int tid = threadIdx.x + blockDim.x*blockIdx.x;
if( tid < n ) {
dAT += tid;
float *A1 = dAT;
for( int i1 = 0; i1 < npivots; ++i1 ) {
int i2 = d_ipiv[i1*inci] - 1; // Fortran index
float *A2 = dAT + i2*lda;
float temp = *A1;
*A1 = *A2;
*A2 = temp;
A1 += lda; // A1 = dA + i1*ldx
}
}
}
/**
Purpose:
=============
SLASWP2 performs a series of row interchanges on the matrix A.
One row interchange is initiated for each of rows K1 through K2 of A.
** Unlike LAPACK, here A is stored row-wise (hence dAT). **
Otherwise, this is identical to LAPACK's interface.
Here, d_ipiv is passed in GPU memory.
Arguments:
==========
\param[in]
n INTEGER
The number of columns of the matrix A.
\param[in,out]
dAT REAL array on GPU, stored row-wise, dimension (LDA,*)
On entry, the matrix of column dimension N to which the row
interchanges will be applied.
On exit, the permuted matrix.
\param[in]
lda INTEGER
The leading dimension of the array A.
(I.e., stride between elements in a column.)
\param[in]
k1 INTEGER
The first element of IPIV for which a row interchange will
be done. (One based index.)
\param[in]
k2 INTEGER
The last element of IPIV for which a row interchange will
be done. (One based index.)
\param[in]
d_ipiv INTEGER array, on GPU, dimension (K2*abs(INCI))
The vector of pivot indices. Only the elements in positions
K1 through K2 of IPIV are accessed.
IPIV(K) = L implies rows K and L are to be interchanged.
\param[in]
inci INTEGER
The increment between successive values of IPIV.
            Currently, INCI > 0.
            TODO: If INCI is negative, the pivots are applied in reverse order.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_saux2
********************************************************************/
extern "C" void
magmablas_slaswp2_q(
magma_int_t n, float* dAT, magma_int_t lda,
magma_int_t k1, magma_int_t k2,
const magma_int_t *d_ipiv, magma_int_t inci,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( n < 0 )
info = -1;
else if ( k1 < 0 )
info = -4;
else if ( k2 < 0 || k2 < k1 )
info = -5;
else if ( inci <= 0 )
info = -7;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
int blocks = (n + NTHREADS - 1) / NTHREADS;
slaswp2_kernel<<< blocks, NTHREADS, 0, queue >>>(
n, dAT + (k1-1)*lda, lda, k2-(k1-1), d_ipiv, inci );
}
/**
@see magmablas_slaswp2_q
@ingroup magma_saux2
********************************************************************/
extern "C" void
magmablas_slaswp2( magma_int_t n, float* dAT, magma_int_t lda,
magma_int_t k1, magma_int_t k2,
const magma_int_t *d_ipiv, magma_int_t inci )
{
magmablas_slaswp2_q( n, dAT, lda, k1, k2, d_ipiv, inci, magma_stream );
}
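// Illustrative usage sketch (not part of the MAGMA library): magmablas_slaswp2
// expects the pivot vector in GPU memory, so a caller holding 1-based pivots on
// the host copies them to the device first and then issues a single launch for
// the whole pivot range. Error checking is omitted; n, dAT, lda, k2 and ipiv
// are assumed to be provided by the caller.
static inline void
example_slaswp2_usage( magma_int_t n, float *dAT, magma_int_t lda,
                       magma_int_t k2, const magma_int_t *ipiv )
{
    magma_int_t *d_ipiv = NULL;
    cudaMalloc( (void**)&d_ipiv, k2 * sizeof(magma_int_t) );
    cudaMemcpy( d_ipiv, ipiv, k2 * sizeof(magma_int_t), cudaMemcpyHostToDevice );
    // swap rows 1..k2 of the row-stored matrix using the device-side pivots
    magmablas_slaswp2( n, dAT, lda, 1, k2, d_ipiv, 1 );
    cudaFree( d_ipiv );
}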
|
a2d9ac3dbaba6e0becf9005df266301308acb5ce.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <hipcub/hipcub.hpp>
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/normalize_l1_op.h"
#include "caffe2/operators/normalize_op.h"
namespace caffe2 {
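// NormalizeKernel L2-normalizes n vectors of length m that are embedded in a
// larger tensor: vector i starts at base = (i / sf) * sf * m + (i % sf) and its
// consecutive elements are sf apart in memory (sf is the product of the sizes
// of the dimensions after the normalized axis). Each thread block reduces one
// vector's sum of squares, clamps the resulting norm to kEps, and rescales.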
__global__ void NormalizeKernel(
const int m,
const int n,
const int sf,
const float* xData,
float* yData,
const float kEps) {
typedef hipcub::BlockReduce<float, CAFFE_CUDA_NUM_THREADS> BlockReduce;
__shared__ BlockReduce::TempStorage temp_storage;
for (int i = blockIdx.x; i < n; i += gridDim.x) {
auto base = (i / sf) * sf * m + (i % sf);
float sum = 0.0;
__shared__ float norm;
for (int j = threadIdx.x; j < m; j += blockDim.x) {
const auto x_ij = xData[base + j * sf];
sum += x_ij * x_ij;
}
float reduce_result = BlockReduce(temp_storage).Sum(sum);
if (threadIdx.x == 0) {
norm = sqrtf(reduce_result);
norm = fmaxf(norm, kEps);
}
__syncthreads();
for (int j = threadIdx.x; j < m; j += blockDim.x) {
const auto index = base + j * sf;
yData[index] = xData[index] / norm;
}
}
}
__global__ void NormalizeGradientKernel(
const int M,
const int N,
const int SF,
const float* in_mat,
const float* grad_out_mat,
float* grad_mat,
const float kEps) {
typedef hipcub::BlockReduce<float, CAFFE_CUDA_NUM_THREADS> BlockReduce;
__shared__ BlockReduce::TempStorage temp_storage_sum;
__shared__ BlockReduce::TempStorage temp_storage_norm;
for (int i = blockIdx.x; i < M; i += gridDim.x) {
float sum = 0.0;
float norm = 0.0;
__shared__ float row_sum;
__shared__ float row_norm;
__shared__ float row_norm_3;
auto base = (i / SF) * SF * N + (i % SF);
for (int j = threadIdx.x; j < N; j += blockDim.x) {
int index = base + j * SF;
sum += in_mat[index] * grad_out_mat[index];
norm += in_mat[index] * in_mat[index];
}
float reduce_result = BlockReduce(temp_storage_sum).Sum(sum);
float reduce_norm = BlockReduce(temp_storage_norm).Sum(norm);
if (threadIdx.x == 0) {
row_sum = reduce_result;
row_norm = sqrtf(reduce_norm);
row_norm = fmaxf(row_norm, kEps);
row_norm_3 = powf(row_norm, 3);
}
__syncthreads();
for (int j = threadIdx.x; j < N; j += blockDim.x) {
int index = base + j * SF;
const float x_ij = in_mat[index];
const float dy_ij = grad_out_mat[index];
grad_mat[index] = (dy_ij / row_norm) - ((x_ij / row_norm_3) * row_sum);
}
}
}
template <>
void NormalizeOp<float, CUDAContext>::DoNormalize(
const float* xData,
float* yData,
const int m,
const int n,
const int sf) {
hipLaunchKernelGGL(( NormalizeKernel),
dim3(min(n, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(), m, n, sf, xData, yData, kEps_);
}
template <>
bool NormalizeGradientOp<float, CUDAContext>::RunOnDevice() {
const auto& X = Input(0);
const auto& dY = Input(1);
auto* dX = Output(0);
dX->ResizeLike(X);
const auto canonical_axis =
X.canonical_axis_index(OperatorBase::GetSingleArgument<int>("axis", -1));
int N = X.dim32(canonical_axis);
int M = X.size() / N;
const int SF = X.size_from_dim(canonical_axis + 1);
hipLaunchKernelGGL(( NormalizeGradientKernel),
dim3(min(M, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
M,
N,
SF,
X.data<float>(),
dY.data<float>(),
dX->template mutable_data<float>(),
kEps_);
return true;
}
namespace {
__global__ void NormalizeL1Kernel(
const int m,
const int n,
const int sf,
const float* xData,
float* yData) {
typedef hipcub::BlockReduce<float, CAFFE_CUDA_NUM_THREADS> BlockReduce;
__shared__ BlockReduce::TempStorage temp_storage;
for (int i = blockIdx.x; i < n; i += gridDim.x) {
auto base = (i / sf) * sf * m + (i % sf);
float sum = 0.0;
__shared__ float norm;
for (int j = threadIdx.x; j < m; j += blockDim.x) {
const auto x_ij = xData[base + j * sf];
sum += fabsf(x_ij);
}
float reduce_result = BlockReduce(temp_storage).Sum(sum);
if (threadIdx.x == 0) {
norm = reduce_result;
}
__syncthreads();
if (norm != 0) {
for (int j = threadIdx.x; j < m; j += blockDim.x) {
const auto index = base + j * sf;
yData[index] = xData[index] / norm;
}
}
}
}
} // namespace
template <>
void NormalizeL1Op<float, CUDAContext>::DoNormalize(
const float* xData,
float* yData,
const int m,
const int n,
const int sf) {
hipLaunchKernelGGL(( NormalizeL1Kernel),
dim3(min(n, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(), m, n, sf, xData, yData);
}
REGISTER_CUDA_OPERATOR(Normalize, NormalizeOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
NormalizeGradient,
NormalizeGradientOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(NormalizeL1, NormalizeL1Op<float, CUDAContext>);
} // namespace caffe2
|
a2d9ac3dbaba6e0becf9005df266301308acb5ce.cu
|
#include <cub/block/block_reduce.cuh>
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/normalize_l1_op.h"
#include "caffe2/operators/normalize_op.h"
namespace caffe2 {
__global__ void NormalizeKernel(
const int m,
const int n,
const int sf,
const float* xData,
float* yData,
const float kEps) {
typedef cub::BlockReduce<float, CAFFE_CUDA_NUM_THREADS> BlockReduce;
__shared__ BlockReduce::TempStorage temp_storage;
for (int i = blockIdx.x; i < n; i += gridDim.x) {
auto base = (i / sf) * sf * m + (i % sf);
float sum = 0.0;
__shared__ float norm;
for (int j = threadIdx.x; j < m; j += blockDim.x) {
const auto x_ij = xData[base + j * sf];
sum += x_ij * x_ij;
}
float reduce_result = BlockReduce(temp_storage).Sum(sum);
if (threadIdx.x == 0) {
norm = sqrtf(reduce_result);
norm = fmaxf(norm, kEps);
}
__syncthreads();
for (int j = threadIdx.x; j < m; j += blockDim.x) {
const auto index = base + j * sf;
yData[index] = xData[index] / norm;
}
}
}
__global__ void NormalizeGradientKernel(
const int M,
const int N,
const int SF,
const float* in_mat,
const float* grad_out_mat,
float* grad_mat,
const float kEps) {
typedef cub::BlockReduce<float, CAFFE_CUDA_NUM_THREADS> BlockReduce;
__shared__ BlockReduce::TempStorage temp_storage_sum;
__shared__ BlockReduce::TempStorage temp_storage_norm;
for (int i = blockIdx.x; i < M; i += gridDim.x) {
float sum = 0.0;
float norm = 0.0;
__shared__ float row_sum;
__shared__ float row_norm;
__shared__ float row_norm_3;
auto base = (i / SF) * SF * N + (i % SF);
for (int j = threadIdx.x; j < N; j += blockDim.x) {
int index = base + j * SF;
sum += in_mat[index] * grad_out_mat[index];
norm += in_mat[index] * in_mat[index];
}
float reduce_result = BlockReduce(temp_storage_sum).Sum(sum);
float reduce_norm = BlockReduce(temp_storage_norm).Sum(norm);
if (threadIdx.x == 0) {
row_sum = reduce_result;
row_norm = sqrtf(reduce_norm);
row_norm = fmaxf(row_norm, kEps);
row_norm_3 = powf(row_norm, 3);
}
__syncthreads();
for (int j = threadIdx.x; j < N; j += blockDim.x) {
int index = base + j * SF;
const float x_ij = in_mat[index];
const float dy_ij = grad_out_mat[index];
grad_mat[index] = (dy_ij / row_norm) - ((x_ij / row_norm_3) * row_sum);
}
}
}
template <>
void NormalizeOp<float, CUDAContext>::DoNormalize(
const float* xData,
float* yData,
const int m,
const int n,
const int sf) {
NormalizeKernel<<<
min(n, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(m, n, sf, xData, yData, kEps_);
}
template <>
bool NormalizeGradientOp<float, CUDAContext>::RunOnDevice() {
const auto& X = Input(0);
const auto& dY = Input(1);
auto* dX = Output(0);
dX->ResizeLike(X);
const auto canonical_axis =
X.canonical_axis_index(OperatorBase::GetSingleArgument<int>("axis", -1));
int N = X.dim32(canonical_axis);
int M = X.size() / N;
const int SF = X.size_from_dim(canonical_axis + 1);
NormalizeGradientKernel<<<
min(M, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
M,
N,
SF,
X.data<float>(),
dY.data<float>(),
dX->template mutable_data<float>(),
kEps_);
return true;
}
namespace {
__global__ void NormalizeL1Kernel(
const int m,
const int n,
const int sf,
const float* xData,
float* yData) {
typedef cub::BlockReduce<float, CAFFE_CUDA_NUM_THREADS> BlockReduce;
__shared__ BlockReduce::TempStorage temp_storage;
for (int i = blockIdx.x; i < n; i += gridDim.x) {
auto base = (i / sf) * sf * m + (i % sf);
float sum = 0.0;
__shared__ float norm;
for (int j = threadIdx.x; j < m; j += blockDim.x) {
const auto x_ij = xData[base + j * sf];
sum += fabsf(x_ij);
}
float reduce_result = BlockReduce(temp_storage).Sum(sum);
if (threadIdx.x == 0) {
norm = reduce_result;
}
__syncthreads();
if (norm != 0) {
for (int j = threadIdx.x; j < m; j += blockDim.x) {
const auto index = base + j * sf;
yData[index] = xData[index] / norm;
}
}
}
}
} // namespace
template <>
void NormalizeL1Op<float, CUDAContext>::DoNormalize(
const float* xData,
float* yData,
const int m,
const int n,
const int sf) {
NormalizeL1Kernel<<<
min(n, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(m, n, sf, xData, yData);
}
REGISTER_CUDA_OPERATOR(Normalize, NormalizeOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
NormalizeGradient,
NormalizeGradientOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(NormalizeL1, NormalizeL1Op<float, CUDAContext>);
} // namespace caffe2
|
e2e33679a606ad974e8a1986bf691b61dd636d23.hip
|
// !!! This is a file automatically generated by hipify!!!
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THHUNN/generic/DistKLDivCriterion.hip"
#else
void THNN_(DistKLDivCriterion_updateOutput)(
THCState *state,
THCTensor *input,
THCTensor *target,
THCTensor *output,
int64_t reduction)
{
THCUNN_check_shape(state, input, target);
THCUNN_assertSameGPU(state, 2, input, target);
THArgCheck(THCTensor_(nElement)(state, input) == THCTensor_(nElement)(state, target), 2,
"input and target need to have the same number of elements");
if (reduction == Reduction::None) {
THCTensor_(resizeAs)(state, output, input);
THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, input, target, output,
kl_updateOutput_no_reduce_functor<scalar_t>());
return;
}
THCTensor_(resize1d)(state, output, 1);
accreal sum;
ptrdiff_t size = THCTensor_(nElement)(state, input);
input = THCTensor_(newContiguous)(state, input);
target = THCTensor_(newContiguous)(state, target);
thrust::device_ptr<scalar_t> input_data(THCTensor_(data)(state, input));
thrust::device_ptr<scalar_t> target_data(THCTensor_(data)(state, target));
sum = thrust::inner_product(input_data, input_data+size, target_data, (accreal) 0, thrust::plus<accreal>(), kl_functor<scalar_t, accreal>());
if (reduction == Reduction::Mean)
sum /= size;
THCTensor_(free)(state, input);
THCTensor_(free)(state, target);
THCTensor_(set1d)(state, output, 0, ScalarConvert<accreal, scalar_t>::to(sum));
}
void THNN_(DistKLDivCriterion_updateGradInput)(
THCState *state,
THCTensor *input,
THCTensor *target,
THCTensor *gradOutput,
THCTensor *gradInput,
int64_t reduction)
{
THCUNN_check_shape(state, input, target);
THCUNN_assertSameGPU(state, 4, input, target, gradInput, gradOutput);
THArgCheck(THCTensor_(nElement)(state, input) == THCTensor_(nElement)(state, target), 2,
"input and target need to have the same number of elements");
THCTensor_(resizeAs)(state, gradInput, input);
if (reduction == Reduction::None) {
THCUNN_check_shape(state, gradOutput, input);
THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, target, gradOutput, gradInput,
kl_updateGradInput_no_reduce_functor<scalar_t>());
return;
}
THCUNN_check_dim_size(state, gradOutput, 1, 0, 1);
ptrdiff_t size = THCTensor_(nElement)(state, input);
scalar_t norm = (reduction == Reduction::Mean ? ScalarConvert<accreal, scalar_t>::to(accreal(1)/size) : ScalarConvert<int, scalar_t>::to(1));
input = THCTensor_(newContiguous)(state, input);
target = THCTensor_(newContiguous)(state, target);
thrust::device_ptr<scalar_t> input_data(THCTensor_(data)(state, input));
thrust::device_ptr<scalar_t> target_data(THCTensor_(data)(state, target));
thrust::device_ptr<scalar_t> gradInput_data(THCTensor_(data)(state, gradInput));
thrust::transform(input_data, input_data+size, target_data, gradInput_data,
kl_updateGradInput_functor<scalar_t>(norm, THCTensor_(get1d)(state, gradOutput, 0)));
THCTensor_(free)(state, input);
THCTensor_(free)(state, target);
}
#endif
|
e2e33679a606ad974e8a1986bf691b61dd636d23.cu
|
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THCUNN/generic/DistKLDivCriterion.cu"
#else
void THNN_(DistKLDivCriterion_updateOutput)(
THCState *state,
THCTensor *input,
THCTensor *target,
THCTensor *output,
int64_t reduction)
{
THCUNN_check_shape(state, input, target);
THCUNN_assertSameGPU(state, 2, input, target);
THArgCheck(THCTensor_(nElement)(state, input) == THCTensor_(nElement)(state, target), 2,
"input and target need to have the same number of elements");
if (reduction == Reduction::None) {
THCTensor_(resizeAs)(state, output, input);
THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, input, target, output,
kl_updateOutput_no_reduce_functor<scalar_t>());
return;
}
THCTensor_(resize1d)(state, output, 1);
accreal sum;
ptrdiff_t size = THCTensor_(nElement)(state, input);
input = THCTensor_(newContiguous)(state, input);
target = THCTensor_(newContiguous)(state, target);
thrust::device_ptr<scalar_t> input_data(THCTensor_(data)(state, input));
thrust::device_ptr<scalar_t> target_data(THCTensor_(data)(state, target));
sum = thrust::inner_product(input_data, input_data+size, target_data, (accreal) 0, thrust::plus<accreal>(), kl_functor<scalar_t, accreal>());
if (reduction == Reduction::Mean)
sum /= size;
THCTensor_(free)(state, input);
THCTensor_(free)(state, target);
THCTensor_(set1d)(state, output, 0, ScalarConvert<accreal, scalar_t>::to(sum));
}
void THNN_(DistKLDivCriterion_updateGradInput)(
THCState *state,
THCTensor *input,
THCTensor *target,
THCTensor *gradOutput,
THCTensor *gradInput,
int64_t reduction)
{
THCUNN_check_shape(state, input, target);
THCUNN_assertSameGPU(state, 4, input, target, gradInput, gradOutput);
THArgCheck(THCTensor_(nElement)(state, input) == THCTensor_(nElement)(state, target), 2,
"input and target need to have the same number of elements");
THCTensor_(resizeAs)(state, gradInput, input);
if (reduction == Reduction::None) {
THCUNN_check_shape(state, gradOutput, input);
THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, target, gradOutput, gradInput,
kl_updateGradInput_no_reduce_functor<scalar_t>());
return;
}
THCUNN_check_dim_size(state, gradOutput, 1, 0, 1);
ptrdiff_t size = THCTensor_(nElement)(state, input);
scalar_t norm = (reduction == Reduction::Mean ? ScalarConvert<accreal, scalar_t>::to(accreal(1)/size) : ScalarConvert<int, scalar_t>::to(1));
input = THCTensor_(newContiguous)(state, input);
target = THCTensor_(newContiguous)(state, target);
thrust::device_ptr<scalar_t> input_data(THCTensor_(data)(state, input));
thrust::device_ptr<scalar_t> target_data(THCTensor_(data)(state, target));
thrust::device_ptr<scalar_t> gradInput_data(THCTensor_(data)(state, gradInput));
thrust::transform(input_data, input_data+size, target_data, gradInput_data,
kl_updateGradInput_functor<scalar_t>(norm, THCTensor_(get1d)(state, gradOutput, 0)));
THCTensor_(free)(state, input);
THCTensor_(free)(state, target);
}
#endif
|
96501b890f8287a431c62aa0ca391523779a00bd.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
//#include <cutil.h>
#include <iostream>
#include <ostream>
#include <fstream>
//#include "/home/yusuke/NVIDIA_GPU_Computing_SDK/C/common/inc/cutil.h"
using namespace std;
#define CASENAME "test"
#define BLOCKSIZEX 128
#define BLOCKSIZEY 1
#define BLOCKSIZEZ 1
#define BLOCKSIZELRX 64
#define BLOCKSIZELRY 1
#define BLOCKSIZELRZ 1
#define XDIM 128
#define YDIM 192
#define ZDIM 4
#define TMAX 50000
#define STARTF 30000
#define OBSTR1 4.f
#define OBSTX1 63.5f
#define OBSTY1 63.5f
#define OBSTZ1 15.5f
#define OBSTR2 4.f
#define OBSTX2 63.5f
#define OBSTY2 31.5f
#define OBSTZ2 31.5f
#define LRFACTOR 0.5f
#define LRLEVEL 2
#define LRX0 47.75f //minimum x coord of LR
#define XLRDIM 64 //number of nodes in x
#define LRY0 47.75f
#define YLRDIM 64
#define LRZ0 -0.25f
#define ZLRDIM 8
#define RE 100.f//2000.f//100.f;
#define UMAX 0.08f
#define METHOD "SINGLE" //SINGLE,HYB,TEXT,SHARED,CACHE
#define SmagLES "NO" //YES,NO
#define MODEL "BGK" //BGK,MRT,STREAM
#define REFINEMENT 0 //1,0
#define ZPERIODIC "NO"
#define CS 0.04f
//#define CHARLENGTH = XDIM-2.f;
//#define BLOCKSIZE 16;
//int const XDIM = 32;
//int const YDIM = 32;
#include <sys/time.h>
#include <time.h>
/*
Image List:
0 fluid
1 BB
2
3 DirichletWest(simple)
10 BB(force)
13 DirichletWest_Reg
14 NeumannEast_Reg
15 DirichletNorth_Reg
16 DirichletSouth_Reg
21 ysymmetry_top
22 ysymmetry_bot
23 zsymmetry_top
24 zsymmetry_bot
25 xsymmetry_top
26 xsymmetry_bot
*/
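// ImageFcn and ImageFcnLR below map a lattice node (x,y,z) to one of these
// codes. In the currently active (uncommented) setup, nodes within a square of
// half-width OBSTR1 around (OBSTX1,OBSTY1) are tagged 10 (bounce-back with
// force), the y boundaries are tagged 200 and 100 (codes not in the list
// above), and the x boundaries use the x-symmetry codes 26 and 25.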
inline __device__ int ImageFcnLR(float x, float y, float z){
int value = 0;
// if(((x-OBSTX1)*(x-OBSTX1)+(y-OBSTY1)*(y-OBSTY1))<OBSTR1*OBSTR1)
// return 10;
// else if(((x-OBSTX2)*(x-OBSTX2)+(y-OBSTY2)*(y-OBSTY2))<OBSTR2*OBSTR2)
// return 10;
//if(((x-OBSTX)*(x-OBSTX)+(y-OBSTY1)*(y-OBSTY1))<OBSTR1*OBSTR1)
// if(((x-OBSTX1)*(x-OBSTX1)+(y-OBSTY1)*(y-OBSTY1)+(z-OBSTZ1)*(z-OBSTZ1))<OBSTR1*OBSTR1)
// {
// return 10;
// }
// else
// //if(y < 0.1f || z < 0.1f || (XDIM-x) < 0.1f || (YDIM-y) < 0.1f || (ZDIM-z) < 0.1f)
// if(y < 17.5f || z < 17.5f || y > 46.5f || z > 46.5f)
// return 1;
// else if(x < 17.5f)
// return 13;
// else if(x > 78.5f)
// return 14;
// else
// if(abs(x-OBSTX1) < OBSTR1 && abs(y-OBSTY1) < OBSTR1)
// return 10;
// else
// if(x < 3)//== 0)
// value = 1;
// else if(x > XDIM-4)//== XDIM-1)
// value = 1;
// if(z<3)
// value = 1;
// if(z>ZDIM-4)
// value = 1;
if(abs(x-OBSTX1) < OBSTR1 && abs(y-OBSTY1) < OBSTR1)
value = 10;
// if(y == 0)
// value = 200;//22;
// else if(y == YDIM-1)
// value = 100;
return value;
}
inline __device__ int ImageFcn(int x, int y, int z){
int value = 0;
//Cylinder
// if(((x-OBSTX1)*(x-OBSTX1)+(y-OBSTY1)*(y-OBSTY1))<OBSTR1*OBSTR1)
// value = 10;
// else if(((x-OBSTX2)*(x-OBSTX2)+(y-OBSTY2)*(y-OBSTY2))<OBSTR2*OBSTR2)
// value = 10;
//Sphere
// if(((x-OBSTX1)*(x-OBSTX1)+(y-OBSTY1)*(y-OBSTY1)+(z-OBSTZ1)*(z-OBSTZ1))<OBSTR1*OBSTR1)
// {
//// if(z == 0 || z == ZDIM-1)
//// return 1;
//// else
// return 10;
// }
//Lid Driven Cavity
// if(y == 0 || y == YDIM-1 || z == 0 || z == ZDIM-1)
// value = 1;
// else if(x == XDIM-2 || y == 1 || y == YDIM-2 || z == 1 || z == ZDIM-2)
// return 1;
// else if(x == 0)
// return 1;
if(abs(x-OBSTX1) < OBSTR1 && abs(y-OBSTY1) < OBSTR1)
value = 10;
else if(y == 0)
value = 200;//22;
else if(y == YDIM-1)
value = 100;
else if(x == 0)
value = 26;
else if(x == XDIM-1)
value = 25;
// else if(x < 3)//== 0)
// value = 1;
// else if(x > XDIM-4)//== XDIM-1)
// value = 1;
// if(z < 2 || z > ZDIM-3)
// value = 1;
return value;
}
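// PoisProf returns a normalized parabolic (Poiseuille) profile across the
// channel: with radius = (YDIM-2)/2 it is 0 at the half-way bounce-back walls
// (x = 0.5 and x = YDIM-1.5) and 1 at mid-channel, so the commented-out uses in
// the Dirichlet boundary routines scale UMAX by it to set the inlet velocity.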
inline __device__ float PoisProf (float x){
float radius = (YDIM-1-1)*0.5f;
float result = -1.0f*(((1.0f-(x-0.5f)/radius))*((1.0f-(x-0.5f)/radius))-1.0f);
return (result);
// return 1.f;
}
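// trilinear_interp blends the eight corner values of a unit cell: x, y and z
// are fractional offsets inside the cell (each in [0,1]) and the corners are
// named v_zyx, i.e. the first digit selects z, the second y and the last x, so
// (x,y,z) = (0,0,0) returns v000 and (1,1,1) returns v111. It is presumably
// used when exchanging data between the coarse and locally refined (LR) grids.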
inline __device__ float trilinear_interp (float v000, float v001, float v010, float v011,
float v100, float v101, float v110, float v111, float x, float y, float z){
return v000*(1.f-x)*(1.f-y)*(1.f-z)+
v001*( x)*(1.f-y)*(1.f-z)+
v010*(1.f-x)*( y)*(1.f-z)+
v011*( x)*( y)*(1.f-z)+
v100*(1.f-x)*(1.f-y)*( z)+
v101*( x)*(1.f-y)*( z)+
v110*(1.f-x)*( y)*( z)+
v111*( x)*( y)*( z);
}
__device__ void DirichletWest(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z)
{
// if(y == 0){
// f2 = f4;
// f6 = f7;
// f11 = f13;
// f16 = f18;
// }
// else if(y == YDIM-1){
// f4 = f2;
// f7 = f6;
// f13 = f11;
// f18 = f16;
// }
// if(z == 0){
// f9 = f14;
// f10 = f15;
// f11 = f16;
// f12 = f17;
// f13 = f18;
// }
// else if(z == ZDIM-1){
// f14 = f9;
// f15 = f10;
// f16 = f11;
// f17 = f12;
// f18 = f13;
// }
if(y == 0 && z == 0){
f2 = f4;
f13=f18;
f11=f18;
f16=f18;
f6 =f7;
f9 =f14;
f12=f17;
}
else if(y == 0 && z == ZDIM-1){
f4 = f2;
f11=f13;
f18=f13;
f16=f13;
f6 =f7;
f14=f9;
f17=f12;
}
else if(y == YDIM-1 && z == 0){
f4 = f2;
f11=f16;
f18=f16;
f13=f16;
f7 =f6;
f9 =f14;
f12=f17;
}
else if(y == YDIM-1 && z == ZDIM-1){
f4 = f2;
f16=f11;
f18=f11;
f13=f11;
f7 =f6;
f14=f9;
f17=f12;
}
else{
if(y == 0){
f2 = f4;
f11=f13;
f16=f18;
f8 = f5;
}
else if(y == YDIM-1){
f4=f2 ;
f13=f11;
f18=f16;
f5=f8 ;
}
if(z == 0){
f9 = f14;
f10 = f15;
f11 = f16;
f13 = f18;
}
else if(z == ZDIM-1){
f14 = f9;
f15 = f10;
f16 = f11;
f18 = f13;
}
}
float u,v,w;//,rho;
u = UMAX;//*PoisProf(zcoord)*1.5;
v = 0.0f;
w = 0.0f;
// float rho = u+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f3+f6+f7+f12+f17)); //D2Q9i
// float usqr = u*u+v*v+w*w;
f1 = fma(0.0555555556f,6.0f*u,f3);//0.0555555556f*(6.0f*u)+f3;//-0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);;
f5 = fma(0.0277777778f,6.0f*(u+v),f7 );// -0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
f8 = fma(0.0277777778f,6.0f*(u-v),f6 );// -0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
f10= fma(0.0277777778f,6.0f*(u+w),f17);//-0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
f15= fma(0.0277777778f,6.0f*(u-w),f12);//-0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
//// f0 = 1.0f/3.0f*(rho-1.5f*usqr);
// f1 = 1.0f/18.0f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);
//// f2 = 1.0f/18.0f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr);
//// f3 = 1.0f/18.0f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
//// f4 = 1.0f/18.0f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr);
// f5 = 1.0f/36.0f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr);
//// f6 = 1.0f/36.0f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
//// f7 = 1.0f/36.0f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
// f8 = 1.0f/36.0f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr);
//// f9 = 1.0f/18.0f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr);
// f10= 1.0f/36.0f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr);
//// f11= 1.0f/36.0f*(rho+3.0f*(v+w)+4.5f*(v+w)*(u+w)-1.5f*usqr);
//// f12= 1.0f/36.0f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
//// f13= 1.0f/36.0f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(u+w)-1.5f*usqr);
//// f14= 1.0f/18.0f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr);
// f15= 1.0f/36.0f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr);
//// f16= 1.0f/36.0f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr);
//// f17= 1.0f/36.0f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
//// f18= 1.0f/36.0f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr);
}
__device__ void DirichletWest_Reg(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z)
{
if(y == 0){
f2 = f4;
f6 = f7;
f11 = f13;
f16 = f18;
}
else if(y == YDIM-1){
f4 = f2;
f7 = f6;
f13 = f11;
f18 = f16;
}
if(z == 0){
f9 = f14;
f10 = f15;
f11 = f16;
f12 = f17;
f13 = f18;
}
else if(z == ZDIM-1){
f14 = f9;
f15 = f10;
f16 = f11;
f17 = f12;
f18 = f13;
}
float u,v,w;//,rho;
u = UMAX;//*PoisProf(y)*1.5;
v = 0.0f;//0.0;
w = 0.0f;
// float rho = u+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f3+f6+f7+f12+f17)); //D2Q9i
// float u2 = u*u;
// float v2 = v*v;
// float w2 = w*w;
// float usqr = u2+v2+w2;
// f1 =(0.166666667f*u)+
// (f3-(-(0.166666667f*u)));
f1 = f3+0.33333333f*u;
// f5 =(0.0833333333f*( u+v))+
// (f7-(0.0833333333f*(-u-v)));
f5 = f7+0.166666667f*(u+v);
// f8 =(0.0833333333f*( u-v ))+
// (f6-(0.0833333333f*(-u+v )));
f8 = f6+0.166666667f*(u-v);
// f10=(0.0833333333f*( u+w))+
// (f17-(0.0833333333f*(-u-w)));
f10= f17+0.166666667f*(u+w);
// f15=(0.0833333333f*( u-w))+
// (f12-(0.0833333333f*(-u+w)));
f15= f12+0.166666667f*(u-w);
// f1 =(0.1031746045f*rho+ -0.0231796391f*usqr+ (0.166666667f*u) + 0.16666667f*u2)+
// (f3-(0.1031746045f*rho+ -0.0231796391f*usqr+-(0.166666667f*u) + 0.16666667f*u2));
// f5 =(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u+v +u2+(v2-w2))+ 0.25f*u*v)+
// (f7-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u-v +u2+(v2-w2))+ 0.25f*u*v));
// f8 =(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u-v +u2+(v2-w2))+ -0.25f*u*v)+
// (f6-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u+v +u2+(v2-w2))+ -0.25f*u*v));
// f10=(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u+w +u2+(v2-w2))+ 0.25f*u*w)+
// (f17-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u-w +u2+(v2-w2))+ 0.25f*u*w));
// f15=(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u-w +u2+(v2-w2))+ -0.25f*u*w)+
// (f12-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u+w +u2+(v2-w2))+ -0.25f*u*w));
// float PI11 = f1 +f3 +f5 +f6 +f7 +f8 +f10+f12+f15+f17;
// float PI22 = f2 +f4 +f5 +f6 +f7 +f8 +f11+f13+f16+f18;
// float PI33 = f9 +f10+f11+f12+f13+f14+f15+f16+f17+f18;
}
void __device__ DirichletWest_Regularized(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z)
{
if(y == 0 && z == 0){
f2 = f4;
f13=f18;
f11=f18;
f16=f18;
f6 =f7;
f9 =f14;
f12=f17;
}
else if(y == 0 && z == ZDIM-1){
f4 = f2;
f11=f13;
f18=f13;
f16=f13;
f6 =f7;
f14=f9;
f17=f12;
}
else if(y == YDIM-1 && z == 0){
f4 = f2;
f11=f16;
f18=f16;
f13=f16;
f7 =f6;
f9 =f14;
f12=f17;
}
else if(y == YDIM-1 && z == ZDIM-1){
f4 = f2;
f16=f11;
f18=f11;
f13=f11;
f7 =f6;
f14=f9;
f17=f12;
}
else{
if(y == 0){
f2 = f4;
f11=f13;
f16=f18;
f8 = f5;
}
else if(y == YDIM-1){
f4=f2 ;
f13=f11;
f18=f16;
f5=f8 ;
}
if(z == 0){
f9 = f14;
f10 = f15;
f11 = f16;
f13 = f18;
}
else if(z == ZDIM-1){
f14 = f9;
f15 = f10;
f16 = f11;
f18 = f13;
}
}
float PI11 = 0;
float PI12 = 0;
float PI22 = 0;
float PI33 = 0;
float PI13 = 0;
float PI23 = 0;
float u;//,v;//,w;//,rho;
u = UMAX;//*PoisProf(z)*1.5;
//v = 0.0f;
//w = 0.0f;
float usqr = u*u;//+v*v+w*w;
float rho = u+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f3+f6+f7+f12+f17)); //D2Q9i
float feq0 = 0.3333333333f*(rho-1.5f*usqr);
float feq1 = 0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);
float feq2 = 0.0555555556f*(rho -1.5f*usqr);
float feq3 = 0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
float feq4 = 0.0555555556f*(rho -1.5f*usqr);
float feq9 = 0.0555555556f*(rho -1.5f*usqr);
float feq14 = 0.0555555556f*(rho -1.5f*usqr);
float feq5 = 0.0277777778f*(rho+3.0f*( u)+4.5f*( u)*( u)-1.5f*usqr);
float feq6 = 0.0277777778f*(rho+3.0f*(-u)+4.5f*(-u)*(-u)-1.5f*usqr);
float feq7 = 0.0277777778f*(rho+3.0f*(-u)+4.5f*(-u)*(-u)-1.5f*usqr);
float feq8 = 0.0277777778f*(rho+3.0f*( u)+4.5f*( u)*( u)-1.5f*usqr);
float feq10 = 0.0277777778f*(rho+3.0f*( u)+4.5f*( u)*( u)-1.5f*usqr);
float feq11 = 0.0277777778f*(rho -1.5f*usqr);
float feq12 = 0.0277777778f*(rho+3.0f*(-u)+4.5f*(-u)*(-u)-1.5f*usqr);
float feq13 = 0.0277777778f*(rho -1.5f*usqr);
float feq15 = 0.0277777778f*(rho+3.0f*( u)+4.5f*( u)*( u)-1.5f*usqr);
float feq16 = 0.0277777778f*(rho -1.5f*usqr);
float feq17 = 0.0277777778f*(rho+3.0f*(-u)+4.5f*(-u)*(-u)-1.5f*usqr);
float feq18 = 0.0277777778f*(rho -1.5f*usqr);
// float feq0 = 0.3333333333f*(rho-1.5f*usqr);
// float feq1 = 0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);
// float feq2 = 0.0555555556f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr);
// float feq3 = 0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
// float feq4 = 0.0555555556f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr);
// float feq5 = 0.0277777778f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr);
// float feq6 = 0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
// float feq7 = 0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
// float feq8 = 0.0277777778f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr);
// float feq9 = 0.0555555556f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr);
// float feq10 = 0.0277777778f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr);
// float feq11 = 0.0277777778f*(rho+3.0f*(v+w)+4.5f*(v+w)*(v+w)-1.5f*usqr);
// float feq12 = 0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
// float feq13 = 0.0277777778f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr);
// float feq14 = 0.0555555556f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr);
// float feq15 = 0.0277777778f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr);
// float feq16 = 0.0277777778f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr);
// float feq17 = 0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
// float feq18 = 0.0277777778f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr);
f1 = feq1 +f3 -feq3 ;
f5 = feq5 +f7 -feq7 ;
f8 = feq8 +f6 -feq6 ;
f10= feq10+f17-feq17;
f15= feq15+f12-feq12;
PI11 = (f1-feq1)+(f3-feq3)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17);
PI22 = (f2-feq2)+(f4-feq4)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
PI33 = (f9-feq9)+(f14-feq14)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
PI12 = (f5-feq5)+(f7-feq7)-(f6-feq6)-(f8-feq8);
PI13 = (f10-feq10)+(f17-feq17)-(f12-feq12)-(f15-feq15);
PI23 = (f11-feq11)+(f18-feq18)-(f13-feq13)-(f16-feq16);
f0 = feq0 +1.5f *(( -0.333333333f)*PI11 +( -0.333333333f)*PI22+( -0.333333333f)*PI33) ;
f1 = feq1 +0.25f *(( 0.666666667f)*PI11 +( -0.333333333f)*PI22+( -0.333333333f)*PI33) ;
f2 = feq2 +0.25f *(( -0.333333333f)*PI11 +( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f3 = feq3 +0.25f *(( 0.666666667f)*PI11 +( -0.333333333f)*PI22+( -0.333333333f)*PI33) ;
f4 = feq4 +0.25f *(( -0.333333333f)*PI11 +( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f5 = feq5 +0.125f*(( 0.666666667f)*PI11+2.0f*( PI12 )+( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f6 = feq6 +0.125f*(( 0.666666667f)*PI11+2.0f*(-PI12 )+( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f7 = feq7 +0.125f*(( 0.666666667f)*PI11+2.0f*( PI12 )+( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f8 = feq8 +0.125f*(( 0.666666667f)*PI11+2.0f*(-PI12 )+( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f9 = feq9 +0.25f *(( -0.333333333f)*PI11 +( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f10 = feq10+0.125f*(( 0.666666667f)*PI11+2.0f*( + PI13 )+( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f11 = feq11+0.125f*(( -0.333333333f)*PI11+2.0f*( + PI23)+( 0.666666667f)*PI22+( 0.666666667f)*PI33) ;
f12 = feq12+0.125f*(( 0.666666667f)*PI11+2.0f*( +-PI13 )+( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f13 = feq13+0.125f*(( -0.333333333f)*PI11+2.0f*( +-PI23)+( 0.666666667f)*PI22+( 0.666666667f)*PI33) ;
f14 = feq14+0.25f *(( -0.333333333f)*PI11 +( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f15 = feq15+0.125f*(( 0.666666667f)*PI11+2.0f*( +-PI13 )+( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f16 = feq16+0.125f*(( -0.333333333f)*PI11+2.0f*( +-PI23)+( 0.666666667f)*PI22+( 0.666666667f)*PI33) ;
f17 = feq17+0.125f*(( 0.666666667f)*PI11+2.0f*( + PI13 )+( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f18 = feq18+0.125f*(( -0.333333333f)*PI11+2.0f*( + PI23)+( 0.666666667f)*PI22+( 0.666666667f)*PI33) ;
}
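// Regularized pressure-outlet boundary for the east face: fixes rho = 1,
// extracts the outgoing velocity u from the known populations, and applies the
// same regularization step as DirichletWest_Regularized above.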
void __device__ NeumannEast_Regularized(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z)
{
if(y == 0 && z == 0){
f2 = f4;
f13 = f18;
f11 = f18;
f16 = f18;
f5 = f8;
f9 = f14;
f10 = f15;
}
else if(y == 0 && z == ZDIM-1){
f2 = f4;
f11 = f13;
f18 = f13;
f16 = f13;
f5 = f8;
f14 = f9;
f15 = f10;
}
else if(y == YDIM-1 && z == 0){
f4 = f2;
f18 = f16;
f11 = f16;
f13 = f16;
f8 = f5;
f9 = f14;
f10 = f15;
}
else if(y == YDIM-1 && z == ZDIM-1){
f4 = f2;
f13 = f11;
f16 = f11;
f18 = f11;
f8 = f5;
f14 = f9;
f15 = f10;
}
else{
if(y == 0){
f2 = f4;
f11 = f13;
f16 = f18;
f5 = f8;
}
else if(y == YDIM-1){
f4 = f2;
f13 = f11;
f18 = f16;
f8 = f5;
}
else if(z == 0){
f9 = f14;
f10 = f15;
f11 = f16;
f13 = f18;
}
else if(z == ZDIM-1){
f14 = f9;
f15 = f10;
f16 = f11;
f18 = f13;
}
}
float PI11 = 0;
float PI12 = 0;
float PI22 = 0;
float PI33 = 0;
float PI13 = 0;
float PI23 = 0;
float u;//,v;//,w;//,rho;
float rho = 1.0f;
//v = 0.0f;
//w = 0.0f;
u = -rho+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f1+f8+f5+f10+f15)); //D2Q9i
float usqr = u*u;//+v*v+w*w;
float feq0 = 0.3333333333f*(rho-1.5f*usqr);
float feq1 = 0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);
float feq2 = 0.0555555556f*(rho -1.5f*usqr);
float feq3 = 0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
float feq4 = 0.0555555556f*(rho -1.5f*usqr);
float feq9 = 0.0555555556f*(rho -1.5f*usqr);
float feq14 = 0.0555555556f*(rho -1.5f*usqr);
float feq5 = 0.0277777778f*(rho+3.0f*( u)+4.5f*( u)*( u)-1.5f*usqr);
float feq6 = 0.0277777778f*(rho+3.0f*(-u)+4.5f*(-u)*(-u)-1.5f*usqr);
float feq7 = 0.0277777778f*(rho+3.0f*(-u)+4.5f*(-u)*(-u)-1.5f*usqr);
float feq8 = 0.0277777778f*(rho+3.0f*( u)+4.5f*( u)*( u)-1.5f*usqr);
float feq10 = 0.0277777778f*(rho+3.0f*( u)+4.5f*( u)*( u)-1.5f*usqr);
float feq11 = 0.0277777778f*(rho -1.5f*usqr);
float feq12 = 0.0277777778f*(rho+3.0f*(-u)+4.5f*(-u)*(-u)-1.5f*usqr);
float feq13 = 0.0277777778f*(rho -1.5f*usqr);
float feq15 = 0.0277777778f*(rho+3.0f*( u)+4.5f*( u)*( u)-1.5f*usqr);
float feq16 = 0.0277777778f*(rho -1.5f*usqr);
float feq17 = 0.0277777778f*(rho+3.0f*(-u)+4.5f*(-u)*(-u)-1.5f*usqr);
float feq18 = 0.0277777778f*(rho -1.5f*usqr);
// float feq0 = 0.3333333333f*(rho-1.5f*usqr);
// float feq1 = 0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);
// float feq2 = 0.0555555556f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr);
// float feq3 = 0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
// float feq4 = 0.0555555556f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr);
// float feq9 = 0.0555555556f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr);
// float feq14 = 0.0555555556f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr);
// float feq5 = 0.0277777778f*(rho+3.0f*( u+v)+4.5f*( u+v)*( u+v)-1.5f*usqr);
// float feq6 = 0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
// float feq7 = 0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
// float feq8 = 0.0277777778f*(rho+3.0f*( u-v)+4.5f*( u-v)*( u-v)-1.5f*usqr);
// float feq10 = 0.0277777778f*(rho+3.0f*( u+w)+4.5f*( u+w)*( u+w)-1.5f*usqr);
// float feq11 = 0.0277777778f*(rho+3.0f*( v+w)+4.5f*( v+w)*( v+w)-1.5f*usqr);
// float feq12 = 0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
// float feq13 = 0.0277777778f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr);
// float feq15 = 0.0277777778f*(rho+3.0f*( u-w)+4.5f*( u-w)*( u-w)-1.5f*usqr);
// float feq16 = 0.0277777778f*(rho+3.0f*( v-w)+4.5f*( v-w)*( v-w)-1.5f*usqr);
// float feq17 = 0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
// float feq18 = 0.0277777778f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr);
f3 = feq3 +f1 -feq1 ;
f7 = feq7 +f5 -feq5 ;
f6 = feq6 +f8 -feq8 ;
f17= feq17+f10-feq10;
f12= feq12+f15-feq15;
PI11 = (f1-feq1)+(f3-feq3)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17);
PI22 = (f2-feq2)+(f4-feq4)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
PI33 = (f9-feq9)+(f14-feq14)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
PI12 = (f5-feq5)+(f7-feq7)-(f6-feq6)-(f8-feq8);
PI13 = (f10-feq10)+(f17-feq17)-(f12-feq12)-(f15-feq15);
PI23 = (f11-feq11)+(f18-feq18)-(f13-feq13)-(f16-feq16);
f0 = feq0 +1.5f *(( -0.333333333f)*PI11 +( -0.333333333f)*PI22+( -0.333333333f)*PI33) ;
f1 = feq1 +0.25f *(( 0.666666667f)*PI11 +( -0.333333333f)*PI22+( -0.333333333f)*PI33) ;
f2 = feq2 +0.25f *(( -0.333333333f)*PI11 +( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f3 = feq3 +0.25f *(( 0.666666667f)*PI11 +( -0.333333333f)*PI22+( -0.333333333f)*PI33) ;
f4 = feq4 +0.25f *(( -0.333333333f)*PI11 +( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f5 = feq5 +0.125f*(( 0.666666667f)*PI11+2.0f*( PI12 )+( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f6 = feq6 +0.125f*(( 0.666666667f)*PI11+2.0f*(-PI12 )+( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f7 = feq7 +0.125f*(( 0.666666667f)*PI11+2.0f*( PI12 )+( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f8 = feq8 +0.125f*(( 0.666666667f)*PI11+2.0f*(-PI12 )+( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f9 = feq9 +0.25f *(( -0.333333333f)*PI11 +( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f10 = feq10+0.125f*(( 0.666666667f)*PI11+2.0f*( + PI13 )+( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f11 = feq11+0.125f*(( -0.333333333f)*PI11+2.0f*( + PI23)+( 0.666666667f)*PI22+( 0.666666667f)*PI33) ;
f12 = feq12+0.125f*(( 0.666666667f)*PI11+2.0f*( +-PI13 )+( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f13 = feq13+0.125f*(( -0.333333333f)*PI11+2.0f*( +-PI23)+( 0.666666667f)*PI22+( 0.666666667f)*PI33) ;
f14 = feq14+0.25f *(( -0.333333333f)*PI11 +( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f15 = feq15+0.125f*(( 0.666666667f)*PI11+2.0f*( +-PI13 )+( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f16 = feq16+0.125f*(( -0.333333333f)*PI11+2.0f*( +-PI23)+( 0.666666667f)*PI22+( 0.666666667f)*PI33) ;
f17 = feq17+0.125f*(( 0.666666667f)*PI11+2.0f*( + PI13 )+( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f18 = feq18+0.125f*(( -0.333333333f)*PI11+2.0f*( + PI23)+( 0.666666667f)*PI22+( 0.666666667f)*PI33) ;
}
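// Pressure outlet for the east face (non-regularized variant): rho = 1, v = w = 0,
// u extrapolated from the known populations; all 19 populations are then reset
// to the second-order equilibrium at (rho, u, 0, 0).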
__device__ void NeumannEast(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z)
{
if(y == 0 && z == 0){
f2 = f4;
f13 = f18;
f11 = f18;
f16 = f18;
f5 = f8;
f9 = f14;
f10 = f15;
}
else if(y == 0 && z == ZDIM-1){
f2 = f4;
f11 = f13;
f18 = f13;
f16 = f13;
f5 = f8;
f14 = f9;
f15 = f10;
}
else if(y == YDIM-1 && z == 0){
f4 = f2;
f18 = f16;
f11 = f16;
f13 = f16;
f8 = f5;
f9 = f14;
f10 = f15;
}
else if(y == YDIM-1 && z == ZDIM-1){
f4 = f2;
f13 = f11;
f16 = f11;
f18 = f11;
f8 = f5;
f14 = f9;
f15 = f10;
}
else{
if(y == 0){
f2 = f4;
// f6 = f7;
f11 = f13;
f16 = f18;
f5 = f8;
}
else if(y == YDIM-1){
f4 = f2;
// f7 = f6;
f13 = f11;
f18 = f16;
f8 = f5;
}
if(z == 0){
f9 = f14;
f10 = f15;
f11 = f16;
// f12 = f17;
f13 = f18;
}
else if(z == ZDIM-1){
f14 = f9;
f15 = f10;
f16 = f11;
// f17 = f12;
f18 = f13;
}
}
float u,v,w;//,rho;
float rho = 1.0f;
v = 0.0f;
w = 0.0f;
u = -rho+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f1+f8+f5+f10+f15)); //D2Q9i
float u2 = u*u;
float v2 = v*v;
float w2 = w*w;
float usqr = u2+v2+w2;
// f3 = f1 -0.333333333f*u;
// f7 = f5 -0.166666667f*(u+v);
// f6 = f8 -0.166666667f*(u-v);
// f17= f10-0.166666667f*(u+w);
// f12= f15-0.166666667f*(u-w);
f0 = 1.0f/3.0f*(rho-1.5f*usqr);
f1 = 1.0f/18.0f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);
f2 = 1.0f/18.0f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr);
f3 = 1.0f/18.0f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
f4 = 1.0f/18.0f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr);
f5 = 1.0f/36.0f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr);
f6 = 1.0f/36.0f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
f7 = 1.0f/36.0f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
f8 = 1.0f/36.0f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr);
f9 = 1.0f/18.0f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr);
f10= 1.0f/36.0f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr);
f11= 1.0f/36.0f*(rho+3.0f*(v+w)+4.5f*(v+w)*(v+w)-1.5f*usqr);
f12= 1.0f/36.0f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
f13= 1.0f/36.0f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr);
f14= 1.0f/18.0f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr);
f15= 1.0f/36.0f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr);
f16= 1.0f/36.0f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr);
f17= 1.0f/36.0f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
f18= 1.0f/36.0f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr);
}
__device__ void NeumannEast_Reg(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z)
{
if(y == 0 && z == 0){
f2 = f4;
f13 = f18;
f11 = f18;
f16 = f18;
f5 = f8;
f9 = f14;
f10 = f15;
}
else if(y == 0 && z == ZDIM-1){
f2 = f4;
f11 = f13;
f18 = f13;
f16 = f13;
f5 = f8;
f14 = f9;
f15 = f10;
}
else if(y == YDIM-1 && z == 0){
f4 = f2;
f18 = f16;
f11 = f16;
f13 = f16;
f8 = f5;
f9 = f14;
f10 = f15;
}
else if(y == YDIM-1 && z == ZDIM-1){
f4 = f2;
f13 = f11;
f16 = f11;
f18 = f11;
f8 = f5;
f14 = f9;
f15 = f10;
}
else{
if(y == 0){
f2 = f4;
// f6 = f7;
f11 = f13;
f16 = f18;
f5 = f8;
}
else if(y == YDIM-1){
f4 = f2;
// f7 = f6;
f13 = f11;
f18 = f16;
f8 = f5;
}
if(z == 0){
f9 = f14;
f10 = f15;
f11 = f16;
// f12 = f17;
f13 = f18;
}
else if(z == ZDIM-1){
f14 = f9;
f15 = f10;
f16 = f11;
// f17 = f12;
f18 = f13;
}
}
float u,v,w;//,rho;
float rho = 1.0f;
v = 0.0f;
w = 0.0f;
u = -rho+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f1+f8+f5+f10+f15)); //D2Q9i
// float u2 = u*u;
// float v2 = v*v;
// float w2 = w*w;
// float usqr = u2+v2+w2;
f3 = f1 -0.333333333f*u;
f7 = f5 -0.166666667f*(u+v);
f6 = f8 -0.166666667f*(u-v);
f17= f10-0.166666667f*(u+w);
f12= f15-0.166666667f*(u-w);
// f3 =(0.1031746045f*rho+ -0.0231796391f*usqr+-(0.166666667f*u) + 0.16666667f*u2)+
// (f1-(0.1031746045f*rho+ -0.0231796391f*usqr+ (0.166666667f*u) + 0.16666667f*u2));
// f7 =(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u-v +u2+(v2-w2))+ 0.25f*u*v)+
// (f5-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u+v +u2+(v2-w2))+ 0.25f*u*v));
// f6 =(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u+v +u2+(v2-w2))+ -0.25f*u*v)+
// (f8-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u-v +u2+(v2-w2))+ -0.25f*u*v));
// f17=(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u-w +u2+(v2-w2))+ 0.25f*u*w)+
// (f10-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u+w +u2+(v2-w2))+ 0.25f*u*w));
// f12=(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u+w +u2+(v2-w2))+ -0.25f*u*w)+
// (f15-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u-w +u2+(v2-w2))+ -0.25f*u*w));
// f1 =(0.1031746045f*rho+ -0.0231796391f*usqr+ (0.166666667f*u) + 0.16666667f*u2)+
// (f3-(0.1031746045f*rho+ -0.0231796391f*usqr+-(0.166666667f*u) + 0.16666667f*u2));
// f5 =(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u+v +u2+(v2-w2))+ 0.25f*u*v)+
// (f7-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u-v +u2+(v2-w2))+ 0.25f*u*v));
// f8 =(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u-v +u2+(v2-w2))+ -0.25f*u*v)+
// (f6-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u+v +u2+(v2-w2))+ -0.25f*u*v));
// f10=(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u+w +u2+(v2-w2))+ 0.25f*u*w)+
// (f17-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u-w +u2+(v2-w2))+ 0.25f*u*w));
// f15=(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u-w +u2+(v2-w2))+ -0.25f*u*w)+
// (f12-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u+w +u2+(v2-w2))+ -0.25f*u*w));
// float PI11 = f1 +f3 +f5 +f6 +f7 +f8 +f10+f12+f15+f17;
// float PI22 = f2 +f4 +f5 +f6 +f7 +f8 +f11+f13+f16+f18;
// float PI33 = f9 +f10+f11+f12+f13+f14+f15+f16+f17+f18;
}
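// Dirichlet velocity boundary (u = UMAX, v = w = 0) using bounce-back-style
// corrections for the y==YDIM-1 face: the populations entering the domain
// (f4, f7, f8, f13, f18) are rebuilt from their mirrored counterparts plus a
// momentum correction. DirichletSouth_Reg below is the y==0 counterpart.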
__device__ void DirichletNorth_Reg(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z)
{
// if(x == 0){
// f2 = f4;
// f6 = f7;
// f11 = f13;
// f16 = f18;
// }
// else if(x == XDIM-1){
// f4 = f2;
// f7 = f6;
// f13 = f11;
// f18 = f16;
// }
if(z == 0){
f9 = f14;
f10 = f15;
f11 = f16;
f12 = f17;
f13 = f18;
}
else if(z == ZDIM-1){
f14 = f9;
f15 = f10;
f16 = f11;
f17 = f12;
f18 = f13;
}
float u,v,w;//,rho;
u = UMAX;
v = 0.0f;//0.0;
w = 0.0f;
// float rho = u+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f3+f6+f7+f12+f17)); //D2Q9i
// float u2 = u*u;
// float v2 = v*v;
// float w2 = w*w;
// float usqr = u2+v2+w2;
// f1 =(0.166666667f*u)+
// (f3-(-(0.166666667f*u)));
f4 = f2-0.33333333f*v;
// f5 =(0.0833333333f*( u+v))+
// (f7-(0.0833333333f*(-u-v)));
f7 = f5-0.166666667f*(u+v);
// f8 =(0.0833333333f*( u-v ))+
// (f6-(0.0833333333f*(-u+v )));
f8 = f6+0.166666667f*(u-v);
// f10=(0.0833333333f*( u+w))+
// (f17-(0.0833333333f*(-u-w)));
f13= f16-0.166666667f*(v-w);
// f15=(0.0833333333f*( u-w))+
// (f12-(0.0833333333f*(-u+w)));
f18= f11-0.166666667f*(v+w);
//
//float feq0 = 0.1904761791f*rho+-0.597127747f*usqr
//float feq1 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*u + 0.055555556f*(2.f*u*u-(v*v+w*w));
//float feq2 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*v +-0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
//float feq3 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*u + 0.055555556f*(2.f*u*u-(v*v+w*w));
//float feq4 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*v +-0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
//float feq5 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
//float feq6 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
//float feq7 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
//float feq8 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
//float feq9 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*w; +-0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w)
//float feq10= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+w)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
//float feq11= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v+w)+-0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
//float feq12= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-w)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
//float feq13= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v-w)+-0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ;
//float feq14= 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*w +-0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w)
//float feq15= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(u-w) + 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
//float feq16= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(v-w) +-0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ;
//float feq17= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(u+w) + 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
//float feq18= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(v+w) +-0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
//
// float PI11 = f1 +f3 +f5 +f6 +f7 +f8 +f10+f12+f15+f17;
// float PI22 = f2 +f4 +f5 +f6 +f7 +f8 +f11+f13+f16+f18;
// float PI33 = f9 +f10+f11+f12+f13+f14+f15+f16+f17+f18;
}
__device__ void DirichletSouth_Reg(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z)
{
// if(x == 0){
// f2 = f4;
// f6 = f7;
// f11 = f13;
// f16 = f18;
// }
// else if(x == XDIM-1){
// f4 = f2;
// f7 = f6;
// f13 = f11;
// f18 = f16;
// }
if(z == 0){
f9 = f14;
f10 = f15;
f11 = f16;
f12 = f17;
f13 = f18;
}
else if(z == ZDIM-1){
f14 = f9;
f15 = f10;
f16 = f11;
f17 = f12;
f18 = f13;
}
float u,v,w;//,rho;
u = UMAX;
v = 0.0f;//0.0;
w = 0.0f;
// float rho = u+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f3+f6+f7+f12+f17)); //D2Q9i
// float u2 = u*u;
// float v2 = v*v;
// float w2 = w*w;
// float usqr = u2+v2+w2;
f2 = f4 +0.33333333f*v;
f5 = f7 +0.166666667f*(u+v);
f6 = f8 -0.166666667f*(u-v);
f16= f13+0.166666667f*(v-w);
f11= f18+0.166666667f*(v+w);
//
//float feq0 = 0.1904761791f*rho+-0.597127747f*usqr
//float feq1 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*u + 0.055555556f*(2.f*u*u-(v*v+w*w));
//float feq2 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*v +-0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
//float feq3 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*u + 0.055555556f*(2.f*u*u-(v*v+w*w));
//float feq4 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*v +-0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
//float feq5 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
//float feq6 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
//float feq7 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
//float feq8 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
//float feq9 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*w; +-0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w)
//float feq10= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+w)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
//float feq11= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v+w)+-0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
//float feq12= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-w)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
//float feq13= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v-w)+-0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ;
//float feq14= 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*w +-0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w)
//float feq15= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(u-w) + 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
//float feq16= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(v-w) +-0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ;
//float feq17= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(u+w) + 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
//float feq18= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(v+w) +-0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
//
// float PI11 = f1 +f3 +f5 +f6 +f7 +f8 +f10+f12+f15+f17;
// float PI22 = f2 +f4 +f5 +f6 +f7 +f8 +f11+f13+f16+f18;
// float PI33 = f9 +f10+f11+f12+f13+f14+f15+f16+f17+f18;
}
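// Free-slip (mirror) symmetry planes. The x/y/z "bot" and "top" helpers assign
// the populations whose lattice velocity points into the domain from their mirror
// images across the boundary plane (e.g. xsymmetry_bot sets f1=f3, f5=f6, f8=f7,
// f10=f12, f15=f17 at x==0); the edge/corner branches patch populations that
// would otherwise be left undefined.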
__device__ void xsymmetry_bot(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z)
{
if(y == 0 && z == 0){
f2 = f4;
f13=f18;
f11=f18;
f16=f18;
f6 =f7;
f9 =f14;
f12=f17;
}
else if(y == 0 && z == ZDIM-1){
f4 = f2;
f11=f13;
f18=f13;
f16=f13;
f6 =f7;
f14=f9;
f17=f12;
}
else if(y == YDIM-1 && z == 0){
f4 = f2;
f11=f16;
f18=f16;
f13=f16;
f7 =f6;
f9 =f14;
f12=f17;
}
else if(y == YDIM-1 && z == ZDIM-1){
f4 = f2;
f16=f11;
f18=f11;
f13=f11;
f7 =f6;
f14=f9;
f17=f12;
}
else{
if(y == 0){
f2 = f4;
f11=f13;
f16=f18;
f8 = f5;
}
else if(y == YDIM-1){
f4=f2 ;
f13=f11;
f18=f16;
f5=f8 ;
}
// if(z == 0){
// f9 = f14;
// f10 = f15;
// f11 = f16;
// f12 = f17;
// f13 = f18;
// }
// else if(z == ZDIM-1){
// f14 = f9;
// f15 = f10;
// f16 = f11;
// f17 = f12;
// f18 = f13;
// }
}
f1 = f3 ;
f5 = f6 ;
f8 = f7 ;
f10= f12;
f15= f17;
}
__device__ void xsymmetry_top(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z)
{
if(y == 0 && z == 0){
f2 = f4;
f13 = f18;
f11 = f18;
f16 = f18;
f5 = f8;
f9 = f14;
f10 = f15;
}
else if(y == 0 && z == ZDIM-1){
f2 = f4;
f11 = f13;
f18 = f13;
f16 = f13;
f5 = f8;
f14 = f9;
f15 = f10;
}
else if(y == YDIM-1 && z == 0){
f4 = f2;
f18 = f16;
f11 = f16;
f13 = f16;
f8 = f5;
f9 = f14;
f10 = f15;
}
else if(y == YDIM-1 && z == ZDIM-1){
f4 = f2;
f13 = f11;
f16 = f11;
f18 = f11;
f8 = f5;
f14 = f9;
f15 = f10;
}
else{
if(y == 0){
f2 = f4;
f11 = f13;
f16 = f18;
f5 = f8;
}
else if(y == YDIM-1){
f4 = f2;
f13 = f11;
f18 = f16;
f8 = f5;
}
// else if(z == 0){
// f9 = f14;
// f10 = f15;
// f11 = f16;
// f12 = f17;
// f13 = f18;
// }
// else if(z == ZDIM-1){
// f14 = f9;
// f15 = f10;
// f16 = f11;
// f17 = f12;
// f18 = f13;
// }
}
f3 = f1 ;
f6 = f5 ;
f7 = f8 ;
f12= f10;
f17= f15;
}
__device__ void ysymmetry_top(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int z)
{
if(z == 0){
f9 = f14;
f10= f15;
f11= f16;
f12= f17;
f13= f18;
}
if(z == ZDIM-1){
f14= f9 ;
f15= f10;
f16= f11;
f17= f12;
f18= f13;
}
f4 = f2 ;
f7 = f6 ;
f8 = f5 ;
f13= f11;
f18= f16;
}
__device__ void ysymmetry_bot(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int z)
{
if(z == 0){
f9 = f14;
f10= f15;
f11= f16;
f12= f17;
f13= f18;
}
if(z == ZDIM-1){
f14= f9 ;
f15= f10;
f16= f11;
f17= f12;
f18= f13;
}
f2 = f4 ;
f6 = f7 ;
f5 = f8 ;
f11= f13;
f16= f18;
}
__device__ void zsymmetry_top(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y)
{
if(y == 0){
f2 = f4 ;
f6 = f7 ;
f5 = f8 ;
f11= f13;
f16= f18;
}
if(y == YDIM-1){
f4 = f2 ;
f7 = f6 ;
f8 = f5 ;
f13= f11;
f18= f16;
}
f14= f9 ;
f15= f10;
f16= f11;
f17= f12;
f18= f13;
}
__device__ void zsymmetry_bot(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y)
{
if(y == 0){
f2 = f4 ;
f6 = f7 ;
f5 = f8 ;
f11= f13;
f16= f18;
}
if(y == YDIM-1){
f4 = f2 ;
f7 = f6 ;
f8 = f5 ;
f13= f11;
f18= f16;
}
f9 = f14;
f10= f15;
f11= f16;
f12= f17;
f13= f18;
}
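// Boundary-condition dispatch for the plain kernels. The im flag selects the
// symmetry planes: 21/22 = y-symmetry top/bot, 23/24 = z-symmetry top/bot,
// 25/26 = x-symmetry top/bot. The inlet/outlet branches are kept here commented
// out and are applied in boundaries_force() instead.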
inline __device__ void boundaries(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z, int im)
{
// if(im == 3)//DirichletWest
// {
// DirichletWest(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// if(im == 53)//DirichletWest
// {
// //DirichletWest_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// DirichletWest_Regularized(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 54)//DirichletWest
// {
// //NeumannEast(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// NeumannEast_Regularized(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// if(im == 4)//DirichletWest
// {
// NeumannEast(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// if(im == 13)//DirichletWest
// {
// DirichletWest_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 14)//DirichletWest
// {
// NeumannEast_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 15)//DirichletNorth
// {
// DirichletNorth_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// if(im == 16)//DirichletSouth
// {
// DirichletSouth_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
if(im == 21)//ysymm top
{
ysymmetry_top(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,z);
}
else if(im == 22)//ysymm bot
{
ysymmetry_bot(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,z);
}
else if(im == 23)//zsymm top
{
zsymmetry_top(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y);
}
else if(im == 24)//zsymm bot
{
zsymmetry_bot(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y);
}
else if(im == 25)//xsymm top
{
xsymmetry_top(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
}
else if(im == 26)//xsymm bot
{
xsymmetry_bot(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
}
}
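// Same dispatch as boundaries(), presumably used by the force-accumulating
// kernels; in addition to the symmetry planes it applies im == 53
// (DirichletWest_Reg velocity inlet) and im == 54 (NeumannEast pressure outlet).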
inline __device__ void boundaries_force(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z, int im)
{
// if(im == 3)//DirichletWest
// {
// DirichletWest(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
if(im == 53)//DirichletWest
{
DirichletWest_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
//DirichletWest_Regularized(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
}
else if(im == 54)//NeumannEast
{
NeumannEast(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
//NeumannEast_Regularized(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
}
// else if(im == 15)//DirichletNorth
// {
// DirichletNorth_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 16)//DirichletSouth
// {
// DirichletSouth_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
else if(im == 21)//ysymm top
{
ysymmetry_top(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,z);
}
else if(im == 22)//ysymm bot
{
ysymmetry_bot(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,z);
}
else if(im == 23)//zsymm top
{
zsymmetry_top(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y);
}
else if(im == 24)//zsymm bot
{
zsymmetry_bot(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y);
}
else if(im == 25)//xsymm top
{
xsymmetry_top(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
}
else if(im == 26)//xsymm bot
{
xsymmetry_bot(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
}
}
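// Extrapolation-style reconstruction helper (named for the North boundary):
// keeps the local velocity (u,v,w) computed from the current populations, forces
// rho = 1, maps to the D3Q19 MRT moment basis (m1..m18) and rebuilds all 19
// populations from those moments. South_Extrap below does the same with a
// prescribed v and u = w = 0 while keeping the local rho.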
inline __device__ void North_Extrap(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, float rho)
{
rho = 1.0f;
float u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
float v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
float w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
float m1,m2,m4,m6,m8,m9,m10,m11,m12,m13,m14,m15,m16,m17,m18;
m1 = -30.f*f0+-11.f*f1+-11.f*f2+-11.f*f3+-11.f*f4+ 8.f*f5+ 8.f*f6+ 8.f*f7+ 8.f*f8+-11.f*f9+ 8.f*f10+ 8.f*f11+ 8.f*f12+ 8.f*f13+-11.f*f14+ 8.f*f15+ 8.f*f16+ 8.f*f17+ 8.f*f18;
m2 = 12.f*f0+ -4.f*f1+ -4.f*f2+ -4.f*f3+ -4.f*f4+ f5+ f6+ f7+ f8+ -4.f*f9+ f10+ f11+ f12+ f13+ -4.f*f14+ f15+ f16+ f17+ f18;
m4 = -4.f*f1 + 4.f*f3 + f5+ - f6+ - f7+ f8 + f10 + - f12 + f15 + - f17 ;
m6 = -4.f*f2 + 4.f*f4+ f5+ f6+ - f7+ - f8 + f11 + - f13 + f16 + - f18;
m8 = + -4.f*f9+ f10+ f11+ f12+ f13+ 4.f*f14+ - f15+ - f16+ - f17+ - f18;
m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+ - f9+ f10+ -2.f*f11+ f12+ -2.f*f13+ - f14+ f15+ -2.f*f16+ f17+ -2.f*f18;
m10 = -4.f*f1+ 2.f*f2+ -4.f*f3+ 2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ f10+ -2.f*f11+ f12+ -2.f*f13+ 2.f*f14+ f15+ -2.f*f16+ f17+ -2.f*f18;
m11 = f2 + f4+ f5+ f6+ f7+ f8+ - f9+ - f10 + - f12 + - f14+ - f15 + - f17 ;
m12 = -2.f*f2 -2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ - f10 + - f12 + 2.f*f14+ - f15 + - f17 ;
m13 = f5+ - f6+ f7+ - f8 ;
m14 = f11 + - f13 + - f16 + f18;
m15 = f10 + - f12 + - f15 + f17 ;
m16 = f5+ - f6+ - f7+ f8 - f10 + f12 + - f15 + f17 ;
m17 = - f5+ - f6+ f7+ f8 + f11 + - f13 + f16 + - f18;
m18 = f10+ - f11+ f12+ - f13 + - f15+ f16+ - f17+ f18;
f0 =(0.052631579f*rho +- 0.012531328f*(m1)+ 0.047619048f*(m2));
f1 =(0.052631579f*rho+ 0.1f*u +-0.0045948204f*(m1)+-0.015873016f*(m2)+ -0.1f*(m4) + 0.055555556f*((m9)-m10));
f2 =(0.052631579f*rho + 0.1f*v +-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m6) +-0.027777778f*((m9)-m10)+ 0.083333333f*((m11)-m12));
f3 =(0.052631579f*rho+ -0.1f*u +-0.0045948204f*(m1)+-0.015873016f*(m2)+ 0.1f*(m4) + 0.055555556f*((m9)-m10));
f4 =(0.052631579f*rho + -0.1f*v +-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m6) +-0.027777778f*((m9)-m10)+ 0.083333333f*((m11)-m12));
f5 =(0.052631579f*rho+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16-m17)+ (0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13))));
f6 =(0.052631579f*rho+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16-m17)+ (0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13))));
f7 =(0.052631579f*rho+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16+m17)+ (0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13))));
f8 =(0.052631579f*rho+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16+m17)+ (0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13))));
f9 =(0.052631579f*rho + 0.1f*w+-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m8)+-0.027777778f*((m9)-m10)+-0.083333333f*((m11)-m12));
f10=(0.052631579f*rho+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16+m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15))));
f11=(0.052631579f*rho + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6+m8)+0.125f*( m17-m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +( 0.25f*(m14))));
f12=(0.052631579f*rho+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16+m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15))));
f13=(0.052631579f*rho + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6-m8)+0.125f*(-m17-m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +(-0.25f*(m14))));
f14=(0.052631579f*rho + -0.1f*w+-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m8)+-0.027777778f*((m9)-m10)+-0.083333333f*((m11)-m12));
f15=(0.052631579f*rho+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16-m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15))));
f16=(0.052631579f*rho + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6-m8)+0.125f*( m17+m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +(-0.25f*(m14))));
f17=(0.052631579f*rho+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16-m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15))));
f18=(0.052631579f*rho + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6+m8)+0.125f*(-m17+m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +( 0.25f*(m14))));
}
inline __device__ void South_Extrap(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, float v)
{
float rho,u,w;
rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+f10+f11+f12+f13+f14+f15+f16+f17+f18;
u = 0.f;//f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
w = 0.f;//f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
float m1,m2,m4,m6,m8,m9,m10,m11,m12,m13,m14,m15,m16,m17,m18;
m1 = -30.f*f0+-11.f*f1+-11.f*f2+-11.f*f3+-11.f*f4+ 8.f*f5+ 8.f*f6+ 8.f*f7+ 8.f*f8+-11.f*f9+ 8.f*f10+ 8.f*f11+ 8.f*f12+ 8.f*f13+-11.f*f14+ 8.f*f15+ 8.f*f16+ 8.f*f17+ 8.f*f18;
m2 = 12.f*f0+ -4.f*f1+ -4.f*f2+ -4.f*f3+ -4.f*f4+ f5+ f6+ f7+ f8+ -4.f*f9+ f10+ f11+ f12+ f13+ -4.f*f14+ f15+ f16+ f17+ f18;
m4 = -4.f*f1 + 4.f*f3 + f5+ - f6+ - f7+ f8 + f10 + - f12 + f15 + - f17 ;
m6 = -4.f*f2 + 4.f*f4+ f5+ f6+ - f7+ - f8 + f11 + - f13 + f16 + - f18;
m8 = + -4.f*f9+ f10+ f11+ f12+ f13+ 4.f*f14+ - f15+ - f16+ - f17+ - f18;
m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+ - f9+ f10+ -2.f*f11+ f12+ -2.f*f13+ - f14+ f15+ -2.f*f16+ f17+ -2.f*f18;
m10 = -4.f*f1+ 2.f*f2+ -4.f*f3+ 2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ f10+ -2.f*f11+ f12+ -2.f*f13+ 2.f*f14+ f15+ -2.f*f16+ f17+ -2.f*f18;
m11 = f2 + f4+ f5+ f6+ f7+ f8+ - f9+ - f10 + - f12 + - f14+ - f15 + - f17 ;
m12 = -2.f*f2 -2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ - f10 + - f12 + 2.f*f14+ - f15 + - f17 ;
m13 = f5+ - f6+ f7+ - f8 ;
m14 = f11 + - f13 + - f16 + f18;
m15 = f10 + - f12 + - f15 + f17 ;
m16 = f5+ - f6+ - f7+ f8 - f10 + f12 + - f15 + f17 ;
m17 = - f5+ - f6+ f7+ f8 + f11 + - f13 + f16 + - f18;
m18 = f10+ - f11+ f12+ - f13 + - f15+ f16+ - f17+ f18;
f0 =(0.052631579f*rho +- 0.012531328f*(m1)+ 0.047619048f*(m2));
f1 =(0.052631579f*rho+ 0.1f*u +-0.0045948204f*(m1)+-0.015873016f*(m2)+ -0.1f*(m4) + 0.055555556f*((m9)-m10));
f2 =(0.052631579f*rho + 0.1f*v +-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m6) +-0.027777778f*((m9)-m10)+ 0.083333333f*((m11)-m12));
f3 =(0.052631579f*rho+ -0.1f*u +-0.0045948204f*(m1)+-0.015873016f*(m2)+ 0.1f*(m4) + 0.055555556f*((m9)-m10));
f4 =(0.052631579f*rho + -0.1f*v +-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m6) +-0.027777778f*((m9)-m10)+ 0.083333333f*((m11)-m12));
f5 =(0.052631579f*rho+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16-m17)+ (0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13))));
f6 =(0.052631579f*rho+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16-m17)+ (0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13))));
f7 =(0.052631579f*rho+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16+m17)+ (0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13))));
f8 =(0.052631579f*rho+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16+m17)+ (0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13))));
f9 =(0.052631579f*rho + 0.1f*w+-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m8)+-0.027777778f*((m9)-m10)+-0.083333333f*((m11)-m12));
f10=(0.052631579f*rho+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16+m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15))));
f11=(0.052631579f*rho + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6+m8)+0.125f*( m17-m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +( 0.25f*(m14))));
f12=(0.052631579f*rho+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16+m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15))));
f13=(0.052631579f*rho + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6-m8)+0.125f*(-m17-m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +(-0.25f*(m14))));
f14=(0.052631579f*rho + -0.1f*w+-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m8)+-0.027777778f*((m9)-m10)+-0.083333333f*((m11)-m12));
f15=(0.052631579f*rho+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16-m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15))));
f16=(0.052631579f*rho + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6-m8)+0.125f*( m17+m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +(-0.25f*(m14))));
f17=(0.052631579f*rho+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16-m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15))));
f18=(0.052631579f*rho + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6+m8)+0.125f*(-m17+m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +( 0.25f*(m14))));
}
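// Host-side helper: elapsed wall-clock time between two gettimeofday() samples.
// Stores (x - y) in seconds as a double in *result and returns 1 if the
// difference is negative.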
int
timeval_subtract (double *result, struct timeval *x, struct timeval *y)
{
struct timeval result0;
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec) {
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000) {
int nsec = (y->tv_usec - x->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
tv_usec is certainly positive. */
result0.tv_sec = x->tv_sec - y->tv_sec;
result0.tv_usec = x->tv_usec - y->tv_usec;
*result = ((double)result0.tv_usec)/1e6 + (double)result0.tv_sec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
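// Index clamping helpers for the address functions below: dmax()/dmin() clamp a
// linear index into [0, b-1], while the _p variants wrap it periodically. They
// keep the offsets computed in f_mem()/buff_mem() inside the allocated arrays.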
__device__ int dmin(int a, int b)
{
if (a<b) return a;
else return b-1;
}
__device__ int dmax(int a)
{
if (a>-1) return a;
else return 0;
}
__device__ int dmin_p(int a, int b)
{
if (a<b) return a;
else return 0;
}
__device__ int dmax_p(int a, int b)
{
if (a>-1) return a;
else return b-1;
}
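// Linear offset of distribution f_num at lattice site (x,y,z) in the pitch-strided
// per-GPU array (zInner interior slices). As an illustration (values hypothetical),
// f_mem(2, 5, 3, 1, pitch, zInner) evaluates to
//   5 + 3*pitch + 1*YDIM*pitch + 2*pitch*YDIM*zInner
// before clamping. f_memLR, buff_mem and buff_memLR are the refined-grid and
// single-slice buffer counterparts.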
inline __device__ int f_mem(int f_num, int x, int y, int z, size_t pitch, int zInner)
{
int index = (x+y*pitch+z*YDIM*pitch)+f_num*pitch*YDIM*(zInner);
index = dmax(index);
index = dmin(index,19*pitch*YDIM*(zInner));
// if(index<0) index = 0;
// else if(index>19*pitch*YDIM*ZDIM/GPU_N-2) index = 19*pitch*(YDIM*ZDIM/GPU_N-2);
return index;
}
inline __device__ int f_memLR(int f_num, int x, int y, int z, size_t pitch, int zInner)
{
int index = (x+y*pitch+z*YLRDIM*pitch)+f_num*pitch*YLRDIM*(zInner);
index = dmax(index);
index = dmin(index,19*pitch*YLRDIM*(zInner));
return index;
}
inline __device__ int buff_mem(int f_num, int x, int y, size_t pitch)
{
int index = (x+y*pitch)+f_num*pitch*YDIM;
index = dmax(index);
index = dmin(index,19*pitch*YDIM);
// if(index<0) index = 0;
// else if(index>19*pitch*YDIM) index = 19*pitch*YDIM;
return index;
}
inline __device__ int buff_memLR(int f_num, int x, int y, size_t pitch)
{
int index = (x+y*pitch)+f_num*pitch*YLRDIM;
index = dmax(index);
index = dmin(index,19*pitch*YLRDIM);
return index;
}
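// Second-order D3Q19 equilibrium in the linearized (incompressible-style) form
//   feq_i = w_i*( rho + 3 c_i.u + 4.5 (c_i.u)^2 - 1.5 u.u ),
// with weights w = 1/3 (rest), 1/18 (axis links), 1/36 (diagonal links).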
inline __device__ void bgk_feq(float rho, float u, float v, float w, float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18)
{
float usqr = u*u+v*v+w*w;
f0 = 1.0f/3.0f*(rho-1.5f*usqr);
f1 = 1.0f/18.0f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);
f2 = 1.0f/18.0f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr);
f3 = 1.0f/18.0f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
f4 = 1.0f/18.0f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr);
f5 = 1.0f/36.0f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr);
f6 = 1.0f/36.0f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
f7 = 1.0f/36.0f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
f8 = 1.0f/36.0f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr);
f9 = 1.0f/18.0f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr);
f10= 1.0f/36.0f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr);
f11= 1.0f/36.0f*(rho+3.0f*(v+w)+4.5f*(v+w)*(v+w)-1.5f*usqr);
f12= 1.0f/36.0f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
f13= 1.0f/36.0f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr);
f14= 1.0f/18.0f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr);
f15= 1.0f/36.0f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr);
f16= 1.0f/36.0f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr);
f17= 1.0f/36.0f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
f18= 1.0f/36.0f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr);
}
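// Equilibrium populations written through the MRT moment expansion used by
// mrt_collide(): the leading terms carry the density/energy part and the +=
// block adds the second-order stress (u_a u_b) contributions per direction.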
inline __device__ void mrt_feq(float rho, float u, float v, float w, float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18)
{
float usqr = u*u+v*v+w*w;
f0 = 0.1904761791f*rho+-0.597127747f*usqr ;
f1 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*u ;
f2 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*v ;
f3 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*u ;
f4 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*v ;
f5 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+v);
f6 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-v);
f7 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+v);
f8 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-v);
f9 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*w ;
f10= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+w);
f11= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v+w);
f12= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-w);
f13= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v-w);
f14= 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*w ;
f15= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-w);
f16= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v-w);
f17= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+w);
f18= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v+w);
f1 += 0.055555556f*(2.f*u*u-(v*v+w*w));
f2 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
f3 += 0.055555556f*(2.f*u*u-(v*v+w*w));
f4 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
f5 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
f6 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
f7 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
f8 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
f9 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
f10+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
f11+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
f12+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
f13+= -0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ;
f14+= -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
f15+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
f16+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ;
f17+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
f18+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
}
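// Single-relaxation-time (BGK) collision: recomputes rho and (u,v,w) from the
// post-streaming populations and relaxes each one toward equilibrium,
//   f_i <- f_i - omega*(f_i - feq_i).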
inline __device__ void bgk_collide(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, float omega)
{
float rho,u,v,w;
rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9;
rho +=f10+f11+f12+f13+f14+f15+f16+f17+f18;
u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
float usqr = u*u+v*v+w*w;
// f0 =(1.f-omega)*f0 +omega*(0.3333333333f*(rho-1.5f*usqr));
// f1 =(1.f-omega)*f1 +omega*(0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr));
// f2 =(1.f-omega)*f2 +omega*(0.0555555556f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr));
// f3 =(1.f-omega)*f3 +omega*(0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr));
// f4 =(1.f-omega)*f4 +omega*(0.0555555556f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr));
// f5 =(1.f-omega)*f5 +omega*(0.0277777778f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr));
// f6 =(1.f-omega)*f6 +omega*(0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr));
// f7 =(1.f-omega)*f7 +omega*(0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr));
// f8 =(1.f-omega)*f8 +omega*(0.0277777778f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr));
// f9 =(1.f-omega)*f9 +omega*(0.0555555556f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr));
// f10=(1.f-omega)*f10+omega*(0.0277777778f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr));
// f11=(1.f-omega)*f11+omega*(0.0277777778f*(rho+3.0f*(v+w)+4.5f*(v+w)*(u+w)-1.5f*usqr));
// f12=(1.f-omega)*f12+omega*(0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr));
// f13=(1.f-omega)*f13+omega*(0.0277777778f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(u+w)-1.5f*usqr));
// f14=(1.f-omega)*f14+omega*(0.0555555556f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr));
// f15=(1.f-omega)*f15+omega*(0.0277777778f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr));
// f16=(1.f-omega)*f16+omega*(0.0277777778f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr));
// f17=(1.f-omega)*f17+omega*(0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr));
// f18=(1.f-omega)*f18+omega*(0.0277777778f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr));
f0 -=omega*(f0 -0.3333333333f*(rho-1.5f*usqr));
f1 -=omega*(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr));
f2 -=omega*(f2 -0.0555555556f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr));
f3 -=omega*(f3 -0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr));
f4 -=omega*(f4 -0.0555555556f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr));
f5 -=omega*(f5 -0.0277777778f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr));
f6 -=omega*(f6 -0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr));
f7 -=omega*(f7 -0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr));
f8 -=omega*(f8 -0.0277777778f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr));
f9 -=omega*(f9 -0.0555555556f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr));
f10-=omega*(f10-0.0277777778f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr));
f11-=omega*(f11-0.0277777778f*(rho+3.0f*(v+w)+4.5f*(v+w)*(v+w)-1.5f*usqr));
f12-=omega*(f12-0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr));
f13-=omega*(f13-0.0277777778f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr));
f14-=omega*(f14-0.0555555556f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr));
f15-=omega*(f15-0.0277777778f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr));
f16-=omega*(f16-0.0277777778f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr));
f17-=omega*(f17-0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr));
f18-=omega*(f18-0.0277777778f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr));
}
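// Multiple-relaxation-time collision. Builds the non-conserved moments m1..m18
// relative to their equilibria, optionally adjusts omega with a Smagorinsky
// subgrid model when SmagLES == "YES" (Q = |PI_neq| from f - feq, then
// tau = tau0 + 0.5*(-tau0 + sqrt(tau0^2 + 18*CS*sqrt(2)*Q))), and finally relaxes
// the stress-related moments at rate omega, folding the result back onto the
// populations via the inverse MRT transform.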
inline __device__ void mrt_collide(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, float omega)
{
float u,v,w;
u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
float rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+
f10+f11+f12+f13+f14+f15+f16+f17+f18;
float usqr = u*u+v*v+w*w;
// u = rho*u;
// v = rho*v;
// w = rho*w;
float m1,m2,m4,m6,m8,m9,m10,m11,m12,m13,m14,m15,m16,m17,m18;
//COMPUTE M-MEQ
//m1 = -19.f*f0+ 19.f*f5+19.f*f6+19.f*f7+19.f*f8+19.f*f10+19.f*f11+19.f*f12+19.f*f13+19.f*f15+19.f*f16+19.f*f17+19.f*f18 -19.f*(u*u+v*v+w*w);//+8.f*(f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18);
//m4 = -3.33333333f*f1+3.33333333f*f3+1.66666667f*f5-1.66666667f*f6-1.66666667f*f7+1.66666667f*f8+1.66666667f*f10-1.66666667f*f12+1.66666667f*f15-1.66666667f*f17;
//m6 = -3.33333333f*f2+3.33333333f*f4+1.66666667f*f5+1.66666667f*f6-1.66666667f*f7-1.66666667f*f8+1.66666667f*f11-1.66666667f*f13+1.66666667f*f16-1.66666667f*f18;
//m8 = -3.33333333f*f9+1.66666667f*f10+1.66666667f*f11+1.66666667f*f12+1.66666667f*f13+3.33333333f*f14-1.66666667f*f15-1.66666667f*f16-1.66666667f*f17-1.66666667f*f18;
m1 = 19.f*(-f0+ f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18 -(u*u+v*v+w*w));//+8.f*(f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18);
m2 = 12.f*f0+ -4.f*f1+ -4.f*f2+ -4.f*f3+ -4.f*f4+ f5+ f6+ f7+ f8+ -4.f*f9+ f10+ f11+ f12+ f13+ -4.f*f14+ f15+ f16+ f17+ f18 +7.53968254f*(u*u+v*v+w*w);
// m4 = 1.666666667f*(-2.f*f1+2.f*f3+f5-f6-f7+f8+f10-f12+f15-f17);
// m6 = 1.666666667f*(-2.f*f2+2.f*f4+f5+f6-f7-f8+f11-f13+f16-f18);
// m8 = 1.666666667f*(-2.f*f9+f10+f11+f12+f13+2.f*f14-f15-f16-f17-f18);
m4 = 1.666666667f*(-3.f*f1+3.f*f3+u);
m6 = 1.666666667f*(-3.f*f2+3.f*f4+v);
m8 = 1.666666667f*(-3.f*f9+3.f*f14+w);
m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+- f9+ f10+ -2.f*f11+ f12+-2.f*f13+- f14+ f15+ -2.f*f16+ f17+-2.f*f18 -(2.f*u*u-(v*v+w*w));
m10 =-4.f*f1+ 2.f*f2+ -4.f*f3+ 2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ f10+ -2.f*f11+ f12+-2.f*f13+ 2.f*f14+ f15+ -2.f*f16+ f17+-2.f*f18;
m11 = f2 + f4+ f5+ f6+ f7+ f8+- f9+-f10 +-f12 +- f14+-f15 +-f17 -(v*v-w*w);
m12 = -2.f*f2 -2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+-f10 +-f12 + 2.f*f14+-f15 +-f17 ;
m13 = f5+-f6+ f7+-f8 -u*v;
m14 = f11 +- f13 + - f16 + f18 -v*w;
m15 = f10 + - f12 +-f15 + f17 -u*w;
m16 = f5+-f6+-f7+ f8 -f10 + f12 +-f15 + f17 ;
m17 = -f5+-f6+ f7+ f8 + f11 +- f13 + f16 +- f18;
m18 = f10+- f11+ f12+- f13 +-f15+ f16+-f17+ f18;
if(SmagLES == "YES"){
//// float PI11 = -1.0f/38.0f*( (m1)+19.0f*omega* (m9));
//// float PI22 = -1.0f/76.0f*(2.0f*(m1)-19.0f*(omega*(m9)-3.0f*omega*(m11)));
//// float PI33 = -1.0f/76.0f*(2.0f*(m1)-19.0f*(omega*(m9)+3.0f*omega*(m11)));
// float PI11 = LRLEVEL*-0.026315789f*m1-0.5f *omega*m9;
// float PI22 = LRLEVEL*-0.026315789f*m1+0.25f*omega*(m9-3.0f*m11);
// float PI33 = LRLEVEL*-0.026315789f*m1+0.25f*omega*(m9+3.0f*m11);
// float PI12 = LRLEVEL*-1.5f*omega*m13;
// float PI23 = LRLEVEL*-1.5f*omega*m14;
// float PI13 = LRLEVEL*-1.5f*omega*m15;
// float nu0 = ((1.0f/omega)-0.5f)*LRFACTOR/3.0f;
// float Smag = sqrt(2.f*(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13));
// //float Cs = 0.01f;
// omega = 1.0f/(3.0f*(nu0+CS*Smag*LRFACTOR*LRFACTOR)*LRLEVEL+0.5f);
// //omega = 1.0f/(1.0f/omega+3.f*CS*Smag*LRFACTOR*LRFACTOR);
// //omega = 1.0f/(1.0f*LRLEVEL/1.99983f-1.f+0.5f+3.f*CS*Smag*LRFACTOR);
//float feq0 = 0.1904761791f*rho+-0.597127747f*usqr ;
float feq1 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*u ;
float feq2 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*v ;
float feq3 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*u ;
float feq4 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*v ;
float feq5 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+v) ;
float feq6 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-v) ;
float feq7 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+v) ;
float feq8 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-v) ;
float feq9 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*w;
float feq10= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+w) ;
float feq11= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v+w);
float feq12= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-w) ;
float feq13= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v-w);
float feq14= 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*w;
float feq15= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(u-w) ;
float feq16= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(v-w);
float feq17= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(u+w) ;
float feq18= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(v+w);
feq1 += 0.055555556f*(2.f*u*u-(v*v+w*w));
feq2 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
feq3 += 0.055555556f*(2.f*u*u-(v*v+w*w));
feq4 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
feq5 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
feq6 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
feq7 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
feq8 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
feq9 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
feq10+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
feq11+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
feq12+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
feq13+= -0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ;
feq14+= -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
feq15+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
feq16+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ;
feq17+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
feq18+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
float PI11 = (f1-feq1)+(f3-feq3)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17);
float PI22 = (f2-feq2)+(f4-feq4)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
float PI33 = (f9-feq9)+(f14-feq14)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
float PI12 = (f5-feq5)+(f7-feq7)-(f6-feq6)-(f8-feq8);
float PI13 = (f10-feq10)+(f17-feq17)-(f12-feq12)-(f15-feq15);
float PI23 = (f11-feq11)+(f18-feq18)-(f13-feq13)-(f16-feq16);
float Q = sqrt(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13);
//float nu0 = ((1.0f/omega)-0.5f)*LRFACTOR/3.0f;
float tau0 = 1.f/omega;
//float Smag = (sqrt(nu0*nu0+18.f*CS*LRFACTOR*LRFACTOR*Q)-nu0)/(6.f*CS*LRFACTOR*LRFACTOR);
//float Smag = LRFACTOR*(sqrt(4.f/9.f*tau0*tau0+8.f*CS*LRFACTOR*Q)-2.f/3.f*tau0)/(4.f*CS*LRFACTOR*LRFACTOR);
//omega = 1.0f/(3.0f*(nu0+CS*Smag*LRFACTOR*LRFACTOR)*LRLEVEL+0.5f);
//float tau = tau0+0.5*(-tau0+sqrt(tau0*tau0+18.f*CS*LRFACTOR*Q));
float tau = tau0+0.5f*(-tau0+sqrt(tau0*tau0+18.f*CS*sqrt(2.f)*Q));
omega = 1.f/tau;
//float tau = 3.f*nu0*LRFACTOR+0.5f+(sqrt(tau0*tau0+18.f*CS*CS*LRFACTOR*LRFACTOR*Q)-tau0)*0.5f;
//omega = 1.f/tau;
}
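//Inverse moment transform: the lines below fold the relaxed moment differences back onto
//the distributions. The stress-related moments (m9, m11, m13, m14, m15) are multiplied by
//the (possibly LES-adjusted) omega; the remaining moment differences enter with fixed
//coefficients.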
f0 -=- 0.012531328f*(m1)+ 0.047619048f*(m2);
f1 -=-0.0045948204f*(m1)+-0.015873016f*(m2)+ -0.1f*(m4) + 0.055555556f*((m9)*omega-m10);
f2 -=-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m6) +-0.027777778f*((m9)*omega-m10)+ 0.083333333f*((m11)*omega-m12);
f3 -=-0.0045948204f*(m1)+-0.015873016f*(m2)+ 0.1f*(m4) + 0.055555556f*((m9)*omega-m10);
f4 -=-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m6) +-0.027777778f*((m9)*omega-m10)+ 0.083333333f*((m11)*omega-m12);
f5 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16-m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13)));
f6 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16-m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13)));
f7 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16+m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13)));
f8 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16+m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13)));
f9 -=-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m8)+-0.027777778f*((m9)*omega-m10)+-0.083333333f*((m11)*omega-m12);
f10-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16+m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15)));
f11-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6+m8)+0.125f*( m17-m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +( 0.25f*(m14)));
f12-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16+m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15)));
f13-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6-m8)+0.125f*(-m17-m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +(-0.25f*(m14)));
f14-=-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m8)+-0.027777778f*((m9)*omega-m10)+-0.083333333f*((m11)*omega-m12);
f15-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16-m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15)));
f16-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6-m8)+0.125f*( m17+m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +(-0.25f*(m14)));
f17-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16-m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15)));
f18-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6+m8)+0.125f*(-m17+m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +( 0.25f*(m14)));
}
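//mrt_collide_LES: D3Q19 MRT collision with a Smagorinsky LES closure on the coarse mesh.
//The second-order equilibria feq_i are rebuilt from rho, u, v, w; the non-equilibrium
//stress tensor Pi is assembled from (f_i - feq_i) and its magnitude Q is used to stiffen
//the relaxation time before the moments are relaxed. Note that the active code path uses
//the global CS constant; the Cs parameter is only referenced in the commented-out variants.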
inline __device__ void mrt_collide_LES(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, float omega, float Cs)
{
float u,v,w;
u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
float rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+
f10+f11+f12+f13+f14+f15+f16+f17+f18;
float usqr = u*u+v*v+w*w;
// u = rho*u;
// v = rho*v;
// w = rho*w;
float m1,m2,m4,m6,m8,m9,m10,m11,m12,m13,m14,m15,m16,m17,m18;
//COMPUTE M-MEQ
//m1 = -19.f*f0+ 19.f*f5+19.f*f6+19.f*f7+19.f*f8+19.f*f10+19.f*f11+19.f*f12+19.f*f13+19.f*f15+19.f*f16+19.f*f17+19.f*f18 -19.f*(u*u+v*v+w*w);//+8.f*(f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18);
//m4 = -3.33333333f*f1+3.33333333f*f3+1.66666667f*f5-1.66666667f*f6-1.66666667f*f7+1.66666667f*f8+1.66666667f*f10-1.66666667f*f12+1.66666667f*f15-1.66666667f*f17;
//m6 = -3.33333333f*f2+3.33333333f*f4+1.66666667f*f5+1.66666667f*f6-1.66666667f*f7-1.66666667f*f8+1.66666667f*f11-1.66666667f*f13+1.66666667f*f16-1.66666667f*f18;
//m8 = -3.33333333f*f9+1.66666667f*f10+1.66666667f*f11+1.66666667f*f12+1.66666667f*f13+3.33333333f*f14-1.66666667f*f15-1.66666667f*f16-1.66666667f*f17-1.66666667f*f18;
m1 = 19.f*(-f0+ f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18 -(u*u+v*v+w*w));//+8.f*(f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18);
m2 = 12.f*f0+ -4.f*f1+ -4.f*f2+ -4.f*f3+ -4.f*f4+ f5+ f6+ f7+ f8+ -4.f*f9+ f10+ f11+ f12+ f13+ -4.f*f14+ f15+ f16+ f17+ f18 +7.53968254f*(u*u+v*v+w*w);
// m4 = 1.666666667f*(-2.f*f1+2.f*f3+f5-f6-f7+f8+f10-f12+f15-f17);
// m6 = 1.666666667f*(-2.f*f2+2.f*f4+f5+f6-f7-f8+f11-f13+f16-f18);
// m8 = 1.666666667f*(-2.f*f9+f10+f11+f12+f13+2.f*f14-f15-f16-f17-f18);
m4 = 1.666666667f*(-3.f*f1+3.f*f3+u);
m6 = 1.666666667f*(-3.f*f2+3.f*f4+v);
m8 = 1.666666667f*(-3.f*f9+3.f*f14+w);
m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+- f9+ f10+ -2.f*f11+ f12+-2.f*f13+- f14+ f15+ -2.f*f16+ f17+-2.f*f18 -(2.f*u*u-(v*v+w*w));
m10 =-4.f*f1+ 2.f*f2+ -4.f*f3+ 2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ f10+ -2.f*f11+ f12+-2.f*f13+ 2.f*f14+ f15+ -2.f*f16+ f17+-2.f*f18;
m11 = f2 + f4+ f5+ f6+ f7+ f8+- f9+-f10 +-f12 +- f14+-f15 +-f17 -(v*v-w*w);
m12 = -2.f*f2 -2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+-f10 +-f12 + 2.f*f14+-f15 +-f17 ;
m13 = f5+-f6+ f7+-f8 -u*v;
m14 = f11 +- f13 + - f16 + f18 -v*w;
m15 = f10 + - f12 +-f15 + f17 -u*w;
m16 = f5+-f6+-f7+ f8 -f10 + f12 +-f15 + f17 ;
m17 = -f5+-f6+ f7+ f8 + f11 +- f13 + f16 +- f18;
m18 = f10+- f11+ f12+- f13 +-f15+ f16+-f17+ f18;
if(SmagLES == "YES"){
// float PI11 = -0.026315789f*m1-0.5f *omega*m9;
// float PI22 = -0.026315789f*m1+0.25f*omega*(m9-3.0f*m11);
// float PI33 = -0.026315789f*m1+0.25f*omega*(m9+3.0f*m11);
//
// float PI12 = -1.5f*omega*m13;
// float PI23 = -1.5f*omega*m14;
// float PI13 = -1.5f*omega*m15;
// float Smag = sqrt(2.f*(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13));
// omega = 1.0f/(1.0f/omega+3.f*CS*Smag);
// float PI11 = LRLEVEL*-1.0f/38.0f*( (m1)+19.0f*omega* (m9));
// float PI22 = LRLEVEL*-1.0f/76.0f*(2.0f*(m1)-19.0f*(omega*(m9)-3.0f*omega*(m11)));
// float PI33 = LRLEVEL*-1.0f/76.0f*(2.0f*(m1)-19.0f*(omega*(m9)+3.0f*omega*(m11)));
// float PI12 = LRLEVEL*-1.5f*omega*m13;
// float PI23 = LRLEVEL*-1.5f*omega*m14;
// float PI13 = LRLEVEL*-1.5f*omega*m15;
// float nu0 = ((1.0f/omega)-0.5f)/3.0f;
// float Smag = sqrt(PI11*PI11+PI22*PI22+PI33*PI33+PI12*PI12+PI23*PI23+PI13*PI13);
// omega = 1.0f/(3.0f*(nu0+Cs*Smag*LRLEVEL*LRLEVEL)+0.5f);
//float feq0 = 0.1904761791f*rho+-0.597127747f*usqr ;
float feq1 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*u ;
float feq2 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*v ;
float feq3 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*u ;
float feq4 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*v ;
float feq5 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+v) ;
float feq6 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-v) ;
float feq7 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+v) ;
float feq8 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-v) ;
float feq9 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*w;
float feq10= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+w) ;
float feq11= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v+w);
float feq12= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-w) ;
float feq13= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v-w);
float feq14= 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*w;
float feq15= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(u-w) ;
float feq16= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(v-w);
float feq17= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(u+w) ;
float feq18= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(v+w);
feq1 += 0.055555556f*(2.f*u*u-(v*v+w*w));
feq2 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
feq3 += 0.055555556f*(2.f*u*u-(v*v+w*w));
feq4 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
feq5 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
feq6 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
feq7 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
feq8 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
feq9 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
feq10+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
feq11+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
feq12+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
feq13+= -0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ;
feq14+= -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
feq15+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
feq16+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ;
feq17+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
feq18+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
float PI11 = (f1-feq1)+(f3-feq3)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17);
float PI22 = (f2-feq2)+(f4-feq4)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
float PI33 = (f9-feq9)+(f14-feq14)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
float PI12 = (f5-feq5)+(f7-feq7)-(f6-feq6)-(f8-feq8);
float PI13 = (f10-feq10)+(f17-feq17)-(f12-feq12)-(f15-feq15);
float PI23 = (f11-feq11)+(f18-feq18)-(f13-feq13)-(f16-feq16);
//float Q = sqrt(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13);
//float nu0 = ((1.0f/omega)-0.5f)/3.0f;
//
//float Smag = (sqrt(nu0*nu0+18.f*CS*Q)-nu0)/(6.f*CS);
//
////omega = 1.0f/(1.0f/omega+3.f*CS*Smag);
//
//float tau0 = 1.f/omega;
//float tau = 3.f*nu0+0.5f+(sqrt(tau0*tau0+18.f*CS*CS*Q)-tau0)*0.5f;
//omega = 1.f/tau;
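//Q below is the Frobenius norm of the non-equilibrium stress Pi. The effective relaxation
//time tau = 0.5*tau0 + 0.5*sqrt(tau0*tau0 + 18*sqrt(2)*CS*Q) is the closed-form
//Smagorinsky correction: the eddy viscosity is proportional to the strain rate
//reconstructed from Pi.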
float Q = sqrt(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13);
//float nu0 = ((1.0f/omega)-0.5f)/3.0f;
float tau0 = 1.f/omega;
//float Smag = (sqrt(nu0*nu0+18.f*CS*LRFACTOR*LRFACTOR*Q)-nu0)/(6.f*CS*LRFACTOR*LRFACTOR);
//float Smag = (sqrt(4.f/9.f*tau0*tau0+8.f*CS*Q)-2.f/3.f*tau0)/(4.f*CS);
//omega = 1.0f/(3.0f*(nu0+CS*Smag)+0.5f);
float tau = tau0+0.5f*(-tau0+sqrt(tau0*tau0+18.f*sqrt(2.f)*CS*Q));
omega = 1.f/tau;
}
f0 -=- 0.012531328f*(m1)+ 0.047619048f*(m2);
f1 -=-0.0045948204f*(m1)+-0.015873016f*(m2)+ -0.1f*(m4) + 0.055555556f*((m9)*omega-m10);
f2 -=-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m6) +-0.027777778f*((m9)*omega-m10)+ 0.083333333f*((m11)*omega-m12);
f3 -=-0.0045948204f*(m1)+-0.015873016f*(m2)+ 0.1f*(m4) + 0.055555556f*((m9)*omega-m10);
f4 -=-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m6) +-0.027777778f*((m9)*omega-m10)+ 0.083333333f*((m11)*omega-m12);
f5 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16-m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13)));
f6 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16-m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13)));
f7 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16+m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13)));
f8 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16+m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13)));
f9 -=-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m8)+-0.027777778f*((m9)*omega-m10)+-0.083333333f*((m11)*omega-m12);
f10-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16+m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15)));
f11-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6+m8)+0.125f*( m17-m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +( 0.25f*(m14)));
f12-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16+m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15)));
f13-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6-m8)+0.125f*(-m17-m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +(-0.25f*(m14)));
f14-=-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m8)+-0.027777778f*((m9)*omega-m10)+-0.083333333f*((m11)*omega-m12);
f15-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16-m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15)));
f16-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6-m8)+0.125f*( m17+m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +(-0.25f*(m14)));
f17-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16-m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15)));
f18-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6+m8)+0.125f*(-m17+m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +( 0.25f*(m14)));
}
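//bgk_scale_cf: grid-transfer rescaling for the BGK model. Each distribution is split into
//its equilibrium part (recomputed from rho, u, v, w) and a non-equilibrium part, and only
//the non-equilibrium part is scaled: f_i <- SF*f_i + (1-SF)*feq_i.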
inline __device__ void bgk_scale_cf(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, float SF)
{
float rho,u,v,w;
rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+
f10+f11+f12+f13+f14+f15+f16+f17+f18;
u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
float usqr = u*u+v*v+w*w;
float feq0 = 0.3333333333f*(rho-1.5f*usqr);
float feq1 = 0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);
float feq2 = 0.0555555556f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr);
float feq3 = 0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
float feq4 = 0.0555555556f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr);
float feq5 = 0.0277777778f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr);
float feq6 = 0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
float feq7 = 0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
float feq8 = 0.0277777778f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr);
float feq9 = 0.0555555556f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr);
float feq10 = 0.0277777778f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr);
float feq11 = 0.0277777778f*(rho+3.0f*(v+w)+4.5f*(v+w)*(v+w)-1.5f*usqr);
float feq12 = 0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
float feq13 = 0.0277777778f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr);
float feq14 = 0.0555555556f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr);
float feq15 = 0.0277777778f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr);
float feq16 = 0.0277777778f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr);
float feq17 = 0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
float feq18 = 0.0277777778f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr);
f0 =SF*f0 +(1.0f-SF)*feq0 ;
f1 =SF*f1 +(1.0f-SF)*feq1 ;
f2 =SF*f2 +(1.0f-SF)*feq2 ;
f3 =SF*f3 +(1.0f-SF)*feq3 ;
f4 =SF*f4 +(1.0f-SF)*feq4 ;
f5 =SF*f5 +(1.0f-SF)*feq5 ;
f6 =SF*f6 +(1.0f-SF)*feq6 ;
f7 =SF*f7 +(1.0f-SF)*feq7 ;
f8 =SF*f8 +(1.0f-SF)*feq8 ;
f9 =SF*f9 +(1.0f-SF)*feq9 ;
f10=SF*f10+(1.0f-SF)*feq10;
f11=SF*f11+(1.0f-SF)*feq11;
f12=SF*f12+(1.0f-SF)*feq12;
f13=SF*f13+(1.0f-SF)*feq13;
f14=SF*f14+(1.0f-SF)*feq14;
f15=SF*f15+(1.0f-SF)*feq15;
f16=SF*f16+(1.0f-SF)*feq16;
f17=SF*f17+(1.0f-SF)*feq17;
f18=SF*f18+(1.0f-SF)*feq18;
}
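//mrt_scale_cf: same blending f_i <- SF*f_i + (1-SF)*feq_i as bgk_scale_cf, but with the
//MRT second-order equilibria. Smag is still computed from the moments, but the omega/SF
//adjustments that would use it are commented out, so only the passed-in SF is applied.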
inline __device__ void mrt_scale_cf(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, float SF)
{
float rho,u,v,w;
rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+
f10+f11+f12+f13+f14+f15+f16+f17+f18;
u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
float usqr = u*u+v*v+w*w;
float feq0 = 0.1904761791f*rho+-0.597127747f*usqr ;
float feq1 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*u ;
float feq2 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*v ;
float feq3 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*u ;
float feq4 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*v ;
float feq5 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+v) ;
float feq6 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-v) ;
float feq7 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+v) ;
float feq8 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-v) ;
float feq9 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*w;
float feq10= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+w) ;
float feq11= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v+w);
float feq12= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-w) ;
float feq13= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v-w);
float feq14= 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*w;
float feq15= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(u-w) ;
float feq16= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(v-w);
float feq17= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(u+w) ;
float feq18= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(v+w);
feq1 += 0.055555556f*(2.f*u*u-(v*v+w*w));
feq2 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
feq3 += 0.055555556f*(2.f*u*u-(v*v+w*w));
feq4 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
feq5 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
feq6 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
feq7 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
feq8 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
feq9 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
feq10+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
feq11+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
feq12+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
feq13+= -0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ;
feq14+= -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
feq15+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
feq16+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ;
feq17+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
feq18+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
//float m1 = 19.f*(-f0+ f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18) -19.f*(u*u+v*v+w*w);
//float m2 = 12.f*f0+-4.f*f1+-4.f*f2+-4.f*f3+-4.f*f4+f5+f6+f7+f8+-4.f*f9+f10+f11+f12+f13+-4.f*f14+f15+f16+f17+f18 +7.53968254f*(u*u+v*v+w*w);
//float m4 = 1.666666667f*(-2.f*f1+2.f*f3+f5-f6-f7+f8+f10-f12+f15-f17);
//float m6 = 1.666666667f*(-2.f*f2+2.f*f4+f5+f6-f7-f8+f11-f13+f16-f18);
//float m8 = 1.666666667f*(-2.f*f9+f10+f11+f12+f13+2.f*f14-f15-f16-f17-f18);
//float m4 = 1.666666667f*(-3.f*f1+3.f*f3+u);
//float m6 = 1.666666667f*(-3.f*f2+3.f*f4+v);
//float m8 = 1.666666667f*(-3.f*f9+3.f*f14+w);
//float m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+- f9+ f10+ -2.f*f11+ f12+-2.f*f13+- f14+ f15+ -2.f*f16+ f17+-2.f*f18 -(2.f*u*u-(v*v+w*w));
//float m10 =-4.f*f1+ 2.f*f2+ -4.f*f3+ 2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ f10+ -2.f*f11+ f12+-2.f*f13+ 2.f*f14+ f15+ -2.f*f16+ f17+-2.f*f18;
//float m11 = f2 + f4+ f5+ f6+ f7+ f8+- f9+-f10 +-f12 +- f14+-f15 +-f17 -(v*v-w*w);
//float m12 = -2.f*f2 -2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+-f10 +-f12 + 2.f*f14+-f15 +-f17 ;
//float m13 = f5+-f6+ f7+-f8 -u*v;
//float m14 = f11 +- f13 + - f16 + f18 -v*w;
//float m15 = f10 + - f12 +-f15 + f17 -u*w;
//float m16 = f5+-f6+-f7+ f8 -f10 + f12 +-f15 + f17 ;
//float m17 = -f5+-f6+ f7+ f8 + f11 +- f13 + f16 +- f18;
//float m18 = f10+- f11+ f12+- f13 +-f15+ f16+-f17+ f18;
float m1 = 19.f*(-f0+ f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18 -(u*u+v*v+w*w));
float m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+- f9+ f10+ -2.f*f11+ f12+-2.f*f13+- f14+ f15+ -2.f*f16+ f17+-2.f*f18 -(2.f*u*u-(v*v+w*w));
float m11 = f2 + f4+ f5+ f6+ f7+ f8+- f9+-f10 +-f12 +- f14+-f15 +-f17 -(v*v-w*w);
float m13 = f5+-f6+ f7+-f8 -u*v;
float m14 = f11 +- f13 + - f16 + f18 -v*w;
float m15 = f10 + - f12 +-f15 + f17 -u*w;
float omega = 1.0f/(3.0f*(UMAX*OBSTR1*2.f/RE)+0.5f);
//float omega2 = 2.0f/(1.0f+2.0f*(2.0f/omega-1.0f));
float PI11 = -0.026315789f*m1-0.5f *omega*m9;
float PI22 = -0.026315789f*m1+0.25f*omega*(m9-3.0f*m11);
float PI33 = -0.026315789f*m1+0.25f*omega*(m9+3.0f*m11);
float PI12 = -1.5f*omega*m13;
float PI23 = -1.5f*omega*m14;
float PI13 = -1.5f*omega*m15;
//we know Smag on coarse mesh
float Smag = sqrt(2.f*(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13));
//omega = 1.0f/(3.0f*(nu0+Cs*Smag*sqrt(2.f))+0.5f);
//omega = 1.0f/(1.0f/omega+3.f*CS*Smag);
//omega2 = 1.0f/(1.0f/omega2+3.f*CS*Smag*sqrt(2.f)*LRFACTOR*LRFACTOR);
//omega = 1.0f/(1.0f/omega +3.f*CS*Smag);
//omega2 = 1.0f/(1.0f/omega2+3.f*CS*Smag*sqrt(2.f)*LRFACTOR*LRFACTOR);
//omega = 1.0f/(1.0f/omega +3.f*CS*Smag);
//omega2 = 1.0f/(1.0f*LRLEVEL/omega2-1.f+0.5f+3.f*CS*Smag*sqrt(2.f)*LRFACTOR);
//float PI11 = (f1-feq1)+(f3-feq3)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17);
//float PI22 = (f2-feq2)+(f4-feq4)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
//float PI33 = (f9-feq9)+(f14-feq14)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
//float PI12 = (f5-feq5)+(f7-feq7)-(f6-feq6)-(f8-feq8);
//float PI13 = (f10-feq10)+(f17-feq17)-(f12-feq12)-(f15-feq15);
//float PI23 = (f11-feq11)+(f18-feq18)-(f13-feq13)-(f16-feq16);
//
//float Q = sqrt(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13);
//float nu0 = ((1.0f/omega)-0.5f)/3.0f;
//float tau0c = 1.f/omega;
//float tau = tau0c+0.5*(-tau0c+sqrt(tau0c*tau0c+18.f*CS*Q));//tau_total of coarse mesh
//omega = 1.f/tau;//total omega on coarse mesh
//tau = tau0+0.5*(-tau0+sqrt(tau0*tau0+18.f*CS*LRFACTOR*Q));
//omega2= 1.f/tau;
//SF = (omega*(1.0f-omega2))/((1.0f-omega)*omega2);//for post-collision
//SF = omega*0.5f/omega2;//for post-streaming, pre-collision?
f0 =SF*f0 +(1.0f-SF)*feq0 ;
f1 =SF*f1 +(1.0f-SF)*feq1 ;
f2 =SF*f2 +(1.0f-SF)*feq2 ;
f3 =SF*f3 +(1.0f-SF)*feq3 ;
f4 =SF*f4 +(1.0f-SF)*feq4 ;
f5 =SF*f5 +(1.0f-SF)*feq5 ;
f6 =SF*f6 +(1.0f-SF)*feq6 ;
f7 =SF*f7 +(1.0f-SF)*feq7 ;
f8 =SF*f8 +(1.0f-SF)*feq8 ;
f9 =SF*f9 +(1.0f-SF)*feq9 ;
f10=SF*f10+(1.0f-SF)*feq10;
f11=SF*f11+(1.0f-SF)*feq11;
f12=SF*f12+(1.0f-SF)*feq12;
f13=SF*f13+(1.0f-SF)*feq13;
f14=SF*f14+(1.0f-SF)*feq14;
f15=SF*f15+(1.0f-SF)*feq15;
f16=SF*f16+(1.0f-SF)*feq16;
f17=SF*f17+(1.0f-SF)*feq17;
f18=SF*f18+(1.0f-SF)*feq18;
}
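//mrt_scale_fc_LES: fine-to-coarse rescaling with LES. The non-equilibrium part of each
//distribution is rescaled with SF = omega*(1-omega2)/((1-omega)*omega2), where omega is
//the coarse-mesh and omega2 the fine-mesh relaxation rate, so the viscous stress stays
//continuous across the grid interface (the commented block keeps LES-adjusted variants).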
inline __device__ void mrt_scale_fc_LES(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, float omega, float omega2)
{
float rho,u,v,w;
rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+
f10+f11+f12+f13+f14+f15+f16+f17+f18;
u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
float usqr = u*u+v*v+w*w;
float feq0 = 0.1904761791f*rho+-0.597127747f*usqr ;
float feq1 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*u ;
float feq2 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*v ;
float feq3 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*u ;
float feq4 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*v ;
float feq5 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+v) ;
float feq6 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-v) ;
float feq7 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+v) ;
float feq8 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-v) ;
float feq9 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*w;
float feq10= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+w) ;
float feq11= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v+w);
float feq12= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-w) ;
float feq13= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v-w);
float feq14= 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*w;
float feq15= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(u-w) ;
float feq16= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(v-w);
float feq17= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(u+w) ;
float feq18= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(v+w);
feq1 += 0.055555556f*(2.f*u*u-(v*v+w*w));
feq2 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
feq3 += 0.055555556f*(2.f*u*u-(v*v+w*w));
feq4 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
feq5 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
feq6 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
feq7 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
feq8 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
feq9 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
feq10+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
feq11+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
feq12+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
feq13+= -0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ;
feq14+= -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
feq15+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
feq16+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ;
feq17+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
feq18+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
//float m1 = 19.f*(-f0+ f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18 -(u*u+v*v+w*w));
//float m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+- f9+ f10+ -2.f*f11+ f12+-2.f*f13+- f14+ f15+ -2.f*f16+ f17+-2.f*f18 -(2.f*u*u-(v*v+w*w));
//float m11 = f2 + f4+ f5+ f6+ f7+ f8+- f9+-f10 +-f12 +- f14+-f15 +-f17 -(v*v-w*w);
//float m13 = f5+-f6+ f7+-f8 -u*v;
//float m14 = f11 +- f13 + - f16 + f18 -v*w;
//float m15 = f10 + - f12 +-f15 + f17 -u*w;
//float PI11 = -0.026315789f*m1-0.5f *omega*m9;
//float PI22 = -0.026315789f*m1+0.25f*omega*(m9-3.0f*m11);
//float PI33 = -0.026315789f*m1+0.25f*omega*(m9+3.0f*m11);
//float PI12 = -1.5f*omega*m13;
//float PI23 = -1.5f*omega*m14;
//float PI13 = -1.5f*omega*m15;
////we know Smag on fine mesh. Smag_c=Smag_f*sqrt(2)
//float Smag = sqrt(2.f*(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13));
//float nu0 = ((1.0f/omega)-0.5f)/3.0f;
////omega = 1.0f/(3.0f*(nu0+CS*Smag*sqrt(2.f))+0.5f);
////omega2 = 1.0f/(1.0f/omega2+3.f*CS*Smag*LRFACTOR);
////omega = 1.0f/(1.0f/omega+3.f*CS*Smag/sqrt(2.f));
////omega2 = 1.0f/(1.0f*LRLEVEL/omega2-1.f+0.5f+3.f*CS*Smag*LRFACTOR);
////omega = 1.0f/(1.0f/omega+3.f*CS*Smag/sqrt(2.f));
//float PI11 = (f1-feq1)+(f3-feq3)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17);
//float PI22 = (f2-feq2)+(f4-feq4)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
//float PI33 = (f9-feq9)+(f14-feq14)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
//float PI12 = (f5-feq5)+(f7-feq7)-(f6-feq6)-(f8-feq8);
//float PI13 = (f10-feq10)+(f17-feq17)-(f12-feq12)-(f15-feq15);
//float PI23 = (f11-feq11)+(f18-feq18)-(f13-feq13)-(f16-feq16);
//
//float Q = sqrt(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13);
//float nu0 = ((1.0f/omega)-0.5f)/3.0f;
//float tau0f = 1.f/omega2;
//float tau0c = 1.f/omega;
//float tau = tau0f+0.5*(-tau0f+sqrt(tau0f*tau0f+18.f*CS*sqrt(2.f)*Q));//tau_total of fine
//omega2 = 1.f/tau;//total omega on fine mesh
//tau = LRLEVEL*(tau-tau0f)+tau0c;
//omega= 1.f/tau;
//tau = tau0+0.5*(-tau0+sqrt(tau0*tau0+18.f*CS*Q));
float SF = (omega*(1.0f-omega2))/((1.0f-omega)*omega2);
//float SF = omega2*2.f/omega;
//float SF = ((1.0f-omega)*omega2/LRFACTOR)/(omega*(1.0f-omega2));
//SF = omega*2.f/omega2;
f0 =SF*f0 +(1.0f-SF)*feq0 ;
f1 =SF*f1 +(1.0f-SF)*feq1 ;
f2 =SF*f2 +(1.0f-SF)*feq2 ;
f3 =SF*f3 +(1.0f-SF)*feq3 ;
f4 =SF*f4 +(1.0f-SF)*feq4 ;
f5 =SF*f5 +(1.0f-SF)*feq5 ;
f6 =SF*f6 +(1.0f-SF)*feq6 ;
f7 =SF*f7 +(1.0f-SF)*feq7 ;
f8 =SF*f8 +(1.0f-SF)*feq8 ;
f9 =SF*f9 +(1.0f-SF)*feq9 ;
f10=SF*f10+(1.0f-SF)*feq10;
f11=SF*f11+(1.0f-SF)*feq11;
f12=SF*f12+(1.0f-SF)*feq12;
f13=SF*f13+(1.0f-SF)*feq13;
f14=SF*f14+(1.0f-SF)*feq14;
f15=SF*f15+(1.0f-SF)*feq15;
f16=SF*f16+(1.0f-SF)*feq16;
f17=SF*f17+(1.0f-SF)*feq17;
f18=SF*f18+(1.0f-SF)*feq18;
}
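//f_Extract: fine-to-coarse coupling for the interior slices. Coarse nodes lying on the
//ring just inside the refined patch get their 19 distributions rebuilt by trilinear
//interpolation of the fine-mesh values in fin, rescaled with the passed-in SF via
//mrt_scale_cf / bgk_scale_cf, and written into the coarse array fout.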
__global__ void f_Extract(float* fout, float* fin, float* gin, float* hin,
size_t pitch_c, size_t pitch_f, int zInner_c, int zInner_f, float SF, int GPU)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//local index on f of coarse mesh
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
float xcoord = x;//xcoord in gpu
float ycoord = y;//ycoord in gpu
float zcoord = z+1;//zcoord in gpu
float f[19];
if( (x > LRX0+1 && x < LRX0+XLRDIM*LRFACTOR-1 && y > LRY0+1 && y < LRY0+YLRDIM*LRFACTOR-1) &&
(x == int(LRX0+2) || x == int(LRX0+XLRDIM*LRFACTOR-1) || y == int(LRY0+2) || y == int(LRY0+YLRDIM*LRFACTOR-1)) )
{
float xcoord_LR = LRLEVEL*(xcoord-LRX0);//coord in refined region coordinates
float ycoord_LR = LRLEVEL*(ycoord-LRY0);
float zcoord_LR = LRLEVEL*(zcoord-LRZ0)-1.f;//-1.f to account for g layer
int xm = int(xcoord_LR);
int ym = int(ycoord_LR);
int zm = int(zcoord_LR);
int xp = xm+1;
int yp = ym+1;
int zp = zm+1;
float xf = xcoord_LR-xm;
float yf = ycoord_LR-ym;
float zf = zcoord_LR-zm;
for(int i=0;i<19;i++){
float v000 = fin[f_memLR(i ,xm,ym,zm,pitch_f,zInner_f)];
float v001 = fin[f_memLR(i ,xp,ym,zm,pitch_f,zInner_f)];
float v010 = fin[f_memLR(i ,xm,yp,zm,pitch_f,zInner_f)];
float v011 = fin[f_memLR(i ,xp,yp,zm,pitch_f,zInner_f)];
float v100 = fin[f_memLR(i ,xm,ym,zp,pitch_f,zInner_f)];
float v101 = fin[f_memLR(i ,xp,ym,zp,pitch_f,zInner_f)];
float v110 = fin[f_memLR(i ,xm,yp,zp,pitch_f,zInner_f)];
float v111 = fin[f_memLR(i ,xp,yp,zp,pitch_f,zInner_f)];
f[i] = trilinear_interp(v000, v001, v010, v011, v100, v101, v110, v111, xf, yf, zf);
}
if(MODEL == "MRT")
mrt_scale_cf(f[0],f[1],f[2],f[3],f[4],f[5],f[6],f[7],f[8],f[9],f[10],f[11],f[12],f[13],f[14],f[15],f[16],f[17],f[18],SF);
else if(MODEL == "BGK")
bgk_scale_cf(f[0],f[1],f[2],f[3],f[4],f[5],f[6],f[7],f[8],f[9],f[10],f[11],f[12],f[13],f[14],f[15],f[16],f[17],f[18],SF);
fout[f_mem(0 ,x,y,z,pitch_c,zInner_c)] = f[0 ];
fout[f_mem(1 ,x,y,z,pitch_c,zInner_c)] = f[1 ];
fout[f_mem(2 ,x,y,z,pitch_c,zInner_c)] = f[2 ];
fout[f_mem(3 ,x,y,z,pitch_c,zInner_c)] = f[3 ];
fout[f_mem(4 ,x,y,z,pitch_c,zInner_c)] = f[4 ];
fout[f_mem(5 ,x,y,z,pitch_c,zInner_c)] = f[5 ];
fout[f_mem(6 ,x,y,z,pitch_c,zInner_c)] = f[6 ];
fout[f_mem(7 ,x,y,z,pitch_c,zInner_c)] = f[7 ];
fout[f_mem(8 ,x,y,z,pitch_c,zInner_c)] = f[8 ];
fout[f_mem(9 ,x,y,z,pitch_c,zInner_c)] = f[9 ];
fout[f_mem(10,x,y,z,pitch_c,zInner_c)] = f[10];
fout[f_mem(11,x,y,z,pitch_c,zInner_c)] = f[11];
fout[f_mem(12,x,y,z,pitch_c,zInner_c)] = f[12];
fout[f_mem(13,x,y,z,pitch_c,zInner_c)] = f[13];
fout[f_mem(14,x,y,z,pitch_c,zInner_c)] = f[14];
fout[f_mem(15,x,y,z,pitch_c,zInner_c)] = f[15];
fout[f_mem(16,x,y,z,pitch_c,zInner_c)] = f[16];
fout[f_mem(17,x,y,z,pitch_c,zInner_c)] = f[17];
fout[f_mem(18,x,y,z,pitch_c,zInner_c)] = f[18];
}
}
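//g_Extract: fine-to-coarse coupling for the bottom ghost layer. The interpolation mixes
//the fine bottom buffer gin with the first interior fine slice of fin (zf = 0.5 between
//them) and writes the rescaled result into the coarse bottom buffer gout.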
__global__ void g_Extract(float* gout, float* fin, float* gin,
size_t pitch_c, size_t pitch_f, int zInner_c, int zInner_f, float SF, int GPU)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//local index on f of coarse mesh
int y = threadIdx.y+blockIdx.y*blockDim.y;
float xcoord = x;//xcoord in gpu
float ycoord = y;//ycoord in gpu
//float zcoord = 0;//zcoord in gpu
float f[19];
if( (x > LRX0+1 && x < LRX0+XLRDIM*LRFACTOR-1 && y > LRY0+1 && y < LRY0+YLRDIM*LRFACTOR-1) &&
(x == int(LRX0+2) || x == int(LRX0+XLRDIM*LRFACTOR-1) || y == int(LRY0+2) || y == int(LRY0+YLRDIM*LRFACTOR-1)) )
{
float xcoord_LR = LRLEVEL*(xcoord-LRX0);//coord in refined region coordinates
float ycoord_LR = LRLEVEL*(ycoord-LRY0);
//float zcoord_LR = LRLEVEL*(zcoord-LRZ0);
int xm = int(xcoord_LR);
int ym = int(ycoord_LR);
int xp = xm+1;
int yp = ym+1;
float xf = xcoord_LR-xm;
float yf = ycoord_LR-ym;
float zf = 0.5f;
for(int i=0;i<19;i++){
float v000 = gin[buff_memLR(i ,xm,ym,pitch_f)];
float v001 = gin[buff_memLR(i ,xp,ym,pitch_f)];
float v010 = gin[buff_memLR(i ,xm,yp,pitch_f)];
float v011 = gin[buff_memLR(i ,xp,yp,pitch_f)];
float v100 = fin[f_memLR(i ,xm,ym,0,pitch_f,zInner_f)];
float v101 = fin[f_memLR(i ,xp,ym,0,pitch_f,zInner_f)];
float v110 = fin[f_memLR(i ,xm,yp,0,pitch_f,zInner_f)];
float v111 = fin[f_memLR(i ,xp,yp,0,pitch_f,zInner_f)];
f[i] = trilinear_interp(v000, v001, v010, v011, v100, v101, v110, v111, xf, yf, zf);
}
if(MODEL == "MRT")
mrt_scale_cf(f[0],f[1],f[2],f[3],f[4],f[5],f[6],f[7],f[8],f[9],f[10],f[11],f[12],f[13],f[14],f[15],f[16],f[17],f[18],SF);
else if(MODEL == "BGK")
bgk_scale_cf(f[0],f[1],f[2],f[3],f[4],f[5],f[6],f[7],f[8],f[9],f[10],f[11],f[12],f[13],f[14],f[15],f[16],f[17],f[18],SF);
gout[buff_mem(0 ,x,y,pitch_c)] = f[0 ];
gout[buff_mem(1 ,x,y,pitch_c)] = f[1 ];
gout[buff_mem(2 ,x,y,pitch_c)] = f[2 ];
gout[buff_mem(3 ,x,y,pitch_c)] = f[3 ];
gout[buff_mem(4 ,x,y,pitch_c)] = f[4 ];
gout[buff_mem(5 ,x,y,pitch_c)] = f[5 ];
gout[buff_mem(6 ,x,y,pitch_c)] = f[6 ];
gout[buff_mem(7 ,x,y,pitch_c)] = f[7 ];
gout[buff_mem(8 ,x,y,pitch_c)] = f[8 ];
gout[buff_mem(9 ,x,y,pitch_c)] = f[9 ];
gout[buff_mem(10,x,y,pitch_c)] = f[10];
gout[buff_mem(11,x,y,pitch_c)] = f[11];
gout[buff_mem(12,x,y,pitch_c)] = f[12];
gout[buff_mem(13,x,y,pitch_c)] = f[13];
gout[buff_mem(14,x,y,pitch_c)] = f[14];
gout[buff_mem(15,x,y,pitch_c)] = f[15];
gout[buff_mem(16,x,y,pitch_c)] = f[16];
gout[buff_mem(17,x,y,pitch_c)] = f[17];
gout[buff_mem(18,x,y,pitch_c)] = f[18];
}
}
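//h_Extract: counterpart of g_Extract for the top ghost layer; it interpolates between the
//last interior fine slice of fin and the fine top buffer hin and writes into hout.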
__global__ void h_Extract(float* hout, float* fin, float* hin,
size_t pitch_c, size_t pitch_f, int zInner_c, int zInner_f, float SF, int GPU)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//local index on f of coarse mesh
int y = threadIdx.y+blockIdx.y*blockDim.y;
float xcoord = x;//xcoord in gpu
float ycoord = y;//ycoord in gpu
//float zcoord = zInner_c+2-1;//zcoord in gpu
float f[19];
if( (x > LRX0+1 && x < LRX0+XLRDIM*LRFACTOR-1 && y > LRY0+1 && y < LRY0+YLRDIM*LRFACTOR-1) &&
(x == int(LRX0+2) || x == int(LRX0+XLRDIM*LRFACTOR-1) || y == int(LRY0+2) || y == int(LRY0+YLRDIM*LRFACTOR-1)) )
{
float xcoord_LR = LRLEVEL*(xcoord-LRX0);//coord in refined region coordinates
float ycoord_LR = LRLEVEL*(ycoord-LRY0);
//float zcoord_LR = LRLEVEL*(zcoord-LRZ0);
int xm = int(xcoord_LR);
int ym = int(ycoord_LR);
int xp = xm+1;
int yp = ym+1;
float xf = xcoord_LR-xm;
float yf = ycoord_LR-ym;
float zf = 0.5f;
for(int i=0;i<19;i++){
float v000 = fin[f_memLR(i ,xm,ym,zInner_f-1,pitch_f,zInner_f)];
float v001 = fin[f_memLR(i ,xp,ym,zInner_f-1,pitch_f,zInner_f)];
float v010 = fin[f_memLR(i ,xm,yp,zInner_f-1,pitch_f,zInner_f)];
float v011 = fin[f_memLR(i ,xp,yp,zInner_f-1,pitch_f,zInner_f)];
float v100 = hin[buff_memLR(i ,xm,ym,pitch_f)];
float v101 = hin[buff_memLR(i ,xp,ym,pitch_f)];
float v110 = hin[buff_memLR(i ,xm,yp,pitch_f)];
float v111 = hin[buff_memLR(i ,xp,yp,pitch_f)];
f[i] = trilinear_interp(v000, v001, v010, v011, v100, v101, v110, v111, xf, yf, zf);
}
if(MODEL == "MRT")
mrt_scale_cf(f[0],f[1],f[2],f[3],f[4],f[5],f[6],f[7],f[8],f[9],f[10],f[11],f[12],f[13],f[14],f[15],f[16],f[17],f[18],SF);
else if(MODEL == "BGK")
bgk_scale_cf(f[0],f[1],f[2],f[3],f[4],f[5],f[6],f[7],f[8],f[9],f[10],f[11],f[12],f[13],f[14],f[15],f[16],f[17],f[18],SF);
hout[buff_mem(0 ,x,y,pitch_c)] = f[0 ];
hout[buff_mem(1 ,x,y,pitch_c)] = f[1 ];
hout[buff_mem(2 ,x,y,pitch_c)] = f[2 ];
hout[buff_mem(3 ,x,y,pitch_c)] = f[3 ];
hout[buff_mem(4 ,x,y,pitch_c)] = f[4 ];
hout[buff_mem(5 ,x,y,pitch_c)] = f[5 ];
hout[buff_mem(6 ,x,y,pitch_c)] = f[6 ];
hout[buff_mem(7 ,x,y,pitch_c)] = f[7 ];
hout[buff_mem(8 ,x,y,pitch_c)] = f[8 ];
hout[buff_mem(9 ,x,y,pitch_c)] = f[9 ];
hout[buff_mem(10,x,y,pitch_c)] = f[10];
hout[buff_mem(11,x,y,pitch_c)] = f[11];
hout[buff_mem(12,x,y,pitch_c)] = f[12];
hout[buff_mem(13,x,y,pitch_c)] = f[13];
hout[buff_mem(14,x,y,pitch_c)] = f[14];
hout[buff_mem(15,x,y,pitch_c)] = f[15];
hout[buff_mem(16,x,y,pitch_c)] = f[16];
hout[buff_mem(17,x,y,pitch_c)] = f[17];
hout[buff_mem(18,x,y,pitch_c)] = f[18];
}
}
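//update_inner: one LBM time step (pull streaming + collision) for the interior z-slices of
//this GPU's coarse sub-domain. fA is read and fB written (A/B buffering); the 2D buffers
//g and h hold the slices directly below and above the interior block, so the first and
//last interior slices pull their z-streaming populations from them. The node flag im
//selects the treatment: 1/10 = bounce-back solid, 100 = north outlet (extrapolation),
//200 = south inlet (velocity UMAX), otherwise a regular fluid node.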
__global__ void update_inner(float* fA, float* fB, float* g, float* h,
float omega, size_t pitch, int GPU, int zInner)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements)
int im = ImageFcn(x,y,GPU*(zInner+2)+1+z);
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
// if(REFINEMENT == "YES" && x > LRX0+1 && x < LRX0+(XLRDIM-1)*LRFACTOR-1
// && y > LRY0+1 && y < LRY0+(YLRDIM-1)*LRFACTOR-1 && z > LRZ0+1 && z < LRZ0+(ZLRDIM-1)*LRFACTOR-1 ||
// (x>XDIM-1)){
// }
// else{
f0 = fA[j];
f1 = fA[f_mem (1 ,x-1,y ,z ,pitch, zInner)];
f3 = fA[f_mem (3 ,x+1,y ,z ,pitch, zInner)];
f2 = fA[f_mem (2 ,x ,y-1,z ,pitch, zInner)];
f5 = fA[f_mem (5 ,x-1,y-1,z ,pitch, zInner)];
f6 = fA[f_mem (6 ,x+1,y-1,z ,pitch, zInner)];
f4 = fA[f_mem (4 ,x ,y+1,z ,pitch, zInner)];
f7 = fA[f_mem (7 ,x+1,y+1,z ,pitch, zInner)];
f8 = fA[f_mem (8 ,x-1,y+1,z ,pitch, zInner)];
if(z==zInner-1){//top nodes need info from h
f9 = fA[f_mem (9 ,x ,y ,z-1,pitch, zInner)];
f10= fA[f_mem (10,x-1,y ,z-1,pitch, zInner)];
f11= fA[f_mem (11,x ,y-1,z-1,pitch, zInner)];
f12= fA[f_mem (12,x+1,y ,z-1,pitch, zInner)];
f13= fA[f_mem (13,x ,y+1,z-1,pitch, zInner)];
f14= h [buff_mem(14,x ,y ,pitch)];
f15= h [buff_mem(15,x-1,y ,pitch)];
f16= h [buff_mem(16,x ,y-1,pitch)];
f17= h [buff_mem(17,x+1,y ,pitch)];
f18= h [buff_mem(18,x ,y+1,pitch)];
}
else if(z==0){//bottom nodes need info from g
f9 = g [buff_mem(9 ,x ,y ,pitch)];
f10= g [buff_mem(10,x-1,y ,pitch)];
f11= g [buff_mem(11,x ,y-1,pitch)];
f12= g [buff_mem(12,x+1,y ,pitch)];
f13= g [buff_mem(13,x ,y+1,pitch)];
f14= fA[f_mem (14,x ,y ,z+1,pitch, zInner)];
f15= fA[f_mem (15,x-1,y ,z+1,pitch, zInner)];
f16= fA[f_mem (16,x ,y-1,z+1,pitch, zInner)];
f17= fA[f_mem (17,x+1,y ,z+1,pitch, zInner)];
f18= fA[f_mem (18,x ,y+1,z+1,pitch, zInner)];
}
else{//normal nodes
f9 = fA[f_mem(9 ,x ,y ,z-1,pitch,zInner)];
f10= fA[f_mem(10,x-1,y ,z-1,pitch,zInner)];
f11= fA[f_mem(11,x ,y-1,z-1,pitch,zInner)];
f12= fA[f_mem(12,x+1,y ,z-1,pitch,zInner)];
f13= fA[f_mem(13,x ,y+1,z-1,pitch,zInner)];
f14= fA[f_mem(14,x ,y ,z+1,pitch,zInner)];
f15= fA[f_mem(15,x-1,y ,z+1,pitch,zInner)];
f16= fA[f_mem(16,x ,y-1,z+1,pitch,zInner)];
f17= fA[f_mem(17,x+1,y ,z+1,pitch,zInner)];
f18= fA[f_mem(18,x ,y+1,z+1,pitch,zInner)];
}//end normal nodes
if(im == 1 || im ==10){//BB
fB[f_mem(0 ,x,y,z,pitch,zInner)] = f0 ;//f0 is unchanged by bounce-back; copy it so fB is complete (update_bottom/update_top do this too)
fB[f_mem(1 ,x,y,z,pitch,zInner)] = f3 ;
fB[f_mem(2 ,x,y,z,pitch,zInner)] = f4 ;
fB[f_mem(3 ,x,y,z,pitch,zInner)] = f1 ;
fB[f_mem(4 ,x,y,z,pitch,zInner)] = f2 ;
fB[f_mem(5 ,x,y,z,pitch,zInner)] = f7 ;
fB[f_mem(6 ,x,y,z,pitch,zInner)] = f8 ;
fB[f_mem(7 ,x,y,z,pitch,zInner)] = f5 ;
fB[f_mem(8 ,x,y,z,pitch,zInner)] = f6 ;
fB[f_mem(9 ,x,y,z,pitch,zInner)] = f14;
fB[f_mem(10,x,y,z,pitch,zInner)] = f17;
fB[f_mem(11,x,y,z,pitch,zInner)] = f18;
fB[f_mem(12,x,y,z,pitch,zInner)] = f15;
fB[f_mem(13,x,y,z,pitch,zInner)] = f16;
fB[f_mem(14,x,y,z,pitch,zInner)] = f9 ;
fB[f_mem(15,x,y,z,pitch,zInner)] = f12;
fB[f_mem(16,x,y,z,pitch,zInner)] = f13;
fB[f_mem(17,x,y,z,pitch,zInner)] = f10;
fB[f_mem(18,x,y,z,pitch,zInner)] = f11;
}
else{
if(im == 100)//north outlet
{
f0 = fA[f_mem(0 ,x,y-1,z,pitch,zInner)];
f1 = fA[f_mem(1 ,x,y-1,z,pitch,zInner)];
f3 = fA[f_mem(3 ,x,y-1,z,pitch,zInner)];
f2 = fA[f_mem(2 ,x,y-1,z,pitch,zInner)];
f5 = fA[f_mem(5 ,x,y-1,z,pitch,zInner)];
f6 = fA[f_mem(6 ,x,y-1,z,pitch,zInner)];
f4 = fA[f_mem(4 ,x,y-1,z,pitch,zInner)];
f7 = fA[f_mem(7 ,x,y-1,z,pitch,zInner)];
f8 = fA[f_mem(8 ,x,y-1,z,pitch,zInner)];
f9 = fA[f_mem(9 ,x,y-1,z,pitch,zInner)];
f10= fA[f_mem(10,x,y-1,z,pitch,zInner)];
f11= fA[f_mem(11,x,y-1,z,pitch,zInner)];
f12= fA[f_mem(12,x,y-1,z,pitch,zInner)];
f13= fA[f_mem(13,x,y-1,z,pitch,zInner)];
f14= fA[f_mem(14,x,y-1,z,pitch,zInner)];
f15= fA[f_mem(15,x,y-1,z,pitch,zInner)];
f16= fA[f_mem(16,x,y-1,z,pitch,zInner)];
f17= fA[f_mem(17,x,y-1,z,pitch,zInner)];
f18= fA[f_mem(18,x,y-1,z,pitch,zInner)];
North_Extrap(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,1.0f);
}
if(im == 200)//south inlet
{
f0 = fA[f_mem(0 ,x,y+1,z,pitch,zInner)];
f1 = fA[f_mem(1 ,x,y+1,z,pitch,zInner)];
f3 = fA[f_mem(3 ,x,y+1,z,pitch,zInner)];
f2 = fA[f_mem(2 ,x,y+1,z,pitch,zInner)];
f5 = fA[f_mem(5 ,x,y+1,z,pitch,zInner)];
f6 = fA[f_mem(6 ,x,y+1,z,pitch,zInner)];
f4 = fA[f_mem(4 ,x,y+1,z,pitch,zInner)];
f7 = fA[f_mem(7 ,x,y+1,z,pitch,zInner)];
f8 = fA[f_mem(8 ,x,y+1,z,pitch,zInner)];
f9 = fA[f_mem(9 ,x,y+1,z,pitch,zInner)];
f10= fA[f_mem(10,x,y+1,z,pitch,zInner)];
f11= fA[f_mem(11,x,y+1,z,pitch,zInner)];
f12= fA[f_mem(12,x,y+1,z,pitch,zInner)];
f13= fA[f_mem(13,x,y+1,z,pitch,zInner)];
f14= fA[f_mem(14,x,y+1,z,pitch,zInner)];
f15= fA[f_mem(15,x,y+1,z,pitch,zInner)];
f16= fA[f_mem(16,x,y+1,z,pitch,zInner)];
f17= fA[f_mem(17,x,y+1,z,pitch,zInner)];
f18= fA[f_mem(18,x,y+1,z,pitch,zInner)];
South_Extrap(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,UMAX);
}
boundaries(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,GPU*(zInner+2)+1+z,im);
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
fB[f_mem(0 ,x,y,z,pitch,zInner)] = f0 ;
fB[f_mem(1 ,x,y,z,pitch,zInner)] = f1 ;
fB[f_mem(2 ,x,y,z,pitch,zInner)] = f2 ;
fB[f_mem(3 ,x,y,z,pitch,zInner)] = f3 ;
fB[f_mem(4 ,x,y,z,pitch,zInner)] = f4 ;
fB[f_mem(5 ,x,y,z,pitch,zInner)] = f5 ;
fB[f_mem(6 ,x,y,z,pitch,zInner)] = f6 ;
fB[f_mem(7 ,x,y,z,pitch,zInner)] = f7 ;
fB[f_mem(8 ,x,y,z,pitch,zInner)] = f8 ;
fB[f_mem(9 ,x,y,z,pitch,zInner)] = f9 ;
fB[f_mem(10,x,y,z,pitch,zInner)] = f10;
fB[f_mem(11,x,y,z,pitch,zInner)] = f11;
fB[f_mem(12,x,y,z,pitch,zInner)] = f12;
fB[f_mem(13,x,y,z,pitch,zInner)] = f13;
fB[f_mem(14,x,y,z,pitch,zInner)] = f14;
fB[f_mem(15,x,y,z,pitch,zInner)] = f15;
fB[f_mem(16,x,y,z,pitch,zInner)] = f16;
fB[f_mem(17,x,y,z,pitch,zInner)] = f17;
fB[f_mem(18,x,y,z,pitch,zInner)] = f18;
}
}
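//update_bottom: same step as update_inner, but for the bottom boundary slice of the
//sub-domain (stored in the 2D buffers gA/gB). Populations pulled from below (f9..f13)
//come from the exchange buffer 'temp' (presumably the adjacent slice of the neighbouring
//sub-domain), and those pulled from above (f14..f18) come from the first interior slice of f.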
__global__ void update_bottom(float* gA, float* gB, float* f, float* temp,
float omega, size_t pitch, int GPU, int zInner)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int j = x+y*pitch;//index on padded mem (pitch in elements)
int im = ImageFcn(x,y,GPU*(zInner+2));
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
f0 = gA [j];
f1 = gA [buff_mem(1 ,x-1,y ,pitch)];
f3 = gA [buff_mem(3 ,x+1,y ,pitch)];
f2 = gA [buff_mem(2 ,x ,y-1,pitch)];
f5 = gA [buff_mem(5 ,x-1,y-1,pitch)];
f6 = gA [buff_mem(6 ,x+1,y-1,pitch)];
f4 = gA [buff_mem(4 ,x ,y+1,pitch)];
f7 = gA [buff_mem(7 ,x+1,y+1,pitch)];
f8 = gA [buff_mem(8 ,x-1,y+1,pitch)];
f9 = temp[buff_mem(9 ,x ,y ,pitch)];
f10= temp[buff_mem(10,x-1,y ,pitch)];
f11= temp[buff_mem(11,x ,y-1,pitch)];
f12= temp[buff_mem(12,x+1,y ,pitch)];
f13= temp[buff_mem(13,x ,y+1,pitch)];
f14= f [f_mem (14,x ,y ,0,pitch, zInner)];
f15= f [f_mem (15,x-1,y ,0,pitch, zInner)];
f16= f [f_mem (16,x ,y-1,0,pitch, zInner)];
f17= f [f_mem (17,x+1,y ,0,pitch, zInner)];
f18= f [f_mem (18,x ,y+1,0,pitch, zInner)];
if(im == 1 || im ==10){//BB
gB[buff_mem(0 ,x,y,pitch)] = f0 ;
gB[buff_mem(1 ,x,y,pitch)] = f3 ;
gB[buff_mem(2 ,x,y,pitch)] = f4 ;
gB[buff_mem(3 ,x,y,pitch)] = f1 ;
gB[buff_mem(4 ,x,y,pitch)] = f2 ;
gB[buff_mem(5 ,x,y,pitch)] = f7 ;
gB[buff_mem(6 ,x,y,pitch)] = f8 ;
gB[buff_mem(7 ,x,y,pitch)] = f5 ;
gB[buff_mem(8 ,x,y,pitch)] = f6 ;
gB[buff_mem(9 ,x,y,pitch)] = f14;
gB[buff_mem(10,x,y,pitch)] = f17;
gB[buff_mem(11,x,y,pitch)] = f18;
gB[buff_mem(12,x,y,pitch)] = f15;
gB[buff_mem(13,x,y,pitch)] = f16;
gB[buff_mem(14,x,y,pitch)] = f9 ;
gB[buff_mem(15,x,y,pitch)] = f12;
gB[buff_mem(16,x,y,pitch)] = f13;
gB[buff_mem(17,x,y,pitch)] = f10;
gB[buff_mem(18,x,y,pitch)] = f11;
}
else{
if(im == 100)//north outlet
{
f0 = gA[buff_mem(0 ,x,y-1,pitch)];
f1 = gA[buff_mem(1 ,x,y-1,pitch)];
f3 = gA[buff_mem(3 ,x,y-1,pitch)];
f2 = gA[buff_mem(2 ,x,y-1,pitch)];
f5 = gA[buff_mem(5 ,x,y-1,pitch)];
f6 = gA[buff_mem(6 ,x,y-1,pitch)];
f4 = gA[buff_mem(4 ,x,y-1,pitch)];
f7 = gA[buff_mem(7 ,x,y-1,pitch)];
f8 = gA[buff_mem(8 ,x,y-1,pitch)];
f9 = gA[buff_mem(9 ,x,y-1,pitch)];
f10= gA[buff_mem(10,x,y-1,pitch)];
f11= gA[buff_mem(11,x,y-1,pitch)];
f12= gA[buff_mem(12,x,y-1,pitch)];
f13= gA[buff_mem(13,x,y-1,pitch)];
f14= gA[buff_mem(14,x,y-1,pitch)];
f15= gA[buff_mem(15,x,y-1,pitch)];
f16= gA[buff_mem(16,x,y-1,pitch)];
f17= gA[buff_mem(17,x,y-1,pitch)];
f18= gA[buff_mem(18,x,y-1,pitch)];
North_Extrap(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,1.0f);
}
if(im == 200)//south inlet
{
f0 = gA[buff_mem(0 ,x,y+1,pitch)];
f1 = gA[buff_mem(1 ,x,y+1,pitch)];
f3 = gA[buff_mem(3 ,x,y+1,pitch)];
f2 = gA[buff_mem(2 ,x,y+1,pitch)];
f5 = gA[buff_mem(5 ,x,y+1,pitch)];
f6 = gA[buff_mem(6 ,x,y+1,pitch)];
f4 = gA[buff_mem(4 ,x,y+1,pitch)];
f7 = gA[buff_mem(7 ,x,y+1,pitch)];
f8 = gA[buff_mem(8 ,x,y+1,pitch)];
f9 = gA[buff_mem(9 ,x,y+1,pitch)];
f10= gA[buff_mem(10,x,y+1,pitch)];
f11= gA[buff_mem(11,x,y+1,pitch)];
f12= gA[buff_mem(12,x,y+1,pitch)];
f13= gA[buff_mem(13,x,y+1,pitch)];
f14= gA[buff_mem(14,x,y+1,pitch)];
f15= gA[buff_mem(15,x,y+1,pitch)];
f16= gA[buff_mem(16,x,y+1,pitch)];
f17= gA[buff_mem(17,x,y+1,pitch)];
f18= gA[buff_mem(18,x,y+1,pitch)];
South_Extrap(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,UMAX);
}
boundaries(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,GPU*(zInner+2),im);
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
gB[buff_mem(0 ,x,y,pitch)] = f0 ;
gB[buff_mem(1 ,x,y,pitch)] = f1 ;
gB[buff_mem(2 ,x,y,pitch)] = f2 ;
gB[buff_mem(3 ,x,y,pitch)] = f3 ;
gB[buff_mem(4 ,x,y,pitch)] = f4 ;
gB[buff_mem(5 ,x,y,pitch)] = f5 ;
gB[buff_mem(6 ,x,y,pitch)] = f6 ;
gB[buff_mem(7 ,x,y,pitch)] = f7 ;
gB[buff_mem(8 ,x,y,pitch)] = f8 ;
gB[buff_mem(9 ,x,y,pitch)] = f9 ;
gB[buff_mem(10,x,y,pitch)] = f10;
gB[buff_mem(11,x,y,pitch)] = f11;
gB[buff_mem(12,x,y,pitch)] = f12;
gB[buff_mem(13,x,y,pitch)] = f13;
gB[buff_mem(14,x,y,pitch)] = f14;
gB[buff_mem(15,x,y,pitch)] = f15;
gB[buff_mem(16,x,y,pitch)] = f16;
gB[buff_mem(17,x,y,pitch)] = f17;
gB[buff_mem(18,x,y,pitch)] = f18;
}
}
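//update_top: counterpart of update_bottom for the top boundary slice hA/hB. Populations
//pulled from below come from the last interior slice of f, and those pulled from above
//come from the exchange buffer 'temp'.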
__global__ void update_top(float* hA, float* hB, float* f, float* temp,
float omega, size_t pitch, int GPU, int zInner)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
//int z = (zInner+2)-1;//coord in GPU
int j = x+y*pitch;//index on padded mem (pitch in elements)
int im = ImageFcn(x,y,(GPU+1)*(zInner+2)-1);
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
f0 = hA[j];
f1 = hA [buff_mem(1 ,x-1,y ,pitch)];
f3 = hA [buff_mem(3 ,x+1,y ,pitch)];
f2 = hA [buff_mem(2 ,x ,y-1,pitch)];
f5 = hA [buff_mem(5 ,x-1,y-1,pitch)];
f6 = hA [buff_mem(6 ,x+1,y-1,pitch)];
f4 = hA [buff_mem(4 ,x ,y+1,pitch)];
f7 = hA [buff_mem(7 ,x+1,y+1,pitch)];
f8 = hA [buff_mem(8 ,x-1,y+1,pitch)];
f9 = f [f_mem (9 ,x ,y ,zInner-1,pitch, zInner)];
f10= f [f_mem (10,x-1,y ,zInner-1,pitch, zInner)];
f11= f [f_mem (11,x ,y-1,zInner-1,pitch, zInner)];
f12= f [f_mem (12,x+1,y ,zInner-1,pitch, zInner)];
f13= f [f_mem (13,x ,y+1,zInner-1,pitch, zInner)];
f14= temp[buff_mem(14,x ,y ,pitch)];
f15= temp[buff_mem(15,x-1,y ,pitch)];
f16= temp[buff_mem(16,x ,y-1,pitch)];
f17= temp[buff_mem(17,x+1,y ,pitch)];
f18= temp[buff_mem(18,x ,y+1,pitch)];
if(im == 1 || im ==10){//BB
hB[buff_mem(0 ,x,y,pitch)] = f0 ;
hB[buff_mem(1 ,x,y,pitch)] = f3 ;
hB[buff_mem(2 ,x,y,pitch)] = f4 ;
hB[buff_mem(3 ,x,y,pitch)] = f1 ;
hB[buff_mem(4 ,x,y,pitch)] = f2 ;
hB[buff_mem(5 ,x,y,pitch)] = f7 ;
hB[buff_mem(6 ,x,y,pitch)] = f8 ;
hB[buff_mem(7 ,x,y,pitch)] = f5 ;
hB[buff_mem(8 ,x,y,pitch)] = f6 ;
hB[buff_mem(9 ,x,y,pitch)] = f14;
hB[buff_mem(10,x,y,pitch)] = f17;
hB[buff_mem(11,x,y,pitch)] = f18;
hB[buff_mem(12,x,y,pitch)] = f15;
hB[buff_mem(13,x,y,pitch)] = f16;
hB[buff_mem(14,x,y,pitch)] = f9 ;
hB[buff_mem(15,x,y,pitch)] = f12;
hB[buff_mem(16,x,y,pitch)] = f13;
hB[buff_mem(17,x,y,pitch)] = f10;
hB[buff_mem(18,x,y,pitch)] = f11;
}
else{
if(im == 100)//north outlet
{
f0 = hA[buff_mem(0 ,x,y-1,pitch)];
f1 = hA[buff_mem(1 ,x,y-1,pitch)];
f3 = hA[buff_mem(3 ,x,y-1,pitch)];
f2 = hA[buff_mem(2 ,x,y-1,pitch)];
f5 = hA[buff_mem(5 ,x,y-1,pitch)];
f6 = hA[buff_mem(6 ,x,y-1,pitch)];
f4 = hA[buff_mem(4 ,x,y-1,pitch)];
f7 = hA[buff_mem(7 ,x,y-1,pitch)];
f8 = hA[buff_mem(8 ,x,y-1,pitch)];
f9 = hA[buff_mem(9 ,x,y-1,pitch)];
f10= hA[buff_mem(10,x,y-1,pitch)];
f11= hA[buff_mem(11,x,y-1,pitch)];
f12= hA[buff_mem(12,x,y-1,pitch)];
f13= hA[buff_mem(13,x,y-1,pitch)];
f14= hA[buff_mem(14,x,y-1,pitch)];
f15= hA[buff_mem(15,x,y-1,pitch)];
f16= hA[buff_mem(16,x,y-1,pitch)];
f17= hA[buff_mem(17,x,y-1,pitch)];
f18= hA[buff_mem(18,x,y-1,pitch)];
North_Extrap(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,1.0f);
}
if(im == 200)//south inlet
{
f0 = hA[buff_mem(0 ,x,y+1,pitch)];
f1 = hA[buff_mem(1 ,x,y+1,pitch)];
f3 = hA[buff_mem(3 ,x,y+1,pitch)];
f2 = hA[buff_mem(2 ,x,y+1,pitch)];
f5 = hA[buff_mem(5 ,x,y+1,pitch)];
f6 = hA[buff_mem(6 ,x,y+1,pitch)];
f4 = hA[buff_mem(4 ,x,y+1,pitch)];
f7 = hA[buff_mem(7 ,x,y+1,pitch)];
f8 = hA[buff_mem(8 ,x,y+1,pitch)];
f9 = hA[buff_mem(9 ,x,y+1,pitch)];
f10= hA[buff_mem(10,x,y+1,pitch)];
f11= hA[buff_mem(11,x,y+1,pitch)];
f12= hA[buff_mem(12,x,y+1,pitch)];
f13= hA[buff_mem(13,x,y+1,pitch)];
f14= hA[buff_mem(14,x,y+1,pitch)];
f15= hA[buff_mem(15,x,y+1,pitch)];
f16= hA[buff_mem(16,x,y+1,pitch)];
f17= hA[buff_mem(17,x,y+1,pitch)];
f18= hA[buff_mem(18,x,y+1,pitch)];
South_Extrap(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,UMAX);
}
boundaries(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,(GPU+1)*(zInner+2)-1,im);
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
hB[buff_mem(0 ,x,y,pitch)] = f0 ;
hB[buff_mem(1 ,x,y,pitch)] = f1 ;
hB[buff_mem(2 ,x,y,pitch)] = f2 ;
hB[buff_mem(3 ,x,y,pitch)] = f3 ;
hB[buff_mem(4 ,x,y,pitch)] = f4 ;
hB[buff_mem(5 ,x,y,pitch)] = f5 ;
hB[buff_mem(6 ,x,y,pitch)] = f6 ;
hB[buff_mem(7 ,x,y,pitch)] = f7 ;
hB[buff_mem(8 ,x,y,pitch)] = f8 ;
hB[buff_mem(9 ,x,y,pitch)] = f9 ;
hB[buff_mem(10,x,y,pitch)] = f10;
hB[buff_mem(11,x,y,pitch)] = f11;
hB[buff_mem(12,x,y,pitch)] = f12;
hB[buff_mem(13,x,y,pitch)] = f13;
hB[buff_mem(14,x,y,pitch)] = f14;
hB[buff_mem(15,x,y,pitch)] = f15;
hB[buff_mem(16,x,y,pitch)] = f16;
hB[buff_mem(17,x,y,pitch)] = f17;
hB[buff_mem(18,x,y,pitch)] = f18;
}
}
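//update_inner_force: same update as update_inner, but additionally accumulates the
//momentum-exchange force on solid nodes flagged im==10. Each such thread sums 2*f_i*e_i
//over the bounced-back populations into shared memory; a halving tree reduction over
//threadIdx.x (which assumes the x block size is a power of two) collapses the partial
//sums, and thread 0 adds the block total to FX/FY/FZ[Force_t] with atomicAdd.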
__global__ void update_inner_force(float* fA, float* fB, float* g, float* h,
float omega, size_t pitch, int GPU, int zInner,
float *FX, float *FY, float *FZ, int Force_t)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements)
int im = ImageFcn(x,y,GPU*(zInner+2)+1+z);
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
__shared__ float sumX[BLOCKSIZEX], sumY[BLOCKSIZEX], sumZ[BLOCKSIZEX];
__shared__ int check[1];
check[0] = 0;
syncthreads();
f0 = fA[j];
f1 = fA[f_mem (1 ,x-1,y ,z ,pitch, zInner)];
f3 = fA[f_mem (3 ,x+1,y ,z ,pitch, zInner)];
f2 = fA[f_mem (2 ,x ,y-1,z ,pitch, zInner)];
f5 = fA[f_mem (5 ,x-1,y-1,z ,pitch, zInner)];
f6 = fA[f_mem (6 ,x+1,y-1,z ,pitch, zInner)];
f4 = fA[f_mem (4 ,x ,y+1,z ,pitch, zInner)];
f7 = fA[f_mem (7 ,x+1,y+1,z ,pitch, zInner)];
f8 = fA[f_mem (8 ,x-1,y+1,z ,pitch, zInner)];
if(z==zInner-1){//top nodes need info from h
f9 = fA[f_mem (9 ,x ,y ,z-1,pitch, zInner)];
f10= fA[f_mem (10,x-1,y ,z-1,pitch, zInner)];
f11= fA[f_mem (11,x ,y-1,z-1,pitch, zInner)];
f12= fA[f_mem (12,x+1,y ,z-1,pitch, zInner)];
f13= fA[f_mem (13,x ,y+1,z-1,pitch, zInner)];
f14= h [buff_mem(14,x ,y ,pitch)];
f15= h [buff_mem(15,x-1,y ,pitch)];
f16= h [buff_mem(16,x ,y-1,pitch)];
f17= h [buff_mem(17,x+1,y ,pitch)];
f18= h [buff_mem(18,x ,y+1,pitch)];
}
else if(z==0){//bottom nodes need info from g
f9 = g [buff_mem(9 ,x ,y ,pitch)];
f10= g [buff_mem(10,x-1,y ,pitch)];
f11= g [buff_mem(11,x ,y-1,pitch)];
f12= g [buff_mem(12,x+1,y ,pitch)];
f13= g [buff_mem(13,x ,y+1,pitch)];
f14= fA[f_mem (14,x ,y ,z+1,pitch, zInner)];
f15= fA[f_mem (15,x-1,y ,z+1,pitch, zInner)];
f16= fA[f_mem (16,x ,y-1,z+1,pitch, zInner)];
f17= fA[f_mem (17,x+1,y ,z+1,pitch, zInner)];
f18= fA[f_mem (18,x ,y+1,z+1,pitch, zInner)];
}
else{//normal nodes
f9 = fA[f_mem(9 ,x ,y ,z-1,pitch,zInner)];
f10= fA[f_mem(10,x-1,y ,z-1,pitch,zInner)];
f11= fA[f_mem(11,x ,y-1,z-1,pitch,zInner)];
f12= fA[f_mem(12,x+1,y ,z-1,pitch,zInner)];
f13= fA[f_mem(13,x ,y+1,z-1,pitch,zInner)];
f14= fA[f_mem(14,x ,y ,z+1,pitch,zInner)];
f15= fA[f_mem(15,x-1,y ,z+1,pitch,zInner)];
f16= fA[f_mem(16,x ,y-1,z+1,pitch,zInner)];
f17= fA[f_mem(17,x+1,y ,z+1,pitch,zInner)];
f18= fA[f_mem(18,x ,y+1,z+1,pitch,zInner)];
}//end normal nodes
if(im == 1 || im ==10){//BB
if(im == 10){
check[0] = 1;
sumX[threadIdx.x]=2.f*f1-2.f*f3+2.f*f5+2.f*f8-2.f*f6;//-2.f*f7+2.f*f10-2.f*f12+2.f*f15-2.f*f17;
sumX[threadIdx.x]+=-2.f*f7+2.f*f10-2.f*f12+2.f*f15-2.f*f17;
sumY[threadIdx.x]=2.f*f2-2.f*f4+2.f*f5-2.f*f8+2.f*f6;//-2.f*f7+2.f*f11-2.f*f13+2.f*f16-2.f*f18;
sumY[threadIdx.x]+=-2.f*f7+2.f*f11-2.f*f13+2.f*f16-2.f*f18;
sumZ[threadIdx.x]=2.f*f9+2.f*f10+2.f*f11+2.f*f12+2.f*f13;//-2.f*f14-2.f*f15-2.f*f16-2.f*f17-2.f*f18;
sumZ[threadIdx.x]+=-2.f*f14-2.f*f15-2.f*f16-2.f*f17-2.f*f18;
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
}
fB[f_mem(0 ,x,y,z,pitch,zInner)] = f0 ;//f0 is unchanged by bounce-back; copy it so fB is complete (update_bottom/update_top do this too)
fB[f_mem(1 ,x,y,z,pitch,zInner)] = f3 ;
fB[f_mem(2 ,x,y,z,pitch,zInner)] = f4 ;
fB[f_mem(3 ,x,y,z,pitch,zInner)] = f1 ;
fB[f_mem(4 ,x,y,z,pitch,zInner)] = f2 ;
fB[f_mem(5 ,x,y,z,pitch,zInner)] = f7 ;
fB[f_mem(6 ,x,y,z,pitch,zInner)] = f8 ;
fB[f_mem(7 ,x,y,z,pitch,zInner)] = f5 ;
fB[f_mem(8 ,x,y,z,pitch,zInner)] = f6 ;
fB[f_mem(9 ,x,y,z,pitch,zInner)] = f14;
fB[f_mem(10,x,y,z,pitch,zInner)] = f17;
fB[f_mem(11,x,y,z,pitch,zInner)] = f18;
fB[f_mem(12,x,y,z,pitch,zInner)] = f15;
fB[f_mem(13,x,y,z,pitch,zInner)] = f16;
fB[f_mem(14,x,y,z,pitch,zInner)] = f9 ;
fB[f_mem(15,x,y,z,pitch,zInner)] = f12;
fB[f_mem(16,x,y,z,pitch,zInner)] = f13;
fB[f_mem(17,x,y,z,pitch,zInner)] = f10;
fB[f_mem(18,x,y,z,pitch,zInner)] = f11;
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
if(im == 100)//north outlet
{
f0 = fA[f_mem(0 ,x,y-1,z,pitch,zInner)];
f1 = fA[f_mem(1 ,x,y-1,z,pitch,zInner)];
f3 = fA[f_mem(3 ,x,y-1,z,pitch,zInner)];
f2 = fA[f_mem(2 ,x,y-1,z,pitch,zInner)];
f5 = fA[f_mem(5 ,x,y-1,z,pitch,zInner)];
f6 = fA[f_mem(6 ,x,y-1,z,pitch,zInner)];
f4 = fA[f_mem(4 ,x,y-1,z,pitch,zInner)];
f7 = fA[f_mem(7 ,x,y-1,z,pitch,zInner)];
f8 = fA[f_mem(8 ,x,y-1,z,pitch,zInner)];
f9 = fA[f_mem(9 ,x,y-1,z,pitch,zInner)];
f10= fA[f_mem(10,x,y-1,z,pitch,zInner)];
f11= fA[f_mem(11,x,y-1,z,pitch,zInner)];
f12= fA[f_mem(12,x,y-1,z,pitch,zInner)];
f13= fA[f_mem(13,x,y-1,z,pitch,zInner)];
f14= fA[f_mem(14,x,y-1,z,pitch,zInner)];
f15= fA[f_mem(15,x,y-1,z,pitch,zInner)];
f16= fA[f_mem(16,x,y-1,z,pitch,zInner)];
f17= fA[f_mem(17,x,y-1,z,pitch,zInner)];
f18= fA[f_mem(18,x,y-1,z,pitch,zInner)];
North_Extrap(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,1.0f);
}
if(im == 200)//south inlet
{
f0 = fA[f_mem(0 ,x,y+1,z,pitch,zInner)];
f1 = fA[f_mem(1 ,x,y+1,z,pitch,zInner)];
f3 = fA[f_mem(3 ,x,y+1,z,pitch,zInner)];
f2 = fA[f_mem(2 ,x,y+1,z,pitch,zInner)];
f5 = fA[f_mem(5 ,x,y+1,z,pitch,zInner)];
f6 = fA[f_mem(6 ,x,y+1,z,pitch,zInner)];
f4 = fA[f_mem(4 ,x,y+1,z,pitch,zInner)];
f7 = fA[f_mem(7 ,x,y+1,z,pitch,zInner)];
f8 = fA[f_mem(8 ,x,y+1,z,pitch,zInner)];
f9 = fA[f_mem(9 ,x,y+1,z,pitch,zInner)];
f10= fA[f_mem(10,x,y+1,z,pitch,zInner)];
f11= fA[f_mem(11,x,y+1,z,pitch,zInner)];
f12= fA[f_mem(12,x,y+1,z,pitch,zInner)];
f13= fA[f_mem(13,x,y+1,z,pitch,zInner)];
f14= fA[f_mem(14,x,y+1,z,pitch,zInner)];
f15= fA[f_mem(15,x,y+1,z,pitch,zInner)];
f16= fA[f_mem(16,x,y+1,z,pitch,zInner)];
f17= fA[f_mem(17,x,y+1,z,pitch,zInner)];
f18= fA[f_mem(18,x,y+1,z,pitch,zInner)];
South_Extrap(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,UMAX);
}
boundaries(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,GPU*(zInner+2)+1+z,im);
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
fB[f_mem(0 ,x,y,z,pitch,zInner)] = f0 ;
fB[f_mem(1 ,x,y,z,pitch,zInner)] = f1 ;
fB[f_mem(2 ,x,y,z,pitch,zInner)] = f2 ;
fB[f_mem(3 ,x,y,z,pitch,zInner)] = f3 ;
fB[f_mem(4 ,x,y,z,pitch,zInner)] = f4 ;
fB[f_mem(5 ,x,y,z,pitch,zInner)] = f5 ;
fB[f_mem(6 ,x,y,z,pitch,zInner)] = f6 ;
fB[f_mem(7 ,x,y,z,pitch,zInner)] = f7 ;
fB[f_mem(8 ,x,y,z,pitch,zInner)] = f8 ;
fB[f_mem(9 ,x,y,z,pitch,zInner)] = f9 ;
fB[f_mem(10,x,y,z,pitch,zInner)] = f10;
fB[f_mem(11,x,y,z,pitch,zInner)] = f11;
fB[f_mem(12,x,y,z,pitch,zInner)] = f12;
fB[f_mem(13,x,y,z,pitch,zInner)] = f13;
fB[f_mem(14,x,y,z,pitch,zInner)] = f14;
fB[f_mem(15,x,y,z,pitch,zInner)] = f15;
fB[f_mem(16,x,y,z,pitch,zInner)] = f16;
fB[f_mem(17,x,y,z,pitch,zInner)] = f17;
fB[f_mem(18,x,y,z,pitch,zInner)] = f18;
}
syncthreads();
if(check[0] == 1){
//reduction for force
int nTotalThreads = blockDim.x;
while(nTotalThreads > 1){
int halfPoint = (nTotalThreads >> 1);
if(threadIdx.x < halfPoint){
sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint];
sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint];
sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint];
}
syncthreads();
nTotalThreads = halfPoint;
}
if(threadIdx.x == 0){
atomicAdd(&FX[Force_t],sumX[0]);
atomicAdd(&FY[Force_t],sumY[0]);
atomicAdd(&FZ[Force_t],sumZ[0]);
}
}
}
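//update_bottom_force: one time step for the bottom buffer slice (g) of this GPU's coarse sub-domain.
//In-plane neighbours stream from the buffer itself; out-of-plane neighbours come from the temp buffer and
//the first interior slice of f. Solid nodes (im==1,10) bounce back, other nodes apply boundary conditions
//plus MRT/BGK collision; im==10 nodes accumulate a momentum-exchange force that is block-reduced and
//atomically added to FX/FY/FZ.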
__global__ void update_bottom_force(float* gA, float* gB, float* f, float* temp,
float omega, size_t pitch, int GPU, int zInner,
float *FX, float *FY, float *FZ, int Force_t)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int j = x+y*pitch;//index on padded mem (pitch in elements)
int im = ImageFcn(x,y,GPU*(zInner+2));
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
__shared__ float sumX[BLOCKSIZEX], sumY[BLOCKSIZEX], sumZ[BLOCKSIZEX];
__shared__ int check[1];
check[0] = 0;
syncthreads();
f0 = gA [j];
f1 = gA [buff_mem(1 ,x-1,y ,pitch)];
f3 = gA [buff_mem(3 ,x+1,y ,pitch)];
f2 = gA [buff_mem(2 ,x ,y-1,pitch)];
f5 = gA [buff_mem(5 ,x-1,y-1,pitch)];
f6 = gA [buff_mem(6 ,x+1,y-1,pitch)];
f4 = gA [buff_mem(4 ,x ,y+1,pitch)];
f7 = gA [buff_mem(7 ,x+1,y+1,pitch)];
f8 = gA [buff_mem(8 ,x-1,y+1,pitch)];
f9 = temp[buff_mem(9 ,x ,y ,pitch)];
f10= temp[buff_mem(10,x-1,y ,pitch)];
f11= temp[buff_mem(11,x ,y-1,pitch)];
f12= temp[buff_mem(12,x+1,y ,pitch)];
f13= temp[buff_mem(13,x ,y+1,pitch)];
f14= f [f_mem (14,x ,y ,0,pitch, zInner)];
f15= f [f_mem (15,x-1,y ,0,pitch, zInner)];
f16= f [f_mem (16,x ,y-1,0,pitch, zInner)];
f17= f [f_mem (17,x+1,y ,0,pitch, zInner)];
f18= f [f_mem (18,x ,y+1,0,pitch, zInner)];
if(im == 1 || im ==10){//BB
if(im == 10){
check[0] = 1;
sumX[threadIdx.x] =2.f*f1-2.f*f3+2.f*f5+2.f*f8-2.f*f6;//-2.f*f7+2.f*f10-2.f*f12+2.f*f15-2.f*f17;
sumX[threadIdx.x]+=-2.f*f7+2.f*f10-2.f*f12+2.f*f15-2.f*f17;
sumY[threadIdx.x] =2.f*f2-2.f*f4+2.f*f5-2.f*f8+2.f*f6;//-2.f*f7+2.f*f11-2.f*f13+2.f*f16-2.f*f18;
sumY[threadIdx.x]+=-2.f*f7+2.f*f11-2.f*f13+2.f*f16-2.f*f18;
sumZ[threadIdx.x] =2.f*f9+2.f*f10+2.f*f11+2.f*f12+2.f*f13;//-2.f*f14-2.f*f15-2.f*f16-2.f*f17-2.f*f18;
sumZ[threadIdx.x]+=-2.f*f14-2.f*f15-2.f*f16-2.f*f17-2.f*f18;
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
}
gB[buff_mem(0 ,x,y,pitch)] = f0 ;
gB[buff_mem(1 ,x,y,pitch)] = f3 ;
gB[buff_mem(2 ,x,y,pitch)] = f4 ;
gB[buff_mem(3 ,x,y,pitch)] = f1 ;
gB[buff_mem(4 ,x,y,pitch)] = f2 ;
gB[buff_mem(5 ,x,y,pitch)] = f7 ;
gB[buff_mem(6 ,x,y,pitch)] = f8 ;
gB[buff_mem(7 ,x,y,pitch)] = f5 ;
gB[buff_mem(8 ,x,y,pitch)] = f6 ;
gB[buff_mem(9 ,x,y,pitch)] = f14;
gB[buff_mem(10,x,y,pitch)] = f17;
gB[buff_mem(11,x,y,pitch)] = f18;
gB[buff_mem(12,x,y,pitch)] = f15;
gB[buff_mem(13,x,y,pitch)] = f16;
gB[buff_mem(14,x,y,pitch)] = f9 ;
gB[buff_mem(15,x,y,pitch)] = f12;
gB[buff_mem(16,x,y,pitch)] = f13;
gB[buff_mem(17,x,y,pitch)] = f10;
gB[buff_mem(18,x,y,pitch)] = f11;
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
if(im == 100)//north outlet
{
f0 = gA[buff_mem(0 ,x,y-1,pitch)];
f1 = gA[buff_mem(1 ,x,y-1,pitch)];
f3 = gA[buff_mem(3 ,x,y-1,pitch)];
f2 = gA[buff_mem(2 ,x,y-1,pitch)];
f5 = gA[buff_mem(5 ,x,y-1,pitch)];
f6 = gA[buff_mem(6 ,x,y-1,pitch)];
f4 = gA[buff_mem(4 ,x,y-1,pitch)];
f7 = gA[buff_mem(7 ,x,y-1,pitch)];
f8 = gA[buff_mem(8 ,x,y-1,pitch)];
f9 = gA[buff_mem(9 ,x,y-1,pitch)];
f10= gA[buff_mem(10,x,y-1,pitch)];
f11= gA[buff_mem(11,x,y-1,pitch)];
f12= gA[buff_mem(12,x,y-1,pitch)];
f13= gA[buff_mem(13,x,y-1,pitch)];
f14= gA[buff_mem(14,x,y-1,pitch)];
f15= gA[buff_mem(15,x,y-1,pitch)];
f16= gA[buff_mem(16,x,y-1,pitch)];
f17= gA[buff_mem(17,x,y-1,pitch)];
f18= gA[buff_mem(18,x,y-1,pitch)];
North_Extrap(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,1.0f);
}
if(im == 200)//south inlet
{
f0 = gA[buff_mem(0 ,x,y+1,pitch)];
f1 = gA[buff_mem(1 ,x,y+1,pitch)];
f3 = gA[buff_mem(3 ,x,y+1,pitch)];
f2 = gA[buff_mem(2 ,x,y+1,pitch)];
f5 = gA[buff_mem(5 ,x,y+1,pitch)];
f6 = gA[buff_mem(6 ,x,y+1,pitch)];
f4 = gA[buff_mem(4 ,x,y+1,pitch)];
f7 = gA[buff_mem(7 ,x,y+1,pitch)];
f8 = gA[buff_mem(8 ,x,y+1,pitch)];
f9 = gA[buff_mem(9 ,x,y+1,pitch)];
f10= gA[buff_mem(10,x,y+1,pitch)];
f11= gA[buff_mem(11,x,y+1,pitch)];
f12= gA[buff_mem(12,x,y+1,pitch)];
f13= gA[buff_mem(13,x,y+1,pitch)];
f14= gA[buff_mem(14,x,y+1,pitch)];
f15= gA[buff_mem(15,x,y+1,pitch)];
f16= gA[buff_mem(16,x,y+1,pitch)];
f17= gA[buff_mem(17,x,y+1,pitch)];
f18= gA[buff_mem(18,x,y+1,pitch)];
South_Extrap(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,UMAX);
}
boundaries(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,GPU*(zInner+2),im);
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
gB[buff_mem(0 ,x,y,pitch)] = f0 ;
gB[buff_mem(1 ,x,y,pitch)] = f1 ;
gB[buff_mem(2 ,x,y,pitch)] = f2 ;
gB[buff_mem(3 ,x,y,pitch)] = f3 ;
gB[buff_mem(4 ,x,y,pitch)] = f4 ;
gB[buff_mem(5 ,x,y,pitch)] = f5 ;
gB[buff_mem(6 ,x,y,pitch)] = f6 ;
gB[buff_mem(7 ,x,y,pitch)] = f7 ;
gB[buff_mem(8 ,x,y,pitch)] = f8 ;
gB[buff_mem(9 ,x,y,pitch)] = f9 ;
gB[buff_mem(10,x,y,pitch)] = f10;
gB[buff_mem(11,x,y,pitch)] = f11;
gB[buff_mem(12,x,y,pitch)] = f12;
gB[buff_mem(13,x,y,pitch)] = f13;
gB[buff_mem(14,x,y,pitch)] = f14;
gB[buff_mem(15,x,y,pitch)] = f15;
gB[buff_mem(16,x,y,pitch)] = f16;
gB[buff_mem(17,x,y,pitch)] = f17;
gB[buff_mem(18,x,y,pitch)] = f18;
}
syncthreads();
if(check[0] == 1){
//reduction for force
int nTotalThreads = blockDim.x;
while(nTotalThreads > 1){
int halfPoint = (nTotalThreads >> 1);
if(threadIdx.x < halfPoint){
sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint];
sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint];
sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint];
}
syncthreads();
nTotalThreads = halfPoint;
}
if(threadIdx.x == 0){
atomicAdd(&FX[Force_t],sumX[0]);
atomicAdd(&FY[Force_t],sumY[0]);
atomicAdd(&FZ[Force_t],sumZ[0]);
}
}
}
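//update_top_force: same as update_bottom_force, but for the top buffer slice (h) of this GPU's coarse
//sub-domain; out-of-plane neighbours come from the last interior slice of f and from the temp buffer.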
__global__ void update_top_force(float* hA, float* hB, float* f, float* temp,
float omega, size_t pitch, int GPU, int zInner,//pitch in elements
float *FX, float *FY, float *FZ, int Force_t)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
//int z = (zInner+2)-1;//coord in GPU
int j = x+y*pitch;//index on padded mem (pitch in elements)
int im = ImageFcn(x,y,(GPU+1)*(zInner+2)-1);
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
__shared__ float sumX[BLOCKSIZEX], sumY[BLOCKSIZEX], sumZ[BLOCKSIZEX];
__shared__ int check[1];
check[0] = 0;
syncthreads();
f0 = hA[j];
f1 = hA [buff_mem(1 ,x-1,y ,pitch)];
f3 = hA [buff_mem(3 ,x+1,y ,pitch)];
f2 = hA [buff_mem(2 ,x ,y-1,pitch)];
f5 = hA [buff_mem(5 ,x-1,y-1,pitch)];
f6 = hA [buff_mem(6 ,x+1,y-1,pitch)];
f4 = hA [buff_mem(4 ,x ,y+1,pitch)];
f7 = hA [buff_mem(7 ,x+1,y+1,pitch)];
f8 = hA [buff_mem(8 ,x-1,y+1,pitch)];
f9 = f [f_mem (9 ,x ,y ,zInner-1,pitch, zInner)];
f10= f [f_mem (10,x-1,y ,zInner-1,pitch, zInner)];
f11= f [f_mem (11,x ,y-1,zInner-1,pitch, zInner)];
f12= f [f_mem (12,x+1,y ,zInner-1,pitch, zInner)];
f13= f [f_mem (13,x ,y+1,zInner-1,pitch, zInner)];
f14= temp[buff_mem(14,x ,y ,pitch)];
f15= temp[buff_mem(15,x-1,y ,pitch)];
f16= temp[buff_mem(16,x ,y-1,pitch)];
f17= temp[buff_mem(17,x+1,y ,pitch)];
f18= temp[buff_mem(18,x ,y+1,pitch)];
if(im == 1 || im ==10){//BB
if(im == 10){
check[0] = 1;
sumX[threadIdx.x] =2.f*f1-2.f*f3+2.f*f5+2.f*f8-2.f*f6;//-2.f*f7+2.f*f10-2.f*f12+2.f*f15-2.f*f17;
sumX[threadIdx.x]+=-2.f*f7+2.f*f10-2.f*f12+2.f*f15-2.f*f17;
sumY[threadIdx.x] =2.f*f2-2.f*f4+2.f*f5-2.f*f8+2.f*f6;//-2.f*f7+2.f*f11-2.f*f13+2.f*f16-2.f*f18;
sumY[threadIdx.x]+=-2.f*f7+2.f*f11-2.f*f13+2.f*f16-2.f*f18;
sumZ[threadIdx.x] =2.f*f9+2.f*f10+2.f*f11+2.f*f12+2.f*f13;//-2.f*f14-2.f*f15-2.f*f16-2.f*f17-2.f*f18;
sumZ[threadIdx.x]+=-2.f*f14-2.f*f15-2.f*f16-2.f*f17-2.f*f18;
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
}
hB[buff_mem(0 ,x,y,pitch)] = f0 ;
hB[buff_mem(1 ,x,y,pitch)] = f3 ;
hB[buff_mem(2 ,x,y,pitch)] = f4 ;
hB[buff_mem(3 ,x,y,pitch)] = f1 ;
hB[buff_mem(4 ,x,y,pitch)] = f2 ;
hB[buff_mem(5 ,x,y,pitch)] = f7 ;
hB[buff_mem(6 ,x,y,pitch)] = f8 ;
hB[buff_mem(7 ,x,y,pitch)] = f5 ;
hB[buff_mem(8 ,x,y,pitch)] = f6 ;
hB[buff_mem(9 ,x,y,pitch)] = f14;
hB[buff_mem(10,x,y,pitch)] = f17;
hB[buff_mem(11,x,y,pitch)] = f18;
hB[buff_mem(12,x,y,pitch)] = f15;
hB[buff_mem(13,x,y,pitch)] = f16;
hB[buff_mem(14,x,y,pitch)] = f9 ;
hB[buff_mem(15,x,y,pitch)] = f12;
hB[buff_mem(16,x,y,pitch)] = f13;
hB[buff_mem(17,x,y,pitch)] = f10;
hB[buff_mem(18,x,y,pitch)] = f11;
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
if(im == 100)//north outlet
{
f0 = hA[buff_mem(0 ,x,y-1,pitch)];
f1 = hA[buff_mem(1 ,x,y-1,pitch)];
f3 = hA[buff_mem(3 ,x,y-1,pitch)];
f2 = hA[buff_mem(2 ,x,y-1,pitch)];
f5 = hA[buff_mem(5 ,x,y-1,pitch)];
f6 = hA[buff_mem(6 ,x,y-1,pitch)];
f4 = hA[buff_mem(4 ,x,y-1,pitch)];
f7 = hA[buff_mem(7 ,x,y-1,pitch)];
f8 = hA[buff_mem(8 ,x,y-1,pitch)];
f9 = hA[buff_mem(9 ,x,y-1,pitch)];
f10= hA[buff_mem(10,x,y-1,pitch)];
f11= hA[buff_mem(11,x,y-1,pitch)];
f12= hA[buff_mem(12,x,y-1,pitch)];
f13= hA[buff_mem(13,x,y-1,pitch)];
f14= hA[buff_mem(14,x,y-1,pitch)];
f15= hA[buff_mem(15,x,y-1,pitch)];
f16= hA[buff_mem(16,x,y-1,pitch)];
f17= hA[buff_mem(17,x,y-1,pitch)];
f18= hA[buff_mem(18,x,y-1,pitch)];
North_Extrap(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,1.0f);
}
if(im == 200)//south inlet
{
f0 = hA[buff_mem(0 ,x,y+1,pitch)];
f1 = hA[buff_mem(1 ,x,y+1,pitch)];
f3 = hA[buff_mem(3 ,x,y+1,pitch)];
f2 = hA[buff_mem(2 ,x,y+1,pitch)];
f5 = hA[buff_mem(5 ,x,y+1,pitch)];
f6 = hA[buff_mem(6 ,x,y+1,pitch)];
f4 = hA[buff_mem(4 ,x,y+1,pitch)];
f7 = hA[buff_mem(7 ,x,y+1,pitch)];
f8 = hA[buff_mem(8 ,x,y+1,pitch)];
f9 = hA[buff_mem(9 ,x,y+1,pitch)];
f10= hA[buff_mem(10,x,y+1,pitch)];
f11= hA[buff_mem(11,x,y+1,pitch)];
f12= hA[buff_mem(12,x,y+1,pitch)];
f13= hA[buff_mem(13,x,y+1,pitch)];
f14= hA[buff_mem(14,x,y+1,pitch)];
f15= hA[buff_mem(15,x,y+1,pitch)];
f16= hA[buff_mem(16,x,y+1,pitch)];
f17= hA[buff_mem(17,x,y+1,pitch)];
f18= hA[buff_mem(18,x,y+1,pitch)];
South_Extrap(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,UMAX);
}
boundaries(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,(GPU+1)*(zInner+2)-1,im);
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
hB[buff_mem(0 ,x,y,pitch)] = f0 ;
hB[buff_mem(1 ,x,y,pitch)] = f1 ;
hB[buff_mem(2 ,x,y,pitch)] = f2 ;
hB[buff_mem(3 ,x,y,pitch)] = f3 ;
hB[buff_mem(4 ,x,y,pitch)] = f4 ;
hB[buff_mem(5 ,x,y,pitch)] = f5 ;
hB[buff_mem(6 ,x,y,pitch)] = f6 ;
hB[buff_mem(7 ,x,y,pitch)] = f7 ;
hB[buff_mem(8 ,x,y,pitch)] = f8 ;
hB[buff_mem(9 ,x,y,pitch)] = f9 ;
hB[buff_mem(10,x,y,pitch)] = f10;
hB[buff_mem(11,x,y,pitch)] = f11;
hB[buff_mem(12,x,y,pitch)] = f12;
hB[buff_mem(13,x,y,pitch)] = f13;
hB[buff_mem(14,x,y,pitch)] = f14;
hB[buff_mem(15,x,y,pitch)] = f15;
hB[buff_mem(16,x,y,pitch)] = f16;
hB[buff_mem(17,x,y,pitch)] = f17;
hB[buff_mem(18,x,y,pitch)] = f18;
}
syncthreads();
if(check[0] == 1){
//reduction for force
int nTotalThreads = blockDim.x;
while(nTotalThreads > 1){
int halfPoint = (nTotalThreads >> 1);
if(threadIdx.x < halfPoint){
sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint];
sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint];
sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint];
}
syncthreads();
nTotalThreads = halfPoint;
}
if(threadIdx.x == 0){
atomicAdd(&FX[Force_t],sumX[0]);
atomicAdd(&FY[Force_t],sumY[0]);
atomicAdd(&FZ[Force_t],sumZ[0]);
}
}
}
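//update_inner_LR: stream-and-collide update for the interior slices of the refined (LR) grid on this GPU.
//Same structure as the coarse inner kernel: z==0 pulls from the bottom buffer g, z==zInner-1 pulls from the
//top buffer h, solid nodes bounce back, fluid nodes collide with MRT or BGK. No force accumulation here.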
__global__ void update_inner_LR(float* fA, float* fB, float* g, float* h,
float omega, size_t pitch, int GPU, int zInner)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
int j = x+y*pitch+z*YLRDIM*pitch;//index on padded mem (pitch in elements)
int im = ImageFcnLR(LRX0+LRFACTOR*x,LRY0+LRFACTOR*y,LRZ0+LRFACTOR*(GPU*(zInner+2)+1+z));
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
f0 = fA[j];
f1 = fA[f_memLR (1 ,x-1,y ,z ,pitch, zInner)];
f3 = fA[f_memLR (3 ,x+1,y ,z ,pitch, zInner)];
f2 = fA[f_memLR (2 ,x ,y-1,z ,pitch, zInner)];
f5 = fA[f_memLR (5 ,x-1,y-1,z ,pitch, zInner)];
f6 = fA[f_memLR (6 ,x+1,y-1,z ,pitch, zInner)];
f4 = fA[f_memLR (4 ,x ,y+1,z ,pitch, zInner)];
f7 = fA[f_memLR (7 ,x+1,y+1,z ,pitch, zInner)];
f8 = fA[f_memLR (8 ,x-1,y+1,z ,pitch, zInner)];
if(z==zInner-1){//top nodes need info from h
f9 = fA[f_memLR (9 ,x ,y ,z-1,pitch, zInner)];
f10= fA[f_memLR (10,x-1,y ,z-1,pitch, zInner)];
f11= fA[f_memLR (11,x ,y-1,z-1,pitch, zInner)];
f12= fA[f_memLR (12,x+1,y ,z-1,pitch, zInner)];
f13= fA[f_memLR (13,x ,y+1,z-1,pitch, zInner)];
f14= h [buff_memLR(14,x ,y ,pitch)];
f15= h [buff_memLR(15,x-1,y ,pitch)];
f16= h [buff_memLR(16,x ,y-1,pitch)];
f17= h [buff_memLR(17,x+1,y ,pitch)];
f18= h [buff_memLR(18,x ,y+1,pitch)];
}
else if(z==0){//bottom nodes need info from g
f9 = g [buff_memLR(9 ,x ,y ,pitch)];
f10= g [buff_memLR(10,x-1,y ,pitch)];
f11= g [buff_memLR(11,x ,y-1,pitch)];
f12= g [buff_memLR(12,x+1,y ,pitch)];
f13= g [buff_memLR(13,x ,y+1,pitch)];
f14= fA[f_memLR (14,x ,y ,z+1,pitch, zInner)];
f15= fA[f_memLR (15,x-1,y ,z+1,pitch, zInner)];
f16= fA[f_memLR (16,x ,y-1,z+1,pitch, zInner)];
f17= fA[f_memLR (17,x+1,y ,z+1,pitch, zInner)];
f18= fA[f_memLR (18,x ,y+1,z+1,pitch, zInner)];
}
else{//normal nodes
f9 = fA[f_memLR(9 ,x ,y ,z-1,pitch,zInner)];//
f10= fA[f_memLR(10,x-1,y ,z-1,pitch,zInner)];//
f11= fA[f_memLR(11,x ,y-1,z-1,pitch,zInner)];//
f12= fA[f_memLR(12,x+1,y ,z-1,pitch,zInner)];//
f13= fA[f_memLR(13,x ,y+1,z-1,pitch,zInner)];//
f14= fA[f_memLR(14,x ,y ,z+1,pitch,zInner)];//
f15= fA[f_memLR(15,x-1,y ,z+1,pitch,zInner)];//
f16= fA[f_memLR(16,x ,y-1,z+1,pitch,zInner)];//
f17= fA[f_memLR(17,x+1,y ,z+1,pitch,zInner)];//
f18= fA[f_memLR(18,x ,y+1,z+1,pitch,zInner)];//
}//end normal nodes
if(im == 1 || im ==10){//BB
fB[f_memLR(1 ,x,y,z,pitch,zInner)] = f3 ;
fB[f_memLR(2 ,x,y,z,pitch,zInner)] = f4 ;
fB[f_memLR(3 ,x,y,z,pitch,zInner)] = f1 ;
fB[f_memLR(4 ,x,y,z,pitch,zInner)] = f2 ;
fB[f_memLR(5 ,x,y,z,pitch,zInner)] = f7 ;
fB[f_memLR(6 ,x,y,z,pitch,zInner)] = f8 ;
fB[f_memLR(7 ,x,y,z,pitch,zInner)] = f5 ;
fB[f_memLR(8 ,x,y,z,pitch,zInner)] = f6 ;
fB[f_memLR(9 ,x,y,z,pitch,zInner)] = f14;
fB[f_memLR(10,x,y,z,pitch,zInner)] = f17;
fB[f_memLR(11,x,y,z,pitch,zInner)] = f18;
fB[f_memLR(12,x,y,z,pitch,zInner)] = f15;
fB[f_memLR(13,x,y,z,pitch,zInner)] = f16;
fB[f_memLR(14,x,y,z,pitch,zInner)] = f9 ;
fB[f_memLR(15,x,y,z,pitch,zInner)] = f12;
fB[f_memLR(16,x,y,z,pitch,zInner)] = f13;
fB[f_memLR(17,x,y,z,pitch,zInner)] = f10;
fB[f_memLR(18,x,y,z,pitch,zInner)] = f11;
}
else{
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
fB[f_memLR(0 ,x,y,z,pitch,zInner)] = f0 ;
fB[f_memLR(1 ,x,y,z,pitch,zInner)] = f1 ;
fB[f_memLR(2 ,x,y,z,pitch,zInner)] = f2 ;
fB[f_memLR(3 ,x,y,z,pitch,zInner)] = f3 ;
fB[f_memLR(4 ,x,y,z,pitch,zInner)] = f4 ;
fB[f_memLR(5 ,x,y,z,pitch,zInner)] = f5 ;
fB[f_memLR(6 ,x,y,z,pitch,zInner)] = f6 ;
fB[f_memLR(7 ,x,y,z,pitch,zInner)] = f7 ;
fB[f_memLR(8 ,x,y,z,pitch,zInner)] = f8 ;
fB[f_memLR(9 ,x,y,z,pitch,zInner)] = f9 ;
fB[f_memLR(10,x,y,z,pitch,zInner)] = f10;
fB[f_memLR(11,x,y,z,pitch,zInner)] = f11;
fB[f_memLR(12,x,y,z,pitch,zInner)] = f12;
fB[f_memLR(13,x,y,z,pitch,zInner)] = f13;
fB[f_memLR(14,x,y,z,pitch,zInner)] = f14;
fB[f_memLR(15,x,y,z,pitch,zInner)] = f15;
fB[f_memLR(16,x,y,z,pitch,zInner)] = f16;
fB[f_memLR(17,x,y,z,pitch,zInner)] = f17;
fB[f_memLR(18,x,y,z,pitch,zInner)] = f18;
}
}
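//update_bottom_LR: update of the bottom buffer slice (g) of the refined grid; no force accumulation.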
__global__ void update_bottom_LR(float* gA, float* gB, float* f, float* temp,
float omega, size_t pitch, int GPU, int zInner)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = (zInner+2)-1;
int j = x+y*pitch;//index on padded mem (pitch in elements)
//int im = ImageFcn(LRX0+LRFACTOR*x,LRY0+LRFACTOR*y,LRZ0+GPU*z*LRFACTOR);
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
float xcoord = LRX0+x*LRFACTOR;
float ycoord = LRY0+y*LRFACTOR;
float zcoord = LRZ0+GPU*LRFACTOR*z;
int im = ImageFcnLR(xcoord,ycoord,zcoord);
f0 = gA [j];
f1 = gA [buff_memLR(1 ,x-1,y ,pitch)];
f3 = gA [buff_memLR(3 ,x+1,y ,pitch)];
f2 = gA [buff_memLR(2 ,x ,y-1,pitch)];
f5 = gA [buff_memLR(5 ,x-1,y-1,pitch)];
f6 = gA [buff_memLR(6 ,x+1,y-1,pitch)];
f4 = gA [buff_memLR(4 ,x ,y+1,pitch)];
f7 = gA [buff_memLR(7 ,x+1,y+1,pitch)];
f8 = gA [buff_memLR(8 ,x-1,y+1,pitch)];
f9 = temp[buff_memLR(9 ,x ,y ,pitch)];
f10= temp[buff_memLR(10,x-1,y ,pitch)];
f11= temp[buff_memLR(11,x ,y-1,pitch)];
f12= temp[buff_memLR(12,x+1,y ,pitch)];
f13= temp[buff_memLR(13,x ,y+1,pitch)];
f14= f [f_memLR (14,x ,y ,0,pitch, zInner)];
f15= f [f_memLR (15,x-1,y ,0,pitch, zInner)];
f16= f [f_memLR (16,x ,y-1,0,pitch, zInner)];
f17= f [f_memLR (17,x+1,y ,0,pitch, zInner)];
f18= f [f_memLR (18,x ,y+1,0,pitch, zInner)];
if(im == 1 || im ==10){//BB
gB[buff_memLR(0 ,x,y,pitch)] = f0 ;
gB[buff_memLR(1 ,x,y,pitch)] = f3 ;
gB[buff_memLR(2 ,x,y,pitch)] = f4 ;
gB[buff_memLR(3 ,x,y,pitch)] = f1 ;
gB[buff_memLR(4 ,x,y,pitch)] = f2 ;
gB[buff_memLR(5 ,x,y,pitch)] = f7 ;
gB[buff_memLR(6 ,x,y,pitch)] = f8 ;
gB[buff_memLR(7 ,x,y,pitch)] = f5 ;
gB[buff_memLR(8 ,x,y,pitch)] = f6 ;
gB[buff_memLR(9 ,x,y,pitch)] = f14;
gB[buff_memLR(10,x,y,pitch)] = f17;
gB[buff_memLR(11,x,y,pitch)] = f18;
gB[buff_memLR(12,x,y,pitch)] = f15;
gB[buff_memLR(13,x,y,pitch)] = f16;
gB[buff_memLR(14,x,y,pitch)] = f9 ;
gB[buff_memLR(15,x,y,pitch)] = f12;
gB[buff_memLR(16,x,y,pitch)] = f13;
gB[buff_memLR(17,x,y,pitch)] = f10;
gB[buff_memLR(18,x,y,pitch)] = f11;
}
else{
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
gB[buff_memLR(0 ,x,y,pitch)] = f0 ;
gB[buff_memLR(1 ,x,y,pitch)] = f1 ;
gB[buff_memLR(2 ,x,y,pitch)] = f2 ;
gB[buff_memLR(3 ,x,y,pitch)] = f3 ;
gB[buff_memLR(4 ,x,y,pitch)] = f4 ;
gB[buff_memLR(5 ,x,y,pitch)] = f5 ;
gB[buff_memLR(6 ,x,y,pitch)] = f6 ;
gB[buff_memLR(7 ,x,y,pitch)] = f7 ;
gB[buff_memLR(8 ,x,y,pitch)] = f8 ;
gB[buff_memLR(9 ,x,y,pitch)] = f9 ;
gB[buff_memLR(10,x,y,pitch)] = f10;
gB[buff_memLR(11,x,y,pitch)] = f11;
gB[buff_memLR(12,x,y,pitch)] = f12;
gB[buff_memLR(13,x,y,pitch)] = f13;
gB[buff_memLR(14,x,y,pitch)] = f14;
gB[buff_memLR(15,x,y,pitch)] = f15;
gB[buff_memLR(16,x,y,pitch)] = f16;
gB[buff_memLR(17,x,y,pitch)] = f17;
gB[buff_memLR(18,x,y,pitch)] = f18;
}
}
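//update_top_LR: update of the top buffer slice (h) of the refined grid; no force accumulation.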
__global__ void update_top_LR(float* hA, float* hB, float* f, float* temp,
float omega, size_t pitch, int GPU, int zInner)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = (GPU+1)*(zInner+2)-1;//physical coord in LR region
int j = x+y*pitch;//index on padded mem (pitch in elements)
float xcoord = LRX0+x*LRFACTOR;
float ycoord = LRY0+y*LRFACTOR;
float zcoord = LRZ0+LRFACTOR*z;
int im = ImageFcnLR(xcoord,ycoord,zcoord);
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
f0 = hA[j];
f1 = hA [buff_memLR(1 ,x-1,y ,pitch)];
f3 = hA [buff_memLR(3 ,x+1,y ,pitch)];
f2 = hA [buff_memLR(2 ,x ,y-1,pitch)];
f5 = hA [buff_memLR(5 ,x-1,y-1,pitch)];
f6 = hA [buff_memLR(6 ,x+1,y-1,pitch)];
f4 = hA [buff_memLR(4 ,x ,y+1,pitch)];
f7 = hA [buff_memLR(7 ,x+1,y+1,pitch)];
f8 = hA [buff_memLR(8 ,x-1,y+1,pitch)];
// f9 = hA [buff_memLR(9 ,x ,y ,pitch)];
// f10= hA [buff_memLR(10,x-1,y ,pitch)];
// f11= hA [buff_memLR(11,x ,y-1,pitch)];
// f12= hA [buff_memLR(12,x+1,y ,pitch)];
// f13= hA [buff_memLR(13,x ,y+1,pitch)];
// f14= hA [buff_memLR(9 ,x ,y ,pitch)];
// f15= hA [buff_memLR(10,x-1,y ,pitch)];
// f16= hA [buff_memLR(11,x ,y-1,pitch)];
// f17= hA [buff_memLR(12,x+1,y ,pitch)];
// f18= hA [buff_memLR(13,x ,y+1,pitch)];
f9 = f [f_memLR (9 ,x ,y ,zInner-1,pitch, zInner)];
f10= f [f_memLR (10,x-1,y ,zInner-1,pitch, zInner)];
f11= f [f_memLR (11,x ,y-1,zInner-1,pitch, zInner)];
f12= f [f_memLR (12,x+1,y ,zInner-1,pitch, zInner)];
f13= f [f_memLR (13,x ,y+1,zInner-1,pitch, zInner)];
f14= temp[buff_memLR(14,x ,y ,pitch)];
f15= temp[buff_memLR(15,x-1,y ,pitch)];
f16= temp[buff_memLR(16,x ,y-1,pitch)];
f17= temp[buff_memLR(17,x+1,y ,pitch)];
f18= temp[buff_memLR(18,x ,y+1,pitch)];
if(im == 1 || im ==10){//BB
hB[buff_memLR(0 ,x,y,pitch)] = f0 ;
hB[buff_memLR(1 ,x,y,pitch)] = f3 ;
hB[buff_memLR(2 ,x,y,pitch)] = f4 ;
hB[buff_memLR(3 ,x,y,pitch)] = f1 ;
hB[buff_memLR(4 ,x,y,pitch)] = f2 ;
hB[buff_memLR(5 ,x,y,pitch)] = f7 ;
hB[buff_memLR(6 ,x,y,pitch)] = f8 ;
hB[buff_memLR(7 ,x,y,pitch)] = f5 ;
hB[buff_memLR(8 ,x,y,pitch)] = f6 ;
hB[buff_memLR(9 ,x,y,pitch)] = f14;
hB[buff_memLR(10,x,y,pitch)] = f17;
hB[buff_memLR(11,x,y,pitch)] = f18;
hB[buff_memLR(12,x,y,pitch)] = f15;
hB[buff_memLR(13,x,y,pitch)] = f16;
hB[buff_memLR(14,x,y,pitch)] = f9 ;
hB[buff_memLR(15,x,y,pitch)] = f12;
hB[buff_memLR(16,x,y,pitch)] = f13;
hB[buff_memLR(17,x,y,pitch)] = f10;
hB[buff_memLR(18,x,y,pitch)] = f11;
}
else{
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
hB[buff_memLR(0 ,x,y,pitch)] = f0 ;
hB[buff_memLR(1 ,x,y,pitch)] = f1 ;
hB[buff_memLR(2 ,x,y,pitch)] = f2 ;
hB[buff_memLR(3 ,x,y,pitch)] = f3 ;
hB[buff_memLR(4 ,x,y,pitch)] = f4 ;
hB[buff_memLR(5 ,x,y,pitch)] = f5 ;
hB[buff_memLR(6 ,x,y,pitch)] = f6 ;
hB[buff_memLR(7 ,x,y,pitch)] = f7 ;
hB[buff_memLR(8 ,x,y,pitch)] = f8 ;
hB[buff_memLR(9 ,x,y,pitch)] = f9 ;
hB[buff_memLR(10,x,y,pitch)] = f10;
hB[buff_memLR(11,x,y,pitch)] = f11;
hB[buff_memLR(12,x,y,pitch)] = f12;
hB[buff_memLR(13,x,y,pitch)] = f13;
hB[buff_memLR(14,x,y,pitch)] = f14;
hB[buff_memLR(15,x,y,pitch)] = f15;
hB[buff_memLR(16,x,y,pitch)] = f16;
hB[buff_memLR(17,x,y,pitch)] = f17;
hB[buff_memLR(18,x,y,pitch)] = f18;
}
}
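//update_inner_LR_force: interior update of the refined grid, identical to update_inner_LR but with the
//momentum-exchange force accumulation (shared-memory reduction + atomicAdd into FX/FY/FZ) on im==10 nodes.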
__global__ void update_inner_LR_force(float* fA, float* fB, float* g, float* h,
float omega, size_t pitch, int GPU, int zInner,
float *FX, float *FY, float *FZ, int Force_t)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
int j = x+y*pitch+z*YLRDIM*pitch;//index on padded mem (pitch in elements)
int im = ImageFcnLR(LRX0+LRFACTOR*x,LRY0+LRFACTOR*y,LRZ0+LRFACTOR*(GPU*(zInner+2)+1+z));
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
__shared__ float sumX[BLOCKSIZEX], sumY[BLOCKSIZEX], sumZ[BLOCKSIZEX];
__shared__ int check[1];
check[0] = 0;
syncthreads();
f0 = fA[j];
f1 = fA[f_memLR (1 ,x-1,y ,z ,pitch, zInner)];
f3 = fA[f_memLR (3 ,x+1,y ,z ,pitch, zInner)];
f2 = fA[f_memLR (2 ,x ,y-1,z ,pitch, zInner)];
f5 = fA[f_memLR (5 ,x-1,y-1,z ,pitch, zInner)];
f6 = fA[f_memLR (6 ,x+1,y-1,z ,pitch, zInner)];
f4 = fA[f_memLR (4 ,x ,y+1,z ,pitch, zInner)];
f7 = fA[f_memLR (7 ,x+1,y+1,z ,pitch, zInner)];
f8 = fA[f_memLR (8 ,x-1,y+1,z ,pitch, zInner)];
if(z==zInner-1){//top nodes need info from h
f9 = fA[f_memLR (9 ,x ,y ,z-1,pitch, zInner)];
f10= fA[f_memLR (10,x-1,y ,z-1,pitch, zInner)];
f11= fA[f_memLR (11,x ,y-1,z-1,pitch, zInner)];
f12= fA[f_memLR (12,x+1,y ,z-1,pitch, zInner)];
f13= fA[f_memLR (13,x ,y+1,z-1,pitch, zInner)];
f14= h [buff_memLR(14,x ,y ,pitch)];
f15= h [buff_memLR(15,x-1,y ,pitch)];
f16= h [buff_memLR(16,x ,y-1,pitch)];
f17= h [buff_memLR(17,x+1,y ,pitch)];
f18= h [buff_memLR(18,x ,y+1,pitch)];
}
else if(z==0){//bottom nodes need info from g
f9 = g [buff_memLR(9 ,x ,y ,pitch)];
f10= g [buff_memLR(10,x-1,y ,pitch)];
f11= g [buff_memLR(11,x ,y-1,pitch)];
f12= g [buff_memLR(12,x+1,y ,pitch)];
f13= g [buff_memLR(13,x ,y+1,pitch)];
f14= fA[f_memLR (14,x ,y ,z+1,pitch, zInner)];
f15= fA[f_memLR (15,x-1,y ,z+1,pitch, zInner)];
f16= fA[f_memLR (16,x ,y-1,z+1,pitch, zInner)];
f17= fA[f_memLR (17,x+1,y ,z+1,pitch, zInner)];
f18= fA[f_memLR (18,x ,y+1,z+1,pitch, zInner)];
}
else{//normal nodes
f9 = fA[f_memLR(9 ,x ,y ,z-1,pitch,zInner)];//
f10= fA[f_memLR(10,x-1,y ,z-1,pitch,zInner)];//
f11= fA[f_memLR(11,x ,y-1,z-1,pitch,zInner)];//
f12= fA[f_memLR(12,x+1,y ,z-1,pitch,zInner)];//
f13= fA[f_memLR(13,x ,y+1,z-1,pitch,zInner)];//
f14= fA[f_memLR(14,x ,y ,z+1,pitch,zInner)];//
f15= fA[f_memLR(15,x-1,y ,z+1,pitch,zInner)];//
f16= fA[f_memLR(16,x ,y-1,z+1,pitch,zInner)];//
f17= fA[f_memLR(17,x+1,y ,z+1,pitch,zInner)];//
f18= fA[f_memLR(18,x ,y+1,z+1,pitch,zInner)];//
}//end normal nodes
if(im == 1 || im ==10){//BB
if(im == 10){
check[0] = 1;
sumX[threadIdx.x]=2.f*f1-2.f*f3+2.f*f5+2.f*f8-2.f*f6;//-2.f*f7+2.f*f10-2.f*f12+2.f*f15-2.f*f17;
sumX[threadIdx.x]+=-2.f*f7+2.f*f10-2.f*f12+2.f*f15-2.f*f17;
sumY[threadIdx.x]=2.f*f2-2.f*f4+2.f*f5-2.f*f8+2.f*f6;//-2.f*f7+2.f*f11-2.f*f13+2.f*f16-2.f*f18;
sumY[threadIdx.x]+=-2.f*f7+2.f*f11-2.f*f13+2.f*f16-2.f*f18;
sumZ[threadIdx.x]=2.f*f9+2.f*f10+2.f*f11+2.f*f12+2.f*f13;//-2.f*f14-2.f*f15-2.f*f16-2.f*f17-2.f*f18;
sumZ[threadIdx.x]+=-2.f*f14-2.f*f15-2.f*f16-2.f*f17-2.f*f18;
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
}
fB[f_memLR(1 ,x,y,z,pitch,zInner)] = f3 ;
fB[f_memLR(2 ,x,y,z,pitch,zInner)] = f4 ;
fB[f_memLR(3 ,x,y,z,pitch,zInner)] = f1 ;
fB[f_memLR(4 ,x,y,z,pitch,zInner)] = f2 ;
fB[f_memLR(5 ,x,y,z,pitch,zInner)] = f7 ;
fB[f_memLR(6 ,x,y,z,pitch,zInner)] = f8 ;
fB[f_memLR(7 ,x,y,z,pitch,zInner)] = f5 ;
fB[f_memLR(8 ,x,y,z,pitch,zInner)] = f6 ;
fB[f_memLR(9 ,x,y,z,pitch,zInner)] = f14;
fB[f_memLR(10,x,y,z,pitch,zInner)] = f17;
fB[f_memLR(11,x,y,z,pitch,zInner)] = f18;
fB[f_memLR(12,x,y,z,pitch,zInner)] = f15;
fB[f_memLR(13,x,y,z,pitch,zInner)] = f16;
fB[f_memLR(14,x,y,z,pitch,zInner)] = f9 ;
fB[f_memLR(15,x,y,z,pitch,zInner)] = f12;
fB[f_memLR(16,x,y,z,pitch,zInner)] = f13;
fB[f_memLR(17,x,y,z,pitch,zInner)] = f10;
fB[f_memLR(18,x,y,z,pitch,zInner)] = f11;
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
fB[f_memLR(0 ,x,y,z,pitch,zInner)] = f0 ;
fB[f_memLR(1 ,x,y,z,pitch,zInner)] = f1 ;
fB[f_memLR(2 ,x,y,z,pitch,zInner)] = f2 ;
fB[f_memLR(3 ,x,y,z,pitch,zInner)] = f3 ;
fB[f_memLR(4 ,x,y,z,pitch,zInner)] = f4 ;
fB[f_memLR(5 ,x,y,z,pitch,zInner)] = f5 ;
fB[f_memLR(6 ,x,y,z,pitch,zInner)] = f6 ;
fB[f_memLR(7 ,x,y,z,pitch,zInner)] = f7 ;
fB[f_memLR(8 ,x,y,z,pitch,zInner)] = f8 ;
fB[f_memLR(9 ,x,y,z,pitch,zInner)] = f9 ;
fB[f_memLR(10,x,y,z,pitch,zInner)] = f10;
fB[f_memLR(11,x,y,z,pitch,zInner)] = f11;
fB[f_memLR(12,x,y,z,pitch,zInner)] = f12;
fB[f_memLR(13,x,y,z,pitch,zInner)] = f13;
fB[f_memLR(14,x,y,z,pitch,zInner)] = f14;
fB[f_memLR(15,x,y,z,pitch,zInner)] = f15;
fB[f_memLR(16,x,y,z,pitch,zInner)] = f16;
fB[f_memLR(17,x,y,z,pitch,zInner)] = f17;
fB[f_memLR(18,x,y,z,pitch,zInner)] = f18;
}
syncthreads();
if(check[0] == 1){
//reduction for force
int nTotalThreads = blockDim.x;
while(nTotalThreads > 1){
int halfPoint = (nTotalThreads >> 1);
if(threadIdx.x < halfPoint){
sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint];
sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint];
sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint];
}
syncthreads();
nTotalThreads = halfPoint;
}
if(threadIdx.x == 0){
atomicAdd(&FX[Force_t],sumX[0]);
atomicAdd(&FY[Force_t],sumY[0]);
atomicAdd(&FZ[Force_t],sumZ[0]);
}
}
}
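//update_bottom_LR_force: bottom buffer slice of the refined grid with force accumulation on im==10 nodes.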
__global__ void update_bottom_LR_force(float* gA, float* gB, float* f, float* temp,
float omega, size_t pitch, int GPU, int zInner,
float *FX, float *FY, float *FZ, int Force_t)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = (zInner+2)-1;
int j = x+y*pitch;//index on padded mem (pitch in elements)
//int im = ImageFcn(LRX0+LRFACTOR*x,LRY0+LRFACTOR*y,LRZ0+GPU*z*LRFACTOR);
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
float xcoord = LRX0+x*LRFACTOR;
float ycoord = LRY0+y*LRFACTOR;
float zcoord = LRZ0+GPU*LRFACTOR*z;
int im = ImageFcnLR(xcoord,ycoord,zcoord);
__shared__ float sumX[BLOCKSIZEX], sumY[BLOCKSIZEX], sumZ[BLOCKSIZEX];
__shared__ int check[1];
check[0] = 0;
syncthreads();
f0 = gA [j];
f1 = gA [buff_memLR(1 ,x-1,y ,pitch)];
f3 = gA [buff_memLR(3 ,x+1,y ,pitch)];
f2 = gA [buff_memLR(2 ,x ,y-1,pitch)];
f5 = gA [buff_memLR(5 ,x-1,y-1,pitch)];
f6 = gA [buff_memLR(6 ,x+1,y-1,pitch)];
f4 = gA [buff_memLR(4 ,x ,y+1,pitch)];
f7 = gA [buff_memLR(7 ,x+1,y+1,pitch)];
f8 = gA [buff_memLR(8 ,x-1,y+1,pitch)];
f9 = temp[buff_memLR(9 ,x ,y ,pitch)];
f10= temp[buff_memLR(10,x-1,y ,pitch)];
f11= temp[buff_memLR(11,x ,y-1,pitch)];
f12= temp[buff_memLR(12,x+1,y ,pitch)];
f13= temp[buff_memLR(13,x ,y+1,pitch)];
f14= f [f_memLR (14,x ,y ,0,pitch, zInner)];
f15= f [f_memLR (15,x-1,y ,0,pitch, zInner)];
f16= f [f_memLR (16,x ,y-1,0,pitch, zInner)];
f17= f [f_memLR (17,x+1,y ,0,pitch, zInner)];
f18= f [f_memLR (18,x ,y+1,0,pitch, zInner)];
if(im == 1 || im ==10){//BB
if(im == 10){
check[0] = 1;
sumX[threadIdx.x]=2.f*f1-2.f*f3+2.f*f5+2.f*f8-2.f*f6;//-2.f*f7+2.f*f10-2.f*f12+2.f*f15-2.f*f17;
sumX[threadIdx.x]+=-2.f*f7+2.f*f10-2.f*f12+2.f*f15-2.f*f17;
sumY[threadIdx.x]=2.f*f2-2.f*f4+2.f*f5-2.f*f8+2.f*f6;//-2.f*f7+2.f*f11-2.f*f13+2.f*f16-2.f*f18;
sumY[threadIdx.x]+=-2.f*f7+2.f*f11-2.f*f13+2.f*f16-2.f*f18;
sumZ[threadIdx.x]=2.f*f9+2.f*f10+2.f*f11+2.f*f12+2.f*f13;//-2.f*f14-2.f*f15-2.f*f16-2.f*f17-2.f*f18;
sumZ[threadIdx.x]+=-2.f*f14-2.f*f15-2.f*f16-2.f*f17-2.f*f18;
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
}
gB[buff_memLR(0 ,x,y,pitch)] = f0 ;
gB[buff_memLR(1 ,x,y,pitch)] = f3 ;
gB[buff_memLR(2 ,x,y,pitch)] = f4 ;
gB[buff_memLR(3 ,x,y,pitch)] = f1 ;
gB[buff_memLR(4 ,x,y,pitch)] = f2 ;
gB[buff_memLR(5 ,x,y,pitch)] = f7 ;
gB[buff_memLR(6 ,x,y,pitch)] = f8 ;
gB[buff_memLR(7 ,x,y,pitch)] = f5 ;
gB[buff_memLR(8 ,x,y,pitch)] = f6 ;
gB[buff_memLR(9 ,x,y,pitch)] = f14;
gB[buff_memLR(10,x,y,pitch)] = f17;
gB[buff_memLR(11,x,y,pitch)] = f18;
gB[buff_memLR(12,x,y,pitch)] = f15;
gB[buff_memLR(13,x,y,pitch)] = f16;
gB[buff_memLR(14,x,y,pitch)] = f9 ;
gB[buff_memLR(15,x,y,pitch)] = f12;
gB[buff_memLR(16,x,y,pitch)] = f13;
gB[buff_memLR(17,x,y,pitch)] = f10;
gB[buff_memLR(18,x,y,pitch)] = f11;
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
gB[buff_memLR(0 ,x,y,pitch)] = f0 ;
gB[buff_memLR(1 ,x,y,pitch)] = f1 ;
gB[buff_memLR(2 ,x,y,pitch)] = f2 ;
gB[buff_memLR(3 ,x,y,pitch)] = f3 ;
gB[buff_memLR(4 ,x,y,pitch)] = f4 ;
gB[buff_memLR(5 ,x,y,pitch)] = f5 ;
gB[buff_memLR(6 ,x,y,pitch)] = f6 ;
gB[buff_memLR(7 ,x,y,pitch)] = f7 ;
gB[buff_memLR(8 ,x,y,pitch)] = f8 ;
gB[buff_memLR(9 ,x,y,pitch)] = f9 ;
gB[buff_memLR(10,x,y,pitch)] = f10;
gB[buff_memLR(11,x,y,pitch)] = f11;
gB[buff_memLR(12,x,y,pitch)] = f12;
gB[buff_memLR(13,x,y,pitch)] = f13;
gB[buff_memLR(14,x,y,pitch)] = f14;
gB[buff_memLR(15,x,y,pitch)] = f15;
gB[buff_memLR(16,x,y,pitch)] = f16;
gB[buff_memLR(17,x,y,pitch)] = f17;
gB[buff_memLR(18,x,y,pitch)] = f18;
}
syncthreads();
if(check[0] == 1){
//reduction for force
int nTotalThreads = blockDim.x;
while(nTotalThreads > 1){
int halfPoint = (nTotalThreads >> 1);
if(threadIdx.x < halfPoint){
sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint];
sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint];
sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint];
}
syncthreads();
nTotalThreads = halfPoint;
}
if(threadIdx.x == 0){
atomicAdd(&FX[Force_t],sumX[0]);
atomicAdd(&FY[Force_t],sumY[0]);
atomicAdd(&FZ[Force_t],sumZ[0]);
}
}
}
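//update_top_LR_force: top buffer slice of the refined grid with force accumulation on im==10 nodes.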
__global__ void update_top_LR_force(float* hA, float* hB, float* f, float* temp,
float omega, size_t pitch, int GPU, int zInner,
float *FX, float *FY, float *FZ, int Force_t)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = (GPU+1)*(zInner+2)-1;//physical coord in LR region
int j = x+y*pitch;//index on padded mem (pitch in elements)
float xcoord = LRX0+x*LRFACTOR;
float ycoord = LRY0+y*LRFACTOR;
float zcoord = LRZ0+LRFACTOR*z;
int im = ImageFcnLR(xcoord,ycoord,zcoord);
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
__shared__ float sumX[BLOCKSIZEX], sumY[BLOCKSIZEX], sumZ[BLOCKSIZEX];
__shared__ int check[1];
check[0] = 0;
syncthreads();
f0 = hA[j];
f1 = hA [buff_memLR(1 ,x-1,y ,pitch)];
f3 = hA [buff_memLR(3 ,x+1,y ,pitch)];
f2 = hA [buff_memLR(2 ,x ,y-1,pitch)];
f5 = hA [buff_memLR(5 ,x-1,y-1,pitch)];
f6 = hA [buff_memLR(6 ,x+1,y-1,pitch)];
f4 = hA [buff_memLR(4 ,x ,y+1,pitch)];
f7 = hA [buff_memLR(7 ,x+1,y+1,pitch)];
f8 = hA [buff_memLR(8 ,x-1,y+1,pitch)];
f9 = f [f_memLR (9 ,x ,y ,zInner-1,pitch, zInner)];
f10= f [f_memLR (10,x-1,y ,zInner-1,pitch, zInner)];
f11= f [f_memLR (11,x ,y-1,zInner-1,pitch, zInner)];
f12= f [f_memLR (12,x+1,y ,zInner-1,pitch, zInner)];
f13= f [f_memLR (13,x ,y+1,zInner-1,pitch, zInner)];
f14= temp[buff_memLR(14,x ,y ,pitch)];
f15= temp[buff_memLR(15,x-1,y ,pitch)];
f16= temp[buff_memLR(16,x ,y-1,pitch)];
f17= temp[buff_memLR(17,x+1,y ,pitch)];
f18= temp[buff_memLR(18,x ,y+1,pitch)];
if(im == 1 || im ==10){//BB
if(im == 10){
check[0] = 1;
sumX[threadIdx.x] =2.f*f1-2.f*f3+2.f*f5+2.f*f8-2.f*f6;//-2.f*f7+2.f*f10-2.f*f12+2.f*f15-2.f*f17;
sumX[threadIdx.x]+=-2.f*f7+2.f*f10-2.f*f12+2.f*f15-2.f*f17;
sumY[threadIdx.x] =2.f*f2-2.f*f4+2.f*f5-2.f*f8+2.f*f6;//-2.f*f7+2.f*f11-2.f*f13+2.f*f16-2.f*f18;
sumY[threadIdx.x]+=-2.f*f7+2.f*f11-2.f*f13+2.f*f16-2.f*f18;
sumZ[threadIdx.x] =2.f*f9+2.f*f10+2.f*f11+2.f*f12+2.f*f13;//-2.f*f14-2.f*f15-2.f*f16-2.f*f17-2.f*f18;
sumZ[threadIdx.x]+=-2.f*f14-2.f*f15-2.f*f16-2.f*f17-2.f*f18;
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
}
hB[buff_memLR(0 ,x,y,pitch)] = f0 ;
hB[buff_memLR(1 ,x,y,pitch)] = f3 ;
hB[buff_memLR(2 ,x,y,pitch)] = f4 ;
hB[buff_memLR(3 ,x,y,pitch)] = f1 ;
hB[buff_memLR(4 ,x,y,pitch)] = f2 ;
hB[buff_memLR(5 ,x,y,pitch)] = f7 ;
hB[buff_memLR(6 ,x,y,pitch)] = f8 ;
hB[buff_memLR(7 ,x,y,pitch)] = f5 ;
hB[buff_memLR(8 ,x,y,pitch)] = f6 ;
hB[buff_memLR(9 ,x,y,pitch)] = f14;
hB[buff_memLR(10,x,y,pitch)] = f17;
hB[buff_memLR(11,x,y,pitch)] = f18;
hB[buff_memLR(12,x,y,pitch)] = f15;
hB[buff_memLR(13,x,y,pitch)] = f16;
hB[buff_memLR(14,x,y,pitch)] = f9 ;
hB[buff_memLR(15,x,y,pitch)] = f12;
hB[buff_memLR(16,x,y,pitch)] = f13;
hB[buff_memLR(17,x,y,pitch)] = f10;
hB[buff_memLR(18,x,y,pitch)] = f11;
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
hB[buff_memLR(0 ,x,y,pitch)] = f0 ;
hB[buff_memLR(1 ,x,y,pitch)] = f1 ;
hB[buff_memLR(2 ,x,y,pitch)] = f2 ;
hB[buff_memLR(3 ,x,y,pitch)] = f3 ;
hB[buff_memLR(4 ,x,y,pitch)] = f4 ;
hB[buff_memLR(5 ,x,y,pitch)] = f5 ;
hB[buff_memLR(6 ,x,y,pitch)] = f6 ;
hB[buff_memLR(7 ,x,y,pitch)] = f7 ;
hB[buff_memLR(8 ,x,y,pitch)] = f8 ;
hB[buff_memLR(9 ,x,y,pitch)] = f9 ;
hB[buff_memLR(10,x,y,pitch)] = f10;
hB[buff_memLR(11,x,y,pitch)] = f11;
hB[buff_memLR(12,x,y,pitch)] = f12;
hB[buff_memLR(13,x,y,pitch)] = f13;
hB[buff_memLR(14,x,y,pitch)] = f14;
hB[buff_memLR(15,x,y,pitch)] = f15;
hB[buff_memLR(16,x,y,pitch)] = f16;
hB[buff_memLR(17,x,y,pitch)] = f17;
hB[buff_memLR(18,x,y,pitch)] = f18;
}
syncthreads();
if(check[0] == 1){
//reduction for force
int nTotalThreads = blockDim.x;
while(nTotalThreads > 1){
int halfPoint = (nTotalThreads >> 1);
if(threadIdx.x < halfPoint){
sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint];
sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint];
sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint];
}
syncthreads();
nTotalThreads = halfPoint;
}
if(threadIdx.x == 0){
atomicAdd(&FX[Force_t],sumX[0]);
atomicAdd(&FY[Force_t],sumY[0]);
atomicAdd(&FZ[Force_t],sumZ[0]);
}
}
}
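//update_inner_LR_interp: interior update of the refined grid for the steps that couple it to the coarse grid.
//Nodes in the outer rim of the refined patch (within LRLEVEL of the x/y edges) get their distributions by
//trilinear interpolation from the coarse arrays (g_c below, f_c inside, h_c above), followed by the
//coarse-to-fine rescaling mrt_scale_cf/bgk_scale_cf with factor SF; all other nodes do the normal
//stream-and-collide update.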
__global__ void update_inner_LR_interp(float* fA, float* fB, float* g, float* h, float* f_c, float* g_c, float* h_c,
float omega, size_t pitch, int zInner, size_t pitch_c, int zInner_c, float SF, int GPU)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
int j = x+y*pitch+z*YLRDIM*pitch;//index on padded mem (pitch in elements)
float xcoord = LRX0+x*LRFACTOR;
float ycoord = LRY0+y*LRFACTOR;
float zcoord = LRZ0+(1+z)*LRFACTOR;//local zcoord within GPU
int im = ImageFcnLR(xcoord,ycoord,GPU*(zInner_c+2)+zcoord);
if(x < LRLEVEL || x > XLRDIM-1-LRLEVEL || y < LRLEVEL || y > YLRDIM-1-LRLEVEL)
{
float f[19];
if(zcoord<1)
{
int xm = int(xcoord);
int ym = int(ycoord);
int zm = int(zcoord);//zcoord<1: interpolate between coarse bottom buffer and first inner coarse slice
int xp = xm+1;
int yp = ym+1;
//int zp = zm+1;
float xf = xcoord-xm;
float yf = ycoord-ym;
float zf = zcoord-zm;
for(int i=0;i<19;i++){
float v000 = g_c[buff_mem(i ,xm,ym ,pitch_c)];
float v001 = g_c[buff_mem(i ,xp,ym ,pitch_c)];
float v010 = g_c[buff_mem(i ,xm,yp ,pitch_c)];
float v011 = g_c[buff_mem(i ,xp,yp ,pitch_c)];
float v100 = f_c[ f_mem(i ,xm,ym,0 ,pitch_c,zInner_c)];
float v101 = f_c[ f_mem(i ,xp,ym,0 ,pitch_c,zInner_c)];
float v110 = f_c[ f_mem(i ,xm,yp,0 ,pitch_c,zInner_c)];
float v111 = f_c[ f_mem(i ,xp,yp,0 ,pitch_c,zInner_c)];
f[i] = trilinear_interp(v000, v001, v010, v011, v100, v101, v110, v111, xf, yf, zf);
}
}//end zcoord<1
else if(zcoord>(zInner_c+2)-2)
{
int xm = int(xcoord);
int ym = int(ycoord);
int zm = int(zcoord);
int xp = xm+1;
int yp = ym+1;
float xf = xcoord-xm;
float yf = ycoord-ym;
float zf = zcoord-zm;
for(int i=0;i<19;i++){
float v000 = f_c[ f_mem(i ,xm,ym,zInner_c-1,pitch_c,zInner_c)];
float v001 = f_c[ f_mem(i ,xp,ym,zInner_c-1,pitch_c,zInner_c)];
float v010 = f_c[ f_mem(i ,xm,yp,zInner_c-1,pitch_c,zInner_c)];
float v011 = f_c[ f_mem(i ,xp,yp,zInner_c-1,pitch_c,zInner_c)];
float v100 = h_c[buff_mem(i ,xm,ym ,pitch_c)];
float v101 = h_c[buff_mem(i ,xp,ym ,pitch_c)];
float v110 = h_c[buff_mem(i ,xm,yp ,pitch_c)];
float v111 = h_c[buff_mem(i ,xp,yp ,pitch_c)];
f[i] = trilinear_interp(v000, v001, v010, v011, v100, v101, v110, v111, xf, yf, zf);
}
}//end zcoord>(zInner_c+2)-2
else{
int xm = int(xcoord);
int ym = int(ycoord);
int zm = int(zcoord);
int xp = xm+1;
int yp = ym+1;
int zp = zm+1;
float xf = xcoord-xm;
float yf = ycoord-ym;
float zf = zcoord-zm;
for(int i=0;i<19;i++){
float v000 = f_c[f_mem(i ,xm,ym,zm-1,pitch_c,zInner_c)];//-1 to correct for index in f
float v001 = f_c[f_mem(i ,xp,ym,zm-1,pitch_c,zInner_c)];
float v010 = f_c[f_mem(i ,xm,yp,zm-1,pitch_c,zInner_c)];
float v011 = f_c[f_mem(i ,xp,yp,zm-1,pitch_c,zInner_c)];
float v100 = f_c[f_mem(i ,xm,ym,zp-1,pitch_c,zInner_c)];
float v101 = f_c[f_mem(i ,xp,ym,zp-1,pitch_c,zInner_c)];
float v110 = f_c[f_mem(i ,xm,yp,zp-1,pitch_c,zInner_c)];
float v111 = f_c[f_mem(i ,xp,yp,zp-1,pitch_c,zInner_c)];
f[i] = trilinear_interp(v000, v001, v010, v011, v100, v101, v110, v111, xf, yf, zf);
}
}
if(MODEL == "MRT")
mrt_scale_cf(f[0],f[1],f[2],f[3],f[4],f[5],f[6],f[7],f[8],f[9],f[10],f[11],f[12],f[13],f[14],f[15],f[16],f[17],f[18],SF);
else if(MODEL == "BGK")
bgk_scale_cf(f[0],f[1],f[2],f[3],f[4],f[5],f[6],f[7],f[8],f[9],f[10],f[11],f[12],f[13],f[14],f[15],f[16],f[17],f[18],SF);
fB[f_memLR(0 ,x,y,z,pitch,zInner)] = f[0 ];
fB[f_memLR(1 ,x,y,z,pitch,zInner)] = f[1 ];
fB[f_memLR(2 ,x,y,z,pitch,zInner)] = f[2 ];
fB[f_memLR(3 ,x,y,z,pitch,zInner)] = f[3 ];
fB[f_memLR(4 ,x,y,z,pitch,zInner)] = f[4 ];
fB[f_memLR(5 ,x,y,z,pitch,zInner)] = f[5 ];
fB[f_memLR(6 ,x,y,z,pitch,zInner)] = f[6 ];
fB[f_memLR(7 ,x,y,z,pitch,zInner)] = f[7 ];
fB[f_memLR(8 ,x,y,z,pitch,zInner)] = f[8 ];
fB[f_memLR(9 ,x,y,z,pitch,zInner)] = f[9 ];
fB[f_memLR(10,x,y,z,pitch,zInner)] = f[10];
fB[f_memLR(11,x,y,z,pitch,zInner)] = f[11];
fB[f_memLR(12,x,y,z,pitch,zInner)] = f[12];
fB[f_memLR(13,x,y,z,pitch,zInner)] = f[13];
fB[f_memLR(14,x,y,z,pitch,zInner)] = f[14];
fB[f_memLR(15,x,y,z,pitch,zInner)] = f[15];
fB[f_memLR(16,x,y,z,pitch,zInner)] = f[16];
fB[f_memLR(17,x,y,z,pitch,zInner)] = f[17];
fB[f_memLR(18,x,y,z,pitch,zInner)] = f[18];
}
else
{
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
f0 = fA[j];
f1 = fA[f_memLR (1 ,x-1,y ,z ,pitch, zInner)];
f3 = fA[f_memLR (3 ,x+1,y ,z ,pitch, zInner)];
f2 = fA[f_memLR (2 ,x ,y-1,z ,pitch, zInner)];
f5 = fA[f_memLR (5 ,x-1,y-1,z ,pitch, zInner)];
f6 = fA[f_memLR (6 ,x+1,y-1,z ,pitch, zInner)];
f4 = fA[f_memLR (4 ,x ,y+1,z ,pitch, zInner)];
f7 = fA[f_memLR (7 ,x+1,y+1,z ,pitch, zInner)];
f8 = fA[f_memLR (8 ,x-1,y+1,z ,pitch, zInner)];
if(z==zInner-1){//top nodes need info from h
f9 = fA[f_memLR (9 ,x ,y ,z-1,pitch, zInner)];
f10= fA[f_memLR (10,x-1,y ,z-1,pitch, zInner)];
f11= fA[f_memLR (11,x ,y-1,z-1,pitch, zInner)];
f12= fA[f_memLR (12,x+1,y ,z-1,pitch, zInner)];
f13= fA[f_memLR (13,x ,y+1,z-1,pitch, zInner)];
f14= h [buff_memLR(14,x ,y ,pitch)];
f15= h [buff_memLR(15,x-1,y ,pitch)];
f16= h [buff_memLR(16,x ,y-1,pitch)];
f17= h [buff_memLR(17,x+1,y ,pitch)];
f18= h [buff_memLR(18,x ,y+1,pitch)];
}
else if(z==0){//bottom nodes need info from g
f9 = g [buff_memLR(9 ,x ,y ,pitch)];
f10= g [buff_memLR(10,x-1,y ,pitch)];
f11= g [buff_memLR(11,x ,y-1,pitch)];
f12= g [buff_memLR(12,x+1,y ,pitch)];
f13= g [buff_memLR(13,x ,y+1,pitch)];
f14= fA[f_memLR (14,x ,y ,z+1,pitch, zInner)];
f15= fA[f_memLR (15,x-1,y ,z+1,pitch, zInner)];
f16= fA[f_memLR (16,x ,y-1,z+1,pitch, zInner)];
f17= fA[f_memLR (17,x+1,y ,z+1,pitch, zInner)];
f18= fA[f_memLR (18,x ,y+1,z+1,pitch, zInner)];
}
else{//normal nodes
f9 = fA[f_memLR(9 ,x ,y ,z-1,pitch,zInner)];//
f10= fA[f_memLR(10,x-1,y ,z-1,pitch,zInner)];//
f11= fA[f_memLR(11,x ,y-1,z-1,pitch,zInner)];//
f12= fA[f_memLR(12,x+1,y ,z-1,pitch,zInner)];//
f13= fA[f_memLR(13,x ,y+1,z-1,pitch,zInner)];//
f14= fA[f_memLR(14,x ,y ,z+1,pitch,zInner)];//
f15= fA[f_memLR(15,x-1,y ,z+1,pitch,zInner)];//
f16= fA[f_memLR(16,x ,y-1,z+1,pitch,zInner)];//
f17= fA[f_memLR(17,x+1,y ,z+1,pitch,zInner)];//
f18= fA[f_memLR(18,x ,y+1,z+1,pitch,zInner)];//
}//end normal nodes
if(im == 1 || im ==10){//BB
fB[f_memLR(1 ,x,y,z,pitch,zInner)] = f3 ;
fB[f_memLR(2 ,x,y,z,pitch,zInner)] = f4 ;
fB[f_memLR(3 ,x,y,z,pitch,zInner)] = f1 ;
fB[f_memLR(4 ,x,y,z,pitch,zInner)] = f2 ;
fB[f_memLR(5 ,x,y,z,pitch,zInner)] = f7 ;
fB[f_memLR(6 ,x,y,z,pitch,zInner)] = f8 ;
fB[f_memLR(7 ,x,y,z,pitch,zInner)] = f5 ;
fB[f_memLR(8 ,x,y,z,pitch,zInner)] = f6 ;
fB[f_memLR(9 ,x,y,z,pitch,zInner)] = f14;
fB[f_memLR(10,x,y,z,pitch,zInner)] = f17;
fB[f_memLR(11,x,y,z,pitch,zInner)] = f18;
fB[f_memLR(12,x,y,z,pitch,zInner)] = f15;
fB[f_memLR(13,x,y,z,pitch,zInner)] = f16;
fB[f_memLR(14,x,y,z,pitch,zInner)] = f9 ;
fB[f_memLR(15,x,y,z,pitch,zInner)] = f12;
fB[f_memLR(16,x,y,z,pitch,zInner)] = f13;
fB[f_memLR(17,x,y,z,pitch,zInner)] = f10;
fB[f_memLR(18,x,y,z,pitch,zInner)] = f11;
}
else{
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
fB[f_memLR(0 ,x,y,z,pitch,zInner)] = f0 ;
fB[f_memLR(1 ,x,y,z,pitch,zInner)] = f1 ;
fB[f_memLR(2 ,x,y,z,pitch,zInner)] = f2 ;
fB[f_memLR(3 ,x,y,z,pitch,zInner)] = f3 ;
fB[f_memLR(4 ,x,y,z,pitch,zInner)] = f4 ;
fB[f_memLR(5 ,x,y,z,pitch,zInner)] = f5 ;
fB[f_memLR(6 ,x,y,z,pitch,zInner)] = f6 ;
fB[f_memLR(7 ,x,y,z,pitch,zInner)] = f7 ;
fB[f_memLR(8 ,x,y,z,pitch,zInner)] = f8 ;
fB[f_memLR(9 ,x,y,z,pitch,zInner)] = f9 ;
fB[f_memLR(10,x,y,z,pitch,zInner)] = f10;
fB[f_memLR(11,x,y,z,pitch,zInner)] = f11;
fB[f_memLR(12,x,y,z,pitch,zInner)] = f12;
fB[f_memLR(13,x,y,z,pitch,zInner)] = f13;
fB[f_memLR(14,x,y,z,pitch,zInner)] = f14;
fB[f_memLR(15,x,y,z,pitch,zInner)] = f15;
fB[f_memLR(16,x,y,z,pitch,zInner)] = f16;
fB[f_memLR(17,x,y,z,pitch,zInner)] = f17;
fB[f_memLR(18,x,y,z,pitch,zInner)] = f18;
}
}//end else (no interp)
}
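//update_bottom_LR_interp: bottom buffer slice of the refined grid; rim nodes are interpolated from the
//coarse temp_c/g_c buffers and rescaled with SF, remaining nodes do the normal bounce-back or collision update.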
__global__ void update_bottom_LR_interp(float* gA, float* gB, float* f, float* temp, float* g_c, float* temp_c,
float omega, size_t pitch, int zInner, size_t pitch_c, int zInner_c, float SF, int GPU)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
//int z = (zInner+2)-1;
int j = x+y*pitch;//index on padded mem (pitch in elements)
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
float xcoord = LRX0+x*LRFACTOR;
float ycoord = LRY0+y*LRFACTOR;
float zcoord = LRZ0;
int im = ImageFcnLR(xcoord,ycoord,zcoord+GPU*LRFACTOR*ZLRDIM);
if(x < LRLEVEL || x > XLRDIM-1-LRLEVEL || y < LRLEVEL || y > YLRDIM-1-LRLEVEL)
{
float f[19];
if(zcoord<0)
{
int xm = int(xcoord);
int ym = int(ycoord);
int zm = int(zcoord)-1;//for zcoord<0
int xp = xm+1;
int yp = ym+1;
//int zp = zm+1;
float xf = xcoord-xm;
float yf = ycoord-ym;
float zf = zcoord-zm;
for(int i=0;i<19;i++){
float v000 = temp_c[buff_mem(i ,xm,ym ,pitch_c)];
float v001 = temp_c[buff_mem(i ,xp,ym ,pitch_c)];
float v010 = temp_c[buff_mem(i ,xm,yp ,pitch_c)];
float v011 = temp_c[buff_mem(i ,xp,yp ,pitch_c)];
float v100 = g_c[buff_mem(i ,xm,ym,pitch_c)];
float v101 = g_c[buff_mem(i ,xp,ym,pitch_c)];
float v110 = g_c[buff_mem(i ,xm,yp,pitch_c)];
float v111 = g_c[buff_mem(i ,xp,yp,pitch_c)];
f[i] = trilinear_interp(v000, v001, v010, v011, v100, v101, v110, v111, xf, yf, zf);
}
}//end zcoord<0
if(MODEL == "MRT")
mrt_scale_cf(f[0],f[1],f[2],f[3],f[4],f[5],f[6],f[7],f[8],f[9],f[10],f[11],f[12],f[13],f[14],f[15],f[16],f[17],f[18],SF);
else if(MODEL == "BGK")
bgk_scale_cf(f[0],f[1],f[2],f[3],f[4],f[5],f[6],f[7],f[8],f[9],f[10],f[11],f[12],f[13],f[14],f[15],f[16],f[17],f[18],SF);
gB[buff_memLR(0 ,x,y,pitch)] = f[0 ];
gB[buff_memLR(1 ,x,y,pitch)] = f[1 ];
gB[buff_memLR(2 ,x,y,pitch)] = f[2 ];
gB[buff_memLR(3 ,x,y,pitch)] = f[3 ];
gB[buff_memLR(4 ,x,y,pitch)] = f[4 ];
gB[buff_memLR(5 ,x,y,pitch)] = f[5 ];
gB[buff_memLR(6 ,x,y,pitch)] = f[6 ];
gB[buff_memLR(7 ,x,y,pitch)] = f[7 ];
gB[buff_memLR(8 ,x,y,pitch)] = f[8 ];
gB[buff_memLR(9 ,x,y,pitch)] = f[9 ];
gB[buff_memLR(10,x,y,pitch)] = f[10];
gB[buff_memLR(11,x,y,pitch)] = f[11];
gB[buff_memLR(12,x,y,pitch)] = f[12];
gB[buff_memLR(13,x,y,pitch)] = f[13];
gB[buff_memLR(14,x,y,pitch)] = f[14];
gB[buff_memLR(15,x,y,pitch)] = f[15];
gB[buff_memLR(16,x,y,pitch)] = f[16];
gB[buff_memLR(17,x,y,pitch)] = f[17];
gB[buff_memLR(18,x,y,pitch)] = f[18];
}
else
{
f0 = gA [j];
f1 = gA [buff_memLR(1 ,x-1,y ,pitch)];
f3 = gA [buff_memLR(3 ,x+1,y ,pitch)];
f2 = gA [buff_memLR(2 ,x ,y-1,pitch)];
f5 = gA [buff_memLR(5 ,x-1,y-1,pitch)];
f6 = gA [buff_memLR(6 ,x+1,y-1,pitch)];
f4 = gA [buff_memLR(4 ,x ,y+1,pitch)];
f7 = gA [buff_memLR(7 ,x+1,y+1,pitch)];
f8 = gA [buff_memLR(8 ,x-1,y+1,pitch)];
f9 = temp[buff_memLR(9 ,x ,y ,pitch)];
f10= temp[buff_memLR(10,x-1,y ,pitch)];
f11= temp[buff_memLR(11,x ,y-1,pitch)];
f12= temp[buff_memLR(12,x+1,y ,pitch)];
f13= temp[buff_memLR(13,x ,y+1,pitch)];
f14= f [f_memLR(14,x ,y ,0,pitch, zInner)];
f15= f [f_memLR(15,x-1,y ,0,pitch, zInner)];
f16= f [f_memLR(16,x ,y-1,0,pitch, zInner)];
f17= f [f_memLR(17,x+1,y ,0,pitch, zInner)];
f18= f [f_memLR(18,x ,y+1,0,pitch, zInner)];
if(im == 1 || im ==10){//BB
gB[buff_memLR(0 ,x,y,pitch)] = f0 ;
gB[buff_memLR(1 ,x,y,pitch)] = f3 ;
gB[buff_memLR(2 ,x,y,pitch)] = f4 ;
gB[buff_memLR(3 ,x,y,pitch)] = f1 ;
gB[buff_memLR(4 ,x,y,pitch)] = f2 ;
gB[buff_memLR(5 ,x,y,pitch)] = f7 ;
gB[buff_memLR(6 ,x,y,pitch)] = f8 ;
gB[buff_memLR(7 ,x,y,pitch)] = f5 ;
gB[buff_memLR(8 ,x,y,pitch)] = f6 ;
gB[buff_memLR(9 ,x,y,pitch)] = f14;
gB[buff_memLR(10,x,y,pitch)] = f17;
gB[buff_memLR(11,x,y,pitch)] = f18;
gB[buff_memLR(12,x,y,pitch)] = f15;
gB[buff_memLR(13,x,y,pitch)] = f16;
gB[buff_memLR(14,x,y,pitch)] = f9 ;
gB[buff_memLR(15,x,y,pitch)] = f12;
gB[buff_memLR(16,x,y,pitch)] = f13;
gB[buff_memLR(17,x,y,pitch)] = f10;
gB[buff_memLR(18,x,y,pitch)] = f11;
}
else{
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
gB[buff_memLR(0 ,x,y,pitch)] = f0 ;
gB[buff_memLR(1 ,x,y,pitch)] = f1 ;
gB[buff_memLR(2 ,x,y,pitch)] = f2 ;
gB[buff_memLR(3 ,x,y,pitch)] = f3 ;
gB[buff_memLR(4 ,x,y,pitch)] = f4 ;
gB[buff_memLR(5 ,x,y,pitch)] = f5 ;
gB[buff_memLR(6 ,x,y,pitch)] = f6 ;
gB[buff_memLR(7 ,x,y,pitch)] = f7 ;
gB[buff_memLR(8 ,x,y,pitch)] = f8 ;
gB[buff_memLR(9 ,x,y,pitch)] = f9 ;
gB[buff_memLR(10,x,y,pitch)] = f10;
gB[buff_memLR(11,x,y,pitch)] = f11;
gB[buff_memLR(12,x,y,pitch)] = f12;
gB[buff_memLR(13,x,y,pitch)] = f13;
gB[buff_memLR(14,x,y,pitch)] = f14;
gB[buff_memLR(15,x,y,pitch)] = f15;
gB[buff_memLR(16,x,y,pitch)] = f16;
gB[buff_memLR(17,x,y,pitch)] = f17;
gB[buff_memLR(18,x,y,pitch)] = f18;
}
}
}
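//update_top_LR_interp: top buffer slice of the refined grid; rim nodes are interpolated from the coarse
//h_c/temp_c buffers and rescaled with SF, remaining nodes do the normal bounce-back or collision update.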
__global__ void update_top_LR_interp(float* hA, float* hB, float* f, float* temp, float* h_c, float* temp_c,
float omega, size_t pitch, int zInner, size_t pitch_c, int zInner_c, float SF, int GPU)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = (zInner+2)-1;//physical coord
int j = x+y*pitch;//index on padded mem (pitch in elements)
//int im = ImageFcn(x,y,(GPU+1)*(zInner+2)-1);
float xcoord = LRX0+x*LRFACTOR;
float ycoord = LRY0+y*LRFACTOR;
float zcoord = LRZ0+z*LRFACTOR;
int im = ImageFcnLR(xcoord,ycoord,GPU*LRFACTOR*(zInner+2)+zcoord);
if(x < LRLEVEL || x > XLRDIM-1-LRLEVEL || y < LRLEVEL || y > YLRDIM-1-LRLEVEL)
{
float f[19];
int xm = int(xcoord);
int ym = int(ycoord);
int zm = int(zcoord);
int xp = xm+1;
int yp = ym+1;
float xf = xcoord-xm;
float yf = ycoord-ym;
float zf = zcoord-zm;
for(int i=0;i<19;i++){
float v000 = h_c[buff_mem(i ,xm,ym,pitch_c)];
float v001 = h_c[buff_mem(i ,xp,ym,pitch_c)];
float v010 = h_c[buff_mem(i ,xm,yp,pitch_c)];
float v011 = h_c[buff_mem(i ,xp,yp,pitch_c)];
float v100 = temp_c[buff_mem(i ,xm,ym ,pitch_c)];
float v101 = temp_c[buff_mem(i ,xp,ym ,pitch_c)];
float v110 = temp_c[buff_mem(i ,xm,yp ,pitch_c)];
float v111 = temp_c[buff_mem(i ,xp,yp ,pitch_c)];
f[i] = trilinear_interp(v000, v001, v010, v011, v100, v101, v110, v111, xf, yf, zf);
}
// }//end zcoord>ZDIM
if(MODEL == "MRT")
mrt_scale_cf(f[0],f[1],f[2],f[3],f[4],f[5],f[6],f[7],f[8],f[9],f[10],f[11],f[12],f[13],f[14],f[15],f[16],f[17],f[18],SF);
else if(MODEL == "BGK")
bgk_scale_cf(f[0],f[1],f[2],f[3],f[4],f[5],f[6],f[7],f[8],f[9],f[10],f[11],f[12],f[13],f[14],f[15],f[16],f[17],f[18],SF);
hB[buff_memLR(0 ,x,y,pitch)] = f[0 ];
hB[buff_memLR(1 ,x,y,pitch)] = f[1 ];
hB[buff_memLR(2 ,x,y,pitch)] = f[2 ];
hB[buff_memLR(3 ,x,y,pitch)] = f[3 ];
hB[buff_memLR(4 ,x,y,pitch)] = f[4 ];
hB[buff_memLR(5 ,x,y,pitch)] = f[5 ];
hB[buff_memLR(6 ,x,y,pitch)] = f[6 ];
hB[buff_memLR(7 ,x,y,pitch)] = f[7 ];
hB[buff_memLR(8 ,x,y,pitch)] = f[8 ];
hB[buff_memLR(9 ,x,y,pitch)] = f[9 ];
hB[buff_memLR(10,x,y,pitch)] = f[10];
hB[buff_memLR(11,x,y,pitch)] = f[11];
hB[buff_memLR(12,x,y,pitch)] = f[12];
hB[buff_memLR(13,x,y,pitch)] = f[13];
hB[buff_memLR(14,x,y,pitch)] = f[14];
hB[buff_memLR(15,x,y,pitch)] = f[15];
hB[buff_memLR(16,x,y,pitch)] = f[16];
hB[buff_memLR(17,x,y,pitch)] = f[17];
hB[buff_memLR(18,x,y,pitch)] = f[18];
}
else{//not LR interp region
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
f0 = hA[j];
f1 = hA [buff_memLR(1 ,x-1,y ,pitch)];
f3 = hA [buff_memLR(3 ,x+1,y ,pitch)];
f2 = hA [buff_memLR(2 ,x ,y-1,pitch)];
f5 = hA [buff_memLR(5 ,x-1,y-1,pitch)];
f6 = hA [buff_memLR(6 ,x+1,y-1,pitch)];
f4 = hA [buff_memLR(4 ,x ,y+1,pitch)];
f7 = hA [buff_memLR(7 ,x+1,y+1,pitch)];
f8 = hA [buff_memLR(8 ,x-1,y+1,pitch)];
f9 = f [f_memLR(9 ,x ,y ,zInner-1,pitch, zInner)];
f10= f [f_memLR(10,x-1,y ,zInner-1,pitch, zInner)];
f11= f [f_memLR(11,x ,y-1,zInner-1,pitch, zInner)];
f12= f [f_memLR(12,x+1,y ,zInner-1,pitch, zInner)];
f13= f [f_memLR(13,x ,y+1,zInner-1,pitch, zInner)];
f14= temp[buff_memLR(14,x ,y ,pitch)];
f15= temp[buff_memLR(15,x-1,y ,pitch)];
f16= temp[buff_memLR(16,x ,y-1,pitch)];
f17= temp[buff_memLR(17,x+1,y ,pitch)];
f18= temp[buff_memLR(18,x ,y+1,pitch)];
if(im == 1 || im ==10){//BB
hB[buff_memLR(0 ,x,y,pitch)] = f0 ;
hB[buff_memLR(1 ,x,y,pitch)] = f3 ;
hB[buff_memLR(2 ,x,y,pitch)] = f4 ;
hB[buff_memLR(3 ,x,y,pitch)] = f1 ;
hB[buff_memLR(4 ,x,y,pitch)] = f2 ;
hB[buff_memLR(5 ,x,y,pitch)] = f7 ;
hB[buff_memLR(6 ,x,y,pitch)] = f8 ;
hB[buff_memLR(7 ,x,y,pitch)] = f5 ;
hB[buff_memLR(8 ,x,y,pitch)] = f6 ;
hB[buff_memLR(9 ,x,y,pitch)] = f14;
hB[buff_memLR(10,x,y,pitch)] = f17;
hB[buff_memLR(11,x,y,pitch)] = f18;
hB[buff_memLR(12,x,y,pitch)] = f15;
hB[buff_memLR(13,x,y,pitch)] = f16;
hB[buff_memLR(14,x,y,pitch)] = f9 ;
hB[buff_memLR(15,x,y,pitch)] = f12;
hB[buff_memLR(16,x,y,pitch)] = f13;
hB[buff_memLR(17,x,y,pitch)] = f10;
hB[buff_memLR(18,x,y,pitch)] = f11;
}
else{
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
hB[buff_memLR(0 ,x,y,pitch)] = f0 ;
hB[buff_memLR(1 ,x,y,pitch)] = f1 ;
hB[buff_memLR(2 ,x,y,pitch)] = f2 ;
hB[buff_memLR(3 ,x,y,pitch)] = f3 ;
hB[buff_memLR(4 ,x,y,pitch)] = f4 ;
hB[buff_memLR(5 ,x,y,pitch)] = f5 ;
hB[buff_memLR(6 ,x,y,pitch)] = f6 ;
hB[buff_memLR(7 ,x,y,pitch)] = f7 ;
hB[buff_memLR(8 ,x,y,pitch)] = f8 ;
hB[buff_memLR(9 ,x,y,pitch)] = f9 ;
hB[buff_memLR(10,x,y,pitch)] = f10;
hB[buff_memLR(11,x,y,pitch)] = f11;
hB[buff_memLR(12,x,y,pitch)] = f12;
hB[buff_memLR(13,x,y,pitch)] = f13;
hB[buff_memLR(14,x,y,pitch)] = f14;
hB[buff_memLR(15,x,y,pitch)] = f15;
hB[buff_memLR(16,x,y,pitch)] = f16;
hB[buff_memLR(17,x,y,pitch)] = f17;
hB[buff_memLR(18,x,y,pitch)] = f18;
}
}
}
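//ld_gb1_cg: inline-PTX global load using the .cg (cache-global, L2-only) hint.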
__device__ __inline__ float ld_gb1_cg(const float *addr)
{
float return_value;
asm("ld.global.cg.f32 %0, [%1];" : "=f"(return_value) : "l"(addr));
return return_value;
}
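//initialize_single: fill the interior distribution slices of one sub-domain (coarse or refined, depending on
//'level') with the D3Q19 equilibrium for rho=1 and a uniform velocity (v=UMAX plus a small x perturbation);
//velocity is zeroed on solid nodes. BGK uses the explicit second-order equilibrium, MRT uses mrt_feq.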
__global__ void initialize_single(float *f, size_t pitch, int yDim, int zDim, int GPU_N, int level)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
float xcoord = x;
float ycoord = y;
float zcoord = z+1+GPU_N*ZDIM;
if(level > 0){
xcoord = LRX0+x*LRFACTOR;
ycoord = LRY0+y*LRFACTOR;
zcoord = LRZ0+z*LRFACTOR;
}
int j = x+y*pitch+z*yDim*pitch;//index on padded mem (pitch in elements)
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
int im = ImageFcnLR(xcoord,ycoord,zcoord);
float u,v,w,rho,usqr;
rho = 1.f;
u = 0.01f;
v = UMAX;
w = 0.0f;
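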
if(im == 10 || im == 1){
u = 0.0f;
v = 0.0f;
w = 0.0f;
}
usqr = u*u+v*v+w*w;
if(MODEL == "BGK"){
f[j+0 *pitch*yDim*(zDim/GPU_N-2)]= 1.0f/3.0f*(rho-1.5f*usqr);
f[j+1 *pitch*yDim*(zDim/GPU_N-2)]= 1.0f/18.0f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);
f[j+2 *pitch*yDim*(zDim/GPU_N-2)]= 1.0f/18.0f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr);
f[j+3 *pitch*yDim*(zDim/GPU_N-2)]= 1.0f/18.0f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
f[j+4 *pitch*yDim*(zDim/GPU_N-2)]= 1.0f/18.0f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr);
f[j+5 *pitch*yDim*(zDim/GPU_N-2)]= 1.0f/36.0f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr);
f[j+6 *pitch*yDim*(zDim/GPU_N-2)]= 1.0f/36.0f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
f[j+7 *pitch*yDim*(zDim/GPU_N-2)]= 1.0f/36.0f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
f[j+8 *pitch*yDim*(zDim/GPU_N-2)]= 1.0f/36.0f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr);
f[j+9 *pitch*yDim*(zDim/GPU_N-2)]= 1.0f/18.0f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr);
f[j+10*pitch*yDim*(zDim/GPU_N-2)]= 1.0f/36.0f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr);
f[j+11*pitch*yDim*(zDim/GPU_N-2)]= 1.0f/36.0f*(rho+3.0f*(v+w)+4.5f*(v+w)*(v+w)-1.5f*usqr);
f[j+12*pitch*yDim*(zDim/GPU_N-2)]= 1.0f/36.0f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
f[j+13*pitch*yDim*(zDim/GPU_N-2)]= 1.0f/36.0f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr);
f[j+14*pitch*yDim*(zDim/GPU_N-2)]= 1.0f/18.0f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr);
f[j+15*pitch*yDim*(zDim/GPU_N-2)]= 1.0f/36.0f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr);
f[j+16*pitch*yDim*(zDim/GPU_N-2)]= 1.0f/36.0f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr);
f[j+17*pitch*yDim*(zDim/GPU_N-2)]= 1.0f/36.0f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
f[j+18*pitch*yDim*(zDim/GPU_N-2)]= 1.0f/36.0f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr);
}
else{
mrt_feq(rho,u,v,w,f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18);
f[j+0 *pitch*yDim*(zDim/GPU_N-2)]=f0 ;
f[j+1 *pitch*yDim*(zDim/GPU_N-2)]=f1 ;
f[j+2 *pitch*yDim*(zDim/GPU_N-2)]=f2 ;
f[j+3 *pitch*yDim*(zDim/GPU_N-2)]=f3 ;
f[j+4 *pitch*yDim*(zDim/GPU_N-2)]=f4 ;
f[j+5 *pitch*yDim*(zDim/GPU_N-2)]=f5 ;
f[j+6 *pitch*yDim*(zDim/GPU_N-2)]=f6 ;
f[j+7 *pitch*yDim*(zDim/GPU_N-2)]=f7 ;
f[j+8 *pitch*yDim*(zDim/GPU_N-2)]=f8 ;
f[j+9 *pitch*yDim*(zDim/GPU_N-2)]=f9 ;
f[j+10*pitch*yDim*(zDim/GPU_N-2)]=f10;
f[j+11*pitch*yDim*(zDim/GPU_N-2)]=f11;
f[j+12*pitch*yDim*(zDim/GPU_N-2)]=f12;
f[j+13*pitch*yDim*(zDim/GPU_N-2)]=f13;
f[j+14*pitch*yDim*(zDim/GPU_N-2)]=f14;
f[j+15*pitch*yDim*(zDim/GPU_N-2)]=f15;
f[j+16*pitch*yDim*(zDim/GPU_N-2)]=f16;
f[j+17*pitch*yDim*(zDim/GPU_N-2)]=f17;
f[j+18*pitch*yDim*(zDim/GPU_N-2)]=f18;
}
}
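//initialize_buffer: same equilibrium initialization for the single x-y planes used as the
//bottom (g) and top (h) edge buffers exchanged between GPUs; one pitch*yDim plane per direction.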
__global__ void initialize_buffer(float *g, size_t pitch, int yDim, int GPU_N, int level)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int j = x+y*pitch;//index on padded mem (pitch in elements)
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
float xcoord = x;
float ycoord = y;
float zcoord = 0+1+GPU_N*ZDIM;
if(level > 0){
xcoord = LRX0+x*LRFACTOR;
ycoord = LRY0+y*LRFACTOR;
zcoord = LRZ0;
}
int im = ImageFcn(xcoord,ycoord,zcoord);
float u,v,w,rho,usqr;
rho = 1.f;
u = 0.01f;
v = UMAX;
w = 0.0f;
if(im == 10 || im == 1){
u = 0.0f;
v = 0.0f;
w = 0.0f;
}
usqr = u*u+v*v+w*w;
if(MODEL == "BGK"){
g[j+0 *pitch*yDim]= 1.0f/3.0f*(rho-1.5f*usqr);
g[j+1 *pitch*yDim]= 1.0f/18.0f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);
g[j+2 *pitch*yDim]= 1.0f/18.0f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr);
g[j+3 *pitch*yDim]= 1.0f/18.0f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
g[j+4 *pitch*yDim]= 1.0f/18.0f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr);
g[j+5 *pitch*yDim]= 1.0f/36.0f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr);
g[j+6 *pitch*yDim]= 1.0f/36.0f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
g[j+7 *pitch*yDim]= 1.0f/36.0f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
g[j+8 *pitch*yDim]= 1.0f/36.0f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr);
g[j+9 *pitch*yDim]= 1.0f/18.0f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr);
g[j+10*pitch*yDim]= 1.0f/36.0f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr);
g[j+11*pitch*yDim]= 1.0f/36.0f*(rho+3.0f*(v+w)+4.5f*(v+w)*(v+w)-1.5f*usqr);
g[j+12*pitch*yDim]= 1.0f/36.0f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
g[j+13*pitch*yDim]= 1.0f/36.0f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr);
g[j+14*pitch*yDim]= 1.0f/18.0f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr);
g[j+15*pitch*yDim]= 1.0f/36.0f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr);
g[j+16*pitch*yDim]= 1.0f/36.0f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr);
g[j+17*pitch*yDim]= 1.0f/36.0f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
g[j+18*pitch*yDim]= 1.0f/36.0f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr);
}
else{
mrt_feq(rho,u,v,w,f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18);
g[j+0 *pitch*yDim]=f0 ;
g[j+1 *pitch*yDim]=f1 ;
g[j+2 *pitch*yDim]=f2 ;
g[j+3 *pitch*yDim]=f3 ;
g[j+4 *pitch*yDim]=f4 ;
g[j+5 *pitch*yDim]=f5 ;
g[j+6 *pitch*yDim]=f6 ;
g[j+7 *pitch*yDim]=f7 ;
g[j+8 *pitch*yDim]=f8 ;
g[j+9 *pitch*yDim]=f9 ;
g[j+10*pitch*yDim]=f10;
g[j+11*pitch*yDim]=f11;
g[j+12*pitch*yDim]=f12;
g[j+13*pitch*yDim]=f13;
g[j+14*pitch*yDim]=f14;
g[j+15*pitch*yDim]=f15;
g[j+16*pitch*yDim]=f16;
g[j+17*pitch*yDim]=f17;
g[j+18*pitch*yDim]=f18;
}
}
//zMin = minimum zcoord, zNum = number of nodes in z
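//WriteResults reconstructs rho and (u,v,w) from the 19 populations at each node and writes one
//comma-separated row per node (Tecplot-style POINT format, matching the ZONE headers written in main).
//The PI/Smag block below evaluates the strain-rate magnitude from non-equilibrium moments but is not
//written out; the last four columns currently hold f0, f1, f9 and f18 rather than the averaged
//quantities named in the VARIABLES header.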
void WriteResults(float *f, ofstream &output, float omega, int xDim, int yDim, int zMin, int zNum, float x0, float y0, float z0, float scale)
{
for(int k = 0; k<zNum; k++){
for(int i = 0; i<yDim; i++){
for(int j = 0; j<xDim; j++){
//int index = i*xDim+j;
float f0 = f[(j+i*xDim+k*yDim*xDim)+0 *xDim*yDim*zNum];
float f1 = f[(j+i*xDim+k*yDim*xDim)+1 *xDim*yDim*zNum];
float f2 = f[(j+i*xDim+k*yDim*xDim)+2 *xDim*yDim*zNum];
float f3 = f[(j+i*xDim+k*yDim*xDim)+3 *xDim*yDim*zNum];
float f4 = f[(j+i*xDim+k*yDim*xDim)+4 *xDim*yDim*zNum];
float f5 = f[(j+i*xDim+k*yDim*xDim)+5 *xDim*yDim*zNum];
float f6 = f[(j+i*xDim+k*yDim*xDim)+6 *xDim*yDim*zNum];
float f7 = f[(j+i*xDim+k*yDim*xDim)+7 *xDim*yDim*zNum];
float f8 = f[(j+i*xDim+k*yDim*xDim)+8 *xDim*yDim*zNum];
float f9 = f[(j+i*xDim+k*yDim*xDim)+9 *xDim*yDim*zNum];
float f10= f[(j+i*xDim+k*yDim*xDim)+10*xDim*yDim*zNum];
float f11= f[(j+i*xDim+k*yDim*xDim)+11*xDim*yDim*zNum];
float f12= f[(j+i*xDim+k*yDim*xDim)+12*xDim*yDim*zNum];
float f13= f[(j+i*xDim+k*yDim*xDim)+13*xDim*yDim*zNum];
float f14= f[(j+i*xDim+k*yDim*xDim)+14*xDim*yDim*zNum];
float f15= f[(j+i*xDim+k*yDim*xDim)+15*xDim*yDim*zNum];
float f16= f[(j+i*xDim+k*yDim*xDim)+16*xDim*yDim*zNum];
float f17= f[(j+i*xDim+k*yDim*xDim)+17*xDim*yDim*zNum];
float f18= f[(j+i*xDim+k*yDim*xDim)+18*xDim*yDim*zNum];
// float f2 = f[index+xDim*yDim*zNum*2 ];
// float f3 = f[index+xDim*yDim*zNum*3 ];
// float f4 = f[index+xDim*yDim*zNum*4 ];
// float f5 = f[index+xDim*yDim*zNum*5 ];
// float f6 = f[index+xDim*yDim*zNum*6 ];
// float f7 = f[index+xDim*yDim*zNum*7 ];
// float f8 = f[index+xDim*yDim*zNum*8 ];
// float f9 = f[index+xDim*yDim*zNum*9 ];
// float f10= f[index+xDim*yDim*zNum*10];
// float f11= f[index+xDim*yDim*zNum*11];
// float f12= f[index+xDim*yDim*zNum*12];
// float f13= f[index+xDim*yDim*zNum*13];
// float f14= f[index+xDim*yDim*zNum*14];
// float f15= f[index+xDim*yDim*zNum*15];
// float f16= f[index+xDim*yDim*zNum*16];
// float f17= f[index+xDim*yDim*zNum*17];
// float f18= f[index+xDim*yDim*zNum*18];
float rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9;
rho +=f10+f11+f12+f13+f14+f15+f16+f17+f18;
float u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
float v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
float w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
float m1 = 19.f*(-f0+ f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18 -(u*u+v*v+w*w));
float m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+- f9+ f10+ -2.f*f11+ f12+-2.f*f13+- f14+ f15+ -2.f*f16+ f17+-2.f*f18 -(2.f*u*u-(v*v+w*w));
float m11 = f2 + f4+ f5+ f6+ f7+ f8+- f9+-f10 +-f12 +- f14+-f15 +-f17 -(v*v-w*w);
float m13 = f5+-f6+ f7+-f8 -u*v;
float m14 = f11 +- f13 + - f16 + f18 -v*w;
float m15 = f10 + - f12 +-f15 + f17 -u*w;
float PI11 = -0.026315789f*m1-0.5f *omega*m9;
float PI22 = -0.026315789f*m1+0.25f*omega*(m9-3.0f*m11);
float PI33 = -0.026315789f*m1+0.25f*omega*(m9+3.0f*m11);
float PI12 = -1.5f*omega*m13;
float PI23 = -1.5f*omega*m14;
float PI13 = -1.5f*omega*m15;
//float nu0 = ((1.0f/omega)-0.5f)/3.0f;
float Smag = sqrt(2.f*(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13));
output<<x0+scale*j<<", "<<y0+scale*i<<", "<<z0+scale*(zMin+k)<<", "<<u<<","<<v<<","<<w<<","<<rho<<","
//<<uAv_h[i]<<","<<vAv_h[i]<<", "<<ufluc_h[i]<<","<<vfluc_h[i]<<endl;
<<f0<<","<<f1<<", "<<f9<<","<<f18<<endl;
}}}
}
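//WriteForces normalizes the accumulated force history by the reference scale UMAX^2*ZDIM*OBSTR1
//(times LRLEVEL^2 on the refined grid) and writes one row per recorded step.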
void WriteForces(float *FX, float *FY, float *FZ, ofstream &output, int ForceTime, int level)
{
float ref = UMAX*UMAX*ZDIM*OBSTR1;
if(level > 0)
ref *= LRLEVEL*LRLEVEL;
for(int i = 0; i<ForceTime; i++){
output<<i+ForceTime<<", "<<FX[i]/ref<<", "<<FY[i]/ref<<", "<<FZ[i]/ref<<endl;
}
}
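//main(): queries the available GPUs, rounds the row pitch up to a power of two (in floats),
//allocates per-GPU interior arrays (f_inner) plus single-plane bottom/top buffers (g/h) with peer
//access enabled, initializes everything to equilibrium, and then runs the ping-pong time loop below.
//Note: the neighbour indexing abs(n-1)%GPU_N used for the peer copies only wraps correctly for
//GPU_N <= 2, consistent with the "2 halo layers per GPU (for 2 GPUs)" comment further down.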
int main(int argc, char *argv[])
{
int GPU_N;
hipGetDeviceCount(&GPU_N);
//GPU_N = 1;
cout<<"number of GPUs: "<<GPU_N<<endl;
int outputflag = 1;
if(argc>1){
if(strcmp(argv[1],"-no")==0){
outputflag = 0;
cout<<"no outputs option\n";
}
}
ofstream output;
ofstream outputForce;
string FileName = CASENAME;
//output.open ("LBM1_out.dat");
output.open ((FileName+".dat").c_str());
outputForce.open ((FileName+".force").c_str());
//size_t memsize, memsize2;
size_t pitch = 2;
while(pitch<XDIM)
pitch=pitch*2;
pitch = pitch*sizeof(float);
size_t pitch_elements = pitch/sizeof(float);
cout<<"Pitch (in elements): "<<pitch/sizeof(float)<<endl;
int i, nBlocks;
float omega, CharLength;
CharLength = OBSTR1*2.f;
omega = 1.0f/(3.0f*(UMAX*CharLength/RE)+0.5f);
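//omega = 1/tau with tau = 3*nu + 0.5 and nu = UMAX*CharLength/RE (lattice units). Illustrative
//example only (actual values depend on the #defines): RE = 100, UMAX = 0.08 and OBSTR1 = 4 give
//CharLength = 8, nu = 0.0064, tau = 0.5192 and omega ~= 1.926.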
float omegaLR = 2.0f/(1.0f+2.0f*(2.0f/omega-1.0f));
if(LRFACTOR == 0.25f){
omegaLR = 2.0f/(1.0f+2.0f*(2.0f/omegaLR-1.0f));
}
float SF_cf = omega*(1.0f-omegaLR)/((1.0f-omega)*omegaLR/LRFACTOR);
float SF_fc = 1.f/SF_cf;
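//SF_cf and SF_fc rescale the non-equilibrium part of the distributions when they are copied across
//the refinement interface (coarse->fine and fine->coarse respectively), compensating for the
//different relaxation rates and grid spacing so the viscous stress remains continuous.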
cout<<"omega : "<<omega<<endl;
cout<<"omegaLR : "<<omegaLR<<endl;
cout<<"blocksize: "<<BLOCKSIZEX<<"x"<<BLOCKSIZEY<<"x"<<BLOCKSIZEZ<<endl;
cout<<"grid: "<<XDIM<<"x"<<YDIM<<"x"<<ZDIM<<endl;
cout<<"gridLR: "<<XLRDIM<<"x"<<YLRDIM<<"x"<<ZLRDIM<<endl;
cout<<"TMAX: "<<TMAX<<endl;
cout<<"Method: "<<METHOD<<endl;
cout<<"Model: "<<MODEL<<endl;
cout<<"Refinement: "<<LRLEVEL<<endl;
if(abs(LRFACTOR-1.f/LRLEVEL)>0.001f){
cout<<"LRLEVEL and LRFACTOR don't match! Exiting..."<<endl;
return 0;
}
int zInner = ZDIM/GPU_N-2; //excluding halo
//int zGPU = ZDIM/GPU_N;//z nodes per GPU (including halo)
//nBlocks does not include the halo layers
nBlocks = ((XDIM+BLOCKSIZEX-1)/BLOCKSIZEX)*((YDIM+BLOCKSIZEY-1)/BLOCKSIZEY)
*((zInner+BLOCKSIZEZ-1)/BLOCKSIZEZ);
cout<<"nBlocks:"<<nBlocks<<endl;
int ForceTime = max(0,TMAX-STARTF);
dim3 threads(BLOCKSIZEX, BLOCKSIZEY, BLOCKSIZEZ);
//2 halo layers per GPU (for 2 GPUs)
dim3 grid(((XDIM+BLOCKSIZEX-1)/BLOCKSIZEX),((YDIM+BLOCKSIZEY-1)/BLOCKSIZEY),(zInner)/BLOCKSIZEZ);
dim3 g_grid(((XDIM+BLOCKSIZEX-1)/BLOCKSIZEX),((YDIM+BLOCKSIZEY-1)/BLOCKSIZEY),1);
dim3 h_grid(((XDIM+BLOCKSIZEX-1)/BLOCKSIZEX),((YDIM+BLOCKSIZEY-1)/BLOCKSIZEY),1);
hipStream_t stream_halo[GPU_N];
hipStream_t stream_inner[GPU_N];
//data pointers as 3D array (GPUxCoord)
float *f_inner_h[GPU_N], *g_h[GPU_N], *h_h[GPU_N];
float *f_inner_A_d[GPU_N], *g_A_d[GPU_N], *h_A_d[GPU_N];
float *f_inner_B_d[GPU_N], *g_B_d[GPU_N], *h_B_d[GPU_N];
float *g_temp[GPU_N], *h_temp[GPU_N];
float *FX_h[GPU_N],*FY_h[GPU_N],*FZ_h[GPU_N];
float *FX_d[GPU_N],*FY_d[GPU_N],*FZ_d[GPU_N];
float *FX_total,*FY_total,*FZ_total;
FX_total = (float *)malloc(ForceTime*sizeof(float));
FY_total = (float *)malloc(ForceTime*sizeof(float));
FZ_total = (float *)malloc(ForceTime*sizeof(float));
for(i=0;i<(ForceTime);i++){
FX_total[i] = 0;
FY_total[i] = 0;
FZ_total[i] = 0;
}
//Malloc and Initialize for each GPU
for(int n = 0; n<GPU_N; n++){
f_inner_h[n] = (float *)malloc(XDIM*YDIM*zInner*19*sizeof(float));
g_h [n] = (float *)malloc(XDIM*YDIM* 19*sizeof(float));
h_h [n] = (float *)malloc(XDIM*YDIM* 19*sizeof(float));
FX_h [n] = (float *)malloc(ForceTime*sizeof(float));
FY_h [n] = (float *)malloc(ForceTime*sizeof(float));
FZ_h [n] = (float *)malloc(ForceTime*sizeof(float));
hipSetDevice(n);
hipStreamCreate(&stream_halo[n]);
hipStreamCreate(&stream_inner[n]);
for(int m = 0; m<GPU_N; m++){
if(m != n)
hipDeviceEnablePeerAccess(m,0);
}
hipMalloc((void **) &f_inner_A_d[n], pitch*YDIM*zInner*19*sizeof(float));
hipMalloc((void **) &f_inner_B_d[n], pitch*YDIM*zInner*19*sizeof(float));
hipMalloc((void **) & g_A_d[n], pitch*YDIM* 19*sizeof(float));
hipMalloc((void **) & g_B_d[n], pitch*YDIM* 19*sizeof(float));
hipMalloc((void **) & h_A_d[n], pitch*YDIM* 19*sizeof(float));
hipMalloc((void **) & h_B_d[n], pitch*YDIM* 19*sizeof(float));
hipMalloc((void **) & g_temp[n], pitch*YDIM* 19*sizeof(float));
hipMalloc((void **) & h_temp[n], pitch*YDIM* 19*sizeof(float));
hipMalloc((void **) & FX_d[n], (ForceTime)*sizeof(float));
hipMalloc((void **) & FY_d[n], (ForceTime)*sizeof(float));
hipMalloc((void **) & FZ_d[n], (ForceTime)*sizeof(float));
//initialize host f_inner
for (i = 0; i < XDIM*YDIM*zInner*19; i++)
f_inner_h[n][i] = 0;
//initialize host g,h
for (i = 0; i < XDIM*YDIM*19; i++){
g_h[n][i] = 0;
h_h[n][i] = 0;
}
for(i=0;i<(ForceTime);i++){
FX_h[n][i] = 0;
FY_h[n][i] = 0;
FZ_h[n][i] = 0;
}
hipMemcpy2D(f_inner_A_d[n],pitch,f_inner_h[n],XDIM*sizeof(float),XDIM*sizeof(float),YDIM*zInner*19,hipMemcpyHostToDevice);
hipMemcpy2D(f_inner_B_d[n],pitch,f_inner_h[n],XDIM*sizeof(float),XDIM*sizeof(float),YDIM*zInner*19,hipMemcpyHostToDevice);
hipMemcpy2D( g_A_d[n],pitch, g_h[n],XDIM*sizeof(float),XDIM*sizeof(float),YDIM* 19,hipMemcpyHostToDevice);
hipMemcpy2D( g_B_d[n],pitch, g_h[n],XDIM*sizeof(float),XDIM*sizeof(float),YDIM* 19,hipMemcpyHostToDevice);
hipMemcpy2D( h_A_d[n],pitch, h_h[n],XDIM*sizeof(float),XDIM*sizeof(float),YDIM* 19,hipMemcpyHostToDevice);
hipMemcpy2D( h_B_d[n],pitch, h_h[n],XDIM*sizeof(float),XDIM*sizeof(float),YDIM* 19,hipMemcpyHostToDevice);
hipMemcpy( FX_d[n], FX_h[n],sizeof(float)*(ForceTime),hipMemcpyHostToDevice);
hipMemcpy( FY_d[n], FY_h[n],sizeof(float)*(ForceTime),hipMemcpyHostToDevice);
hipMemcpy( FZ_d[n], FZ_h[n],sizeof(float)*(ForceTime),hipMemcpyHostToDevice);
hipLaunchKernelGGL(( initialize_single), dim3(grid) , dim3(threads), 0, 0, f_inner_A_d[n],pitch_elements,YDIM,ZDIM,GPU_N,0);
hipLaunchKernelGGL(( initialize_single), dim3(grid) , dim3(threads), 0, 0, f_inner_B_d[n],pitch_elements,YDIM,ZDIM,GPU_N,0);
hipLaunchKernelGGL(( initialize_buffer), dim3(g_grid), dim3(threads), 0, 0, g_A_d[n],pitch_elements,YDIM,GPU_N,0);
hipLaunchKernelGGL(( initialize_buffer), dim3(g_grid), dim3(threads), 0, 0, g_B_d[n],pitch_elements,YDIM,GPU_N,0);
hipLaunchKernelGGL(( initialize_buffer), dim3(g_grid), dim3(threads), 0, 0, h_A_d[n],pitch_elements,YDIM,GPU_N,0);
hipLaunchKernelGGL(( initialize_buffer), dim3(g_grid), dim3(threads), 0, 0, h_B_d[n],pitch_elements,YDIM,GPU_N,0);
hipLaunchKernelGGL(( initialize_buffer), dim3(g_grid), dim3(threads), 0, 0, g_temp[n],pitch_elements,YDIM,GPU_N,0);
hipLaunchKernelGGL(( initialize_buffer), dim3(g_grid), dim3(threads), 0, 0, h_temp[n],pitch_elements,YDIM,GPU_N,0);
}//end Malloc and Initialize
//data pointers for LR
float *f_inner_LR_h[GPU_N], *g_LR_h[GPU_N], *h_LR_h[GPU_N];
float *f_inner_LR_A_d[GPU_N], *g_LR_A_d[GPU_N], *h_LR_A_d[GPU_N];
float *f_inner_LR_B_d[GPU_N], *g_LR_B_d[GPU_N], *h_LR_B_d[GPU_N];
float *g_LR_temp[GPU_N], *h_LR_temp[GPU_N];
size_t LRpitch = 2;
while(LRpitch<XLRDIM)
LRpitch=LRpitch*2;
LRpitch = LRpitch*sizeof(float);
size_t LRpitch_elements = LRpitch/sizeof(float);
cout<<"LR Pitch (in elements): "<<LRpitch/sizeof(float)<<endl;
int zLRInner = ZLRDIM/GPU_N-2;
dim3 LRthreads(BLOCKSIZELRX, BLOCKSIZELRY, BLOCKSIZELRZ);
dim3 LRgrid(((XLRDIM+BLOCKSIZELRX-1)/BLOCKSIZELRX),((YLRDIM+BLOCKSIZELRY-1)/BLOCKSIZELRY),(zLRInner)/BLOCKSIZELRZ);
dim3 g_LR_grid(((XLRDIM+BLOCKSIZELRX-1)/BLOCKSIZELRX),((YLRDIM+BLOCKSIZELRY-1)/BLOCKSIZELRY),1);
//LR setup
if(REFINEMENT == 1){
for(int n = 0; n<GPU_N; n++){
f_inner_LR_h[n] = (float *)malloc(XLRDIM*YLRDIM*zLRInner*19*sizeof(float));
g_LR_h [n] = (float *)malloc(XLRDIM*YLRDIM* 19*sizeof(float));
h_LR_h [n] = (float *)malloc(XLRDIM*YLRDIM* 19*sizeof(float));
hipSetDevice(n);
hipMalloc((void **) &f_inner_LR_A_d[n], LRpitch*YLRDIM*zLRInner*19*sizeof(float));
hipMalloc((void **) &f_inner_LR_B_d[n], LRpitch*YLRDIM*zLRInner*19*sizeof(float));
hipMalloc((void **) & g_LR_A_d[n], LRpitch*YLRDIM* 19*sizeof(float));
hipMalloc((void **) & g_LR_B_d[n], LRpitch*YLRDIM* 19*sizeof(float));
hipMalloc((void **) & h_LR_A_d[n], LRpitch*YLRDIM* 19*sizeof(float));
hipMalloc((void **) & h_LR_B_d[n], LRpitch*YLRDIM* 19*sizeof(float));
hipMalloc((void **) & g_LR_temp[n], LRpitch*YLRDIM* 19*sizeof(float));
hipMalloc((void **) & h_LR_temp[n], LRpitch*YLRDIM* 19*sizeof(float));
//initialize host f_inner
for (i = 0; i < XLRDIM*YLRDIM*zLRInner*19; i++)
f_inner_LR_h[n][i] = 0;
//initialize host g,h
for (i = 0; i < XLRDIM*YLRDIM*19; i++){
g_LR_h[n][i] = 0;
h_LR_h[n][i] = 0;
}
hipMemcpy2D(f_inner_LR_A_d[n],LRpitch,f_inner_LR_h[n],XLRDIM*sizeof(float),XLRDIM*sizeof(float),YLRDIM*zLRInner*19,hipMemcpyHostToDevice);
hipMemcpy2D(f_inner_LR_B_d[n],LRpitch,f_inner_LR_h[n],XLRDIM*sizeof(float),XLRDIM*sizeof(float),YLRDIM*zLRInner*19,hipMemcpyHostToDevice);
hipMemcpy2D( g_LR_A_d[n],LRpitch, g_LR_h[n],XLRDIM*sizeof(float),XLRDIM*sizeof(float),YLRDIM* 19,hipMemcpyHostToDevice);
hipMemcpy2D( g_LR_B_d[n],LRpitch, g_LR_h[n],XLRDIM*sizeof(float),XLRDIM*sizeof(float),YLRDIM* 19,hipMemcpyHostToDevice);
hipMemcpy2D( h_LR_A_d[n],LRpitch, h_LR_h[n],XLRDIM*sizeof(float),XLRDIM*sizeof(float),YLRDIM* 19,hipMemcpyHostToDevice);
hipMemcpy2D( h_LR_B_d[n],LRpitch, h_LR_h[n],XLRDIM*sizeof(float),XLRDIM*sizeof(float),YLRDIM* 19,hipMemcpyHostToDevice);
hipLaunchKernelGGL(( initialize_single), dim3(LRgrid) , dim3(LRthreads), 0, 0, f_inner_LR_A_d[n],LRpitch_elements,YLRDIM,ZLRDIM,GPU_N,LRLEVEL);
hipLaunchKernelGGL(( initialize_single), dim3(LRgrid) , dim3(LRthreads), 0, 0, f_inner_LR_B_d[n],LRpitch_elements,YLRDIM,ZLRDIM,GPU_N,LRLEVEL);
hipLaunchKernelGGL(( initialize_buffer), dim3(g_LR_grid), dim3(LRthreads), 0, 0, g_LR_A_d[n],LRpitch_elements,YLRDIM,GPU_N,LRLEVEL);
hipLaunchKernelGGL(( initialize_buffer), dim3(g_LR_grid), dim3(LRthreads), 0, 0, g_LR_B_d[n],LRpitch_elements,YLRDIM,GPU_N,LRLEVEL);
hipLaunchKernelGGL(( initialize_buffer), dim3(g_LR_grid), dim3(LRthreads), 0, 0, h_LR_A_d[n],LRpitch_elements,YLRDIM,GPU_N,LRLEVEL);
hipLaunchKernelGGL(( initialize_buffer), dim3(g_LR_grid), dim3(LRthreads), 0, 0, h_LR_B_d[n],LRpitch_elements,YLRDIM,GPU_N,LRLEVEL);
hipLaunchKernelGGL(( initialize_buffer), dim3(g_LR_grid), dim3(LRthreads), 0, 0, g_LR_temp[n],LRpitch_elements,YLRDIM,GPU_N,LRLEVEL);
hipLaunchKernelGGL(( initialize_buffer), dim3(g_LR_grid), dim3(LRthreads), 0, 0, h_LR_temp[n],LRpitch_elements,YLRDIM,GPU_N,LRLEVEL);
}//end of GPU loop for malloc and initialize for LR
}//end of LR malloc and initialize
struct timeval tdr0,tdr1;
double restime;
hipDeviceSynchronize();
gettimeofday (&tdr0,NULL);
//Time loop
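//Each pass advances two coarse steps: the first half streams/collides A->B, the second half B->A.
//Edge-plane kernels run on stream_halo and the resulting planes are exchanged between neighbouring
//GPUs with hipMemcpyPeerAsync while the interior update proceeds concurrently on stream_inner.
//With REFINEMENT == 1 the fine grid performs two sub-steps (plus interpolation/extraction kernels)
//for every coarse step.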
for(int t = 0; t<TMAX; t+=2){
//A->B
for(int n = 0; n<GPU_N; n++){
hipSetDevice(n);
if(t>=STARTF && REFINEMENT == 0){
hipLaunchKernelGGL(( update_top_force) , dim3(h_grid), dim3(threads), 0, stream_halo [n], h_A_d[n],h_B_d[n],f_inner_A_d[n],h_temp[n],omega,pitch_elements,n,zInner,FX_d[n],FY_d[n],FZ_d[n],t-STARTF);
hipLaunchKernelGGL(( update_bottom_force), dim3(h_grid), dim3(threads), 0, stream_halo [n], g_A_d[n],g_B_d[n],f_inner_A_d[n],g_temp[n],omega,pitch_elements,n,zInner,FX_d[n],FY_d[n],FZ_d[n],t-STARTF);
}
else{
hipLaunchKernelGGL(( update_top) , dim3(h_grid), dim3(threads), 0, stream_halo [n], h_A_d[n],h_B_d[n],f_inner_A_d[n],h_temp[n],omega,pitch_elements,n,zInner);
hipLaunchKernelGGL(( update_bottom), dim3(h_grid), dim3(threads), 0, stream_halo [n], g_A_d[n],g_B_d[n],f_inner_A_d[n],g_temp[n],omega,pitch_elements,n,zInner);
}
}
for(int n = 0; n<GPU_N; n++)
hipStreamSynchronize(stream_halo[n]);
for(int n = 0; n<GPU_N; n++){
hipSetDevice(n);
if(t>=STARTF && REFINEMENT == 0){
hipLaunchKernelGGL(( update_inner_force) , dim3(grid), dim3(threads), 0, stream_inner[n], f_inner_A_d[n],f_inner_B_d[n], g_A_d[n], h_A_d[n],omega,pitch_elements,n,zInner,FX_d[n],FY_d[n],FZ_d[n],t-STARTF);
}
else{
hipLaunchKernelGGL(( update_inner) , dim3(grid), dim3(threads), 0, stream_inner[n], f_inner_A_d[n],f_inner_B_d[n], g_A_d[n], h_A_d[n],omega,pitch_elements,n,zInner);
}
}
for(int n = 0; n<GPU_N; n++)
hipMemcpyPeerAsync(&h_temp[n][0],n,&g_B_d[ (n+1)%GPU_N][0], (n+1)%GPU_N,pitch_elements*YDIM*sizeof(float)*19,stream_halo[n]);
for(int n = 0; n<GPU_N; n++)
hipMemcpyPeerAsync(&g_temp[n][0],n,&h_B_d[abs(n-1)%GPU_N][0],abs(n-1)%GPU_N,pitch_elements*YDIM*sizeof(float)*19,stream_halo[n]);
if(REFINEMENT == 1){
for(int n = 0; n<GPU_N; n++){
hipSetDevice(n);
if(t>=STARTF){
hipLaunchKernelGGL(( update_top_LR_force) , dim3(g_LR_grid), dim3(LRthreads), 0, stream_halo [n], h_LR_A_d[n], h_LR_B_d[n],f_inner_LR_A_d[n],h_LR_temp[n],omegaLR,LRpitch_elements,n,zLRInner,FX_d[n],FY_d[n],FZ_d[n],t-STARTF);
hipLaunchKernelGGL(( update_bottom_LR_force), dim3(g_LR_grid), dim3(LRthreads), 0, stream_halo [n], g_LR_A_d[n], g_LR_B_d[n],f_inner_LR_A_d[n],g_LR_temp[n],omegaLR,LRpitch_elements,n,zLRInner,FX_d[n],FY_d[n],FZ_d[n],t-STARTF);
}
else{
hipLaunchKernelGGL(( update_top_LR) , dim3(g_LR_grid), dim3(LRthreads), 0, stream_halo [n], h_LR_A_d[n], h_LR_B_d[n],f_inner_LR_A_d[n],h_LR_temp[n],omegaLR,LRpitch_elements,n,zLRInner);
hipLaunchKernelGGL(( update_bottom_LR), dim3(g_LR_grid), dim3(LRthreads), 0, stream_halo [n], g_LR_A_d[n], g_LR_B_d[n],f_inner_LR_A_d[n],g_LR_temp[n],omegaLR,LRpitch_elements,n,zLRInner);
}
}
for(int n = 0; n<GPU_N; n++)
hipStreamSynchronize(stream_halo[n]);
for(int n = 0; n<GPU_N; n++){
hipSetDevice(n);
if(t>=STARTF){
hipLaunchKernelGGL(( update_inner_LR_force) , dim3(LRgrid), dim3(LRthreads), 0, stream_inner[n], f_inner_LR_A_d[n],f_inner_LR_B_d[n], g_LR_A_d[n], h_LR_A_d[n],omegaLR,LRpitch_elements,n,zLRInner,FX_d[n],FY_d[n],FZ_d[n],t-STARTF);
}
else{
hipLaunchKernelGGL(( update_inner_LR) , dim3(LRgrid), dim3(LRthreads), 0, stream_inner[n], f_inner_LR_A_d[n],f_inner_LR_B_d[n], g_LR_A_d[n], h_LR_A_d[n],omegaLR,LRpitch_elements,n,zLRInner);
}
}
for(int n = 0; n<GPU_N; n++){
hipMemcpyPeerAsync(&h_LR_temp[n][LRpitch_elements*YLRDIM*14],n,&g_LR_B_d[ (n+1)%GPU_N][LRpitch_elements*YLRDIM*14], (n+1)%GPU_N,LRpitch_elements*YLRDIM*sizeof(float)*5,stream_halo[n]);
hipMemcpyPeerAsync(&g_LR_temp[n][LRpitch_elements*YLRDIM*9 ],n,&h_LR_B_d[abs(n-1)%GPU_N][LRpitch_elements*YLRDIM*9 ],abs(n-1)%GPU_N,LRpitch_elements*YLRDIM*sizeof(float)*5,stream_halo[n]);
}
for(int n = 0; n<GPU_N; n++){
hipSetDevice(n);
hipDeviceSynchronize();
}
for(int n = 0; n<GPU_N; n++){
hipSetDevice(n);
hipLaunchKernelGGL(( update_top_LR_interp) , dim3(g_LR_grid), dim3(LRthreads), 0, stream_halo [n], h_LR_B_d[n],h_LR_A_d[n],f_inner_LR_B_d[n],h_LR_temp[n],h_B_d[n],h_temp[n],omegaLR,LRpitch_elements,zLRInner,pitch_elements,zInner,SF_cf,n);
hipLaunchKernelGGL(( update_bottom_LR_interp), dim3(g_LR_grid), dim3(LRthreads), 0, stream_halo [n], g_LR_B_d[n],g_LR_A_d[n],f_inner_LR_B_d[n],g_LR_temp[n],g_B_d[n],g_temp[n],omegaLR,LRpitch_elements,zLRInner,pitch_elements,zInner,SF_cf,n);
}
for(int n = 0; n<GPU_N; n++)
hipStreamSynchronize(stream_halo[n]);
for(int n = 0; n<GPU_N; n++){
hipSetDevice(n);
hipLaunchKernelGGL(( update_inner_LR_interp), dim3(LRgrid),dim3(LRthreads),0,stream_inner[n], f_inner_LR_B_d[n],f_inner_LR_A_d[n],g_LR_B_d[n],h_LR_B_d[n],f_inner_B_d[n],g_B_d[n],h_B_d[n],omegaLR,LRpitch_elements,zLRInner,pitch_elements,zInner,SF_cf,n);
}
for(int n = 0; n<GPU_N; n++){
hipMemcpyPeerAsync(&h_LR_temp[n][LRpitch_elements*YLRDIM*14],n,&g_LR_A_d[ (n+1)%GPU_N][LRpitch_elements*YLRDIM*14], (n+1)%GPU_N,LRpitch_elements*YLRDIM*sizeof(float)*5,stream_halo[n]);
hipMemcpyPeerAsync(&g_LR_temp[n][LRpitch_elements*YLRDIM*9 ],n,&h_LR_A_d[abs(n-1)%GPU_N][LRpitch_elements*YLRDIM*9 ],abs(n-1)%GPU_N,LRpitch_elements*YLRDIM*sizeof(float)*5,stream_halo[n]);
}
for(int n = 0; n<GPU_N; n++){
hipSetDevice(n);
hipLaunchKernelGGL(( f_Extract), dim3(grid),dim3(threads),0,stream_inner[n], f_inner_B_d[n],f_inner_LR_A_d[n],g_LR_A_d[n],h_LR_A_d[n],pitch_elements,LRpitch_elements,zInner,zLRInner,SF_fc,n);
hipLaunchKernelGGL(( g_Extract), dim3(grid),dim3(threads),0,stream_inner[n], g_B_d[n],f_inner_LR_A_d[n],g_LR_A_d[n],pitch_elements,LRpitch_elements,zInner,zLRInner,SF_fc,n);
hipLaunchKernelGGL(( h_Extract), dim3(grid),dim3(threads),0,stream_inner[n], h_B_d[n],f_inner_LR_A_d[n],h_LR_A_d[n],pitch_elements,LRpitch_elements,zInner,zLRInner,SF_fc,n);
}
}//end refinement
for(int n = 0; n<GPU_N; n++){
hipSetDevice(n);
hipDeviceSynchronize();
}
//B->A
for(int n = 0; n<GPU_N; n++){
hipSetDevice(n);
if(t+1>=STARTF && REFINEMENT == 0){
hipLaunchKernelGGL(( update_top_force) , dim3(h_grid), dim3(threads), 0, stream_halo [n], h_B_d[n],h_A_d[n],f_inner_B_d[n],h_temp[n],omega,pitch_elements,n,zInner,FX_d[n],FY_d[n],FZ_d[n],t-STARTF+1);
hipLaunchKernelGGL(( update_bottom_force), dim3(h_grid), dim3(threads), 0, stream_halo [n], g_B_d[n],g_A_d[n],f_inner_B_d[n],g_temp[n],omega,pitch_elements,n,zInner,FX_d[n],FY_d[n],FZ_d[n],t-STARTF+1);
}
else{
hipLaunchKernelGGL(( update_top) , dim3(h_grid), dim3(threads), 0, stream_halo [n], h_B_d[n], h_A_d[n],f_inner_B_d[n],h_temp[n],omega,pitch_elements,n,zInner);
hipLaunchKernelGGL(( update_bottom), dim3(h_grid), dim3(threads), 0, stream_halo [n], g_B_d[n], g_A_d[n],f_inner_B_d[n],g_temp[n],omega,pitch_elements,n,zInner);
}
}
for(int n = 0; n<GPU_N; n++)
hipStreamSynchronize(stream_halo[n]);
for(int n = 0; n<GPU_N; n++){
hipSetDevice(n);
if(t+1>=STARTF && REFINEMENT == 0){
hipLaunchKernelGGL(( update_inner_force) , dim3(grid), dim3(threads), 0, stream_inner[n], f_inner_B_d[n],f_inner_A_d[n], g_B_d[n], h_B_d[n],omega,pitch_elements,n,zInner,FX_d[n],FY_d[n],FZ_d[n],t-STARTF+1);
}
else{
hipLaunchKernelGGL(( update_inner) , dim3(grid), dim3(threads), 0, stream_inner[n], f_inner_B_d[n],f_inner_A_d[n], g_B_d[n], h_B_d[n],omega,pitch_elements,n,zInner);
}
}
for(int n = 0; n<GPU_N; n++)
hipMemcpyPeerAsync(&h_temp[n][0],n,&g_A_d[ (n+1)%GPU_N][0], (n+1)%GPU_N,pitch_elements*YDIM*sizeof(float)*19,stream_halo[n]);
for(int n = 0; n<GPU_N; n++)
hipMemcpyPeerAsync(&g_temp[n][0],n,&h_A_d[abs(n-1)%GPU_N][0],abs(n-1)%GPU_N,pitch_elements*YDIM*sizeof(float)*19,stream_halo[n]);
if(REFINEMENT == 1){
for(int n = 0; n<GPU_N; n++){
hipSetDevice(n);
if(t+1>=STARTF){
hipLaunchKernelGGL(( update_top_LR_force) , dim3(g_LR_grid), dim3(LRthreads), 0, stream_halo [n], h_LR_A_d[n], h_LR_B_d[n],f_inner_LR_A_d[n],h_LR_temp[n],omegaLR,LRpitch_elements,n,zLRInner,FX_d[n],FY_d[n],FZ_d[n],t+1-STARTF);
hipLaunchKernelGGL(( update_bottom_LR_force), dim3(g_LR_grid), dim3(LRthreads), 0, stream_halo [n], g_LR_A_d[n], g_LR_B_d[n],f_inner_LR_A_d[n],g_LR_temp[n],omegaLR,LRpitch_elements,n,zLRInner,FX_d[n],FY_d[n],FZ_d[n],t+1-STARTF);
}
else{
hipLaunchKernelGGL(( update_top_LR) , dim3(g_LR_grid), dim3(LRthreads), 0, stream_halo [n], h_LR_A_d[n], h_LR_B_d[n],f_inner_LR_A_d[n],h_LR_temp[n],omegaLR,LRpitch_elements,n,zLRInner);
hipLaunchKernelGGL(( update_bottom_LR), dim3(g_LR_grid), dim3(LRthreads), 0, stream_halo [n], g_LR_A_d[n], g_LR_B_d[n],f_inner_LR_A_d[n],g_LR_temp[n],omegaLR,LRpitch_elements,n,zLRInner);
}
}
for(int n = 0; n<GPU_N; n++)
hipStreamSynchronize(stream_halo[n]);
for(int n = 0; n<GPU_N; n++){
hipSetDevice(n);
if(t+1>=STARTF){
hipLaunchKernelGGL(( update_inner_LR_force) , dim3(LRgrid), dim3(LRthreads), 0, stream_inner[n], f_inner_LR_A_d[n],f_inner_LR_B_d[n], g_LR_A_d[n], h_LR_A_d[n],omegaLR,LRpitch_elements,n,zLRInner,FX_d[n],FY_d[n],FZ_d[n],t+1-STARTF);
}
else{
hipLaunchKernelGGL(( update_inner_LR) , dim3(LRgrid), dim3(LRthreads), 0, stream_inner[n], f_inner_LR_A_d[n],f_inner_LR_B_d[n], g_LR_A_d[n], h_LR_A_d[n],omegaLR,LRpitch_elements,n,zLRInner);
}
}
for(int n = 0; n<GPU_N; n++){
hipMemcpyPeerAsync(&h_LR_temp[n][LRpitch_elements*YLRDIM*14],n,&g_LR_B_d[ (n+1)%GPU_N][LRpitch_elements*YLRDIM*14], (n+1)%GPU_N,LRpitch_elements*YLRDIM*sizeof(float)*5,stream_halo[n]);
hipMemcpyPeerAsync(&g_LR_temp[n][LRpitch_elements*YLRDIM*9 ],n,&h_LR_B_d[abs(n-1)%GPU_N][LRpitch_elements*YLRDIM*9 ],abs(n-1)%GPU_N,LRpitch_elements*YLRDIM*sizeof(float)*5,stream_halo[n]);
}
for(int n = 0; n<GPU_N; n++){
hipSetDevice(n);
hipDeviceSynchronize();
}
for(int n = 0; n<GPU_N; n++){
hipSetDevice(n);
hipLaunchKernelGGL(( update_top_LR_interp) , dim3(g_LR_grid), dim3(LRthreads), 0, stream_halo [n], h_LR_B_d[n],h_LR_A_d[n],f_inner_LR_B_d[n],h_LR_temp[n],h_A_d[n],h_temp[n],omegaLR,LRpitch_elements,zLRInner,pitch_elements,zInner,SF_cf,n);
hipLaunchKernelGGL(( update_bottom_LR_interp), dim3(g_LR_grid), dim3(LRthreads), 0, stream_halo [n], g_LR_B_d[n],g_LR_A_d[n],f_inner_LR_B_d[n],g_LR_temp[n],g_A_d[n],g_temp[n],omegaLR,LRpitch_elements,zLRInner,pitch_elements,zInner,SF_cf,n);
}
for(int n = 0; n<GPU_N; n++)
hipStreamSynchronize(stream_halo[n]);
for(int n = 0; n<GPU_N; n++){
hipSetDevice(n);
hipLaunchKernelGGL(( update_inner_LR_interp) , dim3(LRgrid),dim3(LRthreads),0,stream_inner[n], f_inner_LR_B_d[n],f_inner_LR_A_d[n],g_LR_B_d[n],h_LR_B_d[n],f_inner_A_d[n],g_A_d[n],h_A_d[n],omegaLR,LRpitch_elements,zLRInner,pitch_elements,zInner,SF_cf,n);
}
for(int n = 0; n<GPU_N; n++){
hipMemcpyPeerAsync(&h_LR_temp[n][LRpitch_elements*YLRDIM*14],n,&g_LR_A_d[ (n+1)%GPU_N][LRpitch_elements*YLRDIM*14], (n+1)%GPU_N,LRpitch_elements*YLRDIM*sizeof(float)*5,stream_halo[n]);
hipMemcpyPeerAsync(&g_LR_temp[n][LRpitch_elements*YLRDIM*9 ],n,&h_LR_A_d[abs(n-1)%GPU_N][LRpitch_elements*YLRDIM*9 ],abs(n-1)%GPU_N,LRpitch_elements*YLRDIM*sizeof(float)*5,stream_halo[n]);
}
for(int n = 0; n<GPU_N; n++){
hipSetDevice(n);
hipLaunchKernelGGL(( f_Extract), dim3(grid),dim3(threads),0,stream_inner[n], f_inner_A_d[n],f_inner_LR_A_d[n],g_LR_A_d[n],h_LR_A_d[n],pitch_elements,LRpitch_elements,zInner,zLRInner,SF_fc,n);
hipLaunchKernelGGL(( g_Extract), dim3(grid),dim3(threads),0,stream_inner[n], g_A_d[n],f_inner_LR_A_d[n],g_LR_A_d[n],pitch_elements,LRpitch_elements,zInner,zLRInner,SF_fc,n);
hipLaunchKernelGGL(( h_Extract), dim3(grid),dim3(threads),0,stream_inner[n], h_A_d[n],f_inner_LR_A_d[n],h_LR_A_d[n],pitch_elements,LRpitch_elements,zInner,zLRInner,SF_fc,n);
}
}
for(int n = 0; n<GPU_N; n++){
hipSetDevice(n);
hipDeviceSynchronize();
}
}//end Time loop
hipDeviceSynchronize();
gettimeofday (&tdr1,NULL);
timeval_subtract (&restime, &tdr1, &tdr0);
int Nodes;
Nodes = XDIM*YDIM*ZDIM;
if (REFINEMENT == 1)
Nodes += XLRDIM*YLRDIM*ZLRDIM*LRLEVEL;
cout<<"Time taken for main kernel: "<<restime<<" ("
<<double(Nodes*double(TMAX/1000000.f))/restime<<"MLUPS)\n";
output<<"VARIABLES = \"X\",\"Y\",\"Z\",\"u\",\"v\",\"w\",\"rho\",\"uAv\",\"vAv\",\"ufluc\",\"vfluc\"\n";
output<<"ZONE F=POINT, I="<<XDIM<<", J="<<YDIM<<", K="<<ZDIM<<"\n";
//D2H Memcpy and write results
for(int n = 0; n<GPU_N; n++){
hipSetDevice(n);
if(outputflag == 1){
hipMemcpy2D(f_inner_h[n],XDIM*sizeof(float),f_inner_A_d[n],pitch,XDIM*sizeof(float),YDIM*zInner*19,hipMemcpyDeviceToHost);
hipMemcpy2D( g_h[n],XDIM*sizeof(float), g_A_d[n],pitch,XDIM*sizeof(float),YDIM* 19,hipMemcpyDeviceToHost);
hipMemcpy2D( h_h[n],XDIM*sizeof(float), h_A_d[n],pitch,XDIM*sizeof(float),YDIM* 19,hipMemcpyDeviceToHost);
hipMemcpy( FX_h[n],FX_d[n],sizeof(float)*ForceTime,hipMemcpyDeviceToHost);
hipMemcpy( FY_h[n],FY_d[n],sizeof(float)*ForceTime,hipMemcpyDeviceToHost);
hipMemcpy( FZ_h[n],FZ_d[n],sizeof(float)*ForceTime,hipMemcpyDeviceToHost);
//Write results
WriteResults( g_h[n],output,omega,XDIM,YDIM,ZDIM/GPU_N*n ,1 ,0,0,0,1);
WriteResults(f_inner_h[n],output,omega,XDIM,YDIM,ZDIM/GPU_N*n+1 ,zInner,0,0,0,1);
WriteResults( h_h[n],output,omega,XDIM,YDIM,ZDIM/GPU_N*(n+1)-1,1 ,0,0,0,1);
}
for(int i=0;i<ForceTime;i++){
FX_total[i] += FX_h[n][i];
FY_total[i] += FY_h[n][i];
FZ_total[i] += FZ_h[n][i];
}
hipFree(f_inner_A_d[n]);
hipFree(f_inner_B_d[n]);
hipFree( g_A_d[n]);
hipFree( g_B_d[n]);
hipFree( h_A_d[n]);
hipFree( h_B_d[n]);
hipFree( g_temp[n]);
hipFree( h_temp[n]);
}//end write results
WriteForces(FX_total,FY_total,FZ_total,outputForce,ForceTime,REFINEMENT*LRLEVEL);
if(REFINEMENT == 1){
output<<"VARIABLES = \"X\",\"Y\",\"Z\",\"u\",\"v\",\"w\",\"rho\",\"uAv\",\"vAv\",\"ufluc\",\"vfluc\"\n";
output<<"ZONE F=POINT, I="<<XLRDIM<<", J="<<YLRDIM<<", K="<<ZLRDIM<<"\n";
for(int n = 0; n<GPU_N; n++){
hipSetDevice(n);
if(outputflag == 1){
hipMemcpy2D(f_inner_LR_h[n],XLRDIM*sizeof(float),f_inner_LR_A_d[n],LRpitch,XLRDIM*sizeof(float),YLRDIM*zLRInner*19,hipMemcpyDeviceToHost);
hipMemcpy2D( g_LR_h[n],XLRDIM*sizeof(float), g_LR_A_d[n],LRpitch,XLRDIM*sizeof(float),YLRDIM* 19,hipMemcpyDeviceToHost);
hipMemcpy2D( h_LR_h[n],XLRDIM*sizeof(float), h_LR_A_d[n],LRpitch,XLRDIM*sizeof(float),YLRDIM* 19,hipMemcpyDeviceToHost);
//Write results
WriteResults( g_LR_h[n],output,omega,XLRDIM,YLRDIM,ZLRDIM/GPU_N*n ,1 ,LRX0,LRY0,LRZ0,LRFACTOR);
WriteResults(f_inner_LR_h[n],output,omega,XLRDIM,YLRDIM,ZLRDIM/GPU_N*n+1 ,zLRInner,LRX0,LRY0,LRZ0,LRFACTOR);
WriteResults( h_LR_h[n],output,omega,XLRDIM,YLRDIM,ZLRDIM/GPU_N*(n+1)-1,1 ,LRX0,LRY0,LRZ0,LRFACTOR);
}
hipFree(f_inner_LR_A_d[n]);
hipFree(f_inner_LR_B_d[n]);
hipFree( g_LR_A_d[n]);
hipFree( g_LR_B_d[n]);
hipFree( h_LR_A_d[n]);
hipFree( h_LR_B_d[n]);
hipFree( g_LR_temp[n]);
hipFree( h_LR_temp[n]);
}//end GPU loop for LR
}//end write results of LR
return(0);
}
|
96501b890f8287a431c62aa0ca391523779a00bd.cu
|
#include <cuda.h>
//#include <cutil.h>
#include <iostream>
#include <ostream>
#include <fstream>
//#include "/home/yusuke/NVIDIA_GPU_Computing_SDK/C/common/inc/cutil.h"
using namespace std;
#define CASENAME "test"
#define BLOCKSIZEX 128
#define BLOCKSIZEY 1
#define BLOCKSIZEZ 1
#define BLOCKSIZELRX 64
#define BLOCKSIZELRY 1
#define BLOCKSIZELRZ 1
#define XDIM 128
#define YDIM 192
#define ZDIM 4
#define TMAX 50000
#define STARTF 30000
#define OBSTR1 4.f
#define OBSTX1 63.5f
#define OBSTY1 63.5f
#define OBSTZ1 15.5f
#define OBSTR2 4.f
#define OBSTX2 63.5f
#define OBSTY2 31.5f
#define OBSTZ2 31.5f
#define LRFACTOR 0.5f
#define LRLEVEL 2
#define LRX0 47.75f //minimum x coord of LR
#define XLRDIM 64 //number of nodes in x
#define LRY0 47.75f
#define YLRDIM 64
#define LRZ0 -0.25f
#define ZLRDIM 8
#define RE 100.f//2000.f//100.f;
#define UMAX 0.08f
#define METHOD "SINGLE" //SINGLE,HYB,TEXT,SHARED,CACHE
#define SmagLES "NO" //YES,NO
#define MODEL "BGK" //BGK,MRT,STREAM
#define REFINEMENT 0 //1,0
#define ZPERIODIC "NO"
#define CS 0.04f
//#define CHARLENGTH = XDIM-2.f;
//#define BLOCKSIZE 16;
//int const XDIM = 32;
//int const YDIM = 32;
#include <sys/time.h>
#include <time.h>
/*
Image List:
0 fluid
1 BB
2
3 DirichletWest(simple)
10 BB(force)
13 DirichletWest_Reg
14 NeumannEast_Reg
15 DirichletNorth_Reg
16 DirichletSouth_Reg
21 ysymmetry_top
22 ysymmetry_bot
23 zsymmetry_top
24 zsymmetry_bot
25 xsymmetry_top
26 xsymmetry_bot
*/
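//ImageFcn/ImageFcnLR below map a node position to one of these flags. In the active configuration a
//square column of half-width OBSTR1 around (OBSTX1,OBSTY1) is flagged 10 (bounce-back with force
//accumulation); ImageFcn additionally tags y == 0 / y == YDIM-1 with 200 / 100 and x == 0 /
//x == XDIM-1 with the x-symmetry flags 26 / 25 (interpreted by the update kernels, not shown here).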
inline __device__ int ImageFcnLR(float x, float y, float z){
int value = 0;
// if(((x-OBSTX1)*(x-OBSTX1)+(y-OBSTY1)*(y-OBSTY1))<OBSTR1*OBSTR1)
// return 10;
// else if(((x-OBSTX2)*(x-OBSTX2)+(y-OBSTY2)*(y-OBSTY2))<OBSTR2*OBSTR2)
// return 10;
//if(((x-OBSTX)*(x-OBSTX)+(y-OBSTY1)*(y-OBSTY1))<OBSTR1*OBSTR1)
// if(((x-OBSTX1)*(x-OBSTX1)+(y-OBSTY1)*(y-OBSTY1)+(z-OBSTZ1)*(z-OBSTZ1))<OBSTR1*OBSTR1)
// {
// return 10;
// }
// else
// //if(y < 0.1f || z < 0.1f || (XDIM-x) < 0.1f || (YDIM-y) < 0.1f || (ZDIM-z) < 0.1f)
// if(y < 17.5f || z < 17.5f || y > 46.5f || z > 46.5f)
// return 1;
// else if(x < 17.5f)
// return 13;
// else if(x > 78.5f)
// return 14;
// else
// if(abs(x-OBSTX1) < OBSTR1 && abs(y-OBSTY1) < OBSTR1)
// return 10;
// else
// if(x < 3)//== 0)
// value = 1;
// else if(x > XDIM-4)//== XDIM-1)
// value = 1;
// if(z<3)
// value = 1;
// if(z>ZDIM-4)
// value = 1;
if(abs(x-OBSTX1) < OBSTR1 && abs(y-OBSTY1) < OBSTR1)
value = 10;
// if(y == 0)
// value = 200;//22;
// else if(y == YDIM-1)
// value = 100;
return value;
}
inline __device__ int ImageFcn(int x, int y, int z){
int value = 0;
//Cylinder
// if(((x-OBSTX1)*(x-OBSTX1)+(y-OBSTY1)*(y-OBSTY1))<OBSTR1*OBSTR1)
// value = 10;
// else if(((x-OBSTX2)*(x-OBSTX2)+(y-OBSTY2)*(y-OBSTY2))<OBSTR2*OBSTR2)
// value = 10;
//Sphere
// if(((x-OBSTX1)*(x-OBSTX1)+(y-OBSTY1)*(y-OBSTY1)+(z-OBSTZ1)*(z-OBSTZ1))<OBSTR1*OBSTR1)
// {
//// if(z == 0 || z == ZDIM-1)
//// return 1;
//// else
// return 10;
// }
//Lid Driven Cavity
// if(y == 0 || y == YDIM-1 || z == 0 || z == ZDIM-1)
// value = 1;
// else if(x == XDIM-2 || y == 1 || y == YDIM-2 || z == 1 || z == ZDIM-2)
// return 1;
// else if(x == 0)
// return 1;
if(abs(x-OBSTX1) < OBSTR1 && abs(y-OBSTY1) < OBSTR1)
value = 10;
else if(y == 0)
value = 200;//22;
else if(y == YDIM-1)
value = 100;
else if(x == 0)
value = 26;
else if(x == XDIM-1)
value = 25;
// else if(x < 3)//== 0)
// value = 1;
// else if(x > XDIM-4)//== XDIM-1)
// value = 1;
// if(z < 2 || z > ZDIM-3)
// value = 1;
return value;
}
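//PoisProf: parabolic (Poiseuille-like) profile, zero at x = 0.5 and 1 at the centreline, with
//half-width radius = (YDIM-2)/2; its use at the inlet is currently commented out.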
inline __device__ float PoisProf (float x){
float radius = (YDIM-1-1)*0.5f;
float result = -1.0f*(((1.0f-(x-0.5f)/radius))*((1.0f-(x-0.5f)/radius))-1.0f);
return (result);
// return 1.f;
}
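//trilinear_interp: standard trilinear interpolation of the eight corner values of a unit cell at
//local coordinates (x,y,z) in [0,1]^3, presumably used by the grid-refinement interpolation kernels.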
inline __device__ float trilinear_interp (float v000, float v001, float v010, float v011,
float v100, float v101, float v110, float v111, float x, float y, float z){
return v000*(1.f-x)*(1.f-y)*(1.f-z)+
v001*( x)*(1.f-y)*(1.f-z)+
v010*(1.f-x)*( y)*(1.f-z)+
v011*( x)*( y)*(1.f-z)+
v100*(1.f-x)*(1.f-y)*( z)+
v101*( x)*(1.f-y)*( z)+
v110*(1.f-x)*( y)*( z)+
v111*( x)*( y)*( z);
}
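//DirichletWest imposes the inlet velocity on the west face: edge/corner populations are first closed
//with mirror conditions, then each unknown incoming population is reconstructed from its outgoing
//counterpart plus a momentum correction f_i = f_opp + 6*w_i*(e_i . u) (the fma() lines below), i.e. a
//bounce-back style velocity boundary.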
__device__ void DirichletWest(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z)
{
// if(y == 0){
// f2 = f4;
// f6 = f7;
// f11 = f13;
// f16 = f18;
// }
// else if(y == YDIM-1){
// f4 = f2;
// f7 = f6;
// f13 = f11;
// f18 = f16;
// }
// if(z == 0){
// f9 = f14;
// f10 = f15;
// f11 = f16;
// f12 = f17;
// f13 = f18;
// }
// else if(z == ZDIM-1){
// f14 = f9;
// f15 = f10;
// f16 = f11;
// f17 = f12;
// f18 = f13;
// }
if(y == 0 && z == 0){
f2 = f4;
f13=f18;
f11=f18;
f16=f18;
f6 =f7;
f9 =f14;
f12=f17;
}
else if(y == 0 && z == ZDIM-1){
f4 = f2;
f11=f13;
f18=f13;
f16=f13;
f6 =f7;
f14=f9;
f17=f12;
}
else if(y == YDIM-1 && z == 0){
f4 = f2;
f11=f16;
f18=f16;
f13=f16;
f7 =f6;
f9 =f14;
f12=f17;
}
else if(y == YDIM-1 && z == ZDIM-1){
f4 = f2;
f16=f11;
f18=f11;
f13=f11;
f7 =f6;
f14=f9;
f17=f12;
}
else{
if(y == 0){
f2 = f4;
f11=f13;
f16=f18;
f8 = f5;
}
else if(y == YDIM-1){
f4=f2 ;
f13=f11;
f18=f16;
f5=f8 ;
}
if(z == 0){
f9 = f14;
f10 = f15;
f11 = f16;
f13 = f18;
}
else if(z == ZDIM-1){
f14 = f9;
f15 = f10;
f16 = f11;
f18 = f13;
}
}
float u,v,w;//,rho;
u = UMAX;//*PoisProf(zcoord)*1.5;
v = 0.0f;
w = 0.0f;
// float rho = u+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f3+f6+f7+f12+f17)); //D2Q9i
// float usqr = u*u+v*v+w*w;
f1 = fma(0.0555555556f,6.0f*u,f3);//0.0555555556f*(6.0f*u)+f3;//-0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);;
f5 = fma(0.0277777778f,6.0f*(u+v),f7 );// -0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
f8 = fma(0.0277777778f,6.0f*(u-v),f6 );// -0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
f10= fma(0.0277777778f,6.0f*(u+w),f17);//-0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
f15= fma(0.0277777778f,6.0f*(u-w),f12);//-0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
//// f0 = 1.0f/3.0f*(rho-1.5f*usqr);
// f1 = 1.0f/18.0f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);
//// f2 = 1.0f/18.0f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr);
//// f3 = 1.0f/18.0f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
//// f4 = 1.0f/18.0f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr);
// f5 = 1.0f/36.0f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr);
//// f6 = 1.0f/36.0f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
//// f7 = 1.0f/36.0f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
// f8 = 1.0f/36.0f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr);
//// f9 = 1.0f/18.0f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr);
// f10= 1.0f/36.0f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr);
//// f11= 1.0f/36.0f*(rho+3.0f*(v+w)+4.5f*(v+w)*(u+w)-1.5f*usqr);
//// f12= 1.0f/36.0f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
//// f13= 1.0f/36.0f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(u+w)-1.5f*usqr);
//// f14= 1.0f/18.0f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr);
// f15= 1.0f/36.0f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr);
//// f16= 1.0f/36.0f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr);
//// f17= 1.0f/36.0f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
//// f18= 1.0f/36.0f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr);
}
__device__ void DirichletWest_Reg(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z)
{
if(y == 0){
f2 = f4;
f6 = f7;
f11 = f13;
f16 = f18;
}
else if(y == YDIM-1){
f4 = f2;
f7 = f6;
f13 = f11;
f18 = f16;
}
if(z == 0){
f9 = f14;
f10 = f15;
f11 = f16;
f12 = f17;
f13 = f18;
}
else if(z == ZDIM-1){
f14 = f9;
f15 = f10;
f16 = f11;
f17 = f12;
f18 = f13;
}
float u,v,w;//,rho;
u = UMAX;//*PoisProf(y)*1.5;
v = 0.0f;//0.0;
w = 0.0f;
// float rho = u+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f3+f6+f7+f12+f17)); //D2Q9i
// float u2 = u*u;
// float v2 = v*v;
// float w2 = w*w;
// float usqr = u2+v2+w2;
// f1 =(0.166666667f*u)+
// (f3-(-(0.166666667f*u)));
f1 = f3+0.33333333f*u;
// f5 =(0.0833333333f*( u+v))+
// (f7-(0.0833333333f*(-u-v)));
f5 = f7+0.166666667f*(u+v);
// f8 =(0.0833333333f*( u-v ))+
// (f6-(0.0833333333f*(-u+v )));
f8 = f6+0.166666667f*(u-v);
// f10=(0.0833333333f*( u+w))+
// (f17-(0.0833333333f*(-u-w)));
f10= f17+0.166666667f*(u+w);
// f15=(0.0833333333f*( u-w))+
// (f12-(0.0833333333f*(-u+w)));
f15= f12+0.166666667f*(u-w);
// f1 =(0.1031746045f*rho+ -0.0231796391f*usqr+ (0.166666667f*u) + 0.16666667f*u2)+
// (f3-(0.1031746045f*rho+ -0.0231796391f*usqr+-(0.166666667f*u) + 0.16666667f*u2));
// f5 =(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u+v +u2+(v2-w2))+ 0.25f*u*v)+
// (f7-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u-v +u2+(v2-w2))+ 0.25f*u*v));
// f8 =(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u-v +u2+(v2-w2))+ -0.25f*u*v)+
// (f6-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u+v +u2+(v2-w2))+ -0.25f*u*v));
// f10=(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u+w +u2+(v2-w2))+ 0.25f*u*w)+
// (f17-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u-w +u2+(v2-w2))+ 0.25f*u*w));
// f15=(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u-w +u2+(v2-w2))+ -0.25f*u*w)+
// (f12-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u+w +u2+(v2-w2))+ -0.25f*u*w));
// float PI11 = f1 +f3 +f5 +f6 +f7 +f8 +f10+f12+f15+f17;
// float PI22 = f2 +f4 +f5 +f6 +f7 +f8 +f11+f13+f16+f18;
// float PI33 = f9 +f10+f11+f12+f13+f14+f15+f16+f17+f18;
}
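//DirichletWest_Regularized rebuilds every population at the inlet from the equilibrium for the
//prescribed velocity plus the second-order projection of the non-equilibrium stress PI_ab
//(a regularized boundary condition), which filters the higher-order/ghost modes.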
void __device__ DirichletWest_Regularized(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z)
{
if(y == 0 && z == 0){
f2 = f4;
f13=f18;
f11=f18;
f16=f18;
f6 =f7;
f9 =f14;
f12=f17;
}
else if(y == 0 && z == ZDIM-1){
f4 = f2;
f11=f13;
f18=f13;
f16=f13;
f6 =f7;
f14=f9;
f17=f12;
}
else if(y == YDIM-1 && z == 0){
f4 = f2;
f11=f16;
f18=f16;
f13=f16;
f7 =f6;
f9 =f14;
f12=f17;
}
else if(y == YDIM-1 && z == ZDIM-1){
f4 = f2;
f16=f11;
f18=f11;
f13=f11;
f7 =f6;
f14=f9;
f17=f12;
}
else{
if(y == 0){
f2 = f4;
f11=f13;
f16=f18;
f8 = f5;
}
else if(y == YDIM-1){
f4=f2 ;
f13=f11;
f18=f16;
f5=f8 ;
}
if(z == 0){
f9 = f14;
f10 = f15;
f11 = f16;
f13 = f18;
}
else if(z == ZDIM-1){
f14 = f9;
f15 = f10;
f16 = f11;
f18 = f13;
}
}
float PI11 = 0;
float PI12 = 0;
float PI22 = 0;
float PI33 = 0;
float PI13 = 0;
float PI23 = 0;
float u;//,v;//,w;//,rho;
u = UMAX;//*PoisProf(z)*1.5;
//v = 0.0f;
//w = 0.0f;
float usqr = u*u;//+v*v+w*w;
float rho = u+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f3+f6+f7+f12+f17)); //D2Q9i
float feq0 = 0.3333333333f*(rho-1.5f*usqr);
float feq1 = 0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);
float feq2 = 0.0555555556f*(rho -1.5f*usqr);
float feq3 = 0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
float feq4 = 0.0555555556f*(rho -1.5f*usqr);
float feq9 = 0.0555555556f*(rho -1.5f*usqr);
float feq14 = 0.0555555556f*(rho -1.5f*usqr);
float feq5 = 0.0277777778f*(rho+3.0f*( u)+4.5f*( u)*( u)-1.5f*usqr);
float feq6 = 0.0277777778f*(rho+3.0f*(-u)+4.5f*(-u)*(-u)-1.5f*usqr);
float feq7 = 0.0277777778f*(rho+3.0f*(-u)+4.5f*(-u)*(-u)-1.5f*usqr);
float feq8 = 0.0277777778f*(rho+3.0f*( u)+4.5f*( u)*( u)-1.5f*usqr);
float feq10 = 0.0277777778f*(rho+3.0f*( u)+4.5f*( u)*( u)-1.5f*usqr);
float feq11 = 0.0277777778f*(rho -1.5f*usqr);
float feq12 = 0.0277777778f*(rho+3.0f*(-u)+4.5f*(-u)*(-u)-1.5f*usqr);
float feq13 = 0.0277777778f*(rho -1.5f*usqr);
float feq15 = 0.0277777778f*(rho+3.0f*( u)+4.5f*( u)*( u)-1.5f*usqr);
float feq16 = 0.0277777778f*(rho -1.5f*usqr);
float feq17 = 0.0277777778f*(rho+3.0f*(-u)+4.5f*(-u)*(-u)-1.5f*usqr);
float feq18 = 0.0277777778f*(rho -1.5f*usqr);
// float feq0 = 0.3333333333f*(rho-1.5f*usqr);
// float feq1 = 0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);
// float feq2 = 0.0555555556f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr);
// float feq3 = 0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
// float feq4 = 0.0555555556f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr);
// float feq5 = 0.0277777778f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr);
// float feq6 = 0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
// float feq7 = 0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
// float feq8 = 0.0277777778f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr);
// float feq9 = 0.0555555556f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr);
// float feq10 = 0.0277777778f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr);
// float feq11 = 0.0277777778f*(rho+3.0f*(v+w)+4.5f*(v+w)*(v+w)-1.5f*usqr);
// float feq12 = 0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
// float feq13 = 0.0277777778f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr);
// float feq14 = 0.0555555556f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr);
// float feq15 = 0.0277777778f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr);
// float feq16 = 0.0277777778f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr);
// float feq17 = 0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
// float feq18 = 0.0277777778f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr);
f1 = feq1 +f3 -feq3 ;
f5 = feq5 +f7 -feq7 ;
f8 = feq8 +f6 -feq6 ;
f10= feq10+f17-feq17;
f15= feq15+f12-feq12;
PI11 = (f1-feq1)+(f3-feq3)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17);
PI22 = (f2-feq2)+(f4-feq4)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
PI33 = (f9-feq9)+(f14-feq14)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
PI12 = (f5-feq5)+(f7-feq7)-(f6-feq6)-(f8-feq8);
PI13 = (f10-feq10)+(f17-feq17)-(f12-feq12)-(f15-feq15);
PI23 = (f11-feq11)+(f18-feq18)-(f13-feq13)-(f16-feq16);
f0 = feq0 +1.5f *(( -0.333333333f)*PI11 +( -0.333333333f)*PI22+( -0.333333333f)*PI33) ;
f1 = feq1 +0.25f *(( 0.666666667f)*PI11 +( -0.333333333f)*PI22+( -0.333333333f)*PI33) ;
f2 = feq2 +0.25f *(( -0.333333333f)*PI11 +( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f3 = feq3 +0.25f *(( 0.666666667f)*PI11 +( -0.333333333f)*PI22+( -0.333333333f)*PI33) ;
f4 = feq4 +0.25f *(( -0.333333333f)*PI11 +( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f5 = feq5 +0.125f*(( 0.666666667f)*PI11+2.0f*( PI12 )+( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f6 = feq6 +0.125f*(( 0.666666667f)*PI11+2.0f*(-PI12 )+( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f7 = feq7 +0.125f*(( 0.666666667f)*PI11+2.0f*( PI12 )+( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f8 = feq8 +0.125f*(( 0.666666667f)*PI11+2.0f*(-PI12 )+( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f9 = feq9 +0.25f *(( -0.333333333f)*PI11 +( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f10 = feq10+0.125f*(( 0.666666667f)*PI11+2.0f*( + PI13 )+( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f11 = feq11+0.125f*(( -0.333333333f)*PI11+2.0f*( + PI23)+( 0.666666667f)*PI22+( 0.666666667f)*PI33) ;
f12 = feq12+0.125f*(( 0.666666667f)*PI11+2.0f*( +-PI13 )+( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f13 = feq13+0.125f*(( -0.333333333f)*PI11+2.0f*( +-PI23)+( 0.666666667f)*PI22+( 0.666666667f)*PI33) ;
f14 = feq14+0.25f *(( -0.333333333f)*PI11 +( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f15 = feq15+0.125f*(( 0.666666667f)*PI11+2.0f*( +-PI13 )+( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f16 = feq16+0.125f*(( -0.333333333f)*PI11+2.0f*( +-PI23)+( 0.666666667f)*PI22+( 0.666666667f)*PI33) ;
f17 = feq17+0.125f*(( 0.666666667f)*PI11+2.0f*( + PI13 )+( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f18 = feq18+0.125f*(( -0.333333333f)*PI11+2.0f*( + PI23)+( 0.666666667f)*PI22+( 0.666666667f)*PI33) ;
}
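//NeumannEast_Regularized is the outlet counterpart: rho is fixed to 1, the normal velocity u is
//recovered from the known populations, and all 19 populations are rebuilt from equilibrium plus the
//projected non-equilibrium stress PI_ab.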
void __device__ NeumannEast_Regularized(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z)
{
if(y == 0 && z == 0){
f2 = f4;
f13 = f18;
f11 = f18;
f16 = f18;
f5 = f8;
f9 = f14;
f10 = f15;
}
else if(y == 0 && z == ZDIM-1){
f2 = f4;
f11 = f13;
f18 = f13;
f16 = f13;
f5 = f8;
f14 = f9;
f15 = f10;
}
else if(y == YDIM-1 && z == 0){
f4 = f2;
f18 = f16;
f11 = f16;
f13 = f16;
f8 = f5;
f9 = f14;
f10 = f15;
}
else if(y == YDIM-1 && z == ZDIM-1){
f4 = f2;
f13 = f11;
f16 = f11;
f18 = f11;
f8 = f5;
f14 = f9;
f15 = f10;
}
else{
if(y == 0){
f2 = f4;
f11 = f13;
f16 = f18;
f5 = f8;
}
else if(y == YDIM-1){
f4 = f2;
f13 = f11;
f18 = f16;
f8 = f5;
}
else if(z == 0){
f9 = f14;
f10 = f15;
f11 = f16;
f13 = f18;
}
else if(z == ZDIM-1){
f14 = f9;
f15 = f10;
f16 = f11;
f18 = f13;
}
}
float PI11 = 0;
float PI12 = 0;
float PI22 = 0;
float PI33 = 0;
float PI13 = 0;
float PI23 = 0;
float u;//,v;//,w;//,rho;
float rho = 1.0f;
//v = 0.0f;
//w = 0.0f;
u = -rho+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f1+f8+f5+f10+f15)); //D2Q9i
float usqr = u*u;//+v*v+w*w;
float feq0 = 0.3333333333f*(rho-1.5f*usqr);
float feq1 = 0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);
float feq2 = 0.0555555556f*(rho -1.5f*usqr);
float feq3 = 0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
float feq4 = 0.0555555556f*(rho -1.5f*usqr);
float feq9 = 0.0555555556f*(rho -1.5f*usqr);
float feq14 = 0.0555555556f*(rho -1.5f*usqr);
float feq5 = 0.0277777778f*(rho+3.0f*( u)+4.5f*( u)*( u)-1.5f*usqr);
float feq6 = 0.0277777778f*(rho+3.0f*(-u)+4.5f*(-u)*(-u)-1.5f*usqr);
float feq7 = 0.0277777778f*(rho+3.0f*(-u)+4.5f*(-u)*(-u)-1.5f*usqr);
float feq8 = 0.0277777778f*(rho+3.0f*( u)+4.5f*( u)*( u)-1.5f*usqr);
float feq10 = 0.0277777778f*(rho+3.0f*( u)+4.5f*( u)*( u)-1.5f*usqr);
float feq11 = 0.0277777778f*(rho -1.5f*usqr);
float feq12 = 0.0277777778f*(rho+3.0f*(-u)+4.5f*(-u)*(-u)-1.5f*usqr);
float feq13 = 0.0277777778f*(rho -1.5f*usqr);
float feq15 = 0.0277777778f*(rho+3.0f*( u)+4.5f*( u)*( u)-1.5f*usqr);
float feq16 = 0.0277777778f*(rho -1.5f*usqr);
float feq17 = 0.0277777778f*(rho+3.0f*(-u)+4.5f*(-u)*(-u)-1.5f*usqr);
float feq18 = 0.0277777778f*(rho -1.5f*usqr);
// float feq0 = 0.3333333333f*(rho-1.5f*usqr);
// float feq1 = 0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);
// float feq2 = 0.0555555556f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr);
// float feq3 = 0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
// float feq4 = 0.0555555556f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr);
// float feq9 = 0.0555555556f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr);
// float feq14 = 0.0555555556f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr);
// float feq5 = 0.0277777778f*(rho+3.0f*( u+v)+4.5f*( u+v)*( u+v)-1.5f*usqr);
// float feq6 = 0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
// float feq7 = 0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
// float feq8 = 0.0277777778f*(rho+3.0f*( u-v)+4.5f*( u-v)*( u-v)-1.5f*usqr);
// float feq10 = 0.0277777778f*(rho+3.0f*( u+w)+4.5f*( u+w)*( u+w)-1.5f*usqr);
// float feq11 = 0.0277777778f*(rho+3.0f*( v+w)+4.5f*( v+w)*( v+w)-1.5f*usqr);
// float feq12 = 0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
// float feq13 = 0.0277777778f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr);
// float feq15 = 0.0277777778f*(rho+3.0f*( u-w)+4.5f*( u-w)*( u-w)-1.5f*usqr);
// float feq16 = 0.0277777778f*(rho+3.0f*( v-w)+4.5f*( v-w)*( v-w)-1.5f*usqr);
// float feq17 = 0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
// float feq18 = 0.0277777778f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr);
f3 = feq3 +f1 -feq1 ;
f7 = feq7 +f5 -feq5 ;
f6 = feq6 +f8 -feq8 ;
f17= feq17+f10-feq10;
f12= feq12+f15-feq15;
PI11 = (f1-feq1)+(f3-feq3)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17);
PI22 = (f2-feq2)+(f4-feq4)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
PI33 = (f9-feq9)+(f14-feq14)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
PI12 = (f5-feq5)+(f7-feq7)-(f6-feq6)-(f8-feq8);
PI13 = (f10-feq10)+(f17-feq17)-(f12-feq12)-(f15-feq15);
PI23 = (f11-feq11)+(f18-feq18)-(f13-feq13)-(f16-feq16);
f0 = feq0 +1.5f *(( -0.333333333f)*PI11 +( -0.333333333f)*PI22+( -0.333333333f)*PI33) ;
f1 = feq1 +0.25f *(( 0.666666667f)*PI11 +( -0.333333333f)*PI22+( -0.333333333f)*PI33) ;
f2 = feq2 +0.25f *(( -0.333333333f)*PI11 +( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f3 = feq3 +0.25f *(( 0.666666667f)*PI11 +( -0.333333333f)*PI22+( -0.333333333f)*PI33) ;
f4 = feq4 +0.25f *(( -0.333333333f)*PI11 +( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f5 = feq5 +0.125f*(( 0.666666667f)*PI11+2.0f*( PI12 )+( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f6 = feq6 +0.125f*(( 0.666666667f)*PI11+2.0f*(-PI12 )+( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f7 = feq7 +0.125f*(( 0.666666667f)*PI11+2.0f*( PI12 )+( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f8 = feq8 +0.125f*(( 0.666666667f)*PI11+2.0f*(-PI12 )+( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f9 = feq9 +0.25f *(( -0.333333333f)*PI11 +( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f10 = feq10+0.125f*(( 0.666666667f)*PI11+2.0f*( + PI13 )+( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f11 = feq11+0.125f*(( -0.333333333f)*PI11+2.0f*( + PI23)+( 0.666666667f)*PI22+( 0.666666667f)*PI33) ;
f12 = feq12+0.125f*(( 0.666666667f)*PI11+2.0f*( +-PI13 )+( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f13 = feq13+0.125f*(( -0.333333333f)*PI11+2.0f*( +-PI23)+( 0.666666667f)*PI22+( 0.666666667f)*PI33) ;
f14 = feq14+0.25f *(( -0.333333333f)*PI11 +( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f15 = feq15+0.125f*(( 0.666666667f)*PI11+2.0f*( +-PI13 )+( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f16 = feq16+0.125f*(( -0.333333333f)*PI11+2.0f*( +-PI23)+( 0.666666667f)*PI22+( 0.666666667f)*PI33) ;
f17 = feq17+0.125f*(( 0.666666667f)*PI11+2.0f*( + PI13 )+( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f18 = feq18+0.125f*(( -0.333333333f)*PI11+2.0f*( + PI23)+( 0.666666667f)*PI22+( 0.666666667f)*PI33) ;
}
__device__ void NeumannEast(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z)
{
if(y == 0 && z == 0){
f2 = f4;
f13 = f18;
f11 = f18;
f16 = f18;
f5 = f8;
f9 = f14;
f10 = f15;
}
else if(y == 0 && z == ZDIM-1){
f2 = f4;
f11 = f13;
f18 = f13;
f16 = f13;
f5 = f8;
f14 = f9;
f15 = f10;
}
else if(y == YDIM-1 && z == 0){
f4 = f2;
f18 = f16;
f11 = f16;
f13 = f16;
f8 = f5;
f9 = f14;
f10 = f15;
}
else if(y == YDIM-1 && z == ZDIM-1){
f4 = f2;
f13 = f11;
f16 = f11;
f18 = f11;
f8 = f5;
f14 = f9;
f15 = f10;
}
else{
if(y == 0){
f2 = f4;
// f6 = f7;
f11 = f13;
f16 = f18;
f5 = f8;
}
else if(y == YDIM-1){
f4 = f2;
// f7 = f6;
f13 = f11;
f18 = f16;
f8 = f5;
}
if(z == 0){
f9 = f14;
f10 = f15;
f11 = f16;
// f12 = f17;
f13 = f18;
}
else if(z == ZDIM-1){
f14 = f9;
f15 = f10;
f16 = f11;
// f17 = f12;
f18 = f13;
}
}
float u,v,w;//,rho;
float rho = 1.0f;
v = 0.0f;
w = 0.0f;
u = -rho+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f1+f8+f5+f10+f15)); //D2Q9i
float u2 = u*u;
float v2 = v*v;
float w2 = w*w;
float usqr = u2+v2+w2;
// f3 = f1 -0.333333333f*u;
// f7 = f5 -0.166666667f*(u+v);
// f6 = f8 -0.166666667f*(u-v);
// f17= f10-0.166666667f*(u+w);
// f12= f15-0.166666667f*(u-w);
f0 = 1.0f/3.0f*(rho-1.5f*usqr);
f1 = 1.0f/18.0f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);
f2 = 1.0f/18.0f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr);
f3 = 1.0f/18.0f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
f4 = 1.0f/18.0f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr);
f5 = 1.0f/36.0f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr);
f6 = 1.0f/36.0f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
f7 = 1.0f/36.0f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
f8 = 1.0f/36.0f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr);
f9 = 1.0f/18.0f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr);
f10= 1.0f/36.0f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr);
	f11= 1.0f/36.0f*(rho+3.0f*(v+w)+4.5f*(v+w)*(v+w)-1.5f*usqr);
f12= 1.0f/36.0f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
	f13= 1.0f/36.0f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr);
f14= 1.0f/18.0f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr);
f15= 1.0f/36.0f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr);
f16= 1.0f/36.0f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr);
f17= 1.0f/36.0f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
f18= 1.0f/36.0f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr);
}
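// NeumannEast_Reg: regularized variant of the east outlet. Instead of replacing the
// whole set with equilibrium, only the five unknown (westward) populations
// f3, f7, f6, f17, f12 are corrected by subtracting the momentum excess u from their
// bounced-back counterparts.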
__device__ void NeumannEast_Reg(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z)
{
if(y == 0 && z == 0){
f2 = f4;
f13 = f18;
f11 = f18;
f16 = f18;
f5 = f8;
f9 = f14;
f10 = f15;
}
else if(y == 0 && z == ZDIM-1){
f2 = f4;
f11 = f13;
f18 = f13;
f16 = f13;
f5 = f8;
f14 = f9;
f15 = f10;
}
else if(y == YDIM-1 && z == 0){
f4 = f2;
f18 = f16;
f11 = f16;
f13 = f16;
f8 = f5;
f9 = f14;
f10 = f15;
}
else if(y == YDIM-1 && z == ZDIM-1){
f4 = f2;
f13 = f11;
f16 = f11;
f18 = f11;
f8 = f5;
f14 = f9;
f15 = f10;
}
else{
if(y == 0){
f2 = f4;
// f6 = f7;
f11 = f13;
f16 = f18;
f5 = f8;
}
else if(y == YDIM-1){
f4 = f2;
// f7 = f6;
f13 = f11;
f18 = f16;
f8 = f5;
}
if(z == 0){
f9 = f14;
f10 = f15;
f11 = f16;
// f12 = f17;
f13 = f18;
}
else if(z == ZDIM-1){
f14 = f9;
f15 = f10;
f16 = f11;
// f17 = f12;
f18 = f13;
}
}
float u,v,w;//,rho;
float rho = 1.0f;
v = 0.0f;
w = 0.0f;
u = -rho+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f1+f8+f5+f10+f15)); //D2Q9i
// float u2 = u*u;
// float v2 = v*v;
// float w2 = w*w;
// float usqr = u2+v2+w2;
f3 = f1 -0.333333333f*u;
f7 = f5 -0.166666667f*(u+v);
f6 = f8 -0.166666667f*(u-v);
f17= f10-0.166666667f*(u+w);
f12= f15-0.166666667f*(u-w);
// f3 =(0.1031746045f*rho+ -0.0231796391f*usqr+-(0.166666667f*u) + 0.16666667f*u2)+
// (f1-(0.1031746045f*rho+ -0.0231796391f*usqr+ (0.166666667f*u) + 0.16666667f*u2));
// f7 =(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u-v +u2+(v2-w2))+ 0.25f*u*v)+
// (f5-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u+v +u2+(v2-w2))+ 0.25f*u*v));
// f6 =(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u+v +u2+(v2-w2))+ -0.25f*u*v)+
// (f8-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u-v +u2+(v2-w2))+ -0.25f*u*v));
// f17=(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u-w +u2+(v2-w2))+ 0.25f*u*w)+
// (f10-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u+w +u2+(v2-w2))+ 0.25f*u*w));
// f12=(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u+w +u2+(v2-w2))+ -0.25f*u*w)+
// (f15-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u-w +u2+(v2-w2))+ -0.25f*u*w));
// f1 =(0.1031746045f*rho+ -0.0231796391f*usqr+ (0.166666667f*u) + 0.16666667f*u2)+
// (f3-(0.1031746045f*rho+ -0.0231796391f*usqr+-(0.166666667f*u) + 0.16666667f*u2));
// f5 =(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u+v +u2+(v2-w2))+ 0.25f*u*v)+
// (f7-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u-v +u2+(v2-w2))+ 0.25f*u*v));
// f8 =(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u-v +u2+(v2-w2))+ -0.25f*u*v)+
// (f6-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u+v +u2+(v2-w2))+ -0.25f*u*v));
// f10=(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u+w +u2+(v2-w2))+ 0.25f*u*w)+
// (f17-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u-w +u2+(v2-w2))+ 0.25f*u*w));
// f15=(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u-w +u2+(v2-w2))+ -0.25f*u*w)+
// (f12-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u+w +u2+(v2-w2))+ -0.25f*u*w));
// float PI11 = f1 +f3 +f5 +f6 +f7 +f8 +f10+f12+f15+f17;
// float PI22 = f2 +f4 +f5 +f6 +f7 +f8 +f11+f13+f16+f18;
// float PI33 = f9 +f10+f11+f12+f13+f14+f15+f16+f17+f18;
}
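// DirichletNorth_Reg / DirichletSouth_Reg: prescribed-velocity (u = UMAX) walls on the
// north (y = YDIM-1) and south (y = 0) faces. The z edges are handled by simple copies
// of the mirrored populations, then the unknown y populations are reconstructed from
// their opposites with first-order momentum corrections.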
__device__ void DirichletNorth_Reg(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z)
{
// if(x == 0){
// f2 = f4;
// f6 = f7;
// f11 = f13;
// f16 = f18;
// }
// else if(x == XDIM-1){
// f4 = f2;
// f7 = f6;
// f13 = f11;
// f18 = f16;
// }
if(z == 0){
f9 = f14;
f10 = f15;
f11 = f16;
f12 = f17;
f13 = f18;
}
else if(z == ZDIM-1){
f14 = f9;
f15 = f10;
f16 = f11;
f17 = f12;
f18 = f13;
}
float u,v,w;//,rho;
u = UMAX;
v = 0.0f;//0.0;
w = 0.0f;
// float rho = u+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f3+f6+f7+f12+f17)); //D2Q9i
// float u2 = u*u;
// float v2 = v*v;
// float w2 = w*w;
// float usqr = u2+v2+w2;
// f1 =(0.166666667f*u)+
// (f3-(-(0.166666667f*u)));
f4 = f2-0.33333333f*v;
// f5 =(0.0833333333f*( u+v))+
// (f7-(0.0833333333f*(-u-v)));
f7 = f5-0.166666667f*(u+v);
// f8 =(0.0833333333f*( u-v ))+
// (f6-(0.0833333333f*(-u+v )));
f8 = f6+0.166666667f*(u-v);
// f10=(0.0833333333f*( u+w))+
// (f17-(0.0833333333f*(-u-w)));
f13= f16-0.166666667f*(v-w);
// f15=(0.0833333333f*( u-w))+
// (f12-(0.0833333333f*(-u+w)));
f18= f11-0.166666667f*(v+w);
//
//float feq0 = 0.1904761791f*rho+-0.597127747f*usqr
//float feq1 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*u + 0.055555556f*(2.f*u*u-(v*v+w*w));
//float feq2 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*v +-0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
//float feq3 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*u + 0.055555556f*(2.f*u*u-(v*v+w*w));
//float feq4 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*v +-0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
//float feq5 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
//float feq6 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
//float feq7 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
//float feq8 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
//float feq9 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*w; +-0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w)
//float feq10= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+w)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
//float feq11= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v+w)+-0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
//float feq12= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-w)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
//float feq13= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v-w)+-0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ;
//float feq14= 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*w +-0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w)
//float feq15= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(u-w) + 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
//float feq16= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(v-w) +-0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ;
//float feq17= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(u+w) + 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
//float feq18= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(v+w) +-0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
//
// float PI11 = f1 +f3 +f5 +f6 +f7 +f8 +f10+f12+f15+f17;
// float PI22 = f2 +f4 +f5 +f6 +f7 +f8 +f11+f13+f16+f18;
// float PI33 = f9 +f10+f11+f12+f13+f14+f15+f16+f17+f18;
}
__device__ void DirichletSouth_Reg(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z)
{
// if(x == 0){
// f2 = f4;
// f6 = f7;
// f11 = f13;
// f16 = f18;
// }
// else if(x == XDIM-1){
// f4 = f2;
// f7 = f6;
// f13 = f11;
// f18 = f16;
// }
if(z == 0){
f9 = f14;
f10 = f15;
f11 = f16;
f12 = f17;
f13 = f18;
}
else if(z == ZDIM-1){
f14 = f9;
f15 = f10;
f16 = f11;
f17 = f12;
f18 = f13;
}
float u,v,w;//,rho;
u = UMAX;
v = 0.0f;//0.0;
w = 0.0f;
// float rho = u+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f3+f6+f7+f12+f17)); //D2Q9i
// float u2 = u*u;
// float v2 = v*v;
// float w2 = w*w;
// float usqr = u2+v2+w2;
f2 = f4 +0.33333333f*v;
f5 = f7 +0.166666667f*(u+v);
f6 = f8 -0.166666667f*(u-v);
f16= f13+0.166666667f*(v-w);
f11= f18+0.166666667f*(v+w);
//
//float feq0 = 0.1904761791f*rho+-0.597127747f*usqr
//float feq1 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*u + 0.055555556f*(2.f*u*u-(v*v+w*w));
//float feq2 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*v +-0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
//float feq3 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*u + 0.055555556f*(2.f*u*u-(v*v+w*w));
//float feq4 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*v +-0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
//float feq5 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
//float feq6 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
//float feq7 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
//float feq8 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
//float feq9 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*w; +-0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w)
//float feq10= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+w)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
//float feq11= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v+w)+-0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
//float feq12= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-w)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
//float feq13= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v-w)+-0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ;
//float feq14= 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*w +-0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w)
//float feq15= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(u-w) + 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
//float feq16= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(v-w) +-0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ;
//float feq17= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(u+w) + 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
//float feq18= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(v+w) +-0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
//
// float PI11 = f1 +f3 +f5 +f6 +f7 +f8 +f10+f12+f15+f17;
// float PI22 = f2 +f4 +f5 +f6 +f7 +f8 +f11+f13+f16+f18;
// float PI33 = f9 +f10+f11+f12+f13+f14+f15+f16+f17+f18;
}
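// xsymmetry_bot / xsymmetry_top and the y/z counterparts below impose mirror
// (free-slip symmetry) conditions: each population that would leave through the
// symmetry plane is copied from its partner with the wall-normal velocity component
// negated, which for D3Q19 reduces to the pairwise assignments below.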
__device__ void xsymmetry_bot(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z)
{
if(y == 0 && z == 0){
f2 = f4;
f13=f18;
f11=f18;
f16=f18;
f6 =f7;
f9 =f14;
f12=f17;
}
else if(y == 0 && z == ZDIM-1){
		f2 = f4;
f11=f13;
f18=f13;
f16=f13;
f6 =f7;
f14=f9;
f17=f12;
}
else if(y == YDIM-1 && z == 0){
f4 = f2;
f11=f16;
f18=f16;
f13=f16;
f7 =f6;
f9 =f14;
f12=f17;
}
else if(y == YDIM-1 && z == ZDIM-1){
f4 = f2;
f16=f11;
f18=f11;
f13=f11;
f7 =f6;
f14=f9;
f17=f12;
}
else{
if(y == 0){
f2 = f4;
f11=f13;
f16=f18;
f8 = f5;
}
else if(y == YDIM-1){
f4=f2 ;
f13=f11;
f18=f16;
f5=f8 ;
}
// if(z == 0){
// f9 = f14;
// f10 = f15;
// f11 = f16;
// f12 = f17;
// f13 = f18;
// }
// else if(z == ZDIM-1){
// f14 = f9;
// f15 = f10;
// f16 = f11;
// f17 = f12;
// f18 = f13;
// }
}
f1 = f3 ;
f5 = f6 ;
f8 = f7 ;
f10= f12;
f15= f17;
}
__device__ void xsymmetry_top(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z)
{
if(y == 0 && z == 0){
f2 = f4;
f13 = f18;
f11 = f18;
f16 = f18;
f5 = f8;
f9 = f14;
f10 = f15;
}
else if(y == 0 && z == ZDIM-1){
f2 = f4;
f11 = f13;
f18 = f13;
f16 = f13;
f5 = f8;
f14 = f9;
f15 = f10;
}
else if(y == YDIM-1 && z == 0){
f4 = f2;
f18 = f16;
f11 = f16;
f13 = f16;
f8 = f5;
f9 = f14;
f10 = f15;
}
else if(y == YDIM-1 && z == ZDIM-1){
f4 = f2;
f13 = f11;
f16 = f11;
f18 = f11;
f8 = f5;
f14 = f9;
f15 = f10;
}
else{
if(y == 0){
f2 = f4;
f11 = f13;
f16 = f18;
f5 = f8;
}
else if(y == YDIM-1){
f4 = f2;
f13 = f11;
f18 = f16;
f8 = f5;
}
// else if(z == 0){
// f9 = f14;
// f10 = f15;
// f11 = f16;
// f12 = f17;
// f13 = f18;
// }
// else if(z == ZDIM-1){
// f14 = f9;
// f15 = f10;
// f16 = f11;
// f17 = f12;
// f18 = f13;
// }
}
f3 = f1 ;
f6 = f5 ;
f7 = f8 ;
f12= f10;
f17= f15;
}
__device__ void ysymmetry_top(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int z)
{
if(z == 0){
f9 = f14;
f10= f15;
f11= f16;
f12= f17;
f13= f18;
}
if(z == ZDIM-1){
f14= f9 ;
f15= f10;
f16= f11;
f17= f12;
f18= f13;
}
f4 = f2 ;
f7 = f6 ;
f8 = f5 ;
f13= f11;
f18= f16;
}
__device__ void ysymmetry_bot(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int z)
{
if(z == 0){
f9 = f14;
f10= f15;
f11= f16;
f12= f17;
f13= f18;
}
if(z == ZDIM-1){
f14= f9 ;
f15= f10;
f16= f11;
f17= f12;
f18= f13;
}
f2 = f4 ;
f6 = f7 ;
f5 = f8 ;
f11= f13;
f16= f18;
}
__device__ void zsymmetry_top(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y)
{
if(y == 0){
f2 = f4 ;
f6 = f7 ;
f5 = f8 ;
f11= f13;
f16= f18;
}
if(y == YDIM-1){
f4 = f2 ;
f7 = f6 ;
f8 = f5 ;
f13= f11;
f18= f16;
}
f14= f9 ;
f15= f10;
f16= f11;
f17= f12;
f18= f13;
}
__device__ void zsymmetry_bot(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y)
{
if(y == 0){
f2 = f4 ;
f6 = f7 ;
f5 = f8 ;
f11= f13;
f16= f18;
}
if(y == YDIM-1){
f4 = f2 ;
f7 = f6 ;
f8 = f5 ;
f13= f11;
f18= f16;
}
f9 = f14;
f10= f15;
f11= f16;
f12= f17;
f13= f18;
}
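// boundaries: dispatcher that applies the symmetry conditions above according to the
// node flag im (21/22 = y symmetry, 23/24 = z symmetry, 25/26 = x symmetry); the
// Dirichlet/Neumann branches are kept here but commented out.
// Illustrative call pattern (a sketch, not taken from this file): with the 19
// post-streaming distributions of a node in registers, one might do
//   if(im > 0) boundaries(f0,f1,...,f18, y, z, im);
// before the collision step.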
inline __device__ void boundaries(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z, int im)
{
// if(im == 3)//DirichletWest
// {
// DirichletWest(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// if(im == 53)//DirichletWest
// {
// //DirichletWest_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// DirichletWest_Regularized(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 54)//DirichletWest
// {
// //NeumannEast(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// NeumannEast_Regularized(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// if(im == 4)//DirichletWest
// {
// NeumannEast(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// if(im == 13)//DirichletWest
// {
// DirichletWest_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 14)//DirichletWest
// {
// NeumannEast_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 15)//DirichletNorth
// {
// DirichletNorth_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// if(im == 16)//DirichletSouth
// {
// DirichletSouth_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
if(im == 21)//ysymm top
{
ysymmetry_top(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,z);
}
else if(im == 22)//ysymm bot
{
ysymmetry_bot(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,z);
}
else if(im == 23)//zsymm top
{
zsymmetry_top(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y);
}
else if(im == 24)//zsymm bot
{
zsymmetry_bot(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y);
}
else if(im == 25)//xsymm top
{
xsymmetry_top(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
}
else if(im == 26)//xsymm bot
{
xsymmetry_bot(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
}
}
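// boundaries_force: same dispatcher as boundaries(), but with the regularized west
// inlet (im == 53) and the east Neumann outlet (im == 54) enabled in addition to the
// symmetry planes; presumably used by the force-evaluation pass (inferred from the name).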
inline __device__ void boundaries_force(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z, int im)
{
// if(im == 3)//DirichletWest
// {
// DirichletWest(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
if(im == 53)//DirichletWest
{
DirichletWest_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
//DirichletWest_Regularized(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
}
	else if(im == 54)//NeumannEast
{
NeumannEast(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
//NeumannEast_Regularized(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
}
// else if(im == 15)//DirichletNorth
// {
// DirichletNorth_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 16)//DirichletSouth
// {
// DirichletSouth_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
else if(im == 21)//ysymm top
{
ysymmetry_top(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,z);
}
else if(im == 22)//ysymm bot
{
ysymmetry_bot(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,z);
}
else if(im == 23)//zsymm top
{
zsymmetry_top(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y);
}
else if(im == 24)//zsymm bot
{
zsymmetry_bot(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y);
}
	else if(im == 25)//xsymm top
{
xsymmetry_top(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
}
	else if(im == 26)//xsymm bot
{
xsymmetry_bot(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
}
}
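// North_Extrap / South_Extrap: rebuild a boundary node from its macroscopic state by
// projecting into MRT moment space and back. North_Extrap pins rho = 1 (the incoming
// rho argument is overwritten) and keeps the local velocity; South_Extrap keeps the
// local density and imposes the prescribed v with u = w = 0.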
inline __device__ void North_Extrap(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, float rho)
{
rho = 1.0f;
float u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
float v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
float w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
float m1,m2,m4,m6,m8,m9,m10,m11,m12,m13,m14,m15,m16,m17,m18;
m1 = -30.f*f0+-11.f*f1+-11.f*f2+-11.f*f3+-11.f*f4+ 8.f*f5+ 8.f*f6+ 8.f*f7+ 8.f*f8+-11.f*f9+ 8.f*f10+ 8.f*f11+ 8.f*f12+ 8.f*f13+-11.f*f14+ 8.f*f15+ 8.f*f16+ 8.f*f17+ 8.f*f18;
m2 = 12.f*f0+ -4.f*f1+ -4.f*f2+ -4.f*f3+ -4.f*f4+ f5+ f6+ f7+ f8+ -4.f*f9+ f10+ f11+ f12+ f13+ -4.f*f14+ f15+ f16+ f17+ f18;
m4 = -4.f*f1 + 4.f*f3 + f5+ - f6+ - f7+ f8 + f10 + - f12 + f15 + - f17 ;
m6 = -4.f*f2 + 4.f*f4+ f5+ f6+ - f7+ - f8 + f11 + - f13 + f16 + - f18;
m8 = + -4.f*f9+ f10+ f11+ f12+ f13+ 4.f*f14+ - f15+ - f16+ - f17+ - f18;
m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+ - f9+ f10+ -2.f*f11+ f12+ -2.f*f13+ - f14+ f15+ -2.f*f16+ f17+ -2.f*f18;
m10 = -4.f*f1+ 2.f*f2+ -4.f*f3+ 2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ f10+ -2.f*f11+ f12+ -2.f*f13+ 2.f*f14+ f15+ -2.f*f16+ f17+ -2.f*f18;
m11 = f2 + f4+ f5+ f6+ f7+ f8+ - f9+ - f10 + - f12 + - f14+ - f15 + - f17 ;
m12 = -2.f*f2 -2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ - f10 + - f12 + 2.f*f14+ - f15 + - f17 ;
m13 = f5+ - f6+ f7+ - f8 ;
m14 = f11 + - f13 + - f16 + f18;
m15 = f10 + - f12 + - f15 + f17 ;
m16 = f5+ - f6+ - f7+ f8 - f10 + f12 + - f15 + f17 ;
m17 = - f5+ - f6+ f7+ f8 + f11 + - f13 + f16 + - f18;
m18 = f10+ - f11+ f12+ - f13 + - f15+ f16+ - f17+ f18;
f0 =(0.052631579f*rho +- 0.012531328f*(m1)+ 0.047619048f*(m2));
f1 =(0.052631579f*rho+ 0.1f*u +-0.0045948204f*(m1)+-0.015873016f*(m2)+ -0.1f*(m4) + 0.055555556f*((m9)-m10));
f2 =(0.052631579f*rho + 0.1f*v +-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m6) +-0.027777778f*((m9)-m10)+ 0.083333333f*((m11)-m12));
f3 =(0.052631579f*rho+ -0.1f*u +-0.0045948204f*(m1)+-0.015873016f*(m2)+ 0.1f*(m4) + 0.055555556f*((m9)-m10));
f4 =(0.052631579f*rho + -0.1f*v +-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m6) +-0.027777778f*((m9)-m10)+ 0.083333333f*((m11)-m12));
f5 =(0.052631579f*rho+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16-m17)+ (0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13))));
f6 =(0.052631579f*rho+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16-m17)+ (0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13))));
f7 =(0.052631579f*rho+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16+m17)+ (0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13))));
f8 =(0.052631579f*rho+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16+m17)+ (0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13))));
f9 =(0.052631579f*rho + 0.1f*w+-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m8)+-0.027777778f*((m9)-m10)+-0.083333333f*((m11)-m12));
f10=(0.052631579f*rho+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16+m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15))));
f11=(0.052631579f*rho + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6+m8)+0.125f*( m17-m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +( 0.25f*(m14))));
f12=(0.052631579f*rho+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16+m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15))));
f13=(0.052631579f*rho + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6-m8)+0.125f*(-m17-m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +(-0.25f*(m14))));
f14=(0.052631579f*rho + -0.1f*w+-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m8)+-0.027777778f*((m9)-m10)+-0.083333333f*((m11)-m12));
f15=(0.052631579f*rho+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16-m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15))));
f16=(0.052631579f*rho + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6-m8)+0.125f*( m17+m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +(-0.25f*(m14))));
f17=(0.052631579f*rho+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16-m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15))));
f18=(0.052631579f*rho + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6+m8)+0.125f*(-m17+m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +( 0.25f*(m14))));
}
inline __device__ void South_Extrap(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, float v)
{
float rho,u,w;
rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+f10+f11+f12+f13+f14+f15+f16+f17+f18;
u = 0.f;//f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
w = 0.f;//f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
float m1,m2,m4,m6,m8,m9,m10,m11,m12,m13,m14,m15,m16,m17,m18;
m1 = -30.f*f0+-11.f*f1+-11.f*f2+-11.f*f3+-11.f*f4+ 8.f*f5+ 8.f*f6+ 8.f*f7+ 8.f*f8+-11.f*f9+ 8.f*f10+ 8.f*f11+ 8.f*f12+ 8.f*f13+-11.f*f14+ 8.f*f15+ 8.f*f16+ 8.f*f17+ 8.f*f18;
m2 = 12.f*f0+ -4.f*f1+ -4.f*f2+ -4.f*f3+ -4.f*f4+ f5+ f6+ f7+ f8+ -4.f*f9+ f10+ f11+ f12+ f13+ -4.f*f14+ f15+ f16+ f17+ f18;
m4 = -4.f*f1 + 4.f*f3 + f5+ - f6+ - f7+ f8 + f10 + - f12 + f15 + - f17 ;
m6 = -4.f*f2 + 4.f*f4+ f5+ f6+ - f7+ - f8 + f11 + - f13 + f16 + - f18;
m8 = + -4.f*f9+ f10+ f11+ f12+ f13+ 4.f*f14+ - f15+ - f16+ - f17+ - f18;
m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+ - f9+ f10+ -2.f*f11+ f12+ -2.f*f13+ - f14+ f15+ -2.f*f16+ f17+ -2.f*f18;
m10 = -4.f*f1+ 2.f*f2+ -4.f*f3+ 2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ f10+ -2.f*f11+ f12+ -2.f*f13+ 2.f*f14+ f15+ -2.f*f16+ f17+ -2.f*f18;
m11 = f2 + f4+ f5+ f6+ f7+ f8+ - f9+ - f10 + - f12 + - f14+ - f15 + - f17 ;
m12 = -2.f*f2 -2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ - f10 + - f12 + 2.f*f14+ - f15 + - f17 ;
m13 = f5+ - f6+ f7+ - f8 ;
m14 = f11 + - f13 + - f16 + f18;
m15 = f10 + - f12 + - f15 + f17 ;
m16 = f5+ - f6+ - f7+ f8 - f10 + f12 + - f15 + f17 ;
m17 = - f5+ - f6+ f7+ f8 + f11 + - f13 + f16 + - f18;
m18 = f10+ - f11+ f12+ - f13 + - f15+ f16+ - f17+ f18;
f0 =(0.052631579f*rho +- 0.012531328f*(m1)+ 0.047619048f*(m2));
f1 =(0.052631579f*rho+ 0.1f*u +-0.0045948204f*(m1)+-0.015873016f*(m2)+ -0.1f*(m4) + 0.055555556f*((m9)-m10));
f2 =(0.052631579f*rho + 0.1f*v +-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m6) +-0.027777778f*((m9)-m10)+ 0.083333333f*((m11)-m12));
f3 =(0.052631579f*rho+ -0.1f*u +-0.0045948204f*(m1)+-0.015873016f*(m2)+ 0.1f*(m4) + 0.055555556f*((m9)-m10));
f4 =(0.052631579f*rho + -0.1f*v +-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m6) +-0.027777778f*((m9)-m10)+ 0.083333333f*((m11)-m12));
f5 =(0.052631579f*rho+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16-m17)+ (0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13))));
f6 =(0.052631579f*rho+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16-m17)+ (0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13))));
f7 =(0.052631579f*rho+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16+m17)+ (0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13))));
f8 =(0.052631579f*rho+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16+m17)+ (0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13))));
f9 =(0.052631579f*rho + 0.1f*w+-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m8)+-0.027777778f*((m9)-m10)+-0.083333333f*((m11)-m12));
f10=(0.052631579f*rho+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16+m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15))));
f11=(0.052631579f*rho + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6+m8)+0.125f*( m17-m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +( 0.25f*(m14))));
f12=(0.052631579f*rho+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16+m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15))));
f13=(0.052631579f*rho + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6-m8)+0.125f*(-m17-m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +(-0.25f*(m14))));
f14=(0.052631579f*rho + -0.1f*w+-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m8)+-0.027777778f*((m9)-m10)+-0.083333333f*((m11)-m12));
f15=(0.052631579f*rho+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16-m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15))));
f16=(0.052631579f*rho + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6-m8)+0.125f*( m17+m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +(-0.25f*(m14))));
f17=(0.052631579f*rho+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16-m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15))));
f18=(0.052631579f*rho + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6+m8)+0.125f*(-m17+m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +( 0.25f*(m14))));
}
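// timeval_subtract: host-side helper that stores x - y in *result as seconds (double)
// and returns 1 if the difference is negative. Usage sketch (assumed, not from this file):
//   struct timeval start, stop; double walltime;
//   gettimeofday(&start, NULL);  /* ... launch and sync kernels ... */
//   gettimeofday(&stop,  NULL);
//   timeval_subtract(&walltime, &stop, &start);   // elapsed wall time in seconds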
int
timeval_subtract (double *result, struct timeval *x, struct timeval *y)
{
struct timeval result0;
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec) {
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000) {
int nsec = (y->tv_usec - x->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
tv_usec is certainly positive. */
result0.tv_sec = x->tv_sec - y->tv_sec;
result0.tv_usec = x->tv_usec - y->tv_usec;
*result = ((double)result0.tv_usec)/1e6 + (double)result0.tv_sec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
__device__ int dmin(int a, int b)
{
if (a<b) return a;
else return b-1;
}
__device__ int dmax(int a)
{
if (a>-1) return a;
else return 0;
}
__device__ int dmin_p(int a, int b)
{
if (a<b) return a;
else return 0;
}
__device__ int dmax_p(int a, int b)
{
if (a>-1) return a;
else return b-1;
}
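// Index helpers for the structure-of-arrays distribution storage. f_mem/f_memLR address
// the interior slabs, buff_mem/buff_memLR the single-slice transfer buffers; dmax/dmin
// clamp every index into the allocated range. Worked example (illustration only, taking
// pitch in elements): f_mem(2, x, y, z, pitch, zInner) returns
//   x + y*pitch + z*YDIM*pitch + 2*pitch*YDIM*zInner,
// i.e. distribution 2 of node (x,y,z), clamped to [0, 19*pitch*YDIM*zInner).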
inline __device__ int f_mem(int f_num, int x, int y, int z, size_t pitch, int zInner)
{
int index = (x+y*pitch+z*YDIM*pitch)+f_num*pitch*YDIM*(zInner);
index = dmax(index);
index = dmin(index,19*pitch*YDIM*(zInner));
// if(index<0) index = 0;
// else if(index>19*pitch*YDIM*ZDIM/GPU_N-2) index = 19*pitch*(YDIM*ZDIM/GPU_N-2);
return index;
}
inline __device__ int f_memLR(int f_num, int x, int y, int z, size_t pitch, int zInner)
{
int index = (x+y*pitch+z*YLRDIM*pitch)+f_num*pitch*YLRDIM*(zInner);
index = dmax(index);
index = dmin(index,19*pitch*YLRDIM*(zInner));
return index;
}
inline __device__ int buff_mem(int f_num, int x, int y, size_t pitch)
{
int index = (x+y*pitch)+f_num*pitch*YDIM;
index = dmax(index);
index = dmin(index,19*pitch*YDIM);
// if(index<0) index = 0;
// else if(index>19*pitch*YDIM) index = 19*pitch*YDIM;
return index;
}
inline __device__ int buff_memLR(int f_num, int x, int y, size_t pitch)
{
int index = (x+y*pitch)+f_num*pitch*YLRDIM;
index = dmax(index);
index = dmin(index,19*pitch*YLRDIM);
return index;
}
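// bgk_feq: standard D3Q19 second-order BGK equilibrium (weights 1/3, 1/18, 1/36),
// evaluated for the given rho, u, v, w.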
inline __device__ void bgk_feq(float rho, float u, float v, float w, float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18)
{
float usqr = u*u+v*v+w*w;
f0 = 1.0f/3.0f*(rho-1.5f*usqr);
f1 = 1.0f/18.0f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);
f2 = 1.0f/18.0f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr);
f3 = 1.0f/18.0f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
f4 = 1.0f/18.0f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr);
f5 = 1.0f/36.0f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr);
f6 = 1.0f/36.0f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
f7 = 1.0f/36.0f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
f8 = 1.0f/36.0f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr);
f9 = 1.0f/18.0f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr);
f10= 1.0f/36.0f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr);
f11= 1.0f/36.0f*(rho+3.0f*(v+w)+4.5f*(v+w)*(v+w)-1.5f*usqr);
f12= 1.0f/36.0f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
f13= 1.0f/36.0f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr);
f14= 1.0f/18.0f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr);
f15= 1.0f/36.0f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr);
f16= 1.0f/36.0f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr);
f17= 1.0f/36.0f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
f18= 1.0f/36.0f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr);
}
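// mrt_feq: the same equilibrium written in the weighted/moment form used by the MRT
// collision operators below; the quadratic velocity terms are added in a second pass.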
inline __device__ void mrt_feq(float rho, float u, float v, float w, float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18)
{
float usqr = u*u+v*v+w*w;
f0 = 0.1904761791f*rho+-0.597127747f*usqr ;
f1 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*u ;
f2 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*v ;
f3 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*u ;
f4 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*v ;
f5 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+v);
f6 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-v);
f7 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+v);
f8 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-v);
f9 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*w ;
f10= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+w);
f11= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v+w);
f12= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-w);
f13= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v-w);
f14= 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*w ;
f15= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-w);
f16= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v-w);
f17= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+w);
f18= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v+w);
f1 += 0.055555556f*(2.f*u*u-(v*v+w*w));
f2 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
f3 += 0.055555556f*(2.f*u*u-(v*v+w*w));
f4 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
f5 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
f6 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
f7 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
f8 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
f9 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
f10+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
f11+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
f12+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
f13+= -0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ;
f14+= -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
f15+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
f16+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ;
f17+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
f18+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
}
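// bgk_collide: single-relaxation-time collision. Macroscopic rho, u, v, w are
// recomputed from the distributions and each f is relaxed toward its equilibrium with
// rate omega. Minimal usage sketch (assumed, not from this file): with the 19
// post-streaming values in registers,
//   bgk_collide(f0,f1,...,f18, omega);   // omega = 1/tau
// leaves the post-collision values in place for write-back.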
inline __device__ void bgk_collide(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, float omega)
{
float rho,u,v,w;
rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9;
rho +=f10+f11+f12+f13+f14+f15+f16+f17+f18;
u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
float usqr = u*u+v*v+w*w;
// f0 =(1.f-omega)*f0 +omega*(0.3333333333f*(rho-1.5f*usqr));
// f1 =(1.f-omega)*f1 +omega*(0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr));
// f2 =(1.f-omega)*f2 +omega*(0.0555555556f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr));
// f3 =(1.f-omega)*f3 +omega*(0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr));
// f4 =(1.f-omega)*f4 +omega*(0.0555555556f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr));
// f5 =(1.f-omega)*f5 +omega*(0.0277777778f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr));
// f6 =(1.f-omega)*f6 +omega*(0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr));
// f7 =(1.f-omega)*f7 +omega*(0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr));
// f8 =(1.f-omega)*f8 +omega*(0.0277777778f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr));
// f9 =(1.f-omega)*f9 +omega*(0.0555555556f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr));
// f10=(1.f-omega)*f10+omega*(0.0277777778f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr));
// f11=(1.f-omega)*f11+omega*(0.0277777778f*(rho+3.0f*(v+w)+4.5f*(v+w)*(u+w)-1.5f*usqr));
// f12=(1.f-omega)*f12+omega*(0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr));
// f13=(1.f-omega)*f13+omega*(0.0277777778f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(u+w)-1.5f*usqr));
// f14=(1.f-omega)*f14+omega*(0.0555555556f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr));
// f15=(1.f-omega)*f15+omega*(0.0277777778f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr));
// f16=(1.f-omega)*f16+omega*(0.0277777778f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr));
// f17=(1.f-omega)*f17+omega*(0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr));
// f18=(1.f-omega)*f18+omega*(0.0277777778f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr));
f0 -=omega*(f0 -0.3333333333f*(rho-1.5f*usqr));
f1 -=omega*(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr));
f2 -=omega*(f2 -0.0555555556f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr));
f3 -=omega*(f3 -0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr));
f4 -=omega*(f4 -0.0555555556f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr));
f5 -=omega*(f5 -0.0277777778f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr));
f6 -=omega*(f6 -0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr));
f7 -=omega*(f7 -0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr));
f8 -=omega*(f8 -0.0277777778f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr));
f9 -=omega*(f9 -0.0555555556f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr));
f10-=omega*(f10-0.0277777778f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr));
f11-=omega*(f11-0.0277777778f*(rho+3.0f*(v+w)+4.5f*(v+w)*(v+w)-1.5f*usqr));
f12-=omega*(f12-0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr));
f13-=omega*(f13-0.0277777778f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr));
f14-=omega*(f14-0.0555555556f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr));
f15-=omega*(f15-0.0277777778f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr));
f16-=omega*(f16-0.0277777778f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr));
f17-=omega*(f17-0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr));
f18-=omega*(f18-0.0277777778f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr));
}
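// mrt_collide: multiple-relaxation-time collision in moment space. When SmagLES is
// enabled, the non-equilibrium stress PI_ab is assembled from f - feq and the
// relaxation rate omega is adjusted with a Smagorinsky-type eddy viscosity (constant
// CS, LRFACTOR/LRLEVEL for refined grids) before the moments are relaxed.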
inline __device__ void mrt_collide(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, float omega)
{
float u,v,w;
u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
float rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+
f10+f11+f12+f13+f14+f15+f16+f17+f18;
float usqr = u*u+v*v+w*w;
// u = rho*u;
// v = rho*v;
// w = rho*w;
float m1,m2,m4,m6,m8,m9,m10,m11,m12,m13,m14,m15,m16,m17,m18;
//COMPUTE M-MEQ
//m1 = -19.f*f0+ 19.f*f5+19.f*f6+19.f*f7+19.f*f8+19.f*f10+19.f*f11+19.f*f12+19.f*f13+19.f*f15+19.f*f16+19.f*f17+19.f*f18 -19.f*(u*u+v*v+w*w);//+8.f*(f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18);
//m4 = -3.33333333f*f1+3.33333333f*f3+1.66666667f*f5-1.66666667f*f6-1.66666667f*f7+1.66666667f*f8+1.66666667f*f10-1.66666667f*f12+1.66666667f*f15-1.66666667f*f17;
//m6 = -3.33333333f*f2+3.33333333f*f4+1.66666667f*f5+1.66666667f*f6-1.66666667f*f7-1.66666667f*f8+1.66666667f*f11-1.66666667f*f13+1.66666667f*f16-1.66666667f*f18;
//m8 = -3.33333333f*f9+1.66666667f*f10+1.66666667f*f11+1.66666667f*f12+1.66666667f*f13+3.33333333f*f14-1.66666667f*f15-1.66666667f*f16-1.66666667f*f17-1.66666667f*f18;
m1 = 19.f*(-f0+ f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18 -(u*u+v*v+w*w));//+8.f*(f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18);
m2 = 12.f*f0+ -4.f*f1+ -4.f*f2+ -4.f*f3+ -4.f*f4+ f5+ f6+ f7+ f8+ -4.f*f9+ f10+ f11+ f12+ f13+ -4.f*f14+ f15+ f16+ f17+ f18 +7.53968254f*(u*u+v*v+w*w);
// m4 = 1.666666667f*(-2.f*f1+2.f*f3+f5-f6-f7+f8+f10-f12+f15-f17);
// m6 = 1.666666667f*(-2.f*f2+2.f*f4+f5+f6-f7-f8+f11-f13+f16-f18);
// m8 = 1.666666667f*(-2.f*f9+f10+f11+f12+f13+2.f*f14-f15-f16-f17-f18);
m4 = 1.666666667f*(-3.f*f1+3.f*f3+u);
m6 = 1.666666667f*(-3.f*f2+3.f*f4+v);
m8 = 1.666666667f*(-3.f*f9+3.f*f14+w);
m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+- f9+ f10+ -2.f*f11+ f12+-2.f*f13+- f14+ f15+ -2.f*f16+ f17+-2.f*f18 -(2.f*u*u-(v*v+w*w));
m10 =-4.f*f1+ 2.f*f2+ -4.f*f3+ 2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ f10+ -2.f*f11+ f12+-2.f*f13+ 2.f*f14+ f15+ -2.f*f16+ f17+-2.f*f18;
m11 = f2 + f4+ f5+ f6+ f7+ f8+- f9+-f10 +-f12 +- f14+-f15 +-f17 -(v*v-w*w);
m12 = -2.f*f2 -2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+-f10 +-f12 + 2.f*f14+-f15 +-f17 ;
m13 = f5+-f6+ f7+-f8 -u*v;
m14 = f11 +- f13 + - f16 + f18 -v*w;
m15 = f10 + - f12 +-f15 + f17 -u*w;
m16 = f5+-f6+-f7+ f8 -f10 + f12 +-f15 + f17 ;
m17 = -f5+-f6+ f7+ f8 + f11 +- f13 + f16 +- f18;
m18 = f10+- f11+ f12+- f13 +-f15+ f16+-f17+ f18;
if(SmagLES == "YES"){
//// float PI11 = -1.0f/38.0f*( (m1)+19.0f*omega* (m9));
//// float PI22 = -1.0f/76.0f*(2.0f*(m1)-19.0f*(omega*(m9)-3.0f*omega*(m11)));
//// float PI33 = -1.0f/76.0f*(2.0f*(m1)-19.0f*(omega*(m9)+3.0f*omega*(m11)));
// float PI11 = LRLEVEL*-0.026315789f*m1-0.5f *omega*m9;
// float PI22 = LRLEVEL*-0.026315789f*m1+0.25f*omega*(m9-3.0f*m11);
// float PI33 = LRLEVEL*-0.026315789f*m1+0.25f*omega*(m9+3.0f*m11);
// float PI12 = LRLEVEL*-1.5f*omega*m13;
// float PI23 = LRLEVEL*-1.5f*omega*m14;
// float PI13 = LRLEVEL*-1.5f*omega*m15;
// float nu0 = ((1.0f/omega)-0.5f)*LRFACTOR/3.0f;
// float Smag = sqrt(2.f*(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13));
// //float Cs = 0.01f;
// omega = 1.0f/(3.0f*(nu0+CS*Smag*LRFACTOR*LRFACTOR)*LRLEVEL+0.5f);
// //omega = 1.0f/(1.0f/omega+3.f*CS*Smag*LRFACTOR*LRFACTOR);
// //omega = 1.0f/(1.0f*LRLEVEL/1.99983f-1.f+0.5f+3.f*CS*Smag*LRFACTOR);
//float feq0 = 0.1904761791f*rho+-0.597127747f*usqr ;
float feq1 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*u ;
float feq2 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*v ;
float feq3 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*u ;
float feq4 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*v ;
float feq5 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+v) ;
float feq6 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-v) ;
float feq7 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+v) ;
float feq8 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-v) ;
float feq9 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*w;
float feq10= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+w) ;
float feq11= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v+w);
float feq12= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-w) ;
float feq13= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v-w);
float feq14= 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*w;
float feq15= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(u-w) ;
float feq16= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(v-w);
float feq17= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(u+w) ;
float feq18= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(v+w);
feq1 += 0.055555556f*(2.f*u*u-(v*v+w*w));
feq2 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
feq3 += 0.055555556f*(2.f*u*u-(v*v+w*w));
feq4 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
feq5 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
feq6 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
feq7 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
feq8 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
feq9 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
feq10+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
feq11+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
feq12+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
feq13+= -0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ;
feq14+= -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
feq15+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
feq16+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ;
feq17+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
feq18+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
float PI11 = (f1-feq1)+(f3-feq3)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17);
float PI22 = (f2-feq2)+(f4-feq4)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
float PI33 = (f9-feq9)+(f14-feq14)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
float PI12 = (f5-feq5)+(f7-feq7)-(f6-feq6)-(f8-feq8);
float PI13 = (f10-feq10)+(f17-feq17)-(f12-feq12)-(f15-feq15);
float PI23 = (f11-feq11)+(f18-feq18)-(f13-feq13)-(f16-feq16);
float Q = sqrt(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13);
//float nu0 = ((1.0f/omega)-0.5f)*LRFACTOR/3.0f;
float tau0 = 1.f/omega;
//float Smag = (sqrt(nu0*nu0+18.f*CS*LRFACTOR*LRFACTOR*Q)-nu0)/(6.f*CS*LRFACTOR*LRFACTOR);
//float Smag = LRFACTOR*(sqrt(4.f/9.f*tau0*tau0+8.f*CS*LRFACTOR*Q)-2.f/3.f*tau0)/(4.f*CS*LRFACTOR*LRFACTOR);
//omega = 1.0f/(3.0f*(nu0+CS*Smag*LRFACTOR*LRFACTOR)*LRLEVEL+0.5f);
//float tau = tau0+0.5*(-tau0+sqrt(tau0*tau0+18.f*CS*LRFACTOR*Q));
float tau = tau0+0.5f*(-tau0+sqrt(tau0*tau0+18.f*CS*sqrt(2.f)*Q));
omega = 1.f/tau;
//float tau = 3.f*nu0*LRFACTOR+0.5f+(sqrt(tau0*tau0+18.f*CS*CS*LRFACTOR*LRFACTOR*Q)-tau0)*0.5f;
//omega = 1.f/tau;
}
f0 -=- 0.012531328f*(m1)+ 0.047619048f*(m2);
f1 -=-0.0045948204f*(m1)+-0.015873016f*(m2)+ -0.1f*(m4) + 0.055555556f*((m9)*omega-m10);
f2 -=-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m6) +-0.027777778f*((m9)*omega-m10)+ 0.083333333f*((m11)*omega-m12);
f3 -=-0.0045948204f*(m1)+-0.015873016f*(m2)+ 0.1f*(m4) + 0.055555556f*((m9)*omega-m10);
f4 -=-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m6) +-0.027777778f*((m9)*omega-m10)+ 0.083333333f*((m11)*omega-m12);
f5 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16-m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13)));
f6 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16-m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13)));
f7 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16+m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13)));
f8 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16+m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13)));
f9 -=-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m8)+-0.027777778f*((m9)*omega-m10)+-0.083333333f*((m11)*omega-m12);
f10-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16+m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15)));
f11-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6+m8)+0.125f*( m17-m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +( 0.25f*(m14)));
f12-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16+m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15)));
f13-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6-m8)+0.125f*(-m17-m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +(-0.25f*(m14)));
f14-=-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m8)+-0.027777778f*((m9)*omega-m10)+-0.083333333f*((m11)*omega-m12);
f15-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16-m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15)));
f16-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6-m8)+0.125f*( m17+m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +(-0.25f*(m14)));
f17-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16-m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15)));
f18-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6+m8)+0.125f*(-m17+m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +( 0.25f*(m14)));
}
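// mrt_collide_LES: same MRT collision with the Smagorinsky constant passed in as Cs;
// note the active LES branch below still uses the global CS when it updates omega,
// and Cs only appears in the commented-out alternatives.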
inline __device__ void mrt_collide_LES(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, float omega, float Cs)
{
float u,v,w;
u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
float rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+
f10+f11+f12+f13+f14+f15+f16+f17+f18;
float usqr = u*u+v*v+w*w;
// u = rho*u;
// v = rho*v;
// w = rho*w;
float m1,m2,m4,m6,m8,m9,m10,m11,m12,m13,m14,m15,m16,m17,m18;
//COMPUTE M-MEQ
//m1 = -19.f*f0+ 19.f*f5+19.f*f6+19.f*f7+19.f*f8+19.f*f10+19.f*f11+19.f*f12+19.f*f13+19.f*f15+19.f*f16+19.f*f17+19.f*f18 -19.f*(u*u+v*v+w*w);//+8.f*(f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18);
//m4 = -3.33333333f*f1+3.33333333f*f3+1.66666667f*f5-1.66666667f*f6-1.66666667f*f7+1.66666667f*f8+1.66666667f*f10-1.66666667f*f12+1.66666667f*f15-1.66666667f*f17;
//m6 = -3.33333333f*f2+3.33333333f*f4+1.66666667f*f5+1.66666667f*f6-1.66666667f*f7-1.66666667f*f8+1.66666667f*f11-1.66666667f*f13+1.66666667f*f16-1.66666667f*f18;
//m8 = -3.33333333f*f9+1.66666667f*f10+1.66666667f*f11+1.66666667f*f12+1.66666667f*f13+3.33333333f*f14-1.66666667f*f15-1.66666667f*f16-1.66666667f*f17-1.66666667f*f18;
m1 = 19.f*(-f0+ f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18 -(u*u+v*v+w*w));//+8.f*(f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18);
m2 = 12.f*f0+ -4.f*f1+ -4.f*f2+ -4.f*f3+ -4.f*f4+ f5+ f6+ f7+ f8+ -4.f*f9+ f10+ f11+ f12+ f13+ -4.f*f14+ f15+ f16+ f17+ f18 +7.53968254f*(u*u+v*v+w*w);
// m4 = 1.666666667f*(-2.f*f1+2.f*f3+f5-f6-f7+f8+f10-f12+f15-f17);
// m6 = 1.666666667f*(-2.f*f2+2.f*f4+f5+f6-f7-f8+f11-f13+f16-f18);
// m8 = 1.666666667f*(-2.f*f9+f10+f11+f12+f13+2.f*f14-f15-f16-f17-f18);
m4 = 1.666666667f*(-3.f*f1+3.f*f3+u);
m6 = 1.666666667f*(-3.f*f2+3.f*f4+v);
m8 = 1.666666667f*(-3.f*f9+3.f*f14+w);
m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+- f9+ f10+ -2.f*f11+ f12+-2.f*f13+- f14+ f15+ -2.f*f16+ f17+-2.f*f18 -(2.f*u*u-(v*v+w*w));
m10 =-4.f*f1+ 2.f*f2+ -4.f*f3+ 2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ f10+ -2.f*f11+ f12+-2.f*f13+ 2.f*f14+ f15+ -2.f*f16+ f17+-2.f*f18;
m11 = f2 + f4+ f5+ f6+ f7+ f8+- f9+-f10 +-f12 +- f14+-f15 +-f17 -(v*v-w*w);
m12 = -2.f*f2 -2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+-f10 +-f12 + 2.f*f14+-f15 +-f17 ;
m13 = f5+-f6+ f7+-f8 -u*v;
m14 = f11 +- f13 + - f16 + f18 -v*w;
m15 = f10 + - f12 +-f15 + f17 -u*w;
m16 = f5+-f6+-f7+ f8 -f10 + f12 +-f15 + f17 ;
m17 = -f5+-f6+ f7+ f8 + f11 +- f13 + f16 +- f18;
m18 = f10+- f11+ f12+- f13 +-f15+ f16+-f17+ f18;
if(SmagLES == "YES"){
// float PI11 = -0.026315789f*m1-0.5f *omega*m9;
// float PI22 = -0.026315789f*m1+0.25f*omega*(m9-3.0f*m11);
// float PI33 = -0.026315789f*m1+0.25f*omega*(m9+3.0f*m11);
//
// float PI12 = -1.5f*omega*m13;
// float PI23 = -1.5f*omega*m14;
// float PI13 = -1.5f*omega*m15;
// float Smag = sqrt(2.f*(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13));
// omega = 1.0f/(1.0f/omega+3.f*CS*Smag);
// float PI11 = LRLEVEL*-1.0f/38.0f*( (m1)+19.0f*omega* (m9));
// float PI22 = LRLEVEL*-1.0f/76.0f*(2.0f*(m1)-19.0f*(omega*(m9)-3.0f*omega*(m11)));
// float PI33 = LRLEVEL*-1.0f/76.0f*(2.0f*(m1)-19.0f*(omega*(m9)+3.0f*omega*(m11)));
// float PI12 = LRLEVEL*-1.5f*omega*m13;
// float PI23 = LRLEVEL*-1.5f*omega*m14;
// float PI13 = LRLEVEL*-1.5f*omega*m15;
// float nu0 = ((1.0f/omega)-0.5f)/3.0f;
// float Smag = sqrt(PI11*PI11+PI22*PI22+PI33*PI33+PI12*PI12+PI23*PI23+PI13*PI13);
// omega = 1.0f/(3.0f*(nu0+Cs*Smag*LRLEVEL*LRLEVEL)+0.5f);
//float feq0 = 0.1904761791f*rho+-0.597127747f*usqr ;
float feq1 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*u ;
float feq2 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*v ;
float feq3 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*u ;
float feq4 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*v ;
float feq5 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+v) ;
float feq6 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-v) ;
float feq7 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+v) ;
float feq8 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-v) ;
float feq9 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*w;
float feq10= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+w) ;
float feq11= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v+w);
float feq12= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-w) ;
float feq13= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v-w);
float feq14= 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*w;
float feq15= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(u-w) ;
float feq16= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(v-w);
float feq17= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(u+w) ;
float feq18= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(v+w);
feq1 += 0.055555556f*(2.f*u*u-(v*v+w*w));
feq2 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
feq3 += 0.055555556f*(2.f*u*u-(v*v+w*w));
feq4 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
feq5 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
feq6 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
feq7 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
feq8 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
feq9 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
feq10+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
feq11+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
feq12+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
feq13+= -0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ;
feq14+= -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
feq15+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
feq16+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ;
feq17+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
feq18+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
float PI11 = (f1-feq1)+(f3-feq3)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17);
float PI22 = (f2-feq2)+(f4-feq4)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
float PI33 = (f9-feq9)+(f14-feq14)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
float PI12 = (f5-feq5)+(f7-feq7)-(f6-feq6)-(f8-feq8);
float PI13 = (f10-feq10)+(f17-feq17)-(f12-feq12)-(f15-feq15);
float PI23 = (f11-feq11)+(f18-feq18)-(f13-feq13)-(f16-feq16);
//float Q = sqrt(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13);
//float nu0 = ((1.0f/omega)-0.5f)/3.0f;
//
//float Smag = (sqrt(nu0*nu0+18.f*CS*Q)-nu0)/(6.f*CS);
//
////omega = 1.0f/(1.0f/omega+3.f*CS*Smag);
//
//float tau0 = 1.f/omega;
//float tau = 3.f*nu0+0.5f+(sqrt(tau0*tau0+18.f*CS*CS*Q)-tau0)*0.5f;
//omega = 1.f/tau;
float Q = sqrt(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13);
//float nu0 = ((1.0f/omega)-0.5f)/3.0f;
float tau0 = 1.f/omega;
//float Smag = (sqrt(nu0*nu0+18.f*CS*LRFACTOR*LRFACTOR*Q)-nu0)/(6.f*CS*LRFACTOR*LRFACTOR);
//float Smag = (sqrt(4.f/9.f*tau0*tau0+8.f*CS*Q)-2.f/3.f*tau0)/(4.f*CS);
//omega = 1.0f/(3.0f*(nu0+CS*Smag)+0.5f);
float tau = tau0+0.5f*(-tau0+sqrt(tau0*tau0+18.f*sqrt(2.f)*CS*Q));
omega = 1.f/tau;
}
f0 -=- 0.012531328f*(m1)+ 0.047619048f*(m2);
f1 -=-0.0045948204f*(m1)+-0.015873016f*(m2)+ -0.1f*(m4) + 0.055555556f*((m9)*omega-m10);
f2 -=-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m6) +-0.027777778f*((m9)*omega-m10)+ 0.083333333f*((m11)*omega-m12);
f3 -=-0.0045948204f*(m1)+-0.015873016f*(m2)+ 0.1f*(m4) + 0.055555556f*((m9)*omega-m10);
f4 -=-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m6) +-0.027777778f*((m9)*omega-m10)+ 0.083333333f*((m11)*omega-m12);
f5 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16-m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13)));
f6 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16-m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13)));
f7 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16+m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13)));
f8 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16+m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13)));
f9 -=-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m8)+-0.027777778f*((m9)*omega-m10)+-0.083333333f*((m11)*omega-m12);
f10-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16+m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15)));
f11-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6+m8)+0.125f*( m17-m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +( 0.25f*(m14)));
f12-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16+m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15)));
f13-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6-m8)+0.125f*(-m17-m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +(-0.25f*(m14)));
f14-=-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m8)+-0.027777778f*((m9)*omega-m10)+-0.083333333f*((m11)*omega-m12);
f15-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16-m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15)));
f16-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6-m8)+0.125f*( m17+m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +(-0.25f*(m14)));
f17-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16-m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15)));
f18-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6+m8)+0.125f*(-m17+m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +( 0.25f*(m14)));
}
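//bgk_scale_cf rescales the 19 distributions when they are transferred between grid
//levels: each f_i is blended with its second-order BGK equilibrium feq_i as
//f_i = SF*f_i + (1-SF)*feq_i, which rescales the non-equilibrium part by SF while
//leaving rho, u, v, w unchanged.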
inline __device__ void bgk_scale_cf(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, float SF)
{
float rho,u,v,w;
rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+
f10+f11+f12+f13+f14+f15+f16+f17+f18;
u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
float usqr = u*u+v*v+w*w;
float feq0 = 0.3333333333f*(rho-1.5f*usqr);
float feq1 = 0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);
float feq2 = 0.0555555556f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr);
float feq3 = 0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
float feq4 = 0.0555555556f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr);
float feq5 = 0.0277777778f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr);
float feq6 = 0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
float feq7 = 0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
float feq8 = 0.0277777778f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr);
float feq9 = 0.0555555556f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr);
float feq10 = 0.0277777778f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr);
float feq11 = 0.0277777778f*(rho+3.0f*(v+w)+4.5f*(v+w)*(v+w)-1.5f*usqr);
float feq12 = 0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
float feq13 = 0.0277777778f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr);
float feq14 = 0.0555555556f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr);
float feq15 = 0.0277777778f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr);
float feq16 = 0.0277777778f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr);
float feq17 = 0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
float feq18 = 0.0277777778f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr);
f0 =SF*f0 +(1.0f-SF)*feq0 ;
f1 =SF*f1 +(1.0f-SF)*feq1 ;
f2 =SF*f2 +(1.0f-SF)*feq2 ;
f3 =SF*f3 +(1.0f-SF)*feq3 ;
f4 =SF*f4 +(1.0f-SF)*feq4 ;
f5 =SF*f5 +(1.0f-SF)*feq5 ;
f6 =SF*f6 +(1.0f-SF)*feq6 ;
f7 =SF*f7 +(1.0f-SF)*feq7 ;
f8 =SF*f8 +(1.0f-SF)*feq8 ;
f9 =SF*f9 +(1.0f-SF)*feq9 ;
f10=SF*f10+(1.0f-SF)*feq10;
f11=SF*f11+(1.0f-SF)*feq11;
f12=SF*f12+(1.0f-SF)*feq12;
f13=SF*f13+(1.0f-SF)*feq13;
f14=SF*f14+(1.0f-SF)*feq14;
f15=SF*f15+(1.0f-SF)*feq15;
f16=SF*f16+(1.0f-SF)*feq16;
f17=SF*f17+(1.0f-SF)*feq17;
f18=SF*f18+(1.0f-SF)*feq18;
}
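//mrt_scale_cf performs the same non-equilibrium rescaling as bgk_scale_cf but with the
//MRT-style equilibria (including the quadratic velocity corrections added to feq below).
//It also evaluates non-equilibrium moments and a Smagorinsky-type strain estimate (Smag)
//from them; in the present form that estimate is computed but the final blend uses only
//the supplied scale factor SF.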
inline __device__ void mrt_scale_cf(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, float SF)
{
float rho,u,v,w;
rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+
f10+f11+f12+f13+f14+f15+f16+f17+f18;
u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
float usqr = u*u+v*v+w*w;
float feq0 = 0.1904761791f*rho+-0.597127747f*usqr ;
float feq1 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*u ;
float feq2 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*v ;
float feq3 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*u ;
float feq4 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*v ;
float feq5 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+v) ;
float feq6 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-v) ;
float feq7 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+v) ;
float feq8 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-v) ;
float feq9 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*w;
float feq10= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+w) ;
float feq11= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v+w);
float feq12= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-w) ;
float feq13= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v-w);
float feq14= 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*w;
float feq15= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(u-w) ;
float feq16= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(v-w);
float feq17= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(u+w) ;
float feq18= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(v+w);
feq1 += 0.055555556f*(2.f*u*u-(v*v+w*w));
feq2 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
feq3 += 0.055555556f*(2.f*u*u-(v*v+w*w));
feq4 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
feq5 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
feq6 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
feq7 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
feq8 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
feq9 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
feq10+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
feq11+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
feq12+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
feq13+= -0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ;
feq14+= -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
feq15+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
feq16+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ;
feq17+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
feq18+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
//float m1 = 19.f*(-f0+ f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18) -19.f*(u*u+v*v+w*w);
//float m2 = 12.f*f0+-4.f*f1+-4.f*f2+-4.f*f3+-4.f*f4+f5+f6+f7+f8+-4.f*f9+f10+f11+f12+f13+-4.f*f14+f15+f16+f17+f18 +7.53968254f*(u*u+v*v+w*w);
//float m4 = 1.666666667f*(-2.f*f1+2.f*f3+f5-f6-f7+f8+f10-f12+f15-f17);
//float m6 = 1.666666667f*(-2.f*f2+2.f*f4+f5+f6-f7-f8+f11-f13+f16-f18);
//float m8 = 1.666666667f*(-2.f*f9+f10+f11+f12+f13+2.f*f14-f15-f16-f17-f18);
//float m4 = 1.666666667f*(-3.f*f1+3.f*f3+u);
//float m6 = 1.666666667f*(-3.f*f2+3.f*f4+v);
//float m8 = 1.666666667f*(-3.f*f9+3.f*f14+w);
//float m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+- f9+ f10+ -2.f*f11+ f12+-2.f*f13+- f14+ f15+ -2.f*f16+ f17+-2.f*f18 -(2.f*u*u-(v*v+w*w));
//float m10 =-4.f*f1+ 2.f*f2+ -4.f*f3+ 2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ f10+ -2.f*f11+ f12+-2.f*f13+ 2.f*f14+ f15+ -2.f*f16+ f17+-2.f*f18;
//float m11 = f2 + f4+ f5+ f6+ f7+ f8+- f9+-f10 +-f12 +- f14+-f15 +-f17 -(v*v-w*w);
//float m12 = -2.f*f2 -2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+-f10 +-f12 + 2.f*f14+-f15 +-f17 ;
//float m13 = f5+-f6+ f7+-f8 -u*v;
//float m14 = f11 +- f13 + - f16 + f18 -v*w;
//float m15 = f10 + - f12 +-f15 + f17 -u*w;
//float m16 = f5+-f6+-f7+ f8 -f10 + f12 +-f15 + f17 ;
//float m17 = -f5+-f6+ f7+ f8 + f11 +- f13 + f16 +- f18;
//float m18 = f10+- f11+ f12+- f13 +-f15+ f16+-f17+ f18;
float m1 = 19.f*(-f0+ f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18 -(u*u+v*v+w*w));
float m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+- f9+ f10+ -2.f*f11+ f12+-2.f*f13+- f14+ f15+ -2.f*f16+ f17+-2.f*f18 -(2.f*u*u-(v*v+w*w));
float m11 = f2 + f4+ f5+ f6+ f7+ f8+- f9+-f10 +-f12 +- f14+-f15 +-f17 -(v*v-w*w);
float m13 = f5+-f6+ f7+-f8 -u*v;
float m14 = f11 +- f13 + - f16 + f18 -v*w;
float m15 = f10 + - f12 +-f15 + f17 -u*w;
float omega = 1.0f/(3.0f*(UMAX*OBSTR1*2.f/RE)+0.5f);
//float omega2 = 2.0f/(1.0f+2.0f*(2.0f/omega-1.0f));
float PI11 = -0.026315789f*m1-0.5f *omega*m9;
float PI22 = -0.026315789f*m1+0.25f*omega*(m9-3.0f*m11);
float PI33 = -0.026315789f*m1+0.25f*omega*(m9+3.0f*m11);
float PI12 = -1.5f*omega*m13;
float PI23 = -1.5f*omega*m14;
float PI13 = -1.5f*omega*m15;
//we know Smag on coarse mesh
float Smag = sqrt(2.f*(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13));
//omega = 1.0f/(3.0f*(nu0+Cs*Smag*sqrt(2.f))+0.5f);
//omega = 1.0f/(1.0f/omega+3.f*CS*Smag);
//omega2 = 1.0f/(1.0f/omega2+3.f*CS*Smag*sqrt(2.f)*LRFACTOR*LRFACTOR);
//omega = 1.0f/(1.0f/omega +3.f*CS*Smag);
//omega2 = 1.0f/(1.0f/omega2+3.f*CS*Smag*sqrt(2.f)*LRFACTOR*LRFACTOR);
//omega = 1.0f/(1.0f/omega +3.f*CS*Smag);
//omega2 = 1.0f/(1.0f*LRLEVEL/omega2-1.f+0.5f+3.f*CS*Smag*sqrt(2.f)*LRFACTOR);
//float PI11 = (f1-feq1)+(f3-feq3)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17);
//float PI22 = (f2-feq2)+(f4-feq4)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
//float PI33 = (f9-feq9)+(f14-feq14)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
//float PI12 = (f5-feq5)+(f7-feq7)-(f6-feq6)-(f8-feq8);
//float PI13 = (f10-feq10)+(f17-feq17)-(f12-feq12)-(f15-feq15);
//float PI23 = (f11-feq11)+(f18-feq18)-(f13-feq13)-(f16-feq16);
//
//float Q = sqrt(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13);
//float nu0 = ((1.0f/omega)-0.5f)/3.0f;
//float tau0c = 1.f/omega;
//float tau = tau0c+0.5*(-tau0c+sqrt(tau0c*tau0c+18.f*CS*Q));//tau_total of coarse mesh
//omega = 1.f/tau;//total omega on coarse mesh
//tau = tau0+0.5*(-tau0+sqrt(tau0*tau0+18.f*CS*LRFACTOR*Q));
//omega2= 1.f/tau;
//SF = (omega*(1.0f-omega2))/((1.0f-omega)*omega2);//for post-collision
//SF = omega*0.5f/omega2;//for post-streaming, pre-collision?
f0 =SF*f0 +(1.0f-SF)*feq0 ;
f1 =SF*f1 +(1.0f-SF)*feq1 ;
f2 =SF*f2 +(1.0f-SF)*feq2 ;
f3 =SF*f3 +(1.0f-SF)*feq3 ;
f4 =SF*f4 +(1.0f-SF)*feq4 ;
f5 =SF*f5 +(1.0f-SF)*feq5 ;
f6 =SF*f6 +(1.0f-SF)*feq6 ;
f7 =SF*f7 +(1.0f-SF)*feq7 ;
f8 =SF*f8 +(1.0f-SF)*feq8 ;
f9 =SF*f9 +(1.0f-SF)*feq9 ;
f10=SF*f10+(1.0f-SF)*feq10;
f11=SF*f11+(1.0f-SF)*feq11;
f12=SF*f12+(1.0f-SF)*feq12;
f13=SF*f13+(1.0f-SF)*feq13;
f14=SF*f14+(1.0f-SF)*feq14;
f15=SF*f15+(1.0f-SF)*feq15;
f16=SF*f16+(1.0f-SF)*feq16;
f17=SF*f17+(1.0f-SF)*feq17;
f18=SF*f18+(1.0f-SF)*feq18;
}
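//mrt_scale_fc_LES rescales distributions for the fine-to-coarse transfer when the LES
//model is active: the scale factor SF = omega*(1-omega2)/((1-omega)*omega2) is built
//from the coarse (omega) and fine (omega2) relaxation rates, and each f_i is blended
//with its equilibrium as in the routines above. The commented-out variants record
//alternative Smagorinsky-based definitions of SF that were tried.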
inline __device__ void mrt_scale_fc_LES(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, float omega, float omega2)
{
float rho,u,v,w;
rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+
f10+f11+f12+f13+f14+f15+f16+f17+f18;
u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
float usqr = u*u+v*v+w*w;
float feq0 = 0.1904761791f*rho+-0.597127747f*usqr ;
float feq1 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*u ;
float feq2 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*v ;
float feq3 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*u ;
float feq4 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*v ;
float feq5 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+v) ;
float feq6 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-v) ;
float feq7 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+v) ;
float feq8 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-v) ;
float feq9 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*w;
float feq10= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+w) ;
float feq11= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v+w);
float feq12= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-w) ;
float feq13= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v-w);
float feq14= 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*w;
float feq15= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(u-w) ;
float feq16= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(v-w);
float feq17= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(u+w) ;
float feq18= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(v+w);
feq1 += 0.055555556f*(2.f*u*u-(v*v+w*w));
feq2 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
feq3 += 0.055555556f*(2.f*u*u-(v*v+w*w));
feq4 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
feq5 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
feq6 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
feq7 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
feq8 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
feq9 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
feq10+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
feq11+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
feq12+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
feq13+= -0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ;
feq14+= -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
feq15+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
feq16+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ;
feq17+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
feq18+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
//float m1 = 19.f*(-f0+ f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18 -(u*u+v*v+w*w));
//float m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+- f9+ f10+ -2.f*f11+ f12+-2.f*f13+- f14+ f15+ -2.f*f16+ f17+-2.f*f18 -(2.f*u*u-(v*v+w*w));
//float m11 = f2 + f4+ f5+ f6+ f7+ f8+- f9+-f10 +-f12 +- f14+-f15 +-f17 -(v*v-w*w);
//float m13 = f5+-f6+ f7+-f8 -u*v;
//float m14 = f11 +- f13 + - f16 + f18 -v*w;
//float m15 = f10 + - f12 +-f15 + f17 -u*w;
//float PI11 = -0.026315789f*m1-0.5f *omega*m9;
//float PI22 = -0.026315789f*m1+0.25f*omega*(m9-3.0f*m11);
//float PI33 = -0.026315789f*m1+0.25f*omega*(m9+3.0f*m11);
//float PI12 = -1.5f*omega*m13;
//float PI23 = -1.5f*omega*m14;
//float PI13 = -1.5f*omega*m15;
////we know Smag on fine mesh. Smag_c=Smag_f*sqrt(2)
//float Smag = sqrt(2.f*(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13));
//float nu0 = ((1.0f/omega)-0.5f)/3.0f;
////omega = 1.0f/(3.0f*(nu0+CS*Smag*sqrt(2.f))+0.5f);
////omega2 = 1.0f/(1.0f/omega2+3.f*CS*Smag*LRFACTOR);
////omega = 1.0f/(1.0f/omega+3.f*CS*Smag/sqrt(2.f));
////omega2 = 1.0f/(1.0f*LRLEVEL/omega2-1.f+0.5f+3.f*CS*Smag*LRFACTOR);
////omega = 1.0f/(1.0f/omega+3.f*CS*Smag/sqrt(2.f));
//float PI11 = (f1-feq1)+(f3-feq3)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17);
//float PI22 = (f2-feq2)+(f4-feq4)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
//float PI33 = (f9-feq9)+(f14-feq14)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
//float PI12 = (f5-feq5)+(f7-feq7)-(f6-feq6)-(f8-feq8);
//float PI13 = (f10-feq10)+(f17-feq17)-(f12-feq12)-(f15-feq15);
//float PI23 = (f11-feq11)+(f18-feq18)-(f13-feq13)-(f16-feq16);
//
//float Q = sqrt(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13);
//float nu0 = ((1.0f/omega)-0.5f)/3.0f;
//float tau0f = 1.f/omega2;
//float tau0c = 1.f/omega;
//float tau = tau0f+0.5*(-tau0f+sqrt(tau0f*tau0f+18.f*CS*sqrt(2.f)*Q));//tau_total of fine
//omega2 = 1.f/tau;//total omega on fine mesh
//tau = LRLEVEL*(tau-tau0f)+tau0c;
//omega= 1.f/tau;
//tau = tau0+0.5*(-tau0+sqrt(tau0*tau0+18.f*CS*Q));
float SF = (omega*(1.0f-omega2))/((1.0f-omega)*omega2);
//float SF = omega2*2.f/omega;
//float SF = ((1.0f-omega)*omega2/LRFACTOR)/(omega*(1.0f-omega2));
//SF = omega*2.f/omega2;
f0 =SF*f0 +(1.0f-SF)*feq0 ;
f1 =SF*f1 +(1.0f-SF)*feq1 ;
f2 =SF*f2 +(1.0f-SF)*feq2 ;
f3 =SF*f3 +(1.0f-SF)*feq3 ;
f4 =SF*f4 +(1.0f-SF)*feq4 ;
f5 =SF*f5 +(1.0f-SF)*feq5 ;
f6 =SF*f6 +(1.0f-SF)*feq6 ;
f7 =SF*f7 +(1.0f-SF)*feq7 ;
f8 =SF*f8 +(1.0f-SF)*feq8 ;
f9 =SF*f9 +(1.0f-SF)*feq9 ;
f10=SF*f10+(1.0f-SF)*feq10;
f11=SF*f11+(1.0f-SF)*feq11;
f12=SF*f12+(1.0f-SF)*feq12;
f13=SF*f13+(1.0f-SF)*feq13;
f14=SF*f14+(1.0f-SF)*feq14;
f15=SF*f15+(1.0f-SF)*feq15;
f16=SF*f16+(1.0f-SF)*feq16;
f17=SF*f17+(1.0f-SF)*feq17;
f18=SF*f18+(1.0f-SF)*feq18;
}
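//f_Extract runs on the coarse mesh: for coarse nodes lying on the inner ring of the
//refined region it trilinearly interpolates the fine-mesh distributions (fin), rescales
//them with SF (MRT or BGK variant), and writes the result into the coarse interior
//array fout. gin/hin are passed for interface consistency but only fin is sampled here.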
__global__ void f_Extract(float* fout, float* fin, float* gin, float* hin,
size_t pitch_c, size_t pitch_f, int zInner_c, int zInner_f, float SF, int GPU)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//local index on f of coarse mesh
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
float xcoord = x;//xcoord in gpu
float ycoord = y;//ycoord in gpu
float zcoord = z+1;//zcoord in gpu
float f[19];
if( (x > LRX0+1 && x < LRX0+XLRDIM*LRFACTOR-1 && y > LRY0+1 && y < LRY0+YLRDIM*LRFACTOR-1) &&
(x == int(LRX0+2) || x == int(LRX0+XLRDIM*LRFACTOR-1) || y == int(LRY0+2) || y == int(LRY0+YLRDIM*LRFACTOR-1)) )
{
float xcoord_LR = LRLEVEL*(xcoord-LRX0);//coord in refined region coordinates
float ycoord_LR = LRLEVEL*(ycoord-LRY0);
float zcoord_LR = LRLEVEL*(zcoord-LRZ0)-1.f;//-1.f to account for g layer
int xm = int(xcoord_LR);
int ym = int(ycoord_LR);
int zm = int(zcoord_LR);
int xp = xm+1;
int yp = ym+1;
int zp = zm+1;
float xf = xcoord_LR-xm;
float yf = ycoord_LR-ym;
float zf = zcoord_LR-zm;
for(int i=0;i<19;i++){
float v000 = fin[f_memLR(i ,xm,ym,zm,pitch_f,zInner_f)];
float v001 = fin[f_memLR(i ,xp,ym,zm,pitch_f,zInner_f)];
float v010 = fin[f_memLR(i ,xm,yp,zm,pitch_f,zInner_f)];
float v011 = fin[f_memLR(i ,xp,yp,zm,pitch_f,zInner_f)];
float v100 = fin[f_memLR(i ,xm,ym,zp,pitch_f,zInner_f)];
float v101 = fin[f_memLR(i ,xp,ym,zp,pitch_f,zInner_f)];
float v110 = fin[f_memLR(i ,xm,yp,zp,pitch_f,zInner_f)];
float v111 = fin[f_memLR(i ,xp,yp,zp,pitch_f,zInner_f)];
f[i] = trilinear_interp(v000, v001, v010, v011, v100, v101, v110, v111, xf, yf, zf);
}
if(MODEL == "MRT")
mrt_scale_cf(f[0],f[1],f[2],f[3],f[4],f[5],f[6],f[7],f[8],f[9],f[10],f[11],f[12],f[13],f[14],f[15],f[16],f[17],f[18],SF);
else if(MODEL == "BGK")
bgk_scale_cf(f[0],f[1],f[2],f[3],f[4],f[5],f[6],f[7],f[8],f[9],f[10],f[11],f[12],f[13],f[14],f[15],f[16],f[17],f[18],SF);
fout[f_mem(0 ,x,y,z,pitch_c,zInner_c)] = f[0 ];
fout[f_mem(1 ,x,y,z,pitch_c,zInner_c)] = f[1 ];
fout[f_mem(2 ,x,y,z,pitch_c,zInner_c)] = f[2 ];
fout[f_mem(3 ,x,y,z,pitch_c,zInner_c)] = f[3 ];
fout[f_mem(4 ,x,y,z,pitch_c,zInner_c)] = f[4 ];
fout[f_mem(5 ,x,y,z,pitch_c,zInner_c)] = f[5 ];
fout[f_mem(6 ,x,y,z,pitch_c,zInner_c)] = f[6 ];
fout[f_mem(7 ,x,y,z,pitch_c,zInner_c)] = f[7 ];
fout[f_mem(8 ,x,y,z,pitch_c,zInner_c)] = f[8 ];
fout[f_mem(9 ,x,y,z,pitch_c,zInner_c)] = f[9 ];
fout[f_mem(10,x,y,z,pitch_c,zInner_c)] = f[10];
fout[f_mem(11,x,y,z,pitch_c,zInner_c)] = f[11];
fout[f_mem(12,x,y,z,pitch_c,zInner_c)] = f[12];
fout[f_mem(13,x,y,z,pitch_c,zInner_c)] = f[13];
fout[f_mem(14,x,y,z,pitch_c,zInner_c)] = f[14];
fout[f_mem(15,x,y,z,pitch_c,zInner_c)] = f[15];
fout[f_mem(16,x,y,z,pitch_c,zInner_c)] = f[16];
fout[f_mem(17,x,y,z,pitch_c,zInner_c)] = f[17];
fout[f_mem(18,x,y,z,pitch_c,zInner_c)] = f[18];
}
}
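//g_Extract does the same fine-to-coarse extraction for the bottom buffer plane: it
//interpolates between the fine g buffer (gin) and the first interior fine plane
//(fin at z=0) with zf=0.5 and writes the rescaled result into the coarse bottom
//buffer gout.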
__global__ void g_Extract(float* gout, float* fin, float* gin,
size_t pitch_c, size_t pitch_f, int zInner_c, int zInner_f, float SF, int GPU)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//local index on f of coarse mesh
int y = threadIdx.y+blockIdx.y*blockDim.y;
float xcoord = x;//xcoord in gpu
float ycoord = y;//ycoord in gpu
//float zcoord = 0;//zcoord in gpu
float f[19];
if( (x > LRX0+1 && x < LRX0+XLRDIM*LRFACTOR-1 && y > LRY0+1 && y < LRY0+YLRDIM*LRFACTOR-1) &&
(x == int(LRX0+2) || x == int(LRX0+XLRDIM*LRFACTOR-1) || y == int(LRY0+2) || y == int(LRY0+YLRDIM*LRFACTOR-1)) )
{
float xcoord_LR = LRLEVEL*(xcoord-LRX0);//coord in refined region coordinates
float ycoord_LR = LRLEVEL*(ycoord-LRY0);
//float zcoord_LR = LRLEVEL*(zcoord-LRZ0);
int xm = int(xcoord_LR);
int ym = int(ycoord_LR);
int xp = xm+1;
int yp = ym+1;
float xf = xcoord_LR-xm;
float yf = ycoord_LR-ym;
float zf = 0.5f;
for(int i=0;i<19;i++){
float v000 = gin[buff_memLR(i ,xm,ym,pitch_f)];
float v001 = gin[buff_memLR(i ,xp,ym,pitch_f)];
float v010 = gin[buff_memLR(i ,xm,yp,pitch_f)];
float v011 = gin[buff_memLR(i ,xp,yp,pitch_f)];
float v100 = fin[f_memLR(i ,xm,ym,0,pitch_f,zInner_f)];
float v101 = fin[f_memLR(i ,xp,ym,0,pitch_f,zInner_f)];
float v110 = fin[f_memLR(i ,xm,yp,0,pitch_f,zInner_f)];
float v111 = fin[f_memLR(i ,xp,yp,0,pitch_f,zInner_f)];
f[i] = trilinear_interp(v000, v001, v010, v011, v100, v101, v110, v111, xf, yf, zf);
}
if(MODEL == "MRT")
mrt_scale_cf(f[0],f[1],f[2],f[3],f[4],f[5],f[6],f[7],f[8],f[9],f[10],f[11],f[12],f[13],f[14],f[15],f[16],f[17],f[18],SF);
else if(MODEL == "BGK")
bgk_scale_cf(f[0],f[1],f[2],f[3],f[4],f[5],f[6],f[7],f[8],f[9],f[10],f[11],f[12],f[13],f[14],f[15],f[16],f[17],f[18],SF);
gout[buff_mem(0 ,x,y,pitch_c)] = f[0 ];
gout[buff_mem(1 ,x,y,pitch_c)] = f[1 ];
gout[buff_mem(2 ,x,y,pitch_c)] = f[2 ];
gout[buff_mem(3 ,x,y,pitch_c)] = f[3 ];
gout[buff_mem(4 ,x,y,pitch_c)] = f[4 ];
gout[buff_mem(5 ,x,y,pitch_c)] = f[5 ];
gout[buff_mem(6 ,x,y,pitch_c)] = f[6 ];
gout[buff_mem(7 ,x,y,pitch_c)] = f[7 ];
gout[buff_mem(8 ,x,y,pitch_c)] = f[8 ];
gout[buff_mem(9 ,x,y,pitch_c)] = f[9 ];
gout[buff_mem(10,x,y,pitch_c)] = f[10];
gout[buff_mem(11,x,y,pitch_c)] = f[11];
gout[buff_mem(12,x,y,pitch_c)] = f[12];
gout[buff_mem(13,x,y,pitch_c)] = f[13];
gout[buff_mem(14,x,y,pitch_c)] = f[14];
gout[buff_mem(15,x,y,pitch_c)] = f[15];
gout[buff_mem(16,x,y,pitch_c)] = f[16];
gout[buff_mem(17,x,y,pitch_c)] = f[17];
gout[buff_mem(18,x,y,pitch_c)] = f[18];
}
}
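//h_Extract is the top-buffer counterpart of g_Extract: it interpolates between the last
//interior fine plane (fin at z=zInner_f-1) and the fine h buffer (hin) and writes the
//rescaled result into the coarse top buffer hout.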
__global__ void h_Extract(float* hout, float* fin, float* hin,
size_t pitch_c, size_t pitch_f, int zInner_c, int zInner_f, float SF, int GPU)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//local index on f of coarse mesh
int y = threadIdx.y+blockIdx.y*blockDim.y;
float xcoord = x;//xcoord in gpu
float ycoord = y;//ycoord in gpu
//float zcoord = zInner_c+2-1;//zcoord in gpu
float f[19];
if( (x > LRX0+1 && x < LRX0+XLRDIM*LRFACTOR-1 && y > LRY0+1 && y < LRY0+YLRDIM*LRFACTOR-1) &&
(x == int(LRX0+2) || x == int(LRX0+XLRDIM*LRFACTOR-1) || y == int(LRY0+2) || y == int(LRY0+YLRDIM*LRFACTOR-1)) )
{
float xcoord_LR = LRLEVEL*(xcoord-LRX0);//coord in refined region coordinates
float ycoord_LR = LRLEVEL*(ycoord-LRY0);
//float zcoord_LR = LRLEVEL*(zcoord-LRZ0);
int xm = int(xcoord_LR);
int ym = int(ycoord_LR);
int xp = xm+1;
int yp = ym+1;
float xf = xcoord_LR-xm;
float yf = ycoord_LR-ym;
float zf = 0.5f;
for(int i=0;i<19;i++){
float v000 = fin[f_memLR(i ,xm,ym,zInner_f-1,pitch_f,zInner_f)];
float v001 = fin[f_memLR(i ,xp,ym,zInner_f-1,pitch_f,zInner_f)];
float v010 = fin[f_memLR(i ,xm,yp,zInner_f-1,pitch_f,zInner_f)];
float v011 = fin[f_memLR(i ,xp,yp,zInner_f-1,pitch_f,zInner_f)];
float v100 = hin[buff_memLR(i ,xm,ym,pitch_f)];
float v101 = hin[buff_memLR(i ,xp,ym,pitch_f)];
float v110 = hin[buff_memLR(i ,xm,yp,pitch_f)];
float v111 = hin[buff_memLR(i ,xp,yp,pitch_f)];
f[i] = trilinear_interp(v000, v001, v010, v011, v100, v101, v110, v111, xf, yf, zf);
}
if(MODEL == "MRT")
mrt_scale_cf(f[0],f[1],f[2],f[3],f[4],f[5],f[6],f[7],f[8],f[9],f[10],f[11],f[12],f[13],f[14],f[15],f[16],f[17],f[18],SF);
else if(MODEL == "BGK")
bgk_scale_cf(f[0],f[1],f[2],f[3],f[4],f[5],f[6],f[7],f[8],f[9],f[10],f[11],f[12],f[13],f[14],f[15],f[16],f[17],f[18],SF);
hout[buff_mem(0 ,x,y,pitch_c)] = f[0 ];
hout[buff_mem(1 ,x,y,pitch_c)] = f[1 ];
hout[buff_mem(2 ,x,y,pitch_c)] = f[2 ];
hout[buff_mem(3 ,x,y,pitch_c)] = f[3 ];
hout[buff_mem(4 ,x,y,pitch_c)] = f[4 ];
hout[buff_mem(5 ,x,y,pitch_c)] = f[5 ];
hout[buff_mem(6 ,x,y,pitch_c)] = f[6 ];
hout[buff_mem(7 ,x,y,pitch_c)] = f[7 ];
hout[buff_mem(8 ,x,y,pitch_c)] = f[8 ];
hout[buff_mem(9 ,x,y,pitch_c)] = f[9 ];
hout[buff_mem(10,x,y,pitch_c)] = f[10];
hout[buff_mem(11,x,y,pitch_c)] = f[11];
hout[buff_mem(12,x,y,pitch_c)] = f[12];
hout[buff_mem(13,x,y,pitch_c)] = f[13];
hout[buff_mem(14,x,y,pitch_c)] = f[14];
hout[buff_mem(15,x,y,pitch_c)] = f[15];
hout[buff_mem(16,x,y,pitch_c)] = f[16];
hout[buff_mem(17,x,y,pitch_c)] = f[17];
hout[buff_mem(18,x,y,pitch_c)] = f[18];
}
}
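//update_inner advances one LBM step (streaming, boundary handling, collision) on the
//interior z-planes of this GPU's subdomain: streamed-in populations are gathered from
//neighbouring nodes, with the g buffer supplying the plane below z=0 and the h buffer
//the plane above z=zInner-1; im==1/10 nodes get half-way bounce-back, im==100/200 get
//extrapolated outlet/inlet, and the remaining nodes collide with MRT or BGK before the
//result is written to fB.
//A minimal host-side launch sketch (an assumption -- the real launch configuration is
//set elsewhere in this code; BLOCKSIZEY/BLOCKSIZEZ are hypothetical names here):
// dim3 threads(BLOCKSIZEX, BLOCKSIZEY, BLOCKSIZEZ);
// dim3 grid(XDIM/BLOCKSIZEX, YDIM/BLOCKSIZEY, zInner/BLOCKSIZEZ);
// update_inner<<<grid, threads>>>(fA_d, fB_d, g_d, h_d, omega, pitch, GPU, zInner);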
__global__ void update_inner(float* fA, float* fB, float* g, float* h,
float omega, size_t pitch, int GPU, int zInner)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements)
int im = ImageFcn(x,y,GPU*(zInner+2)+1+z);
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
// if(REFINEMENT == "YES" && x > LRX0+1 && x < LRX0+(XLRDIM-1)*LRFACTOR-1
// && y > LRY0+1 && y < LRY0+(YLRDIM-1)*LRFACTOR-1 && z > LRZ0+1 && z < LRZ0+(ZLRDIM-1)*LRFACTOR-1 ||
// (x>XDIM-1)){
// }
// else{
f0 = fA[j];
f1 = fA[f_mem (1 ,x-1,y ,z ,pitch, zInner)];
f3 = fA[f_mem (3 ,x+1,y ,z ,pitch, zInner)];
f2 = fA[f_mem (2 ,x ,y-1,z ,pitch, zInner)];
f5 = fA[f_mem (5 ,x-1,y-1,z ,pitch, zInner)];
f6 = fA[f_mem (6 ,x+1,y-1,z ,pitch, zInner)];
f4 = fA[f_mem (4 ,x ,y+1,z ,pitch, zInner)];
f7 = fA[f_mem (7 ,x+1,y+1,z ,pitch, zInner)];
f8 = fA[f_mem (8 ,x-1,y+1,z ,pitch, zInner)];
if(z==zInner-1){//top nodes need info from h
f9 = fA[f_mem (9 ,x ,y ,z-1,pitch, zInner)];
f10= fA[f_mem (10,x-1,y ,z-1,pitch, zInner)];
f11= fA[f_mem (11,x ,y-1,z-1,pitch, zInner)];
f12= fA[f_mem (12,x+1,y ,z-1,pitch, zInner)];
f13= fA[f_mem (13,x ,y+1,z-1,pitch, zInner)];
f14= h [buff_mem(14,x ,y ,pitch)];
f15= h [buff_mem(15,x-1,y ,pitch)];
f16= h [buff_mem(16,x ,y-1,pitch)];
f17= h [buff_mem(17,x+1,y ,pitch)];
f18= h [buff_mem(18,x ,y+1,pitch)];
}
else if(z==0){//bottom nodes need info from g
f9 = g [buff_mem(9 ,x ,y ,pitch)];
f10= g [buff_mem(10,x-1,y ,pitch)];
f11= g [buff_mem(11,x ,y-1,pitch)];
f12= g [buff_mem(12,x+1,y ,pitch)];
f13= g [buff_mem(13,x ,y+1,pitch)];
f14= fA[f_mem (14,x ,y ,z+1,pitch, zInner)];
f15= fA[f_mem (15,x-1,y ,z+1,pitch, zInner)];
f16= fA[f_mem (16,x ,y-1,z+1,pitch, zInner)];
f17= fA[f_mem (17,x+1,y ,z+1,pitch, zInner)];
f18= fA[f_mem (18,x ,y+1,z+1,pitch, zInner)];
}
else{//normal nodes
f9 = fA[f_mem(9 ,x ,y ,z-1,pitch,zInner)];
f10= fA[f_mem(10,x-1,y ,z-1,pitch,zInner)];
f11= fA[f_mem(11,x ,y-1,z-1,pitch,zInner)];
f12= fA[f_mem(12,x+1,y ,z-1,pitch,zInner)];
f13= fA[f_mem(13,x ,y+1,z-1,pitch,zInner)];
f14= fA[f_mem(14,x ,y ,z+1,pitch,zInner)];
f15= fA[f_mem(15,x-1,y ,z+1,pitch,zInner)];
f16= fA[f_mem(16,x ,y-1,z+1,pitch,zInner)];
f17= fA[f_mem(17,x+1,y ,z+1,pitch,zInner)];
f18= fA[f_mem(18,x ,y+1,z+1,pitch,zInner)];
}//end normal nodes
if(im == 1 || im ==10){//BB
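//note: f0 is not rewritten here (the rest population is unchanged by bounce-back, so
//fB keeps its initialized value at solid nodes); update_bottom/update_top store it
//explicitly.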
fB[f_mem(1 ,x,y,z,pitch,zInner)] = f3 ;
fB[f_mem(2 ,x,y,z,pitch,zInner)] = f4 ;
fB[f_mem(3 ,x,y,z,pitch,zInner)] = f1 ;
fB[f_mem(4 ,x,y,z,pitch,zInner)] = f2 ;
fB[f_mem(5 ,x,y,z,pitch,zInner)] = f7 ;
fB[f_mem(6 ,x,y,z,pitch,zInner)] = f8 ;
fB[f_mem(7 ,x,y,z,pitch,zInner)] = f5 ;
fB[f_mem(8 ,x,y,z,pitch,zInner)] = f6 ;
fB[f_mem(9 ,x,y,z,pitch,zInner)] = f14;
fB[f_mem(10,x,y,z,pitch,zInner)] = f17;
fB[f_mem(11,x,y,z,pitch,zInner)] = f18;
fB[f_mem(12,x,y,z,pitch,zInner)] = f15;
fB[f_mem(13,x,y,z,pitch,zInner)] = f16;
fB[f_mem(14,x,y,z,pitch,zInner)] = f9 ;
fB[f_mem(15,x,y,z,pitch,zInner)] = f12;
fB[f_mem(16,x,y,z,pitch,zInner)] = f13;
fB[f_mem(17,x,y,z,pitch,zInner)] = f10;
fB[f_mem(18,x,y,z,pitch,zInner)] = f11;
}
else{
if(im == 100)//north outlet
{
f0 = fA[f_mem(0 ,x,y-1,z,pitch,zInner)];
f1 = fA[f_mem(1 ,x,y-1,z,pitch,zInner)];
f3 = fA[f_mem(3 ,x,y-1,z,pitch,zInner)];
f2 = fA[f_mem(2 ,x,y-1,z,pitch,zInner)];
f5 = fA[f_mem(5 ,x,y-1,z,pitch,zInner)];
f6 = fA[f_mem(6 ,x,y-1,z,pitch,zInner)];
f4 = fA[f_mem(4 ,x,y-1,z,pitch,zInner)];
f7 = fA[f_mem(7 ,x,y-1,z,pitch,zInner)];
f8 = fA[f_mem(8 ,x,y-1,z,pitch,zInner)];
f9 = fA[f_mem(9 ,x,y-1,z,pitch,zInner)];
f10= fA[f_mem(10,x,y-1,z,pitch,zInner)];
f11= fA[f_mem(11,x,y-1,z,pitch,zInner)];
f12= fA[f_mem(12,x,y-1,z,pitch,zInner)];
f13= fA[f_mem(13,x,y-1,z,pitch,zInner)];
f14= fA[f_mem(14,x,y-1,z,pitch,zInner)];
f15= fA[f_mem(15,x,y-1,z,pitch,zInner)];
f16= fA[f_mem(16,x,y-1,z,pitch,zInner)];
f17= fA[f_mem(17,x,y-1,z,pitch,zInner)];
f18= fA[f_mem(18,x,y-1,z,pitch,zInner)];
North_Extrap(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,1.0f);
}
if(im == 200)//south inlet
{
f0 = fA[f_mem(0 ,x,y+1,z,pitch,zInner)];
f1 = fA[f_mem(1 ,x,y+1,z,pitch,zInner)];
f3 = fA[f_mem(3 ,x,y+1,z,pitch,zInner)];
f2 = fA[f_mem(2 ,x,y+1,z,pitch,zInner)];
f5 = fA[f_mem(5 ,x,y+1,z,pitch,zInner)];
f6 = fA[f_mem(6 ,x,y+1,z,pitch,zInner)];
f4 = fA[f_mem(4 ,x,y+1,z,pitch,zInner)];
f7 = fA[f_mem(7 ,x,y+1,z,pitch,zInner)];
f8 = fA[f_mem(8 ,x,y+1,z,pitch,zInner)];
f9 = fA[f_mem(9 ,x,y+1,z,pitch,zInner)];
f10= fA[f_mem(10,x,y+1,z,pitch,zInner)];
f11= fA[f_mem(11,x,y+1,z,pitch,zInner)];
f12= fA[f_mem(12,x,y+1,z,pitch,zInner)];
f13= fA[f_mem(13,x,y+1,z,pitch,zInner)];
f14= fA[f_mem(14,x,y+1,z,pitch,zInner)];
f15= fA[f_mem(15,x,y+1,z,pitch,zInner)];
f16= fA[f_mem(16,x,y+1,z,pitch,zInner)];
f17= fA[f_mem(17,x,y+1,z,pitch,zInner)];
f18= fA[f_mem(18,x,y+1,z,pitch,zInner)];
South_Extrap(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,UMAX);
}
boundaries(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,GPU*(zInner+2)+1+z,im);
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
fB[f_mem(0 ,x,y,z,pitch,zInner)] = f0 ;
fB[f_mem(1 ,x,y,z,pitch,zInner)] = f1 ;
fB[f_mem(2 ,x,y,z,pitch,zInner)] = f2 ;
fB[f_mem(3 ,x,y,z,pitch,zInner)] = f3 ;
fB[f_mem(4 ,x,y,z,pitch,zInner)] = f4 ;
fB[f_mem(5 ,x,y,z,pitch,zInner)] = f5 ;
fB[f_mem(6 ,x,y,z,pitch,zInner)] = f6 ;
fB[f_mem(7 ,x,y,z,pitch,zInner)] = f7 ;
fB[f_mem(8 ,x,y,z,pitch,zInner)] = f8 ;
fB[f_mem(9 ,x,y,z,pitch,zInner)] = f9 ;
fB[f_mem(10,x,y,z,pitch,zInner)] = f10;
fB[f_mem(11,x,y,z,pitch,zInner)] = f11;
fB[f_mem(12,x,y,z,pitch,zInner)] = f12;
fB[f_mem(13,x,y,z,pitch,zInner)] = f13;
fB[f_mem(14,x,y,z,pitch,zInner)] = f14;
fB[f_mem(15,x,y,z,pitch,zInner)] = f15;
fB[f_mem(16,x,y,z,pitch,zInner)] = f16;
fB[f_mem(17,x,y,z,pitch,zInner)] = f17;
fB[f_mem(18,x,y,z,pitch,zInner)] = f18;
}
}
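//update_bottom performs the same stream/boundary/collide step for the bottom buffer
//plane of this GPU's subdomain (global z = GPU*(zInner+2)): populations arriving from
//below (9-13) are read from the temp buffer exchanged with the neighbouring GPU, and
//those arriving from above (14-18) from the first interior plane of f.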
__global__ void update_bottom(float* gA, float* gB, float* f, float* temp,
float omega, size_t pitch, int GPU, int zInner)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int j = x+y*pitch;//index on padded mem (pitch in elements)
int im = ImageFcn(x,y,GPU*(zInner+2));
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
f0 = gA [j];
f1 = gA [buff_mem(1 ,x-1,y ,pitch)];
f3 = gA [buff_mem(3 ,x+1,y ,pitch)];
f2 = gA [buff_mem(2 ,x ,y-1,pitch)];
f5 = gA [buff_mem(5 ,x-1,y-1,pitch)];
f6 = gA [buff_mem(6 ,x+1,y-1,pitch)];
f4 = gA [buff_mem(4 ,x ,y+1,pitch)];
f7 = gA [buff_mem(7 ,x+1,y+1,pitch)];
f8 = gA [buff_mem(8 ,x-1,y+1,pitch)];
f9 = temp[buff_mem(9 ,x ,y ,pitch)];
f10= temp[buff_mem(10,x-1,y ,pitch)];
f11= temp[buff_mem(11,x ,y-1,pitch)];
f12= temp[buff_mem(12,x+1,y ,pitch)];
f13= temp[buff_mem(13,x ,y+1,pitch)];
f14= f [f_mem (14,x ,y ,0,pitch, zInner)];
f15= f [f_mem (15,x-1,y ,0,pitch, zInner)];
f16= f [f_mem (16,x ,y-1,0,pitch, zInner)];
f17= f [f_mem (17,x+1,y ,0,pitch, zInner)];
f18= f [f_mem (18,x ,y+1,0,pitch, zInner)];
if(im == 1 || im ==10){//BB
gB[buff_mem(0 ,x,y,pitch)] = f0 ;
gB[buff_mem(1 ,x,y,pitch)] = f3 ;
gB[buff_mem(2 ,x,y,pitch)] = f4 ;
gB[buff_mem(3 ,x,y,pitch)] = f1 ;
gB[buff_mem(4 ,x,y,pitch)] = f2 ;
gB[buff_mem(5 ,x,y,pitch)] = f7 ;
gB[buff_mem(6 ,x,y,pitch)] = f8 ;
gB[buff_mem(7 ,x,y,pitch)] = f5 ;
gB[buff_mem(8 ,x,y,pitch)] = f6 ;
gB[buff_mem(9 ,x,y,pitch)] = f14;
gB[buff_mem(10,x,y,pitch)] = f17;
gB[buff_mem(11,x,y,pitch)] = f18;
gB[buff_mem(12,x,y,pitch)] = f15;
gB[buff_mem(13,x,y,pitch)] = f16;
gB[buff_mem(14,x,y,pitch)] = f9 ;
gB[buff_mem(15,x,y,pitch)] = f12;
gB[buff_mem(16,x,y,pitch)] = f13;
gB[buff_mem(17,x,y,pitch)] = f10;
gB[buff_mem(18,x,y,pitch)] = f11;
}
else{
if(im == 100)//north outlet
{
f0 = gA[buff_mem(0 ,x,y-1,pitch)];
f1 = gA[buff_mem(1 ,x,y-1,pitch)];
f3 = gA[buff_mem(3 ,x,y-1,pitch)];
f2 = gA[buff_mem(2 ,x,y-1,pitch)];
f5 = gA[buff_mem(5 ,x,y-1,pitch)];
f6 = gA[buff_mem(6 ,x,y-1,pitch)];
f4 = gA[buff_mem(4 ,x,y-1,pitch)];
f7 = gA[buff_mem(7 ,x,y-1,pitch)];
f8 = gA[buff_mem(8 ,x,y-1,pitch)];
f9 = gA[buff_mem(9 ,x,y-1,pitch)];
f10= gA[buff_mem(10,x,y-1,pitch)];
f11= gA[buff_mem(11,x,y-1,pitch)];
f12= gA[buff_mem(12,x,y-1,pitch)];
f13= gA[buff_mem(13,x,y-1,pitch)];
f14= gA[buff_mem(14,x,y-1,pitch)];
f15= gA[buff_mem(15,x,y-1,pitch)];
f16= gA[buff_mem(16,x,y-1,pitch)];
f17= gA[buff_mem(17,x,y-1,pitch)];
f18= gA[buff_mem(18,x,y-1,pitch)];
North_Extrap(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,1.0f);
}
if(im == 200)//south inlet
{
f0 = gA[buff_mem(0 ,x,y+1,pitch)];
f1 = gA[buff_mem(1 ,x,y+1,pitch)];
f3 = gA[buff_mem(3 ,x,y+1,pitch)];
f2 = gA[buff_mem(2 ,x,y+1,pitch)];
f5 = gA[buff_mem(5 ,x,y+1,pitch)];
f6 = gA[buff_mem(6 ,x,y+1,pitch)];
f4 = gA[buff_mem(4 ,x,y+1,pitch)];
f7 = gA[buff_mem(7 ,x,y+1,pitch)];
f8 = gA[buff_mem(8 ,x,y+1,pitch)];
f9 = gA[buff_mem(9 ,x,y+1,pitch)];
f10= gA[buff_mem(10,x,y+1,pitch)];
f11= gA[buff_mem(11,x,y+1,pitch)];
f12= gA[buff_mem(12,x,y+1,pitch)];
f13= gA[buff_mem(13,x,y+1,pitch)];
f14= gA[buff_mem(14,x,y+1,pitch)];
f15= gA[buff_mem(15,x,y+1,pitch)];
f16= gA[buff_mem(16,x,y+1,pitch)];
f17= gA[buff_mem(17,x,y+1,pitch)];
f18= gA[buff_mem(18,x,y+1,pitch)];
South_Extrap(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,UMAX);
}
boundaries(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,GPU*(zInner+2),im);
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
gB[buff_mem(0 ,x,y,pitch)] = f0 ;
gB[buff_mem(1 ,x,y,pitch)] = f1 ;
gB[buff_mem(2 ,x,y,pitch)] = f2 ;
gB[buff_mem(3 ,x,y,pitch)] = f3 ;
gB[buff_mem(4 ,x,y,pitch)] = f4 ;
gB[buff_mem(5 ,x,y,pitch)] = f5 ;
gB[buff_mem(6 ,x,y,pitch)] = f6 ;
gB[buff_mem(7 ,x,y,pitch)] = f7 ;
gB[buff_mem(8 ,x,y,pitch)] = f8 ;
gB[buff_mem(9 ,x,y,pitch)] = f9 ;
gB[buff_mem(10,x,y,pitch)] = f10;
gB[buff_mem(11,x,y,pitch)] = f11;
gB[buff_mem(12,x,y,pitch)] = f12;
gB[buff_mem(13,x,y,pitch)] = f13;
gB[buff_mem(14,x,y,pitch)] = f14;
gB[buff_mem(15,x,y,pitch)] = f15;
gB[buff_mem(16,x,y,pitch)] = f16;
gB[buff_mem(17,x,y,pitch)] = f17;
gB[buff_mem(18,x,y,pitch)] = f18;
}
// }
}
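//update_top is the mirror of update_bottom for the top buffer plane (global
//z = (GPU+1)*(zInner+2)-1): populations arriving from below are read from the last
//interior plane of f and those arriving from above from the temp buffer exchanged
//with the neighbouring GPU.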
__global__ void update_top(float* hA, float* hB, float* f, float* temp,
float omega, size_t pitch, int GPU, int zInner)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
//int z = (zInner+2)-1;//coord in GPU
int j = x+y*pitch;//index on padded mem (pitch in elements)
int im = ImageFcn(x,y,(GPU+1)*(zInner+2)-1);
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
f0 = hA[j];
f1 = hA [buff_mem(1 ,x-1,y ,pitch)];
f3 = hA [buff_mem(3 ,x+1,y ,pitch)];
f2 = hA [buff_mem(2 ,x ,y-1,pitch)];
f5 = hA [buff_mem(5 ,x-1,y-1,pitch)];
f6 = hA [buff_mem(6 ,x+1,y-1,pitch)];
f4 = hA [buff_mem(4 ,x ,y+1,pitch)];
f7 = hA [buff_mem(7 ,x+1,y+1,pitch)];
f8 = hA [buff_mem(8 ,x-1,y+1,pitch)];
f9 = f [f_mem (9 ,x ,y ,zInner-1,pitch, zInner)];
f10= f [f_mem (10,x-1,y ,zInner-1,pitch, zInner)];
f11= f [f_mem (11,x ,y-1,zInner-1,pitch, zInner)];
f12= f [f_mem (12,x+1,y ,zInner-1,pitch, zInner)];
f13= f [f_mem (13,x ,y+1,zInner-1,pitch, zInner)];
f14= temp[buff_mem(14,x ,y ,pitch)];
f15= temp[buff_mem(15,x-1,y ,pitch)];
f16= temp[buff_mem(16,x ,y-1,pitch)];
f17= temp[buff_mem(17,x+1,y ,pitch)];
f18= temp[buff_mem(18,x ,y+1,pitch)];
if(im == 1 || im ==10){//BB
hB[buff_mem(0 ,x,y,pitch)] = f0 ;
hB[buff_mem(1 ,x,y,pitch)] = f3 ;
hB[buff_mem(2 ,x,y,pitch)] = f4 ;
hB[buff_mem(3 ,x,y,pitch)] = f1 ;
hB[buff_mem(4 ,x,y,pitch)] = f2 ;
hB[buff_mem(5 ,x,y,pitch)] = f7 ;
hB[buff_mem(6 ,x,y,pitch)] = f8 ;
hB[buff_mem(7 ,x,y,pitch)] = f5 ;
hB[buff_mem(8 ,x,y,pitch)] = f6 ;
hB[buff_mem(9 ,x,y,pitch)] = f14;
hB[buff_mem(10,x,y,pitch)] = f17;
hB[buff_mem(11,x,y,pitch)] = f18;
hB[buff_mem(12,x,y,pitch)] = f15;
hB[buff_mem(13,x,y,pitch)] = f16;
hB[buff_mem(14,x,y,pitch)] = f9 ;
hB[buff_mem(15,x,y,pitch)] = f12;
hB[buff_mem(16,x,y,pitch)] = f13;
hB[buff_mem(17,x,y,pitch)] = f10;
hB[buff_mem(18,x,y,pitch)] = f11;
}
else{
if(im == 100)//north outlet
{
f0 = hA[buff_mem(0 ,x,y-1,pitch)];
f1 = hA[buff_mem(1 ,x,y-1,pitch)];
f3 = hA[buff_mem(3 ,x,y-1,pitch)];
f2 = hA[buff_mem(2 ,x,y-1,pitch)];
f5 = hA[buff_mem(5 ,x,y-1,pitch)];
f6 = hA[buff_mem(6 ,x,y-1,pitch)];
f4 = hA[buff_mem(4 ,x,y-1,pitch)];
f7 = hA[buff_mem(7 ,x,y-1,pitch)];
f8 = hA[buff_mem(8 ,x,y-1,pitch)];
f9 = hA[buff_mem(9 ,x,y-1,pitch)];
f10= hA[buff_mem(10,x,y-1,pitch)];
f11= hA[buff_mem(11,x,y-1,pitch)];
f12= hA[buff_mem(12,x,y-1,pitch)];
f13= hA[buff_mem(13,x,y-1,pitch)];
f14= hA[buff_mem(14,x,y-1,pitch)];
f15= hA[buff_mem(15,x,y-1,pitch)];
f16= hA[buff_mem(16,x,y-1,pitch)];
f17= hA[buff_mem(17,x,y-1,pitch)];
f18= hA[buff_mem(18,x,y-1,pitch)];
North_Extrap(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,1.0f);
}
if(im == 200)//south inlet
{
f0 = hA[buff_mem(0 ,x,y+1,pitch)];
f1 = hA[buff_mem(1 ,x,y+1,pitch)];
f3 = hA[buff_mem(3 ,x,y+1,pitch)];
f2 = hA[buff_mem(2 ,x,y+1,pitch)];
f5 = hA[buff_mem(5 ,x,y+1,pitch)];
f6 = hA[buff_mem(6 ,x,y+1,pitch)];
f4 = hA[buff_mem(4 ,x,y+1,pitch)];
f7 = hA[buff_mem(7 ,x,y+1,pitch)];
f8 = hA[buff_mem(8 ,x,y+1,pitch)];
f9 = hA[buff_mem(9 ,x,y+1,pitch)];
f10= hA[buff_mem(10,x,y+1,pitch)];
f11= hA[buff_mem(11,x,y+1,pitch)];
f12= hA[buff_mem(12,x,y+1,pitch)];
f13= hA[buff_mem(13,x,y+1,pitch)];
f14= hA[buff_mem(14,x,y+1,pitch)];
f15= hA[buff_mem(15,x,y+1,pitch)];
f16= hA[buff_mem(16,x,y+1,pitch)];
f17= hA[buff_mem(17,x,y+1,pitch)];
f18= hA[buff_mem(18,x,y+1,pitch)];
South_Extrap(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,UMAX);
}
boundaries(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,(GPU+1)*(zInner+2)-1,im);
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
hB[buff_mem(0 ,x,y,pitch)] = f0 ;
hB[buff_mem(1 ,x,y,pitch)] = f1 ;
hB[buff_mem(2 ,x,y,pitch)] = f2 ;
hB[buff_mem(3 ,x,y,pitch)] = f3 ;
hB[buff_mem(4 ,x,y,pitch)] = f4 ;
hB[buff_mem(5 ,x,y,pitch)] = f5 ;
hB[buff_mem(6 ,x,y,pitch)] = f6 ;
hB[buff_mem(7 ,x,y,pitch)] = f7 ;
hB[buff_mem(8 ,x,y,pitch)] = f8 ;
hB[buff_mem(9 ,x,y,pitch)] = f9 ;
hB[buff_mem(10,x,y,pitch)] = f10;
hB[buff_mem(11,x,y,pitch)] = f11;
hB[buff_mem(12,x,y,pitch)] = f12;
hB[buff_mem(13,x,y,pitch)] = f13;
hB[buff_mem(14,x,y,pitch)] = f14;
hB[buff_mem(15,x,y,pitch)] = f15;
hB[buff_mem(16,x,y,pitch)] = f16;
hB[buff_mem(17,x,y,pitch)] = f17;
hB[buff_mem(18,x,y,pitch)] = f18;
}
// }
}
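//update_inner_force is update_inner plus force evaluation: at im==10 (solid surface)
//nodes the momentum exchange of the bounced-back populations is accumulated into the
//shared arrays sumX/sumY/sumZ, reduced over the block (the halving loop assumes
//BLOCKSIZEX is a power of two), and added atomically to FX/FY/FZ at index Force_t.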
__global__ void update_inner_force(float* fA, float* fB, float* g, float* h,
float omega, size_t pitch, int GPU, int zInner,
float *FX, float *FY, float *FZ, int Force_t)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements)
int im = ImageFcn(x,y,GPU*(zInner+2)+1+z);
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
__shared__ float sumX[BLOCKSIZEX], sumY[BLOCKSIZEX], sumZ[BLOCKSIZEX];
__shared__ int check[1];
check[0] = 0;
    __syncthreads();
f0 = fA[j];
f1 = fA[f_mem (1 ,x-1,y ,z ,pitch, zInner)];
f3 = fA[f_mem (3 ,x+1,y ,z ,pitch, zInner)];
f2 = fA[f_mem (2 ,x ,y-1,z ,pitch, zInner)];
f5 = fA[f_mem (5 ,x-1,y-1,z ,pitch, zInner)];
f6 = fA[f_mem (6 ,x+1,y-1,z ,pitch, zInner)];
f4 = fA[f_mem (4 ,x ,y+1,z ,pitch, zInner)];
f7 = fA[f_mem (7 ,x+1,y+1,z ,pitch, zInner)];
f8 = fA[f_mem (8 ,x-1,y+1,z ,pitch, zInner)];
if(z==zInner-1){//top nodes need info from h
f9 = fA[f_mem (9 ,x ,y ,z-1,pitch, zInner)];
f10= fA[f_mem (10,x-1,y ,z-1,pitch, zInner)];
f11= fA[f_mem (11,x ,y-1,z-1,pitch, zInner)];
f12= fA[f_mem (12,x+1,y ,z-1,pitch, zInner)];
f13= fA[f_mem (13,x ,y+1,z-1,pitch, zInner)];
f14= h [buff_mem(14,x ,y ,pitch)];
f15= h [buff_mem(15,x-1,y ,pitch)];
f16= h [buff_mem(16,x ,y-1,pitch)];
f17= h [buff_mem(17,x+1,y ,pitch)];
f18= h [buff_mem(18,x ,y+1,pitch)];
}
else if(z==0){//bottom nodes need info from g
f9 = g [buff_mem(9 ,x ,y ,pitch)];
f10= g [buff_mem(10,x-1,y ,pitch)];
f11= g [buff_mem(11,x ,y-1,pitch)];
f12= g [buff_mem(12,x+1,y ,pitch)];
f13= g [buff_mem(13,x ,y+1,pitch)];
f14= fA[f_mem (14,x ,y ,z+1,pitch, zInner)];
f15= fA[f_mem (15,x-1,y ,z+1,pitch, zInner)];
f16= fA[f_mem (16,x ,y-1,z+1,pitch, zInner)];
f17= fA[f_mem (17,x+1,y ,z+1,pitch, zInner)];
f18= fA[f_mem (18,x ,y+1,z+1,pitch, zInner)];
}
else{//normal nodes
f9 = fA[f_mem(9 ,x ,y ,z-1,pitch,zInner)];
f10= fA[f_mem(10,x-1,y ,z-1,pitch,zInner)];
f11= fA[f_mem(11,x ,y-1,z-1,pitch,zInner)];
f12= fA[f_mem(12,x+1,y ,z-1,pitch,zInner)];
f13= fA[f_mem(13,x ,y+1,z-1,pitch,zInner)];
f14= fA[f_mem(14,x ,y ,z+1,pitch,zInner)];
f15= fA[f_mem(15,x-1,y ,z+1,pitch,zInner)];
f16= fA[f_mem(16,x ,y-1,z+1,pitch,zInner)];
f17= fA[f_mem(17,x+1,y ,z+1,pitch,zInner)];
f18= fA[f_mem(18,x ,y+1,z+1,pitch,zInner)];
}//end normal nodes
if(im == 1 || im ==10){//BB
if(im == 10){
check[0] = 1;
sumX[threadIdx.x]=2.f*f1-2.f*f3+2.f*f5+2.f*f8-2.f*f6;//-2.f*f7+2.f*f10-2.f*f12+2.f*f15-2.f*f17;
sumX[threadIdx.x]+=-2.f*f7+2.f*f10-2.f*f12+2.f*f15-2.f*f17;
sumY[threadIdx.x]=2.f*f2-2.f*f4+2.f*f5-2.f*f8+2.f*f6;//-2.f*f7+2.f*f11-2.f*f13+2.f*f16-2.f*f18;
sumY[threadIdx.x]+=-2.f*f7+2.f*f11-2.f*f13+2.f*f16-2.f*f18;
sumZ[threadIdx.x]=2.f*f9+2.f*f10+2.f*f11+2.f*f12+2.f*f13;//-2.f*f14-2.f*f15-2.f*f16-2.f*f17-2.f*f18;
sumZ[threadIdx.x]+=-2.f*f14-2.f*f15-2.f*f16-2.f*f17-2.f*f18;
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
}
fB[f_mem(1 ,x,y,z,pitch,zInner)] = f3 ;
fB[f_mem(2 ,x,y,z,pitch,zInner)] = f4 ;
fB[f_mem(3 ,x,y,z,pitch,zInner)] = f1 ;
fB[f_mem(4 ,x,y,z,pitch,zInner)] = f2 ;
fB[f_mem(5 ,x,y,z,pitch,zInner)] = f7 ;
fB[f_mem(6 ,x,y,z,pitch,zInner)] = f8 ;
fB[f_mem(7 ,x,y,z,pitch,zInner)] = f5 ;
fB[f_mem(8 ,x,y,z,pitch,zInner)] = f6 ;
fB[f_mem(9 ,x,y,z,pitch,zInner)] = f14;
fB[f_mem(10,x,y,z,pitch,zInner)] = f17;
fB[f_mem(11,x,y,z,pitch,zInner)] = f18;
fB[f_mem(12,x,y,z,pitch,zInner)] = f15;
fB[f_mem(13,x,y,z,pitch,zInner)] = f16;
fB[f_mem(14,x,y,z,pitch,zInner)] = f9 ;
fB[f_mem(15,x,y,z,pitch,zInner)] = f12;
fB[f_mem(16,x,y,z,pitch,zInner)] = f13;
fB[f_mem(17,x,y,z,pitch,zInner)] = f10;
fB[f_mem(18,x,y,z,pitch,zInner)] = f11;
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
if(im == 100)//north outlet
{
f0 = fA[f_mem(0 ,x,y-1,z,pitch,zInner)];
f1 = fA[f_mem(1 ,x,y-1,z,pitch,zInner)];
f3 = fA[f_mem(3 ,x,y-1,z,pitch,zInner)];
f2 = fA[f_mem(2 ,x,y-1,z,pitch,zInner)];
f5 = fA[f_mem(5 ,x,y-1,z,pitch,zInner)];
f6 = fA[f_mem(6 ,x,y-1,z,pitch,zInner)];
f4 = fA[f_mem(4 ,x,y-1,z,pitch,zInner)];
f7 = fA[f_mem(7 ,x,y-1,z,pitch,zInner)];
f8 = fA[f_mem(8 ,x,y-1,z,pitch,zInner)];
f9 = fA[f_mem(9 ,x,y-1,z,pitch,zInner)];
f10= fA[f_mem(10,x,y-1,z,pitch,zInner)];
f11= fA[f_mem(11,x,y-1,z,pitch,zInner)];
f12= fA[f_mem(12,x,y-1,z,pitch,zInner)];
f13= fA[f_mem(13,x,y-1,z,pitch,zInner)];
f14= fA[f_mem(14,x,y-1,z,pitch,zInner)];
f15= fA[f_mem(15,x,y-1,z,pitch,zInner)];
f16= fA[f_mem(16,x,y-1,z,pitch,zInner)];
f17= fA[f_mem(17,x,y-1,z,pitch,zInner)];
f18= fA[f_mem(18,x,y-1,z,pitch,zInner)];
North_Extrap(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,1.0f);
}
if(im == 200)//south inlet
{
f0 = fA[f_mem(0 ,x,y+1,z,pitch,zInner)];
f1 = fA[f_mem(1 ,x,y+1,z,pitch,zInner)];
f3 = fA[f_mem(3 ,x,y+1,z,pitch,zInner)];
f2 = fA[f_mem(2 ,x,y+1,z,pitch,zInner)];
f5 = fA[f_mem(5 ,x,y+1,z,pitch,zInner)];
f6 = fA[f_mem(6 ,x,y+1,z,pitch,zInner)];
f4 = fA[f_mem(4 ,x,y+1,z,pitch,zInner)];
f7 = fA[f_mem(7 ,x,y+1,z,pitch,zInner)];
f8 = fA[f_mem(8 ,x,y+1,z,pitch,zInner)];
f9 = fA[f_mem(9 ,x,y+1,z,pitch,zInner)];
f10= fA[f_mem(10,x,y+1,z,pitch,zInner)];
f11= fA[f_mem(11,x,y+1,z,pitch,zInner)];
f12= fA[f_mem(12,x,y+1,z,pitch,zInner)];
f13= fA[f_mem(13,x,y+1,z,pitch,zInner)];
f14= fA[f_mem(14,x,y+1,z,pitch,zInner)];
f15= fA[f_mem(15,x,y+1,z,pitch,zInner)];
f16= fA[f_mem(16,x,y+1,z,pitch,zInner)];
f17= fA[f_mem(17,x,y+1,z,pitch,zInner)];
f18= fA[f_mem(18,x,y+1,z,pitch,zInner)];
South_Extrap(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,UMAX);
}
boundaries(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,GPU*(zInner+2)+1+z,im);
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
fB[f_mem(0 ,x,y,z,pitch,zInner)] = f0 ;
fB[f_mem(1 ,x,y,z,pitch,zInner)] = f1 ;
fB[f_mem(2 ,x,y,z,pitch,zInner)] = f2 ;
fB[f_mem(3 ,x,y,z,pitch,zInner)] = f3 ;
fB[f_mem(4 ,x,y,z,pitch,zInner)] = f4 ;
fB[f_mem(5 ,x,y,z,pitch,zInner)] = f5 ;
fB[f_mem(6 ,x,y,z,pitch,zInner)] = f6 ;
fB[f_mem(7 ,x,y,z,pitch,zInner)] = f7 ;
fB[f_mem(8 ,x,y,z,pitch,zInner)] = f8 ;
fB[f_mem(9 ,x,y,z,pitch,zInner)] = f9 ;
fB[f_mem(10,x,y,z,pitch,zInner)] = f10;
fB[f_mem(11,x,y,z,pitch,zInner)] = f11;
fB[f_mem(12,x,y,z,pitch,zInner)] = f12;
fB[f_mem(13,x,y,z,pitch,zInner)] = f13;
fB[f_mem(14,x,y,z,pitch,zInner)] = f14;
fB[f_mem(15,x,y,z,pitch,zInner)] = f15;
fB[f_mem(16,x,y,z,pitch,zInner)] = f16;
fB[f_mem(17,x,y,z,pitch,zInner)] = f17;
fB[f_mem(18,x,y,z,pitch,zInner)] = f18;
}
    __syncthreads();
if(check[0] == 1){
//reduction for force
int nTotalThreads = blockDim.x;
while(nTotalThreads > 1){
int halfPoint = (nTotalThreads >> 1);
if(threadIdx.x < halfPoint){
sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint];
sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint];
sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint];
}
      __syncthreads();
nTotalThreads = halfPoint;
}
if(threadIdx.x == 0){
atomicAdd(&FX[Force_t],sumX[0]);
atomicAdd(&FY[Force_t],sumY[0]);
atomicAdd(&FZ[Force_t],sumZ[0]);
}
}
}
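//update_bottom_force combines update_bottom with the same momentum-exchange force
//accumulation as update_inner_force.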
__global__ void update_bottom_force(float* gA, float* gB, float* f, float* temp,
float omega, size_t pitch, int GPU, int zInner,
float *FX, float *FY, float *FZ, int Force_t)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int j = x+y*pitch;//index on padded mem (pitch in elements)
int im = ImageFcn(x,y,GPU*(zInner+2));
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
__shared__ float sumX[BLOCKSIZEX], sumY[BLOCKSIZEX], sumZ[BLOCKSIZEX];
__shared__ int check[1];
check[0] = 0;
    __syncthreads();
f0 = gA [j];
f1 = gA [buff_mem(1 ,x-1,y ,pitch)];
f3 = gA [buff_mem(3 ,x+1,y ,pitch)];
f2 = gA [buff_mem(2 ,x ,y-1,pitch)];
f5 = gA [buff_mem(5 ,x-1,y-1,pitch)];
f6 = gA [buff_mem(6 ,x+1,y-1,pitch)];
f4 = gA [buff_mem(4 ,x ,y+1,pitch)];
f7 = gA [buff_mem(7 ,x+1,y+1,pitch)];
f8 = gA [buff_mem(8 ,x-1,y+1,pitch)];
f9 = temp[buff_mem(9 ,x ,y ,pitch)];
f10= temp[buff_mem(10,x-1,y ,pitch)];
f11= temp[buff_mem(11,x ,y-1,pitch)];
f12= temp[buff_mem(12,x+1,y ,pitch)];
f13= temp[buff_mem(13,x ,y+1,pitch)];
f14= f [f_mem (14,x ,y ,0,pitch, zInner)];
f15= f [f_mem (15,x-1,y ,0,pitch, zInner)];
f16= f [f_mem (16,x ,y-1,0,pitch, zInner)];
f17= f [f_mem (17,x+1,y ,0,pitch, zInner)];
f18= f [f_mem (18,x ,y+1,0,pitch, zInner)];
if(im == 1 || im ==10){//BB
if(im == 10){
check[0] = 1;
sumX[threadIdx.x] =2.f*f1-2.f*f3+2.f*f5+2.f*f8-2.f*f6;//-2.f*f7+2.f*f10-2.f*f12+2.f*f15-2.f*f17;
sumX[threadIdx.x]+=-2.f*f7+2.f*f10-2.f*f12+2.f*f15-2.f*f17;
sumY[threadIdx.x] =2.f*f2-2.f*f4+2.f*f5-2.f*f8+2.f*f6;//-2.f*f7+2.f*f11-2.f*f13+2.f*f16-2.f*f18;
sumY[threadIdx.x]+=-2.f*f7+2.f*f11-2.f*f13+2.f*f16-2.f*f18;
sumZ[threadIdx.x] =2.f*f9+2.f*f10+2.f*f11+2.f*f12+2.f*f13;//-2.f*f14-2.f*f15-2.f*f16-2.f*f17-2.f*f18;
sumZ[threadIdx.x]+=-2.f*f14-2.f*f15-2.f*f16-2.f*f17-2.f*f18;
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
}
gB[buff_mem(0 ,x,y,pitch)] = f0 ;
gB[buff_mem(1 ,x,y,pitch)] = f3 ;
gB[buff_mem(2 ,x,y,pitch)] = f4 ;
gB[buff_mem(3 ,x,y,pitch)] = f1 ;
gB[buff_mem(4 ,x,y,pitch)] = f2 ;
gB[buff_mem(5 ,x,y,pitch)] = f7 ;
gB[buff_mem(6 ,x,y,pitch)] = f8 ;
gB[buff_mem(7 ,x,y,pitch)] = f5 ;
gB[buff_mem(8 ,x,y,pitch)] = f6 ;
gB[buff_mem(9 ,x,y,pitch)] = f14;
gB[buff_mem(10,x,y,pitch)] = f17;
gB[buff_mem(11,x,y,pitch)] = f18;
gB[buff_mem(12,x,y,pitch)] = f15;
gB[buff_mem(13,x,y,pitch)] = f16;
gB[buff_mem(14,x,y,pitch)] = f9 ;
gB[buff_mem(15,x,y,pitch)] = f12;
gB[buff_mem(16,x,y,pitch)] = f13;
gB[buff_mem(17,x,y,pitch)] = f10;
gB[buff_mem(18,x,y,pitch)] = f11;
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
if(im == 100)//north outlet
{
f0 = gA[buff_mem(0 ,x,y-1,pitch)];
f1 = gA[buff_mem(1 ,x,y-1,pitch)];
f3 = gA[buff_mem(3 ,x,y-1,pitch)];
f2 = gA[buff_mem(2 ,x,y-1,pitch)];
f5 = gA[buff_mem(5 ,x,y-1,pitch)];
f6 = gA[buff_mem(6 ,x,y-1,pitch)];
f4 = gA[buff_mem(4 ,x,y-1,pitch)];
f7 = gA[buff_mem(7 ,x,y-1,pitch)];
f8 = gA[buff_mem(8 ,x,y-1,pitch)];
f9 = gA[buff_mem(9 ,x,y-1,pitch)];
f10= gA[buff_mem(10,x,y-1,pitch)];
f11= gA[buff_mem(11,x,y-1,pitch)];
f12= gA[buff_mem(12,x,y-1,pitch)];
f13= gA[buff_mem(13,x,y-1,pitch)];
f14= gA[buff_mem(14,x,y-1,pitch)];
f15= gA[buff_mem(15,x,y-1,pitch)];
f16= gA[buff_mem(16,x,y-1,pitch)];
f17= gA[buff_mem(17,x,y-1,pitch)];
f18= gA[buff_mem(18,x,y-1,pitch)];
North_Extrap(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,1.0f);
}
if(im == 200)//south inlet
{
f0 = gA[buff_mem(0 ,x,y+1,pitch)];
f1 = gA[buff_mem(1 ,x,y+1,pitch)];
f3 = gA[buff_mem(3 ,x,y+1,pitch)];
f2 = gA[buff_mem(2 ,x,y+1,pitch)];
f5 = gA[buff_mem(5 ,x,y+1,pitch)];
f6 = gA[buff_mem(6 ,x,y+1,pitch)];
f4 = gA[buff_mem(4 ,x,y+1,pitch)];
f7 = gA[buff_mem(7 ,x,y+1,pitch)];
f8 = gA[buff_mem(8 ,x,y+1,pitch)];
f9 = gA[buff_mem(9 ,x,y+1,pitch)];
f10= gA[buff_mem(10,x,y+1,pitch)];
f11= gA[buff_mem(11,x,y+1,pitch)];
f12= gA[buff_mem(12,x,y+1,pitch)];
f13= gA[buff_mem(13,x,y+1,pitch)];
f14= gA[buff_mem(14,x,y+1,pitch)];
f15= gA[buff_mem(15,x,y+1,pitch)];
f16= gA[buff_mem(16,x,y+1,pitch)];
f17= gA[buff_mem(17,x,y+1,pitch)];
f18= gA[buff_mem(18,x,y+1,pitch)];
South_Extrap(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,UMAX);
}
boundaries(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,GPU*(zInner+2),im);
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
gB[buff_mem(0 ,x,y,pitch)] = f0 ;
gB[buff_mem(1 ,x,y,pitch)] = f1 ;
gB[buff_mem(2 ,x,y,pitch)] = f2 ;
gB[buff_mem(3 ,x,y,pitch)] = f3 ;
gB[buff_mem(4 ,x,y,pitch)] = f4 ;
gB[buff_mem(5 ,x,y,pitch)] = f5 ;
gB[buff_mem(6 ,x,y,pitch)] = f6 ;
gB[buff_mem(7 ,x,y,pitch)] = f7 ;
gB[buff_mem(8 ,x,y,pitch)] = f8 ;
gB[buff_mem(9 ,x,y,pitch)] = f9 ;
gB[buff_mem(10,x,y,pitch)] = f10;
gB[buff_mem(11,x,y,pitch)] = f11;
gB[buff_mem(12,x,y,pitch)] = f12;
gB[buff_mem(13,x,y,pitch)] = f13;
gB[buff_mem(14,x,y,pitch)] = f14;
gB[buff_mem(15,x,y,pitch)] = f15;
gB[buff_mem(16,x,y,pitch)] = f16;
gB[buff_mem(17,x,y,pitch)] = f17;
gB[buff_mem(18,x,y,pitch)] = f18;
}
    __syncthreads();
if(check[0] == 1){
//reduction for force
int nTotalThreads = blockDim.x;
while(nTotalThreads > 1){
int halfPoint = (nTotalThreads >> 1);
if(threadIdx.x < halfPoint){
sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint];
sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint];
sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint];
}
      __syncthreads();
nTotalThreads = halfPoint;
}
if(threadIdx.x == 0){
atomicAdd(&FX[Force_t],sumX[0]);
atomicAdd(&FY[Force_t],sumY[0]);
atomicAdd(&FZ[Force_t],sumZ[0]);
}
}
}
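//update_top_force combines update_top with the same momentum-exchange force
//accumulation as update_inner_force.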
__global__ void update_top_force(float* hA, float* hB, float* f, float* temp,
float omega, size_t pitch, int GPU, int zInner,//pitch in elements
float *FX, float *FY, float *FZ, int Force_t)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
//int z = (zInner+2)-1;//coord in GPU
int j = x+y*pitch;//index on padded mem (pitch in elements)
int im = ImageFcn(x,y,(GPU+1)*(zInner+2)-1);
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
__shared__ float sumX[BLOCKSIZEX], sumY[BLOCKSIZEX], sumZ[BLOCKSIZEX];
__shared__ int check[1];
check[0] = 0;
    __syncthreads();
f0 = hA[j];
f1 = hA [buff_mem(1 ,x-1,y ,pitch)];
f3 = hA [buff_mem(3 ,x+1,y ,pitch)];
f2 = hA [buff_mem(2 ,x ,y-1,pitch)];
f5 = hA [buff_mem(5 ,x-1,y-1,pitch)];
f6 = hA [buff_mem(6 ,x+1,y-1,pitch)];
f4 = hA [buff_mem(4 ,x ,y+1,pitch)];
f7 = hA [buff_mem(7 ,x+1,y+1,pitch)];
f8 = hA [buff_mem(8 ,x-1,y+1,pitch)];
f9 = f [f_mem (9 ,x ,y ,zInner-1,pitch, zInner)];
f10= f [f_mem (10,x-1,y ,zInner-1,pitch, zInner)];
f11= f [f_mem (11,x ,y-1,zInner-1,pitch, zInner)];
f12= f [f_mem (12,x+1,y ,zInner-1,pitch, zInner)];
f13= f [f_mem (13,x ,y+1,zInner-1,pitch, zInner)];
f14= temp[buff_mem(14,x ,y ,pitch)];
f15= temp[buff_mem(15,x-1,y ,pitch)];
f16= temp[buff_mem(16,x ,y-1,pitch)];
f17= temp[buff_mem(17,x+1,y ,pitch)];
f18= temp[buff_mem(18,x ,y+1,pitch)];
if(im == 1 || im ==10){//BB
if(im == 10){
check[0] = 1;
sumX[threadIdx.x] =2.f*f1-2.f*f3+2.f*f5+2.f*f8-2.f*f6;//-2.f*f7+2.f*f10-2.f*f12+2.f*f15-2.f*f17;
sumX[threadIdx.x]+=-2.f*f7+2.f*f10-2.f*f12+2.f*f15-2.f*f17;
sumY[threadIdx.x] =2.f*f2-2.f*f4+2.f*f5-2.f*f8+2.f*f6;//-2.f*f7+2.f*f11-2.f*f13+2.f*f16-2.f*f18;
sumY[threadIdx.x]+=-2.f*f7+2.f*f11-2.f*f13+2.f*f16-2.f*f18;
sumZ[threadIdx.x] =2.f*f9+2.f*f10+2.f*f11+2.f*f12+2.f*f13;//-2.f*f14-2.f*f15-2.f*f16-2.f*f17-2.f*f18;
sumZ[threadIdx.x]+=-2.f*f14-2.f*f15-2.f*f16-2.f*f17-2.f*f18;
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
}
hB[buff_mem(0 ,x,y,pitch)] = f0 ;
hB[buff_mem(1 ,x,y,pitch)] = f3 ;
hB[buff_mem(2 ,x,y,pitch)] = f4 ;
hB[buff_mem(3 ,x,y,pitch)] = f1 ;
hB[buff_mem(4 ,x,y,pitch)] = f2 ;
hB[buff_mem(5 ,x,y,pitch)] = f7 ;
hB[buff_mem(6 ,x,y,pitch)] = f8 ;
hB[buff_mem(7 ,x,y,pitch)] = f5 ;
hB[buff_mem(8 ,x,y,pitch)] = f6 ;
hB[buff_mem(9 ,x,y,pitch)] = f14;
hB[buff_mem(10,x,y,pitch)] = f17;
hB[buff_mem(11,x,y,pitch)] = f18;
hB[buff_mem(12,x,y,pitch)] = f15;
hB[buff_mem(13,x,y,pitch)] = f16;
hB[buff_mem(14,x,y,pitch)] = f9 ;
hB[buff_mem(15,x,y,pitch)] = f12;
hB[buff_mem(16,x,y,pitch)] = f13;
hB[buff_mem(17,x,y,pitch)] = f10;
hB[buff_mem(18,x,y,pitch)] = f11;
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
if(im == 100)//north outlet
{
f0 = hA[buff_mem(0 ,x,y-1,pitch)];
f1 = hA[buff_mem(1 ,x,y-1,pitch)];
f3 = hA[buff_mem(3 ,x,y-1,pitch)];
f2 = hA[buff_mem(2 ,x,y-1,pitch)];
f5 = hA[buff_mem(5 ,x,y-1,pitch)];
f6 = hA[buff_mem(6 ,x,y-1,pitch)];
f4 = hA[buff_mem(4 ,x,y-1,pitch)];
f7 = hA[buff_mem(7 ,x,y-1,pitch)];
f8 = hA[buff_mem(8 ,x,y-1,pitch)];
f9 = hA[buff_mem(9 ,x,y-1,pitch)];
f10= hA[buff_mem(10,x,y-1,pitch)];
f11= hA[buff_mem(11,x,y-1,pitch)];
f12= hA[buff_mem(12,x,y-1,pitch)];
f13= hA[buff_mem(13,x,y-1,pitch)];
f14= hA[buff_mem(14,x,y-1,pitch)];
f15= hA[buff_mem(15,x,y-1,pitch)];
f16= hA[buff_mem(16,x,y-1,pitch)];
f17= hA[buff_mem(17,x,y-1,pitch)];
f18= hA[buff_mem(18,x,y-1,pitch)];
North_Extrap(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,1.0f);
}
if(im == 200)//south inlet
{
f0 = hA[buff_mem(0 ,x,y+1,pitch)];
f1 = hA[buff_mem(1 ,x,y+1,pitch)];
f3 = hA[buff_mem(3 ,x,y+1,pitch)];
f2 = hA[buff_mem(2 ,x,y+1,pitch)];
f5 = hA[buff_mem(5 ,x,y+1,pitch)];
f6 = hA[buff_mem(6 ,x,y+1,pitch)];
f4 = hA[buff_mem(4 ,x,y+1,pitch)];
f7 = hA[buff_mem(7 ,x,y+1,pitch)];
f8 = hA[buff_mem(8 ,x,y+1,pitch)];
f9 = hA[buff_mem(9 ,x,y+1,pitch)];
f10= hA[buff_mem(10,x,y+1,pitch)];
f11= hA[buff_mem(11,x,y+1,pitch)];
f12= hA[buff_mem(12,x,y+1,pitch)];
f13= hA[buff_mem(13,x,y+1,pitch)];
f14= hA[buff_mem(14,x,y+1,pitch)];
f15= hA[buff_mem(15,x,y+1,pitch)];
f16= hA[buff_mem(16,x,y+1,pitch)];
f17= hA[buff_mem(17,x,y+1,pitch)];
f18= hA[buff_mem(18,x,y+1,pitch)];
South_Extrap(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,UMAX);
}
boundaries(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,(GPU+1)*(zInner+2)-1,im);
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
hB[buff_mem(0 ,x,y,pitch)] = f0 ;
hB[buff_mem(1 ,x,y,pitch)] = f1 ;
hB[buff_mem(2 ,x,y,pitch)] = f2 ;
hB[buff_mem(3 ,x,y,pitch)] = f3 ;
hB[buff_mem(4 ,x,y,pitch)] = f4 ;
hB[buff_mem(5 ,x,y,pitch)] = f5 ;
hB[buff_mem(6 ,x,y,pitch)] = f6 ;
hB[buff_mem(7 ,x,y,pitch)] = f7 ;
hB[buff_mem(8 ,x,y,pitch)] = f8 ;
hB[buff_mem(9 ,x,y,pitch)] = f9 ;
hB[buff_mem(10,x,y,pitch)] = f10;
hB[buff_mem(11,x,y,pitch)] = f11;
hB[buff_mem(12,x,y,pitch)] = f12;
hB[buff_mem(13,x,y,pitch)] = f13;
hB[buff_mem(14,x,y,pitch)] = f14;
hB[buff_mem(15,x,y,pitch)] = f15;
hB[buff_mem(16,x,y,pitch)] = f16;
hB[buff_mem(17,x,y,pitch)] = f17;
hB[buff_mem(18,x,y,pitch)] = f18;
}
syncthreads();
if(check[0] == 1){
//reduction for force
int nTotalThreads = blockDim.x;
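//the halving reduction assumes blockDim.x (BLOCKSIZEX) is a power of two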
while(nTotalThreads > 1){
int halfPoint = (nTotalThreads >> 1);
if(threadIdx.x < halfPoint){
sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint];
sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint];
sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint];
}
syncthreads();
nTotalThreads = halfPoint;
}
if(threadIdx.x == 0){
atomicAdd(&FX[Force_t],sumX[0]);
atomicAdd(&FY[Force_t],sumY[0]);
atomicAdd(&FZ[Force_t],sumZ[0]);
}
}
}
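//update_inner_LR: stream/collide the interior slices of the refined (LR) domain.
//The bottom interior slice (z==0) reads incoming populations from the g buffer,
//the top interior slice (z==zInner-1) from the h buffer; all other slices read
//only from fA.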
__global__ void update_inner_LR(float* fA, float* fB, float* g, float* h,
float omega, size_t pitch, int GPU, int zInner)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
int j = x+y*pitch+z*YLRDIM*pitch;//index on padded mem (pitch in elements)
int im = ImageFcnLR(LRX0+LRFACTOR*x,LRY0+LRFACTOR*y,LRZ0+LRFACTOR*(GPU*(zInner+2)+1+z));
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
f0 = fA[j];
f1 = fA[f_memLR (1 ,x-1,y ,z ,pitch, zInner)];
f3 = fA[f_memLR (3 ,x+1,y ,z ,pitch, zInner)];
f2 = fA[f_memLR (2 ,x ,y-1,z ,pitch, zInner)];
f5 = fA[f_memLR (5 ,x-1,y-1,z ,pitch, zInner)];
f6 = fA[f_memLR (6 ,x+1,y-1,z ,pitch, zInner)];
f4 = fA[f_memLR (4 ,x ,y+1,z ,pitch, zInner)];
f7 = fA[f_memLR (7 ,x+1,y+1,z ,pitch, zInner)];
f8 = fA[f_memLR (8 ,x-1,y+1,z ,pitch, zInner)];
if(z==zInner-1){//top nodes need info from h
f9 = fA[f_memLR (9 ,x ,y ,z-1,pitch, zInner)];
f10= fA[f_memLR (10,x-1,y ,z-1,pitch, zInner)];
f11= fA[f_memLR (11,x ,y-1,z-1,pitch, zInner)];
f12= fA[f_memLR (12,x+1,y ,z-1,pitch, zInner)];
f13= fA[f_memLR (13,x ,y+1,z-1,pitch, zInner)];
f14= h [buff_memLR(14,x ,y ,pitch)];
f15= h [buff_memLR(15,x-1,y ,pitch)];
f16= h [buff_memLR(16,x ,y-1,pitch)];
f17= h [buff_memLR(17,x+1,y ,pitch)];
f18= h [buff_memLR(18,x ,y+1,pitch)];
}
else if(z==0){//bottom nodes need info from g
f9 = g [buff_memLR(9 ,x ,y ,pitch)];
f10= g [buff_memLR(10,x-1,y ,pitch)];
f11= g [buff_memLR(11,x ,y-1,pitch)];
f12= g [buff_memLR(12,x+1,y ,pitch)];
f13= g [buff_memLR(13,x ,y+1,pitch)];
f14= fA[f_memLR (14,x ,y ,z+1,pitch, zInner)];
f15= fA[f_memLR (15,x-1,y ,z+1,pitch, zInner)];
f16= fA[f_memLR (16,x ,y-1,z+1,pitch, zInner)];
f17= fA[f_memLR (17,x+1,y ,z+1,pitch, zInner)];
f18= fA[f_memLR (18,x ,y+1,z+1,pitch, zInner)];
}
else{//normal nodes
f9 = fA[f_memLR(9 ,x ,y ,z-1,pitch,zInner)];//
f10= fA[f_memLR(10,x-1,y ,z-1,pitch,zInner)];//
f11= fA[f_memLR(11,x ,y-1,z-1,pitch,zInner)];//
f12= fA[f_memLR(12,x+1,y ,z-1,pitch,zInner)];//
f13= fA[f_memLR(13,x ,y+1,z-1,pitch,zInner)];//
f14= fA[f_memLR(14,x ,y ,z+1,pitch,zInner)];//
f15= fA[f_memLR(15,x-1,y ,z+1,pitch,zInner)];//
f16= fA[f_memLR(16,x ,y-1,z+1,pitch,zInner)];//
f17= fA[f_memLR(17,x+1,y ,z+1,pitch,zInner)];//
f18= fA[f_memLR(18,x ,y+1,z+1,pitch,zInner)];//
}//end normal nodes
if(im == 1 || im ==10){//BB
fB[f_memLR(1 ,x,y,z,pitch,zInner)] = f3 ;
fB[f_memLR(2 ,x,y,z,pitch,zInner)] = f4 ;
fB[f_memLR(3 ,x,y,z,pitch,zInner)] = f1 ;
fB[f_memLR(4 ,x,y,z,pitch,zInner)] = f2 ;
fB[f_memLR(5 ,x,y,z,pitch,zInner)] = f7 ;
fB[f_memLR(6 ,x,y,z,pitch,zInner)] = f8 ;
fB[f_memLR(7 ,x,y,z,pitch,zInner)] = f5 ;
fB[f_memLR(8 ,x,y,z,pitch,zInner)] = f6 ;
fB[f_memLR(9 ,x,y,z,pitch,zInner)] = f14;
fB[f_memLR(10,x,y,z,pitch,zInner)] = f17;
fB[f_memLR(11,x,y,z,pitch,zInner)] = f18;
fB[f_memLR(12,x,y,z,pitch,zInner)] = f15;
fB[f_memLR(13,x,y,z,pitch,zInner)] = f16;
fB[f_memLR(14,x,y,z,pitch,zInner)] = f9 ;
fB[f_memLR(15,x,y,z,pitch,zInner)] = f12;
fB[f_memLR(16,x,y,z,pitch,zInner)] = f13;
fB[f_memLR(17,x,y,z,pitch,zInner)] = f10;
fB[f_memLR(18,x,y,z,pitch,zInner)] = f11;
}
else{
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
fB[f_memLR(0 ,x,y,z,pitch,zInner)] = f0 ;
fB[f_memLR(1 ,x,y,z,pitch,zInner)] = f1 ;
fB[f_memLR(2 ,x,y,z,pitch,zInner)] = f2 ;
fB[f_memLR(3 ,x,y,z,pitch,zInner)] = f3 ;
fB[f_memLR(4 ,x,y,z,pitch,zInner)] = f4 ;
fB[f_memLR(5 ,x,y,z,pitch,zInner)] = f5 ;
fB[f_memLR(6 ,x,y,z,pitch,zInner)] = f6 ;
fB[f_memLR(7 ,x,y,z,pitch,zInner)] = f7 ;
fB[f_memLR(8 ,x,y,z,pitch,zInner)] = f8 ;
fB[f_memLR(9 ,x,y,z,pitch,zInner)] = f9 ;
fB[f_memLR(10,x,y,z,pitch,zInner)] = f10;
fB[f_memLR(11,x,y,z,pitch,zInner)] = f11;
fB[f_memLR(12,x,y,z,pitch,zInner)] = f12;
fB[f_memLR(13,x,y,z,pitch,zInner)] = f13;
fB[f_memLR(14,x,y,z,pitch,zInner)] = f14;
fB[f_memLR(15,x,y,z,pitch,zInner)] = f15;
fB[f_memLR(16,x,y,z,pitch,zInner)] = f16;
fB[f_memLR(17,x,y,z,pitch,zInner)] = f17;
fB[f_memLR(18,x,y,z,pitch,zInner)] = f18;
}
}
__global__ void update_bottom_LR(float* gA, float* gB, float* f, float* temp,
float omega, size_t pitch, int GPU, int zInner)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = (zInner+2)-1;
int j = x+y*pitch;//index on padded mem (pitch in elements)
//int im = ImageFcn(LRX0+LRFACTOR*x,LRY0+LRFACTOR*y,LRZ0+GPU*z*LRFACTOR);
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
float xcoord = LRX0+x*LRFACTOR;
float ycoord = LRY0+y*LRFACTOR;
float zcoord = LRZ0+GPU*LRFACTOR*z;
int im = ImageFcnLR(xcoord,ycoord,zcoord);
f0 = gA [j];
f1 = gA [buff_memLR(1 ,x-1,y ,pitch)];
f3 = gA [buff_memLR(3 ,x+1,y ,pitch)];
f2 = gA [buff_memLR(2 ,x ,y-1,pitch)];
f5 = gA [buff_memLR(5 ,x-1,y-1,pitch)];
f6 = gA [buff_memLR(6 ,x+1,y-1,pitch)];
f4 = gA [buff_memLR(4 ,x ,y+1,pitch)];
f7 = gA [buff_memLR(7 ,x+1,y+1,pitch)];
f8 = gA [buff_memLR(8 ,x-1,y+1,pitch)];
f9 = temp[buff_memLR(9 ,x ,y ,pitch)];
f10= temp[buff_memLR(10,x-1,y ,pitch)];
f11= temp[buff_memLR(11,x ,y-1,pitch)];
f12= temp[buff_memLR(12,x+1,y ,pitch)];
f13= temp[buff_memLR(13,x ,y+1,pitch)];
f14= f [f_memLR (14,x ,y ,0,pitch, zInner)];
f15= f [f_memLR (15,x-1,y ,0,pitch, zInner)];
f16= f [f_memLR (16,x ,y-1,0,pitch, zInner)];
f17= f [f_memLR (17,x+1,y ,0,pitch, zInner)];
f18= f [f_memLR (18,x ,y+1,0,pitch, zInner)];
if(im == 1 || im ==10){//BB
gB[buff_memLR(0 ,x,y,pitch)] = f0 ;
gB[buff_memLR(1 ,x,y,pitch)] = f3 ;
gB[buff_memLR(2 ,x,y,pitch)] = f4 ;
gB[buff_memLR(3 ,x,y,pitch)] = f1 ;
gB[buff_memLR(4 ,x,y,pitch)] = f2 ;
gB[buff_memLR(5 ,x,y,pitch)] = f7 ;
gB[buff_memLR(6 ,x,y,pitch)] = f8 ;
gB[buff_memLR(7 ,x,y,pitch)] = f5 ;
gB[buff_memLR(8 ,x,y,pitch)] = f6 ;
gB[buff_memLR(9 ,x,y,pitch)] = f14;
gB[buff_memLR(10,x,y,pitch)] = f17;
gB[buff_memLR(11,x,y,pitch)] = f18;
gB[buff_memLR(12,x,y,pitch)] = f15;
gB[buff_memLR(13,x,y,pitch)] = f16;
gB[buff_memLR(14,x,y,pitch)] = f9 ;
gB[buff_memLR(15,x,y,pitch)] = f12;
gB[buff_memLR(16,x,y,pitch)] = f13;
gB[buff_memLR(17,x,y,pitch)] = f10;
gB[buff_memLR(18,x,y,pitch)] = f11;
}
else{
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
gB[buff_memLR(0 ,x,y,pitch)] = f0 ;
gB[buff_memLR(1 ,x,y,pitch)] = f1 ;
gB[buff_memLR(2 ,x,y,pitch)] = f2 ;
gB[buff_memLR(3 ,x,y,pitch)] = f3 ;
gB[buff_memLR(4 ,x,y,pitch)] = f4 ;
gB[buff_memLR(5 ,x,y,pitch)] = f5 ;
gB[buff_memLR(6 ,x,y,pitch)] = f6 ;
gB[buff_memLR(7 ,x,y,pitch)] = f7 ;
gB[buff_memLR(8 ,x,y,pitch)] = f8 ;
gB[buff_memLR(9 ,x,y,pitch)] = f9 ;
gB[buff_memLR(10,x,y,pitch)] = f10;
gB[buff_memLR(11,x,y,pitch)] = f11;
gB[buff_memLR(12,x,y,pitch)] = f12;
gB[buff_memLR(13,x,y,pitch)] = f13;
gB[buff_memLR(14,x,y,pitch)] = f14;
gB[buff_memLR(15,x,y,pitch)] = f15;
gB[buff_memLR(16,x,y,pitch)] = f16;
gB[buff_memLR(17,x,y,pitch)] = f17;
gB[buff_memLR(18,x,y,pitch)] = f18;
}
}
__global__ void update_top_LR(float* hA, float* hB, float* f, float* temp,
float omega, size_t pitch, int GPU, int zInner)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = (GPU+1)*(zInner+2)-1;//physical coord in LR region
int j = x+y*pitch;//index on padded mem (pitch in elements)
float xcoord = LRX0+x*LRFACTOR;
float ycoord = LRY0+y*LRFACTOR;
float zcoord = LRZ0+LRFACTOR*z;
int im = ImageFcnLR(xcoord,ycoord,zcoord);
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
f0 = hA[j];
f1 = hA [buff_memLR(1 ,x-1,y ,pitch)];
f3 = hA [buff_memLR(3 ,x+1,y ,pitch)];
f2 = hA [buff_memLR(2 ,x ,y-1,pitch)];
f5 = hA [buff_memLR(5 ,x-1,y-1,pitch)];
f6 = hA [buff_memLR(6 ,x+1,y-1,pitch)];
f4 = hA [buff_memLR(4 ,x ,y+1,pitch)];
f7 = hA [buff_memLR(7 ,x+1,y+1,pitch)];
f8 = hA [buff_memLR(8 ,x-1,y+1,pitch)];
// f9 = hA [buff_memLR(9 ,x ,y ,pitch)];
// f10= hA [buff_memLR(10,x-1,y ,pitch)];
// f11= hA [buff_memLR(11,x ,y-1,pitch)];
// f12= hA [buff_memLR(12,x+1,y ,pitch)];
// f13= hA [buff_memLR(13,x ,y+1,pitch)];
// f14= hA [buff_memLR(9 ,x ,y ,pitch)];
// f15= hA [buff_memLR(10,x-1,y ,pitch)];
// f16= hA [buff_memLR(11,x ,y-1,pitch)];
// f17= hA [buff_memLR(12,x+1,y ,pitch)];
// f18= hA [buff_memLR(13,x ,y+1,pitch)];
f9 = f [f_memLR (9 ,x ,y ,zInner-1,pitch, zInner)];
f10= f [f_memLR (10,x-1,y ,zInner-1,pitch, zInner)];
f11= f [f_memLR (11,x ,y-1,zInner-1,pitch, zInner)];
f12= f [f_memLR (12,x+1,y ,zInner-1,pitch, zInner)];
f13= f [f_memLR (13,x ,y+1,zInner-1,pitch, zInner)];
f14= temp[buff_memLR(14,x ,y ,pitch)];
f15= temp[buff_memLR(15,x-1,y ,pitch)];
f16= temp[buff_memLR(16,x ,y-1,pitch)];
f17= temp[buff_memLR(17,x+1,y ,pitch)];
f18= temp[buff_memLR(18,x ,y+1,pitch)];
if(im == 1 || im ==10){//BB
hB[buff_memLR(0 ,x,y,pitch)] = f0 ;
hB[buff_memLR(1 ,x,y,pitch)] = f3 ;
hB[buff_memLR(2 ,x,y,pitch)] = f4 ;
hB[buff_memLR(3 ,x,y,pitch)] = f1 ;
hB[buff_memLR(4 ,x,y,pitch)] = f2 ;
hB[buff_memLR(5 ,x,y,pitch)] = f7 ;
hB[buff_memLR(6 ,x,y,pitch)] = f8 ;
hB[buff_memLR(7 ,x,y,pitch)] = f5 ;
hB[buff_memLR(8 ,x,y,pitch)] = f6 ;
hB[buff_memLR(9 ,x,y,pitch)] = f14;
hB[buff_memLR(10,x,y,pitch)] = f17;
hB[buff_memLR(11,x,y,pitch)] = f18;
hB[buff_memLR(12,x,y,pitch)] = f15;
hB[buff_memLR(13,x,y,pitch)] = f16;
hB[buff_memLR(14,x,y,pitch)] = f9 ;
hB[buff_memLR(15,x,y,pitch)] = f12;
hB[buff_memLR(16,x,y,pitch)] = f13;
hB[buff_memLR(17,x,y,pitch)] = f10;
hB[buff_memLR(18,x,y,pitch)] = f11;
}
else{
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
hB[buff_memLR(0 ,x,y,pitch)] = f0 ;
hB[buff_memLR(1 ,x,y,pitch)] = f1 ;
hB[buff_memLR(2 ,x,y,pitch)] = f2 ;
hB[buff_memLR(3 ,x,y,pitch)] = f3 ;
hB[buff_memLR(4 ,x,y,pitch)] = f4 ;
hB[buff_memLR(5 ,x,y,pitch)] = f5 ;
hB[buff_memLR(6 ,x,y,pitch)] = f6 ;
hB[buff_memLR(7 ,x,y,pitch)] = f7 ;
hB[buff_memLR(8 ,x,y,pitch)] = f8 ;
hB[buff_memLR(9 ,x,y,pitch)] = f9 ;
hB[buff_memLR(10,x,y,pitch)] = f10;
hB[buff_memLR(11,x,y,pitch)] = f11;
hB[buff_memLR(12,x,y,pitch)] = f12;
hB[buff_memLR(13,x,y,pitch)] = f13;
hB[buff_memLR(14,x,y,pitch)] = f14;
hB[buff_memLR(15,x,y,pitch)] = f15;
hB[buff_memLR(16,x,y,pitch)] = f16;
hB[buff_memLR(17,x,y,pitch)] = f17;
hB[buff_memLR(18,x,y,pitch)] = f18;
}
}
__global__ void update_inner_LR_force(float* fA, float* fB, float* g, float* h,
float omega, size_t pitch, int GPU, int zInner,
float *FX, float *FY, float *FZ, int Force_t)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
int j = x+y*pitch+z*YLRDIM*pitch;//index on padded mem (pitch in elements)
int im = ImageFcnLR(LRX0+LRFACTOR*x,LRY0+LRFACTOR*y,LRZ0+LRFACTOR*(GPU*(zInner+2)+1+z));
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
__shared__ float sumX[BLOCKSIZEX], sumY[BLOCKSIZEX], sumZ[BLOCKSIZEX];
__shared__ int check[1];
check[0] = 0;
syncthreads();
f0 = fA[j];
f1 = fA[f_memLR (1 ,x-1,y ,z ,pitch, zInner)];
f3 = fA[f_memLR (3 ,x+1,y ,z ,pitch, zInner)];
f2 = fA[f_memLR (2 ,x ,y-1,z ,pitch, zInner)];
f5 = fA[f_memLR (5 ,x-1,y-1,z ,pitch, zInner)];
f6 = fA[f_memLR (6 ,x+1,y-1,z ,pitch, zInner)];
f4 = fA[f_memLR (4 ,x ,y+1,z ,pitch, zInner)];
f7 = fA[f_memLR (7 ,x+1,y+1,z ,pitch, zInner)];
f8 = fA[f_memLR (8 ,x-1,y+1,z ,pitch, zInner)];
if(z==zInner-1){//top nodes need info from h
f9 = fA[f_memLR (9 ,x ,y ,z-1,pitch, zInner)];
f10= fA[f_memLR (10,x-1,y ,z-1,pitch, zInner)];
f11= fA[f_memLR (11,x ,y-1,z-1,pitch, zInner)];
f12= fA[f_memLR (12,x+1,y ,z-1,pitch, zInner)];
f13= fA[f_memLR (13,x ,y+1,z-1,pitch, zInner)];
f14= h [buff_memLR(14,x ,y ,pitch)];
f15= h [buff_memLR(15,x-1,y ,pitch)];
f16= h [buff_memLR(16,x ,y-1,pitch)];
f17= h [buff_memLR(17,x+1,y ,pitch)];
f18= h [buff_memLR(18,x ,y+1,pitch)];
}
else if(z==0){//bottom nodes need info from g
f9 = g [buff_memLR(9 ,x ,y ,pitch)];
f10= g [buff_memLR(10,x-1,y ,pitch)];
f11= g [buff_memLR(11,x ,y-1,pitch)];
f12= g [buff_memLR(12,x+1,y ,pitch)];
f13= g [buff_memLR(13,x ,y+1,pitch)];
f14= fA[f_memLR (14,x ,y ,z+1,pitch, zInner)];
f15= fA[f_memLR (15,x-1,y ,z+1,pitch, zInner)];
f16= fA[f_memLR (16,x ,y-1,z+1,pitch, zInner)];
f17= fA[f_memLR (17,x+1,y ,z+1,pitch, zInner)];
f18= fA[f_memLR (18,x ,y+1,z+1,pitch, zInner)];
}
else{//normal nodes
f9 = fA[f_memLR(9 ,x ,y ,z-1,pitch,zInner)];//
f10= fA[f_memLR(10,x-1,y ,z-1,pitch,zInner)];//
f11= fA[f_memLR(11,x ,y-1,z-1,pitch,zInner)];//
f12= fA[f_memLR(12,x+1,y ,z-1,pitch,zInner)];//
f13= fA[f_memLR(13,x ,y+1,z-1,pitch,zInner)];//
f14= fA[f_memLR(14,x ,y ,z+1,pitch,zInner)];//
f15= fA[f_memLR(15,x-1,y ,z+1,pitch,zInner)];//
f16= fA[f_memLR(16,x ,y-1,z+1,pitch,zInner)];//
f17= fA[f_memLR(17,x+1,y ,z+1,pitch,zInner)];//
f18= fA[f_memLR(18,x ,y+1,z+1,pitch,zInner)];//
}//end normal nodes
if(im == 1 || im ==10){//BB
if(im == 10){
check[0] = 1;
sumX[threadIdx.x]=2.f*f1-2.f*f3+2.f*f5+2.f*f8-2.f*f6;//-2.f*f7+2.f*f10-2.f*f12+2.f*f15-2.f*f17;
sumX[threadIdx.x]+=-2.f*f7+2.f*f10-2.f*f12+2.f*f15-2.f*f17;
sumY[threadIdx.x]=2.f*f2-2.f*f4+2.f*f5-2.f*f8+2.f*f6;//-2.f*f7+2.f*f11-2.f*f13+2.f*f16-2.f*f18;
sumY[threadIdx.x]+=-2.f*f7+2.f*f11-2.f*f13+2.f*f16-2.f*f18;
sumZ[threadIdx.x]=2.f*f9+2.f*f10+2.f*f11+2.f*f12+2.f*f13;//-2.f*f14-2.f*f15-2.f*f16-2.f*f17-2.f*f18;
sumZ[threadIdx.x]+=-2.f*f14-2.f*f15-2.f*f16-2.f*f17-2.f*f18;
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
}
fB[f_memLR(1 ,x,y,z,pitch,zInner)] = f3 ;
fB[f_memLR(2 ,x,y,z,pitch,zInner)] = f4 ;
fB[f_memLR(3 ,x,y,z,pitch,zInner)] = f1 ;
fB[f_memLR(4 ,x,y,z,pitch,zInner)] = f2 ;
fB[f_memLR(5 ,x,y,z,pitch,zInner)] = f7 ;
fB[f_memLR(6 ,x,y,z,pitch,zInner)] = f8 ;
fB[f_memLR(7 ,x,y,z,pitch,zInner)] = f5 ;
fB[f_memLR(8 ,x,y,z,pitch,zInner)] = f6 ;
fB[f_memLR(9 ,x,y,z,pitch,zInner)] = f14;
fB[f_memLR(10,x,y,z,pitch,zInner)] = f17;
fB[f_memLR(11,x,y,z,pitch,zInner)] = f18;
fB[f_memLR(12,x,y,z,pitch,zInner)] = f15;
fB[f_memLR(13,x,y,z,pitch,zInner)] = f16;
fB[f_memLR(14,x,y,z,pitch,zInner)] = f9 ;
fB[f_memLR(15,x,y,z,pitch,zInner)] = f12;
fB[f_memLR(16,x,y,z,pitch,zInner)] = f13;
fB[f_memLR(17,x,y,z,pitch,zInner)] = f10;
fB[f_memLR(18,x,y,z,pitch,zInner)] = f11;
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
fB[f_memLR(0 ,x,y,z,pitch,zInner)] = f0 ;
fB[f_memLR(1 ,x,y,z,pitch,zInner)] = f1 ;
fB[f_memLR(2 ,x,y,z,pitch,zInner)] = f2 ;
fB[f_memLR(3 ,x,y,z,pitch,zInner)] = f3 ;
fB[f_memLR(4 ,x,y,z,pitch,zInner)] = f4 ;
fB[f_memLR(5 ,x,y,z,pitch,zInner)] = f5 ;
fB[f_memLR(6 ,x,y,z,pitch,zInner)] = f6 ;
fB[f_memLR(7 ,x,y,z,pitch,zInner)] = f7 ;
fB[f_memLR(8 ,x,y,z,pitch,zInner)] = f8 ;
fB[f_memLR(9 ,x,y,z,pitch,zInner)] = f9 ;
fB[f_memLR(10,x,y,z,pitch,zInner)] = f10;
fB[f_memLR(11,x,y,z,pitch,zInner)] = f11;
fB[f_memLR(12,x,y,z,pitch,zInner)] = f12;
fB[f_memLR(13,x,y,z,pitch,zInner)] = f13;
fB[f_memLR(14,x,y,z,pitch,zInner)] = f14;
fB[f_memLR(15,x,y,z,pitch,zInner)] = f15;
fB[f_memLR(16,x,y,z,pitch,zInner)] = f16;
fB[f_memLR(17,x,y,z,pitch,zInner)] = f17;
fB[f_memLR(18,x,y,z,pitch,zInner)] = f18;
}
syncthreads();
if(check[0] == 1){
//reduction for force
int nTotalThreads = blockDim.x;
while(nTotalThreads > 1){
int halfPoint = (nTotalThreads >> 1);
if(threadIdx.x < halfPoint){
sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint];
sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint];
sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint];
}
syncthreads();
nTotalThreads = halfPoint;
}
if(threadIdx.x == 0){
atomicAdd(&FX[Force_t],sumX[0]);
atomicAdd(&FY[Force_t],sumY[0]);
atomicAdd(&FZ[Force_t],sumZ[0]);
}
}
}
__global__ void update_bottom_LR_force(float* gA, float* gB, float* f, float* temp,
float omega, size_t pitch, int GPU, int zInner,
float *FX, float *FY, float *FZ, int Force_t)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = (zInner+2)-1;
int j = x+y*pitch;//index on padded mem (pitch in elements)
//int im = ImageFcn(LRX0+LRFACTOR*x,LRY0+LRFACTOR*y,LRZ0+GPU*z*LRFACTOR);
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
float xcoord = LRX0+x*LRFACTOR;
float ycoord = LRY0+y*LRFACTOR;
float zcoord = LRZ0+GPU*LRFACTOR*z;
int im = ImageFcnLR(xcoord,ycoord,zcoord);
__shared__ float sumX[BLOCKSIZEX], sumY[BLOCKSIZEX], sumZ[BLOCKSIZEX];
__shared__ int check[1];
check[0] = 0;
syncthreads();
f0 = gA [j];
f1 = gA [buff_memLR(1 ,x-1,y ,pitch)];
f3 = gA [buff_memLR(3 ,x+1,y ,pitch)];
f2 = gA [buff_memLR(2 ,x ,y-1,pitch)];
f5 = gA [buff_memLR(5 ,x-1,y-1,pitch)];
f6 = gA [buff_memLR(6 ,x+1,y-1,pitch)];
f4 = gA [buff_memLR(4 ,x ,y+1,pitch)];
f7 = gA [buff_memLR(7 ,x+1,y+1,pitch)];
f8 = gA [buff_memLR(8 ,x-1,y+1,pitch)];
f9 = temp[buff_memLR(9 ,x ,y ,pitch)];
f10= temp[buff_memLR(10,x-1,y ,pitch)];
f11= temp[buff_memLR(11,x ,y-1,pitch)];
f12= temp[buff_memLR(12,x+1,y ,pitch)];
f13= temp[buff_memLR(13,x ,y+1,pitch)];
f14= f [f_memLR (14,x ,y ,0,pitch, zInner)];
f15= f [f_memLR (15,x-1,y ,0,pitch, zInner)];
f16= f [f_memLR (16,x ,y-1,0,pitch, zInner)];
f17= f [f_memLR (17,x+1,y ,0,pitch, zInner)];
f18= f [f_memLR (18,x ,y+1,0,pitch, zInner)];
if(im == 1 || im ==10){//BB
if(im == 10){
check[0] = 1;
sumX[threadIdx.x]=2.f*f1-2.f*f3+2.f*f5+2.f*f8-2.f*f6;//-2.f*f7+2.f*f10-2.f*f12+2.f*f15-2.f*f17;
sumX[threadIdx.x]+=-2.f*f7+2.f*f10-2.f*f12+2.f*f15-2.f*f17;
sumY[threadIdx.x]=2.f*f2-2.f*f4+2.f*f5-2.f*f8+2.f*f6;//-2.f*f7+2.f*f11-2.f*f13+2.f*f16-2.f*f18;
sumY[threadIdx.x]+=-2.f*f7+2.f*f11-2.f*f13+2.f*f16-2.f*f18;
sumZ[threadIdx.x]=2.f*f9+2.f*f10+2.f*f11+2.f*f12+2.f*f13;//-2.f*f14-2.f*f15-2.f*f16-2.f*f17-2.f*f18;
sumZ[threadIdx.x]+=-2.f*f14-2.f*f15-2.f*f16-2.f*f17-2.f*f18;
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
}
gB[buff_memLR(0 ,x,y,pitch)] = f0 ;
gB[buff_memLR(1 ,x,y,pitch)] = f3 ;
gB[buff_memLR(2 ,x,y,pitch)] = f4 ;
gB[buff_memLR(3 ,x,y,pitch)] = f1 ;
gB[buff_memLR(4 ,x,y,pitch)] = f2 ;
gB[buff_memLR(5 ,x,y,pitch)] = f7 ;
gB[buff_memLR(6 ,x,y,pitch)] = f8 ;
gB[buff_memLR(7 ,x,y,pitch)] = f5 ;
gB[buff_memLR(8 ,x,y,pitch)] = f6 ;
gB[buff_memLR(9 ,x,y,pitch)] = f14;
gB[buff_memLR(10,x,y,pitch)] = f17;
gB[buff_memLR(11,x,y,pitch)] = f18;
gB[buff_memLR(12,x,y,pitch)] = f15;
gB[buff_memLR(13,x,y,pitch)] = f16;
gB[buff_memLR(14,x,y,pitch)] = f9 ;
gB[buff_memLR(15,x,y,pitch)] = f12;
gB[buff_memLR(16,x,y,pitch)] = f13;
gB[buff_memLR(17,x,y,pitch)] = f10;
gB[buff_memLR(18,x,y,pitch)] = f11;
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
gB[buff_memLR(0 ,x,y,pitch)] = f0 ;
gB[buff_memLR(1 ,x,y,pitch)] = f1 ;
gB[buff_memLR(2 ,x,y,pitch)] = f2 ;
gB[buff_memLR(3 ,x,y,pitch)] = f3 ;
gB[buff_memLR(4 ,x,y,pitch)] = f4 ;
gB[buff_memLR(5 ,x,y,pitch)] = f5 ;
gB[buff_memLR(6 ,x,y,pitch)] = f6 ;
gB[buff_memLR(7 ,x,y,pitch)] = f7 ;
gB[buff_memLR(8 ,x,y,pitch)] = f8 ;
gB[buff_memLR(9 ,x,y,pitch)] = f9 ;
gB[buff_memLR(10,x,y,pitch)] = f10;
gB[buff_memLR(11,x,y,pitch)] = f11;
gB[buff_memLR(12,x,y,pitch)] = f12;
gB[buff_memLR(13,x,y,pitch)] = f13;
gB[buff_memLR(14,x,y,pitch)] = f14;
gB[buff_memLR(15,x,y,pitch)] = f15;
gB[buff_memLR(16,x,y,pitch)] = f16;
gB[buff_memLR(17,x,y,pitch)] = f17;
gB[buff_memLR(18,x,y,pitch)] = f18;
}
syncthreads();
if(check[0] == 1){
//reduction for force
int nTotalThreads = blockDim.x;
while(nTotalThreads > 1){
int halfPoint = (nTotalThreads >> 1);
if(threadIdx.x < halfPoint){
sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint];
sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint];
sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint];
}
syncthreads();
nTotalThreads = halfPoint;
}
if(threadIdx.x == 0){
atomicAdd(&FX[Force_t],sumX[0]);
atomicAdd(&FY[Force_t],sumY[0]);
atomicAdd(&FZ[Force_t],sumZ[0]);
}
}
}
__global__ void update_top_LR_force(float* hA, float* hB, float* f, float* temp,
float omega, size_t pitch, int GPU, int zInner,
float *FX, float *FY, float *FZ, int Force_t)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = (GPU+1)*(zInner+2)-1;//physical coord in LR region
int j = x+y*pitch;//index on padded mem (pitch in elements)
float xcoord = LRX0+x*LRFACTOR;
float ycoord = LRY0+y*LRFACTOR;
float zcoord = LRZ0+LRFACTOR*z;
int im = ImageFcnLR(xcoord,ycoord,zcoord);
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
__shared__ float sumX[BLOCKSIZEX], sumY[BLOCKSIZEX], sumZ[BLOCKSIZEX];
__shared__ int check[1];
check[0] = 0;
syncthreads();
f0 = hA[j];
f1 = hA [buff_memLR(1 ,x-1,y ,pitch)];
f3 = hA [buff_memLR(3 ,x+1,y ,pitch)];
f2 = hA [buff_memLR(2 ,x ,y-1,pitch)];
f5 = hA [buff_memLR(5 ,x-1,y-1,pitch)];
f6 = hA [buff_memLR(6 ,x+1,y-1,pitch)];
f4 = hA [buff_memLR(4 ,x ,y+1,pitch)];
f7 = hA [buff_memLR(7 ,x+1,y+1,pitch)];
f8 = hA [buff_memLR(8 ,x-1,y+1,pitch)];
f9 = f [f_memLR (9 ,x ,y ,zInner-1,pitch, zInner)];
f10= f [f_memLR (10,x-1,y ,zInner-1,pitch, zInner)];
f11= f [f_memLR (11,x ,y-1,zInner-1,pitch, zInner)];
f12= f [f_memLR (12,x+1,y ,zInner-1,pitch, zInner)];
f13= f [f_memLR (13,x ,y+1,zInner-1,pitch, zInner)];
f14= temp[buff_memLR(14,x ,y ,pitch)];
f15= temp[buff_memLR(15,x-1,y ,pitch)];
f16= temp[buff_memLR(16,x ,y-1,pitch)];
f17= temp[buff_memLR(17,x+1,y ,pitch)];
f18= temp[buff_memLR(18,x ,y+1,pitch)];
if(im == 1 || im ==10){//BB
if(im == 10){
check[0] = 1;
sumX[threadIdx.x] =2.f*f1-2.f*f3+2.f*f5+2.f*f8-2.f*f6;//-2.f*f7+2.f*f10-2.f*f12+2.f*f15-2.f*f17;
sumX[threadIdx.x]+=-2.f*f7+2.f*f10-2.f*f12+2.f*f15-2.f*f17;
sumY[threadIdx.x] =2.f*f2-2.f*f4+2.f*f5-2.f*f8+2.f*f6;//-2.f*f7+2.f*f11-2.f*f13+2.f*f16-2.f*f18;
sumY[threadIdx.x]+=-2.f*f7+2.f*f11-2.f*f13+2.f*f16-2.f*f18;
sumZ[threadIdx.x] =2.f*f9+2.f*f10+2.f*f11+2.f*f12+2.f*f13;//-2.f*f14-2.f*f15-2.f*f16-2.f*f17-2.f*f18;
sumZ[threadIdx.x]+=-2.f*f14-2.f*f15-2.f*f16-2.f*f17-2.f*f18;
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
}
hB[buff_memLR(0 ,x,y,pitch)] = f0 ;
hB[buff_memLR(1 ,x,y,pitch)] = f3 ;
hB[buff_memLR(2 ,x,y,pitch)] = f4 ;
hB[buff_memLR(3 ,x,y,pitch)] = f1 ;
hB[buff_memLR(4 ,x,y,pitch)] = f2 ;
hB[buff_memLR(5 ,x,y,pitch)] = f7 ;
hB[buff_memLR(6 ,x,y,pitch)] = f8 ;
hB[buff_memLR(7 ,x,y,pitch)] = f5 ;
hB[buff_memLR(8 ,x,y,pitch)] = f6 ;
hB[buff_memLR(9 ,x,y,pitch)] = f14;
hB[buff_memLR(10,x,y,pitch)] = f17;
hB[buff_memLR(11,x,y,pitch)] = f18;
hB[buff_memLR(12,x,y,pitch)] = f15;
hB[buff_memLR(13,x,y,pitch)] = f16;
hB[buff_memLR(14,x,y,pitch)] = f9 ;
hB[buff_memLR(15,x,y,pitch)] = f12;
hB[buff_memLR(16,x,y,pitch)] = f13;
hB[buff_memLR(17,x,y,pitch)] = f10;
hB[buff_memLR(18,x,y,pitch)] = f11;
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
hB[buff_memLR(0 ,x,y,pitch)] = f0 ;
hB[buff_memLR(1 ,x,y,pitch)] = f1 ;
hB[buff_memLR(2 ,x,y,pitch)] = f2 ;
hB[buff_memLR(3 ,x,y,pitch)] = f3 ;
hB[buff_memLR(4 ,x,y,pitch)] = f4 ;
hB[buff_memLR(5 ,x,y,pitch)] = f5 ;
hB[buff_memLR(6 ,x,y,pitch)] = f6 ;
hB[buff_memLR(7 ,x,y,pitch)] = f7 ;
hB[buff_memLR(8 ,x,y,pitch)] = f8 ;
hB[buff_memLR(9 ,x,y,pitch)] = f9 ;
hB[buff_memLR(10,x,y,pitch)] = f10;
hB[buff_memLR(11,x,y,pitch)] = f11;
hB[buff_memLR(12,x,y,pitch)] = f12;
hB[buff_memLR(13,x,y,pitch)] = f13;
hB[buff_memLR(14,x,y,pitch)] = f14;
hB[buff_memLR(15,x,y,pitch)] = f15;
hB[buff_memLR(16,x,y,pitch)] = f16;
hB[buff_memLR(17,x,y,pitch)] = f17;
hB[buff_memLR(18,x,y,pitch)] = f18;
}
syncthreads();
if(check[0] == 1){
//reduction for force
int nTotalThreads = blockDim.x;
while(nTotalThreads > 1){
int halfPoint = (nTotalThreads >> 1);
if(threadIdx.x < halfPoint){
sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint];
sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint];
sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint];
}
syncthreads();
nTotalThreads = halfPoint;
}
if(threadIdx.x == 0){
atomicAdd(&FX[Force_t],sumX[0]);
atomicAdd(&FY[Force_t],sumY[0]);
atomicAdd(&FZ[Force_t],sumZ[0]);
}
}
}
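//update_inner_LR_interp: same as update_inner_LR except that nodes in the outer
//ring of the refined patch (within LRLEVEL of the x/y edges) are filled by
//trilinear interpolation from the coarse arrays f_c/g_c/h_c and rescaled with
//mrt_scale_cf/bgk_scale_cf (factor SF) instead of being streamed and collided.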
__global__ void update_inner_LR_interp(float* fA, float* fB, float* g, float* h, float* f_c, float* g_c, float* h_c,
float omega, size_t pitch, int zInner, size_t pitch_c, int zInner_c, float SF, int GPU)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
int j = x+y*pitch+z*YLRDIM*pitch;//index on padded mem (pitch in elements)
float xcoord = LRX0+x*LRFACTOR;
float ycoord = LRY0+y*LRFACTOR;
float zcoord = LRZ0+(1+z)*LRFACTOR;//local zcoord within GPU
int im = ImageFcnLR(xcoord,ycoord,GPU*(zInner_c+2)+zcoord);
if(x < LRLEVEL || x > XLRDIM-1-LRLEVEL || y < LRLEVEL || y > YLRDIM-1-LRLEVEL)
{
float f[19];
if(zcoord<1)
{
int xm = int(xcoord);
int ym = int(ycoord);
int zm = int(zcoord);//for zcoord<0
int xp = xm+1;
int yp = ym+1;
//int zp = zm+1;
float xf = xcoord-xm;
float yf = ycoord-ym;
float zf = zcoord-zm;
for(int i=0;i<19;i++){
float v000 = g_c[buff_mem(i ,xm,ym ,pitch_c)];
float v001 = g_c[buff_mem(i ,xp,ym ,pitch_c)];
float v010 = g_c[buff_mem(i ,xm,yp ,pitch_c)];
float v011 = g_c[buff_mem(i ,xp,yp ,pitch_c)];
float v100 = f_c[ f_mem(i ,xm,ym,0 ,pitch_c,zInner_c)];
float v101 = f_c[ f_mem(i ,xp,ym,0 ,pitch_c,zInner_c)];
float v110 = f_c[ f_mem(i ,xm,yp,0 ,pitch_c,zInner_c)];
float v111 = f_c[ f_mem(i ,xp,yp,0 ,pitch_c,zInner_c)];
f[i] = trilinear_interp(v000, v001, v010, v011, v100, v101, v110, v111, xf, yf, zf);
}
}//end zcoord<0
else if(zcoord>(zInner_c+2)-2)
{
int xm = int(xcoord);
int ym = int(ycoord);
int zm = int(zcoord);
int xp = xm+1;
int yp = ym+1;
float xf = xcoord-xm;
float yf = ycoord-ym;
float zf = zcoord-zm;
for(int i=0;i<19;i++){
float v000 = f_c[ f_mem(i ,xm,ym,zInner_c-1,pitch_c,zInner_c)];
float v001 = f_c[ f_mem(i ,xp,ym,zInner_c-1,pitch_c,zInner_c)];
float v010 = f_c[ f_mem(i ,xm,yp,zInner_c-1,pitch_c,zInner_c)];
float v011 = f_c[ f_mem(i ,xp,yp,zInner_c-1,pitch_c,zInner_c)];
float v100 = h_c[buff_mem(i ,xm,ym ,pitch_c)];
float v101 = h_c[buff_mem(i ,xp,ym ,pitch_c)];
float v110 = h_c[buff_mem(i ,xm,yp ,pitch_c)];
float v111 = h_c[buff_mem(i ,xp,yp ,pitch_c)];
f[i] = trilinear_interp(v000, v001, v010, v011, v100, v101, v110, v111, xf, yf, zf);
}
}//end zcoord>ZDIM
else{
int xm = int(xcoord);
int ym = int(ycoord);
int zm = int(zcoord);
int xp = xm+1;
int yp = ym+1;
int zp = zm+1;
float xf = xcoord-xm;
float yf = ycoord-ym;
float zf = zcoord-zm;
for(int i=0;i<19;i++){
float v000 = f_c[f_mem(i ,xm,ym,zm-1,pitch_c,zInner_c)];//-1 to correct for index in f
float v001 = f_c[f_mem(i ,xp,ym,zm-1,pitch_c,zInner_c)];
float v010 = f_c[f_mem(i ,xm,yp,zm-1,pitch_c,zInner_c)];
float v011 = f_c[f_mem(i ,xp,yp,zm-1,pitch_c,zInner_c)];
float v100 = f_c[f_mem(i ,xm,ym,zp-1,pitch_c,zInner_c)];
float v101 = f_c[f_mem(i ,xp,ym,zp-1,pitch_c,zInner_c)];
float v110 = f_c[f_mem(i ,xm,yp,zp-1,pitch_c,zInner_c)];
float v111 = f_c[f_mem(i ,xp,yp,zp-1,pitch_c,zInner_c)];
f[i] = trilinear_interp(v000, v001, v010, v011, v100, v101, v110, v111, xf, yf, zf);
}
}
if(MODEL == "MRT")
mrt_scale_cf(f[0],f[1],f[2],f[3],f[4],f[5],f[6],f[7],f[8],f[9],f[10],f[11],f[12],f[13],f[14],f[15],f[16],f[17],f[18],SF);
else if(MODEL == "BGK")
bgk_scale_cf(f[0],f[1],f[2],f[3],f[4],f[5],f[6],f[7],f[8],f[9],f[10],f[11],f[12],f[13],f[14],f[15],f[16],f[17],f[18],SF);
fB[f_memLR(0 ,x,y,z,pitch,zInner)] = f[0 ];
fB[f_memLR(1 ,x,y,z,pitch,zInner)] = f[1 ];
fB[f_memLR(2 ,x,y,z,pitch,zInner)] = f[2 ];
fB[f_memLR(3 ,x,y,z,pitch,zInner)] = f[3 ];
fB[f_memLR(4 ,x,y,z,pitch,zInner)] = f[4 ];
fB[f_memLR(5 ,x,y,z,pitch,zInner)] = f[5 ];
fB[f_memLR(6 ,x,y,z,pitch,zInner)] = f[6 ];
fB[f_memLR(7 ,x,y,z,pitch,zInner)] = f[7 ];
fB[f_memLR(8 ,x,y,z,pitch,zInner)] = f[8 ];
fB[f_memLR(9 ,x,y,z,pitch,zInner)] = f[9 ];
fB[f_memLR(10,x,y,z,pitch,zInner)] = f[10];
fB[f_memLR(11,x,y,z,pitch,zInner)] = f[11];
fB[f_memLR(12,x,y,z,pitch,zInner)] = f[12];
fB[f_memLR(13,x,y,z,pitch,zInner)] = f[13];
fB[f_memLR(14,x,y,z,pitch,zInner)] = f[14];
fB[f_memLR(15,x,y,z,pitch,zInner)] = f[15];
fB[f_memLR(16,x,y,z,pitch,zInner)] = f[16];
fB[f_memLR(17,x,y,z,pitch,zInner)] = f[17];
fB[f_memLR(18,x,y,z,pitch,zInner)] = f[18];
}
else
{
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
f0 = fA[j];
f1 = fA[f_memLR (1 ,x-1,y ,z ,pitch, zInner)];
f3 = fA[f_memLR (3 ,x+1,y ,z ,pitch, zInner)];
f2 = fA[f_memLR (2 ,x ,y-1,z ,pitch, zInner)];
f5 = fA[f_memLR (5 ,x-1,y-1,z ,pitch, zInner)];
f6 = fA[f_memLR (6 ,x+1,y-1,z ,pitch, zInner)];
f4 = fA[f_memLR (4 ,x ,y+1,z ,pitch, zInner)];
f7 = fA[f_memLR (7 ,x+1,y+1,z ,pitch, zInner)];
f8 = fA[f_memLR (8 ,x-1,y+1,z ,pitch, zInner)];
if(z==zInner-1){//top nodes need info from h
f9 = fA[f_memLR (9 ,x ,y ,z-1,pitch, zInner)];
f10= fA[f_memLR (10,x-1,y ,z-1,pitch, zInner)];
f11= fA[f_memLR (11,x ,y-1,z-1,pitch, zInner)];
f12= fA[f_memLR (12,x+1,y ,z-1,pitch, zInner)];
f13= fA[f_memLR (13,x ,y+1,z-1,pitch, zInner)];
f14= h [buff_memLR(14,x ,y ,pitch)];
f15= h [buff_memLR(15,x-1,y ,pitch)];
f16= h [buff_memLR(16,x ,y-1,pitch)];
f17= h [buff_memLR(17,x+1,y ,pitch)];
f18= h [buff_memLR(18,x ,y+1,pitch)];
}
else if(z==0){//bottom nodes need info from g
f9 = g [buff_memLR(9 ,x ,y ,pitch)];
f10= g [buff_memLR(10,x-1,y ,pitch)];
f11= g [buff_memLR(11,x ,y-1,pitch)];
f12= g [buff_memLR(12,x+1,y ,pitch)];
f13= g [buff_memLR(13,x ,y+1,pitch)];
f14= fA[f_memLR (14,x ,y ,z+1,pitch, zInner)];
f15= fA[f_memLR (15,x-1,y ,z+1,pitch, zInner)];
f16= fA[f_memLR (16,x ,y-1,z+1,pitch, zInner)];
f17= fA[f_memLR (17,x+1,y ,z+1,pitch, zInner)];
f18= fA[f_memLR (18,x ,y+1,z+1,pitch, zInner)];
}
else{//normal nodes
f9 = fA[f_memLR(9 ,x ,y ,z-1,pitch,zInner)];//
f10= fA[f_memLR(10,x-1,y ,z-1,pitch,zInner)];//
f11= fA[f_memLR(11,x ,y-1,z-1,pitch,zInner)];//
f12= fA[f_memLR(12,x+1,y ,z-1,pitch,zInner)];//
f13= fA[f_memLR(13,x ,y+1,z-1,pitch,zInner)];//
f14= fA[f_memLR(14,x ,y ,z+1,pitch,zInner)];//
f15= fA[f_memLR(15,x-1,y ,z+1,pitch,zInner)];//
f16= fA[f_memLR(16,x ,y-1,z+1,pitch,zInner)];//
f17= fA[f_memLR(17,x+1,y ,z+1,pitch,zInner)];//
f18= fA[f_memLR(18,x ,y+1,z+1,pitch,zInner)];//
}//end normal nodes
if(im == 1 || im ==10){//BB
fB[f_memLR(1 ,x,y,z,pitch,zInner)] = f3 ;
fB[f_memLR(2 ,x,y,z,pitch,zInner)] = f4 ;
fB[f_memLR(3 ,x,y,z,pitch,zInner)] = f1 ;
fB[f_memLR(4 ,x,y,z,pitch,zInner)] = f2 ;
fB[f_memLR(5 ,x,y,z,pitch,zInner)] = f7 ;
fB[f_memLR(6 ,x,y,z,pitch,zInner)] = f8 ;
fB[f_memLR(7 ,x,y,z,pitch,zInner)] = f5 ;
fB[f_memLR(8 ,x,y,z,pitch,zInner)] = f6 ;
fB[f_memLR(9 ,x,y,z,pitch,zInner)] = f14;
fB[f_memLR(10,x,y,z,pitch,zInner)] = f17;
fB[f_memLR(11,x,y,z,pitch,zInner)] = f18;
fB[f_memLR(12,x,y,z,pitch,zInner)] = f15;
fB[f_memLR(13,x,y,z,pitch,zInner)] = f16;
fB[f_memLR(14,x,y,z,pitch,zInner)] = f9 ;
fB[f_memLR(15,x,y,z,pitch,zInner)] = f12;
fB[f_memLR(16,x,y,z,pitch,zInner)] = f13;
fB[f_memLR(17,x,y,z,pitch,zInner)] = f10;
fB[f_memLR(18,x,y,z,pitch,zInner)] = f11;
}
else{
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
fB[f_memLR(0 ,x,y,z,pitch,zInner)] = f0 ;
fB[f_memLR(1 ,x,y,z,pitch,zInner)] = f1 ;
fB[f_memLR(2 ,x,y,z,pitch,zInner)] = f2 ;
fB[f_memLR(3 ,x,y,z,pitch,zInner)] = f3 ;
fB[f_memLR(4 ,x,y,z,pitch,zInner)] = f4 ;
fB[f_memLR(5 ,x,y,z,pitch,zInner)] = f5 ;
fB[f_memLR(6 ,x,y,z,pitch,zInner)] = f6 ;
fB[f_memLR(7 ,x,y,z,pitch,zInner)] = f7 ;
fB[f_memLR(8 ,x,y,z,pitch,zInner)] = f8 ;
fB[f_memLR(9 ,x,y,z,pitch,zInner)] = f9 ;
fB[f_memLR(10,x,y,z,pitch,zInner)] = f10;
fB[f_memLR(11,x,y,z,pitch,zInner)] = f11;
fB[f_memLR(12,x,y,z,pitch,zInner)] = f12;
fB[f_memLR(13,x,y,z,pitch,zInner)] = f13;
fB[f_memLR(14,x,y,z,pitch,zInner)] = f14;
fB[f_memLR(15,x,y,z,pitch,zInner)] = f15;
fB[f_memLR(16,x,y,z,pitch,zInner)] = f16;
fB[f_memLR(17,x,y,z,pitch,zInner)] = f17;
fB[f_memLR(18,x,y,z,pitch,zInner)] = f18;
}
}//end else (no interp)
}
__global__ void update_bottom_LR_interp(float* gA, float* gB, float* f, float* temp, float* g_c, float* temp_c,
float omega, size_t pitch, int zInner, size_t pitch_c, int zInner_c, float SF, int GPU)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
//int z = (zInner+2)-1;
int j = x+y*pitch;//index on padded mem (pitch in elements)
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
float xcoord = LRX0+x*LRFACTOR;
float ycoord = LRY0+y*LRFACTOR;
float zcoord = LRZ0;
int im = ImageFcnLR(xcoord,ycoord,zcoord+GPU*LRFACTOR*ZLRDIM);
if(x < LRLEVEL || x > XLRDIM-1-LRLEVEL || y < LRLEVEL || y > YLRDIM-1-LRLEVEL)
{
float f[19];
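//only the zcoord<0 case is interpolated here; this assumes the bottom buffer of
//the refined patch lies below the first interior coarse slice (local zcoord
//negative), so temp_c and g_c bracket the interpolation point in z.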
if(zcoord<0)
{
int xm = int(xcoord);
int ym = int(ycoord);
int zm = int(zcoord)-1;//for zcoord<0
int xp = xm+1;
int yp = ym+1;
//int zp = zm+1;
float xf = xcoord-xm;
float yf = ycoord-ym;
float zf = zcoord-zm;
for(int i=0;i<19;i++){
float v000 = temp_c[buff_mem(i ,xm,ym ,pitch_c)];
float v001 = temp_c[buff_mem(i ,xp,ym ,pitch_c)];
float v010 = temp_c[buff_mem(i ,xm,yp ,pitch_c)];
float v011 = temp_c[buff_mem(i ,xp,yp ,pitch_c)];
float v100 = g_c[buff_mem(i ,xm,ym,pitch_c)];
float v101 = g_c[buff_mem(i ,xp,ym,pitch_c)];
float v110 = g_c[buff_mem(i ,xm,yp,pitch_c)];
float v111 = g_c[buff_mem(i ,xp,yp,pitch_c)];
f[i] = trilinear_interp(v000, v001, v010, v011, v100, v101, v110, v111, xf, yf, zf);
}
}//end zcoord<0
if(MODEL == "MRT")
mrt_scale_cf(f[0],f[1],f[2],f[3],f[4],f[5],f[6],f[7],f[8],f[9],f[10],f[11],f[12],f[13],f[14],f[15],f[16],f[17],f[18],SF);
else if(MODEL == "BGK")
bgk_scale_cf(f[0],f[1],f[2],f[3],f[4],f[5],f[6],f[7],f[8],f[9],f[10],f[11],f[12],f[13],f[14],f[15],f[16],f[17],f[18],SF);
gB[buff_memLR(0 ,x,y,pitch)] = f[0 ];
gB[buff_memLR(1 ,x,y,pitch)] = f[1 ];
gB[buff_memLR(2 ,x,y,pitch)] = f[2 ];
gB[buff_memLR(3 ,x,y,pitch)] = f[3 ];
gB[buff_memLR(4 ,x,y,pitch)] = f[4 ];
gB[buff_memLR(5 ,x,y,pitch)] = f[5 ];
gB[buff_memLR(6 ,x,y,pitch)] = f[6 ];
gB[buff_memLR(7 ,x,y,pitch)] = f[7 ];
gB[buff_memLR(8 ,x,y,pitch)] = f[8 ];
gB[buff_memLR(9 ,x,y,pitch)] = f[9 ];
gB[buff_memLR(10,x,y,pitch)] = f[10];
gB[buff_memLR(11,x,y,pitch)] = f[11];
gB[buff_memLR(12,x,y,pitch)] = f[12];
gB[buff_memLR(13,x,y,pitch)] = f[13];
gB[buff_memLR(14,x,y,pitch)] = f[14];
gB[buff_memLR(15,x,y,pitch)] = f[15];
gB[buff_memLR(16,x,y,pitch)] = f[16];
gB[buff_memLR(17,x,y,pitch)] = f[17];
gB[buff_memLR(18,x,y,pitch)] = f[18];
}
else
{
f0 = gA [j];
f1 = gA [buff_memLR(1 ,x-1,y ,pitch)];
f3 = gA [buff_memLR(3 ,x+1,y ,pitch)];
f2 = gA [buff_memLR(2 ,x ,y-1,pitch)];
f5 = gA [buff_memLR(5 ,x-1,y-1,pitch)];
f6 = gA [buff_memLR(6 ,x+1,y-1,pitch)];
f4 = gA [buff_memLR(4 ,x ,y+1,pitch)];
f7 = gA [buff_memLR(7 ,x+1,y+1,pitch)];
f8 = gA [buff_memLR(8 ,x-1,y+1,pitch)];
f9 = temp[buff_memLR(9 ,x ,y ,pitch)];
f10= temp[buff_memLR(10,x-1,y ,pitch)];
f11= temp[buff_memLR(11,x ,y-1,pitch)];
f12= temp[buff_memLR(12,x+1,y ,pitch)];
f13= temp[buff_memLR(13,x ,y+1,pitch)];
f14= f [f_memLR(14,x ,y ,0,pitch, zInner)];
f15= f [f_memLR(15,x-1,y ,0,pitch, zInner)];
f16= f [f_memLR(16,x ,y-1,0,pitch, zInner)];
f17= f [f_memLR(17,x+1,y ,0,pitch, zInner)];
f18= f [f_memLR(18,x ,y+1,0,pitch, zInner)];
if(im == 1 || im ==10){//BB
gB[buff_memLR(0 ,x,y,pitch)] = f0 ;
gB[buff_memLR(1 ,x,y,pitch)] = f3 ;
gB[buff_memLR(2 ,x,y,pitch)] = f4 ;
gB[buff_memLR(3 ,x,y,pitch)] = f1 ;
gB[buff_memLR(4 ,x,y,pitch)] = f2 ;
gB[buff_memLR(5 ,x,y,pitch)] = f7 ;
gB[buff_memLR(6 ,x,y,pitch)] = f8 ;
gB[buff_memLR(7 ,x,y,pitch)] = f5 ;
gB[buff_memLR(8 ,x,y,pitch)] = f6 ;
gB[buff_memLR(9 ,x,y,pitch)] = f14;
gB[buff_memLR(10,x,y,pitch)] = f17;
gB[buff_memLR(11,x,y,pitch)] = f18;
gB[buff_memLR(12,x,y,pitch)] = f15;
gB[buff_memLR(13,x,y,pitch)] = f16;
gB[buff_memLR(14,x,y,pitch)] = f9 ;
gB[buff_memLR(15,x,y,pitch)] = f12;
gB[buff_memLR(16,x,y,pitch)] = f13;
gB[buff_memLR(17,x,y,pitch)] = f10;
gB[buff_memLR(18,x,y,pitch)] = f11;
}
else{
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
gB[buff_memLR(0 ,x,y,pitch)] = f0 ;
gB[buff_memLR(1 ,x,y,pitch)] = f1 ;
gB[buff_memLR(2 ,x,y,pitch)] = f2 ;
gB[buff_memLR(3 ,x,y,pitch)] = f3 ;
gB[buff_memLR(4 ,x,y,pitch)] = f4 ;
gB[buff_memLR(5 ,x,y,pitch)] = f5 ;
gB[buff_memLR(6 ,x,y,pitch)] = f6 ;
gB[buff_memLR(7 ,x,y,pitch)] = f7 ;
gB[buff_memLR(8 ,x,y,pitch)] = f8 ;
gB[buff_memLR(9 ,x,y,pitch)] = f9 ;
gB[buff_memLR(10,x,y,pitch)] = f10;
gB[buff_memLR(11,x,y,pitch)] = f11;
gB[buff_memLR(12,x,y,pitch)] = f12;
gB[buff_memLR(13,x,y,pitch)] = f13;
gB[buff_memLR(14,x,y,pitch)] = f14;
gB[buff_memLR(15,x,y,pitch)] = f15;
gB[buff_memLR(16,x,y,pitch)] = f16;
gB[buff_memLR(17,x,y,pitch)] = f17;
gB[buff_memLR(18,x,y,pitch)] = f18;
}
}
}
__global__ void update_top_LR_interp(float* hA, float* hB, float* f, float* temp, float* h_c, float* temp_c,
float omega, size_t pitch, int zInner, size_t pitch_c, int zInner_c, float SF, int GPU)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = (zInner+2)-1;//physical coord
int j = x+y*pitch;//index on padded mem (pitch in elements)
//int im = ImageFcn(x,y,(GPU+1)*(zInner+2)-1);
float xcoord = LRX0+x*LRFACTOR;
float ycoord = LRY0+y*LRFACTOR;
float zcoord = LRZ0+z*LRFACTOR;
int im = ImageFcnLR(xcoord,ycoord,GPU*LRFACTOR*(zInner+2)+zcoord);
if(x < LRLEVEL || x > XLRDIM-1-LRLEVEL || y < LRLEVEL || y > YLRDIM-1-LRLEVEL)
{
float f[19];
int xm = int(xcoord);
int ym = int(ycoord);
int zm = int(zcoord);
int xp = xm+1;
int yp = ym+1;
float xf = xcoord-xm;
float yf = ycoord-ym;
float zf = zcoord-zm;
for(int i=0;i<19;i++){
float v000 = h_c[buff_mem(i ,xm,ym,pitch_c)];
float v001 = h_c[buff_mem(i ,xp,ym,pitch_c)];
float v010 = h_c[buff_mem(i ,xm,yp,pitch_c)];
float v011 = h_c[buff_mem(i ,xp,yp,pitch_c)];
float v100 = temp_c[buff_mem(i ,xm,ym ,pitch_c)];
float v101 = temp_c[buff_mem(i ,xp,ym ,pitch_c)];
float v110 = temp_c[buff_mem(i ,xm,yp ,pitch_c)];
float v111 = temp_c[buff_mem(i ,xp,yp ,pitch_c)];
f[i] = trilinear_interp(v000, v001, v010, v011, v100, v101, v110, v111, xf, yf, zf);
}
// }//end zcoord>ZDIM
if(MODEL == "MRT")
mrt_scale_cf(f[0],f[1],f[2],f[3],f[4],f[5],f[6],f[7],f[8],f[9],f[10],f[11],f[12],f[13],f[14],f[15],f[16],f[17],f[18],SF);
else if(MODEL == "BGK")
bgk_scale_cf(f[0],f[1],f[2],f[3],f[4],f[5],f[6],f[7],f[8],f[9],f[10],f[11],f[12],f[13],f[14],f[15],f[16],f[17],f[18],SF);
hB[buff_memLR(0 ,x,y,pitch)] = f[0 ];
hB[buff_memLR(1 ,x,y,pitch)] = f[1 ];
hB[buff_memLR(2 ,x,y,pitch)] = f[2 ];
hB[buff_memLR(3 ,x,y,pitch)] = f[3 ];
hB[buff_memLR(4 ,x,y,pitch)] = f[4 ];
hB[buff_memLR(5 ,x,y,pitch)] = f[5 ];
hB[buff_memLR(6 ,x,y,pitch)] = f[6 ];
hB[buff_memLR(7 ,x,y,pitch)] = f[7 ];
hB[buff_memLR(8 ,x,y,pitch)] = f[8 ];
hB[buff_memLR(9 ,x,y,pitch)] = f[9 ];
hB[buff_memLR(10,x,y,pitch)] = f[10];
hB[buff_memLR(11,x,y,pitch)] = f[11];
hB[buff_memLR(12,x,y,pitch)] = f[12];
hB[buff_memLR(13,x,y,pitch)] = f[13];
hB[buff_memLR(14,x,y,pitch)] = f[14];
hB[buff_memLR(15,x,y,pitch)] = f[15];
hB[buff_memLR(16,x,y,pitch)] = f[16];
hB[buff_memLR(17,x,y,pitch)] = f[17];
hB[buff_memLR(18,x,y,pitch)] = f[18];
}
else{//not LR interp region
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
f0 = hA[j];
f1 = hA [buff_memLR(1 ,x-1,y ,pitch)];
f3 = hA [buff_memLR(3 ,x+1,y ,pitch)];
f2 = hA [buff_memLR(2 ,x ,y-1,pitch)];
f5 = hA [buff_memLR(5 ,x-1,y-1,pitch)];
f6 = hA [buff_memLR(6 ,x+1,y-1,pitch)];
f4 = hA [buff_memLR(4 ,x ,y+1,pitch)];
f7 = hA [buff_memLR(7 ,x+1,y+1,pitch)];
f8 = hA [buff_memLR(8 ,x-1,y+1,pitch)];
f9 = f [f_memLR(9 ,x ,y ,zInner-1,pitch, zInner)];
f10= f [f_memLR(10,x-1,y ,zInner-1,pitch, zInner)];
f11= f [f_memLR(11,x ,y-1,zInner-1,pitch, zInner)];
f12= f [f_memLR(12,x+1,y ,zInner-1,pitch, zInner)];
f13= f [f_memLR(13,x ,y+1,zInner-1,pitch, zInner)];
f14= temp[buff_memLR(14,x ,y ,pitch)];
f15= temp[buff_memLR(15,x-1,y ,pitch)];
f16= temp[buff_memLR(16,x ,y-1,pitch)];
f17= temp[buff_memLR(17,x+1,y ,pitch)];
f18= temp[buff_memLR(18,x ,y+1,pitch)];
if(im == 1 || im ==10){//BB
hB[buff_memLR(0 ,x,y,pitch)] = f0 ;
hB[buff_memLR(1 ,x,y,pitch)] = f3 ;
hB[buff_memLR(2 ,x,y,pitch)] = f4 ;
hB[buff_memLR(3 ,x,y,pitch)] = f1 ;
hB[buff_memLR(4 ,x,y,pitch)] = f2 ;
hB[buff_memLR(5 ,x,y,pitch)] = f7 ;
hB[buff_memLR(6 ,x,y,pitch)] = f8 ;
hB[buff_memLR(7 ,x,y,pitch)] = f5 ;
hB[buff_memLR(8 ,x,y,pitch)] = f6 ;
hB[buff_memLR(9 ,x,y,pitch)] = f14;
hB[buff_memLR(10,x,y,pitch)] = f17;
hB[buff_memLR(11,x,y,pitch)] = f18;
hB[buff_memLR(12,x,y,pitch)] = f15;
hB[buff_memLR(13,x,y,pitch)] = f16;
hB[buff_memLR(14,x,y,pitch)] = f9 ;
hB[buff_memLR(15,x,y,pitch)] = f12;
hB[buff_memLR(16,x,y,pitch)] = f13;
hB[buff_memLR(17,x,y,pitch)] = f10;
hB[buff_memLR(18,x,y,pitch)] = f11;
}
else{
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
hB[buff_memLR(0 ,x,y,pitch)] = f0 ;
hB[buff_memLR(1 ,x,y,pitch)] = f1 ;
hB[buff_memLR(2 ,x,y,pitch)] = f2 ;
hB[buff_memLR(3 ,x,y,pitch)] = f3 ;
hB[buff_memLR(4 ,x,y,pitch)] = f4 ;
hB[buff_memLR(5 ,x,y,pitch)] = f5 ;
hB[buff_memLR(6 ,x,y,pitch)] = f6 ;
hB[buff_memLR(7 ,x,y,pitch)] = f7 ;
hB[buff_memLR(8 ,x,y,pitch)] = f8 ;
hB[buff_memLR(9 ,x,y,pitch)] = f9 ;
hB[buff_memLR(10,x,y,pitch)] = f10;
hB[buff_memLR(11,x,y,pitch)] = f11;
hB[buff_memLR(12,x,y,pitch)] = f12;
hB[buff_memLR(13,x,y,pitch)] = f13;
hB[buff_memLR(14,x,y,pitch)] = f14;
hB[buff_memLR(15,x,y,pitch)] = f15;
hB[buff_memLR(16,x,y,pitch)] = f16;
hB[buff_memLR(17,x,y,pitch)] = f17;
hB[buff_memLR(18,x,y,pitch)] = f18;
}
}
}
__device__ __inline__ float ld_gb1_cg(const float *addr)
{
float return_value;
asm("ld.global.cg.f32 %0, [%1];" : "=f"(return_value) : "l"(addr));
return return_value;
}
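//hypothetical usage: float fi = ld_gb1_cg(&fA[f_mem(1,x,y,z,pitch,zInner)]);
//the .cg qualifier loads through L2 only (bypassing L1), which can help for
//streaming reads that are not reused within a block.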
__global__ void initialize_single(float *f, size_t pitch, int yDim, int zDim, int GPU_N, int level)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
float xcoord = x;
float ycoord = y;
float zcoord = z+1+GPU_N*ZDIM;
if(level > 0){
xcoord = LRX0+x*LRFACTOR;
ycoord = LRY0+y*LRFACTOR;
zcoord = LRZ0+z*LRFACTOR;
}
int j = x+y*pitch+z*yDim*pitch;//index on padded mem (pitch in elements)
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
int im = ImageFcnLR(xcoord,ycoord,zcoord);
float u,v,w,rho,usqr;
rho = 1.f;
u = 0.01f;
v = UMAX;
w = 0.0f;
if(im == 10 || im == 1){
u = 0.0f;
v = 0.0f;
w = 0.0f;
}
usqr = u*u+v*v+w*w;
if(MODEL == "BGK"){
f[j+0 *pitch*yDim*(zDim/GPU_N-2)]= 1.0f/3.0f*(rho-1.5f*usqr);
f[j+1 *pitch*yDim*(zDim/GPU_N-2)]= 1.0f/18.0f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);
f[j+2 *pitch*yDim*(zDim/GPU_N-2)]= 1.0f/18.0f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr);
f[j+3 *pitch*yDim*(zDim/GPU_N-2)]= 1.0f/18.0f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
f[j+4 *pitch*yDim*(zDim/GPU_N-2)]= 1.0f/18.0f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr);
f[j+5 *pitch*yDim*(zDim/GPU_N-2)]= 1.0f/36.0f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr);
f[j+6 *pitch*yDim*(zDim/GPU_N-2)]= 1.0f/36.0f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
f[j+7 *pitch*yDim*(zDim/GPU_N-2)]= 1.0f/36.0f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
f[j+8 *pitch*yDim*(zDim/GPU_N-2)]= 1.0f/36.0f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr);
f[j+9 *pitch*yDim*(zDim/GPU_N-2)]= 1.0f/18.0f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr);
f[j+10*pitch*yDim*(zDim/GPU_N-2)]= 1.0f/36.0f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr);
f[j+11*pitch*yDim*(zDim/GPU_N-2)]= 1.0f/36.0f*(rho+3.0f*(v+w)+4.5f*(v+w)*(v+w)-1.5f*usqr);
f[j+12*pitch*yDim*(zDim/GPU_N-2)]= 1.0f/36.0f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
f[j+13*pitch*yDim*(zDim/GPU_N-2)]= 1.0f/36.0f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr);
f[j+14*pitch*yDim*(zDim/GPU_N-2)]= 1.0f/18.0f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr);
f[j+15*pitch*yDim*(zDim/GPU_N-2)]= 1.0f/36.0f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr);
f[j+16*pitch*yDim*(zDim/GPU_N-2)]= 1.0f/36.0f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr);
f[j+17*pitch*yDim*(zDim/GPU_N-2)]= 1.0f/36.0f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
f[j+18*pitch*yDim*(zDim/GPU_N-2)]= 1.0f/36.0f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr);
}
else{
mrt_feq(rho,u,v,w,f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18);
f[j+0 *pitch*yDim*(zDim/GPU_N-2)]=f0 ;
f[j+1 *pitch*yDim*(zDim/GPU_N-2)]=f1 ;
f[j+2 *pitch*yDim*(zDim/GPU_N-2)]=f2 ;
f[j+3 *pitch*yDim*(zDim/GPU_N-2)]=f3 ;
f[j+4 *pitch*yDim*(zDim/GPU_N-2)]=f4 ;
f[j+5 *pitch*yDim*(zDim/GPU_N-2)]=f5 ;
f[j+6 *pitch*yDim*(zDim/GPU_N-2)]=f6 ;
f[j+7 *pitch*yDim*(zDim/GPU_N-2)]=f7 ;
f[j+8 *pitch*yDim*(zDim/GPU_N-2)]=f8 ;
f[j+9 *pitch*yDim*(zDim/GPU_N-2)]=f9 ;
f[j+10*pitch*yDim*(zDim/GPU_N-2)]=f10;
f[j+11*pitch*yDim*(zDim/GPU_N-2)]=f11;
f[j+12*pitch*yDim*(zDim/GPU_N-2)]=f12;
f[j+13*pitch*yDim*(zDim/GPU_N-2)]=f13;
f[j+14*pitch*yDim*(zDim/GPU_N-2)]=f14;
f[j+15*pitch*yDim*(zDim/GPU_N-2)]=f15;
f[j+16*pitch*yDim*(zDim/GPU_N-2)]=f16;
f[j+17*pitch*yDim*(zDim/GPU_N-2)]=f17;
f[j+18*pitch*yDim*(zDim/GPU_N-2)]=f18;
}
}
__global__ void initialize_buffer(float *g, size_t pitch, int yDim, int GPU_N, int level)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int j = x+y*pitch;//index on padded mem (pitch in elements)
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
float xcoord = x;
float ycoord = y;
float zcoord = 0+1+GPU_N*ZDIM;
if(level > 0){
xcoord = LRX0+x*LRFACTOR;
ycoord = LRY0+y*LRFACTOR;
zcoord = LRZ0;
}
int im = ImageFcn(xcoord,ycoord,zcoord);
float u,v,w,rho,usqr;
rho = 1.f;
u = 0.01f;
v = UMAX;
w = 0.0f;
if(im == 10 || im == 1){
u = 0.0f;
v = 0.0f;
w = 0.0f;
}
usqr = u*u+v*v+w*w;
if(MODEL == "BGK"){
g[j+0 *pitch*yDim]= 1.0f/3.0f*(rho-1.5f*usqr);
g[j+1 *pitch*yDim]= 1.0f/18.0f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);
g[j+2 *pitch*yDim]= 1.0f/18.0f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr);
g[j+3 *pitch*yDim]= 1.0f/18.0f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
g[j+4 *pitch*yDim]= 1.0f/18.0f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr);
g[j+5 *pitch*yDim]= 1.0f/36.0f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr);
g[j+6 *pitch*yDim]= 1.0f/36.0f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
g[j+7 *pitch*yDim]= 1.0f/36.0f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
g[j+8 *pitch*yDim]= 1.0f/36.0f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr);
g[j+9 *pitch*yDim]= 1.0f/18.0f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr);
g[j+10*pitch*yDim]= 1.0f/36.0f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr);
g[j+11*pitch*yDim]= 1.0f/36.0f*(rho+3.0f*(v+w)+4.5f*(v+w)*(v+w)-1.5f*usqr);
g[j+12*pitch*yDim]= 1.0f/36.0f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
g[j+13*pitch*yDim]= 1.0f/36.0f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr);
g[j+14*pitch*yDim]= 1.0f/18.0f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr);
g[j+15*pitch*yDim]= 1.0f/36.0f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr);
g[j+16*pitch*yDim]= 1.0f/36.0f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr);
g[j+17*pitch*yDim]= 1.0f/36.0f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
g[j+18*pitch*yDim]= 1.0f/36.0f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr);
}
else{
mrt_feq(rho,u,v,w,f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18);
g[j+0 *pitch*yDim]=f0 ;
g[j+1 *pitch*yDim]=f1 ;
g[j+2 *pitch*yDim]=f2 ;
g[j+3 *pitch*yDim]=f3 ;
g[j+4 *pitch*yDim]=f4 ;
g[j+5 *pitch*yDim]=f5 ;
g[j+6 *pitch*yDim]=f6 ;
g[j+7 *pitch*yDim]=f7 ;
g[j+8 *pitch*yDim]=f8 ;
g[j+9 *pitch*yDim]=f9 ;
g[j+10*pitch*yDim]=f10;
g[j+11*pitch*yDim]=f11;
g[j+12*pitch*yDim]=f12;
g[j+13*pitch*yDim]=f13;
g[j+14*pitch*yDim]=f14;
g[j+15*pitch*yDim]=f15;
g[j+16*pitch*yDim]=f16;
g[j+17*pitch*yDim]=f17;
g[j+18*pitch*yDim]=f18;
}
}
//zMin = minimum zcoord, zNum = number of nodes in z
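//writes one comma-separated line per node: x, y, z, u, v, w, rho, f0, f1, f9, f18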
void WriteResults(float *f, ofstream &output, float omega, int xDim, int yDim, int zMin, int zNum, float x0, float y0, float z0, float scale)
{
for(int k = 0; k<zNum; k++){
for(int i = 0; i<yDim; i++){
for(int j = 0; j<xDim; j++){
//int index = i*xDim+j;
float f0 = f[(j+i*xDim+k*yDim*xDim)+0 *xDim*yDim*zNum];
float f1 = f[(j+i*xDim+k*yDim*xDim)+1 *xDim*yDim*zNum];
float f2 = f[(j+i*xDim+k*yDim*xDim)+2 *xDim*yDim*zNum];
float f3 = f[(j+i*xDim+k*yDim*xDim)+3 *xDim*yDim*zNum];
float f4 = f[(j+i*xDim+k*yDim*xDim)+4 *xDim*yDim*zNum];
float f5 = f[(j+i*xDim+k*yDim*xDim)+5 *xDim*yDim*zNum];
float f6 = f[(j+i*xDim+k*yDim*xDim)+6 *xDim*yDim*zNum];
float f7 = f[(j+i*xDim+k*yDim*xDim)+7 *xDim*yDim*zNum];
float f8 = f[(j+i*xDim+k*yDim*xDim)+8 *xDim*yDim*zNum];
float f9 = f[(j+i*xDim+k*yDim*xDim)+9 *xDim*yDim*zNum];
float f10= f[(j+i*xDim+k*yDim*xDim)+10*xDim*yDim*zNum];
float f11= f[(j+i*xDim+k*yDim*xDim)+11*xDim*yDim*zNum];
float f12= f[(j+i*xDim+k*yDim*xDim)+12*xDim*yDim*zNum];
float f13= f[(j+i*xDim+k*yDim*xDim)+13*xDim*yDim*zNum];
float f14= f[(j+i*xDim+k*yDim*xDim)+14*xDim*yDim*zNum];
float f15= f[(j+i*xDim+k*yDim*xDim)+15*xDim*yDim*zNum];
float f16= f[(j+i*xDim+k*yDim*xDim)+16*xDim*yDim*zNum];
float f17= f[(j+i*xDim+k*yDim*xDim)+17*xDim*yDim*zNum];
float f18= f[(j+i*xDim+k*yDim*xDim)+18*xDim*yDim*zNum];
float rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9;
rho +=f10+f11+f12+f13+f14+f15+f16+f17+f18;
float u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
float v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
float w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
float m1 = 19.f*(-f0+ f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18 -(u*u+v*v+w*w));
float m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+- f9+ f10+ -2.f*f11+ f12+-2.f*f13+- f14+ f15+ -2.f*f16+ f17+-2.f*f18 -(2.f*u*u-(v*v+w*w));
float m11 = f2 + f4+ f5+ f6+ f7+ f8+- f9+-f10 +-f12 +- f14+-f15 +-f17 -(v*v-w*w);
float m13 = f5+-f6+ f7+-f8 -u*v;
float m14 = f11 +- f13 + - f16 + f18 -v*w;
float m15 = f10 + - f12 +-f15 + f17 -u*w;
float PI11 = -0.026315789f*m1-0.5f *omega*m9;
float PI22 = -0.026315789f*m1+0.25f*omega*(m9-3.0f*m11);
float PI33 = -0.026315789f*m1+0.25f*omega*(m9+3.0f*m11);
float PI12 = -1.5f*omega*m13;
float PI23 = -1.5f*omega*m14;
float PI13 = -1.5f*omega*m15;
//float nu0 = ((1.0f/omega)-0.5f)/3.0f;
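//Smag is the magnitude of the non-equilibrium momentum-flux (strain-rate) tensor
//assembled from the second-order moments above, as would be used by a
//Smagorinsky-type subgrid model.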
float Smag = sqrt(2.f*(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13));
output<<x0+scale*j<<", "<<y0+scale*i<<", "<<z0+scale*(zMin+k)<<", "<<u<<","<<v<<","<<w<<","<<rho<<","
//<<uAv_h[i]<<","<<vAv_h[i]<<", "<<ufluc_h[i]<<","<<vfluc_h[i]<<endl;
<<f0<<","<<f1<<", "<<f9<<","<<f18<<endl;
}}}
}
void WriteForces(float *FX, float *FY, float *FZ, ofstream &output, int ForceTime, int level)
{
float ref = UMAX*UMAX*ZDIM*OBSTR1;
if(level > 0)
ref *= LRLEVEL*LRLEVEL;
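//ref is a reference force scale, UMAX^2*ZDIM*OBSTR1 (roughly dynamic pressure
//times a frontal length); on a refined level lattice lengths are LRLEVEL times
//larger, hence the LRLEVEL*LRLEVEL factor.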
for(int i = 0; i<ForceTime; i++){
output<<i+ForceTime<<", "<<FX[i]/ref<<", "<<FY[i]/ref<<", "<<FZ[i]/ref<<endl;
}
}
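//main: multi-GPU driver. For each device it creates separate halo and interior
//streams, enables peer access to the other devices, allocates ping-pong (A/B)
//distribution arrays for the interior plus g/h buffers for the bottom/top halo
//slices, and launches the initialization kernels.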
int main(int argc, char *argv[])
{
int GPU_N;
cudaGetDeviceCount(&GPU_N);
//GPU_N = 1;
cout<<"number of GPUs: "<<GPU_N<<endl;
int outputflag = 1;
if(argc>1){
if(strcmp(argv[1],"-no")==0){
outputflag = 0;
cout<<"no outputs option\n";
}
}
ofstream output;
ofstream outputForce;
string FileName = CASENAME;
//output.open ("LBM1_out.dat");
output.open ((FileName+".dat").c_str());
outputForce.open ((FileName+".force").c_str());
//size_t memsize, memsize2;
size_t pitch = 2;
while(pitch<XDIM)
pitch=pitch*2;
pitch = pitch*sizeof(float);
size_t pitch_elements = pitch/sizeof(float);
cout<<"Pitch (in elements): "<<pitch/sizeof(float)<<endl;
int i, nBlocks;
float omega, CharLength;
CharLength = OBSTR1*2.f;
omega = 1.0f/(3.0f*(UMAX*CharLength/RE)+0.5f);
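//omega = 1/(3*nu+0.5) with nu = UMAX*CharLength/RE, i.e. the BGK relaxation rate
//for the target Reynolds number based on the obstacle diameter CharLength.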
float omegaLR = 2.0f/(1.0f+2.0f*(2.0f/omega-1.0f));
if(LRFACTOR == 0.25f){
omegaLR = 2.0f/(1.0f+2.0f*(2.0f/omegaLR-1.0f));
}
float SF_cf = omega*(1.0f-omegaLR)/((1.0f-omega)*omegaLR/LRFACTOR);
float SF_fc = 1.f/SF_cf;
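//SF_cf rescales the non-equilibrium part of the distributions when data passes
//from the coarse to the fine grid (SF_fc is the inverse direction), keeping the
//viscous stress consistent across the refinement interface.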
cout<<"omega : "<<omega<<endl;
cout<<"omegaLR : "<<omegaLR<<endl;
cout<<"blocksize: "<<BLOCKSIZEX<<"x"<<BLOCKSIZEY<<"x"<<BLOCKSIZEZ<<endl;
cout<<"grid: "<<XDIM<<"x"<<YDIM<<"x"<<ZDIM<<endl;
cout<<"gridLR: "<<XLRDIM<<"x"<<YLRDIM<<"x"<<ZLRDIM<<endl;
cout<<"TMAX: "<<TMAX<<endl;
cout<<"Method: "<<METHOD<<endl;
cout<<"Model: "<<MODEL<<endl;
cout<<"Refinement: "<<LRLEVEL<<endl;
if(abs(LRFACTOR-1.f/LRLEVEL)>0.001f){
cout<<"LRLEVEL and LRFACTOR don't match! Exiting..."<<endl;
return 0;
}
int zInner = ZDIM/GPU_N-2; //excluding halo
//int zGPU = ZDIM/GPU_N;//z nodes per GPU (including halo)
//nBlocks does not include the halo layers
nBlocks = ((XDIM+BLOCKSIZEX-1)/BLOCKSIZEX)*((YDIM+BLOCKSIZEY-1)/BLOCKSIZEY)
*((zInner+BLOCKSIZEZ-1)/BLOCKSIZEZ);
cout<<"nBlocks:"<<nBlocks<<endl;
int ForceTime = max(0,TMAX-STARTF);
dim3 threads(BLOCKSIZEX, BLOCKSIZEY, BLOCKSIZEZ);
//each GPU carries 2 halo layers (bottom and top); the interior grid below excludes them
dim3 grid(((XDIM+BLOCKSIZEX-1)/BLOCKSIZEX),((YDIM+BLOCKSIZEY-1)/BLOCKSIZEY),(zInner)/BLOCKSIZEZ);
dim3 g_grid(((XDIM+BLOCKSIZEX-1)/BLOCKSIZEX),((YDIM+BLOCKSIZEY-1)/BLOCKSIZEY),1);
dim3 h_grid(((XDIM+BLOCKSIZEX-1)/BLOCKSIZEX),((YDIM+BLOCKSIZEY-1)/BLOCKSIZEY),1);
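//grid covers the zInner interior slices; g_grid and h_grid are single-slice
//launches for the bottom and top halo buffers of each GPU.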
cudaStream_t stream_halo[GPU_N];
cudaStream_t stream_inner[GPU_N];
//data pointers as 3D array (GPUxCoord)
float *f_inner_h[GPU_N], *g_h[GPU_N], *h_h[GPU_N];
float *f_inner_A_d[GPU_N], *g_A_d[GPU_N], *h_A_d[GPU_N];
float *f_inner_B_d[GPU_N], *g_B_d[GPU_N], *h_B_d[GPU_N];
float *g_temp[GPU_N], *h_temp[GPU_N];
float *FX_h[GPU_N],*FY_h[GPU_N],*FZ_h[GPU_N];
float *FX_d[GPU_N],*FY_d[GPU_N],*FZ_d[GPU_N];
float *FX_total,*FY_total,*FZ_total;
FX_total = (float *)malloc(ForceTime*sizeof(float));
FY_total = (float *)malloc(ForceTime*sizeof(float));
FZ_total = (float *)malloc(ForceTime*sizeof(float));
for(i=0;i<(ForceTime);i++){
FX_total[i] = 0;
FY_total[i] = 0;
FZ_total[i] = 0;
}
//Malloc and Initialize for each GPU
for(int n = 0; n<GPU_N; n++){
f_inner_h[n] = (float *)malloc(XDIM*YDIM*zInner*19*sizeof(float));
g_h [n] = (float *)malloc(XDIM*YDIM* 19*sizeof(float));
h_h [n] = (float *)malloc(XDIM*YDIM* 19*sizeof(float));
FX_h [n] = (float *)malloc(ForceTime*sizeof(float));
FY_h [n] = (float *)malloc(ForceTime*sizeof(float));
FZ_h [n] = (float *)malloc(ForceTime*sizeof(float));
cudaSetDevice(n);
cudaStreamCreate(&stream_halo[n]);
cudaStreamCreate(&stream_inner[n]);
for(int m = 0; m<GPU_N; m++){
if(m != n)
cudaDeviceEnablePeerAccess(m,0);
}
cudaMalloc((void **) &f_inner_A_d[n], pitch*YDIM*zInner*19*sizeof(float));
cudaMalloc((void **) &f_inner_B_d[n], pitch*YDIM*zInner*19*sizeof(float));
cudaMalloc((void **) & g_A_d[n], pitch*YDIM* 19*sizeof(float));
cudaMalloc((void **) & g_B_d[n], pitch*YDIM* 19*sizeof(float));
cudaMalloc((void **) & h_A_d[n], pitch*YDIM* 19*sizeof(float));
cudaMalloc((void **) & h_B_d[n], pitch*YDIM* 19*sizeof(float));
cudaMalloc((void **) & g_temp[n], pitch*YDIM* 19*sizeof(float));
cudaMalloc((void **) & h_temp[n], pitch*YDIM* 19*sizeof(float));
cudaMalloc((void **) & FX_d[n], (ForceTime)*sizeof(float));
cudaMalloc((void **) & FY_d[n], (ForceTime)*sizeof(float));
cudaMalloc((void **) & FZ_d[n], (ForceTime)*sizeof(float));
//initialize host f_inner
for (i = 0; i < XDIM*YDIM*zInner*19; i++)
f_inner_h[n][i] = 0;
//initialize host g,h
for (i = 0; i < XDIM*YDIM*19; i++){
g_h[n][i] = 0;
h_h[n][i] = 0;
}
for(i=0;i<(ForceTime);i++){
FX_h[n][i] = 0;
FY_h[n][i] = 0;
FZ_h[n][i] = 0;
}
cudaMemcpy2D(f_inner_A_d[n],pitch,f_inner_h[n],XDIM*sizeof(float),XDIM*sizeof(float),YDIM*zInner*19,cudaMemcpyHostToDevice);
cudaMemcpy2D(f_inner_B_d[n],pitch,f_inner_h[n],XDIM*sizeof(float),XDIM*sizeof(float),YDIM*zInner*19,cudaMemcpyHostToDevice);
cudaMemcpy2D( g_A_d[n],pitch, g_h[n],XDIM*sizeof(float),XDIM*sizeof(float),YDIM* 19,cudaMemcpyHostToDevice);
cudaMemcpy2D( g_B_d[n],pitch, g_h[n],XDIM*sizeof(float),XDIM*sizeof(float),YDIM* 19,cudaMemcpyHostToDevice);
cudaMemcpy2D( h_A_d[n],pitch, h_h[n],XDIM*sizeof(float),XDIM*sizeof(float),YDIM* 19,cudaMemcpyHostToDevice);
cudaMemcpy2D( h_B_d[n],pitch, h_h[n],XDIM*sizeof(float),XDIM*sizeof(float),YDIM* 19,cudaMemcpyHostToDevice);
cudaMemcpy( FX_d[n], FX_h[n],sizeof(float)*(ForceTime),cudaMemcpyHostToDevice);
cudaMemcpy( FY_d[n], FY_h[n],sizeof(float)*(ForceTime),cudaMemcpyHostToDevice);
cudaMemcpy( FZ_d[n], FZ_h[n],sizeof(float)*(ForceTime),cudaMemcpyHostToDevice);
initialize_single<<<grid , threads>>>(f_inner_A_d[n],pitch_elements,YDIM,ZDIM,GPU_N,0);
initialize_single<<<grid , threads>>>(f_inner_B_d[n],pitch_elements,YDIM,ZDIM,GPU_N,0);
initialize_buffer<<<g_grid, threads>>>( g_A_d[n],pitch_elements,YDIM,GPU_N,0);
initialize_buffer<<<g_grid, threads>>>( g_B_d[n],pitch_elements,YDIM,GPU_N,0);
initialize_buffer<<<g_grid, threads>>>( h_A_d[n],pitch_elements,YDIM,GPU_N,0);
initialize_buffer<<<g_grid, threads>>>( h_B_d[n],pitch_elements,YDIM,GPU_N,0);
initialize_buffer<<<g_grid, threads>>>( g_temp[n],pitch_elements,YDIM,GPU_N,0);
initialize_buffer<<<g_grid, threads>>>( h_temp[n],pitch_elements,YDIM,GPU_N,0);
}//end Malloc and Initialize
//data pointers for LR
float *f_inner_LR_h[GPU_N], *g_LR_h[GPU_N], *h_LR_h[GPU_N];
float *f_inner_LR_A_d[GPU_N], *g_LR_A_d[GPU_N], *h_LR_A_d[GPU_N];
float *f_inner_LR_B_d[GPU_N], *g_LR_B_d[GPU_N], *h_LR_B_d[GPU_N];
float *g_LR_temp[GPU_N], *h_LR_temp[GPU_N];
size_t LRpitch = 2;
while(LRpitch<XLRDIM)
LRpitch=LRpitch*2;
LRpitch = LRpitch*sizeof(float);
size_t LRpitch_elements = LRpitch/sizeof(float);
cout<<"LR Pitch (in elements): "<<LRpitch/sizeof(float)<<endl;
int zLRInner = ZLRDIM/GPU_N-2;
dim3 LRthreads(BLOCKSIZELRX, BLOCKSIZELRY, BLOCKSIZELRZ);
dim3 LRgrid(((XLRDIM+BLOCKSIZELRX-1)/BLOCKSIZELRX),((YLRDIM+BLOCKSIZELRY-1)/BLOCKSIZELRY),(zLRInner)/BLOCKSIZELRZ);
dim3 g_LR_grid(((XLRDIM+BLOCKSIZELRX-1)/BLOCKSIZELRX),((YLRDIM+BLOCKSIZELRY-1)/BLOCKSIZELRY),1);
//LR setup
if(REFINEMENT == 1){
for(int n = 0; n<GPU_N; n++){
f_inner_LR_h[n] = (float *)malloc(XLRDIM*YLRDIM*zLRInner*19*sizeof(float));
g_LR_h [n] = (float *)malloc(XLRDIM*YLRDIM* 19*sizeof(float));
h_LR_h [n] = (float *)malloc(XLRDIM*YLRDIM* 19*sizeof(float));
cudaSetDevice(n);
cudaMalloc((void **) &f_inner_LR_A_d[n], LRpitch*YLRDIM*zLRInner*19*sizeof(float));
cudaMalloc((void **) &f_inner_LR_B_d[n], LRpitch*YLRDIM*zLRInner*19*sizeof(float));
cudaMalloc((void **) & g_LR_A_d[n], LRpitch*YLRDIM* 19*sizeof(float));
cudaMalloc((void **) & g_LR_B_d[n], LRpitch*YLRDIM* 19*sizeof(float));
cudaMalloc((void **) & h_LR_A_d[n], LRpitch*YLRDIM* 19*sizeof(float));
cudaMalloc((void **) & h_LR_B_d[n], LRpitch*YLRDIM* 19*sizeof(float));
cudaMalloc((void **) & g_LR_temp[n], LRpitch*YLRDIM* 19*sizeof(float));
cudaMalloc((void **) & h_LR_temp[n], LRpitch*YLRDIM* 19*sizeof(float));
//initialize host f_inner
for (i = 0; i < XLRDIM*YLRDIM*zLRInner*19; i++)
f_inner_LR_h[n][i] = 0;
//initialize host g,h
for (i = 0; i < XLRDIM*YLRDIM*19; i++){
g_LR_h[n][i] = 0;
h_LR_h[n][i] = 0;
}
cudaMemcpy2D(f_inner_LR_A_d[n],LRpitch,f_inner_LR_h[n],XLRDIM*sizeof(float),XLRDIM*sizeof(float),YLRDIM*zLRInner*19,cudaMemcpyHostToDevice);
cudaMemcpy2D(f_inner_LR_B_d[n],LRpitch,f_inner_LR_h[n],XLRDIM*sizeof(float),XLRDIM*sizeof(float),YLRDIM*zLRInner*19,cudaMemcpyHostToDevice);
cudaMemcpy2D( g_LR_A_d[n],LRpitch, g_LR_h[n],XLRDIM*sizeof(float),XLRDIM*sizeof(float),YLRDIM* 19,cudaMemcpyHostToDevice);
cudaMemcpy2D( g_LR_B_d[n],LRpitch, g_LR_h[n],XLRDIM*sizeof(float),XLRDIM*sizeof(float),YLRDIM* 19,cudaMemcpyHostToDevice);
cudaMemcpy2D( h_LR_A_d[n],LRpitch, h_LR_h[n],XLRDIM*sizeof(float),XLRDIM*sizeof(float),YLRDIM* 19,cudaMemcpyHostToDevice);
cudaMemcpy2D( h_LR_B_d[n],LRpitch, h_LR_h[n],XLRDIM*sizeof(float),XLRDIM*sizeof(float),YLRDIM* 19,cudaMemcpyHostToDevice);
initialize_single<<<LRgrid , LRthreads>>>(f_inner_LR_A_d[n],LRpitch_elements,YLRDIM,ZLRDIM,GPU_N,LRLEVEL);
initialize_single<<<LRgrid , LRthreads>>>(f_inner_LR_B_d[n],LRpitch_elements,YLRDIM,ZLRDIM,GPU_N,LRLEVEL);
initialize_buffer<<<g_LR_grid, LRthreads>>>( g_LR_A_d[n],LRpitch_elements,YLRDIM,GPU_N,LRLEVEL);
initialize_buffer<<<g_LR_grid, LRthreads>>>( g_LR_B_d[n],LRpitch_elements,YLRDIM,GPU_N,LRLEVEL);
initialize_buffer<<<g_LR_grid, LRthreads>>>( h_LR_A_d[n],LRpitch_elements,YLRDIM,GPU_N,LRLEVEL);
initialize_buffer<<<g_LR_grid, LRthreads>>>( h_LR_B_d[n],LRpitch_elements,YLRDIM,GPU_N,LRLEVEL);
initialize_buffer<<<g_LR_grid, LRthreads>>>( g_LR_temp[n],LRpitch_elements,YLRDIM,GPU_N,LRLEVEL);
initialize_buffer<<<g_LR_grid, LRthreads>>>( h_LR_temp[n],LRpitch_elements,YLRDIM,GPU_N,LRLEVEL);
}//end of GPU loop for malloc and initialize for LR
}//end of LR malloc and initialize
struct timeval tdr0,tdr1;
double restime;
cudaDeviceSynchronize();
gettimeofday (&tdr0,NULL);
//Time loop
for(int t = 0; t<TMAX; t+=2){
//A->B
for(int n = 0; n<GPU_N; n++){
cudaSetDevice(n);
if(t>=STARTF && REFINEMENT == 0){
update_top_force <<<h_grid, threads, 0, stream_halo [n]>>>(h_A_d[n],h_B_d[n],f_inner_A_d[n],h_temp[n],omega,pitch_elements,n,zInner,FX_d[n],FY_d[n],FZ_d[n],t-STARTF);
update_bottom_force<<<h_grid, threads, 0, stream_halo [n]>>>(g_A_d[n],g_B_d[n],f_inner_A_d[n],g_temp[n],omega,pitch_elements,n,zInner,FX_d[n],FY_d[n],FZ_d[n],t-STARTF);
}
else{
update_top <<<h_grid, threads, 0, stream_halo [n]>>>(h_A_d[n],h_B_d[n],f_inner_A_d[n],h_temp[n],omega,pitch_elements,n,zInner);
update_bottom<<<h_grid, threads, 0, stream_halo [n]>>>(g_A_d[n],g_B_d[n],f_inner_A_d[n],g_temp[n],omega,pitch_elements,n,zInner);
}
}
for(int n = 0; n<GPU_N; n++)
cudaStreamSynchronize(stream_halo[n]);
for(int n = 0; n<GPU_N; n++){
cudaSetDevice(n);
if(t>=STARTF && REFINEMENT == 0){
update_inner_force <<< grid, threads, 0, stream_inner[n]>>>(f_inner_A_d[n],f_inner_B_d[n], g_A_d[n], h_A_d[n],omega,pitch_elements,n,zInner,FX_d[n],FY_d[n],FZ_d[n],t-STARTF);
}
else{
update_inner <<< grid, threads, 0, stream_inner[n]>>>(f_inner_A_d[n],f_inner_B_d[n], g_A_d[n], h_A_d[n],omega,pitch_elements,n,zInner);
}
}
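//exchange halo buffers between neighboring GPUs: GPU n receives the bottom (g) layer of GPU n+1
//into its top buffer h_temp, and the top (h) layer of GPU n-1 into its bottom buffer g_temp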
for(int n = 0; n<GPU_N; n++)
cudaMemcpyPeerAsync(&h_temp[n][0],n,&g_B_d[ (n+1)%GPU_N][0], (n+1)%GPU_N,pitch_elements*YDIM*sizeof(float)*19,stream_halo[n]);
for(int n = 0; n<GPU_N; n++)
cudaMemcpyPeerAsync(&g_temp[n][0],n,&h_B_d[abs(n-1)%GPU_N][0],abs(n-1)%GPU_N,pitch_elements*YDIM*sizeof(float)*19,stream_halo[n]);
if(REFINEMENT == 1){
for(int n = 0; n<GPU_N; n++){
cudaSetDevice(n);
if(t>=STARTF){
update_top_LR_force <<<g_LR_grid, LRthreads, 0, stream_halo [n]>>>( h_LR_A_d[n], h_LR_B_d[n],f_inner_LR_A_d[n],h_LR_temp[n],omegaLR,LRpitch_elements,n,zLRInner,FX_d[n],FY_d[n],FZ_d[n],t-STARTF);
update_bottom_LR_force<<<g_LR_grid, LRthreads, 0, stream_halo [n]>>>( g_LR_A_d[n], g_LR_B_d[n],f_inner_LR_A_d[n],g_LR_temp[n],omegaLR,LRpitch_elements,n,zLRInner,FX_d[n],FY_d[n],FZ_d[n],t-STARTF);
}
else{
update_top_LR <<<g_LR_grid, LRthreads, 0, stream_halo [n]>>>( h_LR_A_d[n], h_LR_B_d[n],f_inner_LR_A_d[n],h_LR_temp[n],omegaLR,LRpitch_elements,n,zLRInner);
update_bottom_LR<<<g_LR_grid, LRthreads, 0, stream_halo [n]>>>( g_LR_A_d[n], g_LR_B_d[n],f_inner_LR_A_d[n],g_LR_temp[n],omegaLR,LRpitch_elements,n,zLRInner);
}
}
for(int n = 0; n<GPU_N; n++)
cudaStreamSynchronize(stream_halo[n]);
for(int n = 0; n<GPU_N; n++){
cudaSetDevice(n);
if(t>=STARTF){
update_inner_LR_force <<< LRgrid, LRthreads, 0, stream_inner[n]>>>(f_inner_LR_A_d[n],f_inner_LR_B_d[n], g_LR_A_d[n], h_LR_A_d[n],omegaLR,LRpitch_elements,n,zLRInner,FX_d[n],FY_d[n],FZ_d[n],t-STARTF);
}
else{
update_inner_LR <<< LRgrid, LRthreads, 0, stream_inner[n]>>>(f_inner_LR_A_d[n],f_inner_LR_B_d[n], g_LR_A_d[n], h_LR_A_d[n],omegaLR,LRpitch_elements,n,zLRInner);
}
}
for(int n = 0; n<GPU_N; n++){
cudaMemcpyPeerAsync(&h_LR_temp[n][LRpitch_elements*YLRDIM*14],n,&g_LR_B_d[ (n+1)%GPU_N][LRpitch_elements*YLRDIM*14], (n+1)%GPU_N,LRpitch_elements*YLRDIM*sizeof(float)*5,stream_halo[n]);
cudaMemcpyPeerAsync(&g_LR_temp[n][LRpitch_elements*YLRDIM*9 ],n,&h_LR_B_d[abs(n-1)%GPU_N][LRpitch_elements*YLRDIM*9 ],abs(n-1)%GPU_N,LRpitch_elements*YLRDIM*sizeof(float)*5,stream_halo[n]);
}
for(int n = 0; n<GPU_N; n++){
cudaSetDevice(n);
cudaDeviceSynchronize();
}
for(int n = 0; n<GPU_N; n++){
cudaSetDevice(n);
update_top_LR_interp <<<g_LR_grid, LRthreads, 0, stream_halo [n]>>>(h_LR_B_d[n],h_LR_A_d[n],f_inner_LR_B_d[n],h_LR_temp[n],h_B_d[n],h_temp[n],omegaLR,LRpitch_elements,zLRInner,pitch_elements,zInner,SF_cf,n);
update_bottom_LR_interp<<<g_LR_grid, LRthreads, 0, stream_halo [n]>>>(g_LR_B_d[n],g_LR_A_d[n],f_inner_LR_B_d[n],g_LR_temp[n],g_B_d[n],g_temp[n],omegaLR,LRpitch_elements,zLRInner,pitch_elements,zInner,SF_cf,n);
}
for(int n = 0; n<GPU_N; n++)
cudaStreamSynchronize(stream_halo[n]);
for(int n = 0; n<GPU_N; n++){
cudaSetDevice(n);
update_inner_LR_interp<<<LRgrid,LRthreads,0,stream_inner[n]>>>(f_inner_LR_B_d[n],f_inner_LR_A_d[n],g_LR_B_d[n],h_LR_B_d[n],f_inner_B_d[n],g_B_d[n],h_B_d[n],omegaLR,LRpitch_elements,zLRInner,pitch_elements,zInner,SF_cf,n);
}
for(int n = 0; n<GPU_N; n++){
cudaMemcpyPeerAsync(&h_LR_temp[n][LRpitch_elements*YLRDIM*14],n,&g_LR_A_d[ (n+1)%GPU_N][LRpitch_elements*YLRDIM*14], (n+1)%GPU_N,LRpitch_elements*YLRDIM*sizeof(float)*5,stream_halo[n]);
cudaMemcpyPeerAsync(&g_LR_temp[n][LRpitch_elements*YLRDIM*9 ],n,&h_LR_A_d[abs(n-1)%GPU_N][LRpitch_elements*YLRDIM*9 ],abs(n-1)%GPU_N,LRpitch_elements*YLRDIM*sizeof(float)*5,stream_halo[n]);
}
for(int n = 0; n<GPU_N; n++){
cudaSetDevice(n);
f_Extract<<<grid,threads,0,stream_inner[n]>>>(f_inner_B_d[n],f_inner_LR_A_d[n],g_LR_A_d[n],h_LR_A_d[n],pitch_elements,LRpitch_elements,zInner,zLRInner,SF_fc,n);
g_Extract<<<grid,threads,0,stream_inner[n]>>>(g_B_d[n],f_inner_LR_A_d[n],g_LR_A_d[n],pitch_elements,LRpitch_elements,zInner,zLRInner,SF_fc,n);
h_Extract<<<grid,threads,0,stream_inner[n]>>>(h_B_d[n],f_inner_LR_A_d[n],h_LR_A_d[n],pitch_elements,LRpitch_elements,zInner,zLRInner,SF_fc,n);
}
}//end refinement
for(int n = 0; n<GPU_N; n++){
cudaSetDevice(n);
cudaDeviceSynchronize();
}
//B->A
for(int n = 0; n<GPU_N; n++){
cudaSetDevice(n);
if(t+1>=STARTF && REFINEMENT == 0){
update_top_force <<<h_grid, threads, 0, stream_halo [n]>>>(h_B_d[n],h_A_d[n],f_inner_B_d[n],h_temp[n],omega,pitch_elements,n,zInner,FX_d[n],FY_d[n],FZ_d[n],t-STARTF+1);
update_bottom_force<<<h_grid, threads, 0, stream_halo [n]>>>(g_B_d[n],g_A_d[n],f_inner_B_d[n],g_temp[n],omega,pitch_elements,n,zInner,FX_d[n],FY_d[n],FZ_d[n],t-STARTF+1);
}
else{
update_top <<<h_grid, threads, 0, stream_halo [n]>>>( h_B_d[n], h_A_d[n],f_inner_B_d[n],h_temp[n],omega,pitch_elements,n,zInner);
update_bottom<<<h_grid, threads, 0, stream_halo [n]>>>( g_B_d[n], g_A_d[n],f_inner_B_d[n],g_temp[n],omega,pitch_elements,n,zInner);
}
}
for(int n = 0; n<GPU_N; n++)
cudaStreamSynchronize(stream_halo[n]);
for(int n = 0; n<GPU_N; n++){
cudaSetDevice(n);
if(t+1>=STARTF && REFINEMENT == 0){
update_inner_force <<< grid, threads, 0, stream_inner[n]>>>(f_inner_B_d[n],f_inner_A_d[n], g_B_d[n], h_B_d[n],omega,pitch_elements,n,zInner,FX_d[n],FY_d[n],FZ_d[n],t-STARTF+1);
}
else{
update_inner <<< grid, threads, 0, stream_inner[n]>>>(f_inner_B_d[n],f_inner_A_d[n], g_B_d[n], h_B_d[n],omega,pitch_elements,n,zInner);
}
}
for(int n = 0; n<GPU_N; n++)
cudaMemcpyPeerAsync(&h_temp[n][0],n,&g_A_d[ (n+1)%GPU_N][0], (n+1)%GPU_N,pitch_elements*YDIM*sizeof(float)*19,stream_halo[n]);
for(int n = 0; n<GPU_N; n++)
cudaMemcpyPeerAsync(&g_temp[n][0],n,&h_A_d[abs(n-1)%GPU_N][0],abs(n-1)%GPU_N,pitch_elements*YDIM*sizeof(float)*19,stream_halo[n]);
if(REFINEMENT == 1){
for(int n = 0; n<GPU_N; n++){
cudaSetDevice(n);
if(t+1>=STARTF){
update_top_LR_force <<<g_LR_grid, LRthreads, 0, stream_halo [n]>>>( h_LR_A_d[n], h_LR_B_d[n],f_inner_LR_A_d[n],h_LR_temp[n],omegaLR,LRpitch_elements,n,zLRInner,FX_d[n],FY_d[n],FZ_d[n],t+1-STARTF);
update_bottom_LR_force<<<g_LR_grid, LRthreads, 0, stream_halo [n]>>>( g_LR_A_d[n], g_LR_B_d[n],f_inner_LR_A_d[n],g_LR_temp[n],omegaLR,LRpitch_elements,n,zLRInner,FX_d[n],FY_d[n],FZ_d[n],t+1-STARTF);
}
else{
update_top_LR <<<g_LR_grid, LRthreads, 0, stream_halo [n]>>>( h_LR_A_d[n], h_LR_B_d[n],f_inner_LR_A_d[n],h_LR_temp[n],omegaLR,LRpitch_elements,n,zLRInner);
update_bottom_LR<<<g_LR_grid, LRthreads, 0, stream_halo [n]>>>( g_LR_A_d[n], g_LR_B_d[n],f_inner_LR_A_d[n],g_LR_temp[n],omegaLR,LRpitch_elements,n,zLRInner);
}
}
for(int n = 0; n<GPU_N; n++)
cudaStreamSynchronize(stream_halo[n]);
for(int n = 0; n<GPU_N; n++){
cudaSetDevice(n);
if(t+1>=STARTF){
update_inner_LR_force <<< LRgrid, LRthreads, 0, stream_inner[n]>>>(f_inner_LR_A_d[n],f_inner_LR_B_d[n], g_LR_A_d[n], h_LR_A_d[n],omegaLR,LRpitch_elements,n,zLRInner,FX_d[n],FY_d[n],FZ_d[n],t+1-STARTF);
}
else{
update_inner_LR <<< LRgrid, LRthreads, 0, stream_inner[n]>>>(f_inner_LR_A_d[n],f_inner_LR_B_d[n], g_LR_A_d[n], h_LR_A_d[n],omegaLR,LRpitch_elements,n,zLRInner);
}
}
for(int n = 0; n<GPU_N; n++){
cudaMemcpyPeerAsync(&h_LR_temp[n][LRpitch_elements*YLRDIM*14],n,&g_LR_B_d[ (n+1)%GPU_N][LRpitch_elements*YLRDIM*14], (n+1)%GPU_N,LRpitch_elements*YLRDIM*sizeof(float)*5,stream_halo[n]);
cudaMemcpyPeerAsync(&g_LR_temp[n][LRpitch_elements*YLRDIM*9 ],n,&h_LR_B_d[abs(n-1)%GPU_N][LRpitch_elements*YLRDIM*9 ],abs(n-1)%GPU_N,LRpitch_elements*YLRDIM*sizeof(float)*5,stream_halo[n]);
}
for(int n = 0; n<GPU_N; n++){
cudaSetDevice(n);
cudaDeviceSynchronize();
}
for(int n = 0; n<GPU_N; n++){
cudaSetDevice(n);
update_top_LR_interp <<<g_LR_grid, LRthreads, 0, stream_halo [n]>>>(h_LR_B_d[n],h_LR_A_d[n],f_inner_LR_B_d[n],h_LR_temp[n],h_A_d[n],h_temp[n],omegaLR,LRpitch_elements,zLRInner,pitch_elements,zInner,SF_cf,n);
update_bottom_LR_interp<<<g_LR_grid, LRthreads, 0, stream_halo [n]>>>(g_LR_B_d[n],g_LR_A_d[n],f_inner_LR_B_d[n],g_LR_temp[n],g_A_d[n],g_temp[n],omegaLR,LRpitch_elements,zLRInner,pitch_elements,zInner,SF_cf,n);
}
for(int n = 0; n<GPU_N; n++)
cudaStreamSynchronize(stream_halo[n]);
for(int n = 0; n<GPU_N; n++){
cudaSetDevice(n);
update_inner_LR_interp <<<LRgrid,LRthreads,0,stream_inner[n]>>>(f_inner_LR_B_d[n],f_inner_LR_A_d[n],g_LR_B_d[n],h_LR_B_d[n],f_inner_A_d[n],g_A_d[n],h_A_d[n],omegaLR,LRpitch_elements,zLRInner,pitch_elements,zInner,SF_cf,n);
}
for(int n = 0; n<GPU_N; n++){
cudaMemcpyPeerAsync(&h_LR_temp[n][LRpitch_elements*YLRDIM*14],n,&g_LR_A_d[ (n+1)%GPU_N][LRpitch_elements*YLRDIM*14], (n+1)%GPU_N,LRpitch_elements*YLRDIM*sizeof(float)*5,stream_halo[n]);
cudaMemcpyPeerAsync(&g_LR_temp[n][LRpitch_elements*YLRDIM*9 ],n,&h_LR_A_d[abs(n-1)%GPU_N][LRpitch_elements*YLRDIM*9 ],abs(n-1)%GPU_N,LRpitch_elements*YLRDIM*sizeof(float)*5,stream_halo[n]);
}
for(int n = 0; n<GPU_N; n++){
cudaSetDevice(n);
f_Extract<<<grid,threads,0,stream_inner[n]>>>(f_inner_A_d[n],f_inner_LR_A_d[n],g_LR_A_d[n],h_LR_A_d[n],pitch_elements,LRpitch_elements,zInner,zLRInner,SF_fc,n);
g_Extract<<<grid,threads,0,stream_inner[n]>>>(g_A_d[n],f_inner_LR_A_d[n],g_LR_A_d[n],pitch_elements,LRpitch_elements,zInner,zLRInner,SF_fc,n);
h_Extract<<<grid,threads,0,stream_inner[n]>>>(h_A_d[n],f_inner_LR_A_d[n],h_LR_A_d[n],pitch_elements,LRpitch_elements,zInner,zLRInner,SF_fc,n);
}
}
for(int n = 0; n<GPU_N; n++){
cudaSetDevice(n);
cudaDeviceSynchronize();
}
}//end Time loop
cudaDeviceSynchronize();
gettimeofday (&tdr1,NULL);
timeval_subtract (&restime, &tdr1, &tdr0);
int Nodes;
Nodes = XDIM*YDIM*ZDIM;
if (REFINEMENT == 1)
Nodes += XLRDIM*YLRDIM*ZLRDIM*LRLEVEL;
cout<<"Time taken for main kernel: "<<restime<<" ("
<<double(Nodes*double(TMAX/1000000.f))/restime<<"MLUPS)\n";
output<<"VARIABLES = \"X\",\"Y\",\"Z\",\"u\",\"v\",\"w\",\"rho\",\"uAv\",\"vAv\",\"ufluc\",\"vfluc\"\n";
output<<"ZONE F=POINT, I="<<XDIM<<", J="<<YDIM<<", K="<<ZDIM<<"\n";
//D2H Memcpy and write results
for(int n = 0; n<GPU_N; n++){
cudaSetDevice(n);
if(outputflag == 1){
cudaMemcpy2D(f_inner_h[n],XDIM*sizeof(float),f_inner_A_d[n],pitch,XDIM*sizeof(float),YDIM*zInner*19,cudaMemcpyDeviceToHost);
cudaMemcpy2D( g_h[n],XDIM*sizeof(float), g_A_d[n],pitch,XDIM*sizeof(float),YDIM* 19,cudaMemcpyDeviceToHost);
cudaMemcpy2D( h_h[n],XDIM*sizeof(float), h_A_d[n],pitch,XDIM*sizeof(float),YDIM* 19,cudaMemcpyDeviceToHost);
cudaMemcpy( FX_h[n],FX_d[n],sizeof(float)*ForceTime,cudaMemcpyDeviceToHost);
cudaMemcpy( FY_h[n],FY_d[n],sizeof(float)*ForceTime,cudaMemcpyDeviceToHost);
cudaMemcpy( FZ_h[n],FZ_d[n],sizeof(float)*ForceTime,cudaMemcpyDeviceToHost);
//Write results
WriteResults( g_h[n],output,omega,XDIM,YDIM,ZDIM/GPU_N*n ,1 ,0,0,0,1);
WriteResults(f_inner_h[n],output,omega,XDIM,YDIM,ZDIM/GPU_N*n+1 ,zInner,0,0,0,1);
WriteResults( h_h[n],output,omega,XDIM,YDIM,ZDIM/GPU_N*(n+1)-1,1 ,0,0,0,1);
}
for(int i=0;i<ForceTime;i++){
FX_total[i] += FX_h[n][i];
FY_total[i] += FY_h[n][i];
FZ_total[i] += FZ_h[n][i];
}
cudaFree(f_inner_A_d[n]);
cudaFree(f_inner_B_d[n]);
cudaFree( g_A_d[n]);
cudaFree( g_B_d[n]);
cudaFree( h_A_d[n]);
cudaFree( h_B_d[n]);
cudaFree( g_temp[n]);
cudaFree( h_temp[n]);
}//end write results
WriteForces(FX_total,FY_total,FZ_total,outputForce,ForceTime,REFINEMENT*LRLEVEL);
if(REFINEMENT == 1){
output<<"VARIABLES = \"X\",\"Y\",\"Z\",\"u\",\"v\",\"w\",\"rho\",\"uAv\",\"vAv\",\"ufluc\",\"vfluc\"\n";
output<<"ZONE F=POINT, I="<<XLRDIM<<", J="<<YLRDIM<<", K="<<ZLRDIM<<"\n";
for(int n = 0; n<GPU_N; n++){
cudaSetDevice(n);
if(outputflag == 1){
cudaMemcpy2D(f_inner_LR_h[n],XLRDIM*sizeof(float),f_inner_LR_A_d[n],LRpitch,XLRDIM*sizeof(float),YLRDIM*zLRInner*19,cudaMemcpyDeviceToHost);
cudaMemcpy2D( g_LR_h[n],XLRDIM*sizeof(float), g_LR_A_d[n],LRpitch,XLRDIM*sizeof(float),YLRDIM* 19,cudaMemcpyDeviceToHost);
cudaMemcpy2D( h_LR_h[n],XLRDIM*sizeof(float), h_LR_A_d[n],LRpitch,XLRDIM*sizeof(float),YLRDIM* 19,cudaMemcpyDeviceToHost);
//Write results
WriteResults( g_LR_h[n],output,omega,XLRDIM,YLRDIM,ZLRDIM/GPU_N*n ,1 ,LRX0,LRY0,LRZ0,LRFACTOR);
WriteResults(f_inner_LR_h[n],output,omega,XLRDIM,YLRDIM,ZLRDIM/GPU_N*n+1 ,zLRInner,LRX0,LRY0,LRZ0,LRFACTOR);
WriteResults( h_LR_h[n],output,omega,XLRDIM,YLRDIM,ZLRDIM/GPU_N*(n+1)-1,1 ,LRX0,LRY0,LRZ0,LRFACTOR);
}
cudaFree(f_inner_LR_A_d[n]);
cudaFree(f_inner_LR_B_d[n]);
cudaFree( g_LR_A_d[n]);
cudaFree( g_LR_B_d[n]);
cudaFree( h_LR_A_d[n]);
cudaFree( h_LR_B_d[n]);
cudaFree( g_LR_temp[n]);
cudaFree( h_LR_temp[n]);
}//end GPU loop for LR
}//end write results of LR
return(0);
}
|
95080724495a4bc8ade098ae15104f20c150f37a.hip
|
// !!! This is a file automatically generated by hipify!!!
// generated by gen_batch_cuda_conv_bias_kern_impls.py
#include "../batch_conv_bias_int8_implicit_gemm_precomp_ncdiv4hw4.cuinl"
template void megdnn::cuda::batch_conv_bias::do_batch_conv_bias_int8_implicit_gemm_precomp_ncdiv4hw4<PerChannelBiasVisitor,
IConvEpilogue<Activation<megdnn::param_enumv::BatchConvBias::NonlineMode::IDENTITY>>>(
const int8_t* d_src,
const int8_t* d_filter,
int* d_workspace,
PerChannelBiasVisitor bias,
IConvEpilogue<Activation<megdnn::param_enumv::BatchConvBias::NonlineMode::IDENTITY>> epilogue,
const ConvParam& param,
float alpha,
float beta,
hipStream_t stream);
|
95080724495a4bc8ade098ae15104f20c150f37a.cu
|
// generated by gen_batch_cuda_conv_bias_kern_impls.py
#include "../batch_conv_bias_int8_implicit_gemm_precomp_ncdiv4hw4.cuinl"
template void megdnn::cuda::batch_conv_bias::do_batch_conv_bias_int8_implicit_gemm_precomp_ncdiv4hw4<PerChannelBiasVisitor,
IConvEpilogue<Activation<megdnn::param_enumv::BatchConvBias::NonlineMode::IDENTITY>>>(
const int8_t* d_src,
const int8_t* d_filter,
int* d_workspace,
PerChannelBiasVisitor bias,
IConvEpilogue<Activation<megdnn::param_enumv::BatchConvBias::NonlineMode::IDENTITY>> epilogue,
const ConvParam& param,
float alpha,
float beta,
cudaStream_t stream);
|
07e64344194d6fd74ebfed9af5f735e3d1d242f4.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void upsampleFilterForegroundMaskKernel( hipTextureObject_t subsampled_mask, unsigned upsample_rows, unsigned upsample_cols, unsigned sample_rate, const float sigma, hipSurfaceObject_t upsampled_mask, hipSurfaceObject_t filter_mask ) {
const int x = threadIdx.x + blockDim.x * blockIdx.x;
const int y = threadIdx.y + blockDim.y * blockIdx.y;
if(x >= upsample_cols || y >= upsample_rows) return;
//A window search
const int halfsize = __float2uint_ru(sigma) * 2;
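//the search window spans +/- 2*ceil(sigma) pixels around (x, y)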
float total_weight = 0.0f;
float total_value = 0.0f;
for(int neighbor_y = y - halfsize; neighbor_y <= y + halfsize; neighbor_y++) {
for(int neighbor_x = x - halfsize; neighbor_x <= x + halfsize; neighbor_x++) {
//Retrieve the mask value at the neighbor
const auto subsampled_neighbor_x = neighbor_x / sample_rate;
const auto subsampled_neighbor_y = neighbor_y / sample_rate;
const unsigned char neighbor_foreground = tex2D<unsigned char>(subsampled_mask, subsampled_neighbor_x, subsampled_neighbor_y);
//Compute the gaussian weight
const float diff_x_square = (neighbor_x - x) * (neighbor_x - x);
const float diff_y_square = (neighbor_y - y) * (neighbor_y - y);
const float weight = __expf(-0.5f * (diff_x_square + diff_y_square) / (sigma * sigma));
//Accumulate it
if(neighbor_x >= 0 && neighbor_x < upsample_cols && neighbor_y >= 0 && neighbor_y < upsample_rows)
{
total_weight += weight;
total_value += weight * float(1 - neighbor_foreground);
}
}
}
//Compute the value locally
const auto subsampled_x = x / sample_rate;
const auto subsampled_y = y / sample_rate;
const unsigned char foreground_indicator = tex2D<unsigned char>(subsampled_mask, subsampled_x, subsampled_y);
float filter_value = 0.0;
if(foreground_indicator == 0) {
filter_value = total_value / (total_weight + 1e-3f);
}
//Write to the surface
surf2Dwrite(foreground_indicator, upsampled_mask, x * sizeof(unsigned char), y);
surf2Dwrite(filter_value, filter_mask, x * sizeof(float), y);
}
|
07e64344194d6fd74ebfed9af5f735e3d1d242f4.cu
|
#include "includes.h"
__global__ void upsampleFilterForegroundMaskKernel( cudaTextureObject_t subsampled_mask, unsigned upsample_rows, unsigned upsample_cols, unsigned sample_rate, const float sigma, cudaSurfaceObject_t upsampled_mask, cudaSurfaceObject_t filter_mask ) {
const int x = threadIdx.x + blockDim.x * blockIdx.x;
const int y = threadIdx.y + blockDim.y * blockIdx.y;
if(x >= upsample_cols || y >= upsample_rows) return;
//A window search
const int halfsize = __float2uint_ru(sigma) * 2;
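//the search window spans +/- 2*ceil(sigma) pixels around (x, y)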
float total_weight = 0.0f;
float total_value = 0.0f;
for(int neighbor_y = y - halfsize; neighbor_y <= y + halfsize; neighbor_y++) {
for(int neighbor_x = x - halfsize; neighbor_x <= x + halfsize; neighbor_x++) {
//Retrieve the mask value at the neighbor
const auto subsampled_neighbor_x = neighbor_x / sample_rate;
const auto subsampled_neighbor_y = neighbor_y / sample_rate;
const unsigned char neighbor_foreground = tex2D<unsigned char>(subsampled_mask, subsampled_neighbor_x, subsampled_neighbor_y);
//Compute the gaussian weight
const float diff_x_square = (neighbor_x - x) * (neighbor_x - x);
const float diff_y_square = (neighbor_y - y) * (neighbor_y - y);
const float weight = __expf(-0.5f * (diff_x_square + diff_y_square) / (sigma * sigma));
//Accumulate it
if(neighbor_x >= 0 && neighbor_x < upsample_cols && neighbor_y >= 0 && neighbor_y < upsample_rows)
{
total_weight += weight;
total_value += weight * float(1 - neighbor_foreground);
}
}
}
//Compute the value locally
const auto subsampled_x = x / sample_rate;
const auto subsampled_y = y / sample_rate;
const unsigned char foreground_indicator = tex2D<unsigned char>(subsampled_mask, subsampled_x, subsampled_y);
float filter_value = 0.0;
if(foreground_indicator == 0) {
filter_value = total_value / (total_weight + 1e-3f);
}
//Write to the surface
surf2Dwrite(foreground_indicator, upsampled_mask, x * sizeof(unsigned char), y);
surf2Dwrite(filter_value, filter_mask, x * sizeof(float), y);
}
|
10bc3ade83651cc2012ecceb536512f5d4d012d6.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include "getRank.h"
#include "/usr/include/cuda/cuda_runtime.h"
#include <rocblas.h>
#include <hipsparse.h>
void makeP(double *Avals, int *rowind, int numRow, int *colind, int nnz, double dP){
hipsparseStatus_t status;
hipsparseHandle_t handle=0;
hipsparseMatDescr_t descr=0;
hipsparseOperation_t transa = HIPSPARSE_OPERATION_NON_TRANSPOSE;
status = hipsparseCreate(&handle);
if (status != HIPSPARSE_STATUS_SUCCESS) {
perror("Cusparse Library Initialization.");
exit(2);
}
status = hipsparseCreateMatDescr(&descr);
if (status != HIPSPARSE_STATUS_SUCCESS) {
perror("Matrix descriptor initialization failed");
exit(2);
}
status = hipsparseSetMatType(descr, HIPSPARSE_MATRIX_TYPE_GENERAL);
if (status != HIPSPARSE_STATUS_SUCCESS) {
perror("hipsparseSetMatType failed");
exit(2);
}
status = hipsparseSetMatIndexBase(descr, HIPSPARSE_INDEX_BASE_ZERO);
if (status != HIPSPARSE_STATUS_SUCCESS) {
perror("hipsparseSetMatIndexBase failed");
exit(2);
}
hipsparseIndexBase_t idxBase = HIPSPARSE_INDEX_BASE_ZERO;
double *one = (double*)malloc(sizeof(double)*(numRow));
double *d = (double*)malloc(numRow*sizeof(double));
double *dev_one, *dev_d, *dev_Avals;
int *dev_csrRowInd, *dev_colind, *dev_rowind;
int i;
dP = .95;
//Convert rowInd vector to CSR format
hipMalloc(&dev_rowind, sizeof(int)*(nnz));
hipMalloc(&dev_csrRowInd, sizeof(int)*(numRow+1));
hipMemcpy(dev_rowind, rowind, sizeof(int) * (nnz), hipMemcpyHostToDevice);
status = hipsparseXcoo2csr(handle, dev_rowind, nnz, numRow, dev_csrRowInd, idxBase);
if (status != HIPSPARSE_STATUS_SUCCESS) {
perror("FAILURE to set csr row indices.");
exit(2);
}
ones(one, numRow);
ones(d, numRow);
// CSR format is the only way supported in CUDA
hipMalloc(&dev_one, sizeof(double)*(numRow));
hipMalloc(&dev_d, sizeof(double)*(numRow));
hipMalloc(&dev_Avals, sizeof(double)*(nnz));
hipMalloc(&dev_colind, sizeof(int)*(nnz));
hipMemcpy(dev_d, d, sizeof(double) * (numRow), hipMemcpyHostToDevice);
hipMemcpy(dev_one, one, sizeof(double) * (numRow), hipMemcpyHostToDevice);
hipMemcpy(dev_Avals, Avals, sizeof(double) * (nnz), hipMemcpyHostToDevice);
hipMemcpy(dev_colind, colind, sizeof(int) * (nnz), hipMemcpyHostToDevice);
//csr multiplication call
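//computes d = A*ones, i.e. the row sums (out-degrees); each nonzero of A is then replaced by dP/rowsum below to form the transition matrix P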
double alpha = 1, beta = 0;
status = hipsparseDcsrmv(handle, transa, numRow, numRow, nnz, &alpha, descr,
dev_Avals, dev_csrRowInd, dev_colind, dev_one, &beta, dev_d);
if (status != HIPSPARSE_STATUS_SUCCESS) {
perror("FAILURE to makeP.");
exit(2);
}
hipMemcpy(Avals, dev_Avals, sizeof(double) * (nnz), hipMemcpyDeviceToHost);
hipMemcpy(one, dev_one, sizeof(double) * (numRow), hipMemcpyDeviceToHost);
hipMemcpy(d, dev_d, sizeof(double) * (numRow), hipMemcpyDeviceToHost);
for (i = 0; i< nnz; i++){
if (d[rowind[i]] && Avals[i]) {
Avals[i] = dP/d[rowind[i]];
}
}
hipFree(dev_rowind);
hipFree(dev_colind);
hipFree(dev_Avals);
hipFree(dev_one);
hipFree(dev_d);
free(d);
free(one);
}
void getRank(double *Pvals, double *x, int *rowind, int *colind, int numRows, int nnz, double tol, double dP){
hipsparseStatus_t status;
hipsparseHandle_t handle=0;
hipsparseMatDescr_t descr=0;
hipsparseOperation_t transa = HIPSPARSE_OPERATION_TRANSPOSE;
status = hipsparseCreate(&handle);
if (status != HIPSPARSE_STATUS_SUCCESS) {
perror("Failed to create handle.");
exit(2);
}
status = hipsparseCreateMatDescr(&descr);
if (status != HIPSPARSE_STATUS_SUCCESS) {
perror("Matrix descriptor initialization failed");
exit(2);
}
status = hipsparseSetMatType(descr, HIPSPARSE_MATRIX_TYPE_GENERAL);
if (status != HIPSPARSE_STATUS_SUCCESS) {
perror("hipsparseSetMatType failed");
exit(2);
}
status = hipsparseSetMatIndexBase(descr, HIPSPARSE_INDEX_BASE_ZERO);
if (status != HIPSPARSE_STATUS_SUCCESS) {
perror("hipsparseSetMatIndexBase failed");
exit(2);
}
hipsparseIndexBase_t idxBase = HIPSPARSE_INDEX_BASE_ZERO;
double *dev_y;
double *dev_x;
double *dev_Pvals;
int *dev_rowind, *dev_csrRowInd, *dev_colind;
int i;
double *y = (double*)malloc(sizeof(double)*numRows);
double *alpha, *beta;
alpha = (double*) malloc(sizeof(double));
beta = (double*) malloc(sizeof(double));
alpha[0] = 1;
//double error = 10.0;
ones(y, numRows);
hipMalloc(&dev_y, (sizeof(double)*numRows));
hipMalloc(&dev_x, sizeof(double)*(numRows));
hipMalloc(&dev_Pvals, sizeof(double)*(nnz));
hipMalloc(&dev_rowind, sizeof(double)*(nnz));
hipMalloc(&dev_csrRowInd, sizeof(double)*(numRows+1));
hipMalloc(&dev_colind, sizeof(double)*(nnz));
hipMemcpy(dev_y, y, sizeof(double)*(numRows), hipMemcpyHostToDevice);
hipMemcpy(dev_x, x, sizeof(double)*(numRows), hipMemcpyHostToDevice);
hipMemcpy(dev_rowind, rowind, sizeof(int)*(nnz), hipMemcpyHostToDevice);
hipMemcpy(dev_colind, colind, sizeof(int)*(nnz), hipMemcpyHostToDevice);
hipMemcpy(dev_Pvals, Pvals, sizeof(double)*(nnz), hipMemcpyHostToDevice);
status = hipsparseXcoo2csr(handle, dev_rowind, nnz, numRows, dev_csrRowInd, idxBase);
if (status != HIPSPARSE_STATUS_SUCCESS) {
perror("FAILURE to set csr row indices.");
exit(2);
}
i = 0;
// while (error>tol) {
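//power iteration: y = P^T * x + (1-dP)/numRows (alpha = 1, the damping term enters through beta since y is reset to ones each pass);
//runs a fixed 50 iterations instead of iterating until the tolerance is met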
while(i++<50){
//i++;
beta[0] = (double)((1-dP)/(numRows));
hipsparseDcsrmv(handle, transa, numRows, numRows, nnz, alpha, descr, dev_Pvals,
dev_csrRowInd, dev_colind, dev_x, beta, dev_y);
// error = hipblasDnrm2(numRows, dev_y, 1);
hipMemcpy(dev_x, dev_y, numRows*sizeof(double), hipMemcpyDeviceToDevice);
hipMemcpy(dev_y, y, sizeof(double) * numRows, hipMemcpyHostToDevice);
}
hipMemcpy(x, dev_x, sizeof(double)*numRows, hipMemcpyDeviceToHost);
hipFree(dev_x);
hipFree(dev_y);
hipFree(dev_Pvals);
hipFree(dev_rowind);
hipFree(dev_colind);
free(y);
}
double sum(double *x, int N){
int i;
double result = 0;
//#pragma omp parallel for simd reduction(+:result)
for (i = 0; i<N; i++){
result+= x[i];
}
return result;
}
void ones(double *a, int N){
int i;
//#pragma omp parallel for simd
for (i =0; i< N; i++) {
a[i] = 1;
}
}
double getError(double *v1, double *v2, int size){
int i;
double result;
// #pragma omp parallel for simd
for (i = 0; i<size; i++) {
v1[i] = v1[i]-v2[i];
}
result = 10; // not using this function to terminate while loop currently.
double *dev_v1;
hipMalloc(&dev_v1, sizeof(double)*size);
return result;
}
|
10bc3ade83651cc2012ecceb536512f5d4d012d6.cu
|
#include <stdio.h>
#include "getRank.h"
#include "/usr/include/cuda/cuda_runtime.h"
#include <cublas.h>
#include <cusparse.h>
void makeP(double *Avals, int *rowind, int numRow, int *colind, int nnz, double dP){
cusparseStatus_t status;
cusparseHandle_t handle=0;
cusparseMatDescr_t descr=0;
cusparseOperation_t transa = CUSPARSE_OPERATION_NON_TRANSPOSE;
status = cusparseCreate(&handle);
if (status != CUSPARSE_STATUS_SUCCESS) {
perror("Cusparse Library Initialization.");
exit(2);
}
status = cusparseCreateMatDescr(&descr);
if (status != CUSPARSE_STATUS_SUCCESS) {
perror("Matrix descriptor initialization failed");
exit(2);
}
status = cusparseSetMatType(descr, CUSPARSE_MATRIX_TYPE_GENERAL);
if (status != CUSPARSE_STATUS_SUCCESS) {
perror("cusparseSetMatType failed");
exit(2);
}
status = cusparseSetMatIndexBase(descr, CUSPARSE_INDEX_BASE_ZERO);
if (status != CUSPARSE_STATUS_SUCCESS) {
perror("cusparseSetMatIndexBase failed");
exit(2);
}
cusparseIndexBase_t idxBase = CUSPARSE_INDEX_BASE_ZERO;
double *one = (double*)malloc(sizeof(double)*(numRow));
double *d = (double*)malloc(numRow*sizeof(double));
double *dev_one, *dev_d, *dev_Avals;
int *dev_csrRowInd, *dev_colind, *dev_rowind;
int i;
dP = .95;
//Convert rowInd vector to CSR format
cudaMalloc(&dev_rowind, sizeof(int)*(nnz));
cudaMalloc(&dev_csrRowInd, sizeof(int)*(numRow+1));
cudaMemcpy(dev_rowind, rowind, sizeof(int) * (nnz), cudaMemcpyHostToDevice);
status = cusparseXcoo2csr(handle, dev_rowind, nnz, numRow, dev_csrRowInd, idxBase);
if (status != CUSPARSE_STATUS_SUCCESS) {
perror("FAILURE to set csr row indices.");
exit(2);
}
ones(one, numRow);
ones(d, numRow);
// CSR format is the only way supported in CUDA
cudaMalloc(&dev_one, sizeof(double)*(numRow));
cudaMalloc(&dev_d, sizeof(double)*(numRow));
cudaMalloc(&dev_Avals, sizeof(double)*(nnz));
cudaMalloc(&dev_colind, sizeof(int)*(nnz));
cudaMemcpy(dev_d, d, sizeof(double) * (numRow), cudaMemcpyHostToDevice);
cudaMemcpy(dev_one, one, sizeof(double) * (numRow), cudaMemcpyHostToDevice);
cudaMemcpy(dev_Avals, Avals, sizeof(double) * (nnz), cudaMemcpyHostToDevice);
cudaMemcpy(dev_colind, colind, sizeof(int) * (nnz), cudaMemcpyHostToDevice);
//csr multiplication call
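//computes d = A*ones, i.e. the row sums (out-degrees); each nonzero of A is then replaced by dP/rowsum below to form the transition matrix P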
double alpha = 1, beta = 0;
status = cusparseDcsrmv(handle, transa, numRow, numRow, nnz, &alpha, descr,
dev_Avals, dev_csrRowInd, dev_colind, dev_one, &beta, dev_d);
if (status != CUSPARSE_STATUS_SUCCESS) {
perror("FAILURE to makeP.");
exit(2);
}
cudaMemcpy(Avals, dev_Avals, sizeof(double) * (nnz), cudaMemcpyDeviceToHost);
cudaMemcpy(one, dev_one, sizeof(double) * (numRow), cudaMemcpyDeviceToHost);
cudaMemcpy(d, dev_d, sizeof(double) * (numRow), cudaMemcpyDeviceToHost);
for (i = 0; i< nnz; i++){
if (d[rowind[i]] && Avals[i]) {
Avals[i] = dP/d[rowind[i]];
}
}
cudaFree(dev_rowind);
cudaFree(dev_colind);
cudaFree(dev_Avals);
cudaFree(dev_one);
cudaFree(dev_d);
free(d);
free(one);
}
void getRank(double *Pvals, double *x, int *rowind, int *colind, int numRows, int nnz, double tol, double dP){
cusparseStatus_t status;
cusparseHandle_t handle=0;
cusparseMatDescr_t descr=0;
cusparseOperation_t transa = CUSPARSE_OPERATION_TRANSPOSE;
status = cusparseCreate(&handle);
if (status != CUSPARSE_STATUS_SUCCESS) {
perror("Failed to create handle.");
exit(2);
}
status = cusparseCreateMatDescr(&descr);
if (status != CUSPARSE_STATUS_SUCCESS) {
perror("Matrix descriptor initialization failed");
exit(2);
}
status = cusparseSetMatType(descr, CUSPARSE_MATRIX_TYPE_GENERAL);
if (status != CUSPARSE_STATUS_SUCCESS) {
perror("cusparseSetMatType failed");
exit(2);
}
status = cusparseSetMatIndexBase(descr, CUSPARSE_INDEX_BASE_ZERO);
if (status != CUSPARSE_STATUS_SUCCESS) {
perror("cusparseSetMatIndexBase failed");
exit(2);
}
cusparseIndexBase_t idxBase = CUSPARSE_INDEX_BASE_ZERO;
double *dev_y;
double *dev_x;
double *dev_Pvals;
int *dev_rowind, *dev_csrRowInd, *dev_colind;
int i;
double *y = (double*)malloc(sizeof(double)*numRows);
double *alpha, *beta;
alpha = (double*) malloc(sizeof(double));
beta = (double*) malloc(sizeof(double));
alpha[0] = 1;
//double error = 10.0;
ones(y, numRows);
cudaMalloc(&dev_y, (sizeof(double)*numRows));
cudaMalloc(&dev_x, sizeof(double)*(numRows));
cudaMalloc(&dev_Pvals, sizeof(double)*(nnz));
cudaMalloc(&dev_rowind, sizeof(double)*(nnz));
cudaMalloc(&dev_csrRowInd, sizeof(double)*(numRows+1));
cudaMalloc(&dev_colind, sizeof(double)*(nnz));
cudaMemcpy(dev_y, y, sizeof(double)*(numRows), cudaMemcpyHostToDevice);
cudaMemcpy(dev_x, x, sizeof(double)*(numRows), cudaMemcpyHostToDevice);
cudaMemcpy(dev_rowind, rowind, sizeof(int)*(nnz), cudaMemcpyHostToDevice);
cudaMemcpy(dev_colind, colind, sizeof(int)*(nnz), cudaMemcpyHostToDevice);
cudaMemcpy(dev_Pvals, Pvals, sizeof(double)*(nnz), cudaMemcpyHostToDevice);
status = cusparseXcoo2csr(handle, dev_rowind, nnz, numRows, dev_csrRowInd, idxBase);
if (status != CUSPARSE_STATUS_SUCCESS) {
perror("FAILURE to set csr row indices.");
exit(2);
}
i = 0;
// while (error>tol) {
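//power iteration: y = P^T * x + (1-dP)/numRows (alpha = 1, the damping term enters through beta since y is reset to ones each pass);
//runs a fixed 50 iterations instead of iterating until the tolerance is met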
while(i++<50){
//i++;
beta[0] = (double)((1-dP)/(numRows));
cusparseDcsrmv(handle, transa, numRows, numRows, nnz, alpha, descr, dev_Pvals,
dev_csrRowInd, dev_colind, dev_x, beta, dev_y);
// error = cublasDnrm2(numRows, dev_y, 1);
cudaMemcpy(dev_x, dev_y, numRows*sizeof(double), cudaMemcpyDeviceToDevice);
cudaMemcpy(dev_y, y, sizeof(double) * numRows, cudaMemcpyHostToDevice);
}
cudaMemcpy(x, dev_x, sizeof(double)*numRows, cudaMemcpyDeviceToHost);
cudaFree(dev_x);
cudaFree(dev_y);
cudaFree(dev_Pvals);
cudaFree(dev_rowind);
cudaFree(dev_colind);
free(y);
}
double sum(double *x, int N){
int i;
double result = 0;
//#pragma omp parallel for simd reduction(+:result)
for (i = 0; i<N; i++){
result+= x[i];
}
return result;
}
void ones(double *a, int N){
int i;
//#pragma omp parallel for simd
for (i =0; i< N; i++) {
a[i] = 1;
}
}
double getError(double *v1, double *v2, int size){
int i;
double result;
// #pragma omp parallel for simd
for (i = 0; i<size; i++) {
v1[i] = v1[i]-v2[i];
}
result = 10; // not using this function to terminate while loop currently.
double *dev_v1;
cudaMalloc(&dev_v1, sizeof(double)*size);
return result;
}
|
11a269835216a4e975c0cf593f1d329bba184008.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <sys/stat.h>
#include <ctype.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>
#include <time.h>
#include <sys/time.h>
#include "deflate_kernel.cu"
// defined in deflate_kernel.cu
// define CHUNK_SIZE 32768
// define THREAD_NUM 1024
// Input: Filename
int main(int argc, char *argv[])
{
int i;
int f_handle;
char *f_in;
char *f_out;
struct stat finfo;
char * inputfname;
char * outputfname;
if (argc < 3)
{
printf("USAGE: %s <input filename> <output filename>\n", argv[0]);
exit(1);
}
inputfname = argv[1];
outputfname = argv[2];
f_handle = open(inputfname, O_RDONLY);
fstat(f_handle, &finfo);
f_in = (char*) malloc(finfo.st_size);
f_out = (char*) malloc(finfo.st_size);
unsigned int data_bytes = (unsigned int)finfo.st_size;
printf("This file has %d bytes data\n", data_bytes);
read (f_handle, f_in, data_bytes);
//Set the number of blocks and threads
dim3 grid(1, 1, 1);
dim3 block(THREAD_NUM, 1, 1);
char* d_in;
hipMalloc((void**) &d_in, data_bytes);
hipMemcpy(d_in, f_in, data_bytes, hipMemcpyHostToDevice);
char* d_out;
hipMalloc((void**) &d_out, data_bytes);
hipMemset(d_out, 0, data_bytes);
struct timeval start_tv, end_tv;
time_t sec;
time_t ms;
time_t diff;
gettimeofday(&start_tv, NULL);
hipLaunchKernelGGL(( deflatekernel), dim3(grid), dim3(block), 0, 0, data_bytes, d_in, d_out);
hipDeviceSynchronize();
gettimeofday(&end_tv, NULL);
sec = end_tv.tv_sec - start_tv.tv_sec;
ms = end_tv.tv_usec - start_tv.tv_usec;
diff = sec * 1000000 + ms;
printf("%10s:\t\t%fms\n", "Time elapsed", (double)((double)diff/1000.0));
hipMemcpy(f_out, d_out, data_bytes, hipMemcpyDeviceToHost);
// Inflate data_out using zlib
// Meh
// Compare inflated data with input
// whatever
FILE *writeFile;
writeFile = fopen(outputfname,"w+");
for(i = 0; i < data_bytes; i++)
fprintf(writeFile,"%c", f_out[i]);
fclose(writeFile);
return 0;
}
|
11a269835216a4e975c0cf593f1d329bba184008.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <sys/stat.h>
#include <ctype.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>
#include <time.h>
#include <sys/time.h>
#include "deflate_kernel.cu"
// defined in deflate_kernel.cu
// define CHUNK_SIZE 32768
// define THREAD_NUM 1024
// Input: Filename
int main(int argc, char *argv[])
{
int i;
int f_handle;
char *f_in;
char *f_out;
struct stat finfo;
char * inputfname;
char * outputfname;
if (argc < 3)
{
printf("USAGE: %s <input filename> <output filename>\n", argv[0]);
exit(1);
}
inputfname = argv[1];
outputfname = argv[2];
f_handle = open(inputfname, O_RDONLY);
fstat(f_handle, &finfo);
f_in = (char*) malloc(finfo.st_size);
f_out = (char*) malloc(finfo.st_size);
unsigned int data_bytes = (unsigned int)finfo.st_size;
printf("This file has %d bytes data\n", data_bytes);
read (f_handle, f_in, data_bytes);
//Set the number of blocks and threads
dim3 grid(1, 1, 1);
dim3 block(THREAD_NUM, 1, 1);
char* d_in;
cudaMalloc((void**) &d_in, data_bytes);
cudaMemcpy(d_in, f_in, data_bytes, cudaMemcpyHostToDevice);
char* d_out;
cudaMalloc((void**) &d_out, data_bytes);
cudaMemset(d_out, 0, data_bytes);
struct timeval start_tv, end_tv;
time_t sec;
time_t ms;
time_t diff;
gettimeofday(&start_tv, NULL);
deflatekernel<<<grid, block>>>(data_bytes, d_in, d_out);
cudaThreadSynchronize();
gettimeofday(&end_tv, NULL);
sec = end_tv.tv_sec - start_tv.tv_sec;
ms = end_tv.tv_usec - start_tv.tv_usec;
diff = sec * 1000000 + ms;
printf("%10s:\t\t%fms\n", "Time elapsed", (double)((double)diff/1000.0));
cudaMemcpy(f_out, d_out, data_bytes, cudaMemcpyDeviceToHost);
// Inflate data_out using zlib
// Meh
// Compare inflated data with input
// whatever
FILE *writeFile;
writeFile = fopen(outputfname,"w+");
for(i = 0; i < data_bytes; i++)
fprintf(writeFile,"%c", f_out[i]);
fclose(writeFile);
return 0;
}
|
ca15222de62f6f9e0471809f62b5f80d74ba835a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "shmem_reduce_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
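//benchmark harness: for each matrix size, sweep the block configurations above,
//warm up with 10 kernel launches, then time 1000 launches and print the elapsed microseconds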
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *d_out = NULL;
hipMalloc(&d_out, XSIZE*YSIZE);
const float *d_in = NULL;
hipMalloc(&d_in, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(shmem_reduce_kernel, dim3(gridBlock), dim3(threadBlock), 0, 0, d_out, d_in);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(shmem_reduce_kernel, dim3(gridBlock), dim3(threadBlock), 0, 0, d_out, d_in);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(shmem_reduce_kernel, dim3(gridBlock), dim3(threadBlock), 0, 0, d_out, d_in);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
ca15222de62f6f9e0471809f62b5f80d74ba835a.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "shmem_reduce_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
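//benchmark harness: for each matrix size, sweep the block configurations above,
//warm up with 10 kernel launches, then time 1000 launches and print the elapsed microseconds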
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *d_out = NULL;
cudaMalloc(&d_out, XSIZE*YSIZE);
const float *d_in = NULL;
cudaMalloc(&d_in, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
shmem_reduce_kernel<<<gridBlock,threadBlock>>>(d_out,d_in);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
shmem_reduce_kernel<<<gridBlock,threadBlock>>>(d_out,d_in);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
shmem_reduce_kernel<<<gridBlock,threadBlock>>>(d_out,d_in);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
52296f9a6f34d2e47e9b20334362c1d6f9e69ced.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.7.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2015
@generated from zgerbt_kernels.cu normal z -> d, Fri Sep 11 18:29:20 2015
@author Adrien REMY
*/
#include "common_magma.h"
#define block_height 32
#define block_width 4
#define block_length 256
#define NB 64
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
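//elementary step of the random butterfly transform: recombines the four n/2 x n/2 blocks of A
//and scales them by the u and v butterfly vectors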
static __device__ void
magmablas_delementary_multiplication_devfunc(
magma_int_t n,
double *dA, magma_int_t ldda,
double *du,
double *dv)
{
magma_int_t idx, idy;
idx = blockIdx.x * blockDim.x + threadIdx.x;
idy = blockIdx.y * blockDim.y + threadIdx.y;
if ((idx < n/2) && (idy < n/2)) {
dA += idx + idy * ldda;
double a00, a10, a01, a11, b1, b2, b3, b4;
__shared__ double u1[block_height], u2[block_height], v1[block_width], v2[block_width];
du += idx;
dv += idy;
u1[threadIdx.x]=du[0];
u2[threadIdx.x]=du[n/2];
v1[threadIdx.y]=dv[0];
v2[threadIdx.y]=dv[n/2];
__syncthreads();
a00 = dA[0];
a01 = dA[ldda*n/2];
a10 = dA[n/2];
a11 = dA[ldda*n/2+n/2];
b1 = a00 + a01;
b2 = a10 + a11;
b3 = a00 - a01;
b4 = a10 - a11;
dA[0] = u1[threadIdx.x] * v1[threadIdx.y] * (b1 + b2);
dA[ldda*n/2] = u1[threadIdx.x] * v2[threadIdx.y] * (b3 + b4);
dA[n/2] = u2[threadIdx.x] * v1[threadIdx.y] * (b1 - b2);
dA[ldda*n/2+n/2] = u2[threadIdx.x] * v2[threadIdx.y] *(b3 - b4);
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
magmablas_delementary_multiplication_kernel(
magma_int_t n,
double *dA, magma_int_t offsetA, magma_int_t ldda,
double *du, magma_int_t offsetu,
double *dv, magma_int_t offsetv)
{
magmablas_delementary_multiplication_devfunc( n, dA+offsetA, ldda, du+offsetu, dv+offsetv);
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
magmablas_delementary_multiplication_kernel_batched(
magma_int_t n,
double **dA_array, magma_int_t offsetA, magma_int_t ldda,
double *du, magma_int_t offsetu,
double *dv, magma_int_t offsetv)
{
int batchid = blockIdx.z;
magmablas_delementary_multiplication_devfunc( n, dA_array[batchid]+offsetA, ldda, du+offsetu, dv+offsetv);
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
static __device__ void
magmablas_dapply_vector_devfunc(
magma_int_t n,
double *du, double *db)
{
magma_int_t idx;
idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < n/2) {
du += idx;
db += idx;
double a1,a2;
a1 = du[0]*db[0];
a2 = du[n/2]*db[n/2];
db[0] = a1 + a2;
db[n/2] = a1 -a2;
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
magmablas_dapply_vector_kernel(
magma_int_t n,
double *du, magma_int_t offsetu, double *db, magma_int_t offsetb )
{
magmablas_dapply_vector_devfunc(n, du+offsetu, db+offsetb);
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
magmablas_dapply_vector_kernel_batched(
magma_int_t n,
double *du, magma_int_t offsetu, double **db_array, magma_int_t offsetb )
{
int batchid = blockIdx.y;
magmablas_dapply_vector_devfunc(n, du+offsetu, db_array[batchid]+offsetb);
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
static __device__ void
magmablas_dapply_transpose_vector_devfunc(
magma_int_t n,
double *du,double *db )
{
magma_int_t idx;
idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < n/2) {
du += idx;
db += idx;
double a1,a2;
a1 = db[0] + db[n/2];
a2 = db[0] - db[n/2];
db[0] = du[0]*a1;
db[n/2] = du[n/2]*a2;
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
magmablas_dapply_transpose_vector_kernel(
magma_int_t n,
double *du, magma_int_t offsetu, double *db, magma_int_t offsetb )
{
magmablas_dapply_transpose_vector_devfunc(n, du+offsetu, db+offsetb);
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
magmablas_dapply_transpose_vector_kernel_batched(
magma_int_t n,
double *du, magma_int_t offsetu, double **db_array, magma_int_t offsetb )
{
int batchid = blockIdx.y;
magmablas_dapply_transpose_vector_devfunc(n, du+offsetu, db_array[batchid]+offsetb);
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
52296f9a6f34d2e47e9b20334362c1d6f9e69ced.cu
|
/*
-- MAGMA (version 1.7.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2015
@generated from zgerbt_kernels.cu normal z -> d, Fri Sep 11 18:29:20 2015
@author Adrien REMY
*/
#include "common_magma.h"
#define block_height 32
#define block_width 4
#define block_length 256
#define NB 64
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
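//elementary step of the random butterfly transform: recombines the four n/2 x n/2 blocks of A
//and scales them by the u and v butterfly vectors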
static __device__ void
magmablas_delementary_multiplication_devfunc(
magma_int_t n,
double *dA, magma_int_t ldda,
double *du,
double *dv)
{
magma_int_t idx, idy;
idx = blockIdx.x * blockDim.x + threadIdx.x;
idy = blockIdx.y * blockDim.y + threadIdx.y;
if ((idx < n/2) && (idy < n/2)) {
dA += idx + idy * ldda;
double a00, a10, a01, a11, b1, b2, b3, b4;
__shared__ double u1[block_height], u2[block_height], v1[block_width], v2[block_width];
du += idx;
dv += idy;
u1[threadIdx.x]=du[0];
u2[threadIdx.x]=du[n/2];
v1[threadIdx.y]=dv[0];
v2[threadIdx.y]=dv[n/2];
__syncthreads();
a00 = dA[0];
a01 = dA[ldda*n/2];
a10 = dA[n/2];
a11 = dA[ldda*n/2+n/2];
b1 = a00 + a01;
b2 = a10 + a11;
b3 = a00 - a01;
b4 = a10 - a11;
dA[0] = u1[threadIdx.x] * v1[threadIdx.y] * (b1 + b2);
dA[ldda*n/2] = u1[threadIdx.x] * v2[threadIdx.y] * (b3 + b4);
dA[n/2] = u2[threadIdx.x] * v1[threadIdx.y] * (b1 - b2);
dA[ldda*n/2+n/2] = u2[threadIdx.x] * v2[threadIdx.y] *(b3 - b4);
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
magmablas_delementary_multiplication_kernel(
magma_int_t n,
double *dA, magma_int_t offsetA, magma_int_t ldda,
double *du, magma_int_t offsetu,
double *dv, magma_int_t offsetv)
{
magmablas_delementary_multiplication_devfunc( n, dA+offsetA, ldda, du+offsetu, dv+offsetv);
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
magmablas_delementary_multiplication_kernel_batched(
magma_int_t n,
double **dA_array, magma_int_t offsetA, magma_int_t ldda,
double *du, magma_int_t offsetu,
double *dv, magma_int_t offsetv)
{
int batchid = blockIdx.z;
magmablas_delementary_multiplication_devfunc( n, dA_array[batchid]+offsetA, ldda, du+offsetu, dv+offsetv);
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
static __device__ void
magmablas_dapply_vector_devfunc(
magma_int_t n,
double *du, double *db)
{
magma_int_t idx;
idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < n/2) {
du += idx;
db += idx;
double a1,a2;
a1 = du[0]*db[0];
a2 = du[n/2]*db[n/2];
db[0] = a1 + a2;
db[n/2] = a1 -a2;
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
magmablas_dapply_vector_kernel(
magma_int_t n,
double *du, magma_int_t offsetu, double *db, magma_int_t offsetb )
{
magmablas_dapply_vector_devfunc(n, du+offsetu, db+offsetb);
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
magmablas_dapply_vector_kernel_batched(
magma_int_t n,
double *du, magma_int_t offsetu, double **db_array, magma_int_t offsetb )
{
int batchid = blockIdx.y;
magmablas_dapply_vector_devfunc(n, du+offsetu, db_array[batchid]+offsetb);
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
static __device__ void
magmablas_dapply_transpose_vector_devfunc(
magma_int_t n,
double *du,double *db )
{
magma_int_t idx;
idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < n/2) {
du += idx;
db += idx;
double a1,a2;
a1 = db[0] + db[n/2];
a2 = db[0] - db[n/2];
db[0] = du[0]*a1;
db[n/2] = du[n/2]*a2;
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
magmablas_dapply_transpose_vector_kernel(
magma_int_t n,
double *du, magma_int_t offsetu, double *db, magma_int_t offsetb )
{
magmablas_dapply_transpose_vector_devfunc(n, du+offsetu, db+offsetb);
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
magmablas_dapply_transpose_vector_kernel_batched(
magma_int_t n,
double *du, magma_int_t offsetu, double **db_array, magma_int_t offsetb )
{
int batchid = blockIdx.y;
magmablas_dapply_transpose_vector_devfunc(n, du+offsetu, db_array[batchid]+offsetb);
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
106efee93485d5ddd0b11b8f9311e3c3168ffad7.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
to-do : 1. change the matrix order to column major and
-> change matrix
-> change indexing
*/
#include <chrono>
#include <fstream>
#include <iostream>
#include <random>
#include "csr.hpp"
//__________________________________________________________________
// device function for the indexing
// rather a lambda?
__device__ unsigned int crd2idx(unsigned int batch,
unsigned int batchsize,
unsigned int v) {
return v * batchsize + batch;
}
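// element (batch, v) is stored at v * batchsize + batch, so threads working on the same vertex
// across different batches access contiguous memory (coalesced over the batch dimension)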
//__________________________________________________________________
// produce a 2d grid based on the sizes
dim3 get_grid(unsigned int x, unsigned int y, dim3 block_2d) {
dim3 grid_2d((x + (block_2d.x - 1)) / block_2d.x,
(y + (block_2d.y - 1)) / block_2d.y);
return grid_2d;
}
//__________________________________________________________________
// produce a grid_block based on the sizes
dim3 get_block(unsigned int x, unsigned int y) {
return dim3(1152, 4);
}
//__________________________________________________________________
// gpu function to run the multiple source bellman ford
// 1. use an array of the batch indices that still need to be run
// 2. loop over indices to be run
// 3. give back array of the ones that changed
/*
__global__ void bf_iteration(int n,
unsigned int batchsize,
unsigned int *csr_index,
unsigned int *csr_cols,
float *csr_weights,
float *d,
float *d_new,
unsigned int *ind,
int *result) {
auto thisThread = blockIdx.x * blockDim.x + threadIdx.x;
auto numThreads = gridDim.x + blockDim.x;
// loop over all the batches that need to be done
for (unsigned int batch = 0; batch < batchsize; ++batch) {
bool changes = false;
auto idx = ind[batch];
for (unsigned int v = thisThread; v < n; v += numThreads) {
float dist = d[crd2idx(idx, batchsize, v)];
for(unsigned int i = csr_index[v]; i < csr_index[v + 1]; ++i) {
auto u = csr_cols[i];
auto weight = csr_weights[i];
if(dist > d[crd2idx(idx, batchsize, u)] + weight) {
dist = d[crd2idx(idx, batchsize, u)] + weight;
changes = true;
}
}
d_new[crd2idx(idx, batchsize, v)] = dist;
}
// check if a certain batch changed
if (changes) {
result[idx] = 1;
}
}
}
*/
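// 2d strategy: the x dimension of the grid strides over vertices and the y dimension
// over batches; distances are read from d and relaxed values are written to d_new
// (Jacobi-style double buffering), and a single global flag records whether any
// distance improved during this sweep.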
__global__ void bf_iteration_2d(int n,
unsigned int batchsize,
unsigned int *csr_index,
unsigned int *csr_cols,
float *csr_weights,
float *d,
float *d_new,
int *result) {
auto thread_x = blockIdx.x * blockDim.x + threadIdx.x;
auto thread_y = blockIdx.y * blockDim.y + threadIdx.y;
auto n_threads_x = gridDim.x * blockDim.x; // total threads in x: stride of the grid-stride loop
auto n_threads_y = gridDim.y * blockDim.y; // total threads in y
// loop over all the batches that need to be done
bool changes = false;
for (unsigned int v = thread_x; v < n; v += n_threads_x) {
for (unsigned int batch = thread_y; batch < batchsize; batch += n_threads_y) {
float dist = d[crd2idx(batch, batchsize, v)];
for(unsigned int i = csr_index[v]; i < csr_index[v + 1]; ++i) {
auto u = csr_cols[i];
auto weight = csr_weights[i];
if(dist > d[crd2idx(batch, batchsize, u)] + weight) {
dist = d[crd2idx(batch, batchsize, u)] + weight;
changes = true;
}
}
d_new[crd2idx(batch, batchsize, v)] = dist;
}
// check if a certain batch changed
}
if (changes) {
*result = 1;
}
}
//___________________________________________________________________
// run the bf stuff
void run_bf(const csr_matrix &tr,
unsigned int batchsize,
const std::vector<unsigned int> &sources) {
// 1.0. allocate memory matrix and move to gpu
unsigned int *csr_index;
unsigned int *csr_cols;
float *csr_weights;
hipMalloc(&csr_index, (tr.n + 1) * sizeof(unsigned int));
hipMalloc(&csr_cols, tr.nnz * sizeof(unsigned int));
hipMalloc(&csr_weights, tr.nnz * sizeof(float));
hipMemcpy(csr_index, tr.ind.data(), (tr.n + 1) * sizeof(unsigned int), hipMemcpyHostToDevice);
hipMemcpy(csr_cols, tr.cols.data(), tr.nnz * sizeof(unsigned int), hipMemcpyHostToDevice);
hipMemcpy(csr_weights, tr.weights.data(), tr.nnz * sizeof(float), hipMemcpyHostToDevice);
// 1.1 allocate memory distances and move to gpu
float *d;
float *d_new;
int *result;
hipMalloc(&d, batchsize * tr.n * sizeof(float));
hipMalloc(&d_new, batchsize * tr.n * sizeof(float));
hipMalloc(&result, /*batchsize * */ sizeof(int));
std::vector <float> initial;
initial.resize(tr.n * batchsize);
std::fill(initial.begin(), initial.end(), FLT_MAX);
for (std::size_t b = 0; b < batchsize; ++b) {
initial[sources[b] * batchsize + b] = 0; // crd2idx(b, batchsize, sources[b]): vertex-major, batch-minor
}
hipMemcpy(d, initial.data(), tr.n * batchsize * sizeof(float), hipMemcpyHostToDevice);
/*
// 1d strategy
// 2. loop over all the problems until they are all solved
// controll array c for the indices that did change
// array of indices to run over
unsigned int *c, *ind_host, *ind_dev;
c = (unsigned int*) malloc (batchsize * sizeof(unsigned int));
ind_host = (unsigned int*) malloc (batchsize * sizeof(unsigned int));
for (unsigned int i = 0; i < batchsize; ++i) {
ind_host[i] = i;
}
hipMalloc(&ind_dev, batchsize*sizeof(unsigned int));
unsigned int num_blocks = (tr.n + 255) / 256;
unsigned int to_solve = batchsize;
while(true) {
hipMemset(result, 0, batchsize*sizeof(int));
hipMemcpy(ind_dev, ind_host, batchsize*sizeof(int), hipMemcpyHostToDevice);
bf_iteration<<<num_blocks, 256>>>(tr.n, to_solve,
csr_index, csr_cols, csr_weights,
d, d_new, ind_dev, result);
// check for iteration and decide which ones should be iterated again
hipMemcpy(c, result, batchsize*sizeof(int), hipMemcpyDeviceToHost);
std::size_t cnt = 0;
for (std::size_t i = 0; i < batchsize; ++i) {
if (!c[i]) {
ind_host[cnt] = i;
++cnt;
}
}
to_solve = cnt;
if (cnt == batchsize)
break;
std::swap(d, d_new);
}
*/
// 2d strategy
dim3 block_2d = get_block(tr.n, batchsize);
dim3 grid_2d = get_grid(tr.n, batchsize, block_2d);
while(true) {
hipMemset(result, 0, sizeof(int));
hipLaunchKernelGGL(( bf_iteration_2d), dim3(grid_2d), dim3(block_2d), 0, 0, tr.n, batchsize,
csr_index, csr_cols, csr_weights,
d, d_new, result);
unsigned int c;
hipMemcpy(&c, result, sizeof(int), hipMemcpyDeviceToHost);
if (!c)
break;
std::swap(d, d_new);
}
// 4. free memory
hipFree(csr_index);
hipFree(csr_cols);
hipFree(csr_weights);
hipFree(d);
hipFree(d_new);
hipFree(result);
// hipFree(ind_dev);
// free(c);
// free(ind_host);
}
//___________________________________________________________________
// int main(int argc, char** argv)
int main(int argc, char **argv) {
if(argc != 3)
throw std::runtime_error("Expected instance and batch size as argument");
unsigned int batchsize = std::atoi(argv[2]);
std::mt19937 prng{42};
std::uniform_real_distribution<float> weight_distrib{0.0f, 1.0f};
// Load the graph.
std::cout << "algo: " << "bf_gpu" << std::endl;
std::string instance(argv[1]);
std::size_t npos = instance.find_last_of("/");
instance = instance.substr(npos+1);
std::cout << "instance: " << instance << std::endl;
std::cout << "batchsize: " << batchsize << std::endl;
std::ifstream ins(argv[1]);
std::vector<std::tuple<unsigned int, unsigned int, float>> cv;
auto io_start = std::chrono::high_resolution_clock::now();
read_graph_unweighted(ins, [&] (unsigned int u, unsigned int v) {
// Generate a random edge weight in [0, 1).
cv.push_back({u, v, weight_distrib(prng)});
});
auto mat = coordinates_to_csr(std::move(cv));
auto t_io = std::chrono::high_resolution_clock::now() - io_start;
std::cout << "time_io: "
<< std::chrono::duration_cast<std::chrono::milliseconds>(t_io).count() << std::endl;
std::cout << "n_nodes: " << mat.n << std::endl;
std::cout << "n_edges: " << mat.nnz << std::endl;
auto tr = transpose(std::move(mat));
// Generate random sources.
std::uniform_int_distribution<unsigned int> s_distrib{0, mat.n - 1};
std::vector<unsigned int> sources;
for(unsigned int i = 0; i < batchsize; ++i)
sources.push_back(s_distrib(prng));
// Run the algorithm.
auto algo_start = std::chrono::high_resolution_clock::now();
run_bf(tr, batchsize, sources);
auto t_algo = std::chrono::high_resolution_clock::now() - algo_start;
std::cout << "time_mssp: "
<< std::chrono::duration_cast<std::chrono::milliseconds>(t_algo).count() << std::endl;
}
|
106efee93485d5ddd0b11b8f9311e3c3168ffad7.cu
|
/*
to-do : 1. change the matrix order to column major and
-> change matrix
-> change indexing
*/
#include <chrono>
#include <fstream>
#include <iostream>
#include <random>
#include <cfloat> // FLT_MAX used for the initial distances
#include "csr.hpp"
//__________________________________________________________________
// device function for the indexing
// rather a lambda?
__device__ unsigned int crd2idx(unsigned int batch,
unsigned int batchsize,
unsigned int v) {
return v * batchsize + batch;
}
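// Example: with batchsize = 4 the distances of vertex v for batches 0..3 are stored
// contiguously at d[4*v + 0], ..., d[4*v + 3] (vertex-major, batch-minor layout).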
//__________________________________________________________________
// produce a 2d grid based on the sizes
dim3 get_grid(unsigned int x, unsigned int y, dim3 block_2d) {
dim3 grid_2d((x + (block_2d.x - 1)) / block_2d.x,
(y + (block_2d.y - 1)) / block_2d.y);
return grid_2d;
}
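// Example: x = 1000 with block_2d.x = 256 gives grid_2d.x = (1000 + 255) / 256 = 4,
// i.e. a ceiling division so the grid always covers the full range.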
//__________________________________________________________________
// produce a grid_block based on the sizes
dim3 get_block(unsigned int x, unsigned int y) {
// dim3(1152, 4) = 4608 threads exceeds the 1024 threads-per-block limit, so the launch would fail
return dim3(256, 4);
}
//__________________________________________________________________
// gpu kernels to run the multiple-source Bellman-Ford relaxation
// 1. take an array of batch indices that still need to be processed
// 2. loop over those indices
// 3. report back which batches still changed
/*
__global__ void bf_iteration(int n,
unsigned int batchsize,
unsigned int *csr_index,
unsigned int *csr_cols,
float *csr_weights,
float *d,
float *d_new,
unsigned int *ind,
int *result) {
auto thisThread = blockIdx.x * blockDim.x + threadIdx.x;
auto numThreads = gridDim.x + blockDim.x;
// loop over all the batches that need to be done
for (unsigned int batch = 0; batch < batchsize; ++batch) {
bool changes = false;
auto idx = ind[batch];
for (unsigned int v = thisThread; v < n; v += numThreads) {
float dist = d[crd2idx(idx, batchsize, v)];
for(unsigned int i = csr_index[v]; i < csr_index[v + 1]; ++i) {
auto u = csr_cols[i];
auto weight = csr_weights[i];
if(dist > d[crd2idx(idx, batchsize, u)] + weight) {
dist = d[crd2idx(idx, batchsize, u)] + weight;
changes = true;
}
}
d_new[crd2idx(idx, batchsize, v)] = dist;
}
// check if a certain batch changed
if (changes) {
result[idx] = 1;
}
}
}
*/
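// 2d strategy: the x dimension of the grid strides over vertices and the y dimension
// over batches; distances are read from d and relaxed values are written to d_new
// (Jacobi-style double buffering), and a single global flag records whether any
// distance improved during this sweep.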
__global__ void bf_iteration_2d(int n,
unsigned int batchsize,
unsigned int *csr_index,
unsigned int *csr_cols,
float *csr_weights,
float *d,
float *d_new,
int *result) {
auto thread_x = blockIdx.x * blockDim.x + threadIdx.x;
auto thread_y = blockIdx.y * blockDim.y + threadIdx.y;
auto n_threads_x = gridDim.x * blockDim.x; // total threads in x: stride of the grid-stride loop
auto n_threads_y = gridDim.y * blockDim.y; // total threads in y
// loop over all the batches that need to be done
bool changes = false;
for (unsigned int v = thread_x; v < n; v += n_threads_x) {
for (unsigned int batch = thread_y; batch < batchsize; batch += n_threads_y) {
float dist = d[crd2idx(batch, batchsize, v)];
for(unsigned int i = csr_index[v]; i < csr_index[v + 1]; ++i) {
auto u = csr_cols[i];
auto weight = csr_weights[i];
if(dist > d[crd2idx(batch, batchsize, u)] + weight) {
dist = d[crd2idx(batch, batchsize, u)] + weight;
changes = true;
}
}
d_new[crd2idx(batch, batchsize, v)] = dist;
}
// check if a certain batch changed
}
if (changes) {
*result = 1;
}
}
//___________________________________________________________________
// run the bf stuff
void run_bf(const csr_matrix &tr,
unsigned int batchsize,
const std::vector<unsigned int> &sources) {
// 1.0. allocate memory matrix and move to gpu
unsigned int *csr_index;
unsigned int *csr_cols;
float *csr_weights;
cudaMalloc(&csr_index, (tr.n + 1) * sizeof(unsigned int));
cudaMalloc(&csr_cols, tr.nnz * sizeof(unsigned int));
cudaMalloc(&csr_weights, tr.nnz * sizeof(float));
cudaMemcpy(csr_index, tr.ind.data(), (tr.n + 1) * sizeof(unsigned int), cudaMemcpyHostToDevice);
cudaMemcpy(csr_cols, tr.cols.data(), tr.nnz * sizeof(unsigned int), cudaMemcpyHostToDevice);
cudaMemcpy(csr_weights, tr.weights.data(), tr.nnz * sizeof(float), cudaMemcpyHostToDevice);
// 1.1 allocate memory distances and move to gpu
float *d;
float *d_new;
int *result;
cudaMalloc(&d, batchsize * tr.n * sizeof(float));
cudaMalloc(&d_new, batchsize * tr.n * sizeof(float));
cudaMalloc(&result, /*batchsize * */ sizeof(int));
std::vector <float> initial;
initial.resize(tr.n * batchsize);
std::fill(initial.begin(), initial.end(), FLT_MAX);
for (std::size_t b = 0; b < batchsize; ++b) {
initial[sources[b] * batchsize + b] = 0; // crd2idx(b, batchsize, sources[b]): vertex-major, batch-minor
}
cudaMemcpy(d, initial.data(), tr.n * batchsize * sizeof(float), cudaMemcpyHostToDevice);
/*
// 1d strategy
// 2. loop over all the problems until they are all solved
// controll array c for the indices that did change
// array of indices to run over
unsigned int *c, *ind_host, *ind_dev;
c = (unsigned int*) malloc (batchsize * sizeof(unsigned int));
ind_host = (unsigned int*) malloc (batchsize * sizeof(unsigned int));
for (unsigned int i = 0; i < batchsize; ++i) {
ind_host[i] = i;
}
cudaMalloc(&ind_dev, batchsize*sizeof(unsigned int));
unsigned int num_blocks = (tr.n + 255) / 256;
unsigned int to_solve = batchsize;
while(true) {
cudaMemset(result, 0, batchsize*sizeof(int));
cudaMemcpy(ind_dev, ind_host, batchsize*sizeof(int), cudaMemcpyHostToDevice);
bf_iteration<<<num_blocks, 256>>>(tr.n, to_solve,
csr_index, csr_cols, csr_weights,
d, d_new, ind_dev, result);
// check for iteration and decide which ones should be iterated again
cudaMemcpy(c, result, batchsize*sizeof(int), cudaMemcpyDeviceToHost);
std::size_t cnt = 0;
for (std::size_t i = 0; i < batchsize; ++i) {
if (!c[i]) {
ind_host[cnt] = i;
++cnt;
}
}
to_solve = cnt;
if (cnt == batchsize)
break;
std::swap(d, d_new);
}
*/
// 2d strategy
dim3 block_2d = get_block(tr.n, batchsize);
dim3 grid_2d = get_grid(tr.n, batchsize, block_2d);
while(true) {
cudaMemset(result, 0, sizeof(int));
bf_iteration_2d<<<grid_2d, block_2d>>>(tr.n, batchsize,
csr_index, csr_cols, csr_weights,
d, d_new, result);
unsigned int c;
cudaMemcpy(&c, result, sizeof(int), cudaMemcpyDeviceToHost);
if (!c)
break;
std::swap(d, d_new);
}
// 4. free memory
cudaFree(csr_index);
cudaFree(csr_cols);
cudaFree(csr_weights);
cudaFree(d);
cudaFree(d_new);
cudaFree(result);
// cudaFree(ind_dev);
// free(c);
// free(ind_host);
}
//___________________________________________________________________
// int main(int argc, char** argv)
int main(int argc, char **argv) {
if(argc != 3)
throw std::runtime_error("Expected instance and batch size as argument");
unsigned int batchsize = std::atoi(argv[2]);
std::mt19937 prng{42};
std::uniform_real_distribution<float> weight_distrib{0.0f, 1.0f};
// Load the graph.
std::cout << "algo: " << "bf_gpu" << std::endl;
std::string instance(argv[1]);
std::size_t npos = instance.find_last_of("/");
instance = instance.substr(npos+1);
std::cout << "instance: " << instance << std::endl;
std::cout << "batchsize: " << batchsize << std::endl;
std::ifstream ins(argv[1]);
std::vector<std::tuple<unsigned int, unsigned int, float>> cv;
auto io_start = std::chrono::high_resolution_clock::now();
read_graph_unweighted(ins, [&] (unsigned int u, unsigned int v) {
// Generate a random edge weight in [0, 1).
cv.push_back({u, v, weight_distrib(prng)});
});
auto mat = coordinates_to_csr(std::move(cv));
auto t_io = std::chrono::high_resolution_clock::now() - io_start;
std::cout << "time_io: "
<< std::chrono::duration_cast<std::chrono::milliseconds>(t_io).count() << std::endl;
std::cout << "n_nodes: " << mat.n << std::endl;
std::cout << "n_edges: " << mat.nnz << std::endl;
auto tr = transpose(std::move(mat));
// Generate random sources.
std::uniform_int_distribution<unsigned int> s_distrib{0, mat.n - 1};
std::vector<unsigned int> sources;
for(unsigned int i = 0; i < batchsize; ++i)
sources.push_back(s_distrib(prng));
// Run the algorithm.
auto algo_start = std::chrono::high_resolution_clock::now();
run_bf(tr, batchsize, sources);
auto t_algo = std::chrono::high_resolution_clock::now() - algo_start;
std::cout << "time_mssp: "
<< std::chrono::duration_cast<std::chrono::milliseconds>(t_algo).count() << std::endl;
}
|
230cb8430a06e0f3f8f937016d648f07bddf20d8.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "gpu_mix20_2.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
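// The loops below sweep every matrix size and block shape listed above, pad each
// matrix dimension up to a multiple of the block shape, and time 1000 launches of
// the kernel per configuration after a short warm-up.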
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
uint8_t *ip = NULL;
hipMalloc(&ip, XSIZE*YSIZE);
uint32_t stride = 2;
int32_t *u = NULL;
hipMalloc(&u, XSIZE*YSIZE);
int32_t *v = NULL;
hipMalloc(&v, XSIZE*YSIZE);
int32_t numSamples = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((gpu_mix20_2), dim3(gridBlock), dim3(threadBlock), 0, 0, ip, stride, u, v, numSamples);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((gpu_mix20_2), dim3(gridBlock), dim3(threadBlock), 0, 0, ip, stride, u, v, numSamples);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((gpu_mix20_2), dim3(gridBlock), dim3(threadBlock), 0, 0, ip, stride, u, v, numSamples);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
230cb8430a06e0f3f8f937016d648f07bddf20d8.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "gpu_mix20_2.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
uint8_t *ip = NULL;
cudaMalloc(&ip, XSIZE*YSIZE);
uint32_t stride = 2;
int32_t *u = NULL;
cudaMalloc(&u, XSIZE*YSIZE);
int32_t *v = NULL;
cudaMalloc(&v, XSIZE*YSIZE);
int32_t numSamples = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
gpu_mix20_2<<<gridBlock,threadBlock>>>(ip,stride,u,v,numSamples);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
gpu_mix20_2<<<gridBlock,threadBlock>>>(ip,stride,u,v,numSamples);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
gpu_mix20_2<<<gridBlock,threadBlock>>>(ip,stride,u,v,numSamples);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
3d9d3eda92392c3b23da20817c571d1be9098373.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, float var_1,float var_2,float var_3,float var_4,float var_5,float var_6) {
if (comp >= -1.3530E36f + cosf(-1.9632E-44f)) {
if (comp <= var_1 - var_2) {
if (comp > asinf((-0.0f - -0.0f / (var_3 - +1.6077E-36f)))) {
comp = (+0.0f * (var_4 - (var_5 + var_6 - (-1.9305E-35f * +1.0874E-43f))));
}
}
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
float tmp_2 = atof(argv[2]);
float tmp_3 = atof(argv[3]);
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
hipLaunchKernelGGL(( compute), dim3(1),dim3(1), 0, 0, tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7);
hipDeviceSynchronize();
return 0;
}
|
3d9d3eda92392c3b23da20817c571d1be9098373.cu
|
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, float var_1,float var_2,float var_3,float var_4,float var_5,float var_6) {
if (comp >= -1.3530E36f + cosf(-1.9632E-44f)) {
if (comp <= var_1 - var_2) {
if (comp > asinf((-0.0f - -0.0f / (var_3 - +1.6077E-36f)))) {
comp = (+0.0f * (var_4 - (var_5 + var_6 - (-1.9305E-35f * +1.0874E-43f))));
}
}
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
float tmp_2 = atof(argv[2]);
float tmp_3 = atof(argv[3]);
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7);
cudaDeviceSynchronize();
return 0;
}
|
7c81abdee5a1ddae56b64cd7178233a6c12ea598.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "bestFilter.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const double *Params = NULL;
hipMalloc(&Params, XSIZE*YSIZE);
const bool *iMatch = NULL;
hipMalloc(&iMatch, XSIZE*YSIZE);
const int *Wh = NULL;
hipMalloc(&Wh, XSIZE*YSIZE);
const float *cmax = NULL;
hipMalloc(&cmax, XSIZE*YSIZE);
const float *mus = NULL;
hipMalloc(&mus, XSIZE*YSIZE);
int *id = NULL;
hipMalloc(&id, XSIZE*YSIZE);
float *x = NULL;
hipMalloc(&x, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((bestFilter), dim3(gridBlock), dim3(threadBlock), 0, 0, Params, iMatch, Wh, cmax, mus, id, x);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((bestFilter), dim3(gridBlock), dim3(threadBlock), 0, 0, Params, iMatch, Wh, cmax, mus, id, x);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((bestFilter), dim3(gridBlock), dim3(threadBlock), 0, 0, Params, iMatch, Wh, cmax, mus, id, x);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
7c81abdee5a1ddae56b64cd7178233a6c12ea598.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "bestFilter.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const double *Params = NULL;
cudaMalloc(&Params, XSIZE*YSIZE);
const bool *iMatch = NULL;
cudaMalloc(&iMatch, XSIZE*YSIZE);
const int *Wh = NULL;
cudaMalloc(&Wh, XSIZE*YSIZE);
const float *cmax = NULL;
cudaMalloc(&cmax, XSIZE*YSIZE);
const float *mus = NULL;
cudaMalloc(&mus, XSIZE*YSIZE);
int *id = NULL;
cudaMalloc(&id, XSIZE*YSIZE);
float *x = NULL;
cudaMalloc(&x, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
bestFilter<<<gridBlock,threadBlock>>>(Params,iMatch,Wh,cmax,mus,id,x);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
bestFilter<<<gridBlock,threadBlock>>>(Params,iMatch,Wh,cmax,mus,id,x);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
bestFilter<<<gridBlock,threadBlock>>>(Params,iMatch,Wh,cmax,mus,id,x);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
99969b50815bff1b99e794632bf19180d1e66aa9.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include <cstdlib>
#include <iostream>
using namespace std;
#define BUF_SIZE (1 << 10)
#define BLOCKDIM 256
__global__ void child_kernel(int *data, int seed)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
atomicAdd(&data[idx], seed);
}
__global__ void parent_kernel(int *data)
{
if (threadIdx.x == 0)
{
int child_size = BUF_SIZE/gridDim.x;
hipLaunchKernelGGL(( child_kernel), dim3(child_size/BLOCKDIM), dim3(BLOCKDIM) , 0, 0, &data[child_size*blockIdx.x], blockIdx.x+1);
}
// wait for the child grids launched by this thread block to finish
hipDeviceSynchronize();
}
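// Note (assumption): device-side kernel launches (dynamic parallelism) are generally
// not supported by HIP on AMD GPUs, so this hipified example is only expected to run
// on platforms whose backend supports them.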
int main()
{
int *data;
int num_child = 2;
hipMallocManaged((void**)&data, BUF_SIZE * sizeof(int));
hipMemset(data, 0, BUF_SIZE * sizeof(int));
hipLaunchKernelGGL(( parent_kernel), dim3(num_child), dim3(1), 0, 0, data);
hipDeviceSynchronize();
// Sum the element values on the host
int counter = 0;
for (int i = 0; i < BUF_SIZE; i++) {
counter += data[i];
}
// compute the expected result
int counter_h = 0;
for (int i = 0; i < num_child; i++) {
counter_h += (i+1);
}
counter_h *= BUF_SIZE / num_child;
if (counter_h == counter)
printf("Correct!!\n");
else
printf("Error!! Obtained %d. It should be %d\n", counter, counter_h);
hipFree(data);
return 0;
}
|
99969b50815bff1b99e794632bf19180d1e66aa9.cu
|
#include <cstdio>
#include <cstdlib>
#include <iostream>
using namespace std;
#define BUF_SIZE (1 << 10)
#define BLOCKDIM 256
__global__ void child_kernel(int *data, int seed)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
atomicAdd(&data[idx], seed);
}
__global__ void parent_kernel(int *data)
{
if (threadIdx.x == 0)
{
int child_size = BUF_SIZE/gridDim.x;
child_kernel<<< child_size/BLOCKDIM, BLOCKDIM >>>(&data[child_size*blockIdx.x], blockIdx.x+1);
}
// wait for the child grids launched by this thread block to finish
cudaDeviceSynchronize();
}
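// Note: the device-side launch above uses CUDA dynamic parallelism, which requires
// relocatable device code and a compute capability >= 3.5 target, e.g.
// nvcc -arch=sm_35 -rdc=true -lcudadevrt <this file>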
int main()
{
int *data;
int num_child = 2;
cudaMallocManaged((void**)&data, BUF_SIZE * sizeof(int));
cudaMemset(data, 0, BUF_SIZE * sizeof(int));
parent_kernel<<<num_child, 1>>>(data);
cudaDeviceSynchronize();
// Sum the element values on the host
int counter = 0;
for (int i = 0; i < BUF_SIZE; i++) {
counter += data[i];
}
// compute the expected result
int counter_h = 0;
for (int i = 0; i < num_child; i++) {
counter_h += (i+1);
}
counter_h *= BUF_SIZE / num_child;
if (counter_h == counter)
printf("Correct!!\n");
else
printf("Error!! Obtained %d. It should be %d\n", counter, counter_h);
cudaFree(data);
return 0;
}
|
e81bec362cd87bca3363e9177bf23ff18d1f4e47.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@generated from sparse/blas/zjacobisetup.cu, normal z -> s, Mon Jun 25 18:24:24 2018
@author Hartwig Anzt
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 512
#define PRECISION_s
__global__ void
svjacobisetup_gpu( int num_rows,
int num_vecs,
float *b,
float *d,
float *c,
float *x)
{
int row = blockDim.x * blockIdx.x + threadIdx.x;
if(row < num_rows ){
for( int i=0; i<num_vecs; i++ ){
c[row+i*num_rows] = b[row+i*num_rows] / d[row];
x[row+i*num_rows] = c[row+i*num_rows];
}
}
}
/**
Purpose
-------
Prepares the Jacobi Iteration according to
x^(k+1) = D^(-1) * b - D^(-1) * (L+U) * x^k
x^(k+1) = c - M * x^k.
Returns the vector c. It calls a GPU kernel
Arguments
---------
@param[in]
num_rows magma_int_t
number of rows
@param[in]
b magma_s_matrix
RHS b
@param[in]
d magma_s_matrix
vector with diagonal entries
@param[out]
c magma_s_matrix*
c = D^(-1) * b
@param[out]
x magma_s_matrix*
iteration vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_sgegpuk
********************************************************************/
extern "C" magma_int_t
magma_sjacobisetup_vector_gpu(
magma_int_t num_rows,
magma_s_matrix b,
magma_s_matrix d,
magma_s_matrix c,
magma_s_matrix *x,
magma_queue_t queue )
{
dim3 grid( magma_ceildiv( num_rows, BLOCK_SIZE ) );
int num_vecs = b.num_rows / num_rows;
magma_int_t threads = BLOCK_SIZE;
hipLaunchKernelGGL(( svjacobisetup_gpu), dim3(grid), dim3(threads), 0, queue->cuda_stream(),
num_rows, num_vecs, b.dval, d.dval, c.dval, x->val );
return MAGMA_SUCCESS;
}
__global__ void
sjacobidiagscal_kernel( int num_rows,
int num_vecs,
float *b,
float *d,
float *c)
{
int row = blockDim.x * blockIdx.x + threadIdx.x;
if(row < num_rows ){
for( int i=0; i<num_vecs; i++)
c[row+i*num_rows] = b[row+i*num_rows] * d[row];
}
}
/**
Purpose
-------
Prepares the Jacobi Iteration according to
x^(k+1) = D^(-1) * b - D^(-1) * (L+U) * x^k
x^(k+1) = c - M * x^k.
Returns the vector c. It calls a GPU kernel
Arguments
---------
@param[in]
num_rows magma_int_t
number of rows
@param[in]
b magma_s_matrix
RHS b
@param[in]
d magma_s_matrix
vector with diagonal entries
@param[out]
c magma_s_matrix*
c = D^(-1) * b
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_s
********************************************************************/
extern "C" magma_int_t
magma_sjacobi_diagscal(
magma_int_t num_rows,
magma_s_matrix d,
magma_s_matrix b,
magma_s_matrix *c,
magma_queue_t queue )
{
dim3 grid( magma_ceildiv( num_rows, 512 ));
int num_vecs = b.num_rows*b.num_cols/num_rows;
magma_int_t threads = 512;
hipLaunchKernelGGL(( sjacobidiagscal_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream(), num_rows, num_vecs, b.dval, d.dval, c->val );
return MAGMA_SUCCESS;
}
__global__ void
sjacobiupdate_kernel( int num_rows,
int num_cols,
float *t,
float *b,
float *d,
float *x)
{
int row = blockDim.x * blockIdx.x + threadIdx.x;
if(row < num_rows ){
for( int i=0; i<num_cols; i++)
x[row+i*num_rows] += (b[row+i*num_rows]-t[row+i*num_rows]) * d[row];
}
}
/**
Purpose
-------
Updates the iteration vector x for the Jacobi iteration
according to
x=x+d.*(b-t)
where d is the diagonal of the system matrix A and t=Ax.
Arguments
---------
@param[in]
t magma_s_matrix
t = A*x
@param[in]
b magma_s_matrix
RHS b
@param[in]
d magma_s_matrix
vector with diagonal entries
@param[out]
x magma_s_matrix*
iteration vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_s
********************************************************************/
extern "C" magma_int_t
magma_sjacobiupdate(
magma_s_matrix t,
magma_s_matrix b,
magma_s_matrix d,
magma_s_matrix *x,
magma_queue_t queue )
{
dim3 grid( magma_ceildiv( t.num_rows, BLOCK_SIZE ));
magma_int_t threads = BLOCK_SIZE;
hipLaunchKernelGGL(( sjacobiupdate_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream(), t.num_rows, t.num_cols, t.dval, b.dval, d.dval, x->dval );
return MAGMA_SUCCESS;
}
__global__ void
sjacobispmvupdate_kernel(
int num_rows,
int num_cols,
float * dval,
magma_index_t * drowptr,
magma_index_t * dcolind,
float *t,
float *b,
float *d,
float *x )
{
int row = blockDim.x * blockIdx.x + threadIdx.x;
int j;
if(row<num_rows){
float dot = MAGMA_S_ZERO;
int start = drowptr[ row ];
int end = drowptr[ row+1 ];
for( int i=0; i<num_cols; i++){
for( j=start; j<end; j++){
dot += dval[ j ] * x[ dcolind[j]+i*num_rows ];
}
x[row+i*num_rows] += (b[row+i*num_rows]-dot) * d[row];
}
}
}
/**
Purpose
-------
Updates the iteration vector x for the Jacobi iteration
according to
x=x+d.*(b-Ax)
Arguments
---------
@param[in]
maxiter magma_int_t
number of Jacobi iterations
@param[in]
A magma_s_matrix
system matrix
@param[in]
t magma_s_matrix
workspace
@param[in]
b magma_s_matrix
RHS b
@param[in]
d magma_s_matrix
vector with diagonal entries
@param[out]
x magma_s_matrix*
iteration vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_s
********************************************************************/
extern "C" magma_int_t
magma_sjacobispmvupdate(
magma_int_t maxiter,
magma_s_matrix A,
magma_s_matrix t,
magma_s_matrix b,
magma_s_matrix d,
magma_s_matrix *x,
magma_queue_t queue )
{
// local variables
//float c_zero = MAGMA_S_ZERO;
//float c_one = MAGMA_S_ONE;
dim3 grid( magma_ceildiv( t.num_rows, BLOCK_SIZE ));
magma_int_t threads = BLOCK_SIZE;
for( magma_int_t i=0; i<maxiter; i++ ) {
// distinct routines imply synchronization
// magma_s_spmv( c_one, A, *x, c_zero, t, queue ); // t = A * x
//hipLaunchKernelGGL(( sjacobiupdate_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream(), t.num_rows, t.num_cols, t.dval, b.dval, d.dval, x->dval );
// merged in one implies asynchronous update
hipLaunchKernelGGL(( sjacobispmvupdate_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream(),
t.num_rows, t.num_cols, A.dval, A.drow, A.dcol, t.dval, b.dval, d.dval, x->dval );
}
return MAGMA_SUCCESS;
}
__global__ void
sjacobispmvupdate_bw_kernel(
int num_rows,
int num_cols,
float * dval,
magma_index_t * drowptr,
magma_index_t * dcolind,
float *t,
float *b,
float *d,
float *x )
{
int row_tmp = blockDim.x * blockIdx.x + threadIdx.x;
int row = num_rows-1 - row_tmp;
int j;
if( row>-1 ){
float dot = MAGMA_S_ZERO;
int start = drowptr[ row ];
int end = drowptr[ row+1 ];
for( int i=0; i<num_cols; i++){
for( j=start; j<end; j++){
dot += dval[ j ] * x[ dcolind[j]+i*num_rows ];
}
x[row+i*num_rows] += (b[row+i*num_rows]-dot) * d[row];
}
}
}
/**
Purpose
-------
Updates the iteration vector x for the Jacobi iteration
according to
x=x+d.*(b-Ax)
This kernel processes the thread blocks in reversed order.
Arguments
---------
@param[in]
maxiter magma_int_t
number of Jacobi iterations
@param[in]
A magma_s_matrix
system matrix
@param[in]
t magma_s_matrix
workspace
@param[in]
b magma_s_matrix
RHS b
@param[in]
d magma_s_matrix
vector with diagonal entries
@param[out]
x magma_s_matrix*
iteration vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_s
********************************************************************/
extern "C" magma_int_t
magma_sjacobispmvupdate_bw(
magma_int_t maxiter,
magma_s_matrix A,
magma_s_matrix t,
magma_s_matrix b,
magma_s_matrix d,
magma_s_matrix *x,
magma_queue_t queue )
{
// local variables
//float c_zero = MAGMA_S_ZERO;
//float c_one = MAGMA_S_ONE;
dim3 grid( magma_ceildiv( t.num_rows, BLOCK_SIZE ));
magma_int_t threads = BLOCK_SIZE;
for( magma_int_t i=0; i<maxiter; i++ ) {
// distinct routines imply synchronization
// magma_s_spmv( c_one, A, *x, c_zero, t, queue ); // t = A * x
//hipLaunchKernelGGL(( sjacobiupdate_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream(), t.num_rows, t.num_cols, t.dval, b.dval, d.dval, x->dval );
// merged in one implies asynchronous update
hipLaunchKernelGGL(( sjacobispmvupdate_bw_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream(),
t.num_rows, t.num_cols, A.dval, A.drow, A.dcol, t.dval, b.dval, d.dval, x->dval );
}
return MAGMA_SUCCESS;
}
__global__ void
sjacobispmvupdateselect_kernel(
int num_rows,
int num_cols,
int num_updates,
magma_index_t * indices,
float * dval,
magma_index_t * drowptr,
magma_index_t * dcolind,
float *t,
float *b,
float *d,
float *x,
float *y )
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int j;
if( idx<num_updates){
int row = indices[ idx ];
printf(" ");
//if( row < num_rows ){
float dot = MAGMA_S_ZERO;
int start = drowptr[ row ];
int end = drowptr[ row+1 ];
for( int i=0; i<num_cols; i++){
for( j=start; j<end; j++){
dot += dval[ j ] * x[ dcolind[j]+i*num_rows ];
}
x[row+i*num_rows] = x[row+i*num_rows] + (b[row+i*num_rows]-dot) * d[row];
//float add = (b[row+i*num_rows]-dot) * d[row];
//#if defined(PRECISION_s) //|| defined(PRECISION_d)
// atomicAdd( x + row + i*num_rows, add );
//#endif
// ( unsigned int* address, unsigned int val);
//}
}
}
}
/**
Purpose
-------
Updates the iteration vector x for the Jacobi iteration
according to
x=x+d.*(b-Ax)
This kernel allows for overlapping domains: the indices-array contains
the locations that are updated. Locations may be repeated to simulate
overlapping domains.
Arguments
---------
@param[in]
maxiter magma_int_t
number of Jacobi iterations
@param[in]
num_updates magma_int_t
number of updates - length of the indices array
@param[in]
indices magma_index_t*
indices, which entries of x to update
@param[in]
A magma_s_matrix
system matrix
@param[in]
t magma_s_matrix
workspace
@param[in]
b magma_s_matrix
RHS b
@param[in]
d magma_s_matrix
vector with diagonal entries
@param[in]
tmp magma_s_matrix
workspace
@param[out]
x magma_s_matrix*
iteration vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_s
********************************************************************/
extern "C" magma_int_t
magma_sjacobispmvupdateselect(
magma_int_t maxiter,
magma_int_t num_updates,
magma_index_t *indices,
magma_s_matrix A,
magma_s_matrix t,
magma_s_matrix b,
magma_s_matrix d,
magma_s_matrix tmp,
magma_s_matrix *x,
magma_queue_t queue )
{
// local variables
//float c_zero = MAGMA_S_ZERO
//float c_one = MAGMA_S_ONE;
//magma_s_matrix swp;
dim3 grid( magma_ceildiv( num_updates, BLOCK_SIZE ));
magma_int_t threads = BLOCK_SIZE;
printf("num updates:%d %d %d\n", int(num_updates), int(threads), int(grid.x) );
for( magma_int_t i=0; i<maxiter; i++ ) {
hipLaunchKernelGGL(( sjacobispmvupdateselect_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream(),
t.num_rows, t.num_cols, num_updates, indices, A.dval, A.drow, A.dcol, t.dval, b.dval, d.dval, x->dval, tmp.dval );
//swp.dval = x->dval;
//x->dval = tmp.dval;
//tmp.dval = swp.dval;
}
return MAGMA_SUCCESS;
}
__global__ void
sftjacobicontractions_kernel(
int num_rows,
float * xkm2val,
float * xkm1val,
float * xkval,
float * zval,
float * cval )
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if( idx<num_rows ){
zval[idx] = MAGMA_S_MAKE( MAGMA_S_ABS( xkm1val[idx] - xkval[idx] ), 0.0);
cval[ idx ] = MAGMA_S_MAKE(
MAGMA_S_ABS( xkm2val[idx] - xkm1val[idx] )
/ MAGMA_S_ABS( xkm1val[idx] - xkval[idx] )
,0.0 );
}
}
/**
Purpose
-------
Computes the contraction coefficients c_i:
c_i = z_i^{k-1} / z_i^{k}
= | x_i^{k-1} - x_i^{k-2} | / | x_i^{k} - x_i^{k-1} |
Arguments
---------
@param[in]
xkm2 magma_s_matrix
vector x^{k-2}
@param[in]
xkm1 magma_s_matrix
vector x^{k-1}
@param[in]
xk magma_s_matrix
vector x^{k}
@param[out]
z magma_s_matrix*
ratio
@param[out]
c magma_s_matrix*
contraction coefficients
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_s
********************************************************************/
extern "C" magma_int_t
magma_sftjacobicontractions(
magma_s_matrix xkm2,
magma_s_matrix xkm1,
magma_s_matrix xk,
magma_s_matrix *z,
magma_s_matrix *c,
magma_queue_t queue )
{
dim3 grid( magma_ceildiv( xk.num_rows, BLOCK_SIZE ));
magma_int_t threads = BLOCK_SIZE;
hipLaunchKernelGGL(( sftjacobicontractions_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream(),
xkm2.num_rows, xkm2.dval, xkm1.dval, xk.dval, z->dval, c->dval );
return MAGMA_SUCCESS;
}
__global__ void
sftjacobiupdatecheck_kernel(
int num_rows,
float delta,
float * xold,
float * xnew,
float * zprev,
float * cval,
magma_int_t *flag_t,
magma_int_t *flag_fp )
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if( idx<num_rows ){
float t1 = delta * MAGMA_S_ABS(cval[idx]);
float vkv = 1.0;
for( magma_int_t i=0; i<min( flag_fp[idx], 100 ); i++){
vkv = vkv*2;
}
float xold_l = xold[idx];
float xnew_l = xnew[idx];
float znew = MAGMA_S_MAKE(
max( MAGMA_S_ABS( xold_l - xnew_l), 1e-15), 0.0 );
float znr = zprev[idx] / znew;
float t2 = MAGMA_S_ABS( znr - cval[idx] );
//% evaluate fp-cond
magma_int_t fpcond = 0;
if( MAGMA_S_ABS(znr)>vkv ){
fpcond = 1;
}
// % combine t-cond and fp-cond + flag_t == 1
magma_int_t cond = 0;
if( t2<t1 || (flag_t[idx]>0 && fpcond > 0 ) ){
cond = 1;
}
flag_fp[idx] = flag_fp[idx]+1;
if( fpcond>0 ){
flag_fp[idx] = 0;
}
if( cond > 0 ){
flag_t[idx] = 0;
zprev[idx] = znew;
xold[idx] = xnew_l;
} else {
flag_t[idx] = 1;
xnew[idx] = xold_l;
}
}
}
/**
Purpose
-------
Checks the Jacobi updates according to the condition in the ScaLA'15 paper.
Arguments
---------
@param[in]
delta float
threshold
@param[in,out]
xold magma_s_matrix*
vector xold
@param[in,out]
xnew magma_s_matrix*
vector xnew
@param[in,out]
zprev magma_s_matrix*
vector z = | x_k-1 - x_k |
@param[in]
c magma_s_matrix
contraction coefficients
@param[in,out]
flag_t magma_int_t
threshold condition
@param[in,out]
flag_fp magma_int_t
false positive condition
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_s
********************************************************************/
extern "C" magma_int_t
magma_sftjacobiupdatecheck(
float delta,
magma_s_matrix *xold,
magma_s_matrix *xnew,
magma_s_matrix *zprev,
magma_s_matrix c,
magma_int_t *flag_t,
magma_int_t *flag_fp,
magma_queue_t queue )
{
dim3 grid( magma_ceildiv( xnew->num_rows, BLOCK_SIZE ));
magma_int_t threads = BLOCK_SIZE;
hipLaunchKernelGGL(( sftjacobiupdatecheck_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream(),
xold->num_rows, delta, xold->dval, xnew->dval, zprev->dval, c.dval,
flag_t, flag_fp );
return MAGMA_SUCCESS;
}
|
e81bec362cd87bca3363e9177bf23ff18d1f4e47.cu
|
/*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@generated from sparse/blas/zjacobisetup.cu, normal z -> s, Mon Jun 25 18:24:24 2018
@author Hartwig Anzt
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 512
#define PRECISION_s
__global__ void
svjacobisetup_gpu( int num_rows,
int num_vecs,
float *b,
float *d,
float *c,
float *x)
{
int row = blockDim.x * blockIdx.x + threadIdx.x;
if(row < num_rows ){
for( int i=0; i<num_vecs; i++ ){
c[row+i*num_rows] = b[row+i*num_rows] / d[row];
x[row+i*num_rows] = c[row+i*num_rows];
}
}
}
/**
Purpose
-------
Prepares the Jacobi Iteration according to
x^(k+1) = D^(-1) * b - D^(-1) * (L+U) * x^k
x^(k+1) = c - M * x^k.
Returns the vector c. It calls a GPU kernel
Arguments
---------
@param[in]
num_rows magma_int_t
number of rows
@param[in]
b magma_s_matrix
RHS b
@param[in]
d magma_s_matrix
vector with diagonal entries
@param[out]
c magma_s_matrix*
c = D^(-1) * b
@param[out]
x magma_s_matrix*
iteration vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_sgegpuk
********************************************************************/
extern "C" magma_int_t
magma_sjacobisetup_vector_gpu(
magma_int_t num_rows,
magma_s_matrix b,
magma_s_matrix d,
magma_s_matrix c,
magma_s_matrix *x,
magma_queue_t queue )
{
dim3 grid( magma_ceildiv( num_rows, BLOCK_SIZE ) );
int num_vecs = b.num_rows / num_rows;
magma_int_t threads = BLOCK_SIZE;
svjacobisetup_gpu<<< grid, threads, 0, queue->cuda_stream()>>>
( num_rows, num_vecs, b.dval, d.dval, c.dval, x->val );
return MAGMA_SUCCESS;
}
__global__ void
sjacobidiagscal_kernel( int num_rows,
int num_vecs,
float *b,
float *d,
float *c)
{
int row = blockDim.x * blockIdx.x + threadIdx.x;
if(row < num_rows ){
for( int i=0; i<num_vecs; i++)
c[row+i*num_rows] = b[row+i*num_rows] * d[row];
}
}
/**
Purpose
-------
Prepares the Jacobi Iteration according to
x^(k+1) = D^(-1) * b - D^(-1) * (L+U) * x^k
x^(k+1) = c - M * x^k.
Returns the vector c. It calls a GPU kernel
Arguments
---------
@param[in]
num_rows magma_int_t
number of rows
@param[in]
b magma_s_matrix
RHS b
@param[in]
d magma_s_matrix
vector with diagonal entries
@param[out]
c magma_s_matrix*
c = D^(-1) * b
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_s
********************************************************************/
extern "C" magma_int_t
magma_sjacobi_diagscal(
magma_int_t num_rows,
magma_s_matrix d,
magma_s_matrix b,
magma_s_matrix *c,
magma_queue_t queue )
{
dim3 grid( magma_ceildiv( num_rows, 512 ));
int num_vecs = b.num_rows*b.num_cols/num_rows;
magma_int_t threads = 512;
sjacobidiagscal_kernel<<< grid, threads, 0, queue->cuda_stream()>>>( num_rows, num_vecs, b.dval, d.dval, c->val );
return MAGMA_SUCCESS;
}
__global__ void
sjacobiupdate_kernel( int num_rows,
int num_cols,
float *t,
float *b,
float *d,
float *x)
{
int row = blockDim.x * blockIdx.x + threadIdx.x;
if(row < num_rows ){
for( int i=0; i<num_cols; i++)
x[row+i*num_rows] += (b[row+i*num_rows]-t[row+i*num_rows]) * d[row];
}
}
/**
Purpose
-------
Updates the iteration vector x for the Jacobi iteration
according to
x=x+d.*(b-t)
where d is the diagonal of the system matrix A and t=Ax.
Arguments
---------
@param[in]
t magma_s_matrix
t = A*x
@param[in]
b magma_s_matrix
RHS b
@param[in]
d magma_s_matrix
vector with diagonal entries
@param[out]
x magma_s_matrix*
iteration vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_s
********************************************************************/
extern "C" magma_int_t
magma_sjacobiupdate(
magma_s_matrix t,
magma_s_matrix b,
magma_s_matrix d,
magma_s_matrix *x,
magma_queue_t queue )
{
dim3 grid( magma_ceildiv( t.num_rows, BLOCK_SIZE ));
magma_int_t threads = BLOCK_SIZE;
sjacobiupdate_kernel<<< grid, threads, 0, queue->cuda_stream()>>>( t.num_rows, t.num_cols, t.dval, b.dval, d.dval, x->dval );
return MAGMA_SUCCESS;
}
__global__ void
sjacobispmvupdate_kernel(
int num_rows,
int num_cols,
float * dval,
magma_index_t * drowptr,
magma_index_t * dcolind,
float *t,
float *b,
float *d,
float *x )
{
int row = blockDim.x * blockIdx.x + threadIdx.x;
int j;
if(row<num_rows){
float dot = MAGMA_S_ZERO;
int start = drowptr[ row ];
int end = drowptr[ row+1 ];
for( int i=0; i<num_cols; i++){
for( j=start; j<end; j++){
dot += dval[ j ] * x[ dcolind[j]+i*num_rows ];
}
x[row+i*num_rows] += (b[row+i*num_rows]-dot) * d[row];
}
}
}
/**
Purpose
-------
Updates the iteration vector x for the Jacobi iteration
according to
x=x+d.*(b-Ax)
Arguments
---------
@param[in]
maxiter magma_int_t
number of Jacobi iterations
@param[in]
A magma_s_matrix
system matrix
@param[in]
t magma_s_matrix
workspace
@param[in]
b magma_s_matrix
RHS b
@param[in]
d magma_s_matrix
vector with diagonal entries
@param[out]
x magma_s_matrix*
iteration vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_s
********************************************************************/
extern "C" magma_int_t
magma_sjacobispmvupdate(
magma_int_t maxiter,
magma_s_matrix A,
magma_s_matrix t,
magma_s_matrix b,
magma_s_matrix d,
magma_s_matrix *x,
magma_queue_t queue )
{
// local variables
//float c_zero = MAGMA_S_ZERO;
//float c_one = MAGMA_S_ONE;
dim3 grid( magma_ceildiv( t.num_rows, BLOCK_SIZE ));
magma_int_t threads = BLOCK_SIZE;
for( magma_int_t i=0; i<maxiter; i++ ) {
// distinct routines imply synchronization
// magma_s_spmv( c_one, A, *x, c_zero, t, queue ); // t = A * x
// sjacobiupdate_kernel<<< grid, threads, 0, queue->cuda_stream()>>>( t.num_rows, t.num_cols, t.dval, b.dval, d.dval, x->dval );
// merged in one implies asynchronous update
sjacobispmvupdate_kernel<<< grid, threads, 0, queue->cuda_stream()>>>
( t.num_rows, t.num_cols, A.dval, A.drow, A.dcol, t.dval, b.dval, d.dval, x->dval );
}
return MAGMA_SUCCESS;
}
__global__ void
sjacobispmvupdate_bw_kernel(
int num_rows,
int num_cols,
float * dval,
magma_index_t * drowptr,
magma_index_t * dcolind,
float *t,
float *b,
float *d,
float *x )
{
int row_tmp = blockDim.x * blockIdx.x + threadIdx.x;
int row = num_rows-1 - row_tmp;
int j;
if( row>-1 ){
float dot = MAGMA_S_ZERO;
int start = drowptr[ row ];
int end = drowptr[ row+1 ];
for( int i=0; i<num_cols; i++){
for( j=start; j<end; j++){
dot += dval[ j ] * x[ dcolind[j]+i*num_rows ];
}
x[row+i*num_rows] += (b[row+i*num_rows]-dot) * d[row];
}
}
}
/**
Purpose
-------
Updates the iteration vector x for the Jacobi iteration
according to
x=x+d.*(b-Ax)
This kernel processes the thread blocks in reversed order.
Arguments
---------
@param[in]
maxiter magma_int_t
number of Jacobi iterations
@param[in]
A magma_s_matrix
system matrix
@param[in]
t magma_s_matrix
workspace
@param[in]
b magma_s_matrix
RHS b
@param[in]
d magma_s_matrix
vector with diagonal entries
@param[out]
x magma_s_matrix*
iteration vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_s
********************************************************************/
extern "C" magma_int_t
magma_sjacobispmvupdate_bw(
magma_int_t maxiter,
magma_s_matrix A,
magma_s_matrix t,
magma_s_matrix b,
magma_s_matrix d,
magma_s_matrix *x,
magma_queue_t queue )
{
// local variables
//float c_zero = MAGMA_S_ZERO;
//float c_one = MAGMA_S_ONE;
dim3 grid( magma_ceildiv( t.num_rows, BLOCK_SIZE ));
magma_int_t threads = BLOCK_SIZE;
for( magma_int_t i=0; i<maxiter; i++ ) {
// distinct routines imply synchronization
// magma_s_spmv( c_one, A, *x, c_zero, t, queue ); // t = A * x
// sjacobiupdate_kernel<<< grid, threads, 0, queue->cuda_stream()>>>( t.num_rows, t.num_cols, t.dval, b.dval, d.dval, x->dval );
// merged in one implies asynchronous update
sjacobispmvupdate_bw_kernel<<< grid, threads, 0, queue->cuda_stream()>>>
( t.num_rows, t.num_cols, A.dval, A.drow, A.dcol, t.dval, b.dval, d.dval, x->dval );
}
return MAGMA_SUCCESS;
}
__global__ void
sjacobispmvupdateselect_kernel(
int num_rows,
int num_cols,
int num_updates,
magma_index_t * indices,
float * dval,
magma_index_t * drowptr,
magma_index_t * dcolind,
float *t,
float *b,
float *d,
float *x,
float *y )
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int j;
if( idx<num_updates){
int row = indices[ idx ];
printf(" ");
//if( row < num_rows ){
float dot = MAGMA_S_ZERO;
int start = drowptr[ row ];
int end = drowptr[ row+1 ];
for( int i=0; i<num_cols; i++){
for( j=start; j<end; j++){
dot += dval[ j ] * x[ dcolind[j]+i*num_rows ];
}
x[row+i*num_rows] = x[row+i*num_rows] + (b[row+i*num_rows]-dot) * d[row];
//float add = (b[row+i*num_rows]-dot) * d[row];
//#if defined(PRECISION_s) //|| defined(PRECISION_d)
// atomicAdd( x + row + i*num_rows, add );
//#endif
// ( unsigned int* address, unsigned int val);
//}
}
}
}
/**
Purpose
-------
Updates the iteration vector x for the Jacobi iteration
according to
x=x+d.*(b-Ax)
This kernel allows for overlapping domains: the indices-array contains
the locations that are updated. Locations may be repeated to simulate
overlapping domains.
Arguments
---------
@param[in]
maxiter magma_int_t
number of Jacobi iterations
@param[in]
num_updates magma_int_t
number of updates - length of the indices array
@param[in]
indices magma_index_t*
indices, which entries of x to update
@param[in]
A magma_s_matrix
system matrix
@param[in]
t magma_s_matrix
workspace
@param[in]
b magma_s_matrix
RHS b
@param[in]
d magma_s_matrix
vector with diagonal entries
@param[in]
tmp magma_s_matrix
workspace
@param[out]
x magma_s_matrix*
iteration vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_s
********************************************************************/
extern "C" magma_int_t
magma_sjacobispmvupdateselect(
magma_int_t maxiter,
magma_int_t num_updates,
magma_index_t *indices,
magma_s_matrix A,
magma_s_matrix t,
magma_s_matrix b,
magma_s_matrix d,
magma_s_matrix tmp,
magma_s_matrix *x,
magma_queue_t queue )
{
// local variables
//float c_zero = MAGMA_S_ZERO
//float c_one = MAGMA_S_ONE;
//magma_s_matrix swp;
dim3 grid( magma_ceildiv( num_updates, BLOCK_SIZE ));
magma_int_t threads = BLOCK_SIZE;
printf("num updates:%d %d %d\n", int(num_updates), int(threads), int(grid.x) );
for( magma_int_t i=0; i<maxiter; i++ ) {
sjacobispmvupdateselect_kernel<<< grid, threads, 0, queue->cuda_stream()>>>
( t.num_rows, t.num_cols, num_updates, indices, A.dval, A.drow, A.dcol, t.dval, b.dval, d.dval, x->dval, tmp.dval );
//swp.dval = x->dval;
//x->dval = tmp.dval;
//tmp.dval = swp.dval;
}
return MAGMA_SUCCESS;
}
__global__ void
sftjacobicontractions_kernel(
int num_rows,
float * xkm2val,
float * xkm1val,
float * xkval,
float * zval,
float * cval )
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if( idx<num_rows ){
zval[idx] = MAGMA_S_MAKE( MAGMA_S_ABS( xkm1val[idx] - xkval[idx] ), 0.0);
cval[ idx ] = MAGMA_S_MAKE(
MAGMA_S_ABS( xkm2val[idx] - xkm1val[idx] )
/ MAGMA_S_ABS( xkm1val[idx] - xkval[idx] )
,0.0 );
}
}
/**
Purpose
-------
Computes the contraction coefficients c_i:
c_i = z_i^{k-1} / z_i^{k}
= | x_i^{k-1} - x_i^{k-2} | / | x_i^{k} - x_i^{k-1} |
Arguments
---------
@param[in]
xkm2 magma_s_matrix
vector x^{k-2}
@param[in]
xkm1 magma_s_matrix
vector x^{k-1}
@param[in]
xk magma_s_matrix
vector x^{k}
@param[out]
z magma_s_matrix*
ratio
@param[out]
c magma_s_matrix*
contraction coefficients
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_s
********************************************************************/
extern "C" magma_int_t
magma_sftjacobicontractions(
magma_s_matrix xkm2,
magma_s_matrix xkm1,
magma_s_matrix xk,
magma_s_matrix *z,
magma_s_matrix *c,
magma_queue_t queue )
{
dim3 grid( magma_ceildiv( xk.num_rows, BLOCK_SIZE ));
magma_int_t threads = BLOCK_SIZE;
sftjacobicontractions_kernel<<< grid, threads, 0, queue->cuda_stream()>>>
( xkm2.num_rows, xkm2.dval, xkm1.dval, xk.dval, z->dval, c->dval );
return MAGMA_SUCCESS;
}
__global__ void
sftjacobiupdatecheck_kernel(
int num_rows,
float delta,
float * xold,
float * xnew,
float * zprev,
float * cval,
magma_int_t *flag_t,
magma_int_t *flag_fp )
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if( idx<num_rows ){
float t1 = delta * MAGMA_S_ABS(cval[idx]);
float vkv = 1.0;
for( magma_int_t i=0; i<min( flag_fp[idx], 100 ); i++){
vkv = vkv*2;
}
float xold_l = xold[idx];
float xnew_l = xnew[idx];
float znew = MAGMA_S_MAKE(
max( MAGMA_S_ABS( xold_l - xnew_l), 1e-15), 0.0 );
float znr = zprev[idx] / znew;
float t2 = MAGMA_S_ABS( znr - cval[idx] );
//% evaluate fp-cond
magma_int_t fpcond = 0;
if( MAGMA_S_ABS(znr)>vkv ){
fpcond = 1;
}
// % combine t-cond and fp-cond + flag_t == 1
magma_int_t cond = 0;
if( t2<t1 || (flag_t[idx]>0 && fpcond > 0 ) ){
cond = 1;
}
flag_fp[idx] = flag_fp[idx]+1;
if( fpcond>0 ){
flag_fp[idx] = 0;
}
if( cond > 0 ){
flag_t[idx] = 0;
zprev[idx] = znew;
xold[idx] = xnew_l;
} else {
flag_t[idx] = 1;
xnew[idx] = xold_l;
}
}
}
/**
Purpose
-------
Checks the Jacobi updates according to the condition in the ScaLA'15 paper.
Arguments
---------
@param[in]
delta float
threshold
@param[in,out]
xold magma_s_matrix*
vector xold
@param[in,out]
xnew magma_s_matrix*
vector xnew
@param[in,out]
zprev magma_s_matrix*
vector z = | x_k-1 - x_k |
@param[in]
c magma_s_matrix
contraction coefficients
@param[in,out]
flag_t magma_int_t
threshold condition
@param[in,out]
flag_fp magma_int_t
false positive condition
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_s
********************************************************************/
extern "C" magma_int_t
magma_sftjacobiupdatecheck(
float delta,
magma_s_matrix *xold,
magma_s_matrix *xnew,
magma_s_matrix *zprev,
magma_s_matrix c,
magma_int_t *flag_t,
magma_int_t *flag_fp,
magma_queue_t queue )
{
dim3 grid( magma_ceildiv( xnew->num_rows, BLOCK_SIZE ));
magma_int_t threads = BLOCK_SIZE;
sftjacobiupdatecheck_kernel<<< grid, threads, 0, queue->cuda_stream()>>>
( xold->num_rows, delta, xold->dval, xnew->dval, zprev->dval, c.dval,
flag_t, flag_fp );
return MAGMA_SUCCESS;
}
|
9178d16b36a085272634dc90707875f7a70b1da6.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (C) 2009-2012 EM Photonics, Inc. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to EM Photonics ownership rights under U.S. and
* international Copyright laws. Users and possessors of this source code may
* not redistribute this code without the express written consent of EM
* Photonics, Inc.
*
* EM PHOTONICS MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED
* WARRANTY OF ANY KIND. EM PHOTONICS DISCLAIMS ALL WARRANTIES WITH REGARD TO
* THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
* NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT SHALL EM
* PHOTONICS BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL
* DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
* SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as that
* term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of "commercial
* computer software" and "commercial computer software documentation" as
* such terms are used in 48 C.F.R. 12.212 (SEPT 1995) and is provided to the
* U.S. Government only as a commercial end item. Consistent with 48
* C.F.R.12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
* U.S. Government End Users acquire the source code with only those rights set
* forth herein.
*
* Any use of this source code in individual and commercial software must
* include, in the user documentation and internal comments to the code, the
* above Disclaimer and U.S. Government End Users Notice.
*
*/
/*
* CULA Example: systemSolve
*
* This example shows how to use a system solve for multiple data types. Each
* data type has its own example case for clarity. For each data type, the
* following steps are done:
*
* 1. Allocate a matrix on the host
* 2. Initialize CULA
* 3. Initialize the A matrix to the Identity
* 4. Call gesv on the matrix
* 5. Verify the results
* 6. Call culaShutdown
*
* After each CULA operation, the status of CULA is checked. On failure, an
* error message is printed and the program exits.
*
* Note: CULA Premium and double-precision GPU hardware are required to run the
* double-precision examples
*
* Note: this example performs a system solve on an identity matrix against a
* random vector, the result of which is that same random vector. This is not
* true in the general case and is only appropriate for this example. For a
* general case check, the product A*X should be checked against B. Note that
* because A is modified by GESV, a copy of A would be needed with which to do
* the verification.
*/
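/*
 * Illustrative sketch (not part of the original example): a general-case
 * verification would keep an untouched copy of A (here called A_copy, an
 * assumed name) and test the residual A_copy*X - B on the host, e.g.
 *
 *     for (int row = 0; row < N; ++row) {
 *         float r = -B[row];
 *         for (int col = 0; col < N; ++col)
 *             r += A_copy[row + col*N] * X[col];   // column-major, LAPACK-style
 *         // fabs(r) should stay below a small tolerance
 *     }
 */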
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include<cuda.h>
#include <cula_lapack.h>
#include<cula_lapack_device.h>
void checkStatus(culaStatus status)
{
char buf[256];
if(!status)
return;
culaGetErrorInfoString(status, culaGetErrorInfo(), buf, sizeof(buf));
printf("%s\n", buf);
culaShutdown();
exit(EXIT_FAILURE);
}
void culaDeviceFloatExample()
{
#ifdef NDEBUG
int N = 2000;
#else
int N = 1024;
#endif
int NRHS = 1;
int i, j;
hipEvent_t start, stop;
float t_sgetrf;
hipEventCreate(&start);
hipEventCreate(&stop);
culaStatus status;
culaFloat* A = NULL;
culaFloat* A_bak = NULL;
culaFloat* B = NULL;
culaFloat* X = NULL;
culaInt* IPIV = NULL;
culaDeviceFloat* Ad = NULL;
culaDeviceFloat* Ad_bak = NULL;
culaDeviceFloat* Bd = NULL;
culaDeviceFloat* Xd = NULL;
culaDeviceInt* IPIVd = NULL;
// culaFloat one = 2.0f;
culaFloat thresh = 1e-6f;
culaFloat diff;
printf("-------------------\n");
printf(" SGETRF\n");
printf("-------------------\n");
printf("Allocating Matrices on host\n");
A = (culaFloat*)malloc(N*N*sizeof(culaFloat));
A_bak = (culaFloat*)malloc(N*N*sizeof(culaFloat));
B = (culaFloat*)malloc(N*sizeof(culaFloat));
X = (culaFloat*)malloc(N*sizeof(culaFloat));
IPIV = (culaInt*)malloc(N*sizeof(culaInt));
if(!A || !B || !IPIV || !A_bak)
exit(EXIT_FAILURE);
printf("Allocating Matrices on device\n");
hipMalloc((void**)&Ad,N*N*sizeof(culaFloat));
// hipMalloc((void**)&Ad_bak,N*N*sizeof(culaFloat));
hipMalloc((void**)&Bd,N*sizeof(culaFloat));
hipMalloc((void**)&Xd,N*sizeof(culaFloat));
hipMalloc((void**)&IPIVd,N*sizeof(culaInt));
hipEventRecord(start, 0);
printf("Initializing CULA\n");
status = culaInitialize();
checkStatus(status);
// Set A to the identity matrix
memset(A, 0, N*N*sizeof(culaFloat));
memset(A_bak, 0, N*N*sizeof(culaFloat));
for(i = 0; i < N; ++i) {
A_bak[i*N+i] = A[i * N + i] = 2.f;
// printf("%g, %g\n", one, A[i * N + i]);
}
/* A[0]=3;
A[1]=3;
A[2]=0;
A[3]=0;
A[4]=2;
A[5]=2;
A[6]=1;
A[7]=0;
A[8]=1;*/
//Printing the matrix
// Set B to a random matrix (see note at top)
for(i = 0; i < N; ++i)
B[i] = (culaFloat)(rand() % 10);
memcpy(X, B, N*sizeof(culaFloat));
memset(IPIV, 0, N*sizeof(culaInt));
//Copy from Host to Device
hipMemcpy(Ad,A, N*N*sizeof(culaFloat),hipMemcpyHostToDevice);
// hipMemcpy(Ad_bak,A_bak, N*N*sizeof(culaFloat),hipMemcpyHostToDevice);
hipMemcpy(Bd,B, N*sizeof(culaFloat),hipMemcpyHostToDevice);
hipMemcpy(Xd,X, N*sizeof(culaFloat),hipMemcpyHostToDevice);
hipMemcpy(IPIVd,IPIV, N*sizeof(culaInt),hipMemcpyHostToDevice);
/* //Printing the matrix
printf("\n\n");
for (i=0; i< N*N; ++i)
{
printf("%f,",A[i]);
if((i+1)%N==0)
printf("\n");
}*/
printf("Calling culaSgetrf\n");
status = culaDeviceSgetrf(N, N, Ad, N, IPIVd);
checkStatus(status);
//Copy result from Device to Host
hipMemcpy(A,Ad, N*N*sizeof(culaFloat),hipMemcpyDeviceToHost);
// hipMemcpy(A_bak,Ad_bak, N*N*sizeof(culaFloat),hipMemcpyDeviceToHost);
hipMemcpy(B,Bd, N*sizeof(culaFloat),hipMemcpyDeviceToHost);
hipMemcpy(X,Xd, N*sizeof(culaFloat),hipMemcpyDeviceToHost);
hipMemcpy(IPIV,IPIVd, N*sizeof(culaInt),hipMemcpyDeviceToHost);
printf("Verifying Result\n");
int success = 1;
float max_b = 0.0;
for(i =0; i< N; i++)
if(max_b < fabs(B[i]))
max_b = fabs(B[i]);
/* for(i = 0; i < N; ++i)
{
fprintf(stderr, "X[%d] = %g, B[%d] = %g\n", i, X[i], i, B[i]);
}*/
if (success)
printf("Success\n");
else
printf("Failed\n");
printf("Shutting down CULA\n\n");
culaShutdown();
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
//printf("\n Time taken for CULA Sgesv is %f", t_sgesv);
free(A);
free(A_bak);
free(X);
free(B);
free(IPIV);
hipFree(Ad);
hipFree(Ad_bak);
hipFree(Bd);
hipFree(Xd);
hipFree(IPIVd);
hipEventElapsedTime(&t_sgetrf, start, stop);
printf("\n Time taken for CULA Sgetrf is %f ms\n", t_sgetrf);
}
void culaDeviceFloatComplexExample()
{
#ifdef NDEBUG
int N = 4096;
#else
int N = 512;
#endif
int NRHS = 1;
int i;
culaStatus status;
culaFloatComplex* A = NULL;
culaFloatComplex* B = NULL;
culaFloatComplex* X = NULL;
culaInt* IPIV = NULL;
culaFloatComplex one = { 1.0f, 0.0f };
culaFloat thresh = 1e-6f;
culaFloat diffr;
culaFloat diffc;
culaFloat diffabs;
printf("-------------------\n");
printf(" CGESV\n");
printf("-------------------\n");
printf("Allocating Matrices\n");
A = (culaFloatComplex*)malloc(N*N*sizeof(culaFloatComplex));
B = (culaFloatComplex*)malloc(N*sizeof(culaFloatComplex));
X = (culaFloatComplex*)malloc(N*sizeof(culaFloatComplex));
IPIV = (culaInt*)malloc(N*sizeof(culaInt));
if(!A || !B || !IPIV)
exit(EXIT_FAILURE);
printf("Initializing CULA\n");
status = culaInitialize();
checkStatus(status);
// Set A to the identity matrix
memset(A, 0, N*N*sizeof(culaFloatComplex));
for(i = 0; i < N; ++i)
A[i*N+i] = one;
// Set B to a random matrix (see note at top)
for(i = 0; i < N; ++i)
{
B[i].x = (culaFloat)rand();
B[i].y = (culaFloat)rand();
}
memcpy(X, B, N*sizeof(culaFloatComplex));
memset(IPIV, 0, N*sizeof(culaInt));
printf("Calling culaCgesv\n");
status = culaCgesv(N, NRHS, A, N, IPIV, X, N);
checkStatus(status);
printf("Verifying Result\n");
for(i = 0; i < N; ++i)
{
diffr = X[i].x - B[i].x;
diffc = X[i].y - B[i].y;
diffabs = (culaFloat)sqrt(X[i].x*X[i].x+X[i].y*X[i].y)
- (culaFloat)sqrt(B[i].x*B[i].x+B[i].y*B[i].y);
if(diffr < 0.0f)
diffr = -diffr;
if(diffc < 0.0f)
diffc = -diffc;
if(diffabs < 0.0f)
diffabs = -diffabs;
if(diffr > thresh || diffc > thresh || diffabs > thresh)
printf("Result check failed: i=%d X[i]=(%f,%f) B[i]=(%f,%f)", i, X[i].x, X[i].y, B[i].x, B[i].y);
}
printf("Shutting down CULA\n\n");
culaShutdown();
free(A);
free(B);
free(IPIV);
}
// Note: CULA Premium is required for double-precision
#ifdef CULA_PREMIUM
void culaDeviceDoubleExample()
{
#ifdef NDEBUG
int N = 2000;
#else
int N = 512;
#endif
int NRHS = 1;
int i,j;
hipEvent_t start, stop;
float t_dgetrf;
hipEventCreate(&start);
hipEventCreate(&stop);
culaStatus status;
culaDouble* A = NULL;
culaDouble* A_bak = NULL;
culaDouble* B = NULL;
culaDouble* X = NULL;
culaInt* IPIV = NULL;
culaDeviceDouble* Ad = NULL;
culaDeviceDouble* Ad_bak = NULL;
culaDeviceDouble* Bd = NULL;
culaDeviceDouble* Xd = NULL;
culaDeviceInt* IPIVd = NULL;
// culaDouble *work = NULL;
// culaDouble *swork = NULL;
// int *info;
// culaDouble one = 1.0;
culaDouble thresh = 1e-6;
culaDouble diff;
printf("\t-------------------\n");
printf(" DGETRF\n");
printf("-------------------\n");
printf("Allocating Matrices\n");
A = (culaDouble*)malloc(N*N*sizeof(culaDouble));
A_bak = (culaDouble*)malloc(N*N*sizeof(culaDouble));
B = (culaDouble*)malloc(N*sizeof(culaDouble));
X = (culaDouble*)malloc(N*sizeof(culaDouble));
IPIV = (culaInt*)malloc(N*sizeof(culaInt));
// work = (culaDouble*)malloc(N * NRHS * sizeof(culaDouble));
//swork = (culaDouble*)malloc(N * (N+NRHS) * sizeof(culaDouble));
// info = (int *)malloc(N * sizeof(int));
if(!A || !B || !IPIV || !A_bak)
exit(EXIT_FAILURE);
printf("Allocating Matrices on device\n");
hipMalloc((void**)&Ad,N*N*sizeof(culaDouble));
// hipMalloc((void**)&Ad_bak,N*N*sizeof(culaFloat));
hipMalloc((void**)&Bd,N*sizeof(culaDouble));
hipMalloc((void**)&Xd,N*sizeof(culaDouble));
hipMalloc((void**)&IPIVd,N*sizeof(culaInt));
hipEventRecord(start, 0);
printf("Initializing CULA\n");
status = culaInitialize();
checkStatus(status);
// Set A to the identity matrix
memset(A, 0, N*N*sizeof(culaDouble));
memset(A_bak, 0, N*N*sizeof(culaDouble));
for(i = 0; i < N; ++i){
A_bak[i * N + i] = A[i*N + i] = 2.f;
if (i > 0)
A_bak[i * N + i-1] = A[i*N + i-1] = 0.5f;
if (i < N - 1)
A_bak[i * N + i+1] = A[i*N + i+1] = 0.5f;
}
// Set B to a random matrix (see note at top)
for(i = 0; i < N; ++i)
B[i] = (culaDouble)(rand() % 10);
memcpy(X, B, N*sizeof(culaDouble));
memset(IPIV, 0, N*sizeof(culaInt));
//Copy from Host to Device
hipMemcpy(Ad,A, N*N*sizeof(culaDouble),hipMemcpyHostToDevice);
// hipMemcpy(Ad_bak,A_bak, N*N*sizeof(culaFloat),hipMemcpyHostToDevice);
hipMemcpy(Bd,B, N*sizeof(culaDouble),hipMemcpyHostToDevice);
hipMemcpy(Xd,X, N*sizeof(culaDouble),hipMemcpyHostToDevice);
hipMemcpy(IPIVd,IPIV, N*sizeof(culaInt),hipMemcpyHostToDevice);
printf("Calling culaDgetrf\n");
int iter = 0;
status = culaDeviceDgetrf(N, N, Ad, N, IPIVd);
// printf("iter = %d\n", iter);
if(status == culaInsufficientComputeCapability)
{
printf("No Double precision support available, skipping example\n");
free(A);
free(B);
free(IPIV);
culaShutdown();
return;
}
checkStatus(status);
//Copy result from Device to Host
hipMemcpy(A,Ad, N*N*sizeof(culaDouble),hipMemcpyDeviceToHost);
// hipMemcpy(A_bak,Ad_bak, N*N*sizeof(culaFloat),hipMemcpyDeviceToHost);
hipMemcpy(B,Bd, N*sizeof(culaDouble),hipMemcpyDeviceToHost);
hipMemcpy(X,Xd, N*sizeof(culaDouble),hipMemcpyDeviceToHost);
hipMemcpy(IPIV,IPIVd, N*sizeof(culaInt),hipMemcpyDeviceToHost);
printf("Verifying Result\n");
int success = 1;
double max_b = 0.0;
for (i = 0; i < N; i++)
if (max_b < fabs(B[i]))
max_b = fabs(B[i]);
/* for(i = 0; i < N; ++i)
{
fprintf(stderr, "X[%d] = %g,B[%d] = %g\n", i, X[i], i, B[i]);
}
for (i = 0; i < N; i++) {
int j;
for (j = 0; j < N; j++) {
fprintf(stderr, "A[%d][%d] = %g\n", i, j, A[i * N + j]);
}
}*/
if(success)
printf("Success\n");
else
printf("Failed\n");
printf("Shutting down CULA\n\n");
culaShutdown();
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
free(A);
free(A_bak);
free(X);
free(B);
free(IPIV);
hipFree(Ad);
hipFree(Ad_bak);
hipFree(Bd);
hipFree(Xd);
hipFree(IPIVd);
hipEventElapsedTime(&t_dgetrf, start, stop);
printf("\n Time taken for CULA Dgetrf is %f ms \n", t_dgetrf);
}
void culaDoubleComplexExample()
{
#ifdef NDEBUG
int N = 1024;
#else
int N = 128;
#endif
int NRHS = 1;
int i;
culaStatus status;
culaDoubleComplex* A = NULL;
culaDoubleComplex* B = NULL;
culaDoubleComplex* X = NULL;
culaInt* IPIV = NULL;
culaDoubleComplex one = { 1.0, 0.0 };
culaDouble thresh = 1e-6;
culaDouble diffr;
culaDouble diffc;
culaDouble diffabs;
printf("-------------------\n");
printf(" ZGESV\n");
printf("-------------------\n");
printf("Allocating Matrices\n");
A = (culaDoubleComplex*)malloc(N*N*sizeof(culaDoubleComplex));
B = (culaDoubleComplex*)malloc(N*sizeof(culaDoubleComplex));
X = (culaDoubleComplex*)malloc(N*sizeof(culaDoubleComplex));
IPIV = (culaInt*)malloc(N*sizeof(culaInt));
if(!A || !B || !IPIV)
exit(EXIT_FAILURE);
printf("Initializing CULA\n");
status = culaInitialize();
checkStatus(status);
// Set A to the identity matrix
memset(A, 0, N*N*sizeof(culaDoubleComplex));
for(i = 0; i < N; ++i)
A[i*N+i] = one;
// Set B to a random matrix (see note at top)
for(i = 0; i < N; ++i)
{
B[i].x = (culaDouble)rand();
B[i].y = (culaDouble)rand();
}
memcpy(X, B, N*sizeof(culaDoubleComplex));
memset(IPIV, 0, N*sizeof(culaInt));
printf("Calling culaZgesv\n");
status = culaZgesv(N, NRHS, A, N, IPIV, X, N);
if(status == culaInsufficientComputeCapability)
{
printf("No Double precision support available, skipping example\n");
free(A);
free(B);
free(IPIV);
culaShutdown();
return;
}
checkStatus(status);
printf("Verifying Result\n");
for(i = 0; i < N; ++i)
{
diffr = X[i].x - B[i].x;
diffc = X[i].y - B[i].y;
diffabs = (culaDouble)sqrt(X[i].x*X[i].x+X[i].y*X[i].y)
- (culaDouble)sqrt(B[i].x*B[i].x+B[i].y*B[i].y);
if(diffr < 0.0)
diffr = -diffr;
if(diffc < 0.0)
diffc = -diffc;
if(diffabs < 0.0)
diffabs = -diffabs;
if(diffr > thresh || diffc > thresh || diffabs > thresh)
printf("Result check failed: i=%d X[i]=(%f,%f) B[i]=(%f,%f)", i, X[i].x, X[i].y, B[i].x, B[i].y);
}
printf("Shutting down CULA\n\n");
culaShutdown();
free(A);
free(B);
free(IPIV);
}
#endif
int main(int argc, char** argv)
{
culaDeviceFloatExample();
// culaFloatComplexExample();
// Note: CULA Premium is required for double-precision
#ifdef CULA_PREMIUM
culaDeviceDoubleExample();
// culaDoubleComplexExample();
#endif
return EXIT_SUCCESS;
}
|
9178d16b36a085272634dc90707875f7a70b1da6.cu
|
/*
* Copyright (C) 2009-2012 EM Photonics, Inc. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to EM Photonics ownership rights under U.S. and
* international Copyright laws. Users and possessors of this source code may
* not redistribute this code without the express written consent of EM
* Photonics, Inc.
*
* EM PHOTONICS MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED
* WARRANTY OF ANY KIND. EM PHOTONICS DISCLAIMS ALL WARRANTIES WITH REGARD TO
* THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
* NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT SHALL EM
* PHOTONICS BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL
* DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
* SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as that
* term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of "commercial
* computer software" and "commercial computer software documentation" as
* such terms are used in 48 C.F.R. 12.212 (SEPT 1995) and is provided to the
* U.S. Government only as a commercial end item. Consistent with 48
* C.F.R.12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
* U.S. Government End Users acquire the source code with only those rights set
* forth herein.
*
* Any use of this source code in individual and commercial software must
* include, in the user documentation and internal comments to the code, the
* above Disclaimer and U.S. Government End Users Notice.
*
*/
/*
* CULA Example: systemSolve
*
* This example shows how to use a system solve for multiple data types. Each
* data type has its own example case for clarity. For each data type, the
* following steps are done:
*
* 1. Allocate a matrix on the host
* 2. Initialize CULA
* 3. Initialize the A matrix to the Identity
* 4. Call gesv on the matrix
* 5. Verify the results
* 6. Call culaShutdown
*
* After each CULA operation, the status of CULA is checked. On failure, an
* error message is printed and the program exits.
*
* Note: CULA Premium and double-precision GPU hardware are required to run the
* double-precision examples
*
* Note: this example performs a system solve on an identity matrix against a
* random vector, the result of which is that same random vector. This is not
* true in the general case and is only appropriate for this example. For a
* general case check, the product A*X should be checked against B. Note that
* because A is modified by GESV, a copy of A would be needed with which to do
* the verification.
*/
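/*
 * Illustrative sketch (not part of the original example): a general-case
 * verification would keep an untouched copy of A (here called A_copy, an
 * assumed name) and test the residual A_copy*X - B on the host, e.g.
 *
 *     for (int row = 0; row < N; ++row) {
 *         float r = -B[row];
 *         for (int col = 0; col < N; ++col)
 *             r += A_copy[row + col*N] * X[col];   // column-major, LAPACK-style
 *         // fabs(r) should stay below a small tolerance
 *     }
 */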
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include<cuda.h>
#include <cula_lapack.h>
#include<cula_lapack_device.h>
void checkStatus(culaStatus status)
{
char buf[256];
if(!status)
return;
culaGetErrorInfoString(status, culaGetErrorInfo(), buf, sizeof(buf));
printf("%s\n", buf);
culaShutdown();
exit(EXIT_FAILURE);
}
void culaDeviceFloatExample()
{
#ifdef NDEBUG
int N = 2000;
#else
int N = 1024;
#endif
int NRHS = 1;
int i, j;
cudaEvent_t start, stop;
float t_sgetrf;
cudaEventCreate(&start);
cudaEventCreate(&stop);
culaStatus status;
culaFloat* A = NULL;
culaFloat* A_bak = NULL;
culaFloat* B = NULL;
culaFloat* X = NULL;
culaInt* IPIV = NULL;
culaDeviceFloat* Ad = NULL;
culaDeviceFloat* Ad_bak = NULL;
culaDeviceFloat* Bd = NULL;
culaDeviceFloat* Xd = NULL;
culaDeviceInt* IPIVd = NULL;
// culaFloat one = 2.0f;
culaFloat thresh = 1e-6f;
culaFloat diff;
printf("-------------------\n");
printf(" SGETRF\n");
printf("-------------------\n");
printf("Allocating Matrices on host\n");
A = (culaFloat*)malloc(N*N*sizeof(culaFloat));
A_bak = (culaFloat*)malloc(N*N*sizeof(culaFloat));
B = (culaFloat*)malloc(N*sizeof(culaFloat));
X = (culaFloat*)malloc(N*sizeof(culaFloat));
IPIV = (culaInt*)malloc(N*sizeof(culaInt));
if(!A || !B || !IPIV || !A_bak)
exit(EXIT_FAILURE);
printf("Allocating Matrices on device\n");
cudaMalloc((void**)&Ad,N*N*sizeof(culaFloat));
// cudaMalloc((void**)&Ad_bak,N*N*sizeof(culaFloat));
cudaMalloc((void**)&Bd,N*sizeof(culaFloat));
cudaMalloc((void**)&Xd,N*sizeof(culaFloat));
cudaMalloc((void**)&IPIVd,N*sizeof(culaInt));
cudaEventRecord(start, 0);
printf("Initializing CULA\n");
status = culaInitialize();
checkStatus(status);
// Set A to the identity matrix
memset(A, 0, N*N*sizeof(culaFloat));
memset(A_bak, 0, N*N*sizeof(culaFloat));
for(i = 0; i < N; ++i) {
A_bak[i*N+i] = A[i * N + i] = 2.f;
// printf("%g, %g\n", one, A[i * N + i]);
}
/* A[0]=3;
A[1]=3;
A[2]=0;
A[3]=0;
A[4]=2;
A[5]=2;
A[6]=1;
A[7]=0;
A[8]=1;*/
//Printing the matrix
// Set B to a random matrix (see note at top)
for(i = 0; i < N; ++i)
B[i] = (culaFloat)(rand() % 10);
memcpy(X, B, N*sizeof(culaFloat));
memset(IPIV, 0, N*sizeof(culaInt));
//Copy from Host to Device
cudaMemcpy(Ad,A, N*N*sizeof(culaFloat),cudaMemcpyHostToDevice);
// cudaMemcpy(Ad_bak,A_bak, N*N*sizeof(culaFloat),cudaMemcpyHostToDevice);
cudaMemcpy(Bd,B, N*sizeof(culaFloat),cudaMemcpyHostToDevice);
cudaMemcpy(Xd,X, N*sizeof(culaFloat),cudaMemcpyHostToDevice);
cudaMemcpy(IPIVd,IPIV, N*sizeof(culaInt),cudaMemcpyHostToDevice);
/* //Printing the matrix
printf("\n\n");
for (i=0; i< N*N; ++i)
{
printf("%f,",A[i]);
if((i+1)%N==0)
printf("\n");
}*/
printf("Calling culaSgetrf\n");
status = culaDeviceSgetrf(N, N, Ad, N, IPIVd);
checkStatus(status);
//Copy result from Device to Host
cudaMemcpy(A,Ad, N*N*sizeof(culaFloat),cudaMemcpyDeviceToHost);
// cudaMemcpy(A_bak,Ad_bak, N*N*sizeof(culaFloat),cudaMemcpyDeviceToHost);
cudaMemcpy(B,Bd, N*sizeof(culaFloat),cudaMemcpyDeviceToHost);
cudaMemcpy(X,Xd, N*sizeof(culaFloat),cudaMemcpyDeviceToHost);
cudaMemcpy(IPIV,IPIVd, N*sizeof(culaInt),cudaMemcpyDeviceToHost);
printf("Verifying Result\n");
int success = 1;
float max_b = 0.0;
for(i =0; i< N; i++)
if(max_b < fabs(B[i]))
max_b = fabs(B[i]);
/* for(i = 0; i < N; ++i)
{
fprintf(stderr, "X[%d] = %g, B[%d] = %g\n", i, X[i], i, B[i]);
}*/
if (success)
printf("Success\n");
else
printf("Failed\n");
printf("Shutting down CULA\n\n");
culaShutdown();
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
//printf("\n Time taken for CULA Sgesv is %f", t_sgesv);
free(A);
free(A_bak);
free(X);
free(B);
free(IPIV);
cudaFree(Ad);
cudaFree(Ad_bak);
cudaFree(Bd);
cudaFree(Xd);
cudaFree(IPIVd);
cudaEventElapsedTime(&t_sgetrf, start, stop);
printf("\n Time taken for CULA Sgetrf is %f ms\n", t_sgetrf);
}
void culaDeviceFloatComplexExample()
{
#ifdef NDEBUG
int N = 4096;
#else
int N = 512;
#endif
int NRHS = 1;
int i;
culaStatus status;
culaFloatComplex* A = NULL;
culaFloatComplex* B = NULL;
culaFloatComplex* X = NULL;
culaInt* IPIV = NULL;
culaFloatComplex one = { 1.0f, 0.0f };
culaFloat thresh = 1e-6f;
culaFloat diffr;
culaFloat diffc;
culaFloat diffabs;
printf("-------------------\n");
printf(" CGESV\n");
printf("-------------------\n");
printf("Allocating Matrices\n");
A = (culaFloatComplex*)malloc(N*N*sizeof(culaFloatComplex));
B = (culaFloatComplex*)malloc(N*sizeof(culaFloatComplex));
X = (culaFloatComplex*)malloc(N*sizeof(culaFloatComplex));
IPIV = (culaInt*)malloc(N*sizeof(culaInt));
if(!A || !B || !IPIV)
exit(EXIT_FAILURE);
printf("Initializing CULA\n");
status = culaInitialize();
checkStatus(status);
// Set A to the identity matrix
memset(A, 0, N*N*sizeof(culaFloatComplex));
for(i = 0; i < N; ++i)
A[i*N+i] = one;
// Set B to a random matrix (see note at top)
for(i = 0; i < N; ++i)
{
B[i].x = (culaFloat)rand();
B[i].y = (culaFloat)rand();
}
memcpy(X, B, N*sizeof(culaFloatComplex));
memset(IPIV, 0, N*sizeof(culaInt));
printf("Calling culaCgesv\n");
status = culaCgesv(N, NRHS, A, N, IPIV, X, N);
checkStatus(status);
printf("Verifying Result\n");
for(i = 0; i < N; ++i)
{
diffr = X[i].x - B[i].x;
diffc = X[i].y - B[i].y;
diffabs = (culaFloat)sqrt(X[i].x*X[i].x+X[i].y*X[i].y)
- (culaFloat)sqrt(B[i].x*B[i].x+B[i].y*B[i].y);
if(diffr < 0.0f)
diffr = -diffr;
if(diffc < 0.0f)
diffc = -diffc;
if(diffabs < 0.0f)
diffabs = -diffabs;
if(diffr > thresh || diffc > thresh || diffabs > thresh)
printf("Result check failed: i=%d X[i]=(%f,%f) B[i]=(%f,%f)", i, X[i].x, X[i].y, B[i].x, B[i].y);
}
printf("Shutting down CULA\n\n");
culaShutdown();
free(A);
free(B);
free(IPIV);
}
// Note: CULA Premium is required for double-precision
#ifdef CULA_PREMIUM
void culaDeviceDoubleExample()
{
#ifdef NDEBUG
int N = 2000;
#else
int N = 512;
#endif
int NRHS = 1;
int i,j;
cudaEvent_t start, stop;
float t_dgetrf;
cudaEventCreate(&start);
cudaEventCreate(&stop);
culaStatus status;
culaDouble* A = NULL;
culaDouble* A_bak = NULL;
culaDouble* B = NULL;
culaDouble* X = NULL;
culaInt* IPIV = NULL;
culaDeviceDouble* Ad = NULL;
culaDeviceDouble* Ad_bak = NULL;
culaDeviceDouble* Bd = NULL;
culaDeviceDouble* Xd = NULL;
culaDeviceInt* IPIVd = NULL;
// culaDouble *work = NULL;
// culaDouble *swork = NULL;
// int *info;
// culaDouble one = 1.0;
culaDouble thresh = 1e-6;
culaDouble diff;
printf("\t-------------------\n");
printf(" DGETRF\n");
printf("-------------------\n");
printf("Allocating Matrices\n");
A = (culaDouble*)malloc(N*N*sizeof(culaDouble));
A_bak = (culaDouble*)malloc(N*N*sizeof(culaDouble));
B = (culaDouble*)malloc(N*sizeof(culaDouble));
X = (culaDouble*)malloc(N*sizeof(culaDouble));
IPIV = (culaInt*)malloc(N*sizeof(culaInt));
// work = (culaDouble*)malloc(N * NRHS * sizeof(culaDouble));
//swork = (culaDouble*)malloc(N * (N+NRHS) * sizeof(culaDouble));
// info = (int *)malloc(N * sizeof(int));
if(!A || !B || !IPIV || !A_bak)
exit(EXIT_FAILURE);
printf("Allocating Matrices on device\n");
cudaMalloc((void**)&Ad,N*N*sizeof(culaDouble));
// cudaMalloc((void**)&Ad_bak,N*N*sizeof(culaFloat));
cudaMalloc((void**)&Bd,N*sizeof(culaDouble));
cudaMalloc((void**)&Xd,N*sizeof(culaDouble));
cudaMalloc((void**)&IPIVd,N*sizeof(culaInt));
cudaEventRecord(start, 0);
printf("Initializing CULA\n");
status = culaInitialize();
checkStatus(status);
// Set A to the identity matrix
memset(A, 0, N*N*sizeof(culaDouble));
memset(A_bak, 0, N*N*sizeof(culaDouble));
for(i = 0; i < N; ++i){
A_bak[i * N + i] = A[i*N + i] = 2.f;
if (i > 0)
A_bak[i * N + i-1] = A[i*N + i-1] = 0.5f;
if (i < N - 1)
A_bak[i * N + i+1] = A[i*N + i+1] = 0.5f;
}
// Set B to a random matrix (see note at top)
for(i = 0; i < N; ++i)
B[i] = (culaDouble)(rand() % 10);
memcpy(X, B, N*sizeof(culaDouble));
memset(IPIV, 0, N*sizeof(culaInt));
//Copy from Host to Device
cudaMemcpy(Ad,A, N*N*sizeof(culaDouble),cudaMemcpyHostToDevice);
// cudaMemcpy(Ad_bak,A_bak, N*N*sizeof(culaFloat),cudaMemcpyHostToDevice);
cudaMemcpy(Bd,B, N*sizeof(culaDouble),cudaMemcpyHostToDevice);
cudaMemcpy(Xd,X, N*sizeof(culaDouble),cudaMemcpyHostToDevice);
cudaMemcpy(IPIVd,IPIV, N*sizeof(culaInt),cudaMemcpyHostToDevice);
printf("Calling culaDgetrf\n");
int iter = 0;
status = culaDeviceDgetrf(N, N, Ad, N, IPIVd);
// printf("iter = %d\n", iter);
if(status == culaInsufficientComputeCapability)
{
printf("No Double precision support available, skipping example\n");
free(A);
free(B);
free(IPIV);
culaShutdown();
return;
}
checkStatus(status);
//Copy result from Device to Host
cudaMemcpy(A,Ad, N*N*sizeof(culaDouble),cudaMemcpyDeviceToHost);
// cudaMemcpy(A_bak,Ad_bak, N*N*sizeof(culaFloat),cudaMemcpyDeviceToHost);
cudaMemcpy(B,Bd, N*sizeof(culaDouble),cudaMemcpyDeviceToHost);
cudaMemcpy(X,Xd, N*sizeof(culaDouble),cudaMemcpyDeviceToHost);
cudaMemcpy(IPIV,IPIVd, N*sizeof(culaInt),cudaMemcpyDeviceToHost);
printf("Verifying Result\n");
int success = 1;
double max_b = 0.0;
for (i = 0; i < N; i++)
if (max_b < fabs(B[i]))
max_b = fabs(B[i]);
/* for(i = 0; i < N; ++i)
{
fprintf(stderr, "X[%d] = %g,B[%d] = %g\n", i, X[i], i, B[i]);
}
for (i = 0; i < N; i++) {
int j;
for (j = 0; j < N; j++) {
fprintf(stderr, "A[%d][%d] = %g\n", i, j, A[i * N + j]);
}
}*/
if(success)
printf("Success\n");
else
printf("Failed\n");
printf("Shutting down CULA\n\n");
culaShutdown();
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
free(A);
free(A_bak);
free(X);
free(B);
free(IPIV);
cudaFree(Ad);
cudaFree(Ad_bak);
cudaFree(Bd);
cudaFree(Xd);
cudaFree(IPIVd);
cudaEventElapsedTime(&t_dgetrf, start, stop);
printf("\n Time taken for CULA Dgetrf is %f ms \n", t_dgetrf);
}
void culaDoubleComplexExample()
{
#ifdef NDEBUG
int N = 1024;
#else
int N = 128;
#endif
int NRHS = 1;
int i;
culaStatus status;
culaDoubleComplex* A = NULL;
culaDoubleComplex* B = NULL;
culaDoubleComplex* X = NULL;
culaInt* IPIV = NULL;
culaDoubleComplex one = { 1.0, 0.0 };
culaDouble thresh = 1e-6;
culaDouble diffr;
culaDouble diffc;
culaDouble diffabs;
printf("-------------------\n");
printf(" ZGESV\n");
printf("-------------------\n");
printf("Allocating Matrices\n");
A = (culaDoubleComplex*)malloc(N*N*sizeof(culaDoubleComplex));
B = (culaDoubleComplex*)malloc(N*sizeof(culaDoubleComplex));
X = (culaDoubleComplex*)malloc(N*sizeof(culaDoubleComplex));
IPIV = (culaInt*)malloc(N*sizeof(culaInt));
if(!A || !B || !IPIV)
exit(EXIT_FAILURE);
printf("Initializing CULA\n");
status = culaInitialize();
checkStatus(status);
// Set A to the identity matrix
memset(A, 0, N*N*sizeof(culaDoubleComplex));
for(i = 0; i < N; ++i)
A[i*N+i] = one;
// Set B to a random matrix (see note at top)
for(i = 0; i < N; ++i)
{
B[i].x = (culaDouble)rand();
B[i].y = (culaDouble)rand();
}
memcpy(X, B, N*sizeof(culaDoubleComplex));
memset(IPIV, 0, N*sizeof(culaInt));
printf("Calling culaZgesv\n");
status = culaZgesv(N, NRHS, A, N, IPIV, X, N);
if(status == culaInsufficientComputeCapability)
{
printf("No Double precision support available, skipping example\n");
free(A);
free(B);
free(IPIV);
culaShutdown();
return;
}
checkStatus(status);
printf("Verifying Result\n");
for(i = 0; i < N; ++i)
{
diffr = X[i].x - B[i].x;
diffc = X[i].y - B[i].y;
diffabs = (culaDouble)sqrt(X[i].x*X[i].x+X[i].y*X[i].y)
- (culaDouble)sqrt(B[i].x*B[i].x+B[i].y*B[i].y);
if(diffr < 0.0)
diffr = -diffr;
if(diffc < 0.0)
diffc = -diffc;
if(diffabs < 0.0)
diffabs = -diffabs;
if(diffr > thresh || diffc > thresh || diffabs > thresh)
printf("Result check failed: i=%d X[i]=(%f,%f) B[i]=(%f,%f)", i, X[i].x, X[i].y, B[i].x, B[i].y);
}
printf("Shutting down CULA\n\n");
culaShutdown();
free(A);
free(B);
free(IPIV);
}
#endif
int main(int argc, char** argv)
{
culaDeviceFloatExample();
// culaFloatComplexExample();
// Note: CULA Premium is required for double-precision
#ifdef CULA_PREMIUM
culaDeviceDoubleExample();
// culaDoubleComplexExample();
#endif
return EXIT_SUCCESS;
}
|
429b6bae4d6dd6e53c9abe0e40ff2a2f60ad7d3b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include "cudaTools.h"
#include "Indice1D.h"
#include "Indice2D.h"
#include "Device.h"
#include "IndiceTools.h"
#include "RipplingMath.h"
__global__ void ripplingOneToOne(uchar4* ptrDevPixels, int w, int h, float t);
__global__ void ripplingOneDimension(uchar4* ptrDevPixels, int w, int h, float t);
__global__ void ripplingTwoDimensions(uchar4* ptrDevPixels, int w, int h, float t);
__global__ void ripplingOneToOne(uchar4* ptrDevPixels, int w, int h, float t) {
RipplingMath ripplingMath(w, h);
int i = threadIdx.y + blockIdx.y * blockDim.y;
int j = threadIdx.x + blockIdx.x * blockDim.x;
int s = j + gridDim.x * blockDim.x * (threadIdx.y + blockIdx.y * blockDim.y);
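// Note: s is a linear pixel index only if gridDim.x * blockDim.x == w, and there
// is no bounds check, so the launch grid must cover the w x h image exactly.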
ripplingMath.colorIJ(&ptrDevPixels[s], i, j, t);
}
__global__ void ripplingOneDimension(uchar4* ptrDevPixels, int w, int h, float t) {
RipplingMath ripplingMath(w, h);
const int NB_THREADS = Indice1D::nbThread();
const int TID = Indice1D::tid();
const int n = w * h;
int s = TID;
while( s < n ) {
int i, j;
IndiceTools::toIJ(s, w, &i, &j);
ripplingMath.colorIJ(&ptrDevPixels[s], i, j, t);
s += NB_THREADS;
}
}
__global__ void ripplingTwoDimensions(uchar4* ptrDevPixels, int w, int h, float t) {
RipplingMath ripplingMath(w, h);
const int NB_THREADS = Indice2D::nbThread();
const int TID = Indice2D::tid();
const int n = w * h;
int s = TID;
while( s < n ) {
int i, j;
IndiceTools::toIJ(s, w, &i, &j);
ripplingMath.colorIJ(&ptrDevPixels[s], i, j, t);
s += NB_THREADS;
}
}
|
429b6bae4d6dd6e53c9abe0e40ff2a2f60ad7d3b.cu
|
#include <iostream>
#include "cudaTools.h"
#include "Indice1D.h"
#include "Indice2D.h"
#include "Device.h"
#include "IndiceTools.h"
#include "RipplingMath.h"
__global__ void ripplingOneToOne(uchar4* ptrDevPixels, int w, int h, float t);
__global__ void ripplingOneDimension(uchar4* ptrDevPixels, int w, int h, float t);
__global__ void ripplingTwoDimensions(uchar4* ptrDevPixels, int w, int h, float t);
__global__ void ripplingOneToOne(uchar4* ptrDevPixels, int w, int h, float t) {
RipplingMath ripplingMath(w, h);
int i = threadIdx.y + blockIdx.y * blockDim.y;
int j = threadIdx.x + blockIdx.x * blockDim.x;
int s = j + gridDim.x * blockDim.x * (threadIdx.y + blockIdx.y * blockDim.y);
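// Note: s is a linear pixel index only if gridDim.x * blockDim.x == w, and there
// is no bounds check, so the launch grid must cover the w x h image exactly.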
ripplingMath.colorIJ(&ptrDevPixels[s], i, j, t);
}
__global__ void ripplingOneDimension(uchar4* ptrDevPixels, int w, int h, float t) {
RipplingMath ripplingMath(w, h);
const int NB_THREADS = Indice1D::nbThread();
const int TID = Indice1D::tid();
const int n = w * h;
int s = TID;
while( s < n ) {
int i, j;
IndiceTools::toIJ(s, w, &i, &j);
ripplingMath.colorIJ(&ptrDevPixels[s], i, j, t);
s += NB_THREADS;
}
}
__global__ void ripplingTwoDimensions(uchar4* ptrDevPixels, int w, int h, float t) {
RipplingMath ripplingMath(w, h);
const int NB_THREADS = Indice2D::nbThread();
const int TID = Indice2D::tid();
const int n = w * h;
int s = TID;
while( s < n ) {
int i, j;
IndiceTools::toIJ(s, w, &i, &j);
ripplingMath.colorIJ(&ptrDevPixels[s], i, j, t);
s += NB_THREADS;
}
}
|
990316f8eee58e2d534573e14a8678da7c0d4305.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "loop_kernel.hu"
__global__ void kernel0(float *dst, float *tab)
{
int b0 = blockIdx.y, b1 = blockIdx.x;
int t0 = threadIdx.y, t1 = threadIdx.x;
#define ppcg_min(x,y) ({ __typeof__(x) _x = (x); __typeof__(y) _y = (y); _x < _y ? _x : _y; })
if (32 * b0 + t0 <= 1999)
for (int c3 = t1; c3 <= ppcg_min(31, -32 * b1 + 1999); c3 += 16)
dst[64000 * b0 + 32 * b1 + 2000 * t0 + c3] = (tab[64000 * b0 + 32 * b1 + 2000 * t0 + c3] + tab[64000 * b0 + 32 * b1 + 2000 * t0 + c3]);
}
|
990316f8eee58e2d534573e14a8678da7c0d4305.cu
|
#include "loop_kernel.hu"
__global__ void kernel0(float *dst, float *tab)
{
int b0 = blockIdx.y, b1 = blockIdx.x;
int t0 = threadIdx.y, t1 = threadIdx.x;
#define ppcg_min(x,y) ({ __typeof__(x) _x = (x); __typeof__(y) _y = (y); _x < _y ? _x : _y; })
if (32 * b0 + t0 <= 1999)
for (int c3 = t1; c3 <= ppcg_min(31, -32 * b1 + 1999); c3 += 16)
dst[64000 * b0 + 32 * b1 + 2000 * t0 + c3] = (tab[64000 * b0 + 32 * b1 + 2000 * t0 + c3] + tab[64000 * b0 + 32 * b1 + 2000 * t0 + c3]);
}
|
e5e75815a0a01e3fe97162c0d108d76f450ad222.hip
|
// !!! This is a file automatically generated by hipify!!!
/*************************************************************
* Copyright (c) 2017, Palo Alto Research Center. *
* All rights reserved. *
*************************************************************/
#include <stdio.h>
#ifdef WIN32
#include "getopt_win32.h"
#else
#include <unistd.h>
#endif
#include <vector>
#include "hml.h"
using namespace std;
#define cHmlPagerankDampingFactorDefault 0.85f
#define cHmlPagerankOutFileNameExtension "k2v"
int main(int argc, char *argv[])
{
char helpMsg[] =
"This program does GPU-based PageRank.\n"
"Options:\n"
"\t-c Cross check with CPU-based PageRank results\n"
"\t-d <damping factor>\n"
"\t-h Print this help message\n"
"\t-i <number of PageRank iteration>\n"
"\t-k <number of top-ranked pages to be printed>\n"
"\t-o <output file name prefix>\n"
"\t-g <graph file name>\n"
"\t-u <GPU ID in [0, #GPUs - 1]>\n"
"\t-y <verbosity level in [0,2]>\n";
int option;
uint32_t verbosity = 0;
HmlGraph hostGraphVal;
HmlGraph *hostGraph = &hostGraphVal;
vector<float> dampingFactors;
float dampingFactor = 0.85;
uint32_t numIters = 10;
uint32_t printTopK = (uint32_t)-1;
int gpuId = 0;
int count;
hipDeviceProp_t prop;
char *graphFileName = NULL;
char *outFilenamePrefix = NULL;
double cpuStart;
double cpuEnd;
double wallStart;
double wallEnd;
bool runCPU = false;
size_t freeBytesStart;
size_t totalBytesStart;
size_t freeBytesEnd;
size_t totalBytesEnd;
/* get program options */
while ((option = getopt(argc, argv, ":cd:g:hi:k:o:r:u:y:")) != -1) {
switch (option) {
case 'c':
runCPU = true;
break;
case 'd':
dampingFactor = atof(optarg);
dampingFactors.push_back(dampingFactor);
if (verbosity >= 1)
fprintf(stderr, "; Info: damping factor = %lf\n", dampingFactor);
break;
case 'g':
graphFileName = optarg;
break;
case 'h':
fprintf(stderr, "Help:\n%s\n", helpMsg);
exit(EXIT_FAILURE);
break;
case 'i':
numIters = atoi(optarg);
break;
case 'k':
printTopK = atoi(optarg);
break;
case 'o':
outFilenamePrefix = optarg;
break;
case 'u':
gpuId = atoi(optarg);
break;
case 'y':
verbosity = atoi(optarg);
break;
case ':':
fprintf(stderr, "; Error: Option -%c requires an argument\n", optopt);
exit(EXIT_FAILURE);
break;
case '?':
fprintf(stderr, "; Error: Unknown option character '%c'.\n", optopt);
exit(EXIT_FAILURE);
}
}
/* the last argument is the name of the graph file, which
* is not optional
*/
if (optind == argc - 1) {
graphFileName = argv[optind];
}
HANDLE_ERROR(hipGetDeviceCount(&count));
if (verbosity >= 2) {
for (int i = 0; i < count; ++i) {
HANDLE_ERROR(hipGetDeviceProperties(&prop, i));
fprintf(stderr, "[cuda_device_%d]\n", i);
hmlDevicePropertyPrint(&prop);
}
}
/* choose which device to run the kernel code */
if (gpuId >= count) {
fprintf(stderr,
"; Error: Invalid GPU card #%d, resetting to default (0)\n", gpuId);
gpuId = 0;
}
HANDLE_ERROR(hipSetDevice(gpuId));
if (verbosity >= 2) {
fprintf(stderr, "; Info: Set cuda device to %d\n", gpuId);
HANDLE_ERROR(hipMemGetInfo(&freeBytesStart, &totalBytesStart));
fprintf(stderr,
"; Info: Free memory = %ld bytes, total memory = %ld bytes\n",
freeBytesStart, totalBytesStart);
}
if (verbosity >= 1)
hmlGetSecs(&cpuStart, &wallStart);
hmlGraphReadTsv4(graphFileName, false, hostGraph);
if (verbosity >= 1) {
hmlGetSecs(&cpuEnd, &wallEnd);
fprintf(stderr,
"; Info: HmlGraph reading: cpu time = %.2lf, wall time = %.2lf\n",
(cpuEnd - cpuStart) * 1000, (wallEnd - wallStart) * 1000);
}
if (verbosity >= 2)
hmlGraphPrintStats(stderr, hostGraph);
/* hmlGraphPrintEdges(stdout, hostGraph, false); */
if (dampingFactors.empty())
dampingFactors.push_back(cHmlPagerankDampingFactorDefault);
/* perform pagerank on CPU ? */
for (size_t i = 0; i < dampingFactors.size(); ++i) {
if (runCPU == true) {
hmlPagerankSpmvCpu(hostGraph, dampingFactors[i], numIters, printTopK,
outFilenamePrefix, cHmlPagerankOutFileNameExtension);
}
else {
hmlPagerankSpmvGpu(hostGraph, dampingFactors[i], numIters, printTopK,
outFilenamePrefix, cHmlPagerankOutFileNameExtension, verbosity);
}
}
hmlGraphDeleteHost(hostGraph);
if (verbosity >= 2) {
HANDLE_ERROR(hipMemGetInfo(&freeBytesEnd, &totalBytesEnd));
fprintf(stderr,
"; Info: Free memory = %ld bytes, total memory = %ld bytes\n",
freeBytesEnd, totalBytesEnd);
if (freeBytesStart != freeBytesEnd || totalBytesStart != totalBytesEnd)
fprintf(stderr,
"; Info: Memory leak: %ld bytes, %ld total bytes\n",
freeBytesStart - freeBytesEnd, totalBytesStart - totalBytesEnd);
}
HANDLE_ERROR(hipDeviceReset());
}
|
e5e75815a0a01e3fe97162c0d108d76f450ad222.cu
|
/*************************************************************
* Copyright (c) 2017, Palo Alto Research Center. *
* All rights reserved. *
*************************************************************/
#include <stdio.h>
#ifdef WIN32
#include "getopt_win32.h"
#else
#include <unistd.h>
#endif
#include <vector>
#include "hml.h"
using namespace std;
#define cHmlPagerankDampingFactorDefault 0.85f
#define cHmlPagerankOutFileNameExtension "k2v"
int main(int argc, char *argv[])
{
char helpMsg[] =
"This program does GPU-based PageRank.\n"
"Options:\n"
"\t-c Cross check with CPU-based PageRank results\n"
"\t-d <damping factor>\n"
"\t-h Print this help message\n"
"\t-i <number of PageRank iteration>\n"
"\t-k <number of top-ranked pages to be printed>\n"
"\t-o <output file name prefix>\n"
"\t-g <graph file name>\n"
"\t-u <GPU ID in [0, #GPUs - 1]>\n"
"\t-y <verbosity level in [0,2]>\n";
int option;
uint32_t verbosity = 0;
HmlGraph hostGraphVal;
HmlGraph *hostGraph = &hostGraphVal;
vector<float> dampingFactors;
float dampingFactor = 0.85;
uint32_t numIters = 10;
uint32_t printTopK = (uint32_t)-1;
int gpuId = 0;
int count;
cudaDeviceProp prop;
char *graphFileName = NULL;
char *outFilenamePrefix = NULL;
double cpuStart;
double cpuEnd;
double wallStart;
double wallEnd;
bool runCPU = false;
size_t freeBytesStart;
size_t totalBytesStart;
size_t freeBytesEnd;
size_t totalBytesEnd;
/* get program options */
while ((option = getopt(argc, argv, ":cd:g:hi:k:o:r:u:y:")) != -1) {
switch (option) {
case 'c':
runCPU = true;
break;
case 'd':
dampingFactor = atof(optarg);
dampingFactors.push_back(dampingFactor);
if (verbosity >= 1)
fprintf(stderr, "; Info: damping factor = %lf\n", dampingFactor);
break;
case 'g':
graphFileName = optarg;
break;
case 'h':
fprintf(stderr, "Help:\n%s\n", helpMsg);
exit(EXIT_FAILURE);
break;
case 'i':
numIters = atoi(optarg);
break;
case 'k':
printTopK = atoi(optarg);
break;
case 'o':
outFilenamePrefix = optarg;
break;
case 'u':
gpuId = atoi(optarg);
break;
case 'y':
verbosity = atoi(optarg);
break;
case ':':
fprintf(stderr, "; Error: Option -%c requires an argument\n", optopt);
exit(EXIT_FAILURE);
break;
case '?':
fprintf(stderr, "; Error: Unknown option character '%c'.\n", optopt);
exit(EXIT_FAILURE);
}
}
/* the last argument is the name of the graph file, which
* is not optional
*/
if (optind == argc - 1) {
graphFileName = argv[optind];
}
HANDLE_ERROR(cudaGetDeviceCount(&count));
if (verbosity >= 2) {
for (int i = 0; i < count; ++i) {
HANDLE_ERROR(cudaGetDeviceProperties(&prop, i));
fprintf(stderr, "[cuda_device_%d]\n", i);
hmlDevicePropertyPrint(&prop);
}
}
/* choose which device to run the kernel code */
if (gpuId >= count) {
fprintf(stderr,
"; Error: Invalid GPU card #%d, resetting to default (0)\n", gpuId);
gpuId = 0;
}
HANDLE_ERROR(cudaSetDevice(gpuId));
if (verbosity >= 2) {
fprintf(stderr, "; Info: Set cuda device to %d\n", gpuId);
HANDLE_ERROR(cudaMemGetInfo(&freeBytesStart, &totalBytesStart));
fprintf(stderr,
"; Info: Free memory = %ld bytes, total memory = %ld bytes\n",
freeBytesStart, totalBytesStart);
}
if (verbosity >= 1)
hmlGetSecs(&cpuStart, &wallStart);
hmlGraphReadTsv4(graphFileName, false, hostGraph);
if (verbosity >= 1) {
hmlGetSecs(&cpuEnd, &wallEnd);
fprintf(stderr,
"; Info: HmlGraph reading: cpu time = %.2lf, wall time = %.2lf\n",
(cpuEnd - cpuStart) * 1000, (wallEnd - wallStart) * 1000);
}
if (verbosity >= 2)
hmlGraphPrintStats(stderr, hostGraph);
/* hmlGraphPrintEdges(stdout, hostGraph, false); */
if (dampingFactors.empty())
dampingFactors.push_back(cHmlPagerankDampingFactorDefault);
/* perform pagerank on CPU ? */
for (size_t i = 0; i < dampingFactors.size(); ++i) {
if (runCPU == true) {
hmlPagerankSpmvCpu(hostGraph, dampingFactors[i], numIters, printTopK,
outFilenamePrefix, cHmlPagerankOutFileNameExtension);
}
else {
hmlPagerankSpmvGpu(hostGraph, dampingFactors[i], numIters, printTopK,
outFilenamePrefix, cHmlPagerankOutFileNameExtension, verbosity);
}
}
hmlGraphDeleteHost(hostGraph);
if (verbosity >= 2) {
HANDLE_ERROR(cudaMemGetInfo(&freeBytesEnd, &totalBytesEnd));
fprintf(stderr,
"; Info: Free memory = %ld bytes, total memory = %ld bytes\n",
freeBytesEnd, totalBytesEnd);
if (freeBytesStart != freeBytesEnd || totalBytesStart != totalBytesEnd)
fprintf(stderr,
"; Info: Memory leak: %ld bytes, %ld total bytes\n",
freeBytesStart - freeBytesEnd, totalBytesStart - totalBytesEnd);
}
HANDLE_ERROR(cudaDeviceReset());
}
|
5c0f0a03af8a68fada2ed15d101e73d00245de63.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__global__ void shift(int *xdata, int length) {
__shared__ int data[1024];
if (threadIdx.x >= length-1) return;
data[threadIdx.x] = threadIdx.x;
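// Note: there is no __syncthreads() between this write and the neighbour read
// below, so threads in other warps may observe stale shared-memory values.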
if (threadIdx.x > 0) {
data[threadIdx.x-1] = data[threadIdx.x];
}
// copy to global so host can see it
for (int i = 0; i < length; i++) {
xdata[i] = data[i];
}
}
int main() {
int h_data[1024];
for (int i = 0; i < 1024; i++) {
h_data[i] = i;
}
void *d_data;
hipMalloc(&d_data,1024*sizeof(int));
hipMemcpy(d_data,h_data,1024*sizeof(int),hipMemcpyHostToDevice);
hipLaunchKernelGGL(( shift), dim3(1),dim3(1024), 0, 0, (int*) d_data,1024);
hipMemcpy(h_data,d_data,1024*sizeof(int),hipMemcpyDeviceToHost);
hipFree(d_data);
// lets make sure answer is correct
for (int i = 0; i < 1023; i++) {
if (h_data[i] != (i+1)) {
printf("Differ at position %d value computed %d value expected %d\n",i,h_data[i],i+1);
}
}
}
|
5c0f0a03af8a68fada2ed15d101e73d00245de63.cu
|
#include <stdio.h>
__global__ void shift(int *xdata, int length) {
__shared__ int data[1024];
if (threadIdx.x >= length-1) return;
data[threadIdx.x] = threadIdx.x;
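// Note: there is no __syncthreads() between this write and the neighbour read
// below, so threads in other warps may observe stale shared-memory values.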
if (threadIdx.x > 0) {
data[threadIdx.x-1] = data[threadIdx.x];
}
// copy to global so host can see it
for (int i = 0; i < length; i++) {
xdata[i] = data[i];
}
}
int main() {
int h_data[1024];
for (int i = 0; i < 1024; i++) {
h_data[i] = i;
}
void *d_data;
cudaMalloc(&d_data,1024*sizeof(int));
cudaMemcpy(d_data,h_data,1024*sizeof(int),cudaMemcpyHostToDevice);
shift<<<1,1024>>>((int*) d_data,1024);
cudaMemcpy(h_data,d_data,1024*sizeof(int),cudaMemcpyDeviceToHost);
cudaFree(d_data);
// lets make sure answer is correct
for (int i = 0; i < 1023; i++) {
if (h_data[i] != (i+1)) {
printf("Differ at position %d value computed %d value expected %d\n",i,h_data[i],i+1);
}
}
}
|
2eea88da251e2b980faf559fc561a82171af94c8.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include "hip/hip_runtime.h"
#include <hip/device_functions.h>
#include "device_launch_parameters.h"
#include <stdio.h>
#include <ctime>
#include <stdlib.h>
//const int arraySize = 1024 * 128;
//const int arraySize = 1024 * 256;
//const int arraySize = 1024 * 512;
const int arraySize = 1024 * 1024;
//const int block_size = 32;
//const int block_size = 128;
//const int block_size = 512;
const int block_size = 1024;
hipError_t sumWithCuda(float *c, float *a, unsigned int size, int type);
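// sumKernelStr2: block-level shared-memory tree reduction with interleaved
// (strided-index) addressing; sumKernelStr3: the same reduction with sequential
// addressing. Each thread loads two elements and each block writes its partial
// sum to c[blockIdx.x].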
template <int BLOCK_SIZE> __global__ void sumKernelStr2(float *c, float*a) {
__shared__ float sdata[BLOCK_SIZE*2];
unsigned int tid = 2*threadIdx.x;
unsigned int i = blockIdx.x * 2*blockDim.x + 2*threadIdx.x;
sdata[tid] = a[i];
sdata[tid + 1] = a[i + 1];
__syncthreads();
for (unsigned int odstep = 1; odstep < 2*blockDim.x; odstep *= 2) {
int index = odstep*tid;
if (index < 2*blockDim.x) {
sdata[index] += sdata[index + odstep];
}
__syncthreads();
}
if (tid == 0) c[blockIdx.x] = sdata[0];
}
template <int BLOCK_SIZE> __global__ void sumKernelStr3(float *c, float *a) {
__shared__ float sdata[BLOCK_SIZE * 2];
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * 2*blockDim.x + threadIdx.x;
sdata[tid] = a[i];
sdata[tid + blockDim.x] = a[i + blockDim.x];
__syncthreads();
for (unsigned int odstep = blockDim.x; odstep > 0; odstep /= 2) {
if (tid < odstep) sdata[tid] += sdata[tid + odstep];
__syncthreads();
}
if (tid == 0) c[blockIdx.x] = sdata[0];
}
int main()
{
srand(time(NULL));
float *a = (float*)malloc(sizeof(float)*arraySize);
for (int i = 0; i < arraySize; i++) a[i] = (float)(rand() % 20);
float c[1] = { 0 };
// Sum vector parallel.
hipError_t cudaStatus = sumWithCuda(c, a, arraySize, 2);
cudaStatus = sumWithCuda(c, a, arraySize, 3);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addWithCuda failed!");
getchar();
return 1;
}
//for (int i = 0; i < arraySize; i++) printf("+%f", a[i]);
//printf("=%f\n",c[0]);
for (int i = 1; i < arraySize; i++) a[0] += a[i];
if (a[0] != c[0]) printf("DUPA! %f!=%f",a[0],c[0]);
// hipDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
getchar();
return 1;
}
free(a);
//getchar();
return 0;
}
// Helper function for using CUDA to sum vector in parallel.
hipError_t sumWithCuda(float *c, float *a, unsigned int size, int type)
{
float *dev_a = 0;
float *dev_c = 0;
hipError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
hipDeviceProp_t deviceProp;
cudaStatus = hipGetDeviceProperties(&deviceProp, 0);
if (deviceProp.computeMode == hipComputeModeProhibited){
fprintf(stderr, "Error: device is running in <Compute Mode Prohibited>, no threads can use ::hipSetDevice().\n");
getchar();
exit(EXIT_SUCCESS);
}
if (cudaStatus != hipSuccess) printf("hipGetDeviceProperties returned error code %d, line(%d)\n", cudaStatus, __LINE__);
else printf("GPU Device %d: \"%s\" with compute capability %d.%d MP:%d TH_MUL:%d TH:%d WARP:%d SH_MEM_BLOCK:%d %d\n\n", 0,
deviceProp.name, deviceProp.major, deviceProp.minor, deviceProp.multiProcessorCount, deviceProp.maxThreadsPerMultiProcessor, deviceProp.maxThreadsPerBlock, deviceProp.warpSize, deviceProp.sharedMemPerBlock, deviceProp.maxGridSize
);
int threads = size/2;
if (size > 2*block_size) threads = block_size;
int grid = size/threads/2;
// Allocate GPU buffers for 2 vectors (1 input, 1 output).
cudaStatus = hipMalloc((void**)&dev_c, size / threads * sizeof(float));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_a, size * sizeof(float));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_a, a, size * sizeof(float), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
// Allocate CUDA events that we'll use for timing
hipEvent_t start;
hipEvent_t stop;
if ((cudaStatus = hipEventCreate(&start)) != hipSuccess){
fprintf(stderr, "Failed to create start event (error code %s)!\n", hipGetErrorString(cudaStatus));
exit(EXIT_FAILURE);}
if ((cudaStatus = hipEventCreate(&stop)) != hipSuccess){
fprintf(stderr, "Failed to create stop event (error code %s)!\n", hipGetErrorString(cudaStatus));
exit(EXIT_FAILURE);}
if ((cudaStatus = hipEventRecord(start, NULL)) != hipSuccess){
fprintf(stderr, "Failed to record start event (error code %s)!\n", hipGetErrorString(cudaStatus));
exit(EXIT_FAILURE);
}
int iter = 1;
for (int i = 0; i < iter; i++) {
if (type == 2)sumKernelStr2<block_size><< <grid, threads >> > (dev_c, dev_a);
if (type == 3)sumKernelStr3<block_size><< <grid, threads >> > (dev_c, dev_a);
while (grid > 1) {
if (grid > 2*block_size) grid /= (block_size*2);
else {
threads = grid/2;
grid = 1;
}
if (type == 2)sumKernelStr2<block_size> << <grid, threads >> > (dev_c, dev_c);
if (type == 3)sumKernelStr3<block_size> << <grid, threads >> > (dev_c, dev_c);
}
}
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
if ((cudaStatus = hipEventRecord(stop, NULL)) != hipSuccess){
fprintf(stderr, "Failed to record stop event (error code %s)!\n", hipGetErrorString(cudaStatus));
exit(EXIT_FAILURE);}
if ((cudaStatus = hipEventSynchronize(stop)) != hipSuccess){
fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", hipGetErrorString(cudaStatus));
exit(EXIT_FAILURE);}
float msecTotal = 0.0f;
if ((cudaStatus = hipEventElapsedTime(&msecTotal, start, stop)) != hipSuccess){
fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n", hipGetErrorString(cudaStatus));
exit(EXIT_FAILURE);}
// Compute and print the performance
float msecPerVectorSum = msecTotal / iter;
double flopsPeVectorSum = size;
double gigaFlops = (flopsPeVectorSum * 1.0e-9f) / (msecPerVectorSum / 1000.0f);
printf(
"Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops, WorkgroupSize= %u threads/block\n",
gigaFlops,
msecPerVectorSum,
flopsPeVectorSum,
threads);
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(c, dev_c, 1 * sizeof(float), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
Error:
hipFree(dev_c);
hipFree(dev_a);
return cudaStatus;
}
|
2eea88da251e2b980faf559fc561a82171af94c8.cu
|
#include <cuda.h>
#include "cuda_runtime.h"
#include <device_functions.h>
#include "device_launch_parameters.h"
#include <stdio.h>
#include <ctime>
#include <stdlib.h>
//const int arraySize = 1024 * 128;
//const int arraySize = 1024 * 256;
//const int arraySize = 1024 * 512;
const int arraySize = 1024 * 1024;
//const int block_size = 32;
//const int block_size = 128;
//const int block_size = 512;
const int block_size = 1024;
cudaError_t sumWithCuda(float *c, float *a, unsigned int size, int type);
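// sumKernelStr2: block-level shared-memory tree reduction with interleaved
// (strided-index) addressing; sumKernelStr3: the same reduction with sequential
// addressing. Each thread loads two elements and each block writes its partial
// sum to c[blockIdx.x].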
template <int BLOCK_SIZE> __global__ void sumKernelStr2(float *c, float*a) {
__shared__ float sdata[BLOCK_SIZE*2];
unsigned int tid = 2*threadIdx.x;
unsigned int i = blockIdx.x * 2*blockDim.x + 2*threadIdx.x;
sdata[tid] = a[i];
sdata[tid + 1] = a[i + 1];
__syncthreads();
for (unsigned int odstep = 1; odstep < 2*blockDim.x; odstep *= 2) {
int index = odstep*tid;
if (index < 2*blockDim.x) {
sdata[index] += sdata[index + odstep];
}
__syncthreads();
}
if (tid == 0) c[blockIdx.x] = sdata[0];
}
template <int BLOCK_SIZE> __global__ void sumKernelStr3(float *c, float *a) {
__shared__ float sdata[BLOCK_SIZE * 2];
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * 2*blockDim.x + threadIdx.x;
sdata[tid] = a[i];
sdata[tid + blockDim.x] = a[i + blockDim.x];
__syncthreads();
for (unsigned int odstep = blockDim.x; odstep > 0; odstep /= 2) {
if (tid < odstep) sdata[tid] += sdata[tid + odstep];
__syncthreads();
}
if (tid == 0) c[blockIdx.x] = sdata[0];
}
int main()
{
srand(time(NULL));
float *a = (float*)malloc(sizeof(float)*arraySize);
for (int i = 0; i < arraySize; i++) a[i] = (float)(rand() % 20);
float c[1] = { 0 };
// Sum vector parallel.
cudaError_t cudaStatus = sumWithCuda(c, a, arraySize, 2);
cudaStatus = sumWithCuda(c, a, arraySize, 3);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addWithCuda failed!");
getchar();
return 1;
}
//for (int i = 0; i < arraySize; i++) printf("+%f", a[i]);
//printf("=%f\n",c[0]);
for (int i = 1; i < arraySize; i++) a[0] += a[i];
if (a[0] != c[0]) printf("DUPA! %f!=%f",a[0],c[0]);
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
getchar();
return 1;
}
free(a);
//getchar();
return 0;
}
// Helper function for using CUDA to sum vector in parallel.
cudaError_t sumWithCuda(float *c, float *a, unsigned int size, int type)
{
float *dev_a = 0;
float *dev_c = 0;
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
cudaDeviceProp deviceProp;
cudaStatus = cudaGetDeviceProperties(&deviceProp, 0);
if (deviceProp.computeMode == cudaComputeModeProhibited){
fprintf(stderr, "Error: device is running in <Compute Mode Prohibited>, no threads can use ::cudaSetDevice().\n");
getchar();
exit(EXIT_SUCCESS);
}
if (cudaStatus != cudaSuccess) printf("cudaGetDeviceProperties returned error code %d, line(%d)\n", cudaStatus, __LINE__);
else printf("GPU Device %d: \"%s\" with compute capability %d.%d MP:%d TH_MUL:%d TH:%d WARP:%d SH_MEM_BLOCK:%d %d\n\n", 0,
deviceProp.name, deviceProp.major, deviceProp.minor, deviceProp.multiProcessorCount, deviceProp.maxThreadsPerMultiProcessor, deviceProp.maxThreadsPerBlock, deviceProp.warpSize, deviceProp.sharedMemPerBlock, deviceProp.maxGridSize
);
int threads = size/2;
if (size > 2*block_size) threads = block_size;
int grid = size/threads/2;
// Allocate GPU buffers for 2 vectors (1 input, 1 output).
cudaStatus = cudaMalloc((void**)&dev_c, size / threads * sizeof(float));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(float));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(float), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// Allocate CUDA events that we'll use for timing
cudaEvent_t start;
cudaEvent_t stop;
if ((cudaStatus = cudaEventCreate(&start)) != cudaSuccess){
fprintf(stderr, "Failed to create start event (error code %s)!\n", cudaGetErrorString(cudaStatus));
exit(EXIT_FAILURE);}
if ((cudaStatus = cudaEventCreate(&stop)) != cudaSuccess){
fprintf(stderr, "Failed to create stop event (error code %s)!\n", cudaGetErrorString(cudaStatus));
exit(EXIT_FAILURE);}
if ((cudaStatus = cudaEventRecord(start, NULL)) != cudaSuccess){
fprintf(stderr, "Failed to record start event (error code %s)!\n", cudaGetErrorString(cudaStatus));
exit(EXIT_FAILURE);
}
int iter = 1;
for (int i = 0; i < iter; i++) {
if (type == 2)sumKernelStr2<block_size><< <grid, threads >> > (dev_c, dev_a);
if (type == 3)sumKernelStr3<block_size><< <grid, threads >> > (dev_c, dev_a);
while (grid > 1) {
if (grid > 2*block_size) grid /= (block_size*2);
else {
threads = grid/2;
grid = 1;
}
if (type == 2)sumKernelStr2<block_size> << <grid, threads >> > (dev_c, dev_c);
if (type == 3)sumKernelStr3<block_size> << <grid, threads >> > (dev_c, dev_c);
}
}
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
if ((cudaStatus = cudaEventRecord(stop, NULL)) != cudaSuccess){
fprintf(stderr, "Failed to record stop event (error code %s)!\n", cudaGetErrorString(cudaStatus));
exit(EXIT_FAILURE);}
if ((cudaStatus = cudaEventSynchronize(stop)) != cudaSuccess){
fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", cudaGetErrorString(cudaStatus));
exit(EXIT_FAILURE);}
float msecTotal = 0.0f;
if ((cudaStatus = cudaEventElapsedTime(&msecTotal, start, stop)) != cudaSuccess){
fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n", cudaGetErrorString(cudaStatus));
exit(EXIT_FAILURE);}
// Compute and print the performance
float msecPerVectorSum = msecTotal / iter;
double flopsPeVectorSum = size;
double gigaFlops = (flopsPeVectorSum * 1.0e-9f) / (msecPerVectorSum / 1000.0f);
printf(
"Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops, WorkgroupSize= %u threads/block\n",
gigaFlops,
msecPerVectorSum,
flopsPeVectorSum,
(unsigned int)threads);
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(c, dev_c, 1 * sizeof(float), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
cudaFree(dev_c);
cudaFree(dev_a);
return cudaStatus;
}
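/*
 * Editor's note (not part of the original file): the constant block_size and the
 * templated kernels sumKernelStr2<> / sumKernelStr3<> referenced above are defined
 * earlier in this translation unit and are not visible in this excerpt. The two
 * sketches below are hedged illustrations only: exampleReductionKernel shows the
 * general shape of a strided shared-memory reduction that such a wrapper expects
 * (one partial sum written per block), and exampleSumUsage shows an assumed calling
 * pattern. Both presume a power-of-two element count, which the halving logic in
 * sumWithCuda relies on.
 */
template <unsigned int BLOCK>
__global__ void exampleReductionKernel(float *out, const float *in)
{
__shared__ float sdata[BLOCK];
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * blockDim.x * 2 + tid;
sdata[tid] = in[i] + in[i + blockDim.x];          // first addition performed during the load
__syncthreads();
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
if (tid < s) sdata[tid] += sdata[tid + s];        // shared-memory tree reduction
__syncthreads();
}
if (tid == 0) out[blockIdx.x] = sdata[0];         // one partial sum per block
}
void exampleSumUsage()
{
const unsigned int size = 1u << 20;               // power-of-two element count (assumed requirement)
float *a = new float[size];
for (unsigned int i = 0; i < size; ++i) a[i] = 1.0f;   // expected sum == size
float result = 0.0f;
if (sumWithCuda(&result, a, size, 2) == cudaSuccess)   // type 2 selects sumKernelStr2
printf("sum = %f (expected %u)\n", result, size);
delete[] a;
}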
|
3a33ea1f0443d4dd8bb7480f3c0cd4e5486eb046.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "HugeCTR/include/layers/cast_layer.hpp"
namespace HugeCTR {
namespace {
__global__ void cast_kernel(__half* out, const float* in, int size) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < size; i += blockDim.x * gridDim.x) {
out[i] = __float2half(__ldg(in + i));
}
}
} // namespace
CastLayer::CastLayer(const TensorPtr<float>& bottom_tensor, const TensorPtr<__half>& top_tensor,
int device_id)
: Layer(device_id) {
assert(get_size_from_dims(bottom_tensor->get_dims()) ==
get_size_from_dims(top_tensor->get_dims()));
bottom_tensor_ = bottom_tensor;
top_tensor_ = top_tensor;
}
void CastLayer::fprop(hipStream_t stream) {
CudaDeviceContext context(get_device_id());
const float* bottom = bottom_tensor_->get_ptr();
__half* top = top_tensor_->get_ptr();
const size_t threads = 512;
const size_t blocks = ::min((bottom_tensor_->get_num_elements() - 1) / threads + 1, 1024ul);
hipLaunchKernelGGL(( cast_kernel), dim3(blocks), dim3(threads), 0, stream, top, bottom, bottom_tensor_->get_num_elements());
#ifndef NDEBUG
CK_CUDA_THROW_(hipDeviceSynchronize());
CK_CUDA_THROW_(hipGetLastError());
#endif
}
void CastLayer::bprop(hipStream_t stream) {
CudaDeviceContext context(get_device_id());
#ifndef NDEBUG
CK_CUDA_THROW_(hipDeviceSynchronize());
CK_CUDA_THROW_(hipGetLastError());
#endif
}
} // namespace HugeCTR
|
3a33ea1f0443d4dd8bb7480f3c0cd4e5486eb046.cu
|
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "HugeCTR/include/layers/cast_layer.hpp"
namespace HugeCTR {
namespace {
__global__ void cast_kernel(__half* out, const float* in, int size) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < size; i += blockDim.x * gridDim.x) {
out[i] = __float2half(__ldg(in + i));
}
}
} // namespace
CastLayer::CastLayer(const TensorPtr<float>& bottom_tensor, const TensorPtr<__half>& top_tensor,
int device_id)
: Layer(device_id) {
assert(get_size_from_dims(bottom_tensor->get_dims()) ==
get_size_from_dims(top_tensor->get_dims()));
bottom_tensor_ = bottom_tensor;
top_tensor_ = top_tensor;
}
void CastLayer::fprop(cudaStream_t stream) {
CudaDeviceContext context(get_device_id());
const float* bottom = bottom_tensor_->get_ptr();
__half* top = top_tensor_->get_ptr();
const size_t threads = 512;
const size_t blocks = std::min((bottom_tensor_->get_num_elements() - 1) / threads + 1, 1024ul);
cast_kernel<<<blocks, threads, 0, stream>>>(top, bottom, bottom_tensor_->get_num_elements());
#ifndef NDEBUG
CK_CUDA_THROW_(cudaDeviceSynchronize());
CK_CUDA_THROW_(cudaGetLastError());
#endif
}
void CastLayer::bprop(cudaStream_t stream) {
CudaDeviceContext context(get_device_id());
#ifndef NDEBUG
CK_CUDA_THROW_(cudaDeviceSynchronize());
CK_CUDA_THROW_(cudaGetLastError());
#endif
}
} // namespace HugeCTR
|
038586e0f4b0cd451f4270d35f35a7333ffcc8f4.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include "hip/hip_runtime.h"
#include "hip/hip_runtime_api.h"
#include <assert.h>
#define N 2//64
__global__ void foo(int *c) {
int b, a;
a = 2;
b = 3;
c[threadIdx.x]= a+b;
__syncthreads ();
}
int main(){
int *a;
int *dev_a;
a = (int*)malloc(N*sizeof(int));
hipMalloc((void**)&dev_a, N*sizeof(int));
hipMemcpy(dev_a, a, N*sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( foo), dim3(1), dim3(N), 0, 0, dev_a);
//ESBMC_verify_kernel(foo,1,N,dev_a);
hipMemcpy(a, dev_a, N*sizeof(int), hipMemcpyDeviceToHost);
for (int t=0;t<N;t++){
printf ("%d ", a[t]);
assert(a[t]==5);
}
hipFree(dev_a);
free(a);
return 0;
}
|
038586e0f4b0cd451f4270d35f35a7333ffcc8f4.cu
|
#include <stdio.h>
#include <stdlib.h>
#include "cuda.h"
#include "cuda_runtime_api.h"
#include <assert.h>
#define N 2//64
__global__ void foo(int *c) {
int b, a;
a = 2;
b = 3;
c[threadIdx.x]= a+b;
__syncthreads ();
}
int main(){
int *a;
int *dev_a;
a = (int*)malloc(N*sizeof(int));
cudaMalloc((void**)&dev_a, N*sizeof(int));
cudaMemcpy(dev_a, a, N*sizeof(int), cudaMemcpyHostToDevice);
foo<<<1, N>>>(dev_a);
//ESBMC_verify_kernel(foo,1,N,dev_a);
cudaMemcpy(a, dev_a, N*sizeof(int), cudaMemcpyDeviceToHost);
for (int t=0;t<N;t++){
printf ("%d ", a[t]);
assert(a[t]==5);
}
cudaFree(dev_a);
free(a);
return 0;
}
|