hip_filename (stringlengths 5–84) | hip_content (stringlengths 79–9.69M) | cuda_filename (stringlengths 4–83) | cuda_content (stringlengths 19–9.69M) |
---|---|---|---|
ae3257424a9f691967e3ba334a683782c0f61469.hip | // !!! This is a file automatically generated by hipify!!!
#include <helper_math.h>
#include <helper_cuda.h>
#include <hip/hip_runtime.h>
#include <hip/hip_texture_types.h>
#include <vector>
#include <string>
#include <thrust/sort.h>
#include <thrust/device_ptr.h>
#include <thrust/reduce.h>
#include "FourD.h"
#include "../constants.h"
#include "../../Util/SaveBMP.h"
__inline__ __host__ __device__ float2 make_float2(float4 a)
{
return make_float2(a.x, a.y);
}
//------------------------------------------------------------------------
// Globals.
//------------------------------------------------------------------------
__constant__ FDStaticParams c_fdStaticParams;
//__constant__ FDDynamicParams c_fdDynamicParams;
__constant__ FDLightPlaneParams c_fdLightPlaneParams;
__constant__ float2 c_fdRectangleSubConstant;
__device__ FDAtomics g_fdAtomics;
__constant__ float3 c_REF_CAL_lightPos[32 * 32];
float3 h_REF_CAL_lightPos[32 * 32];
// from GL (global constants)
texture<float4, 2, hipReadModeElementType> samplePositionTex;
texture<float4, 2, hipReadModeElementType> sampleNormalTex;
//------------------------------------------------------------------------
// Globals.
//------------------------------------------------------------------------
//------------------------------------------------------------------------
// host Globals.
//------------------------------------------------------------------------
FDStaticParams h_fdStaticParams;
//FDDynamicParams h_fdDynamicParams;
FDLightPlaneParams h_fdLightPlaneParams;
float2 h_fdRectangleSubConstant;
Layered2DSurfaceManager sampleRectangleSurfManager;
// gl interoperate
std::vector<FD::FDModel> FDscene;
//FD::FDSample FDsample;
FD::FDResult FDresult;
// config
bool m_enable_backfacecull = true;
float m_scenes_esp = 1e-6;
int m_viewWidth = 0, m_viewHeight = 0, m_viewWidth_LOG2UP;
int m_maxTriangleNum = 0;
int m_maxVerticesNum = 0;
int m_triangleNum = 0;
int m_verteicesNum = 0;
// 1D; the actual count is m_binNum * m_binNum;
//int m_binNum;
int m_validTriangleNum;// without backFace cull;
int m_validSampleNum;
int m_validBT_PairNum;
int m_validBinNum;
// device info
GPU_INFO my_device;
int m_maxShadowCalWarpNumPerBlock;
//int m_CTA_num = 1;
//int m_deviceID = 0;
//int m_maxGridDimX = 0;
//int m_maxBlockDimX = 0;
// buffers
//FD::Buffer<float4> verticesRectangleBuffer;
//FD::Buffer<float4> triVertexBuffer;
FD::Buffer<float4> triDataBuffer;
//FD::Buffer<float4> sampleDatabuffer;
FD::Buffer<float4> triAABBBuffer;
FD::Buffer<struct BinRange> triBinRangeBuffer;
FD::Buffer<int> binTriStartBuffer;
FD::Buffer<int> binTriEndBuffer;
FD::Buffer<int> binTriPairBinBuffer; // dynamically allocated
FD::Buffer<int> binTriPairTriBuffer; // dynamically allocated
FD::Buffer<int> triPairNumBuffer;
FD::Buffer<int> triPairNumPrefixSumBuffer;
FD::Buffer<int> binSampleStartBuffer;
FD::Buffer<int> binSampleEndBuffer;
FD::Buffer<float> binSampleMaxZBuffer;
FD::Buffer<float4> binSampleMinRangeBuffer;
FD::Buffer<int> binSamplePairBinBuffer;
FD::Buffer<int> binSamplePairSampleBuffer;
FD::Buffer<int> isBinValidBuffer;
FD::Buffer<int> isBinValidPrefixSumBuffer;
FD::Buffer<int> validBinBuffer;
FD::Buffer<float> AABB3DReduceBuffer;
//valid buffer
FD::Buffer<int> validBuffer;
//// linked- list;
//
//FD::LinkedList<unsigned int> sampleTileLinkedList;
// aabb
float4 modelRectangleAABB[2];
float4 sampleRectangleAABB[2];
// global atomic address;
void * g_fdAtomics_addr;
void * g_fdCounters_addr;
/// sample position;
namespace FDSample{
GLuint GLsamplePositionTex;
GLuint GLsampleNormalTex;
cudaGraphicsResource * positionRes;
hipArray_t positionArray;
cudaGraphicsResource * normalRes;
hipArray_t normalArray;
void setGLtexture(GLuint positionTex, GLuint normalTex)
{
GLsamplePositionTex = positionTex;
GLsampleNormalTex = normalTex;
}
void registerGLtexture()
{
checkCudaErrors(hipGraphicsGLRegisterImage(&positionRes, GLsamplePositionTex, GL_TEXTURE_2D, hipGraphicsMapFlagsNone));
checkCudaErrors(hipGraphicsGLRegisterImage(&normalRes, GLsampleNormalTex, GL_TEXTURE_2D, hipGraphicsMapFlagsNone));
}
void unregisterGLtexture()
{
checkCudaErrors(hipGraphicsUnregisterResource(positionRes));
checkCudaErrors(hipGraphicsUnregisterResource(normalRes));
}
void mapGLtexture()
{
checkCudaErrors(hipGraphicsMapResources(1, &positionRes, NULL));
checkCudaErrors(hipGraphicsSubResourceGetMappedArray(&positionArray, positionRes, 0, 0));
// 2D texture
samplePositionTex.addressMode[0] = hipAddressModeWrap;
samplePositionTex.addressMode[1] = hipAddressModeWrap;
samplePositionTex.filterMode = hipFilterModePoint;
samplePositionTex.normalized = false; // access with normalized texture coordinates
// bind to the texture reference
checkCudaErrors(hipBindTextureToArray(samplePositionTex, positionArray, FD::float4_channelDesc));
checkCudaErrors(hipGraphicsMapResources(1, &normalRes, NULL));
checkCudaErrors(hipGraphicsSubResourceGetMappedArray(&normalArray, normalRes, 0, 0));
// 2D texture
sampleNormalTex.addressMode[0] = hipAddressModeWrap;
sampleNormalTex.addressMode[1] = hipAddressModeWrap;
sampleNormalTex.filterMode = hipFilterModePoint;
sampleNormalTex.normalized = false; // access with normalized texture coordinates
// bind to the texture reference
checkCudaErrors(hipBindTextureToArray(sampleNormalTex, normalArray, FD::float4_channelDesc));
}
void unmapGLtexture()
{
checkCudaErrors(hipGraphicsUnmapResources(1, &positionRes, NULL));
checkCudaErrors(hipUnbindTexture(samplePositionTex));
checkCudaErrors(hipGraphicsUnmapResources(1, &normalRes, NULL));
checkCudaErrors(hipUnbindTexture(sampleNormalTex));
}
}
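// Minimal usage sketch of the FDSample helpers above, assuming the caller already owns two
// GL_TEXTURE_2D float4 textures; the helper name and the per-frame placement are illustrative
// assumptions, since the real call sites are not part of this file.
static void FDSample_exampleUsage(GLuint positionTex, GLuint normalTex)
{
    FDSample::setGLtexture(positionTex, normalTex); // remember the GL texture ids
    FDSample::registerGLtexture();   // one-time graphics-interop registration
    FDSample::mapGLtexture();        // map the GL images and bind them to samplePositionTex/sampleNormalTex
    // ... launch kernels that fetch from the two texture references here ...
    FDSample::unmapGLtexture();      // unbind and hand the images back to GL
    FDSample::unregisterGLtexture(); // release the interop registration
}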
extern "C" void inline getG_Atomic_addr()
{
checkCudaErrors(hipGetSymbolAddress((void **)&g_fdAtomics_addr, g_fdAtomics));
}
extern "C" void inline setStaticParams()
{
checkCudaErrors(hipMemcpyToSymbol(c_fdStaticParams, (void *)&h_fdStaticParams, sizeof(FDStaticParams)));
}
//extern "C" void inline setDynamicParams()
//{
// checkCudaErrors(hipMemcpyToSymbol(c_fdDynamicParams, (void *)&h_fdDynamicParams, sizeof(FDDynamicParams)));
//}
extern "C" void inline setLightPlaneParams()
{
checkCudaErrors(hipMemcpyToSymbol(c_fdLightPlaneParams, &(h_fdLightPlaneParams), sizeof(FDLightPlaneParams), 0, hipMemcpyHostToDevice));
}
extern "C" void inline setRectangleSubConstant()
{
checkCudaErrors(hipMemcpyToSymbol(c_fdRectangleSubConstant, &(h_fdRectangleSubConstant), sizeof(float2), 0, hipMemcpyHostToDevice));
}
extern "C" void inline setRefLightPos()
{
checkCudaErrors(hipMemcpyToSymbol(c_REF_CAL_lightPos, &h_REF_CAL_lightPos, sizeof(c_REF_CAL_lightPos), 0, hipMemcpyHostToDevice));
}
//for release
// common functions
#include "FDprojection.inl"
#include "blockAABBReduce.inl"
#include "binRaster.inl"
// step 0
#include "Step0_triangleSetup.inl"
// step1 defer rendering scene
// step 2 define Frame buffer
#include "Step2_3_defineFDbuffer.inl"
// step 3 bind sample
#include "Step3_bindSampleToBin.inl"
// step 4 raster triangle
#include "Step4_RasterizeTriangleToBin.inl"
// step 5 cal shadow
#include "Step5_1_prepareBin.inl"
#include "Step5_2_shadowCal.inl"
#include "Step5_2_calRef.inl"
#include "Step5_2_shadowCalPerSample.inl"
#include "moveModel.inl"
| ae3257424a9f691967e3ba334a683782c0f61469.cu | #include <helper_math.h>
#include <helper_cuda.h>
#include <cuda_runtime.h>
#include <cuda_texture_types.h>
#include <vector>
#include <string>
#include <thrust/sort.h>
#include <thrust/device_ptr.h>
#include <thrust/reduce.h>
#include "FourD.h"
#include "../constants.h"
#include "../../Util/SaveBMP.h"
__inline__ __host__ __device__ float2 make_float2(float4 a)
{
return make_float2(a.x, a.y);
}
//------------------------------------------------------------------------
// Globals.
//------------------------------------------------------------------------
__constant__ FDStaticParams c_fdStaticParams;
//__constant__ FDDynamicParams c_fdDynamicParams;
__constant__ FDLightPlaneParams c_fdLightPlaneParams;
__constant__ float2 c_fdRectangleSubConstant;
__device__ FDAtomics g_fdAtomics;
__constant__ float3 c_REF_CAL_lightPos[32 * 32];
float3 h_REF_CAL_lightPos[32 * 32];
// from GL (global constants)
texture<float4, 2, cudaReadModeElementType> samplePositionTex;
texture<float4, 2, cudaReadModeElementType> sampleNormalTex;
//------------------------------------------------------------------------
// Globals.
//------------------------------------------------------------------------
//------------------------------------------------------------------------
// host Globals.
//------------------------------------------------------------------------
FDStaticParams h_fdStaticParams;
//FDDynamicParams h_fdDynamicParams;
FDLightPlaneParams h_fdLightPlaneParams;
float2 h_fdRectangleSubConstant;
Layered2DSurfaceManager sampleRectangleSurfManager;
// gl interoperate
std::vector<FD::FDModel> FDscene;
//FD::FDSample FDsample;
FD::FDResult FDresult;
// config
bool m_enable_backfacecull = true;
float m_scenes_esp = 1e-6;
int m_viewWidth = 0, m_viewHeight = 0, m_viewWidth_LOG2UP;
int m_maxTriangleNum = 0;
int m_maxVerticesNum = 0;
int m_triangleNum = 0;
int m_verteicesNum = 0;
// 1D; the actual count is m_binNum * m_binNum;
//int m_binNum;
int m_validTriangleNum;// without backFace cull;
int m_validSampleNum;
int m_validBT_PairNum;
int m_validBinNum;
// device info
GPU_INFO my_device;
int m_maxShadowCalWarpNumPerBlock;
//int m_CTA_num = 1;
//int m_deviceID = 0;
//int m_maxGridDimX = 0;
//int m_maxBlockDimX = 0;
// buffers
//FD::Buffer<float4> verticesRectangleBuffer;
//FD::Buffer<float4> triVertexBuffer;
FD::Buffer<float4> triDataBuffer;
//FD::Buffer<float4> sampleDatabuffer;
FD::Buffer<float4> triAABBBuffer;
FD::Buffer<struct BinRange> triBinRangeBuffer;
FD::Buffer<int> binTriStartBuffer;
FD::Buffer<int> binTriEndBuffer;
FD::Buffer<int> binTriPairBinBuffer; // dynamically allocated
FD::Buffer<int> binTriPairTriBuffer; // dynamically allocated
FD::Buffer<int> triPairNumBuffer;
FD::Buffer<int> triPairNumPrefixSumBuffer;
FD::Buffer<int> binSampleStartBuffer;
FD::Buffer<int> binSampleEndBuffer;
FD::Buffer<float> binSampleMaxZBuffer;
FD::Buffer<float4> binSampleMinRangeBuffer;
FD::Buffer<int> binSamplePairBinBuffer;
FD::Buffer<int> binSamplePairSampleBuffer;
FD::Buffer<int> isBinValidBuffer;
FD::Buffer<int> isBinValidPrefixSumBuffer;
FD::Buffer<int> validBinBuffer;
FD::Buffer<float> AABB3DReduceBuffer;
//valid buffer
FD::Buffer<int> validBuffer;
//// linked- list;
//
//FD::LinkedList<unsigned int> sampleTileLinkedList;
// aabb
float4 modelRectangleAABB[2];
float4 sampleRectangleAABB[2];
// global atomic address;
void * g_fdAtomics_addr;
void * g_fdCounters_addr;
/// sample position;
namespace FDSample{
GLuint GLsamplePositionTex;
GLuint GLsampleNormalTex;
cudaGraphicsResource * positionRes;
cudaArray_t positionArray;
cudaGraphicsResource * normalRes;
cudaArray_t normalArray;
void setGLtexture(GLuint positionTex, GLuint normalTex)
{
GLsamplePositionTex = positionTex;
GLsampleNormalTex = normalTex;
}
void registerGLtexture()
{
checkCudaErrors(cudaGraphicsGLRegisterImage(&positionRes, GLsamplePositionTex, GL_TEXTURE_2D, cudaGraphicsMapFlagsNone));
checkCudaErrors(cudaGraphicsGLRegisterImage(&normalRes, GLsampleNormalTex, GL_TEXTURE_2D, cudaGraphicsMapFlagsNone));
}
void unregisterGLtexture()
{
checkCudaErrors(cudaGraphicsUnregisterResource(positionRes));
checkCudaErrors(cudaGraphicsUnregisterResource(normalRes));
}
void mapGLtexture()
{
checkCudaErrors(cudaGraphicsMapResources(1, &positionRes, NULL));
checkCudaErrors(cudaGraphicsSubResourceGetMappedArray(&positionArray, positionRes, 0, 0));
// 2D texture
samplePositionTex.addressMode[0] = cudaAddressModeWrap;
samplePositionTex.addressMode[1] = cudaAddressModeWrap;
samplePositionTex.filterMode = cudaFilterModePoint;
samplePositionTex.normalized = false; // access with normalized texture coordinates
// bind to the CUDA texture
checkCudaErrors(cudaBindTextureToArray(samplePositionTex, positionArray, FD::float4_channelDesc));
checkCudaErrors(cudaGraphicsMapResources(1, &normalRes, NULL));
checkCudaErrors(cudaGraphicsSubResourceGetMappedArray(&normalArray, normalRes, 0, 0));
// 2D texture
sampleNormalTex.addressMode[0] = cudaAddressModeWrap;
sampleNormalTex.addressMode[1] = cudaAddressModeWrap;
sampleNormalTex.filterMode = cudaFilterModePoint;
sampleNormalTex.normalized = false; // access with normalized texture coordinates
// bind to the CUDA texture
checkCudaErrors(cudaBindTextureToArray(sampleNormalTex, normalArray, FD::float4_channelDesc));
}
void unmapGLtexture()
{
checkCudaErrors(cudaGraphicsUnmapResources(1, &positionRes, NULL));
checkCudaErrors(cudaUnbindTexture(samplePositionTex));
checkCudaErrors(cudaGraphicsUnmapResources(1, &normalRes, NULL));
checkCudaErrors(cudaUnbindTexture(sampleNormalTex));
}
}
extern "C" void inline getG_Atomic_addr()
{
checkCudaErrors(cudaGetSymbolAddress((void **)&g_fdAtomics_addr, g_fdAtomics));
}
extern "C" void inline setStaticParams()
{
checkCudaErrors(cudaMemcpyToSymbol(c_fdStaticParams, (void *)&h_fdStaticParams, sizeof(FDStaticParams)));
}
//extern "C" void inline setDynamicParams()
//{
// checkCudaErrors(cudaMemcpyToSymbol(c_fdDynamicParams, (void *)&h_fdDynamicParams, sizeof(FDDynamicParams)));
//}
extern "C" void inline setLightPlaneParams()
{
checkCudaErrors(cudaMemcpyToSymbol(c_fdLightPlaneParams, &(h_fdLightPlaneParams), sizeof(FDLightPlaneParams), 0, cudaMemcpyHostToDevice));
}
extern "C" void inline setRectangleSubConstant()
{
checkCudaErrors(cudaMemcpyToSymbol(c_fdRectangleSubConstant, &(h_fdRectangleSubConstant), sizeof(float2), 0, cudaMemcpyHostToDevice));
}
extern "C" void inline setRefLightPos()
{
checkCudaErrors(cudaMemcpyToSymbol(c_REF_CAL_lightPos, &h_REF_CAL_lightPos, sizeof(c_REF_CAL_lightPos), 0, cudaMemcpyHostToDevice));
}
//for release
// common functions
#include "FDprojection.inl"
#include "blockAABBReduce.inl"
#include "binRaster.inl"
// step 0
#include "Step0_triangleSetup.inl"
// step1 defer rendering scene
// step 2 define Frame buffer
#include "Step2_3_defineFDbuffer.inl"
// step 3 bind sample
#include "Step3_bindSampleToBin.inl"
// step 4 raster triangle
#include "Step4_RasterizeTriangleToBin.inl"
// step 5 cal shadow
#include "Step5_1_prepareBin.inl"
#include "Step5_2_shadowCal.inl"
#include "Step5_2_calRef.inl"
#include "Step5_2_shadowCalPerSample.inl"
#include "moveModel.inl"
|
956a7d395aaa717c240662cac3d20b28639fe3f1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void KerSortDataParticles(unsigned n,unsigned pini,const unsigned *sortpart,const float *a,float *a2)
{
const unsigned p=blockIdx.x*blockDim.x + threadIdx.x; //-Particle number.
if(p<n){
const unsigned oldpos=(p<pini? p: sortpart[p]);
a2[p]=a[oldpos];
}
} | 956a7d395aaa717c240662cac3d20b28639fe3f1.cu | #include "includes.h"
__global__ void KerSortDataParticles(unsigned n,unsigned pini,const unsigned *sortpart,const float *a,float *a2)
{
const unsigned p=blockIdx.x*blockDim.x + threadIdx.x; //-Particle number.
if(p<n){
const unsigned oldpos=(p<pini? p: sortpart[p]);
a2[p]=a[oldpos];
}
} |
0fca661f1388f05769af3501ddff45c0e15dffa5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2009-2021 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
// Maintainer: mphoward
/*!
* \file mpcd/ParticleData.cu
* \brief Defines GPU functions and kernels used by mpcd::ParticleData
*/
#ifdef ENABLE_MPI
#include "ParticleData.cuh"
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#if __CUDACC_VER_MAJOR__ >= 11
#include <cub/device/device_partition.cuh>
#include <cub/iterator/counting_input_iterator.cuh>
#else
#include "hoomd/extern/cub/cub/device/device_partition.cuh"
#include "hoomd/extern/cub/cub/iterator/counting_input_iterator.cuh"
#endif
#pragma GCC diagnostic pop
namespace mpcd
{
namespace gpu
{
namespace kernel
{
//! Kernel to partition particle data
/*!
* \param d_out Packed output buffer
* \param d_pos Device array of particle positions
* \param d_vel Device array of particle velocities
* \param d_tag Device array of particle tags
* \param d_comm_flags Communication flags (nonzero if particle should be migrated)
* \param d_remove_ids Partitioned indexes of particles to remove (first) followed by keep (last)
* \param n_remove Number of particles to remove
* \param N Number of local particles
*
* Particles are removed using the result of cub::DevicePartition, which constructs
* a list of particles to keep and remove.
*/
__global__ void remove_particles(mpcd::detail::pdata_element *d_out,
Scalar4 *d_pos,
Scalar4 *d_vel,
unsigned int *d_tag,
unsigned int *d_comm_flags,
const unsigned int *d_remove_ids,
const unsigned int n_remove,
const unsigned int N)
{
// one thread per particle
unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx >= n_remove) return;
const unsigned int pid = d_remove_ids[idx];
// pack a comm element
mpcd::detail::pdata_element p;
p.pos = d_pos[pid];
p.vel = d_vel[pid];
p.tag = d_tag[pid];
p.comm_flag = d_comm_flags[pid];
d_out[idx] = p;
// now fill myself back in with another particle if that exists
idx += n_remove;
if (idx >= N) return;
const unsigned int take_pid = d_remove_ids[idx];
d_pos[pid] = d_pos[take_pid];
d_vel[pid] = d_vel[take_pid];
d_tag[pid] = d_tag[take_pid];
d_comm_flags[pid] = d_comm_flags[take_pid];
}
//! Kernel to transform communication flags for prefix sum
/*!
* \param d_remove_flags Flag to remove (1) or keep (0) a particle (output)
* \param d_comm_flags Communication flags
* \param mask Bitwise mask for \a d_comm_flags
* \param N Number of local particles
*
* Any communication flags that are bitwise AND with \a mask are transformed to
* a 1 and stored in \a d_remove_flags, otherwise a 0 is set.
*/
__global__ void mark_removed_particles(unsigned char *d_remove_flags,
const unsigned int *d_comm_flags,
const unsigned int mask,
const unsigned int N)
{
// one thread per particle
const unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx >= N) return;
d_remove_flags[idx] = (d_comm_flags[idx] & mask) ? 1 : 0;
}
} // end namespace kernel
} // end namespace gpu
} // end namespace mpcd
/*!
* \param d_remove_flags Flag to remove (1) or keep (0) a particle (output)
* \param d_comm_flags Communication flags
* \param mask Bitwise mask for \a d_comm_flags
* \param N Number of local particles
* \param block_size Number of threads per block
*
* \sa mpcd::gpu::kernel::mark_removed_particles
*/
hipError_t mpcd::gpu::mark_removed_particles(unsigned char *d_remove_flags,
const unsigned int *d_comm_flags,
const unsigned int mask,
const unsigned int N,
const unsigned int block_size)
{
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
{
hipFuncAttributes attr;
hipFuncGetAttributes(&attr, (const void*)mpcd::gpu::kernel::mark_removed_particles);
max_block_size = attr.maxThreadsPerBlock;
}
unsigned int run_block_size = min(block_size, max_block_size);
dim3 grid(N / run_block_size + 1);
hipLaunchKernelGGL(( mpcd::gpu::kernel::mark_removed_particles), dim3(grid), dim3(run_block_size), 0, 0, d_remove_flags,
d_comm_flags,
mask,
N);
return hipSuccess;
}
/*!
* \param d_tmp Temporary storage
* \param tmp_bytes Number of bytes in temporary storage
* \param d_remove_flags Flags to remove (1) or keep (0) particles
* \param d_remove_ids Partitioned indexes of particles to remove (first) or keep (last)
* \param d_num_remove Number of particles to remove
* \param N Number of particles
*
* \returns hipSuccess on completion
*
* \b Implementation
* This is a wrapper to a cub::DevicePartition::Flagged, and as such requires
* two calls in order for the partitioning to take effect. In the first call,
* temporary storage is sized and returned in \a tmp_bytes. The caller must then
* allocate this memory into \a d_tmp, and call the method a second time. The
* particle indexes are then partitioned into \a d_remove_ids, with
* the particles to remove first in the array (in their original order), while
* the kept particles are put into a reverse order at the end of the array.
* The number of particles to remove is stored into \a d_num_remove.
*/
hipError_t mpcd::gpu::partition_particles(void *d_tmp,
size_t& tmp_bytes,
const unsigned char *d_remove_flags,
unsigned int *d_remove_ids,
unsigned int *d_num_remove,
const unsigned int N)
{
hipcub::CountingInputIterator<unsigned int> ids(0);
hipcub::DevicePartition::Flagged(d_tmp, tmp_bytes, ids, d_remove_flags, d_remove_ids, d_num_remove, N);
return hipSuccess;
}
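/*
 * Minimal sketch of the two-call pattern documented above, assuming the caller already owns
 * the device arrays; this free-standing helper and its name are illustrative, not part of the
 * original API, and error checking is omitted for brevity.
 */
static void example_partition_usage(const unsigned char *d_remove_flags,
                                    unsigned int *d_remove_ids,
                                    unsigned int *d_num_remove,
                                    const unsigned int N)
    {
    // first call with d_tmp == NULL only reports the required temporary storage size
    size_t tmp_bytes = 0;
    mpcd::gpu::partition_particles(NULL, tmp_bytes, d_remove_flags, d_remove_ids, d_num_remove, N);
    // allocate the scratch space that was requested
    void *d_tmp = NULL;
    hipMalloc(&d_tmp, tmp_bytes);
    // second call actually partitions the indexes into d_remove_ids and counts them in d_num_remove
    mpcd::gpu::partition_particles(d_tmp, tmp_bytes, d_remove_flags, d_remove_ids, d_num_remove, N);
    hipFree(d_tmp);
    }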
/*!
* \param d_out Output array for packed particle data
* \param d_pos Device array of particle positions
* \param d_vel Device array of particle velocities
* \param d_tag Device array of particle tags
* \param d_comm_flags Device array of communication flags
* \param d_pos_alt Device array of particle positions (output)
* \param d_vel_alt Device array of particle velocities (output)
* \param d_tag_alt Device array of particle tags (output)
* \param d_comm_flags_alt Device array of communication flags (output)
* \param d_remove_ids Partitioned indexes of particles to remove (first) or keep (last)
* \param n_remove Number of particles to remove
* \param N Current number of particles
* \param block_size Number of threads per block
*
* \returns hipSuccess on completion.
*
* \sa mpcd::gpu::kernel::remove_particles
*/
hipError_t mpcd::gpu::remove_particles(mpcd::detail::pdata_element *d_out,
Scalar4 *d_pos,
Scalar4 *d_vel,
unsigned int *d_tag,
unsigned int *d_comm_flags,
unsigned int *d_remove_ids,
const unsigned int n_remove,
const unsigned int N,
const unsigned int block_size)
{
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
{
hipFuncAttributes attr;
hipFuncGetAttributes(&attr, (const void*)mpcd::gpu::kernel::remove_particles);
max_block_size = attr.maxThreadsPerBlock;
}
unsigned int run_block_size = min(block_size, max_block_size);
dim3 grid(n_remove / run_block_size + 1);
hipLaunchKernelGGL(( mpcd::gpu::kernel::remove_particles), dim3(grid), dim3(run_block_size), 0, 0, d_out,
d_pos,
d_vel,
d_tag,
d_comm_flags,
d_remove_ids,
n_remove,
N);
return hipSuccess;
}
namespace mpcd
{
namespace gpu
{
namespace kernel
{
//! Kernel to partition particle data
/*!
* \param old_nparticles old local particle count
* \param num_add_ptls Number of particles in input array
* \param d_pos Device array of particle positions
* \param d_vel Device array of particle velocities
* \param d_tag Device array of particle tags
* \param d_comm_flags Device array of communication flags
* \param d_in Device array of packed input particle data
* \param mask Bitwise mask for received particles to unmask
*
* Particle data is appended to the end of the particle data arrays from the
* packed buffer. Communication flags of new particles are unmasked.
*/
__global__ void add_particles(unsigned int old_nparticles,
unsigned int num_add_ptls,
Scalar4 *d_pos,
Scalar4 *d_vel,
unsigned int *d_tag,
unsigned int *d_comm_flags,
const mpcd::detail::pdata_element *d_in,
const unsigned int mask)
{
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= num_add_ptls) return;
mpcd::detail::pdata_element p = d_in[idx];
unsigned int add_idx = old_nparticles + idx;
d_pos[add_idx] = p.pos;
d_vel[add_idx] = p.vel;
d_tag[add_idx] = p.tag;
d_comm_flags[add_idx] = p.comm_flag & ~mask;
}
} // end namespace kernel
} // end namespace gpu
} // end namespace mpcd
/*!
* \param old_nparticles old local particle count
* \param num_add_ptls Number of particles in input array
* \param d_pos Device array of particle positions
* \param d_vel Device array of particle velocities
* \param d_tag Device array of particle tags
* \param d_comm_flags Device array of communication flags
* \param d_in Device array of packed input particle data
* \param mask Bitwise mask for received particles to unmask
* \param block_size Number of threads per block
*
* Particle data is appended to the end of the particle data arrays from the
* packed buffer. Communication flags of new particles are unmasked.
*/
void mpcd::gpu::add_particles(unsigned int old_nparticles,
unsigned int num_add_ptls,
Scalar4 *d_pos,
Scalar4 *d_vel,
unsigned int *d_tag,
unsigned int *d_comm_flags,
const mpcd::detail::pdata_element *d_in,
const unsigned int mask,
const unsigned int block_size)
{
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
{
hipFuncAttributes attr;
hipFuncGetAttributes(&attr, (const void*)mpcd::gpu::kernel::add_particles);
max_block_size = attr.maxThreadsPerBlock;
}
unsigned int run_block_size = min(block_size, max_block_size);
dim3 grid(num_add_ptls / run_block_size + 1);
hipLaunchKernelGGL(( mpcd::gpu::kernel::add_particles), dim3(grid), dim3(run_block_size), 0, 0, old_nparticles,
num_add_ptls,
d_pos,
d_vel,
d_tag,
d_comm_flags,
d_in,
mask);
}
#endif // ENABLE_MPI
| 0fca661f1388f05769af3501ddff45c0e15dffa5.cu | // Copyright (c) 2009-2021 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
// Maintainer: mphoward
/*!
* \file mpcd/ParticleData.cu
* \brief Defines GPU functions and kernels used by mpcd::ParticleData
*/
#ifdef ENABLE_MPI
#include "ParticleData.cuh"
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#if __CUDACC_VER_MAJOR__ >= 11
#include <cub/device/device_partition.cuh>
#include <cub/iterator/counting_input_iterator.cuh>
#else
#include "hoomd/extern/cub/cub/device/device_partition.cuh"
#include "hoomd/extern/cub/cub/iterator/counting_input_iterator.cuh"
#endif
#pragma GCC diagnostic pop
namespace mpcd
{
namespace gpu
{
namespace kernel
{
//! Kernel to partition particle data
/*!
* \param d_out Packed output buffer
* \param d_pos Device array of particle positions
* \param d_vel Device array of particle velocities
* \param d_tag Device array of particle tags
* \param d_comm_flags Communication flags (nonzero if particle should be migrated)
* \param d_remove_ids Partitioned indexes of particles to remove (first) followed by keep (last)
* \param n_remove Number of particles to remove
* \param N Number of local particles
*
* Particles are removed using the result of cub::DevicePartition, which constructs
* a list of particles to keep and remove.
*/
__global__ void remove_particles(mpcd::detail::pdata_element *d_out,
Scalar4 *d_pos,
Scalar4 *d_vel,
unsigned int *d_tag,
unsigned int *d_comm_flags,
const unsigned int *d_remove_ids,
const unsigned int n_remove,
const unsigned int N)
{
// one thread per particle
unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx >= n_remove) return;
const unsigned int pid = d_remove_ids[idx];
// pack a comm element
mpcd::detail::pdata_element p;
p.pos = d_pos[pid];
p.vel = d_vel[pid];
p.tag = d_tag[pid];
p.comm_flag = d_comm_flags[pid];
d_out[idx] = p;
// now fill myself back in with another particle if that exists
idx += n_remove;
if (idx >= N) return;
const unsigned int take_pid = d_remove_ids[idx];
d_pos[pid] = d_pos[take_pid];
d_vel[pid] = d_vel[take_pid];
d_tag[pid] = d_tag[take_pid];
d_comm_flags[pid] = d_comm_flags[take_pid];
}
//! Kernel to transform communication flags for prefix sum
/*!
* \param d_remove_flags Flag to remove (1) or keep (0) a particle (output)
* \param d_comm_flags Communication flags
* \param mask Bitwise mask for \a d_comm_flags
* \param N Number of local particles
*
* Any communication flags that are bitwise AND with \a mask are transformed to
* a 1 and stored in \a d_remove_flags, otherwise a 0 is set.
*/
__global__ void mark_removed_particles(unsigned char *d_remove_flags,
const unsigned int *d_comm_flags,
const unsigned int mask,
const unsigned int N)
{
// one thread per particle
const unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx >= N) return;
d_remove_flags[idx] = (d_comm_flags[idx] & mask) ? 1 : 0;
}
} // end namespace kernel
} // end namespace gpu
} // end namespace mpcd
/*!
* \param d_remove_flags Flag to remove (1) or keep (0) a particle (output)
* \param d_comm_flags Communication flags
* \param mask Bitwise mask for \a d_comm_flags
* \param N Number of local particles
* \param block_size Number of threads per block
*
* \sa mpcd::gpu::kernel::mark_removed_particles
*/
cudaError_t mpcd::gpu::mark_removed_particles(unsigned char *d_remove_flags,
const unsigned int *d_comm_flags,
const unsigned int mask,
const unsigned int N,
const unsigned int block_size)
{
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
{
cudaFuncAttributes attr;
cudaFuncGetAttributes(&attr, (const void*)mpcd::gpu::kernel::mark_removed_particles);
max_block_size = attr.maxThreadsPerBlock;
}
unsigned int run_block_size = min(block_size, max_block_size);
dim3 grid(N / run_block_size + 1);
mpcd::gpu::kernel::mark_removed_particles<<<grid, run_block_size>>>(d_remove_flags,
d_comm_flags,
mask,
N);
return cudaSuccess;
}
/*!
* \param d_tmp Temporary storage
* \param tmp_bytes Number of bytes in temporary storage
* \param d_remove_flags Flags to remove (1) or keep (0) particles
* \param d_remove_ids Partitioned indexes of particles to remove (first) or keep (last)
* \param d_num_remove Number of particles to remove
* \param N Number of particles
*
* \returns cudaSuccess on completion
*
* \b Implementation
* This is a wrapper to a cub::DevicePartition::Flagged, and as such requires
* two calls in order for the partitioning to take effect. In the first call,
* temporary storage is sized and returned in \a tmp_bytes. The caller must then
* allocate this memory into \a d_tmp, and call the method a second time. The
* particle indexes are then partitioned into \a d_remove_ids, with
* the particles to remove first in the array (in their original order), while
* the kept particles are put into a reverse order at the end of the array.
* The number of particles to remove is stored into \a d_num_remove.
*/
cudaError_t mpcd::gpu::partition_particles(void *d_tmp,
size_t& tmp_bytes,
const unsigned char *d_remove_flags,
unsigned int *d_remove_ids,
unsigned int *d_num_remove,
const unsigned int N)
{
cub::CountingInputIterator<unsigned int> ids(0);
cub::DevicePartition::Flagged(d_tmp, tmp_bytes, ids, d_remove_flags, d_remove_ids, d_num_remove, N);
return cudaSuccess;
}
/*!
* \param d_out Output array for packed particle data
* \param d_pos Device array of particle positions
* \param d_vel Device array of particle velocities
* \param d_tag Device array of particle tags
* \param d_comm_flags Device array of communication flags
* \param d_pos_alt Device array of particle positions (output)
* \param d_vel_alt Device array of particle velocities (output)
* \param d_tag_alt Device array of particle tags (output)
* \param d_comm_flags_alt Device array of communication flags (output)
* \param d_remove_ids Partitioned indexes of particles to remove (first) or keep (last)
* \param n_remove Number of particles to remove
* \param N Current number of particles
* \param block_size Number of threads per block
*
* \returns cudaSuccess on completion.
*
* \sa mpcd::gpu::kernel::remove_particles
*/
cudaError_t mpcd::gpu::remove_particles(mpcd::detail::pdata_element *d_out,
Scalar4 *d_pos,
Scalar4 *d_vel,
unsigned int *d_tag,
unsigned int *d_comm_flags,
unsigned int *d_remove_ids,
const unsigned int n_remove,
const unsigned int N,
const unsigned int block_size)
{
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
{
cudaFuncAttributes attr;
cudaFuncGetAttributes(&attr, (const void*)mpcd::gpu::kernel::remove_particles);
max_block_size = attr.maxThreadsPerBlock;
}
unsigned int run_block_size = min(block_size, max_block_size);
dim3 grid(n_remove / run_block_size + 1);
mpcd::gpu::kernel::remove_particles<<<grid, run_block_size>>>(d_out,
d_pos,
d_vel,
d_tag,
d_comm_flags,
d_remove_ids,
n_remove,
N);
return cudaSuccess;
}
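/*
 * Minimal sketch (illustrative, not part of the original file) of how the three wrappers above
 * combine into the removal pass described in the kernel documentation: flag particles by mask,
 * partition their indexes, then pack and backfill. The temporary-storage handling, the copy of
 * the removal count back to the host, and the block size of 256 are assumptions of this sketch.
 */
static void example_remove_pass(mpcd::detail::pdata_element *d_out,
                                Scalar4 *d_pos,
                                Scalar4 *d_vel,
                                unsigned int *d_tag,
                                unsigned int *d_comm_flags,
                                unsigned char *d_remove_flags,
                                unsigned int *d_remove_ids,
                                unsigned int *d_num_remove,
                                const unsigned int mask,
                                const unsigned int N)
    {
    // 1. mark particles whose communication flags match the mask
    mpcd::gpu::mark_removed_particles(d_remove_flags, d_comm_flags, mask, N, 256);
    // 2. partition the particle indexes (two-call pattern: size, allocate, run)
    size_t tmp_bytes = 0;
    mpcd::gpu::partition_particles(NULL, tmp_bytes, d_remove_flags, d_remove_ids, d_num_remove, N);
    void *d_tmp = NULL;
    cudaMalloc(&d_tmp, tmp_bytes);
    mpcd::gpu::partition_particles(d_tmp, tmp_bytes, d_remove_flags, d_remove_ids, d_num_remove, N);
    cudaFree(d_tmp);
    // 3. fetch the removal count and pack the flagged particles while backfilling the holes
    unsigned int n_remove = 0;
    cudaMemcpy(&n_remove, d_num_remove, sizeof(unsigned int), cudaMemcpyDeviceToHost);
    mpcd::gpu::remove_particles(d_out, d_pos, d_vel, d_tag, d_comm_flags,
                                d_remove_ids, n_remove, N, 256);
    }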
namespace mpcd
{
namespace gpu
{
namespace kernel
{
//! Kernel to partition particle data
/*!
* \param old_nparticles old local particle count
* \param num_add_ptls Number of particles in input array
* \param d_pos Device array of particle positions
* \param d_vel Device array of particle velocities
* \param d_tag Device array of particle tags
* \param d_comm_flags Device array of communication flags
* \param d_in Device array of packed input particle data
* \param mask Bitwise mask for received particles to unmask
*
* Particle data is appended to the end of the particle data arrays from the
* packed buffer. Communication flags of new particles are unmasked.
*/
__global__ void add_particles(unsigned int old_nparticles,
unsigned int num_add_ptls,
Scalar4 *d_pos,
Scalar4 *d_vel,
unsigned int *d_tag,
unsigned int *d_comm_flags,
const mpcd::detail::pdata_element *d_in,
const unsigned int mask)
{
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= num_add_ptls) return;
mpcd::detail::pdata_element p = d_in[idx];
unsigned int add_idx = old_nparticles + idx;
d_pos[add_idx] = p.pos;
d_vel[add_idx] = p.vel;
d_tag[add_idx] = p.tag;
d_comm_flags[add_idx] = p.comm_flag & ~mask;
}
} // end namespace kernel
} // end namespace gpu
} // end namespace mpcd
/*!
* \param old_nparticles old local particle count
* \param num_add_ptls Number of particles in input array
* \param d_pos Device array of particle positions
* \param d_vel Device array of particle velocities
* \param d_tag Device array of particle tags
* \param d_comm_flags Device array of communication flags
* \param d_in Device array of packed input particle data
* \param mask Bitwise mask for received particles to unmask
* \param block_size Number of threads per block
*
* Particle data is appended to the end of the particle data arrays from the
* packed buffer. Communication flags of new particles are unmasked.
*/
void mpcd::gpu::add_particles(unsigned int old_nparticles,
unsigned int num_add_ptls,
Scalar4 *d_pos,
Scalar4 *d_vel,
unsigned int *d_tag,
unsigned int *d_comm_flags,
const mpcd::detail::pdata_element *d_in,
const unsigned int mask,
const unsigned int block_size)
{
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
{
cudaFuncAttributes attr;
cudaFuncGetAttributes(&attr, (const void*)mpcd::gpu::kernel::add_particles);
max_block_size = attr.maxThreadsPerBlock;
}
unsigned int run_block_size = min(block_size, max_block_size);
dim3 grid(num_add_ptls / run_block_size + 1);
mpcd::gpu::kernel::add_particles<<<grid, run_block_size>>>(old_nparticles,
num_add_ptls,
d_pos,
d_vel,
d_tag,
d_comm_flags,
d_in,
mask);
}
#endif // ENABLE_MPI
|
eee8abf2af3bf90adcc8ba125117e8c787a5c9cd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include <climits>
#define SERIAL_SCALE 2
#define SERIAL_PART (1<<SERIAL_SCALE)
extern "C" {
__global__
void kernelMain(int *input, int *output){
int thid = (blockIdx.x * blockDim.x) + threadIdx.x;
__shared__ int mem[1024];
int m=input[thid*SERIAL_PART];
for(unsigned int i=1;i<SERIAL_PART;++i)
{
int t=input[thid*SERIAL_PART+i];
if(t<m)
m=t;
}
mem[threadIdx.x]=m;
__syncthreads();
for(unsigned int shift=1;shift<1024;shift*=2)
{
int val=mem[threadIdx.x];
if(threadIdx.x>=shift)
{
if(val>mem[threadIdx.x-shift])
val=mem[threadIdx.x-shift];
}
__syncthreads();
mem[threadIdx.x]=val;
__syncthreads(); // make the updated partial minima visible before the next iteration reads them
}
if(threadIdx.x==1023)
output[blockIdx.x]=mem[1023];
}
__global__ void kernelPrepare(int *input, int *output, int* args)
{
const unsigned int count=args[0];
const unsigned int n=args[1];
int thid = (blockIdx.x * blockDim.x) + threadIdx.x;
if(thid*count>=n)
return;
int m=input[thid*count];
for(unsigned int i=1;i<count && thid*count+i<n;++i)
{
if(m>input[thid*count+i])
m=input[thid*count+i];
}
output[thid]=m;
}
}
| eee8abf2af3bf90adcc8ba125117e8c787a5c9cd.cu | #include <cstdio>
#include <climits>
#define SERIAL_SCALE 2
#define SERIAL_PART (1<<SERIAL_SCALE)
extern "C" {
__global__
void kernelMain(int *input, int *output){
int thid = (blockIdx.x * blockDim.x) + threadIdx.x;
__shared__ int mem[1024];
int m=input[thid*SERIAL_PART];
for(unsigned int i=1;i<SERIAL_PART;++i)
{
int t=input[thid*SERIAL_PART+i];
if(t<m)
m=t;
}
mem[threadIdx.x]=m;
__syncthreads();
for(unsigned int shift=1;shift<1024;shift*=2)
{
int val=mem[threadIdx.x];
if(threadIdx.x>=shift)
{
if(val>mem[threadIdx.x-shift])
val=mem[threadIdx.x-shift];
}
__syncthreads();
mem[threadIdx.x]=val;
__syncthreads(); // make the updated partial minima visible before the next iteration reads them
}
if(threadIdx.x==1023)
output[blockIdx.x]=mem[1023];
}
__global__ void kernelPrepare(int *input, int *output, int* args)
{
const unsigned int count=args[0];
const unsigned int n=args[1];
int thid = (blockIdx.x * blockDim.x) + threadIdx.x;
if(thid*count>=n)
return;
int m=input[thid*count];
for(unsigned int i=1;i<count && thid*count+i<n;++i)
{
if(m>input[thid*count+i])
m=input[thid*count+i];
}
output[thid]=m;
}
}
|
ef1e976d0b5a0e9565bd2da8409ed04796089413.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, int var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23,float var_24,float var_25,float var_26) {
if (comp < (-0.0f / fmodf(+1.8753E-41f + var_2 / -1.0982E-43f, (+1.1450E-41f / var_3)))) {
float tmp_1 = (var_4 - (+1.3056E34f / -1.7195E35f));
comp += tmp_1 * var_5 * (var_6 / asinf(-1.2615E25f));
comp += var_7 - -1.8671E-24f;
for (int i=0; i < var_1; ++i) {
comp = log10f(var_8 / (var_9 + tanhf(ceilf(-0.0f))));
comp += sinhf((var_10 / var_11 / -0.0f));
}
if (comp > var_12 * var_13) {
float tmp_2 = -1.4037E-37f;
comp += tmp_2 / (var_14 - (var_15 / asinf((-1.5623E-19f + var_16))));
comp += powf(tanhf((var_17 + (+1.3386E-41f - -1.2844E-35f + (var_18 - +1.2629E18f)))), (var_19 / var_20 - -1.0365E-42f * +0.0f * -0.0f));
comp = (var_21 - -0.0f * var_22 - +1.5954E36f + +0.0f / var_23);
}
if (comp < +1.9744E-36f - -1.6597E9f + +0.0f) {
comp += (var_24 - var_25 / -1.6978E-42f / var_26);
float tmp_3 = +1.3585E-35f - (-0.0f / +0.0f);
comp += tmp_3 * (+1.5991E34f + atan2f(+1.9960E-37f, -0.0f));
}
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
float tmp_3 = atof(argv[3]);
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
float tmp_12 = atof(argv[12]);
float tmp_13 = atof(argv[13]);
float tmp_14 = atof(argv[14]);
float tmp_15 = atof(argv[15]);
float tmp_16 = atof(argv[16]);
float tmp_17 = atof(argv[17]);
float tmp_18 = atof(argv[18]);
float tmp_19 = atof(argv[19]);
float tmp_20 = atof(argv[20]);
float tmp_21 = atof(argv[21]);
float tmp_22 = atof(argv[22]);
float tmp_23 = atof(argv[23]);
float tmp_24 = atof(argv[24]);
float tmp_25 = atof(argv[25]);
float tmp_26 = atof(argv[26]);
float tmp_27 = atof(argv[27]);
hipLaunchKernelGGL(( compute), dim3(1),dim3(1), 0, 0, tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24,tmp_25,tmp_26,tmp_27);
hipDeviceSynchronize();
return 0;
}
| ef1e976d0b5a0e9565bd2da8409ed04796089413.cu |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, int var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23,float var_24,float var_25,float var_26) {
if (comp < (-0.0f / fmodf(+1.8753E-41f + var_2 / -1.0982E-43f, (+1.1450E-41f / var_3)))) {
float tmp_1 = (var_4 - (+1.3056E34f / -1.7195E35f));
comp += tmp_1 * var_5 * (var_6 / asinf(-1.2615E25f));
comp += var_7 - -1.8671E-24f;
for (int i=0; i < var_1; ++i) {
comp = log10f(var_8 / (var_9 + tanhf(ceilf(-0.0f))));
comp += sinhf((var_10 / var_11 / -0.0f));
}
if (comp > var_12 * var_13) {
float tmp_2 = -1.4037E-37f;
comp += tmp_2 / (var_14 - (var_15 / asinf((-1.5623E-19f + var_16))));
comp += powf(tanhf((var_17 + (+1.3386E-41f - -1.2844E-35f + (var_18 - +1.2629E18f)))), (var_19 / var_20 - -1.0365E-42f * +0.0f * -0.0f));
comp = (var_21 - -0.0f * var_22 - +1.5954E36f + +0.0f / var_23);
}
if (comp < +1.9744E-36f - -1.6597E9f + +0.0f) {
comp += (var_24 - var_25 / -1.6978E-42f / var_26);
float tmp_3 = +1.3585E-35f - (-0.0f / +0.0f);
comp += tmp_3 * (+1.5991E34f + atan2f(+1.9960E-37f, -0.0f));
}
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
float tmp_3 = atof(argv[3]);
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
float tmp_12 = atof(argv[12]);
float tmp_13 = atof(argv[13]);
float tmp_14 = atof(argv[14]);
float tmp_15 = atof(argv[15]);
float tmp_16 = atof(argv[16]);
float tmp_17 = atof(argv[17]);
float tmp_18 = atof(argv[18]);
float tmp_19 = atof(argv[19]);
float tmp_20 = atof(argv[20]);
float tmp_21 = atof(argv[21]);
float tmp_22 = atof(argv[22]);
float tmp_23 = atof(argv[23]);
float tmp_24 = atof(argv[24]);
float tmp_25 = atof(argv[25]);
float tmp_26 = atof(argv[26]);
float tmp_27 = atof(argv[27]);
compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24,tmp_25,tmp_26,tmp_27);
cudaDeviceSynchronize();
return 0;
}
|
65cab7a443e1a299545f2e019f2e11652265f21e.hip | // !!! This is a file automatically generated by hipify!!!
// Let the GPU perform a 2-dimensional convolution of filter KIO over tensor TBC
// To compile: nvcc -arch=sm_70 -std=c++14 -o conv_tbc cudnnconvTBC.cu -lcudnn
// Example command: ./conv_tbc testW testB testI testL
// Last modified: Bambo Wu 05/12/2019
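// Hypothetical example of the <W> (weights) input file consumed below, inferred from the read
// loop in main(): a header line "K I O", the three dimensions, then K*I*O values listed with k
// fastest for each (output channel, input channel) pair; the numbers here are made up.
//
//   K I O
//   3 2 1
//   0.1 0.2 0.3
//   0.4 0.5 0.6
//
// <B> starts with a header line "O" followed by the count and O bias values; <I> and <L> start
// with "T B C" followed by the dimensions and T*B*C values in the order of their read loops.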
#include <iostream>
#include <iomanip>
#include <fstream>
#include <string>
#include <cstdlib>
#include <cudnn.h>
std::string CONV_FWD_ALGO[8] = {
"IMPLICIT_GEMM",
"IMPLICIT_PRECOMP_GEMM",
"GEMM",
"DIRECT",
"FFT",
"FFT_TILING",
"WINOGRAD",
"WINOGRAD_NONFUSED"};
std::string FILTER_BWD_ALGO[6] = {
"BWD_FILTER_ALGO_0",
"BWD_FILTER_ALGO_1",
"FFT",
"BWD_FILTER_ALGO_3",
"WINOGRAD_NONFUSED",
"FFT_TILING"};
std::string DATA_BWD_ALGO[6] = {
"BWD_DATA_ALGO_0",
"BWD_DATA_ALGO_1",
"FFT",
"FFT_TILING",
"WINOGRAD",
"WINOGRAD_NONFUSED"};
//function to print out error message from CUDA calls
#define checkCUDA(exp) \
{ \
hipError_t status = (exp); \
if(status != hipSuccess) { \
std::cerr << "Error on line " << __LINE__ << ": " \
<< hipGetErrorString(status) << std::endl; \
std::exit(EXIT_FAILURE); \
} \
}
//function to print out error message from cuDNN calls
#define checkCUDNN(exp) \
{ \
cudnnStatus_t status = (exp); \
if(status != CUDNN_STATUS_SUCCESS) { \
std::cerr << "Error on line " << __LINE__ << ": " \
<< cudnnGetErrorString(status) << std::endl; \
std::exit(EXIT_FAILURE); \
} \
}
int offsetTBC(int *dims, int *coo) {
//int T = dims[0];
int B = dims[1];
int C = dims[2];
int t = coo[0];
int b = coo[1];
int c = coo[2];
return c + b * C + t * B * C;
}
int offsetKIO(int *dims, int *coo) {
//int K = dims[0];
int I = dims[1];
int O = dims[2];
int k = coo[0];
int i = coo[1];
int o = coo[2];
return o + i * O + k * I * O;
}
// reorganize weights from KIO to OIK; Win and Wout must not overlap
void reorg_weights(int *Wdims, float *Win, float *Wout) {
int offset = 0;
for (int k = 0; k < Wdims[0]; ++k) {
for (int in_ch = 0; in_ch < Wdims[1]; ++in_ch) {
for (int out_ch = 0; out_ch < Wdims[2]; ++out_ch) {
int offset_new = k + in_ch * Wdims[0] + out_ch * Wdims[0] * Wdims[1];
Wout[offset_new] = Win[offset++];
}
}
}
}
// reorganize loss from TBC to NCHW; Lin and Lout must not overlap
void reorg_loss(int *Ldims, float *Lin, float *Lout) {
int offset = 0;
for (int t = 0; t < Ldims[0]; ++t) {
for (int batch = 0; batch < Ldims[1]; ++batch) {
for (int ch = 0; ch < Ldims[2]; ++ch) {
int offset_new = t + ch * Ldims[0] + batch * Ldims[0] * Ldims[2];
Lout[offset_new] = Lin[offset++];
}
}
}
}
//// reorganize weights from KIO to OKI, Win, Wout should no overlap
//void reorg_weights(int *Wdims, float *Win, float *Wout) {
// int offset = 0;
// for (int k = 0; k < Wdims[0]; ++k) {
// for (int in_ch = 0; in_ch < Wdims[1]; ++in_ch) {
// for (int out_ch = 0; out_ch < Wdims[2]; ++out_ch) {
// int offset_new = in_ch + k * Wdims[1] + out_ch * Wdims[0] * Wdims[1];
// Wout[offset_new] = Win[offset++];
// }
// }
// }
//}
int main(int argc, char *argv[]) {
int pads = 0;
if (5 > argc) {
std::cout << "Usage: " << argv[0] << " <W> <B> <I> <L> [pads=0]\n";
return -1;
} else if (5 < argc) {
pads = atoi(argv[5]);
}
//std::cout << cudnnGetVersion() << std::endl;
// read the filter from file
std::string dummy_line;
int Wdims[3]; // {K, I, O}
std::ifstream ifs(argv[1]);
getline(ifs, dummy_line); // dummy line with "K I O"
ifs >> Wdims[0] >> Wdims[1] >> Wdims[2];
int Wsize = Wdims[0] * Wdims[1] * Wdims[2];
float *Wdata = new float[Wsize];
for (int out_ch = 0; out_ch < Wdims[2]; ++out_ch) {
for (int in_ch = 0; in_ch < Wdims[1]; ++in_ch) {
for (int k = 0; k < Wdims[0]; ++k) {
int Wcoo[3] = {k, in_ch, out_ch};
int offset = offsetKIO(Wdims, Wcoo);
ifs >> Wdata[offset];
}
}
}
ifs.close();
// read the bias from file
int Bdims[1]; // {O}
ifs.open(argv[2]);
getline(ifs, dummy_line); // dummy line with "O"
ifs >> Bdims[0];
int Bsize = Bdims[0];
if (Bdims[0] != Wdims[2]) {
std::cout << "Bias should as long as the number of output channels\n";
return -1;
}
float *Bdata = new float[Bsize];
for (int out_ch = 0; out_ch < Bdims[0]; ++out_ch) {
ifs >> Bdata[out_ch];
}
ifs.close();
for (int out_ch = 0; out_ch < Wdims[2]; ++out_ch) {
std::cout << "\033[1mWeights\033[3" << (6 - out_ch) << "m[out_ch=" <<
out_ch << "] (bias=" << std::setw(7) << std::fixed <<
std::setprecision(4) << Bdata[out_ch] <<")\033[0m\n";
for (int in_ch = 0; in_ch < Wdims[1]; ++in_ch) {
std::cout << "\033[9" << in_ch + 1 << "m[in_ch=" <<
in_ch << "]\033[0m[0:" << Wdims[0] - 1 << "]: ";
for (int k = 0; k < Wdims[0]; ++k) {
int Wcoo[3] = {k, in_ch, out_ch};
int offset = offsetKIO(Wdims, Wcoo);
std::cout << std::setw(7) << std::fixed <<
std::setprecision(4) << Wdata[offset] << " ";
}
std::cout << std::endl;
}
}
std::cout << std::endl;
// read the inputs from file
int Idims[3]; // {T, B, C}
ifs.open(argv[3]);
getline(ifs, dummy_line); // dummy line with "T B C"
ifs >> Idims[0] >> Idims[1] >> Idims[2];
int Isize = Idims[0] * Idims[1] * Idims[2];
float *Idata = new float[Isize];
for (int batch = 0; batch < Idims[1]; ++batch) {
for (int in_ch = 0; in_ch < Idims[2]; ++in_ch) {
for (int t = 0; t < Idims[0]; ++t) {
int Icoo[3] = {t, batch, in_ch};
int offset = offsetTBC(Idims, Icoo);
ifs >> Idata[offset];
}
}
}
ifs.close();
for (int batch = 0; batch < Idims[1]; ++batch) {
std::cout << "\033[1mInputs[batch=" << batch << "]\033[0m\n";
for (int in_ch = 0; in_ch < Idims[2]; ++in_ch) {
std::cout << "\033[9" << in_ch + 1 << "m[in_ch=" <<
in_ch << "]\033[0m[0:" << Idims[0] - 1 << "]: ";
for (int t = 0; t < Idims[0]; ++t) {
int Icoo[3] = {t, batch, in_ch};
int offset = offsetTBC(Idims, Icoo);
std::cout << std::setw(7) << std::fixed <<
std::setprecision(4) << Idata[offset] << " ";
}
std::cout << std::endl;
}
}
std::cout << std::endl;
// reorganize weights and inputs
float *Wdata4D = new float[Wsize];
reorg_weights(Wdims, Wdata, Wdata4D);
int Wdims4D[4] = {Wdims[2], // output channels
Wdims[1], // input channels
1, // to makeup 4D filter for convolution
Wdims[0]}; // filter length K
int Idims4D[4] = {Idims[1], // batch
Idims[2], // input channels
1, // to makeup 4D tensor for convolution
Idims[0]}; // time
int Istride[4] = {Idims[2], // next bach comes after all in_ch
1, // next channels is stored right after
1, // will not get non-zero coo on this dimension
Idims[1] * Idims[2]}; // next time point is after B*C
int Odims4D[4] = {Idims[1], // B of outputs equals to B of inputs
Wdims[2], // C of outputs equals to O of weights
1, // to makeup 4D tensor for convolution
Idims[0] - Wdims[0] + 1 + 2 * pads}; // T of outputs
int Ostride[4] = {Odims4D[1], // next batch comes after all in_ch
1, // next channel is stored to the next
1, // will not got non-zero coo on this dimension
Odims4D[0] * Odims4D[1]}; // next time point is after B*C
int Osize = Odims4D[0] * Odims4D[1] * Odims4D[3];
float *Odata = new float[Osize];
int pad0s[2] = {0, pads};
int dilation1s[2] = {1, 1}; // dilations
int stride1s[2] = {1, 1}; // strides
// create CuDNN context
cudnnHandle_t cudnn;
checkCUDNN(cudnnCreate(&cudnn));
// create convolution layer
cudnnConvolutionDescriptor_t conv_desc;
checkCUDNN(cudnnCreateConvolutionDescriptor(&conv_desc));
checkCUDNN(cudnnSetConvolutionNdDescriptor(conv_desc, // handle
2, // arrayLength
pad0s, // paddings
stride1s, // stride
dilation1s, // dilation
CUDNN_CROSS_CORRELATION ,// mode
CUDNN_DATA_FLOAT));//precision
// create filter
cudnnFilterDescriptor_t fil_desc;
checkCUDNN(cudnnCreateFilterDescriptor(&fil_desc));
checkCUDNN(cudnnSetFilterNdDescriptor(fil_desc, // descriptor
CUDNN_DATA_FLOAT, // precision
CUDNN_TENSOR_NCHW, // layout
4, // dimension
Wdims4D)); // filter size
// create the tensor for input
cudnnTensorDescriptor_t in_desc;
checkCUDNN(cudnnCreateTensorDescriptor(&in_desc));
checkCUDNN(cudnnSetTensorNdDescriptor(in_desc, // descriptor
CUDNN_DATA_FLOAT, // precision
4, // dimension
Idims4D, // size
Istride)); // stride
// create the tensor for output
cudnnTensorDescriptor_t out_desc;
checkCUDNN(cudnnCreateTensorDescriptor(&out_desc));
checkCUDNN(cudnnSetTensorNdDescriptor(out_desc, // descriptor
CUDNN_DATA_FLOAT, // precision
4, // dimension
Odims4D, // size
Ostride)); // stride
//// find convolution algorithm
int algos_cnt = 8;
cudnnConvolutionFwdAlgoPerf_t conv_algos[8];
checkCUDNN(cudnnFindConvolutionForwardAlgorithm(cudnn,
in_desc,
fil_desc,
conv_desc,
out_desc,
algos_cnt,
&algos_cnt,
conv_algos));
bool isfeasible = false;
for (int i = 0; i < algos_cnt && 0 == conv_algos[i].status; ++i) {
std::cout << "cudnnConvolutionFwdAlgo_t: " <<
CONV_FWD_ALGO[conv_algos[i].algo] <<
" (" << conv_algos[i].time << " ms)\n";
std::cout << "workspace: " << conv_algos[i].memory << " "
"determinism: " << conv_algos[i].determinism << " "
"mathType: " << conv_algos[i].mathType << std::endl;
isfeasible = true;
}
std::cout << std::endl;
if (!isfeasible) {
// dimensions check
std::cout << "Layout: {N, C, H, W}\n";
std::cout << "Wdims4D: {" << Wdims4D[0] << ", " << Wdims4D[1] <<
", " << Wdims4D[2] << ", " << Wdims4D[3] << "}\n";
std::cout << "Idims4D: {" << Idims4D[0] << ", " << Idims4D[1] <<
", " << Idims4D[2] << ", " << Idims4D[3] << "}\n";
std::cout << "Odims4D: {" << Odims4D[0] << ", " << Odims4D[1] <<
", " << Odims4D[2] << ", " << Odims4D[3] << "}\n";
checkCUDNN(cudnnGetConvolutionNdForwardOutputDim(conv_desc,
in_desc,
fil_desc,
4,
Odims4D));
std::cout << "Supposed to be\n";
std::cout << "Odims4D: {" << Odims4D[0] << ", " << Odims4D[1] <<
", " << Odims4D[2] << ", " << Odims4D[3] << "}\n";
} else {
// allocate memory on GPU
float *Wdata_GPU;
float *Idata_GPU;
float *Odata_GPU;
char *workspace = NULL;
checkCUDA(hipMalloc(&Wdata_GPU, Wsize * sizeof(float)));
checkCUDA(hipMalloc(&Idata_GPU, Isize * sizeof(float)));
checkCUDA(hipMalloc(&Odata_GPU, Osize * sizeof(float)));
if (conv_algos[0].memory) {
checkCUDA(hipMalloc(&workspace, conv_algos[0].memory));
}
// fill the Odata_GPU with bias
int bias_expand = Odims4D[0] * Odims4D[3]; // size to expand
hipStream_t *cpystreams = new hipStream_t[bias_expand];
for (int cnt = 0; cnt < bias_expand; ++cnt) {
checkCUDA(hipStreamCreate(&cpystreams[cnt]));
checkCUDA(hipMemcpyAsync(Odata_GPU + cnt * Bsize, Bdata,
Bsize * sizeof(float), hipMemcpyHostToDevice,
cpystreams[cnt]));
}
for (int cnt = 0; cnt < bias_expand; ++cnt) {
checkCUDA(hipStreamSynchronize(cpystreams[cnt]));
checkCUDA(hipStreamDestroy(cpystreams[cnt]));
}
delete[] cpystreams;
// prepare data on GPU and compute
float alpha = 1.0;
float beta = 1.0;
checkCUDA(hipMemcpy(Wdata_GPU, Wdata4D, Wsize * sizeof(float),
hipMemcpyHostToDevice));
checkCUDA(hipMemcpy(Idata_GPU, Idata, Isize * sizeof(float),
hipMemcpyHostToDevice));
checkCUDNN(cudnnConvolutionForward(cudnn,
&alpha,
in_desc,
Idata_GPU,
fil_desc,
Wdata_GPU,
conv_desc,
conv_algos[0].algo,
workspace,
conv_algos[0].memory,
&beta,
out_desc,
Odata_GPU));
checkCUDA(hipMemcpy(Odata, Odata_GPU, Osize * sizeof(float),
hipMemcpyDeviceToHost));
if (conv_algos[0].memory) {
checkCUDA(hipFree(workspace));
}
checkCUDA(hipFree(Odata_GPU));
//int Odims[3] = {Odims4D[2], Odims4D[0], Odims4D[3]};
int Odims[3] = {Odims4D[3], Odims4D[0], Odims4D[1]};
for (int batch = 0; batch < Odims[1]; ++batch) {
std::cout << "\033[1mOutputs[batch=" << batch << "]\033[0m\n";
for (int out_ch = 0; out_ch < Odims[2]; ++out_ch) {
std::cout << "\033[3" << (6 - out_ch) << "m[out_ch=" <<
out_ch << "]\033[0m[0:" << Odims[0] - 1 << "]: ";
for (int t = 0; t < Odims[0]; ++t) {
int Ocoo[3] = {t, batch, out_ch};
int offset = offsetTBC(Odims, Ocoo);
std::cout << std::setw(7) << std::fixed <<
std::setprecision(4) << Odata[offset] << " ";
}
std::cout << std::endl;
}
}
std::cout << std::endl;
// read the loss from file
int Ldims[3]; // {T, B, C}
ifs.open(argv[4]);
getline(ifs, dummy_line); // dummy line with "T B C"
ifs >> Ldims[0] >> Ldims[1] >> Ldims[2];
if (Ldims[0] != Odims[0] || Ldims[1] != Odims[1] || Ldims[2] != Odims[2]) {
std::cout << "Loss is supposed to have the same dimension as outputs\n";
} else {
int Lsize = Osize;
float *Ldata = new float[Lsize];
for (int batch = 0; batch < Ldims[1]; ++batch) {
for (int out_ch = 0; out_ch < Ldims[2]; ++out_ch) {
for (int t = 0; t < Ldims[0]; ++t) {
int Lcoo[3] = {t, batch, out_ch};
int offset = offsetTBC(Ldims, Lcoo);
ifs >> Ldata[offset];
}
}
}
ifs.close();
for (int batch = 0; batch < Ldims[1]; ++batch) {
std::cout << "\033[2mLoss[batch=" << batch << "]\033[0m\n";
for (int out_ch = 0; out_ch < Ldims[2]; ++out_ch) {
std::cout << "\033[3" << (6 - out_ch) << "m[out_ch=" <<
out_ch << "]\033[0m[0:" << Ldims[0] - 1 << "]: ";
for (int t = 0; t < Ldims[0]; ++t) {
int Lcoo[3] = {t, batch, out_ch};
int offset = offsetTBC(Ldims, Lcoo);
std::cout << std::setw(7) << std::fixed <<
std::setprecision(4) << Ldata[offset] << " ";
}
std::cout << std::endl;
}
}
std::cout << std::endl;
// reorganize loss for back propogation
float *Ldata4D = new float[Osize];
reorg_loss(Ldims, Ldata, Ldata4D);
int Ldims4D[4] = {Ldims[1], // batch
Ldims[2], // output channels
1, // to makeup 4D tensor
Ldims[0]}; // time
int Lstride[4] = {Ldims4D[3] * Ldims4D[2] * Ldims4D[1],
Ldims4D[3] * Ldims4D[2],
Ldims4D[3], // to satisfy _CHW-packed
1}; // next time point is right after
// backward for bias
int Bdims4D[4] = {1, Bdims[0], 1, 1};
int Bstride[4] = {1, 1, 1, 1};
float *Ldata_GPU = NULL;
float *Bias_GPU = NULL;
checkCUDA(hipMalloc(&Ldata_GPU, Lsize * sizeof(float)));
checkCUDA(hipMalloc(&Bias_GPU, Bsize * sizeof(float)));
checkCUDA(hipMemcpy(Ldata_GPU, Ldata4D, Lsize * sizeof(float),
hipMemcpyHostToDevice));
checkCUDA(hipMemset(Bias_GPU, 0, Bsize * sizeof(float)));
// create the tensor for loss
cudnnTensorDescriptor_t loss_desc;
checkCUDNN(cudnnCreateTensorDescriptor(&loss_desc));
checkCUDNN(cudnnSetTensorNdDescriptor(loss_desc, // descriptor
CUDNN_DATA_FLOAT, // precision
4, // dimension
Ldims4D, // size
Lstride)); // stride
// create the tensor for bias gradient
cudnnTensorDescriptor_t bias_desc;
checkCUDNN(cudnnCreateTensorDescriptor(&bias_desc));
checkCUDNN(cudnnSetTensorNdDescriptor(bias_desc, // descriptor
CUDNN_DATA_FLOAT, // precision
4, // dimension
Bdims4D, // size
Bstride)); // stride
checkCUDNN(cudnnConvolutionBackwardBias(cudnn,
&alpha,
loss_desc,
Ldata_GPU,
&beta,
bias_desc,
Bias_GPU));
float *Bias = new float[Bsize];
checkCUDA(hipMemcpy(Bias, Bias_GPU, Bsize * sizeof(float),
hipMemcpyDeviceToHost));
checkCUDA(hipFree(Bias_GPU));
cudnnDestroyTensorDescriptor(bias_desc);
std::cout << "\033[1mBias grad\033[0m\n[0:" << Bdims4D[1] - 1 << "]: ";
for (int out_ch = 0; out_ch < Bdims4D[1]; ++out_ch) {
std::cout << "\033[3" << (6 - out_ch) << "m" << std::setw(7) <<
std::fixed << std::setprecision(4) << Bias[out_ch] << " \033[0m";
}
std::cout << std::endl << std::endl;;
delete[] Bias;
// backward for weights
algos_cnt = 6;
cudnnConvolutionBwdFilterAlgoPerf_t bwd_f_algos[6];
checkCUDNN(cudnnFindConvolutionBackwardFilterAlgorithm(cudnn,
in_desc,
loss_desc,
conv_desc,
fil_desc,
algos_cnt,
&algos_cnt,
bwd_f_algos));
for (int i = 0; i < algos_cnt && 0 == bwd_f_algos[i].status; ++i) {
std::cout << "cudnnConvolutionBwdFilterAlgo_t " <<
FILTER_BWD_ALGO[bwd_f_algos[i].algo] <<
" (" << bwd_f_algos[i].time << " ms)\n";
std::cout << "workspace: " << bwd_f_algos[i].memory << " "
"determinism: " << bwd_f_algos[i].determinism << " "
"mathType: " << bwd_f_algos[i].mathType << std::endl;
}
std::cout << std::endl;
workspace = NULL;
if (bwd_f_algos[0].memory) {
checkCUDA(hipMalloc(&workspace, bwd_f_algos[0].memory));
}
float *WGdata_GPU;
checkCUDA(hipMalloc(&WGdata_GPU, Wsize * sizeof(float)));
checkCUDA(hipMemset(WGdata_GPU, 0, Wsize * sizeof(float)));
checkCUDNN(cudnnConvolutionBackwardFilter(cudnn,
&alpha,
in_desc,
Idata_GPU,
loss_desc,
Ldata_GPU,
conv_desc,
bwd_f_algos[0].algo,
workspace,
bwd_f_algos[0].memory,
&beta,
fil_desc,
WGdata_GPU));
if (workspace) {
checkCUDA(hipFree(workspace));
workspace = NULL;
}
checkCUDA(hipMemcpy(Wdata4D, WGdata_GPU, Wsize * sizeof(float),
hipMemcpyDeviceToHost));
checkCUDA(hipFree(WGdata_GPU));
for (int out_ch = 0; out_ch < Wdims4D[0]; ++out_ch) {
            std::cout << "\033[1mWeights gradient\033[3" << (6 - out_ch) <<
"m[out_ch=" << out_ch << "]\033[0m\n";
for (int in_ch = 0; in_ch < Wdims4D[1]; ++in_ch) {
std::cout << "\033[9" << in_ch + 1 << "m[in_ch=" <<
in_ch << "]\033[0m[0:" << Wdims4D[3] - 1 << "]: ";
for (int k = 0; k < Wdims4D[3]; ++k) {
int offset = k + in_ch * Wdims4D[3] +
out_ch * Wdims4D[3] * Wdims4D[1];
std::cout << std::setw(7) << std::fixed <<
std::setprecision(4) << Wdata4D[offset] << " ";
}
std::cout << std::endl;
}
}
std::cout << std::endl;
// backward for inputs
algos_cnt = 6;
cudnnConvolutionBwdDataAlgoPerf_t bwd_d_algos[6];
checkCUDNN(cudnnFindConvolutionBackwardDataAlgorithm(cudnn,
fil_desc,
loss_desc,
conv_desc,
in_desc,
algos_cnt,
&algos_cnt,
bwd_d_algos));
for (int i = 0; i < algos_cnt && 0 == bwd_d_algos[i].status; ++i) {
std::cout << "cudnnConvolutionBwdDataAlgo_t " <<
DATA_BWD_ALGO[bwd_d_algos[i].algo] <<
" (" << bwd_d_algos[i].time << " ms)\n";
std::cout << "workspace: " << bwd_d_algos[i].memory << " "
"determinism: " << bwd_d_algos[i].determinism << " "
"mathType: " << bwd_d_algos[i].mathType << std::endl;
}
std::cout << std::endl;
workspace = NULL;
if (bwd_d_algos[0].memory) {
checkCUDA(hipMalloc(&workspace, bwd_d_algos[0].memory));
}
float *IGdata_GPU = Idata_GPU; // could reuse
checkCUDA(hipMemset(IGdata_GPU, 0, Isize * sizeof(float)));
checkCUDNN(cudnnConvolutionBackwardData(cudnn,
&alpha,
fil_desc,
Wdata_GPU,
loss_desc,
Ldata_GPU,
conv_desc,
bwd_d_algos[0].algo,
workspace,
bwd_d_algos[0].memory,
&beta,
in_desc,
IGdata_GPU));
if (workspace) {
checkCUDA(hipFree(workspace));
workspace = NULL;
}
checkCUDA(hipMemcpy(Idata, IGdata_GPU, Isize * sizeof(float),
hipMemcpyDeviceToHost));
for (int batch = 0; batch < Idims[1]; ++batch) {
            std::cout << "\033[1mInputs gradient[batch=" << batch << "]\033[0m\n";
for (int in_ch = 0; in_ch < Idims[2]; ++in_ch) {
std::cout << "\033[9" << in_ch + 1 << "m[in_ch=" <<
in_ch << "]\033[0m[0:" << Idims[0] - 1 << "]: ";
for (int t = 0; t < Idims[0]; ++t) {
int Icoo[3] = {t, batch, in_ch};
int offset = offsetTBC(Idims, Icoo);
std::cout << std::setw(7) << std::fixed <<
std::setprecision(4) << Idata[offset] << " ";
}
std::cout << std::endl;
}
}
std::cout << std::endl;
        delete[] Ldata;
        delete[] Ldata4D;
checkCUDA(hipFree(Ldata_GPU));
cudnnDestroyTensorDescriptor(loss_desc);
} // if(Ldims[] == Odims[])
checkCUDA(hipFree(Idata_GPU));
checkCUDA(hipFree(Wdata_GPU));
} // if(isfeasible)
    delete[] Wdata;
    delete[] Bdata;
    delete[] Idata;
    delete[] Odata;
    delete[] Wdata4D;
cudnnDestroyTensorDescriptor(out_desc);
cudnnDestroyTensorDescriptor(in_desc);
cudnnDestroyFilterDescriptor(fil_desc);
cudnnDestroyConvolutionDescriptor(conv_desc);
cudnnDestroy(cudnn);
return 0;
}
| 65cab7a443e1a299545f2e019f2e11652265f21e.cu | // Let the GPU perform a 2-dimension convolution over filter KIO on tensor TBC
// To compile: nvcc -arch=sm_70 -std=c++14 -o conv_tbc cudnnconvTBC.cu -lcudnn
// Example command: ./conv_tbc testW testB testI testL
// Last modified: Bambo Wu 05/12/2019
#include <iostream>
#include <iomanip>
#include <fstream>
#include <string>
#include <cstdlib>
#include <cudnn.h>
std::string CONV_FWD_ALGO[8] = {
"IMPLICIT_GEMM",
"IMPLICIT_PRECOMP_GEMM",
"GEMM",
"DIRECT",
"FFT",
"FFT_TILING",
"WINOGRAD",
"WINOGRAD_NONFUSED"};
std::string FILTER_BWD_ALGO[6] = {
"BWD_FILTER_ALGO_0",
"BWD_FILTER_ALGO_1",
"FFT",
"BWD_FILTER_ALGO_3",
"WINOGRAD_NONFUSED",
"FFT_TILING"};
std::string DATA_BWD_ALGO[6] = {
"BWD_DATA_ALGO_0",
"BWD_DATA_ALGO_1",
"FFT",
"FFT_TILING",
"WINOGRAD",
"WINOGRAD_NONFUSED"};
//function to print out error message from CUDA calls
#define checkCUDA(exp) \
{ \
cudaError_t status = (exp); \
if(status != cudaSuccess) { \
std::cerr << "Error on line " << __LINE__ << ": " \
<< cudaGetErrorString(status) << std::endl; \
std::exit(EXIT_FAILURE); \
} \
}
//function to print out error message from cuDNN calls
#define checkCUDNN(exp) \
{ \
cudnnStatus_t status = (exp); \
if(status != CUDNN_STATUS_SUCCESS) { \
std::cerr << "Error on line " << __LINE__ << ": " \
<< cudnnGetErrorString(status) << std::endl; \
std::exit(EXIT_FAILURE); \
} \
}
int offsetTBC(int *dims, int *coo) {
//int T = dims[0];
int B = dims[1];
int C = dims[2];
int t = coo[0];
int b = coo[1];
int c = coo[2];
return c + b * C + t * B * C;
}
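// Worked example (illustration only; values are not from the original code):
// with dims {T=4, B=2, C=3}, the element at (t=1, b=0, c=2) lives at
// offset 2 + 0*3 + 1*2*3 = 8 in the TBC layout.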
int offsetKIO(int *dims, int *coo) {
//int K = dims[0];
int I = dims[1];
int O = dims[2];
int k = coo[0];
int i = coo[1];
int o = coo[2];
return o + i * O + k * I * O;
}
// reorganize weights from KIO to OIK; Win and Wout must not overlap
void reorg_weights(int *Wdims, float *Win, float *Wout) {
int offset = 0;
for (int k = 0; k < Wdims[0]; ++k) {
for (int in_ch = 0; in_ch < Wdims[1]; ++in_ch) {
for (int out_ch = 0; out_ch < Wdims[2]; ++out_ch) {
int offset_new = k + in_ch * Wdims[0] + out_ch * Wdims[0] * Wdims[1];
Wout[offset_new] = Win[offset++];
}
}
}
}
// reorganize loss from TBC to NCHW; Lin and Lout must not overlap
void reorg_loss(int *Ldims, float *Lin, float *Lout) {
int offset = 0;
for (int t = 0; t < Ldims[0]; ++t) {
for (int batch = 0; batch < Ldims[1]; ++batch) {
for (int ch = 0; ch < Ldims[2]; ++ch) {
int offset_new = t + ch * Ldims[0] + batch * Ldims[0] * Ldims[2];
Lout[offset_new] = Lin[offset++];
}
}
}
}
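// Worked example (illustration only; values are not from the original code):
// with Ldims {T=4, B=2, C=3}, the element at (t=1, batch=0, ch=2) is read from
// TBC offset 8 and written to NCHW offset 1 + 2*4 + 0*4*3 = 9.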
//// reorganize weights from KIO to OKI, Win, Wout should no overlap
//void reorg_weights(int *Wdims, float *Win, float *Wout) {
// int offset = 0;
// for (int k = 0; k < Wdims[0]; ++k) {
// for (int in_ch = 0; in_ch < Wdims[1]; ++in_ch) {
// for (int out_ch = 0; out_ch < Wdims[2]; ++out_ch) {
// int offset_new = in_ch + k * Wdims[1] + out_ch * Wdims[0] * Wdims[1];
// Wout[offset_new] = Win[offset++];
// }
// }
// }
//}
int main(int argc, char *argv[]) {
int pads = 0;
if (5 > argc) {
std::cout << "Usage: " << argv[0] << " <W> <B> <I> <L> [pads=0]\n";
return -1;
} else if (5 < argc) {
pads = atoi(argv[5]);
}
//std::cout << cudnnGetVersion() << std::endl;
// read the filter from file
std::string dummy_line;
int Wdims[3]; // {K, I, O}
std::ifstream ifs(argv[1]);
getline(ifs, dummy_line); // dummy line with "K I O"
ifs >> Wdims[0] >> Wdims[1] >> Wdims[2];
int Wsize = Wdims[0] * Wdims[1] * Wdims[2];
float *Wdata = new float[Wsize];
for (int out_ch = 0; out_ch < Wdims[2]; ++out_ch) {
for (int in_ch = 0; in_ch < Wdims[1]; ++in_ch) {
for (int k = 0; k < Wdims[0]; ++k) {
int Wcoo[3] = {k, in_ch, out_ch};
int offset = offsetKIO(Wdims, Wcoo);
ifs >> Wdata[offset];
}
}
}
ifs.close();
// read the bias from file
int Bdims[1]; // {O}
ifs.open(argv[2]);
getline(ifs, dummy_line); // dummy line with "O"
ifs >> Bdims[0];
int Bsize = Bdims[0];
if (Bdims[0] != Wdims[2]) {
std::cout << "Bias should as long as the number of output channels\n";
return -1;
}
float *Bdata = new float[Bsize];
for (int out_ch = 0; out_ch < Bdims[0]; ++out_ch) {
ifs >> Bdata[out_ch];
}
ifs.close();
for (int out_ch = 0; out_ch < Wdims[2]; ++out_ch) {
std::cout << "\033[1mWeights\033[3" << (6 - out_ch) << "m[out_ch=" <<
out_ch << "] (bias=" << std::setw(7) << std::fixed <<
std::setprecision(4) << Bdata[out_ch] <<")\033[0m\n";
for (int in_ch = 0; in_ch < Wdims[1]; ++in_ch) {
std::cout << "\033[9" << in_ch + 1 << "m[in_ch=" <<
in_ch << "]\033[0m[0:" << Wdims[0] - 1 << "]: ";
for (int k = 0; k < Wdims[0]; ++k) {
int Wcoo[3] = {k, in_ch, out_ch};
int offset = offsetKIO(Wdims, Wcoo);
std::cout << std::setw(7) << std::fixed <<
std::setprecision(4) << Wdata[offset] << " ";
}
std::cout << std::endl;
}
}
std::cout << std::endl;
// read the inputs from file
int Idims[3]; // {T, B, C}
ifs.open(argv[3]);
getline(ifs, dummy_line); // dummy line with "T B C"
ifs >> Idims[0] >> Idims[1] >> Idims[2];
int Isize = Idims[0] * Idims[1] * Idims[2];
float *Idata = new float[Isize];
for (int batch = 0; batch < Idims[1]; ++batch) {
for (int in_ch = 0; in_ch < Idims[2]; ++in_ch) {
for (int t = 0; t < Idims[0]; ++t) {
int Icoo[3] = {t, batch, in_ch};
int offset = offsetTBC(Idims, Icoo);
ifs >> Idata[offset];
}
}
}
ifs.close();
for (int batch = 0; batch < Idims[1]; ++batch) {
std::cout << "\033[1mInputs[batch=" << batch << "]\033[0m\n";
for (int in_ch = 0; in_ch < Idims[2]; ++in_ch) {
std::cout << "\033[9" << in_ch + 1 << "m[in_ch=" <<
in_ch << "]\033[0m[0:" << Idims[0] - 1 << "]: ";
for (int t = 0; t < Idims[0]; ++t) {
int Icoo[3] = {t, batch, in_ch};
int offset = offsetTBC(Idims, Icoo);
std::cout << std::setw(7) << std::fixed <<
std::setprecision(4) << Idata[offset] << " ";
}
std::cout << std::endl;
}
}
std::cout << std::endl;
// reorganize weights and inputs
float *Wdata4D = new float[Wsize];
reorg_weights(Wdims, Wdata, Wdata4D);
int Wdims4D[4] = {Wdims[2], // output channels
Wdims[1], // input channels
1, // to makeup 4D filter for convolution
Wdims[0]}; // filter length K
int Idims4D[4] = {Idims[1], // batch
Idims[2], // input channels
1, // to makeup 4D tensor for convolution
Idims[0]}; // time
int Istride[4] = {Idims[2], // next bach comes after all in_ch
1, // next channels is stored right after
1, // will not get non-zero coo on this dimension
Idims[1] * Idims[2]}; // next time point is after B*C
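    // Note: these custom strides describe the existing TBC buffer
    // (offset = c + b*C + t*B*C) to cuDNN as an NCHW tensor, so the input is
    // consumed in place without a physical transpose; Ostride below does the
    // same for the output.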
int Odims4D[4] = {Idims[1], // B of outputs equals to B of inputs
Wdims[2], // C of outputs equals to O of weights
1, // to makeup 4D tensor for convolution
Idims[0] - Wdims[0] + 1 + 2 * pads}; // T of outputs
int Ostride[4] = {Odims4D[1], // next batch comes after all in_ch
1, // next channel is stored to the next
1, // will not got non-zero coo on this dimension
Odims4D[0] * Odims4D[1]}; // next time point is after B*C
int Osize = Odims4D[0] * Odims4D[1] * Odims4D[3];
float *Odata = new float[Osize];
int pad0s[2] = {0, pads};
int dilation1s[2] = {1, 1}; // dilations
int stride1s[2] = {1, 1}; // strides
// create CuDNN context
cudnnHandle_t cudnn;
checkCUDNN(cudnnCreate(&cudnn));
// create convolution layer
cudnnConvolutionDescriptor_t conv_desc;
checkCUDNN(cudnnCreateConvolutionDescriptor(&conv_desc));
checkCUDNN(cudnnSetConvolutionNdDescriptor(conv_desc, // handle
2, // arrayLength
pad0s, // paddings
stride1s, // stride
dilation1s, // dilation
CUDNN_CROSS_CORRELATION ,// mode
CUDNN_DATA_FLOAT));//precision
// create filter
cudnnFilterDescriptor_t fil_desc;
checkCUDNN(cudnnCreateFilterDescriptor(&fil_desc));
checkCUDNN(cudnnSetFilterNdDescriptor(fil_desc, // descriptor
CUDNN_DATA_FLOAT, // precision
CUDNN_TENSOR_NCHW, // layout
4, // dimension
Wdims4D)); // filter size
// create the tensor for input
cudnnTensorDescriptor_t in_desc;
checkCUDNN(cudnnCreateTensorDescriptor(&in_desc));
checkCUDNN(cudnnSetTensorNdDescriptor(in_desc, // descriptor
CUDNN_DATA_FLOAT, // precision
4, // dimension
Idims4D, // size
Istride)); // stride
// create the tensor for output
cudnnTensorDescriptor_t out_desc;
checkCUDNN(cudnnCreateTensorDescriptor(&out_desc));
checkCUDNN(cudnnSetTensorNdDescriptor(out_desc, // descriptor
CUDNN_DATA_FLOAT, // precision
4, // dimension
Odims4D, // size
Ostride)); // stride
//// find convolution algorithm
int algos_cnt = 8;
cudnnConvolutionFwdAlgoPerf_t conv_algos[8];
checkCUDNN(cudnnFindConvolutionForwardAlgorithm(cudnn,
in_desc,
fil_desc,
conv_desc,
out_desc,
algos_cnt,
&algos_cnt,
conv_algos));
bool isfeasible = false;
for (int i = 0; i < algos_cnt && 0 == conv_algos[i].status; ++i) {
std::cout << "cudnnConvolutionFwdAlgo_t: " <<
CONV_FWD_ALGO[conv_algos[i].algo] <<
" (" << conv_algos[i].time << " ms)\n";
std::cout << "workspace: " << conv_algos[i].memory << " "
"determinism: " << conv_algos[i].determinism << " "
"mathType: " << conv_algos[i].mathType << std::endl;
isfeasible = true;
}
std::cout << std::endl;
if (!isfeasible) {
// dimensions check
std::cout << "Layout: {N, C, H, W}\n";
std::cout << "Wdims4D: {" << Wdims4D[0] << ", " << Wdims4D[1] <<
", " << Wdims4D[2] << ", " << Wdims4D[3] << "}\n";
std::cout << "Idims4D: {" << Idims4D[0] << ", " << Idims4D[1] <<
", " << Idims4D[2] << ", " << Idims4D[3] << "}\n";
std::cout << "Odims4D: {" << Odims4D[0] << ", " << Odims4D[1] <<
", " << Odims4D[2] << ", " << Odims4D[3] << "}\n";
checkCUDNN(cudnnGetConvolutionNdForwardOutputDim(conv_desc,
in_desc,
fil_desc,
4,
Odims4D));
std::cout << "Supposed to be\n";
std::cout << "Odims4D: {" << Odims4D[0] << ", " << Odims4D[1] <<
", " << Odims4D[2] << ", " << Odims4D[3] << "}\n";
} else {
// allocate memory on GPU
float *Wdata_GPU;
float *Idata_GPU;
float *Odata_GPU;
char *workspace = NULL;
checkCUDA(cudaMalloc(&Wdata_GPU, Wsize * sizeof(float)));
checkCUDA(cudaMalloc(&Idata_GPU, Isize * sizeof(float)));
checkCUDA(cudaMalloc(&Odata_GPU, Osize * sizeof(float)));
if (conv_algos[0].memory) {
checkCUDA(cudaMalloc(&workspace, conv_algos[0].memory));
}
// fill the Odata_GPU with bias
int bias_expand = Odims4D[0] * Odims4D[3]; // size to expand
cudaStream_t *cpystreams = new cudaStream_t[bias_expand];
for (int cnt = 0; cnt < bias_expand; ++cnt) {
checkCUDA(cudaStreamCreate(&cpystreams[cnt]));
checkCUDA(cudaMemcpyAsync(Odata_GPU + cnt * Bsize, Bdata,
Bsize * sizeof(float), cudaMemcpyHostToDevice,
cpystreams[cnt]));
}
for (int cnt = 0; cnt < bias_expand; ++cnt) {
checkCUDA(cudaStreamSynchronize(cpystreams[cnt]));
checkCUDA(cudaStreamDestroy(cpystreams[cnt]));
}
delete[] cpystreams;
// prepare data on GPU and compute
float alpha = 1.0;
float beta = 1.0;
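        // beta = 1.0 makes cudnnConvolutionForward accumulate into Odata_GPU
        // (y = alpha*conv(x,W) + beta*y), so the broadcast bias copied in above
        // is added to the convolution result instead of being overwritten.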
checkCUDA(cudaMemcpy(Wdata_GPU, Wdata4D, Wsize * sizeof(float),
cudaMemcpyHostToDevice));
checkCUDA(cudaMemcpy(Idata_GPU, Idata, Isize * sizeof(float),
cudaMemcpyHostToDevice));
checkCUDNN(cudnnConvolutionForward(cudnn,
&alpha,
in_desc,
Idata_GPU,
fil_desc,
Wdata_GPU,
conv_desc,
                                           conv_algos[0].algo,
                                           workspace,
conv_algos[0].memory,
&beta,
out_desc,
Odata_GPU));
checkCUDA(cudaMemcpy(Odata, Odata_GPU, Osize * sizeof(float),
cudaMemcpyDeviceToHost));
if (conv_algos[0].memory) {
checkCUDA(cudaFree(workspace));
}
checkCUDA(cudaFree(Odata_GPU));
//int Odims[3] = {Odims4D[2], Odims4D[0], Odims4D[3]};
int Odims[3] = {Odims4D[3], Odims4D[0], Odims4D[1]};
for (int batch = 0; batch < Odims[1]; ++batch) {
std::cout << "\033[1mOutputs[batch=" << batch << "]\033[0m\n";
for (int out_ch = 0; out_ch < Odims[2]; ++out_ch) {
std::cout << "\033[3" << (6 - out_ch) << "m[out_ch=" <<
out_ch << "]\033[0m[0:" << Odims[0] - 1 << "]: ";
for (int t = 0; t < Odims[0]; ++t) {
int Ocoo[3] = {t, batch, out_ch};
int offset = offsetTBC(Odims, Ocoo);
std::cout << std::setw(7) << std::fixed <<
std::setprecision(4) << Odata[offset] << " ";
}
std::cout << std::endl;
}
}
std::cout << std::endl;
// read the loss from file
int Ldims[3]; // {T, B, C}
ifs.open(argv[4]);
getline(ifs, dummy_line); // dummy line with "T B C"
ifs >> Ldims[0] >> Ldims[1] >> Ldims[2];
if (Ldims[0] != Odims[0] || Ldims[1] != Odims[1] || Ldims[2] != Odims[2]) {
std::cout << "Loss is supposed to have the same dimension as outputs\n";
} else {
int Lsize = Osize;
float *Ldata = new float[Lsize];
for (int batch = 0; batch < Ldims[1]; ++batch) {
for (int out_ch = 0; out_ch < Ldims[2]; ++out_ch) {
for (int t = 0; t < Ldims[0]; ++t) {
int Lcoo[3] = {t, batch, out_ch};
int offset = offsetTBC(Ldims, Lcoo);
ifs >> Ldata[offset];
}
}
}
ifs.close();
for (int batch = 0; batch < Ldims[1]; ++batch) {
std::cout << "\033[2mLoss[batch=" << batch << "]\033[0m\n";
for (int out_ch = 0; out_ch < Ldims[2]; ++out_ch) {
std::cout << "\033[3" << (6 - out_ch) << "m[out_ch=" <<
out_ch << "]\033[0m[0:" << Ldims[0] - 1 << "]: ";
for (int t = 0; t < Ldims[0]; ++t) {
int Lcoo[3] = {t, batch, out_ch};
int offset = offsetTBC(Ldims, Lcoo);
std::cout << std::setw(7) << std::fixed <<
std::setprecision(4) << Ldata[offset] << " ";
}
std::cout << std::endl;
}
}
std::cout << std::endl;
        // reorganize loss for back propagation
float *Ldata4D = new float[Osize];
reorg_loss(Ldims, Ldata, Ldata4D);
int Ldims4D[4] = {Ldims[1], // batch
Ldims[2], // output channels
1, // to makeup 4D tensor
Ldims[0]}; // time
int Lstride[4] = {Ldims4D[3] * Ldims4D[2] * Ldims4D[1],
Ldims4D[3] * Ldims4D[2],
Ldims4D[3], // to satisfy _CHW-packed
1}; // next time point is right after
// backward for bias
int Bdims4D[4] = {1, Bdims[0], 1, 1};
int Bstride[4] = {1, 1, 1, 1};
float *Ldata_GPU = NULL;
float *Bias_GPU = NULL;
checkCUDA(cudaMalloc(&Ldata_GPU, Lsize * sizeof(float)));
checkCUDA(cudaMalloc(&Bias_GPU, Bsize * sizeof(float)));
checkCUDA(cudaMemcpy(Ldata_GPU, Ldata4D, Lsize * sizeof(float),
cudaMemcpyHostToDevice));
checkCUDA(cudaMemset(Bias_GPU, 0, Bsize * sizeof(float)));
// create the tensor for loss
cudnnTensorDescriptor_t loss_desc;
checkCUDNN(cudnnCreateTensorDescriptor(&loss_desc));
checkCUDNN(cudnnSetTensorNdDescriptor(loss_desc, // descriptor
CUDNN_DATA_FLOAT, // precision
4, // dimension
Ldims4D, // size
Lstride)); // stride
        // create the tensor for bias gradient
cudnnTensorDescriptor_t bias_desc;
checkCUDNN(cudnnCreateTensorDescriptor(&bias_desc));
checkCUDNN(cudnnSetTensorNdDescriptor(bias_desc, // descriptor
CUDNN_DATA_FLOAT, // precision
4, // dimension
Bdims4D, // size
Bstride)); // stride
checkCUDNN(cudnnConvolutionBackwardBias(cudnn,
&alpha,
loss_desc,
Ldata_GPU,
&beta,
bias_desc,
Bias_GPU));
float *Bias = new float[Bsize];
checkCUDA(cudaMemcpy(Bias, Bias_GPU, Bsize * sizeof(float),
cudaMemcpyDeviceToHost));
checkCUDA(cudaFree(Bias_GPU));
cudnnDestroyTensorDescriptor(bias_desc);
std::cout << "\033[1mBias grad\033[0m\n[0:" << Bdims4D[1] - 1 << "]: ";
for (int out_ch = 0; out_ch < Bdims4D[1]; ++out_ch) {
std::cout << "\033[3" << (6 - out_ch) << "m" << std::setw(7) <<
std::fixed << std::setprecision(4) << Bias[out_ch] << " \033[0m";
}
        std::cout << std::endl << std::endl;
delete[] Bias;
// backward for weights
algos_cnt = 6;
cudnnConvolutionBwdFilterAlgoPerf_t bwd_f_algos[6];
checkCUDNN(cudnnFindConvolutionBackwardFilterAlgorithm(cudnn,
in_desc,
loss_desc,
conv_desc,
fil_desc,
algos_cnt,
&algos_cnt,
bwd_f_algos));
for (int i = 0; i < algos_cnt && 0 == bwd_f_algos[i].status; ++i) {
std::cout << "cudnnConvolutionBwdFilterAlgo_t " <<
FILTER_BWD_ALGO[bwd_f_algos[i].algo] <<
" (" << bwd_f_algos[i].time << " ms)\n";
std::cout << "workspace: " << bwd_f_algos[i].memory << " "
"determinism: " << bwd_f_algos[i].determinism << " "
"mathType: " << bwd_f_algos[i].mathType << std::endl;
}
std::cout << std::endl;
workspace = NULL;
if (bwd_f_algos[0].memory) {
checkCUDA(cudaMalloc(&workspace, bwd_f_algos[0].memory));
}
float *WGdata_GPU;
checkCUDA(cudaMalloc(&WGdata_GPU, Wsize * sizeof(float)));
checkCUDA(cudaMemset(WGdata_GPU, 0, Wsize * sizeof(float)));
checkCUDNN(cudnnConvolutionBackwardFilter(cudnn,
&alpha,
in_desc,
Idata_GPU,
loss_desc,
Ldata_GPU,
conv_desc,
bwd_f_algos[0].algo,
workspace,
bwd_f_algos[0].memory,
&beta,
fil_desc,
WGdata_GPU));
if (workspace) {
checkCUDA(cudaFree(workspace));
workspace = NULL;
}
checkCUDA(cudaMemcpy(Wdata4D, WGdata_GPU, Wsize * sizeof(float),
cudaMemcpyDeviceToHost));
checkCUDA(cudaFree(WGdata_GPU));
for (int out_ch = 0; out_ch < Wdims4D[0]; ++out_ch) {
            std::cout << "\033[1mWeights gradient\033[3" << (6 - out_ch) <<
"m[out_ch=" << out_ch << "]\033[0m\n";
for (int in_ch = 0; in_ch < Wdims4D[1]; ++in_ch) {
std::cout << "\033[9" << in_ch + 1 << "m[in_ch=" <<
in_ch << "]\033[0m[0:" << Wdims4D[3] - 1 << "]: ";
for (int k = 0; k < Wdims4D[3]; ++k) {
int offset = k + in_ch * Wdims4D[3] +
out_ch * Wdims4D[3] * Wdims4D[1];
std::cout << std::setw(7) << std::fixed <<
std::setprecision(4) << Wdata4D[offset] << " ";
}
std::cout << std::endl;
}
}
std::cout << std::endl;
// backward for inputs
algos_cnt = 6;
cudnnConvolutionBwdDataAlgoPerf_t bwd_d_algos[6];
checkCUDNN(cudnnFindConvolutionBackwardDataAlgorithm(cudnn,
fil_desc,
loss_desc,
conv_desc,
in_desc,
algos_cnt,
&algos_cnt,
bwd_d_algos));
for (int i = 0; i < algos_cnt && 0 == bwd_d_algos[i].status; ++i) {
std::cout << "cudnnConvolutionBwdDataAlgo_t " <<
DATA_BWD_ALGO[bwd_d_algos[i].algo] <<
" (" << bwd_d_algos[i].time << " ms)\n";
std::cout << "workspace: " << bwd_d_algos[i].memory << " "
"determinism: " << bwd_d_algos[i].determinism << " "
"mathType: " << bwd_d_algos[i].mathType << std::endl;
}
std::cout << std::endl;
workspace = NULL;
if (bwd_d_algos[0].memory) {
checkCUDA(cudaMalloc(&workspace, bwd_d_algos[0].memory));
}
float *IGdata_GPU = Idata_GPU; // could reuse
checkCUDA(cudaMemset(IGdata_GPU, 0, Isize * sizeof(float)));
checkCUDNN(cudnnConvolutionBackwardData(cudnn,
&alpha,
fil_desc,
Wdata_GPU,
loss_desc,
Ldata_GPU,
conv_desc,
bwd_d_algos[0].algo,
workspace,
bwd_d_algos[0].memory,
&beta,
in_desc,
IGdata_GPU));
if (workspace) {
checkCUDA(cudaFree(workspace));
workspace = NULL;
}
checkCUDA(cudaMemcpy(Idata, IGdata_GPU, Isize * sizeof(float),
cudaMemcpyDeviceToHost));
for (int batch = 0; batch < Idims[1]; ++batch) {
            std::cout << "\033[1mInputs gradient[batch=" << batch << "]\033[0m\n";
for (int in_ch = 0; in_ch < Idims[2]; ++in_ch) {
std::cout << "\033[9" << in_ch + 1 << "m[in_ch=" <<
in_ch << "]\033[0m[0:" << Idims[0] - 1 << "]: ";
for (int t = 0; t < Idims[0]; ++t) {
int Icoo[3] = {t, batch, in_ch};
int offset = offsetTBC(Idims, Icoo);
std::cout << std::setw(7) << std::fixed <<
std::setprecision(4) << Idata[offset] << " ";
}
std::cout << std::endl;
}
}
std::cout << std::endl;
        delete[] Ldata;
        delete[] Ldata4D;
checkCUDA(cudaFree(Ldata_GPU));
cudnnDestroyTensorDescriptor(loss_desc);
} // if(Ldims[] == Odims[])
checkCUDA(cudaFree(Idata_GPU));
checkCUDA(cudaFree(Wdata_GPU));
} // if(isfeasible)
    delete[] Wdata;
    delete[] Bdata;
    delete[] Idata;
    delete[] Odata;
    delete[] Wdata4D;
cudnnDestroyTensorDescriptor(out_desc);
cudnnDestroyTensorDescriptor(in_desc);
cudnnDestroyFilterDescriptor(fil_desc);
cudnnDestroyConvolutionDescriptor(conv_desc);
cudnnDestroy(cudnn);
return 0;
}
|
aeeb759d81a462634412d5cab088b88aba427152.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <faiss/gpu/utils/DeviceUtils.h>
#include <faiss/gpu/impl/Distance.cuh>
#include <faiss/gpu/impl/FlatIndex.cuh>
#include <faiss/gpu/impl/L2Norm.cuh>
#include <faiss/gpu/impl/VectorResidual.cuh>
#include <faiss/gpu/utils/ConversionOperators.cuh>
#include <faiss/gpu/utils/CopyUtils.cuh>
#include <faiss/gpu/utils/Transpose.cuh>
namespace faiss {
namespace gpu {
FlatIndex::FlatIndex(
GpuResources* res,
int dim,
bool useFloat16,
MemorySpace space)
: resources_(res),
dim_(dim),
useFloat16_(useFloat16),
space_(space),
num_(0),
rawData32_(
res,
AllocInfo(
AllocType::FlatData,
getCurrentDevice(),
space,
res->getDefaultStreamCurrentDevice())),
rawData16_(
res,
AllocInfo(
AllocType::FlatData,
getCurrentDevice(),
space,
res->getDefaultStreamCurrentDevice())) {}
bool FlatIndex::getUseFloat16() const {
return useFloat16_;
}
/// Returns the number of vectors we contain
idx_t FlatIndex::getSize() const {
if (useFloat16_) {
return vectorsHalf_.getSize(0);
} else {
return vectors_.getSize(0);
}
}
int FlatIndex::getDim() const {
return dim_;
}
void FlatIndex::reserve(size_t numVecs, hipStream_t stream) {
if (useFloat16_) {
rawData16_.reserve(numVecs * dim_ * sizeof(half), stream);
} else {
rawData32_.reserve(numVecs * dim_ * sizeof(float), stream);
}
// The above may have caused a reallocation, we need to update the vector
// types
if (useFloat16_) {
DeviceTensor<half, 2, true> vectors16(
(half*)rawData16_.data(), {num_, dim_});
vectorsHalf_ = std::move(vectors16);
} else {
DeviceTensor<float, 2, true> vectors32(
(float*)rawData32_.data(), {num_, dim_});
vectors_ = std::move(vectors32);
}
}
Tensor<float, 2, true>& FlatIndex::getVectorsFloat32Ref() {
// Should not call this unless we are in float32 mode
FAISS_ASSERT(!useFloat16_);
return vectors_;
}
Tensor<half, 2, true>& FlatIndex::getVectorsFloat16Ref() {
// Should not call this unless we are in float16 mode
FAISS_ASSERT(useFloat16_);
return vectorsHalf_;
}
void FlatIndex::query(
Tensor<float, 2, true>& input,
int k,
faiss::MetricType metric,
float metricArg,
Tensor<float, 2, true>& outDistances,
Tensor<idx_t, 2, true>& outIndices,
bool exactDistance) {
auto stream = resources_->getDefaultStreamCurrentDevice();
if (useFloat16_) {
// We need to convert the input to float16 for comparison to ourselves
auto inputHalf = convertTensorTemporary<float, half, 2>(
resources_, stream, input);
query(inputHalf,
k,
metric,
metricArg,
outDistances,
outIndices,
exactDistance);
} else {
bfKnnOnDevice(
resources_,
getCurrentDevice(),
stream,
vectors_,
true, // is vectors row major?
&norms_,
input,
true, // input is row major
k,
metric,
metricArg,
outDistances,
outIndices,
!exactDistance);
}
}
void FlatIndex::query(
Tensor<half, 2, true>& input,
int k,
faiss::MetricType metric,
float metricArg,
Tensor<float, 2, true>& outDistances,
Tensor<idx_t, 2, true>& outIndices,
bool exactDistance) {
FAISS_ASSERT(useFloat16_);
bfKnnOnDevice(
resources_,
getCurrentDevice(),
resources_->getDefaultStreamCurrentDevice(),
vectorsHalf_,
true, // is vectors row major?
&norms_,
input,
true, // input is row major
k,
metric,
metricArg,
outDistances,
outIndices,
!exactDistance);
}
void FlatIndex::computeResidual(
Tensor<float, 2, true>& vecs,
Tensor<idx_t, 1, true>& ids,
Tensor<float, 2, true>& residuals) {
if (useFloat16_) {
runCalcResidual(
vecs,
getVectorsFloat16Ref(),
ids,
residuals,
resources_->getDefaultStreamCurrentDevice());
} else {
runCalcResidual(
vecs,
getVectorsFloat32Ref(),
ids,
residuals,
resources_->getDefaultStreamCurrentDevice());
}
}
void FlatIndex::reconstruct(
idx_t start,
idx_t num,
Tensor<float, 2, true>& vecs) {
auto stream = resources_->getDefaultStreamCurrentDevice();
FAISS_ASSERT(vecs.getSize(0) == num);
FAISS_ASSERT(vecs.getSize(1) == dim_);
if (useFloat16_) {
runReconstruct(start, num, getVectorsFloat16Ref(), vecs, stream);
} else {
runReconstruct(start, num, getVectorsFloat32Ref(), vecs, stream);
}
}
void FlatIndex::reconstruct(
Tensor<idx_t, 1, true>& ids,
Tensor<float, 2, true>& vecs) {
auto stream = resources_->getDefaultStreamCurrentDevice();
FAISS_ASSERT(vecs.getSize(0) == ids.getSize(0));
FAISS_ASSERT(vecs.getSize(1) == dim_);
if (useFloat16_) {
runReconstruct(ids, getVectorsFloat16Ref(), vecs, stream);
} else {
runReconstruct(ids, getVectorsFloat32Ref(), vecs, stream);
}
}
void FlatIndex::add(const float* data, idx_t numVecs, hipStream_t stream) {
if (numVecs == 0) {
return;
}
// convert and add to float16 data if needed
if (useFloat16_) {
// Make sure that `data` is on our device; we'll run the
// conversion on our device
auto devData = toDeviceTemporary<float, 2>(
resources_,
getCurrentDevice(),
(float*)data,
stream,
{numVecs, dim_});
auto devDataHalf = convertTensorTemporary<float, half, 2>(
resources_, stream, devData);
rawData16_.append(
(char*)devDataHalf.data(),
devDataHalf.getSizeInBytes(),
stream,
true /* reserve exactly */);
} else {
// add to float32 data
rawData32_.append(
(char*)data,
(size_t)dim_ * numVecs * sizeof(float),
stream,
true /* reserve exactly */);
}
num_ += numVecs;
if (useFloat16_) {
DeviceTensor<half, 2, true> vectors16(
(half*)rawData16_.data(), {num_, dim_});
vectorsHalf_ = std::move(vectors16);
} else {
DeviceTensor<float, 2, true> vectors32(
(float*)rawData32_.data(), {num_, dim_});
vectors_ = std::move(vectors32);
}
// Precompute L2 norms of our database
if (useFloat16_) {
DeviceTensor<float, 1, true> norms(
resources_,
makeSpaceAlloc(AllocType::FlatData, space_, stream),
{num_});
runL2Norm(vectorsHalf_, true, norms, true, stream);
norms_ = std::move(norms);
} else {
DeviceTensor<float, 1, true> norms(
resources_,
makeSpaceAlloc(AllocType::FlatData, space_, stream),
{num_});
runL2Norm(vectors_, true, norms, true, stream);
norms_ = std::move(norms);
}
}
void FlatIndex::reset() {
rawData32_.clear();
rawData16_.clear();
vectors_ = DeviceTensor<float, 2, true>();
vectorsHalf_ = DeviceTensor<half, 2, true>();
norms_ = DeviceTensor<float, 1, true>();
num_ = 0;
}
} // namespace gpu
} // namespace faiss
| aeeb759d81a462634412d5cab088b88aba427152.cu | /**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <faiss/gpu/utils/DeviceUtils.h>
#include <faiss/gpu/impl/Distance.cuh>
#include <faiss/gpu/impl/FlatIndex.cuh>
#include <faiss/gpu/impl/L2Norm.cuh>
#include <faiss/gpu/impl/VectorResidual.cuh>
#include <faiss/gpu/utils/ConversionOperators.cuh>
#include <faiss/gpu/utils/CopyUtils.cuh>
#include <faiss/gpu/utils/Transpose.cuh>
namespace faiss {
namespace gpu {
FlatIndex::FlatIndex(
GpuResources* res,
int dim,
bool useFloat16,
MemorySpace space)
: resources_(res),
dim_(dim),
useFloat16_(useFloat16),
space_(space),
num_(0),
rawData32_(
res,
AllocInfo(
AllocType::FlatData,
getCurrentDevice(),
space,
res->getDefaultStreamCurrentDevice())),
rawData16_(
res,
AllocInfo(
AllocType::FlatData,
getCurrentDevice(),
space,
res->getDefaultStreamCurrentDevice())) {}
bool FlatIndex::getUseFloat16() const {
return useFloat16_;
}
/// Returns the number of vectors we contain
idx_t FlatIndex::getSize() const {
if (useFloat16_) {
return vectorsHalf_.getSize(0);
} else {
return vectors_.getSize(0);
}
}
int FlatIndex::getDim() const {
return dim_;
}
void FlatIndex::reserve(size_t numVecs, cudaStream_t stream) {
if (useFloat16_) {
rawData16_.reserve(numVecs * dim_ * sizeof(half), stream);
} else {
rawData32_.reserve(numVecs * dim_ * sizeof(float), stream);
}
// The above may have caused a reallocation, we need to update the vector
// types
if (useFloat16_) {
DeviceTensor<half, 2, true> vectors16(
(half*)rawData16_.data(), {num_, dim_});
vectorsHalf_ = std::move(vectors16);
} else {
DeviceTensor<float, 2, true> vectors32(
(float*)rawData32_.data(), {num_, dim_});
vectors_ = std::move(vectors32);
}
}
Tensor<float, 2, true>& FlatIndex::getVectorsFloat32Ref() {
// Should not call this unless we are in float32 mode
FAISS_ASSERT(!useFloat16_);
return vectors_;
}
Tensor<half, 2, true>& FlatIndex::getVectorsFloat16Ref() {
// Should not call this unless we are in float16 mode
FAISS_ASSERT(useFloat16_);
return vectorsHalf_;
}
void FlatIndex::query(
Tensor<float, 2, true>& input,
int k,
faiss::MetricType metric,
float metricArg,
Tensor<float, 2, true>& outDistances,
Tensor<idx_t, 2, true>& outIndices,
bool exactDistance) {
auto stream = resources_->getDefaultStreamCurrentDevice();
if (useFloat16_) {
// We need to convert the input to float16 for comparison to ourselves
auto inputHalf = convertTensorTemporary<float, half, 2>(
resources_, stream, input);
query(inputHalf,
k,
metric,
metricArg,
outDistances,
outIndices,
exactDistance);
} else {
bfKnnOnDevice(
resources_,
getCurrentDevice(),
stream,
vectors_,
true, // is vectors row major?
&norms_,
input,
true, // input is row major
k,
metric,
metricArg,
outDistances,
outIndices,
!exactDistance);
}
}
void FlatIndex::query(
Tensor<half, 2, true>& input,
int k,
faiss::MetricType metric,
float metricArg,
Tensor<float, 2, true>& outDistances,
Tensor<idx_t, 2, true>& outIndices,
bool exactDistance) {
FAISS_ASSERT(useFloat16_);
bfKnnOnDevice(
resources_,
getCurrentDevice(),
resources_->getDefaultStreamCurrentDevice(),
vectorsHalf_,
true, // is vectors row major?
&norms_,
input,
true, // input is row major
k,
metric,
metricArg,
outDistances,
outIndices,
!exactDistance);
}
void FlatIndex::computeResidual(
Tensor<float, 2, true>& vecs,
Tensor<idx_t, 1, true>& ids,
Tensor<float, 2, true>& residuals) {
if (useFloat16_) {
runCalcResidual(
vecs,
getVectorsFloat16Ref(),
ids,
residuals,
resources_->getDefaultStreamCurrentDevice());
} else {
runCalcResidual(
vecs,
getVectorsFloat32Ref(),
ids,
residuals,
resources_->getDefaultStreamCurrentDevice());
}
}
void FlatIndex::reconstruct(
idx_t start,
idx_t num,
Tensor<float, 2, true>& vecs) {
auto stream = resources_->getDefaultStreamCurrentDevice();
FAISS_ASSERT(vecs.getSize(0) == num);
FAISS_ASSERT(vecs.getSize(1) == dim_);
if (useFloat16_) {
runReconstruct(start, num, getVectorsFloat16Ref(), vecs, stream);
} else {
runReconstruct(start, num, getVectorsFloat32Ref(), vecs, stream);
}
}
void FlatIndex::reconstruct(
Tensor<idx_t, 1, true>& ids,
Tensor<float, 2, true>& vecs) {
auto stream = resources_->getDefaultStreamCurrentDevice();
FAISS_ASSERT(vecs.getSize(0) == ids.getSize(0));
FAISS_ASSERT(vecs.getSize(1) == dim_);
if (useFloat16_) {
runReconstruct(ids, getVectorsFloat16Ref(), vecs, stream);
} else {
runReconstruct(ids, getVectorsFloat32Ref(), vecs, stream);
}
}
void FlatIndex::add(const float* data, idx_t numVecs, cudaStream_t stream) {
if (numVecs == 0) {
return;
}
// convert and add to float16 data if needed
if (useFloat16_) {
// Make sure that `data` is on our device; we'll run the
// conversion on our device
auto devData = toDeviceTemporary<float, 2>(
resources_,
getCurrentDevice(),
(float*)data,
stream,
{numVecs, dim_});
auto devDataHalf = convertTensorTemporary<float, half, 2>(
resources_, stream, devData);
rawData16_.append(
(char*)devDataHalf.data(),
devDataHalf.getSizeInBytes(),
stream,
true /* reserve exactly */);
} else {
// add to float32 data
rawData32_.append(
(char*)data,
(size_t)dim_ * numVecs * sizeof(float),
stream,
true /* reserve exactly */);
}
num_ += numVecs;
if (useFloat16_) {
DeviceTensor<half, 2, true> vectors16(
(half*)rawData16_.data(), {num_, dim_});
vectorsHalf_ = std::move(vectors16);
} else {
DeviceTensor<float, 2, true> vectors32(
(float*)rawData32_.data(), {num_, dim_});
vectors_ = std::move(vectors32);
}
// Precompute L2 norms of our database
if (useFloat16_) {
DeviceTensor<float, 1, true> norms(
resources_,
makeSpaceAlloc(AllocType::FlatData, space_, stream),
{num_});
runL2Norm(vectorsHalf_, true, norms, true, stream);
norms_ = std::move(norms);
} else {
DeviceTensor<float, 1, true> norms(
resources_,
makeSpaceAlloc(AllocType::FlatData, space_, stream),
{num_});
runL2Norm(vectors_, true, norms, true, stream);
norms_ = std::move(norms);
}
}
void FlatIndex::reset() {
rawData32_.clear();
rawData16_.clear();
vectors_ = DeviceTensor<float, 2, true>();
vectorsHalf_ = DeviceTensor<half, 2, true>();
norms_ = DeviceTensor<float, 1, true>();
num_ = 0;
}
} // namespace gpu
} // namespace faiss
|
a32ba049e4c45b94b37ac2967c04c34c5f636905.hip | // !!! This is a file automatically generated by hipify!!!
/*
* This program performs a convolution of randomly generated data or sound data
* from a .wav file if one is provided. The convolution is performed on the
* the CPU and then on the GPU. The speedup is calculated. The program is
* executed like this if no sound file is provided:
* ./blur -t <thread_count> -b <block_count>
* and like this if a sound file is provided:
* ./blur -t <thread_count> -b <block_count> -i <sound file> -o <output sound file>
*
* The thread count specifies the number of threads in a block. The block count
* specifies the maximum number of blocks.
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <assert.h>
#include <sndfile.h>
#include <hip/hip_runtime.h>
/* macro to check a cuda call and exit if the call generates an error */
#define CHECK(call) \
{ \
const hipError_t error = call; \
if (error != hipSuccess) \
{ \
fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__); \
fprintf(stderr, "code: %d, reason: %s\n", error, \
hipGetErrorString(error)); \
exit(1); \
} \
}
/* sound file struct */
typedef struct
{
int sndFile; //sound file given?
char * outputFile; //name of output file
float * inputSndData; //data from sound file
float * outputSndData; //data to write to output file
SF_INFO infInfo; //info about input sound file
} sndFileT;
/* CPU functions */
float gaussian(float x, float mean, float std);
float * generateBlurVector();
int isDecDigits(char * str);
void parseCmdLineArgs(int argc, char **argv, int * blocks, int * threads,
char ** inFile, char ** outFile);
void verbose();
int checkArgs(int blocks, int threads, char * inFile, char * outFile);
void getSoundData(sndFileT * snd, char * inFile, char * outFile);
void getInputData(sndFileT * snd, float * h_inputData, int nFrames,
int nChannels, int channel);
float cpuBlur(float * h_inputData, float * h_outputData, float * h_blurV,
int nFrames);
float gpuBlur(float * h_inputData, float * h_outputData, float * h_blurV,
int nFrames, int blocks, int threads);
void gaussianTests(sndFileT * snd, float * blurV, int blocks, int threads);
void compare(float * h_outputDataFromDev, float * h_outputData, int nFrames);
void writeSndData(sndFileT * snd);
/* GPU function */
__global__ void cudaBlurKernel(float * d_inputData, float * d_outputData,
float * blurV, int nFrames);
#define PI 3.14159265358979f
#define GAUSSIAN_SIDE_WIDTH 10
#define GAUSSIAN_SIZE (2 * GAUSSIAN_SIDE_WIDTH + 1)
/* computes gaussian function */
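/* g(x) = 1 / (std * sqrt(2*pi)) * exp(-((x - mean)^2) / (2 * std^2)) */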
float gaussian(float x, float mean, float std)
{
return (1 / (std * sqrt(2 * PI))) *
exp(-1.0 / 2.0 * pow((x - mean) / std, 2));
}
/* uses gaussian function to compute a blur vector */
float * generateBlurVector()
{
//constants to use in gaussian function
float mean = 0.0;
float std = 5.0;
int i;
//create array to hold the blur vector values
float * blurV = (float *) malloc(sizeof(float) * GAUSSIAN_SIZE);
for (i = -GAUSSIAN_SIDE_WIDTH; i <= GAUSSIAN_SIDE_WIDTH; i++)
blurV[ GAUSSIAN_SIDE_WIDTH + i ] = gaussian((float)i, mean, std);
// Normalize to avoid clipping and/or hearing loss
float total = 0.0;
for (i = 0; i < GAUSSIAN_SIZE; i++)
total += blurV[i];
// Normalize by a factor of total
for (i = 0; i < GAUSSIAN_SIZE; i++)
blurV[i] /= total;
return blurV;
}
/* returns 1 if the string contains only the characters '0' - '9' */
int isDecDigits(char * str)
{
int i;
int len = strlen(str);
for (i = 0; i < len; i++)
{
if (str[i] < '0' || str[i] > '9') return 0;
}
return 1;
}
/* parses the command line arguments to get the number of blocks,
threads, input file name, and output file name
*/
void parseCmdLineArgs(int argc, char **argv, int * blocks, int * threads,
char ** inFile, char ** outFile)
{
int i;
for (i = 1; i < argc; i++)
{
if (strcmp(argv[i], "-b") == 0 && (i+1) < argc && isDecDigits(argv[i+1]))
(*blocks) = atoi(argv[i+1]);
else if (strcmp(argv[i], "-t") == 0 && (i+1) < argc && isDecDigits(argv[i+1]))
(*threads) = atoi(argv[i+1]);
else if (strcmp(argv[i], "-i") == 0 && (i+1) < argc)
(*inFile) = argv[i+1];
else if (strcmp(argv[i], "-o") == 0 && (i+1) < argc)
(*outFile) = argv[i+1];
else if (strcmp(argv[i], "-v") == 0)
verbose();
}
}
/* prints usage information and exits */
void verbose()
{
printf("Usage: blur -b <number of blocks> -t <threads per block> ");
printf("[-i <input wav file> -o <output wav file>]\n");
exit(0);
}
/* checks command line arguments.
Users must supply blocks and threads. The input file and output file
are optional but must be supplied together if used.
*/
int checkArgs(int blocks, int threads, char * inFile, char * outFile)
{
if (blocks == 0 || threads == 0) return 1;
   if (inFile == NULL && outFile != NULL) return 1;
   if (inFile != NULL && outFile == NULL) return 1;
return 0;
}
/* gets data for the convolution from either the data read from a sound file
or randomly generates it.
*/
void getInputData(sndFileT *snd, float * inputData,
int nFrames, int nChannels, int channel)
{
int i;
if (!(snd->sndFile))
{
for (i = 0; i < nFrames; i++)
inputData[i] = ((float) rand()) / RAND_MAX;
} else
{
for (i = 0; i < nFrames; i++)
inputData[i] = snd->inputSndData[(i * nChannels) + channel];
}
}
/* Opens the sound file and reads the contents, filling the
sndFileT struct. The sound file may contain multiple channels,
for example, stereo data.
*/
void getSoundData(sndFileT * snd, char * inFile, char * outFile)
{
SNDFILE *inf;
SF_INFO infInfo;
int amtRead;
// Open input audio file
inf = sf_open(inFile, SFM_READ, &infInfo);
if (!inf)
{
printf("Cannot open input file: %s\n", inFile);
verbose();
}
// Read audio
snd->infInfo = infInfo;
snd->outputFile = outFile;
snd->inputSndData = (float *) malloc(sizeof(float) * infInfo.frames * infInfo.channels);
snd->outputSndData = (float *) malloc(sizeof(float) * infInfo.frames * infInfo.channels);
amtRead = sf_read_float(inf, snd->inputSndData, infInfo.frames * infInfo.channels);
assert(amtRead == infInfo.frames * infInfo.channels);
sf_close(inf);
}
/* performs the gaussian tests, first on the CPU and then on the GPU */
void gaussianTests(sndFileT * snd, float * h_blurV, int blocks, int threads)
{
int i;
float cpuTime, gpuTime;
int nFrames = 1e7, nChannels = 1; //defaults
//if a sound file was given, use the frames and channels
//in the sound file
if (snd->sndFile)
{
nChannels = snd->infInfo.channels;
nFrames = snd->infInfo.frames;
}
//Host side: per channel input data
float * h_inputData = (float *) malloc(sizeof (float) * nFrames);
float * h_outputData = (float *) malloc(sizeof (float) * nFrames);
float * h_outputDataFromDev = (float *) malloc(sizeof(float) * nFrames);
for (i = 0; i < nChannels; i++)
{
getInputData(snd, h_inputData, nFrames, nChannels, i);
//perform the convolution on the CPU
printf("CPU Blurring ....\n");
cpuTime = cpuBlur(h_inputData, h_outputData, h_blurV, nFrames);
//perform the convolution on the GPU
printf("GPU Blurring ....\n");
gpuTime = gpuBlur(h_inputData, h_outputDataFromDev, h_blurV, nFrames,
blocks, threads);
//compare the results to make sure they match
printf("Comparing ... ");
compare(h_outputDataFromDev, h_outputData, nFrames);
printf("outputs match.\n");
printf("CPU time: %f milliseconds\n", cpuTime);
printf("GPU time: %f milliseconds\n", gpuTime);
printf("Speedup overall: %f\n", cpuTime / gpuTime);
//if a sound file was given, save the result to write
//later to an output file
if (snd->sndFile)
{
for (int j = 0; j < nFrames; j++)
snd->outputSndData[j * nChannels + i] = h_outputDataFromDev[j];
}
}
//if a sound file was given, save the result to the output file
if (snd->sndFile)
{
writeSndData(snd);
}
//free the dynamically allocated data
free(h_inputData);
free(h_outputData);
free(h_outputDataFromDev);
}
/* write the convoluted data to the output file */
void writeSndData(sndFileT * snd)
{
SNDFILE *outFile;
SF_INFO outInfo;
int amt = snd->infInfo.frames * snd->infInfo.channels;
outInfo = snd->infInfo;
outFile = sf_open(snd->outputFile, SFM_WRITE, &outInfo);
if (!outFile)
{
printf("Cannot open output file, exiting\n");
exit(EXIT_FAILURE);
}
sf_write_float(outFile, snd->outputSndData, amt);
sf_close(outFile);
}
/* compare the output computed by the CPU to the output computed by the GPU
It should be almost exactly the same.
*/
void compare(float * h_outputDataFromDev, float * h_outputData, int nFrames)
{
int i;
for (i = 0; i < nFrames; i++)
{
if (fabs(h_outputDataFromDev[i] - h_outputData[i]) >= 1e-6)
{
printf("Incorrect output at index %d: host: %f, device: %f\n",
i, h_outputData[i], h_outputDataFromDev[i]);
exit(0);
}
}
}
/* set up what is needed to perform the convolution on the GPU and launch
the kernel to do the convolution.
*/
float gpuBlur(float * h_inputData, float * h_outputDataFromDev, float * h_blurV, int nFrames,
int blocks, int threads)
{
//To Do: Use the blocks and threads passed in to set the
// blocks and threads for the grid and the blocks in the grid.
// Make sure the number of threads in a block is not larger
// than the hardware allowable number of threads per block.
// The blocks provided by the user is the maximum number of
// blocks, but blocks * threads can not be larger the
// number of nFrames. So blocks may need to be set to a
// a smaller value.
threads = 0; /* min(threads, .... ); */
blocks = 0; /* min(blocks, .....); */
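    /* Illustrative sketch only (not part of the original skeleton): one way to
       satisfy the constraints described above, assuming a 1024-thread-per-block
       hardware limit, is
           threads = min(threads, 1024);
           blocks = min(blocks, nFrames / threads);
       The zero placeholders above are left unchanged for the exercise. */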
//To Do: create the grid and the blocks (both one dimensional)
hipEvent_t start_gpu, stop_gpu;
float gpuMsecTime = -1;
float * d_inputData = NULL;
float * d_outputData = NULL;
float * d_blurV = NULL;
//To Do: allocate the arrays for the input, output and blur vector on the
// device side and set the arrays to the appropriate values
//use the cuda event functions for timing
CHECK(hipEventCreate(&start_gpu));
CHECK(hipEventCreate(&stop_gpu));
CHECK(hipEventRecord(start_gpu));
//To Do: launch the kernel
//check if the launch caused an error
CHECK(hipGetLastError());
//wait until threads are finished and get the time
CHECK(hipEventRecord(stop_gpu));
CHECK(hipEventSynchronize(stop_gpu)); //wait until the GPU is done
CHECK(hipEventElapsedTime(&gpuMsecTime, start_gpu, stop_gpu));
   //To Do: copy the device output into the host h_outputDataFromDev array
CHECK(hipMemcpy(h_outputDataFromDev, d_outputData,
(nFrames * sizeof(float)), hipMemcpyDeviceToHost));
//To Do: free the dynamically allocate device arrays
return gpuMsecTime;
}
/* perform the convolution on the gpu */
__global__ void cudaBlurKernel(float * d_inputData, float * d_outputData,
float * d_blurV, int nFrames)
{
/* To Do: Provide the code to perform the convolution on the device side. */
/* Use the cpuBlur function as a guide. */
}
/* perform the convolution on the cpu */
float cpuBlur(float * h_inputData, float * h_outputData, float * blurV, int nFrames)
{
int i, j;
float cpuMsecTime = -1;
memset(h_outputData, 0, nFrames * sizeof (float));
hipEvent_t start_cpu, stop_cpu;
CHECK(hipEventCreate(&start_cpu));
CHECK(hipEventCreate(&stop_cpu));
CHECK(hipEventRecord(start_cpu));
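    // The first loop handles the left boundary (i < GAUSSIAN_SIZE), where only
    // i + 1 input samples are available; the second loop applies the full
    // GAUSSIAN_SIZE-tap kernel to the remaining frames.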
for (i = 0; i < GAUSSIAN_SIZE; i++)
{
for (j = 0; j <= i; j++)
{
h_outputData[i] += h_inputData[i - j] * blurV[j];
}
}
for (i = GAUSSIAN_SIZE; i < nFrames; i++)
{
for (j = 0; j < GAUSSIAN_SIZE; j++)
{
h_outputData[i] += h_inputData[i - j] * blurV[j];
}
}
// Stop timer
CHECK(hipEventRecord(stop_cpu));
CHECK(hipEventSynchronize(stop_cpu));
CHECK(hipEventElapsedTime(&cpuMsecTime, start_cpu, stop_cpu));
return cpuMsecTime;
}
/* to run the convolution, the main must be supplied, at the minimum,
the number of blocks and threads created to perform the convolution
on the gpu. For example, 32 blocks and 1024 threads would be
specified like this:
./blur -b 32 -t 1024
Optionally, the user can also provide an input file and an output
file. The input file should be a wav file. The output file will
contain the convoluted input file. If an input file is not provided,
the program randomly generates data for the convolution.
*/
int main(int argc, char **argv)
{
int blocks = 0, threads = 0, badArgs = 0;
char *inFile = NULL, *outFile = NULL;
sndFileT snd;
float *blurV = NULL;
//parse the command line arguments and make sure they
//are good
parseCmdLineArgs(argc, argv, &blocks, &threads, &inFile, &outFile);
badArgs = checkArgs(blocks, threads, inFile, outFile);
if (badArgs) verbose();
snd.sndFile = (inFile != NULL) && (outFile != NULL);
//generate the blur vector
blurV = generateBlurVector();
//if a user provided a sound file, read the data
//and fill the snd sndFileT struct
if (snd.sndFile) getSoundData(&snd, inFile, outFile);
//perform the convolution on the CPU and the GPU
gaussianTests(&snd, blurV, blocks, threads);
//free the blurV and reset the GPU
free(blurV);
CHECK(hipDeviceReset());
}
| a32ba049e4c45b94b37ac2967c04c34c5f636905.cu | /*
* This program performs a convolution of randomly generated data or sound data
* from a .wav file if one is provided. The convolution is performed on the
* the CPU and then on the GPU. The speedup is calculated. The program is
* executed like this if no sound file is provided:
* ./blur -t <thread_count> -b <block_count>
* and like this if a sound file is provided:
* ./blur -t <thread_count> -b <block_count> -i <sound file> -o <output sound file>
*
* The thread count specifies the number of threads in a block. The block count
* specifies the maximum number of blocks.
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <assert.h>
#include <sndfile.h>
#include <cuda_runtime.h>
/* macro to check a cuda call and exit if the call generates an error */
#define CHECK(call) \
{ \
const cudaError_t error = call; \
if (error != cudaSuccess) \
{ \
fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__); \
fprintf(stderr, "code: %d, reason: %s\n", error, \
cudaGetErrorString(error)); \
exit(1); \
} \
}
/* sound file struct */
typedef struct
{
int sndFile; //sound file given?
char * outputFile; //name of output file
float * inputSndData; //data from sound file
float * outputSndData; //data to write to output file
SF_INFO infInfo; //info about input sound file
} sndFileT;
/* CPU functions */
float gaussian(float x, float mean, float std);
float * generateBlurVector();
int isDecDigits(char * str);
void parseCmdLineArgs(int argc, char **argv, int * blocks, int * threads,
char ** inFile, char ** outFile);
void verbose();
int checkArgs(int blocks, int threads, char * inFile, char * outFile);
void getSoundData(sndFileT * snd, char * inFile, char * outFile);
void getInputData(sndFileT * snd, float * h_inputData, int nFrames,
int nChannels, int channel);
float cpuBlur(float * h_inputData, float * h_outputData, float * h_blurV,
int nFrames);
float gpuBlur(float * h_inputData, float * h_outputData, float * h_blurV,
int nFrames, int blocks, int threads);
void gaussianTests(sndFileT * snd, float * blurV, int blocks, int threads);
void compare(float * h_outputDataFromDev, float * h_outputData, int nFrames);
void writeSndData(sndFileT * snd);
/* GPU function */
__global__ void cudaBlurKernel(float * d_inputData, float * d_outputData,
float * blurV, int nFrames);
#define PI 3.14159265358979f
#define GAUSSIAN_SIDE_WIDTH 10
#define GAUSSIAN_SIZE (2 * GAUSSIAN_SIDE_WIDTH + 1)
/* computes gaussian function */
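/* g(x) = 1 / (std * sqrt(2*pi)) * exp(-((x - mean)^2) / (2 * std^2)) */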
float gaussian(float x, float mean, float std)
{
return (1 / (std * sqrt(2 * PI))) *
exp(-1.0 / 2.0 * pow((x - mean) / std, 2));
}
/* uses gaussian function to compute a blur vector */
float * generateBlurVector()
{
//constants to use in gaussian function
float mean = 0.0;
float std = 5.0;
int i;
//create array to hold the blur vector values
float * blurV = (float *) malloc(sizeof(float) * GAUSSIAN_SIZE);
for (i = -GAUSSIAN_SIDE_WIDTH; i <= GAUSSIAN_SIDE_WIDTH; i++)
blurV[ GAUSSIAN_SIDE_WIDTH + i ] = gaussian((float)i, mean, std);
// Normalize to avoid clipping and/or hearing loss
float total = 0.0;
for (i = 0; i < GAUSSIAN_SIZE; i++)
total += blurV[i];
// Normalize by a factor of total
for (i = 0; i < GAUSSIAN_SIZE; i++)
blurV[i] /= total;
return blurV;
}
/* returns 1 if the string contains only the characters '0' - '9' */
int isDecDigits(char * str)
{
int i;
int len = strlen(str);
for (i = 0; i < len; i++)
{
if (str[i] < '0' || str[i] > '9') return 0;
}
return 1;
}
/* parses the command line arguments to get the number of blocks,
threads, input file name, and output file name
*/
void parseCmdLineArgs(int argc, char **argv, int * blocks, int * threads,
char ** inFile, char ** outFile)
{
int i;
for (i = 1; i < argc; i++)
{
if (strcmp(argv[i], "-b") == 0 && (i+1) < argc && isDecDigits(argv[i+1]))
(*blocks) = atoi(argv[i+1]);
else if (strcmp(argv[i], "-t") == 0 && (i+1) < argc && isDecDigits(argv[i+1]))
(*threads) = atoi(argv[i+1]);
else if (strcmp(argv[i], "-i") == 0 && (i+1) < argc)
(*inFile) = argv[i+1];
else if (strcmp(argv[i], "-o") == 0 && (i+1) < argc)
(*outFile) = argv[i+1];
else if (strcmp(argv[i], "-v") == 0)
verbose();
}
}
/* prints usage information and exits */
void verbose()
{
printf("Usage: blur -b <number of blocks> -t <threads per block> ");
printf("[-i <input wav file> -o <output wav file>]\n");
exit(0);
}
/* checks command line arguments.
Users must supply blocks and threads. The input file and output file
are optional but must be supplied together if used.
*/
int checkArgs(int blocks, int threads, char * inFile, char * outFile)
{
if (blocks == 0 || threads == 0) return 1;
   if (inFile == NULL && outFile != NULL) return 1;
   if (inFile != NULL && outFile == NULL) return 1;
return 0;
}
/* gets data for the convolution from either the data read from a sound file
or randomly generates it.
*/
void getInputData(sndFileT *snd, float * inputData,
int nFrames, int nChannels, int channel)
{
int i;
if (!(snd->sndFile))
{
for (i = 0; i < nFrames; i++)
inputData[i] = ((float) rand()) / RAND_MAX;
} else
{
for (i = 0; i < nFrames; i++)
inputData[i] = snd->inputSndData[(i * nChannels) + channel];
}
}
/* Opens the sound file and reads the contents, filling the
sndFileT struct. The sound file may contain multiple channels,
for example, stereo data.
*/
void getSoundData(sndFileT * snd, char * inFile, char * outFile)
{
SNDFILE *inf;
SF_INFO infInfo;
int amtRead;
// Open input audio file
inf = sf_open(inFile, SFM_READ, &infInfo);
if (!inf)
{
printf("Cannot open input file: %s\n", inFile);
verbose();
}
// Read audio
snd->infInfo = infInfo;
snd->outputFile = outFile;
snd->inputSndData = (float *) malloc(sizeof(float) * infInfo.frames * infInfo.channels);
snd->outputSndData = (float *) malloc(sizeof(float) * infInfo.frames * infInfo.channels);
amtRead = sf_read_float(inf, snd->inputSndData, infInfo.frames * infInfo.channels);
assert(amtRead == infInfo.frames * infInfo.channels);
sf_close(inf);
}
/* performs the gaussian tests, first on the CPU and then on the GPU */
void gaussianTests(sndFileT * snd, float * h_blurV, int blocks, int threads)
{
int i;
float cpuTime, gpuTime;
int nFrames = 1e7, nChannels = 1; //defaults
//if a sound file was given, use the frames and channels
//in the sound file
if (snd->sndFile)
{
nChannels = snd->infInfo.channels;
nFrames = snd->infInfo.frames;
}
//Host side: per channel input data
float * h_inputData = (float *) malloc(sizeof (float) * nFrames);
float * h_outputData = (float *) malloc(sizeof (float) * nFrames);
float * h_outputDataFromDev = (float *) malloc(sizeof(float) * nFrames);
for (i = 0; i < nChannels; i++)
{
getInputData(snd, h_inputData, nFrames, nChannels, i);
//perform the convolution on the CPU
printf("CPU Blurring ....\n");
cpuTime = cpuBlur(h_inputData, h_outputData, h_blurV, nFrames);
//perform the convolution on the GPU
printf("GPU Blurring ....\n");
gpuTime = gpuBlur(h_inputData, h_outputDataFromDev, h_blurV, nFrames,
blocks, threads);
//compare the results to make sure they match
printf("Comparing ... ");
compare(h_outputDataFromDev, h_outputData, nFrames);
printf("outputs match.\n");
printf("CPU time: %f milliseconds\n", cpuTime);
printf("GPU time: %f milliseconds\n", gpuTime);
printf("Speedup overall: %f\n", cpuTime / gpuTime);
//if a sound file was given, save the result to write
//later to an output file
if (snd->sndFile)
{
for (int j = 0; j < nFrames; j++)
snd->outputSndData[j * nChannels + i] = h_outputDataFromDev[j];
}
}
//if a sound file was given, save the result to the output file
if (snd->sndFile)
{
writeSndData(snd);
}
//free the dynamically allocated data
free(h_inputData);
free(h_outputData);
free(h_outputDataFromDev);
}
/* write the convoluted data to the output file */
void writeSndData(sndFileT * snd)
{
SNDFILE *outFile;
SF_INFO outInfo;
int amt = snd->infInfo.frames * snd->infInfo.channels;
outInfo = snd->infInfo;
outFile = sf_open(snd->outputFile, SFM_WRITE, &outInfo);
if (!outFile)
{
printf("Cannot open output file, exiting\n");
exit(EXIT_FAILURE);
}
sf_write_float(outFile, snd->outputSndData, amt);
sf_close(outFile);
}
/* compare the output computed by the CPU to the output computed by the GPU
It should be almost exactly the same.
*/
void compare(float * h_outputDataFromDev, float * h_outputData, int nFrames)
{
int i;
for (i = 0; i < nFrames; i++)
{
if (fabs(h_outputDataFromDev[i] - h_outputData[i]) >= 1e-6)
{
printf("Incorrect output at index %d: host: %f, device: %f\n",
i, h_outputData[i], h_outputDataFromDev[i]);
exit(0);
}
}
}
/* set up what is needed to perform the convolution on the GPU and launch
the kernel to do the convolution.
*/
float gpuBlur(float * h_inputData, float * h_outputDataFromDev, float * h_blurV, int nFrames,
int blocks, int threads)
{
//To Do: Use the blocks and threads passed in to set the
// blocks and threads for the grid and the blocks in the grid.
// Make sure the number of threads in a block is not larger
// than the hardware allowable number of threads per block.
// The blocks provided by the user is the maximum number of
// blocks, but blocks * threads can not be larger the
// number of nFrames. So blocks may need to be set to a
// a smaller value.
threads = 0; /* min(threads, .... ); */
blocks = 0; /* min(blocks, .....); */
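    /* Illustrative sketch only (not part of the original skeleton): one way to
       satisfy the constraints described above, assuming a 1024-thread-per-block
       hardware limit, is
           threads = min(threads, 1024);
           blocks = min(blocks, nFrames / threads);
       The zero placeholders above are left unchanged for the exercise. */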
//To Do: create the grid and the blocks (both one dimensional)
cudaEvent_t start_gpu, stop_gpu;
float gpuMsecTime = -1;
float * d_inputData = NULL;
float * d_outputData = NULL;
float * d_blurV = NULL;
//To Do: allocate the arrays for the input, output and blur vector on the
// device side and set the arrays to the appropriate values
// Hedged sketch (assumes GAUSSIAN_SIZE is the length of h_blurV, as in cpuBlur):
CHECK(cudaMalloc((void **) &d_inputData, nFrames * sizeof(float)));
CHECK(cudaMalloc((void **) &d_outputData, nFrames * sizeof(float)));
CHECK(cudaMalloc((void **) &d_blurV, GAUSSIAN_SIZE * sizeof(float)));
CHECK(cudaMemcpy(d_inputData, h_inputData, nFrames * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(d_blurV, h_blurV, GAUSSIAN_SIZE * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemset(d_outputData, 0, nFrames * sizeof(float)));
//use the cuda event functions for timing
CHECK(cudaEventCreate(&start_gpu));
CHECK(cudaEventCreate(&stop_gpu));
CHECK(cudaEventRecord(start_gpu));
//To Do: launch the kernel
cudaBlurKernel<<<dimGrid, dimBlock>>>(d_inputData, d_outputData, d_blurV, nFrames);
//check if the launch caused an error
CHECK(cudaGetLastError());
//wait until threads are finished and get the time
CHECK(cudaEventRecord(stop_gpu));
CHECK(cudaEventSynchronize(stop_gpu)); //wait until the GPU is done
CHECK(cudaEventElapsedTime(&gpuMsecTime, start_gpu, stop_gpu));
//To Do: copy the device output into the host h_outDataFromDev array
CHECK(cudaMemcpy(h_outputDataFromDev, d_outputData,
(nFrames * sizeof(float)), cudaMemcpyDeviceToHost));
//To Do: free the dynamically allocated device arrays
CHECK(cudaFree(d_inputData));
CHECK(cudaFree(d_outputData));
CHECK(cudaFree(d_blurV));
return gpuMsecTime;
}
/* perform the convolution on the gpu */
__global__ void cudaBlurKernel(float * d_inputData, float * d_outputData,
float * d_blurV, int nFrames)
{
/* To Do: Provide the code to perform the convolution on the device side. */
/* Use the cpuBlur function as a guide. */
/* Hedged sketch mirroring cpuBlur: a grid-stride loop, so the kernel is correct
even when blocks * threads is smaller than nFrames. Assumes GAUSSIAN_SIZE is a
compile-time constant visible to device code, as it is to cpuBlur. */
int stride = gridDim.x * blockDim.x;
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < nFrames; i += stride)
{
float sum = 0.0f;
int taps = (i < GAUSSIAN_SIZE) ? i + 1 : GAUSSIAN_SIZE;
for (int j = 0; j < taps; j++)
sum += d_inputData[i - j] * d_blurV[j];
d_outputData[i] = sum;
}
}
/* perform the convolution on the cpu */
float cpuBlur(float * h_inputData, float * h_outputData, float * blurV, int nFrames)
{
int i, j;
float cpuMsecTime = -1;
memset(h_outputData, 0, nFrames * sizeof (float));
cudaEvent_t start_cpu, stop_cpu;
CHECK(cudaEventCreate(&start_cpu));
CHECK(cudaEventCreate(&stop_cpu));
CHECK(cudaEventRecord(start_cpu));
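// The convolution below runs in two phases: for the first GAUSSIAN_SIZE output
// samples only i + 1 filter taps have valid input history, so the inner loop is
// shortened; from index GAUSSIAN_SIZE onward every output uses all GAUSSIAN_SIZE taps.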
for (i = 0; i < GAUSSIAN_SIZE; i++)
{
for (j = 0; j <= i; j++)
{
h_outputData[i] += h_inputData[i - j] * blurV[j];
}
}
for (i = GAUSSIAN_SIZE; i < nFrames; i++)
{
for (j = 0; j < GAUSSIAN_SIZE; j++)
{
h_outputData[i] += h_inputData[i - j] * blurV[j];
}
}
// Stop timer
CHECK(cudaEventRecord(stop_cpu));
CHECK(cudaEventSynchronize(stop_cpu));
CHECK(cudaEventElapsedTime(&cpuMsecTime, start_cpu, stop_cpu));
return cpuMsecTime;
}
/* To run the convolution, main must be supplied with, at a minimum,
the number of blocks and threads used to perform the convolution
on the GPU. For example, 32 blocks and 1024 threads would be
specified like this:
./blur -b 32 -t 1024
Optionally, the user can also provide an input file and an output
file. The input file should be a wav file. The output file will
contain the convoluted input file. If an input file is not provided,
the program randomly generates data for the convolution.
*/
int main(int argc, char **argv)
{
int blocks = 0, threads = 0, badArgs = 0;
char *inFile = NULL, *outFile = NULL;
sndFileT snd;
float *blurV = NULL;
//parse the command line arguments and make sure they
//are good
parseCmdLineArgs(argc, argv, &blocks, &threads, &inFile, &outFile);
badArgs = checkArgs(blocks, threads, inFile, outFile);
if (badArgs) verbose();
snd.sndFile = (inFile != NULL) && (outFile != NULL);
//generate the blur vector
blurV = generateBlurVector();
//if a user provided a sound file, read the data
//and fill the snd sndFileT struct
if (snd.sndFile) getSoundData(&snd, inFile, outFile);
//perform the convolution on the CPU and the GPU
gaussianTests(&snd, blurV, blocks, threads);
//free the blurV and reset the GPU
free(blurV);
CHECK(cudaDeviceReset());
}
|
7efc372a2b4dfd7c137ecb7f3d2f9239da02fc5e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* This software and the information contained herein is PROPRIETARY and
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
* conditions of a Non-Disclosure Agreement. Any reproduction or
* disclosure to any third party without the express written consent of
* NVIDIA is prohibited.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
/* Matrix multiplication: P = M * N.
* Device code.
*/
#ifndef _MATRIXMUL_KERNEL_H_
#define _MATRIXMUL_KERNEL_H_
#include <stdio.h>
#include "matrixmul.h"
////////////////////////////////////////////////////////////////////////////////
//! Simple test kernel for device functionality
//! @param g_idata input data in global memory
//! @param g_odata output data in global memory
////////////////////////////////////////////////////////////////////////////////
// Matrix multiplication kernel thread specification
__global__ void MatrixMulKernel(Matrix M, Matrix N, Matrix P)
{
//Multiply the two matrices
int column = ( blockDim.x * blockIdx.x ) + threadIdx.x;
int row = ( blockDim.y * blockIdx.y ) + threadIdx.y;
if (row < MATRIX_SIZE && column < MATRIX_SIZE)
{
float sum = 0;
for(int k = 0; k < MATRIX_SIZE; k++)
{
//sum + = M.elements[row * MATRIX_SIZE + k] * N.elements[k * MATRIX_SIZE + column];
//printf("index M is [%d] and index N is [%d]\n",row * MATRIX_SIZE + k,k*MATRIX_SIZE + column);
sum = sum + M.elements[row * MATRIX_SIZE + k] * N.elements[k*MATRIX_SIZE + column];
//printf("index M is [%d] --->[%lf] and index N is [%d]---> [%lf], sum is [%lf]\n",row * MATRIX_SIZE + k,M.elements[column * MATRIX_SIZE + k],k*MATRIX_SIZE + column,N.elements[k*MATRIX_SIZE + row],sum);
//printf("sum is %lf\n",sum);
}
//printf("sum Equalt to %f and save to P[%d], row is [%d], and clumn is [%d]\n",sum,row * MATRIX_SIZE + column,row,column);
P.elements[row * MATRIX_SIZE + column] = sum ;
}
}
#endif // #ifndef _MATRIXMUL_KERNEL_H_
| 7efc372a2b4dfd7c137ecb7f3d2f9239da02fc5e.cu | /*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* This software and the information contained herein is PROPRIETARY and
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
* conditions of a Non-Disclosure Agreement. Any reproduction or
* disclosure to any third party without the express written consent of
* NVIDIA is prohibited.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
/* Matrix multiplication: P = M * N.
* Device code.
*/
#ifndef _MATRIXMUL_KERNEL_H_
#define _MATRIXMUL_KERNEL_H_
#include <stdio.h>
#include "matrixmul.h"
////////////////////////////////////////////////////////////////////////////////
//! Simple test kernel for device functionality
//! @param g_idata input data in global memory
//! @param g_odata output data in global memory
////////////////////////////////////////////////////////////////////////////////
// Matrix multiplication kernel thread specification
__global__ void MatrixMulKernel(Matrix M, Matrix N, Matrix P)
{
//Multiply the two matrices
int column = ( blockDim.x * blockIdx.x ) + threadIdx.x;
int row = ( blockDim.y * blockIdx.y ) + threadIdx.y;
if (row < MATRIX_SIZE && column < MATRIX_SIZE)
{
float sum = 0;
for(int k = 0; k < MATRIX_SIZE; k++)
{
//sum + = M.elements[row * MATRIX_SIZE + k] * N.elements[k * MATRIX_SIZE + column];
//printf("index M is [%d] and index N is [%d]\n",row * MATRIX_SIZE + k,k*MATRIX_SIZE + column);
sum = sum + M.elements[row * MATRIX_SIZE + k] * N.elements[k*MATRIX_SIZE + column];
//printf("index M is [%d] --->[%lf] and index N is [%d]---> [%lf], sum is [%lf]\n",row * MATRIX_SIZE + k,M.elements[column * MATRIX_SIZE + k],k*MATRIX_SIZE + column,N.elements[k*MATRIX_SIZE + row],sum);
//printf("sum is %lf\n",sum);
}
//printf("sum Equalt to %f and save to P[%d], row is [%d], and clumn is [%d]\n",sum,row * MATRIX_SIZE + column,row,column);
P.elements[row * MATRIX_SIZE + column] = sum ;
}
}
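/* Illustrative host-side launch sketch (not part of the original header; Md, Nd
and Pd are hypothetical Matrix structs whose `elements` pointers refer to device
memory, and MATRIX_SIZE comes from matrixmul.h):
dim3 block(16, 16);
dim3 grid((MATRIX_SIZE + block.x - 1) / block.x,
(MATRIX_SIZE + block.y - 1) / block.y);
MatrixMulKernel<<<grid, block>>>(Md, Nd, Pd);
cudaDeviceSynchronize();
*/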
#endif // #ifndef _MATRIXMUL_KERNEL_H_
|
69a175821ff296b3ea76f44ae9425210274a5c5c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
This ssymv.cu was generated from zhemv.cu; zhemv.cu is nearly identical to zsymv.cu, just change names and drop MAGMA_Z_CONJ (for real precision the two routines coincide).
ssymv_kernel_U (upper) in ssymv_upper.cu is very similar to
ssymv_kernel_L (lower) in ssymv.cu; diff the two files to compare.
@generated from magmablas/zhemv.cu, normal z -> s, Thu Oct 8 23:05:34 2020
@author Mark Gates
*/
#include "magma_internal.h"
#include "commonblas_s.h"
#define PRECISION_s
#define NB_X 64
#define NB_Y 4
#define bank_shift 33
#define quarter_NB_X 16
#define half_NB_X 32
/***************************************************************************//**
Lower case, compute block multiply, work = A*x, for any size n:
[ (A11*x1) (A21^H*x2) (A31^H*x3) ] [ A11 A21^H A31^H ] [ x1 ]
work = [ --- (A21*x1 + A22*x2) (A32^H*x3) ] = [ A21 A22 A32^H ] * [ x2 ]
[ --- --- (A31*x1 + A32*x2 + A33*x3) ] [ A31 A32 A33 ] [ x3 ]
Uses a 64x4 thread block.
For diagonal tiles, covers a 64x64 tile using three 32x32 tiles (plus one gets transposed).
For off-diagonal tiles, covers a 64x64 tile using four 64x16 tiles.
In both cases, each thread multiplies 4 elements.
For rows past the bottom of the matrix, the A pointer is adjusted to be the
last valid row of A, which multiple threads will read.
Extra rows are ignored when saving results to work.
Columns past the right edge are explicitly ignored when loading.
x values past the bottom are set to zero, thus, extra columns are zeroed
when multiplying.
Previously:
[ (A11*x1) --- ]
work = [ (A21^H*x2) (A21*x1 + A22*x2) --- ]
[ (A31^H*x3) (A32^H*x3) (A31*x1 + A32*x2 + A33*x3) ]
which doesn't work as well because that has dimension blocks*NB by blocks,
where blocks*NB >= n, and it can be that blocks*NB > lda, so it won't fit in
lda*blocks space. This is why it used to need lwork = lda*(blocks + 1).
*******************************************************************************/
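/* Concrete illustration of the blocking (derived from the kernel below): with
n = 150 and NB_X = 64 the grid has ceil(150/64) = 3 block rows. Block rows 0 and
1 cover full 64-row tiles; block row 2 has partial = 150 % 64 = 22, so only its
first 22 rows are valid and x entries past row 149 are treated as zero. Each
64x4 thread block re-maps its 256 threads to a 32x8 layout (tx2, ty2) for the
32x32 diagonal tiles and to a 16x16 layout (tx4, ty4) when reducing the
transposed off-diagonal products. */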
__global__ void
ssymv_kernel_L(
int n,
float const * __restrict__ A, int lda,
float const * __restrict__ x, int incx,
float * __restrict__ work)
{
#if defined(PRECISION_s) || defined(PRECISION_d) || defined(PRECISION_c) || (__CUDA_ARCH__ >= 200)
// treats sA as 16x64 block
#define sA16(i_, j_) (sA[(i_)][(j_)]) // i.e., sA[ (i_)*(NB_X+3) + (j_) ]
// treats sA as 32x32 block
#define sA32(i_, j_) (sA[0][(i_) + bank_shift*(j_)])
// 64x4 thread block
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int blk = blockIdx.x;
const int blk_ind = NB_X * blk;
const int td = NB_X * ty + tx;
// 32x8 thread block
const int tx2 = td % half_NB_X;
const int ty2 = td / half_NB_X;
// If this blk has fewer than NB_X rows, partial is the number of valid rows,
// so tx = 0, ..., partial-1 are valid rows, and tx >= partial are invalid.
// Else, partial == 0.
const int partial = (blk == gridDim.x - 1 ? (n % NB_X) : 0);
float psum, psum_t;
float total = MAGMA_S_ZERO;
// sA is used as a 32x32 block, sA32(i,j),
// and as a 16x64 block, sA16(i,j), in different parts of the code.
// sA must be at least half_NB_X*bank_shift = 32x33 = 1056;
// quarter_NB_X*(NB_X + 2) = 16*(64 + 2) = 1056
__shared__ float sA [quarter_NB_X][NB_X + 3]; /* Why +3? seems it only needs +2. Does +3 reduce bank conflicts? */
__shared__ float sx_blk[NB_X]; // for x[ blk ]
__shared__ float sx_jj [NB_X]; // for x[ jj ], which cycles over all blocks left of diag
float rA[4];
float psums_t[4];
// --------------------
// load 64x1 block x(blk_ind + 0:63) into sx_blk
x += (blk_ind + tx)*incx; // x is x(blk_ind + tx)
if ( ty == 0 ) {
if ( partial == 0 || tx < partial ) {
sx_blk[tx] = x[0];
}
else {
sx_blk[tx] = MAGMA_S_ZERO;
}
}
// --------------------
// move to block row
work += blk*lda; // work is work(0, blk)
A += blk_ind; // A is A(blk_ind, 0)
A += ty2*lda + tx2; // A is A(blk_ind + tx2, ty2)
// move to 32x32 diag block
A += blk_ind*lda; // A is A(blk_ind + tx2, blk_ind + ty2)
// load 32x32 diag block A(blk_ind + 0:31, blk_ind + 0:31) into sA,
// as four 32x8 sections one after another:
// columns 0:7, then 8:15, then 16:23, then 24:31
if ( partial ) {
if ( tx2 >= partial ) {
A = A - tx2 + (partial - 1); // A is A(blk_ind + partial-1, blk_ind + ty2), the bottom-most valid row
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_S_ZERO;
}
}
if ( tx2 >= partial ) {
A = A + tx2 - (partial - 1); // A is A(blk_ind + tx2, blk_ind + ty2)
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// symmetrize 32x32 diag block, copying lower to upper triangle,
// as four 32x8 sections in parallel:
// columns 0,4,8,12,16,20,24,28; then 1,5,...,29; then 2,6,...,30, then 3,7,...,31
#pragma unroll
for (int j=ty2*4; j < ty2*4 + 4; j++) {
if ( j < tx2 ) {
sA32(j, tx2) = MAGMA_S_CONJ( sA32(tx2, j) );
}
}
__syncthreads();
// multiply 32x32 diag block * x
// each thread does partial row sA(tx2, ty2*4 : ty2*4 + 3)
psum = MAGMA_S_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2*4 + j) * sx_blk[ty2*4 + j];
}
__syncthreads();
// store partial row sums
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
if ( ty2 == 0 ) {
total = sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to next 32x32 diag block, then repeat steps from first diag block
A += half_NB_X + half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + NB/2 + ty2)
// load 32x32 diag block A[block + 0:31, block + 0:31] into sA
if ( partial ) {
if ( tx2 + half_NB_X >= partial ) {
A = A - (tx2 + half_NB_X) + (partial - 1);
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j + half_NB_X < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_S_ZERO;
}
}
if ( tx2 + half_NB_X >= partial ) {
A = A + (tx2 + half_NB_X) - (partial - 1);
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// symmetrize 32x32 diag block, copying lower to upper triangle
#pragma unroll
for (int j=ty2*4; j < ty2*4 + 4; j++) {
if ( j < tx2 ) {
sA32(j, tx2) = MAGMA_S_CONJ( sA32(tx2, j) );
}
}
__syncthreads();
// multiply 32x32 diag block * x
psum = MAGMA_S_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2*4 + j) * sx_blk[half_NB_X + ty2*4 + j];
}
__syncthreads();
// store partial row sums
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
if ( ty2 == 1 ) {
total = sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to off-diag 32x32 block
A -= half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + ty2)
// load 32x32 block of A into sA,
// as four 32x8 sections one after another:
// columns 0:7, then 8:15, then 16:23, then 24:31
if ( partial ) {
if ( tx2 + half_NB_X >= partial ) {
A = A - (tx2 + half_NB_X) + (partial - 1);
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_S_ZERO;
}
}
if ( tx2 + half_NB_X >= partial ) {
A = A + (tx2 + half_NB_X) - (partial - 1);
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// multiply 32x32 block (below diag)
psum = MAGMA_S_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2 + j*8) * sx_blk[j*8 + ty2];
}
//__syncthreads(); // no sync needed here
// multiply transposed 32x32 block (above diag)
psum_t = MAGMA_S_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum_t += MAGMA_S_CONJ( sA32(ty2*4 + j, tx2) ) * sx_blk[half_NB_X + ty2*4 + j];
}
__syncthreads();
// store partial sums for non-transposed 32x32 block
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
if ( ty2 == 1 ) {
total = total
+ sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// store partial sums for transposed 32x32 block
sA32(ty2, tx2) = psum_t;
__syncthreads();
// sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
if ( ty2 == 0 ) {
total = total
+ sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to leftmost 64x64 block in block row, and
// switch thread offset from (tx2,ty2) 32x8 block to (tx,ty) 64x4 block
A -= half_NB_X; // A is A(blk_ind + tx2, blk_ind + ty2)
A -= blk_ind*lda; // A is A(blk_ind + tx2, ty2)
A -= ty2*lda + tx2; // A is A(blk_ind, 0)
A += 4*ty*lda + tx; // A is A(blk_ind + tx, 4*ty)
if ( partial && tx >= partial ) {
A = A - tx + (partial - 1); // A is A(blk_ind + partial-1, 4*ty), the bottom-most valid row
}
x -= blk_ind*incx; // x is x(tx)
// 16x16 thread block
const int tx4 = td % quarter_NB_X;
const int ty4 = td / quarter_NB_X;
// cycle over blocks jj left of diagonal, in block row blk
for (int jj=0; jj < blk; ++jj) {
// load 64x1 block x(jj_ind + 0:63) into sx_jj
// since this block is left of diagonal, x must have all NB rows
if ( ty == 0 ) {
sx_jj[tx] = x[jj*NB_X*incx];
}
__syncthreads();
for (int k=0; k < 4; k++) {
// load 64x16 block of A into rA, 4 elements per thread,
// as four 64x4 sections in parallel:
// columns 0,4,8,12; then 1,5,9,13; then 2,6,10,14; then 3,7,11,15
// since this block is left of diagonal, it has all NB columns,
// and block of x must have all NB rows.
#pragma unroll
for (int j=0; j < 4; j++) {
rA[j] = A[j*lda];
}
// 1) multiply 64x16 block A_{blk,jj} * x_jj
// each thread does partial row rA(tx + 16*k, ty*4 + 16*k : ty*4 + 3 + 16*k)
// 2) multiply transposed 16x64 block A_{blk,jj}^H * x_blk,
// storing each product Aji*xi to sA(j,i)
#pragma unroll
for (int j=0; j < 4; j++) {
total += rA[j] * sx_jj[quarter_NB_X*k + ty*4 + j]; // y_blk = A_{blk,jj} * x_jj
sA16(ty*4 + j, tx) = MAGMA_S_CONJ( rA[j] ) * sx_blk[tx]; // y_jj = A_{blk,jj}^H * x_blk
}
__syncthreads();
// do partial row sums for transposed 16x64 result
// use 16x16 thread grid (tx4, ty4) instead of 64x4 (tx, ty)
// sum sixteen 16x4 sections in parallel:
// columns 0,4,8,...,60; then 1,5,...,61; then 2,6,...,62; then 3,7,...,63
psum_t = MAGMA_S_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum_t += sA16(tx4, ty4*4 + j);
}
__syncthreads();
// store partial row sums of transposed result, y_jj (locally)
psums_t[k] = psum_t;
// move right to next 64x16 block
A += lda * quarter_NB_X; // A is A(blk_ind + tx#, jj*NB_x + (k+1)*NB_X/4 + 4*ty), # tx or partial
}
// already at next 64x64 block
// A is A(blk_ind + tx#, (jj+1)*NB_x + 4*ty), # tx or partial
// store partial row sums of transposed result, y_jj
#pragma unroll
for (int k=0; k < 4; k++) {
sA16(tx4, ty4 + quarter_NB_X*k) = psums_t[k];
}
__syncthreads();
// sum up partial row sums of transposed result, y_jj, and store final total to workspace
// thread (tx4,ty4) where ty4 < 4 sums row tx4 + ty4*16
// since this is the transposed block above the diagonal, it must have all NB rows
if ( ty4 < 4 ) {
int ty4_nb4 = ty4*quarter_NB_X;
psum_t = sA16(tx4, 0 + ty4_nb4) + sA16(tx4, 1 + ty4_nb4)
+ sA16(tx4, 2 + ty4_nb4) + sA16(tx4, 3 + ty4_nb4)
+ sA16(tx4, 4 + ty4_nb4) + sA16(tx4, 5 + ty4_nb4)
+ sA16(tx4, 6 + ty4_nb4) + sA16(tx4, 7 + ty4_nb4)
+ sA16(tx4, 8 + ty4_nb4) + sA16(tx4, 9 + ty4_nb4)
+ sA16(tx4, 10 + ty4_nb4) + sA16(tx4, 11 + ty4_nb4)
+ sA16(tx4, 12 + ty4_nb4) + sA16(tx4, 13 + ty4_nb4)
+ sA16(tx4, 14 + ty4_nb4) + sA16(tx4, 15 + ty4_nb4);
work[jj*NB_X + tx4 + ty4_nb4] = psum_t; // store at work( jj*NB_X + tx4 + ty4*16, blk )
}
__syncthreads();
}
// store row sums
sA16(ty, tx) = total;
__syncthreads();
// sum up final total, y_blk, for row tx
if ( ty == 0 && (partial == 0 || tx < partial) ) {
total = sA16(0, tx)
+ sA16(1, tx)
+ sA16(2, tx)
+ sA16(3, tx);
work[blk*NB_X + tx] = total; // store at work( blk*NB_X + tx, blk )
}
#endif /* PRECISION_[sdc] || (__CUDA_ARCH__ >= 200) */
}
// end ssymv_kernel_L
/***************************************************************************//**
Lower case, sum up final results
Each block sums one block row; each thread sums one row.
On input (for 3 blocks):
[ (A11*x1) (A21^H*x2) (A31^H*x3) ]
work = [ --- (A21*x1 + A22*x2) (A32^H*x3) ]
[ --- --- (A31*x1 + A32*x2 + A33*x3) ]
On output:
[ (A11*x1) + (A21^H*x2) + (A31^H*x3) ]
y = alpha*[ (A21*x1 + A22*x2) + (A32^H*x3) ] + beta*y
[ (A31*x1 + A32*x2 + A33*x3) ]
*******************************************************************************/
__global__ void
ssymv_kernel_L_sum(
int n,
float alpha,
int lda,
float beta,
float * __restrict__ y, int incy,
float const * __restrict__ work )
{
int tx = threadIdx.x;
int blk = blockIdx.x;
int blk_ind = blk * NB_X;
int ind = blk_ind + tx;
int blocks = gridDim.x;
// Don't write outside [0, ..., n)
if ( ind < n ) {
work += ind + blk*lda;
float Ax = MAGMA_S_ZERO;
for (int j = blk; j < blocks; ++j) {
Ax += work[0];
work += lda;
}
y[ind * incy] = beta*y[ind * incy] + alpha*Ax;
}
}
/***************************************************************************//**
Purpose
-------
magmablas_ssymv_work performs the matrix-vector operation:
y := alpha*A*x + beta*y,
where alpha and beta are scalars, x and y are n element vectors and
A is an n by n symmetric matrix.
Arguments
----------
@param[in]
uplo magma_uplo_t.
On entry, UPLO specifies whether the upper or lower
triangular part of the array A is to be referenced as
follows:
- = MagmaUpper: Only the upper triangular part of A is to be referenced.
- = MagmaLower: Only the lower triangular part of A is to be referenced.
@param[in]
n INTEGER.
On entry, N specifies the order of the matrix A.
N must be at least zero.
@param[in]
alpha REAL.
On entry, ALPHA specifies the scalar alpha.
@param[in]
dA REAL array of DIMENSION ( LDDA, n ).
Before entry with UPLO = MagmaUpper, the leading n by n
upper triangular part of the array A must contain the upper
triangular part of the symmetric matrix and the strictly
lower triangular part of A is not referenced.
Before entry with UPLO = MagmaLower, the leading n by n
lower triangular part of the array A must contain the lower
triangular part of the symmetric matrix and the strictly
upper triangular part of A is not referenced.
Note that the imaginary parts of the diagonal elements need
not be set and are assumed to be zero.
@param[in]
ldda INTEGER.
On entry, LDDA specifies the first dimension of A as declared
in the calling (sub) program. LDDA must be at least
max( 1, n ).
It is recommended that ldda is a multiple of 16; otherwise,
performance degrades because the memory accesses
are not fully coalesced.
@param[in]
dx REAL array of dimension at least
( 1 + ( n - 1 )*abs( INCX ) ).
Before entry, the incremented array X must contain the n
element vector x.
@param[in]
incx INTEGER.
On entry, INCX specifies the increment for the elements of
X. INCX must not be zero.
@param[in]
beta REAL.
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then Y need not be set on input.
@param[in,out]
dy REAL array of dimension at least
( 1 + ( n - 1 )*abs( INCY ) ).
Before entry, the incremented array Y must contain the n
element vector y. On exit, Y is overwritten by the updated
vector y.
@param[in]
incy INTEGER.
On entry, INCY specifies the increment for the elements of
Y. INCY must not be zero.
@param[in]
dwork (workspace) REAL array on the GPU, dimension (MAX(1, LWORK)),
@param[in]
lwork INTEGER.
The dimension of the array DWORK. LWORK >= LDDA * ceil( N / NB_X ),
where NB_X = 64.
@param[in]
queue magma_queue_t.
Queue to execute in.
MAGMA implements ssymv through two steps:
1) perform the multiplication in each thread block and put the
intermediate value in dwork.
2) sum the intermediate values and store the final result in y.
magmablas_ssymv_work requires users to provide a workspace, while
magmablas_ssymv is a wrapper routine that allocates the workspace inside the
routine and provides the same interface as cublas.
If users need to call ssymv frequently, we suggest using
magmablas_ssymv_work instead of magmablas_ssymv, as the overhead of
allocating and freeing device memory in magmablas_ssymv would hurt performance.
Our tests show that this penalty is about 10 Gflop/s when the matrix
size is around 10000.
@ingroup magma_hemv
*******************************************************************************/
extern "C"
magma_int_t
magmablas_ssymv_work(
magma_uplo_t uplo, magma_int_t n,
float alpha,
magmaFloat_const_ptr dA, magma_int_t ldda,
magmaFloat_const_ptr dx, magma_int_t incx,
float beta,
magmaFloat_ptr dy, magma_int_t incy,
magmaFloat_ptr dwork, magma_int_t lwork,
magma_queue_t queue )
{
#if defined(PRECISION_z)
// z precision requires CUDA ARCH 2.x; call CUBLAS version instead.
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
magma_ssymv( uplo, n, alpha, dA, ldda, dx, incx, beta, dy, incy, queue );
return MAGMA_SUCCESS;
}
#endif
// --------------------
// [sdc] precisions, or z precision with CUDA ARCH 2.x
bool upper = (uplo == MagmaUpper);
magma_int_t blocks = magma_ceildiv( n, NB_X );
magma_int_t lwmin = ldda*blocks;
/*
* Test the input parameters.
*/
magma_int_t info = 0;
if ((! upper) && (uplo != MagmaLower)) {
info = -1;
} else if ( n < 0 ) {
info = -2;
} else if ( ldda < max(1, n) ) {
info = -5;
} else if ( incx == 0 ) {
info = -7;
} else if ( incy == 0 ) {
info = -10;
} else if ( lwork < lwmin ) {
info = -12;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return info;
}
/*
* Quick return if possible.
*/
if ( (n == 0) || ( MAGMA_S_EQUAL(alpha, MAGMA_S_ZERO) && MAGMA_S_EQUAL(beta, MAGMA_S_ONE) ) )
return info;
dim3 grid( blocks, 1, 1 );
dim3 threads( NB_X, NB_Y, 1 );
dim3 threads_sum( NB_X, 1, 1 );
if ( upper ) {
hipLaunchKernelGGL(( ssymv_kernel_U), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
n, dA, ldda, dx, incx, dwork);
hipLaunchKernelGGL(( ssymv_kernel_U_sum), dim3(grid), dim3(threads_sum), 0, queue->cuda_stream() ,
n, alpha, ldda, beta, dy, incy, dwork);
}
else {
hipLaunchKernelGGL(( ssymv_kernel_L), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
n, dA, ldda, dx, incx, dwork);
hipLaunchKernelGGL(( ssymv_kernel_L_sum), dim3(grid), dim3(threads_sum), 0, queue->cuda_stream() ,
n, alpha, ldda, beta, dy, incy, dwork);
}
return info;
}
// end magmablas_ssymv_work
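/* Illustrative call sequence for the workspace variant (a hedged sketch, not
part of MAGMA itself; assumes an existing magma_queue_t `queue` and device
arrays dA (ldda x n), dx, dy that are already populated):
magma_int_t blocks = magma_ceildiv( n, 64 ); // NB_X = 64
magma_int_t lwork = ldda * blocks; // e.g. n = 1000, ldda = 1024 gives 1024 * 16
magmaFloat_ptr dwork;
magma_smalloc( &dwork, lwork );
magmablas_ssymv_work( MagmaLower, n, alpha, dA, ldda, dx, 1,
beta, dy, 1, dwork, lwork, queue );
magma_free( dwork ); // or keep dwork alive across repeated ssymv calls
*/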
/***************************************************************************//**
Purpose
-------
magmablas_ssymv performs the matrix-vector operation:
y := alpha*A*x + beta*y,
where alpha and beta are scalars, x and y are n element vectors and
A is an n by n symmetric matrix.
Arguments
----------
@param[in]
uplo magma_uplo_t.
On entry, UPLO specifies whether the upper or lower
triangular part of the array A is to be referenced as
follows:
- = MagmaUpper: Only the upper triangular part of A is to be referenced.
- = MagmaLower: Only the lower triangular part of A is to be referenced.
@param[in]
n INTEGER.
On entry, N specifies the order of the matrix A.
N must be at least zero.
@param[in]
alpha REAL.
On entry, ALPHA specifies the scalar alpha.
@param[in]
dA REAL array of DIMENSION ( LDDA, n ).
Before entry with UPLO = MagmaUpper, the leading n by n
upper triangular part of the array A must contain the upper
triangular part of the symmetric matrix and the strictly
lower triangular part of A is not referenced.
Before entry with UPLO = MagmaLower, the leading n by n
lower triangular part of the array A must contain the lower
triangular part of the symmetric matrix and the strictly
upper triangular part of A is not referenced.
Note that the imaginary parts of the diagonal elements need
not be set and are assumed to be zero.
@param[in]
ldda INTEGER.
On entry, LDDA specifies the first dimension of A as declared
in the calling (sub) program. LDDA must be at least
max( 1, n ).
It is recommended that ldda is a multiple of 16; otherwise,
performance degrades because the memory accesses
are not fully coalesced.
@param[in]
dx REAL array of dimension at least
( 1 + ( n - 1 )*abs( INCX ) ).
Before entry, the incremented array X must contain the n
element vector x.
@param[in]
incx INTEGER.
On entry, INCX specifies the increment for the elements of
X. INCX must not be zero.
@param[in]
beta REAL.
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then Y need not be set on input.
@param[in,out]
dy REAL array of dimension at least
( 1 + ( n - 1 )*abs( INCY ) ).
Before entry, the incremented array Y must contain the n
element vector y. On exit, Y is overwritten by the updated
vector y.
@param[in]
incy INTEGER.
On entry, INCY specifies the increment for the elements of
Y. INCY must not be zero.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_hemv
*******************************************************************************/
extern "C"
magma_int_t
magmablas_ssymv(
magma_uplo_t uplo, magma_int_t n,
float alpha,
magmaFloat_const_ptr dA, magma_int_t ldda,
magmaFloat_const_ptr dx, magma_int_t incx,
float beta,
magmaFloat_ptr dy, magma_int_t incy,
magma_queue_t queue )
{
#if defined(PRECISION_z)
// z precision requires CUDA ARCH 2.x; call CUBLAS version instead.
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
magma_ssymv( uplo, n, alpha, dA, ldda, dx, incx, beta, dy, incy, queue );
return MAGMA_SUCCESS;
}
#endif
// --------------------
// [sdc] precisions, or z precision with CUDA ARCH 2.x
bool upper = (uplo == MagmaUpper);
/*
* Test the input parameters.
*/
magma_int_t info = 0;
if ((! upper) && (uplo != MagmaLower)) {
info = -1;
} else if ( n < 0 ) {
info = -2;
} else if ( ldda < max(1, n) ) {
info = -5;
} else if ( incx == 0 ) {
info = -7;
} else if ( incy == 0 ) {
info = -10;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return info;
}
/*
* Quick return if possible.
*/
if ( (n == 0) || ( MAGMA_S_EQUAL(alpha, MAGMA_S_ZERO) && MAGMA_S_EQUAL(beta, MAGMA_S_ONE) ) )
return info;
magmaFloat_ptr dwork;
magma_int_t blocks = magma_ceildiv( n, NB_X );
magma_int_t lwork = ldda*blocks;
magma_smalloc( &dwork, lwork );
if ( dwork == NULL ) {
info = MAGMA_ERR_DEVICE_ALLOC;
magma_xerbla( __func__, -(info) );
return info;
}
magmablas_ssymv_work( uplo, n, alpha, dA, ldda, dx, incx, beta, dy, incy,
dwork, lwork, queue );
magma_free( dwork );
return info;
}
// end magmablas_ssymv
| 69a175821ff296b3ea76f44ae9425210274a5c5c.cu | /*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
This ssymv.cu was generated from zhemv.cu; zhemv.cu is nearly identical to zsymv.cu, just change names and drop MAGMA_Z_CONJ (for real precision the two routines coincide).
ssymv_kernel_U (upper) in ssymv_upper.cu is very similar to
ssymv_kernel_L (lower) in ssymv.cu; diff the two files to compare.
@generated from magmablas/zhemv.cu, normal z -> s, Thu Oct 8 23:05:34 2020
@author Mark Gates
*/
#include "magma_internal.h"
#include "commonblas_s.h"
#define PRECISION_s
#define NB_X 64
#define NB_Y 4
#define bank_shift 33
#define quarter_NB_X 16
#define half_NB_X 32
/***************************************************************************//**
Lower case, compute block multiply, work = A*x, for any size n:
[ (A11*x1) (A21^H*x2) (A31^H*x3) ] [ A11 A21^H A31^H ] [ x1 ]
work = [ --- (A21*x1 + A22*x2) (A32^H*x3) ] = [ A21 A22 A32^H ] * [ x2 ]
[ --- --- (A31*x1 + A32*x2 + A33*x3) ] [ A31 A32 A33 ] [ x3 ]
Uses a 64x4 thread block.
For diagonal tiles, covers a 64x64 tile using three 32x32 tiles (plus one gets transposed).
For off-diagonal tiles, covers a 64x64 tile using four 64x16 tiles.
In both cases, each thread multiplies 4 elements.
For rows past the bottom of the matrix, the A pointer is adjusted to be the
last valid row of A, which multiple threads will read.
Extra rows are ignored when saving results to work.
Columns past the right edge are explicitly ignored when loading.
x values past the bottom are set to zero, thus, extra columns are zeroed
when multiplying.
Previously:
[ (A11*x1) --- ]
work = [ (A21^H*x2) (A21*x1 + A22*x2) --- ]
[ (A31^H*x3) (A32^H*x3) (A31*x1 + A32*x2 + A33*x3) ]
which doesn't work as well because that has dimension blocks*NB by blocks,
where blocks*NB >= n, and it can be that blocks*NB > lda, so it won't fit in
lda*blocks space. This is why it used to need lwork = lda*(blocks + 1).
*******************************************************************************/
__global__ void
ssymv_kernel_L(
int n,
float const * __restrict__ A, int lda,
float const * __restrict__ x, int incx,
float * __restrict__ work)
{
#if defined(PRECISION_s) || defined(PRECISION_d) || defined(PRECISION_c) || (__CUDA_ARCH__ >= 200)
// treats sA as 16x64 block
#define sA16(i_, j_) (sA[(i_)][(j_)]) // i.e., sA[ (i_)*(NB_X+3) + (j_) ]
// treats sA as 32x32 block
#define sA32(i_, j_) (sA[0][(i_) + bank_shift*(j_)])
// 64x4 thread block
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int blk = blockIdx.x;
const int blk_ind = NB_X * blk;
const int td = NB_X * ty + tx;
// 32x8 thread block
const int tx2 = td % half_NB_X;
const int ty2 = td / half_NB_X;
// If this blk has fewer than NB_X rows, partial is the number of valid rows,
// so tx = 0, ..., partial-1 are valid rows, and tx >= partial are invalid.
// Else, partial == 0.
const int partial = (blk == gridDim.x - 1 ? (n % NB_X) : 0);
float psum, psum_t;
float total = MAGMA_S_ZERO;
// sA is used as a 32x32 block, sA32(i,j),
// and as a 16x64 block, sA16(i,j), in different parts of the code.
// sA must be at least half_NB_X*bank_shift = 32x33 = 1056;
// quarter_NB_X*(NB_X + 2) = 16*(64 + 2) = 1056
__shared__ float sA [quarter_NB_X][NB_X + 3]; /* Why +3? seems it only needs +2. Does +3 reduce bank conflicts? */
__shared__ float sx_blk[NB_X]; // for x[ blk ]
__shared__ float sx_jj [NB_X]; // for x[ jj ], which cycles over all blocks left of diag
float rA[4];
float psums_t[4];
// --------------------
// load 64x1 block x(blk_ind + 0:63) into sx_blk
x += (blk_ind + tx)*incx; // x is x(blk_ind + tx)
if ( ty == 0 ) {
if ( partial == 0 || tx < partial ) {
sx_blk[tx] = x[0];
}
else {
sx_blk[tx] = MAGMA_S_ZERO;
}
}
// --------------------
// move to block row
work += blk*lda; // work is work(0, blk)
A += blk_ind; // A is A(blk_ind, 0)
A += ty2*lda + tx2; // A is A(blk_ind + tx2, ty2)
// move to 32x32 diag block
A += blk_ind*lda; // A is A(blk_ind + tx2, blk_ind + ty2)
// load 32x32 diag block A(blk_ind + 0:31, blk_ind + 0:31) into sA,
// as four 32x8 sections one after another:
// columns 0:7, then 8:15, then 16:23, then 24:31
if ( partial ) {
if ( tx2 >= partial ) {
A = A - tx2 + (partial - 1); // A is A(blk_ind + partial-1, blk_ind + ty2), the bottom-most valid row
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_S_ZERO;
}
}
if ( tx2 >= partial ) {
A = A + tx2 - (partial - 1); // A is A(blk_ind + tx2, blk_ind + ty2)
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// symmetrize 32x32 diag block, copying lower to upper triangle,
// as four 32x8 sections in parallel:
// columns 0,4,8,12,16,20,24,28; then 1,5,...,29; then 2,6,...,30, then 3,7,...,31
#pragma unroll
for (int j=ty2*4; j < ty2*4 + 4; j++) {
if ( j < tx2 ) {
sA32(j, tx2) = MAGMA_S_CONJ( sA32(tx2, j) );
}
}
__syncthreads();
// multiply 32x32 diag block * x
// each thread does partial row sA(tx2, ty2*4 : ty2*4 + 3)
psum = MAGMA_S_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2*4 + j) * sx_blk[ty2*4 + j];
}
__syncthreads();
// store partial row sums
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
if ( ty2 == 0 ) {
total = sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to next 32x32 diag block, then repeat steps from first diag block
A += half_NB_X + half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + NB/2 + ty2)
// load 32x32 diag block A[block + 0:31, block + 0:31] into sA
if ( partial ) {
if ( tx2 + half_NB_X >= partial ) {
A = A - (tx2 + half_NB_X) + (partial - 1);
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j + half_NB_X < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_S_ZERO;
}
}
if ( tx2 + half_NB_X >= partial ) {
A = A + (tx2 + half_NB_X) - (partial - 1);
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// symmetrize 32x32 diag block, copying lower to upper triangle
#pragma unroll
for (int j=ty2*4; j < ty2*4 + 4; j++) {
if ( j < tx2 ) {
sA32(j, tx2) = MAGMA_S_CONJ( sA32(tx2, j) );
}
}
__syncthreads();
// multiply 32x32 diag block * x
psum = MAGMA_S_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2*4 + j) * sx_blk[half_NB_X + ty2*4 + j];
}
__syncthreads();
// store partial row sums
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
if ( ty2 == 1 ) {
total = sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to off-diag 32x32 block
A -= half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + ty2)
// load 32x32 block of A into sA,
// as four 32x8 sections one after another:
// columns 0:7, then 8:15, then 16:23, then 24:31
if ( partial ) {
if ( tx2 + half_NB_X >= partial ) {
A = A - (tx2 + half_NB_X) + (partial - 1);
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_S_ZERO;
}
}
if ( tx2 + half_NB_X >= partial ) {
A = A + (tx2 + half_NB_X) - (partial - 1);
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// multiply 32x32 block (below diag)
psum = MAGMA_S_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2 + j*8) * sx_blk[j*8 + ty2];
}
//__syncthreads(); // no sync needed here
// multiply transposed 32x32 block (above diag)
psum_t = MAGMA_S_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum_t += MAGMA_S_CONJ( sA32(ty2*4 + j, tx2) ) * sx_blk[half_NB_X + ty2*4 + j];
}
__syncthreads();
// store partial sums for non-transposed 32x32 block
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
if ( ty2 == 1 ) {
total = total
+ sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// store partial sums for transposed 32x32 block
sA32(ty2, tx2) = psum_t;
__syncthreads();
// sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
if ( ty2 == 0 ) {
total = total
+ sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to leftmost 64x64 block in block row, and
// switch thread offset from (tx2,ty2) 32x8 block to (tx,ty) 64x4 block
A -= half_NB_X; // A is A(blk_ind + tx2, blk_ind + ty2)
A -= blk_ind*lda; // A is A(blk_ind + tx2, ty2)
A -= ty2*lda + tx2; // A is A(blk_ind, 0)
A += 4*ty*lda + tx; // A is A(blk_ind + tx, 4*ty)
if ( partial && tx >= partial ) {
A = A - tx + (partial - 1); // A is A(blk_ind + partial-1, 4*ty), the bottom-most valid row
}
x -= blk_ind*incx; // x is x(tx)
// 16x16 thread block
const int tx4 = td % quarter_NB_X;
const int ty4 = td / quarter_NB_X;
// cycle over blocks jj left of diagonal, in block row blk
for (int jj=0; jj < blk; ++jj) {
// load 64x1 block x(jj_ind + 0:63) into sx_jj
// since this block is left of diagonal, x must have all NB rows
if ( ty == 0 ) {
sx_jj[tx] = x[jj*NB_X*incx];
}
__syncthreads();
for (int k=0; k < 4; k++) {
// load 64x16 block of A into rA, 4 elements per thread,
// as four 64x4 sections in parallel:
// columns 0,4,8,12; then 1,5,9,13; then 2,6,10,14; then 3,7,11,15
// since this block is left of diagonal, it has all NB columns,
// and block of x must have all NB rows.
#pragma unroll
for (int j=0; j < 4; j++) {
rA[j] = A[j*lda];
}
// 1) multiply 64x16 block A_{blk,jj} * x_jj
// each thread does partial row rA(tx + 16*k, ty*4 + 16*k : ty*4 + 3 + 16*k)
// 2) multiply transposed 16x64 block A_{blk,jj}^H * x_blk,
// storing each product Aji*xi to sA(j,i)
#pragma unroll
for (int j=0; j < 4; j++) {
total += rA[j] * sx_jj[quarter_NB_X*k + ty*4 + j]; // y_blk = A_{blk,jj} * x_jj
sA16(ty*4 + j, tx) = MAGMA_S_CONJ( rA[j] ) * sx_blk[tx]; // y_jj = A_{blk,jj}^H * x_blk
}
__syncthreads();
// do partial row sums for transposed 16x64 result
// use 16x16 thread grid (tx4, ty4) instead of 64x4 (tx, ty)
// sum sixteen 16x4 sections in parallel:
// columns 0,4,8,...,60; then 1,5,...,61; then 2,6,...,62; then 3,7,...,63
psum_t = MAGMA_S_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum_t += sA16(tx4, ty4*4 + j);
}
__syncthreads();
// store partial row sums of transposed result, y_jj (locally)
psums_t[k] = psum_t;
// move right to next 64x16 block
A += lda * quarter_NB_X; // A is A(blk_ind + tx#, jj*NB_x + (k+1)*NB_X/4 + 4*ty), # tx or partial
}
// already at next 64x64 block
// A is A(blk_ind + tx#, (jj+1)*NB_x + 4*ty), # tx or partial
// store partial row sums of transposed result, y_jj
#pragma unroll
for (int k=0; k < 4; k++) {
sA16(tx4, ty4 + quarter_NB_X*k) = psums_t[k];
}
__syncthreads();
// sum up partial row sums of transposed result, y_jj, and store final total to workspace
// thread (tx4,ty4) where ty4 < 4 sums row tx4 + ty4*16
// since this is the transposed block above the diagonal, it must have all NB rows
if ( ty4 < 4 ) {
int ty4_nb4 = ty4*quarter_NB_X;
psum_t = sA16(tx4, 0 + ty4_nb4) + sA16(tx4, 1 + ty4_nb4)
+ sA16(tx4, 2 + ty4_nb4) + sA16(tx4, 3 + ty4_nb4)
+ sA16(tx4, 4 + ty4_nb4) + sA16(tx4, 5 + ty4_nb4)
+ sA16(tx4, 6 + ty4_nb4) + sA16(tx4, 7 + ty4_nb4)
+ sA16(tx4, 8 + ty4_nb4) + sA16(tx4, 9 + ty4_nb4)
+ sA16(tx4, 10 + ty4_nb4) + sA16(tx4, 11 + ty4_nb4)
+ sA16(tx4, 12 + ty4_nb4) + sA16(tx4, 13 + ty4_nb4)
+ sA16(tx4, 14 + ty4_nb4) + sA16(tx4, 15 + ty4_nb4);
work[jj*NB_X + tx4 + ty4_nb4] = psum_t; // store at work( jj*NB_X + tx4 + ty4*16, blk )
}
__syncthreads();
}
// store row sums
sA16(ty, tx) = total;
__syncthreads();
// sum up final total, y_blk, for row tx
if ( ty == 0 && (partial == 0 || tx < partial) ) {
total = sA16(0, tx)
+ sA16(1, tx)
+ sA16(2, tx)
+ sA16(3, tx);
work[blk*NB_X + tx] = total; // store at work( blk*NB_X + tx, blk )
}
#endif /* PRECISION_[sdc] || (__CUDA_ARCH__ >= 200) */
}
// end ssymv_kernel_L
/***************************************************************************//**
Lower case, sum up final results
Each block sums one block row; each thread sums one row.
On input (for 3 blocks):
[ (A11*x1) (A21^H*x2) (A31^H*x3) ]
work = [ --- (A21*x1 + A22*x2) (A32^H*x3) ]
[ --- --- (A31*x1 + A32*x2 + A33*x3) ]
On output:
[ (A11*x1) + (A21^H*x2) + (A31^H*x3) ]
y = alpha*[ (A21*x1 + A22*x2) + (A32^H*x3) ] + beta*y
[ (A21*x1 + A22*x2 + A33*x3) ]
*******************************************************************************/
__global__ void
ssymv_kernel_L_sum(
int n,
float alpha,
int lda,
float beta,
float * __restrict__ y, int incy,
float const * __restrict__ work )
{
int tx = threadIdx.x;
int blk = blockIdx.x;
int blk_ind = blk * NB_X;
int ind = blk_ind + tx;
int blocks = gridDim.x;
// Don't write outside [0, ..., n)
if ( ind < n ) {
work += ind + blk*lda;
float Ax = MAGMA_S_ZERO;
for (int j = blk; j < blocks; ++j) {
Ax += work[0];
work += lda;
}
y[ind * incy] = beta*y[ind * incy] + alpha*Ax;
}
}
/***************************************************************************//**
Purpose
-------
magmablas_ssymv_work performs the matrix-vector operation:
y := alpha*A*x + beta*y,
where alpha and beta are scalars, x and y are n element vectors and
A is an n by n symmetric matrix.
Arguments
----------
@param[in]
uplo magma_uplo_t.
On entry, UPLO specifies whether the upper or lower
triangular part of the array A is to be referenced as
follows:
- = MagmaUpper: Only the upper triangular part of A is to be referenced.
- = MagmaLower: Only the lower triangular part of A is to be referenced.
@param[in]
n INTEGER.
On entry, N specifies the order of the matrix A.
N must be at least zero.
@param[in]
alpha REAL.
On entry, ALPHA specifies the scalar alpha.
@param[in]
dA REAL array of DIMENSION ( LDDA, n ).
Before entry with UPLO = MagmaUpper, the leading n by n
upper triangular part of the array A must contain the upper
triangular part of the symmetric matrix and the strictly
lower triangular part of A is not referenced.
Before entry with UPLO = MagmaLower, the leading n by n
lower triangular part of the array A must contain the lower
triangular part of the symmetric matrix and the strictly
upper triangular part of A is not referenced.
Note that the imaginary parts of the diagonal elements need
not be set and are assumed to be zero.
@param[in]
ldda INTEGER.
On entry, LDDA specifies the first dimension of A as declared
in the calling (sub) program. LDDA must be at least
max( 1, n ).
It is recommended that ldda is a multiple of 16; otherwise,
performance degrades because the memory accesses
are not fully coalesced.
@param[in]
dx REAL array of dimension at least
( 1 + ( n - 1 )*abs( INCX ) ).
Before entry, the incremented array X must contain the n
element vector x.
@param[in]
incx INTEGER.
On entry, INCX specifies the increment for the elements of
X. INCX must not be zero.
@param[in]
beta REAL.
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then Y need not be set on input.
@param[in,out]
dy REAL array of dimension at least
( 1 + ( n - 1 )*abs( INCY ) ).
Before entry, the incremented array Y must contain the n
element vector y. On exit, Y is overwritten by the updated
vector y.
@param[in]
incy INTEGER.
On entry, INCY specifies the increment for the elements of
Y. INCY must not be zero.
@param[in]
dwork (workspace) REAL array on the GPU, dimension (MAX(1, LWORK)),
@param[in]
lwork INTEGER.
The dimension of the array DWORK. LWORK >= LDDA * ceil( N / NB_X ),
where NB_X = 64.
@param[in]
queue magma_queue_t.
Queue to execute in.
MAGMA implements ssymv through two steps:
1) perform the multiplication in each thread block and put the
intermediate value in dwork.
2) sum the intermediate values and store the final result in y.
magmablas_ssymv_work requires users to provide a workspace, while
magmablas_ssymv is a wrapper routine that allocates the workspace inside the
routine and provides the same interface as cublas.
If users need to call ssymv frequently, we suggest using
magmablas_ssymv_work instead of magmablas_ssymv, as the overhead of
allocating and freeing device memory in magmablas_ssymv would hurt performance.
Our tests show that this penalty is about 10 Gflop/s when the matrix
size is around 10000.
@ingroup magma_hemv
*******************************************************************************/
extern "C"
magma_int_t
magmablas_ssymv_work(
magma_uplo_t uplo, magma_int_t n,
float alpha,
magmaFloat_const_ptr dA, magma_int_t ldda,
magmaFloat_const_ptr dx, magma_int_t incx,
float beta,
magmaFloat_ptr dy, magma_int_t incy,
magmaFloat_ptr dwork, magma_int_t lwork,
magma_queue_t queue )
{
#if defined(PRECISION_z)
// z precision requires CUDA ARCH 2.x; call CUBLAS version instead.
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
magma_ssymv( uplo, n, alpha, dA, ldda, dx, incx, beta, dy, incy, queue );
return MAGMA_SUCCESS;
}
#endif
// --------------------
// [sdc] precisions, or z precision with CUDA ARCH 2.x
bool upper = (uplo == MagmaUpper);
magma_int_t blocks = magma_ceildiv( n, NB_X );
magma_int_t lwmin = ldda*blocks;
/*
* Test the input parameters.
*/
magma_int_t info = 0;
if ((! upper) && (uplo != MagmaLower)) {
info = -1;
} else if ( n < 0 ) {
info = -2;
} else if ( ldda < max(1, n) ) {
info = -5;
} else if ( incx == 0 ) {
info = -7;
} else if ( incy == 0 ) {
info = -10;
} else if ( lwork < lwmin ) {
info = -12;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return info;
}
/*
* Quick return if possible.
*/
if ( (n == 0) || ( MAGMA_S_EQUAL(alpha, MAGMA_S_ZERO) && MAGMA_S_EQUAL(beta, MAGMA_S_ONE) ) )
return info;
dim3 grid( blocks, 1, 1 );
dim3 threads( NB_X, NB_Y, 1 );
dim3 threads_sum( NB_X, 1, 1 );
if ( upper ) {
ssymv_kernel_U<<< grid, threads, 0, queue->cuda_stream() >>>
(n, dA, ldda, dx, incx, dwork);
ssymv_kernel_U_sum<<< grid, threads_sum, 0, queue->cuda_stream() >>>
(n, alpha, ldda, beta, dy, incy, dwork);
}
else {
ssymv_kernel_L<<< grid, threads, 0, queue->cuda_stream() >>>
(n, dA, ldda, dx, incx, dwork);
ssymv_kernel_L_sum<<< grid, threads_sum, 0, queue->cuda_stream() >>>
(n, alpha, ldda, beta, dy, incy, dwork);
}
return info;
}
// end magmablas_ssymv_work
/***************************************************************************//**
Purpose
-------
magmablas_ssymv performs the matrix-vector operation:
y := alpha*A*x + beta*y,
where alpha and beta are scalars, x and y are n element vectors and
A is an n by n symmetric matrix.
Arguments
----------
@param[in]
uplo magma_uplo_t.
On entry, UPLO specifies whether the upper or lower
triangular part of the array A is to be referenced as
follows:
- = MagmaUpper: Only the upper triangular part of A is to be referenced.
- = MagmaLower: Only the lower triangular part of A is to be referenced.
@param[in]
n INTEGER.
On entry, N specifies the order of the matrix A.
N must be at least zero.
@param[in]
alpha REAL.
On entry, ALPHA specifies the scalar alpha.
@param[in]
dA REAL array of DIMENSION ( LDDA, n ).
Before entry with UPLO = MagmaUpper, the leading n by n
upper triangular part of the array A must contain the upper
triangular part of the symmetric matrix and the strictly
lower triangular part of A is not referenced.
Before entry with UPLO = MagmaLower, the leading n by n
lower triangular part of the array A must contain the lower
triangular part of the symmetric matrix and the strictly
upper triangular part of A is not referenced.
Note that the imaginary parts of the diagonal elements need
not be set and are assumed to be zero.
@param[in]
ldda INTEGER.
On entry, LDDA specifies the first dimension of A as declared
in the calling (sub) program. LDDA must be at least
max( 1, n ).
It is recommended that ldda is a multiple of 16; otherwise,
performance degrades because the memory accesses
are not fully coalesced.
@param[in]
dx REAL array of dimension at least
( 1 + ( n - 1 )*abs( INCX ) ).
Before entry, the incremented array X must contain the n
element vector x.
@param[in]
incx INTEGER.
On entry, INCX specifies the increment for the elements of
X. INCX must not be zero.
@param[in]
beta REAL.
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then Y need not be set on input.
@param[in,out]
dy REAL array of dimension at least
( 1 + ( n - 1 )*abs( INCY ) ).
Before entry, the incremented array Y must contain the n
element vector y. On exit, Y is overwritten by the updated
vector y.
@param[in]
incy INTEGER.
On entry, INCY specifies the increment for the elements of
Y. INCY must not be zero.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_hemv
*******************************************************************************/
extern "C"
magma_int_t
magmablas_ssymv(
magma_uplo_t uplo, magma_int_t n,
float alpha,
magmaFloat_const_ptr dA, magma_int_t ldda,
magmaFloat_const_ptr dx, magma_int_t incx,
float beta,
magmaFloat_ptr dy, magma_int_t incy,
magma_queue_t queue )
{
#if defined(PRECISION_z)
// z precision requires CUDA ARCH 2.x; call CUBLAS version instead.
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
magma_ssymv( uplo, n, alpha, dA, ldda, dx, incx, beta, dy, incy, queue );
return MAGMA_SUCCESS;
}
#endif
// --------------------
// [sdc] precisions, or z precision with CUDA ARCH 2.x
bool upper = (uplo == MagmaUpper);
/*
* Test the input parameters.
*/
magma_int_t info = 0;
if ((! upper) && (uplo != MagmaLower)) {
info = -1;
} else if ( n < 0 ) {
info = -2;
} else if ( ldda < max(1, n) ) {
info = -5;
} else if ( incx == 0 ) {
info = -7;
} else if ( incy == 0 ) {
info = -10;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return info;
}
/*
* Quick return if possible.
*/
if ( (n == 0) || ( MAGMA_S_EQUAL(alpha, MAGMA_S_ZERO) && MAGMA_S_EQUAL(beta, MAGMA_S_ONE) ) )
return info;
magmaFloat_ptr dwork;
magma_int_t blocks = magma_ceildiv( n, NB_X );
magma_int_t lwork = ldda*blocks;
magma_smalloc( &dwork, lwork );
if ( dwork == NULL ) {
info = MAGMA_ERR_DEVICE_ALLOC;
magma_xerbla( __func__, -(info) );
return info;
}
magmablas_ssymv_work( uplo, n, alpha, dA, ldda, dx, incx, beta, dy, incy,
dwork, lwork, queue );
magma_free( dwork );
return info;
}
// end magmablas_ssymv
|
e72f44c9a4eb5527146d5e464434d3d1e767cd13.hip | // !!! This is a file automatically generated by hipify!!!
/***************************************************************************************************
* Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
*modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice,
*this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
*notice, this list of conditions and the following disclaimer in the
*documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the names of its
*contributors may be used to endorse or promote products derived from this
*software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
*AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
*IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
*DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY DIRECT,
*INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
*DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
*OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
*NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
*EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*
This example demonstrates several CUTLASS utilities in the context of a
mixed-precision floating-point matrix product computation.
These utilities are intended to be useful supporting components for managing
tensor and matrix memory allocations, initializing and comparing results, and
computing reference output.
CUTLASS utilities are defined in the directory `tools/util`, and definitions
appear in namespace `cutlass::` or an inner namespace therein. Operations in
`cutlass::reference::` have both host-side and device-side implementations,
and the choice to use device-side initialization and host-side verification in
this example was arbitrary.
cutlass::half_t
This is a numeric type implementing IEEE half-precision quantities. It is
functional in host and device code. In host-side code, CUTLASS_ENABLE_F16C
optionally enables hardware-accelerated numeric conversion on x86-64 CPUs that
support F16C extensions. In device code, all available hardware is used to
implement conversion and numeric operations.
cutlass::HostTensor<>
This template class simplifies the creation of tensors for all supported
layouts. It simplifies allocation and management of host- and device- memory
allocations.
This class offers methods device_view() and host_view() to provide
TensorView objects for device- and host-side memory allocations.
cutlass::reference::device::TensorFillRandomGaussian()
This template function initializes elements of a tensor to a random Gaussian
distribution. It uses cuRAND in device code to compute random numbers.
cutlass::reference::host::Gemm<>
This template function computes the general matrix product. This template
supports unique data types for each matrix operand, the internal accumulation
type, and the scalar parameters alpha and beta.
cutlass::reference::host::TensorEquals()
Compares two tensors of identical rank and returns true if values are bit
equivalent.
*/
// Standard Library includes
#include <iostream>
#include <sstream>
#include <vector>
#include <fstream>
// CUTLASS includes needed for half-precision GEMM kernel
#include "cutlass/cutlass.h"
#include "cutlass/core_io.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/gemm/device/gemm.h"
//
// CUTLASS utility includes
//
// Defines operator<<() to write TensorView objects to std::ostream
#include "cutlass/util/tensor_view_io.h"
// Defines cutlass::HostTensor<>
#include "cutlass/util/host_tensor.h"
// Defines cutlass::half_t
#include "cutlass/numeric_types.h"
// Defines device_memory::copy_device_to_device()
#include "cutlass/util/device_memory.h"
// Defines cutlass::reference::device::TensorFillRandomGaussian()
#include "cutlass/util/reference/device/tensor_fill.h"
// Defines cutlass::reference::host::TensorEquals()
#include "cutlass/util/reference/host/tensor_compare.h"
// Defines cutlass::reference::host::Gemm()
#include "cutlass/util/reference/host/gemm.h"
#pragma warning(disable : 4503)
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Define a CUTLASS GEMM template and launch a GEMM kernel.
hipError_t cutlass_hgemm_nn(int M, int N, int K, cutlass::half_t alpha,
cutlass::half_t const* A,
cutlass::layout::ColumnMajor::Stride::Index lda,
cutlass::half_t const* B,
cutlass::layout::ColumnMajor::Stride::Index ldb,
cutlass::half_t beta, cutlass::half_t* C,
cutlass::layout::ColumnMajor::Stride::Index ldc) {
// Define the GEMM operation
using Gemm = cutlass::gemm::device::Gemm<
cutlass::half_t, // ElementA
cutlass::layout::ColumnMajor, // LayoutA
cutlass::half_t, // ElementB
cutlass::layout::ColumnMajor, // LayoutB
cutlass::half_t, // ElementOutput
cutlass::layout::ColumnMajor // LayoutOutput
>;
Gemm gemm_op;
cutlass::Status status = gemm_op(
{{M, N, K}, {A, lda}, {B, ldb}, {C, ldc}, {C, ldc}, {alpha, beta}});
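  // The arguments above are, in order: the GEMM problem size {M, N, K},
  // TensorRefs for A, B, the source C and the destination D (aliased to C
  // here, so the result overwrites C), and the epilogue scalars {alpha, beta}.
  // With the defaulted epilogue this computes D = alpha * A * B + beta * C.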
if (status != cutlass::Status::kSuccess) {
return hipErrorUnknown;
}
return hipSuccess;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Allocate several matrices in GPU device memory and call a single-precision
/// CUTLASS GEMM kernel.
hipError_t TestCutlassGemm(int M, int N, int K, cutlass::half_t alpha,
cutlass::half_t beta) {
hipError_t result;
//
// Construct cutlass::HostTensor<> using the half-precision host-side type.
//
// cutlass::HostTensor<> allocates memory on both the host and device
// corresponding to rank=2 tensors in column-major layout. Explicit
// synchronization methods are offered to copy the tensor to the device or
// to the host.
//
// M-by-K matrix of cutlass::half_t
cutlass::HostTensor<cutlass::half_t, cutlass::layout::ColumnMajor> A(
cutlass::MatrixCoord(M, K));
// K-by-N matrix of cutlass::half_t
cutlass::HostTensor<cutlass::half_t, cutlass::layout::ColumnMajor> B(
cutlass::MatrixCoord(K, N));
// M-by-N matrix of cutlass::half_t
cutlass::HostTensor<cutlass::half_t, cutlass::layout::ColumnMajor>
C_cutlass(cutlass::MatrixCoord(M, N));
// M-by-N matrix of cutlass::half_t
cutlass::HostTensor<cutlass::half_t, cutlass::layout::ColumnMajor>
C_reference(cutlass::MatrixCoord(M, N));
//
// Initialize matrices with small, random integers.
//
// Arbitrary RNG seed value. Hard-coded for deterministic results.
uint64_t seed = 2080;
// Gaussian random distribution
cutlass::half_t mean = 0.0_hf;
cutlass::half_t stddev = 5.0_hf;
// Specify the number of bits right of the binary point that are permitted
// to be non-zero. A value of "0" here truncates random values to integers
int bits_less_than_one = 0;
cutlass::reference::device::TensorFillRandomGaussian(
A.device_view(), seed, mean, stddev, bits_less_than_one);
cutlass::reference::device::TensorFillRandomGaussian(
B.device_view(), seed * 2019, mean, stddev, bits_less_than_one);
cutlass::reference::device::TensorFillRandomGaussian(
C_cutlass.device_view(), seed * 1993, mean, stddev,
bits_less_than_one);
// Copy C_cutlass into C_reference so the GEMM is correct when beta != 0.
cutlass::device_memory::copy_device_to_device(C_reference.device_data(),
C_cutlass.device_data(),
C_cutlass.capacity());
// Copy the device-side view into host memory
C_reference.sync_host();
//
// Launch the CUTLASS GEMM kernel
//
result = cutlass_hgemm_nn(M, N, K, alpha, A.device_data(), A.stride(0),
B.device_data(), B.stride(0), beta,
C_cutlass.device_data(), C_cutlass.stride(0));
if (result != hipSuccess) {
return result;
}
//
// Verify the result using a host-side reference
//
// A and B were initialized using device-side procedures. The intent of this
// example is to use the host-side reference GEMM, so we must perform a
// device-to-host copy.
A.sync_host();
B.sync_host();
// Copy CUTLASS's GEMM results into host memory.
C_cutlass.sync_host();
// Compute the reference result using the host-side GEMM reference
// implementation.
cutlass::reference::host::Gemm<
cutlass::half_t, // ElementA
cutlass::layout::ColumnMajor, // LayoutA
cutlass::half_t, // ElementB
cutlass::layout::ColumnMajor, // LayoutB
cutlass::half_t, // ElementOutput
cutlass::layout::ColumnMajor, // LayoutOutput
cutlass::half_t, cutlass::half_t>
gemm_ref;
gemm_ref({M, N, K}, // problem size (type: cutlass::gemm::GemmCoord)
alpha, // alpha (type: cutlass::half_t)
A.host_ref(), // A (type: TensorRef<half_t,
// ColumnMajor>)
B.host_ref(), // B (type: TensorRef<half_t,
// ColumnMajor>)
beta, // beta (type: cutlass::half_t)
C_reference.host_ref() // C (type: TensorRef<half_t,
// ColumnMajor>)
);
// Compare reference to computed results.
if (!cutlass::reference::host::TensorEquals(C_reference.host_view(),
C_cutlass.host_view())) {
char const* filename = "errors_01_cutlass_utilities.csv";
std::cerr << "Error - CUTLASS GEMM kernel differs from reference. "
"Wrote computed and reference results to '"
<< filename << "'" << std::endl;
//
// On error, print C_cutlass and C_reference to std::cerr.
//
// Note, these are matrices of half-precision elements stored in host
// memory as arrays of type cutlass::half_t.
//
std::ofstream file(filename);
// Result of CUTLASS GEMM kernel
file << "\n\nCUTLASS =\n" << C_cutlass.host_view() << std::endl;
// Result of reference computation
file << "\n\nReference =\n" << C_reference.host_view() << std::endl;
// Return error code.
return hipErrorUnknown;
}
// Passed error check
return hipSuccess;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Entry point to cutlass_utilities example.
//
// usage:
//
// 01_cutlass_utilities <M> <N> <K> <alpha> <beta>
//
int main(int argc, const char* arg[]) {
//
// This example uses half-precision and is only suitable for devices with
// compute capability 5.3 or greater.
//
hipDeviceProp_t prop;
hipError_t result = hipGetDeviceProperties(&prop, 0);
if (result != hipSuccess) {
std::cerr << "Failed to query device properties with error "
<< hipGetErrorString(result) << std::endl;
return -1;
}
if (!(prop.major > 5 || (prop.major == 5 && prop.minor >= 3))) {
std::cerr << "This example uses half precision and is only suitable "
"for devices with compute capability 5.3 or greater.\n";
std::cerr << "You are using a CUDA device with compute capability "
<< prop.major << "." << prop.minor << std::endl;
return -1;
}
//
// Parse the command line to obtain GEMM dimensions and scalar values.
//
// GEMM problem dimensions: <M> <N> <K>
int problem[3] = {128, 128, 128};
for (int i = 1; i < argc && i < 4; ++i) {
std::stringstream ss(arg[i]);
ss >> problem[i - 1];
}
// Linear scale factors in GEMM. Note, these are half-precision values
// stored as cutlass::half_t.
//
// Values outside the range of IEEE FP16 will overflow to infinity or
// underflow to zero.
//
cutlass::half_t scalars[2] = {1.0_hf, 0.0_hf};
for (int i = 4; i < argc && i < 6; ++i) {
std::stringstream ss(arg[i]);
ss >> scalars[i - 4]; // lexical cast to cutlass::half_t
}
//
// Run the CUTLASS GEMM test.
//
result = TestCutlassGemm(problem[0], // GEMM M dimension
problem[1], // GEMM N dimension
problem[2], // GEMM K dimension
scalars[0], // alpha
scalars[1] // beta
);
if (result == hipSuccess) {
std::cout << "Passed." << std::endl;
}
// Exit.
return result == hipSuccess ? 0 : -1;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
| e72f44c9a4eb5527146d5e464434d3d1e767cd13.cu | /***************************************************************************************************
* Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
*modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice,
*this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
*notice, this list of conditions and the following disclaimer in the
*documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the names of its
*contributors may be used to endorse or promote products derived from this
*software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
*AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
*IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
*DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY DIRECT,
*INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
*DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
*OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
*NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
*EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*
This example demonstrates several CUTLASS utilities in the context of a
mixed-precision floating-point matrix product computation.
These utilities are intended to be useful supporting components for managing
tensor and matrix memory allocations, initializing and comparing results, and
computing reference output.
CUTLASS utilities are defined in the directory `tools/util`, and definitions
appear in namespace `cutlass::` or an inner namespace therein. Operations in
`cutlass::reference::` have both host-side and device-side implementations,
and the choice to use device-side initialization and host-side verification in
this example was arbitrary.
cutlass::half_t
This is a numeric type implementing IEEE half-precision quantities. It is
functional in host and device code. In host-side code, CUTLASS_ENABLE_F16C
optionally enables hardware-accelerated numeric conversion on x86-64 CPUs that
support F16C extensions. In device code, all available hardware is used to
implement conversion and numeric operations.
cutlass::HostTensor<>
This template class simplifies the creation of tensors for all supported
layouts. It simplifies allocation and management of host- and device- memory
allocations.
This class offers methods device_view() and host_view() to provide
TensorView objects for device- and host-side memory allocations.
cutlass::reference::device::TensorFillRandomGaussian()
This template function initializes elements of a tensor to a random Gaussian
distribution. It uses cuRAND in device code to compute random numbers.
cutlass::reference::host::Gemm<>
This template function computes the general matrix product. This template
supports unique data types for each matrix operand, the internal accumulation
type, and the scalar parameters alpha and beta.
cutlass::reference::host::TensorEquals()
Compares two tensors of identical rank and returns true if values are bit
equivalent.
*/
// Standard Library includes
#include <iostream>
#include <sstream>
#include <vector>
#include <fstream>
// CUTLASS includes needed for half-precision GEMM kernel
#include "cutlass/cutlass.h"
#include "cutlass/core_io.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/gemm/device/gemm.h"
//
// CUTLASS utility includes
//
// Defines operator<<() to write TensorView objects to std::ostream
#include "cutlass/util/tensor_view_io.h"
// Defines cutlass::HostTensor<>
#include "cutlass/util/host_tensor.h"
// Defines cutlass::half_t
#include "cutlass/numeric_types.h"
// Defines device_memory::copy_device_to_device()
#include "cutlass/util/device_memory.h"
// Defines cutlass::reference::device::TensorFillRandomGaussian()
#include "cutlass/util/reference/device/tensor_fill.h"
// Defines cutlass::reference::host::TensorEquals()
#include "cutlass/util/reference/host/tensor_compare.h"
// Defines cutlass::reference::host::Gemm()
#include "cutlass/util/reference/host/gemm.h"
#pragma warning(disable : 4503)
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Define a CUTLASS GEMM template and launch a GEMM kernel.
cudaError_t cutlass_hgemm_nn(int M, int N, int K, cutlass::half_t alpha,
cutlass::half_t const* A,
cutlass::layout::ColumnMajor::Stride::Index lda,
cutlass::half_t const* B,
cutlass::layout::ColumnMajor::Stride::Index ldb,
cutlass::half_t beta, cutlass::half_t* C,
cutlass::layout::ColumnMajor::Stride::Index ldc) {
// Define the GEMM operation
using Gemm = cutlass::gemm::device::Gemm<
cutlass::half_t, // ElementA
cutlass::layout::ColumnMajor, // LayoutA
cutlass::half_t, // ElementB
cutlass::layout::ColumnMajor, // LayoutB
cutlass::half_t, // ElementOutput
cutlass::layout::ColumnMajor // LayoutOutput
>;
Gemm gemm_op;
cutlass::Status status = gemm_op(
{{M, N, K}, {A, lda}, {B, ldb}, {C, ldc}, {C, ldc}, {alpha, beta}});
if (status != cutlass::Status::kSuccess) {
return cudaErrorUnknown;
}
return cudaSuccess;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Allocate several matrices in GPU device memory and call a single-precision
/// CUTLASS GEMM kernel.
cudaError_t TestCutlassGemm(int M, int N, int K, cutlass::half_t alpha,
cutlass::half_t beta) {
cudaError_t result;
//
// Construct cutlass::HostTensor<> using the half-precision host-side type.
//
// cutlass::HostTensor<> allocates memory on both the host and device
// corresponding to rank=2 tensors in column-major layout. Explicit
// synchronization methods are offered to copy the tensor to the device or
// to the host.
//
// M-by-K matrix of cutlass::half_t
cutlass::HostTensor<cutlass::half_t, cutlass::layout::ColumnMajor> A(
cutlass::MatrixCoord(M, K));
// K-by-N matrix of cutlass::half_t
cutlass::HostTensor<cutlass::half_t, cutlass::layout::ColumnMajor> B(
cutlass::MatrixCoord(K, N));
// M-by-N matrix of cutlass::half_t
cutlass::HostTensor<cutlass::half_t, cutlass::layout::ColumnMajor>
C_cutlass(cutlass::MatrixCoord(M, N));
// M-by-N matrix of cutlass::half_t
cutlass::HostTensor<cutlass::half_t, cutlass::layout::ColumnMajor>
C_reference(cutlass::MatrixCoord(M, N));
//
// Initialize matrices with small, random integers.
//
// Arbitrary RNG seed value. Hard-coded for deterministic results.
uint64_t seed = 2080;
// Gaussian random distribution
cutlass::half_t mean = 0.0_hf;
cutlass::half_t stddev = 5.0_hf;
// Specify the number of bits right of the binary point that are permitted
// to be non-zero. A value of "0" here truncates random values to integers
int bits_less_than_one = 0;
cutlass::reference::device::TensorFillRandomGaussian(
A.device_view(), seed, mean, stddev, bits_less_than_one);
cutlass::reference::device::TensorFillRandomGaussian(
B.device_view(), seed * 2019, mean, stddev, bits_less_than_one);
cutlass::reference::device::TensorFillRandomGaussian(
C_cutlass.device_view(), seed * 1993, mean, stddev,
bits_less_than_one);
// Copy C_cutlass into C_reference so the GEMM is correct when beta != 0.
cutlass::device_memory::copy_device_to_device(C_reference.device_data(),
C_cutlass.device_data(),
C_cutlass.capacity());
// Copy the device-side view into host memory
C_reference.sync_host();
//
// Launch the CUTLASS GEMM kernel
//
result = cutlass_hgemm_nn(M, N, K, alpha, A.device_data(), A.stride(0),
B.device_data(), B.stride(0), beta,
C_cutlass.device_data(), C_cutlass.stride(0));
if (result != cudaSuccess) {
return result;
}
//
// Verify the result using a host-side reference
//
// A and B were initialized using device-side procedures. The intent of this
// example is to use the host-side reference GEMM, so we must perform a
// device-to-host copy.
A.sync_host();
B.sync_host();
// Copy CUTLASS's GEMM results into host memory.
C_cutlass.sync_host();
// Compute the reference result using the host-side GEMM reference
// implementation.
cutlass::reference::host::Gemm<
cutlass::half_t, // ElementA
cutlass::layout::ColumnMajor, // LayoutA
cutlass::half_t, // ElementB
cutlass::layout::ColumnMajor, // LayoutB
cutlass::half_t, // ElementOutput
cutlass::layout::ColumnMajor, // LayoutOutput
cutlass::half_t, cutlass::half_t>
gemm_ref;
gemm_ref({M, N, K}, // problem size (type: cutlass::gemm::GemmCoord)
alpha, // alpha (type: cutlass::half_t)
A.host_ref(), // A (type: TensorRef<half_t,
// ColumnMajor>)
B.host_ref(), // B (type: TensorRef<half_t,
// ColumnMajor>)
beta, // beta (type: cutlass::half_t)
C_reference.host_ref() // C (type: TensorRef<half_t,
// ColumnMajor>)
);
// Compare reference to computed results.
if (!cutlass::reference::host::TensorEquals(C_reference.host_view(),
C_cutlass.host_view())) {
char const* filename = "errors_01_cutlass_utilities.csv";
std::cerr << "Error - CUTLASS GEMM kernel differs from reference. "
"Wrote computed and reference results to '"
<< filename << "'" << std::endl;
//
// On error, print C_cutlass and C_reference to std::cerr.
//
// Note, these are matrices of half-precision elements stored in host
// memory as arrays of type cutlass::half_t.
//
std::ofstream file(filename);
// Result of CUTLASS GEMM kernel
file << "\n\nCUTLASS =\n" << C_cutlass.host_view() << std::endl;
// Result of reference computation
file << "\n\nReference =\n" << C_reference.host_view() << std::endl;
// Return error code.
return cudaErrorUnknown;
}
// Passed error check
return cudaSuccess;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Entry point to cutlass_utilities example.
//
// usage:
//
// 01_cutlass_utilities <M> <N> <K> <alpha> <beta>
//
int main(int argc, const char* arg[]) {
//
// This example uses half-precision and is only suitable for devices with
// compute capability 5.3 or greater.
//
cudaDeviceProp prop;
cudaError_t result = cudaGetDeviceProperties(&prop, 0);
if (result != cudaSuccess) {
std::cerr << "Failed to query device properties with error "
<< cudaGetErrorString(result) << std::endl;
return -1;
}
if (!(prop.major > 5 || (prop.major == 5 && prop.minor >= 3))) {
std::cerr << "This example uses half precision and is only suitable "
"for devices with compute capability 5.3 or greater.\n";
std::cerr << "You are using a CUDA device with compute capability "
<< prop.major << "." << prop.minor << std::endl;
return -1;
}
//
// Parse the command line to obtain GEMM dimensions and scalar values.
//
// GEMM problem dimensions: <M> <N> <K>
int problem[3] = {128, 128, 128};
for (int i = 1; i < argc && i < 4; ++i) {
std::stringstream ss(arg[i]);
ss >> problem[i - 1];
}
// Linear scale factors in GEMM. Note, these are half-precision values
// stored as cutlass::half_t.
//
// Values outside the range of IEEE FP16 will overflow to infinity or
// underflow to zero.
//
cutlass::half_t scalars[2] = {1.0_hf, 0.0_hf};
for (int i = 4; i < argc && i < 6; ++i) {
std::stringstream ss(arg[i]);
ss >> scalars[i - 4]; // lexical cast to cutlass::half_t
}
//
// Run the CUTLASS GEMM test.
//
result = TestCutlassGemm(problem[0], // GEMM M dimension
problem[1], // GEMM N dimension
problem[2], // GEMM K dimension
scalars[0], // alpha
scalars[1] // beta
);
if (result == cudaSuccess) {
std::cout << "Passed." << std::endl;
}
// Exit.
return result == cudaSuccess ? 0 : -1;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
|
98d3bc6e181186b07b131a6efb1f7fbe4a0412d7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//********************************************************//
// CUDA SIFT extractor by Marten Bjorkman aka Celebrandil //
//********************************************************//
#include "cudautils.h"
#include "cudaSiftD.h"
#include "cudaSift.h"
///////////////////////////////////////////////////////////////////////////////
// Kernel configuration
///////////////////////////////////////////////////////////////////////////////
__constant__ float d_Threshold[2];
__constant__ float d_Scales[8], d_Factor;
__constant__ float d_EdgeLimit;
__constant__ int d_MaxNumPoints;
__device__ unsigned int d_PointCounter[1];
__constant__ float d_Kernel1[5];
__constant__ float d_Kernel2[12*16];
///////////////////////////////////////////////////////////////////////////////
// Lowpass filter and subsample image
///////////////////////////////////////////////////////////////////////////////
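// ScaleDown below performs the separable 5-tap low-pass in d_Kernel1 followed
// by a 2x subsampling. Each thread block streams SCALEDOWN_H+4 padded input
// rows through the shared row buffer inrow, keeps the five most recently
// horizontally filtered half-resolution rows in brow, and emits one output row
// per two input rows by filtering vertically across those five buffered rows,
// so each input row is read from global memory only once per block.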
__global__ void ScaleDown(float *d_Result, float *d_Data, int width, int pitch, int height, int newpitch)
{
__shared__ float inrow[SCALEDOWN_W+4];
__shared__ float brow[5*(SCALEDOWN_W/2)];
__shared__ int yRead[SCALEDOWN_H+4];
__shared__ int yWrite[SCALEDOWN_H+4];
#define dx2 (SCALEDOWN_W/2)
const int tx = threadIdx.x;
const int tx0 = tx + 0*dx2;
const int tx1 = tx + 1*dx2;
const int tx2 = tx + 2*dx2;
const int tx3 = tx + 3*dx2;
const int tx4 = tx + 4*dx2;
const int xStart = blockIdx.x*SCALEDOWN_W;
const int yStart = blockIdx.y*SCALEDOWN_H;
const int xWrite = xStart/2 + tx;
const float *k = d_Kernel1;
if (tx<SCALEDOWN_H+4) {
int y = yStart + tx - 1;
y = (y<0 ? 0 : y);
y = (y>=height ? height-1 : y);
yRead[tx] = y*pitch;
yWrite[tx] = (yStart + tx - 4)/2 * newpitch;
}
__syncthreads();
int xRead = xStart + tx - 2;
xRead = (xRead<0 ? 0 : xRead);
xRead = (xRead>=width ? width-1 : xRead);
for (int dy=0;dy<SCALEDOWN_H+4;dy+=5) {
inrow[tx] = d_Data[yRead[dy+0] + xRead];
__syncthreads();
if (tx<dx2)
brow[tx0] = k[0]*(inrow[2*tx]+inrow[2*tx+4]) + k[1]*(inrow[2*tx+1]+inrow[2*tx+3]) + k[2]*inrow[2*tx+2];
__syncthreads();
if (tx<dx2 && dy>=4 && !(dy&1))
d_Result[yWrite[dy+0] + xWrite] = k[2]*brow[tx2] + k[0]*(brow[tx0]+brow[tx4]) + k[1]*(brow[tx1]+brow[tx3]);
if (dy<(SCALEDOWN_H+3)) {
inrow[tx] = d_Data[yRead[dy+1] + xRead];
__syncthreads();
if (tx<dx2)
brow[tx1] = k[0]*(inrow[2*tx]+inrow[2*tx+4]) + k[1]*(inrow[2*tx+1]+inrow[2*tx+3]) + k[2]*inrow[2*tx+2];
__syncthreads();
if (tx<dx2 && dy>=3 && (dy&1))
d_Result[yWrite[dy+1] + xWrite] = k[2]*brow[tx3] + k[0]*(brow[tx1]+brow[tx0]) + k[1]*(brow[tx2]+brow[tx4]);
}
if (dy<(SCALEDOWN_H+2)) {
inrow[tx] = d_Data[yRead[dy+2] + xRead];
__syncthreads();
if (tx<dx2)
brow[tx2] = k[0]*(inrow[2*tx]+inrow[2*tx+4]) + k[1]*(inrow[2*tx+1]+inrow[2*tx+3]) + k[2]*inrow[2*tx+2];
__syncthreads();
if (tx<dx2 && dy>=2 && !(dy&1))
d_Result[yWrite[dy+2] + xWrite] = k[2]*brow[tx4] + k[0]*(brow[tx2]+brow[tx1]) + k[1]*(brow[tx3]+brow[tx0]);
}
if (dy<(SCALEDOWN_H+1)) {
inrow[tx] = d_Data[yRead[dy+3] + xRead];
__syncthreads();
if (tx<dx2)
brow[tx3] = k[0]*(inrow[2*tx]+inrow[2*tx+4]) + k[1]*(inrow[2*tx+1]+inrow[2*tx+3]) + k[2]*inrow[2*tx+2];
__syncthreads();
if (tx<dx2 && dy>=1 && (dy&1))
d_Result[yWrite[dy+3] + xWrite] = k[2]*brow[tx0] + k[0]*(brow[tx3]+brow[tx2]) + k[1]*(brow[tx4]+brow[tx1]);
}
if (dy<SCALEDOWN_H) {
inrow[tx] = d_Data[yRead[dy+4] + xRead];
__syncthreads();
if (tx<dx2)
brow[tx4] = k[0]*(inrow[2*tx]+inrow[2*tx+4]) + k[1]*(inrow[2*tx+1]+inrow[2*tx+3]) + k[2]*inrow[2*tx+2];
__syncthreads();
if (tx<dx2 && !(dy&1))
d_Result[yWrite[dy+4] + xWrite] = k[2]*brow[tx1] + k[0]*(brow[tx4]+brow[tx3]) + k[1]*(brow[tx0]+brow[tx2]);
}
__syncthreads();
}
}
__global__ void ScaleUp(float *d_Result, float *d_Data, int width, int pitch, int height, int newpitch)
{
#define BW (SCALEUP_W/2 + 2)
#define BH (SCALEUP_H/2 + 2)
__shared__ float buffer[BW*BH];
const int tx = threadIdx.x;
const int ty = threadIdx.y;
if (tx<BW && ty<BH) {
int x = min(max(blockIdx.x*(SCALEUP_W/2) + tx - 1, 0), width-1);
int y = min(max(blockIdx.y*(SCALEUP_H/2) + ty - 1, 0), height-1);
buffer[ty*BW + tx] = d_Data[y*pitch + x];
}
__syncthreads();
int x = blockIdx.x*SCALEUP_W + tx;
int y = blockIdx.y*SCALEUP_H + ty;
if (x<2*width && y<2*height) {
int bx = (tx + 1)/2;
int by = (ty + 1)/2;
int bp = by*BW + bx;
float wx = 0.25f + (tx&1)*0.50f;
float wy = 0.25f + (ty&1)*0.50f;
d_Result[y*newpitch + x] = wy*(wx*buffer[bp] + (1.0f-wx)*buffer[bp+1]) +
(1.0f-wy)*(wx*buffer[bp+BW] + (1.0f-wx)*buffer[bp+BW+1]);
}
}
__global__ void ExtractSiftDescriptors(hipTextureObject_t texObj, SiftPoint *d_sift, int fstPts, float subsampling)
{
__shared__ float gauss[16];
__shared__ float buffer[128];
__shared__ float sums[4];
const int tx = threadIdx.x; // 0 -> 16
const int ty = threadIdx.y; // 0 -> 8
const int idx = ty*16 + tx;
const int bx = blockIdx.x + fstPts; // 0 -> numPts
if (ty==0)
gauss[tx] = exp(-(tx-7.5f)*(tx-7.5f)/128.0f);
buffer[idx] = 0.0f;
__syncthreads();
// Compute angles and gradients
float theta = 2.0f*3.1415f/360.0f*d_sift[bx].orientation;
float sina = sinf(theta); // cosa -sina
float cosa = cosf(theta); // sina cosa
float scale = 12.0f/16.0f*d_sift[bx].scale;
float ssina = scale*sina;
float scosa = scale*cosa;
for (int y=ty;y<16;y+=8) {
float xpos = d_sift[bx].xpos + (tx-7.5f)*scosa - (y-7.5f)*ssina;
float ypos = d_sift[bx].ypos + (tx-7.5f)*ssina + (y-7.5f)*scosa;
float dx = tex2D<float>(texObj, xpos+cosa, ypos+sina) -
tex2D<float>(texObj, xpos-cosa, ypos-sina);
float dy = tex2D<float>(texObj, xpos-sina, ypos+cosa) -
tex2D<float>(texObj, xpos+sina, ypos-cosa);
float grad = gauss[y]*gauss[tx] * sqrtf(dx*dx + dy*dy);
float angf = 4.0f/3.1415f*atan2f(dy, dx) + 4.0f;
int hori = (tx + 2)/4 - 1; // Convert from (tx,y,angle) to bins
float horf = (tx - 1.5f)/4.0f - hori;
float ihorf = 1.0f - horf;
int veri = (y + 2)/4 - 1;
float verf = (y - 1.5f)/4.0f - veri;
float iverf = 1.0f - verf;
int angi = angf;
int angp = (angi<7 ? angi+1 : 0);
angf -= angi;
float iangf = 1.0f - angf;
int hist = 8*(4*veri + hori); // Each gradient measure is interpolated
int p1 = angi + hist; // in angles, xpos and ypos -> 8 stores
int p2 = angp + hist;
if (tx>=2) {
float grad1 = ihorf*grad;
if (y>=2) { // Upper left
float grad2 = iverf*grad1;
atomicAdd(buffer + p1, iangf*grad2);
atomicAdd(buffer + p2, angf*grad2);
}
if (y<=13) { // Lower left
float grad2 = verf*grad1;
atomicAdd(buffer + p1+32, iangf*grad2);
atomicAdd(buffer + p2+32, angf*grad2);
}
}
if (tx<=13) {
float grad1 = horf*grad;
if (y>=2) { // Upper right
float grad2 = iverf*grad1;
atomicAdd(buffer + p1+8, iangf*grad2);
atomicAdd(buffer + p2+8, angf*grad2);
}
if (y<=13) { // Lower right
float grad2 = verf*grad1;
atomicAdd(buffer + p1+40, iangf*grad2);
atomicAdd(buffer + p2+40, angf*grad2);
}
}
}
__syncthreads();
// Normalize twice and suppress peaks first time
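  // The 128-element descriptor is L2-normalized in two passes: a butterfly
  // reduction with __shfl_xor sums the squares within each 32-thread warp,
  // the four per-warp partial sums are combined through sums[], values are
  // clamped to 0.2 (standard SIFT peak suppression), and the result is
  // normalized again. On newer CUDA toolchains this intrinsic would normally
  // be spelled __shfl_xor_sync with an explicit lane mask.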
float sum = buffer[idx]*buffer[idx];
for (int i=1;i<=16;i*=2)
sum += __shfl_xor(sum, i);
if ((idx&31)==0)
sums[idx/32] = sum;
__syncthreads();
float tsum1 = sums[0] + sums[1] + sums[2] + sums[3];
tsum1 = min(buffer[idx] * rsqrtf(tsum1), 0.2f);
sum = tsum1*tsum1;
for (int i=1;i<=16;i*=2)
sum += __shfl_xor(sum, i);
if ((idx&31)==0)
sums[idx/32] = sum;
__syncthreads();
float tsum2 = sums[0] + sums[1] + sums[2] + sums[3];
float *desc = d_sift[bx].data;
desc[idx] = tsum1 * rsqrtf(tsum2);
if (idx==0) {
d_sift[bx].xpos *= subsampling;
d_sift[bx].ypos *= subsampling;
d_sift[bx].scale *= subsampling;
}
}
__global__ void ExtractSiftDescriptorsOld(hipTextureObject_t texObj, SiftPoint *d_sift, int fstPts, float subsampling)
{
__shared__ float gauss[16];
__shared__ float buffer[128];
__shared__ float sums[128];
const int tx = threadIdx.x; // 0 -> 16
const int ty = threadIdx.y; // 0 -> 8
const int idx = ty*16 + tx;
const int bx = blockIdx.x + fstPts; // 0 -> numPts
if (ty==0)
gauss[tx] = exp(-(tx-7.5f)*(tx-7.5f)/128.0f);
buffer[idx] = 0.0f;
__syncthreads();
// Compute angles and gradients
float theta = 2.0f*3.1415f/360.0f*d_sift[bx].orientation;
float sina = sinf(theta); // cosa -sina
float cosa = cosf(theta); // sina cosa
float scale = 12.0f/16.0f*d_sift[bx].scale;
float ssina = scale*sina;
float scosa = scale*cosa;
for (int y=ty;y<16;y+=8) {
float xpos = d_sift[bx].xpos + (tx-7.5f)*scosa - (y-7.5f)*ssina;
float ypos = d_sift[bx].ypos + (tx-7.5f)*ssina + (y-7.5f)*scosa;
float dx = tex2D<float>(texObj, xpos+cosa, ypos+sina) -
tex2D<float>(texObj, xpos-cosa, ypos-sina);
float dy = tex2D<float>(texObj, xpos-sina, ypos+cosa) -
tex2D<float>(texObj, xpos+sina, ypos-cosa);
float grad = gauss[y]*gauss[tx] * sqrtf(dx*dx + dy*dy);
float angf = 4.0f/3.1415f*atan2f(dy, dx) + 4.0f;
int hori = (tx + 2)/4 - 1; // Convert from (tx,y,angle) to bins
float horf = (tx - 1.5f)/4.0f - hori;
float ihorf = 1.0f - horf;
int veri = (y + 2)/4 - 1;
float verf = (y - 1.5f)/4.0f - veri;
float iverf = 1.0f - verf;
int angi = angf;
int angp = (angi<7 ? angi+1 : 0);
angf -= angi;
float iangf = 1.0f - angf;
int hist = 8*(4*veri + hori); // Each gradient measure is interpolated
int p1 = angi + hist; // in angles, xpos and ypos -> 8 stores
int p2 = angp + hist;
if (tx>=2) {
float grad1 = ihorf*grad;
if (y>=2) { // Upper left
float grad2 = iverf*grad1;
atomicAdd(buffer + p1, iangf*grad2);
atomicAdd(buffer + p2, angf*grad2);
}
if (y<=13) { // Lower left
float grad2 = verf*grad1;
atomicAdd(buffer + p1+32, iangf*grad2);
atomicAdd(buffer + p2+32, angf*grad2);
}
}
if (tx<=13) {
float grad1 = horf*grad;
if (y>=2) { // Upper right
float grad2 = iverf*grad1;
atomicAdd(buffer + p1+8, iangf*grad2);
atomicAdd(buffer + p2+8, angf*grad2);
}
if (y<=13) { // Lower right
float grad2 = verf*grad1;
atomicAdd(buffer + p1+40, iangf*grad2);
atomicAdd(buffer + p2+40, angf*grad2);
}
}
}
__syncthreads();
// Normalize twice and suppress peaks first time
if (idx<64)
sums[idx] = buffer[idx]*buffer[idx] + buffer[idx+64]*buffer[idx+64];
__syncthreads();
if (idx<32) sums[idx] = sums[idx] + sums[idx+32];
__syncthreads();
if (idx<16) sums[idx] = sums[idx] + sums[idx+16];
__syncthreads();
if (idx<8) sums[idx] = sums[idx] + sums[idx+8];
__syncthreads();
if (idx<4) sums[idx] = sums[idx] + sums[idx+4];
__syncthreads();
float tsum1 = sums[0] + sums[1] + sums[2] + sums[3];
buffer[idx] = buffer[idx] * rsqrtf(tsum1);
if (buffer[idx]>0.2f)
buffer[idx] = 0.2f;
__syncthreads();
if (idx<64)
sums[idx] = buffer[idx]*buffer[idx] + buffer[idx+64]*buffer[idx+64];
__syncthreads();
if (idx<32) sums[idx] = sums[idx] + sums[idx+32];
__syncthreads();
if (idx<16) sums[idx] = sums[idx] + sums[idx+16];
__syncthreads();
if (idx<8) sums[idx] = sums[idx] + sums[idx+8];
__syncthreads();
if (idx<4) sums[idx] = sums[idx] + sums[idx+4];
__syncthreads();
float tsum2 = sums[0] + sums[1] + sums[2] + sums[3];
float *desc = d_sift[bx].data;
desc[idx] = buffer[idx] * rsqrtf(tsum2);
if (idx==0) {
d_sift[bx].xpos *= subsampling;
d_sift[bx].ypos *= subsampling;
d_sift[bx].scale *= subsampling;
}
}
__global__ void RescalePositions(SiftPoint *d_sift, int numPts, float scale)
{
int num = blockIdx.x*blockDim.x + threadIdx.x;
if (num<numPts) {
d_sift[num].xpos *= scale;
d_sift[num].ypos *= scale;
d_sift[num].scale *= scale;
}
}
__global__ void ComputeOrientations(hipTextureObject_t texObj, SiftPoint *d_Sift, int fstPts)
{
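  // Orientation assignment: accumulate a 32-bin gradient-orientation
  // histogram over an 11x11 Gaussian-weighted patch around the keypoint,
  // smooth it with the binomial filter [1 4 6 4 1] (stored in hist[32..63]),
  // keep only bins that are local maxima, and convert the strongest peak to
  // degrees with parabolic interpolation (11.25 = 360/32 degrees per bin).
  // Note that duplicating the keypoint for a second peak above 80% of the
  // maximum is currently disabled by the "&& false" in the condition below.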
__shared__ float hist[64];
__shared__ float gauss[11];
const int tx = threadIdx.x;
const int bx = blockIdx.x + fstPts;
float i2sigma2 = -1.0f/(4.5f*d_Sift[bx].scale*d_Sift[bx].scale);
if (tx<11)
gauss[tx] = exp(i2sigma2*(tx-5)*(tx-5));
if (tx<64)
hist[tx] = 0.0f;
__syncthreads();
float xp = d_Sift[bx].xpos - 5.0f;
float yp = d_Sift[bx].ypos - 5.0f;
int yd = tx/11;
int xd = tx - yd*11;
float xf = xp + xd;
float yf = yp + yd;
if (yd<11) {
float dx = tex2D<float>(texObj, xf+1.0, yf) - tex2D<float>(texObj, xf-1.0, yf);
float dy = tex2D<float>(texObj, xf, yf+1.0) - tex2D<float>(texObj, xf, yf-1.0);
int bin = 16.0f*atan2f(dy, dx)/3.1416f + 16.5f;
if (bin>31)
bin = 0;
float grad = sqrtf(dx*dx + dy*dy);
atomicAdd(&hist[bin], grad*gauss[xd]*gauss[yd]);
}
__syncthreads();
int x1m = (tx>=1 ? tx-1 : tx+31);
int x1p = (tx<=30 ? tx+1 : tx-31);
if (tx<32) {
int x2m = (tx>=2 ? tx-2 : tx+30);
int x2p = (tx<=29 ? tx+2 : tx-30);
hist[tx+32] = 6.0f*hist[tx] + 4.0f*(hist[x1m] + hist[x1p]) + (hist[x2m] + hist[x2p]);
}
__syncthreads();
if (tx<32) {
float v = hist[32+tx];
hist[tx] = (v>hist[32+x1m] && v>=hist[32+x1p] ? v : 0.0f);
}
__syncthreads();
if (tx==0) {
float maxval1 = 0.0;
float maxval2 = 0.0;
int i1 = -1;
int i2 = -1;
for (int i=0;i<32;i++) {
float v = hist[i];
if (v>maxval1) {
maxval2 = maxval1;
maxval1 = v;
i2 = i1;
i1 = i;
} else if (v>maxval2) {
maxval2 = v;
i2 = i;
}
}
float val1 = hist[32+((i1+1)&31)];
float val2 = hist[32+((i1+31)&31)];
float peak = i1 + 0.5f*(val1-val2) / (2.0f*maxval1-val1-val2);
d_Sift[bx].orientation = 11.25f*(peak<0.0f ? peak+32.0f : peak);
if (maxval2>0.8f*maxval1 && false) {
float val1 = hist[32+((i2+1)&31)];
float val2 = hist[32+((i2+31)&31)];
float peak = i2 + 0.5f*(val1-val2) / (2.0f*maxval2-val1-val2);
unsigned int idx = atomicInc(d_PointCounter, 0x7fffffff);
if (idx<d_MaxNumPoints) {
d_Sift[idx].xpos = d_Sift[bx].xpos;
d_Sift[idx].ypos = d_Sift[bx].ypos;
d_Sift[idx].scale = d_Sift[bx].scale;
d_Sift[idx].sharpness = d_Sift[bx].sharpness;
d_Sift[idx].edgeness = d_Sift[bx].edgeness;
d_Sift[idx].orientation = 11.25f*(peak<0.0f ? peak+32.0f : peak);
d_Sift[idx].subsampling = d_Sift[bx].subsampling;
}
}
}
}
///////////////////////////////////////////////////////////////////////////////
// Find scale-space extrema in the difference-of-Gaussians stack (multi-scale version)
///////////////////////////////////////////////////////////////////////////////
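// FindPointsMulti scans consecutive planes of the difference-of-Gaussians
// stack d_Data0 (each plane is pitch*height floats). For every pixel it tests
// whether the middle-scale value passes the contrast thresholds in
// d_Threshold and is a strict minimum or maximum over its 3x3x3 neighbourhood;
// candidates are collected per block in the shared points[] list. Each
// candidate is then refined to sub-pixel / sub-scale position by solving the
// 3x3 second-order system built from finite differences (Lowe's localization
// step) and rejected if the Hessian trace^2/det edge test exceeds d_EdgeLimit.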
__global__ void FindPointsMulti(float *d_Data0, SiftPoint *d_Sift, int width, int pitch, int height, int nScales, float subsampling, float lowestScale)
{
#define MEMWID (MINMAX_W + 2)
__shared__ float ymin1[MEMWID], ymin2[MEMWID], ymin3[MEMWID];
__shared__ float ymax1[MEMWID], ymax2[MEMWID], ymax3[MEMWID];
__shared__ unsigned int cnt;
__shared__ unsigned short points[96];
int tx = threadIdx.x;
int block = blockIdx.x/nScales;
int scale = blockIdx.x - nScales*block;
int minx = block*MINMAX_W;
int maxx = min(minx + MINMAX_W, width);
int xpos = minx + tx;
int size = pitch*height;
int ptr = size*scale + max(min(xpos-1, width-1), 0);
if (tx==0)
cnt = 0;
__syncthreads();
int yloops = min(height - MINMAX_H*blockIdx.y, MINMAX_H);
for (int y=0;y<yloops;y++) {
int ypos = MINMAX_H*blockIdx.y + y;
int yptr0 = ptr + max(0,ypos-1)*pitch;
int yptr1 = ptr + ypos*pitch;
int yptr2 = ptr + min(height-1,ypos+1)*pitch;
{
float d10 = d_Data0[yptr0];
float d11 = d_Data0[yptr1];
float d12 = d_Data0[yptr2];
ymin1[tx] = fminf(fminf(d10, d11), d12);
ymax1[tx] = fmaxf(fmaxf(d10, d11), d12);
}
{
float d30 = d_Data0[yptr0 + 2*size];
float d31 = d_Data0[yptr1 + 2*size];
float d32 = d_Data0[yptr2 + 2*size];
ymin3[tx] = fminf(fminf(d30, d31), d32);
ymax3[tx] = fmaxf(fmaxf(d30, d31), d32);
}
float d20 = d_Data0[yptr0 + 1*size];
float d21 = d_Data0[yptr1 + 1*size];
float d22 = d_Data0[yptr2 + 1*size];
ymin2[tx] = fminf(fminf(ymin1[tx], fminf(fminf(d20, d21), d22)), ymin3[tx]);
ymax2[tx] = fmaxf(fmaxf(ymax1[tx], fmaxf(fmaxf(d20, d21), d22)), ymax3[tx]);
__syncthreads();
if (tx>0 && tx<MINMAX_W+1 && xpos<=maxx) {
if (d21<d_Threshold[1]) {
float minv = fminf(fminf(fminf(ymin2[tx-1], ymin2[tx+1]), ymin1[tx]), ymin3[tx]);
minv = fminf(fminf(minv, d20), d22);
if (d21<minv) {
int pos = atomicInc(&cnt, 31);
points[3*pos+0] = xpos - 1;
points[3*pos+1] = ypos;
points[3*pos+2] = scale;
}
}
if (d21>d_Threshold[0]) {
float maxv = fmaxf(fmaxf(fmaxf(ymax2[tx-1], ymax2[tx+1]), ymax1[tx]), ymax3[tx]);
maxv = fmaxf(fmaxf(maxv, d20), d22);
if (d21>maxv) {
int pos = atomicInc(&cnt, 31);
points[3*pos+0] = xpos - 1;
points[3*pos+1] = ypos;
points[3*pos+2] = scale;
}
}
}
__syncthreads();
}
if (tx<cnt) {
int xpos = points[3*tx+0];
int ypos = points[3*tx+1];
int scale = points[3*tx+2];
int ptr = xpos + (ypos + (scale+1)*height)*pitch;
float val = d_Data0[ptr];
float *data1 = &d_Data0[ptr];
float dxx = 2.0f*val - data1[-1] - data1[1];
float dyy = 2.0f*val - data1[-pitch] - data1[pitch];
float dxy = 0.25f*(data1[+pitch+1] + data1[-pitch-1] - data1[-pitch+1] - data1[+pitch-1]);
float tra = dxx + dyy;
float det = dxx*dyy - dxy*dxy;
if (tra*tra<d_EdgeLimit*det) {
float edge = __fdividef(tra*tra, det);
float dx = 0.5f*(data1[1] - data1[-1]);
float dy = 0.5f*(data1[pitch] - data1[-pitch]);
float *data0 = d_Data0 + ptr - height*pitch;
float *data2 = d_Data0 + ptr + height*pitch;
float ds = 0.5f*(data0[0] - data2[0]);
float dss = 2.0f*val - data2[0] - data0[0];
float dxs = 0.25f*(data2[1] + data0[-1] - data0[1] - data2[-1]);
float dys = 0.25f*(data2[pitch] + data0[-pitch] - data2[-pitch] - data0[pitch]);
float idxx = dyy*dss - dys*dys;
float idxy = dys*dxs - dxy*dss;
float idxs = dxy*dys - dyy*dxs;
float idet = __fdividef(1.0f, idxx*dxx + idxy*dxy + idxs*dxs);
float idyy = dxx*dss - dxs*dxs;
float idys = dxy*dxs - dxx*dys;
float idss = dxx*dyy - dxy*dxy;
float pdx = idet*(idxx*dx + idxy*dy + idxs*ds);
float pdy = idet*(idxy*dx + idyy*dy + idys*ds);
float pds = idet*(idxs*dx + idys*dy + idss*ds);
if (pdx<-0.5f || pdx>0.5f || pdy<-0.5f || pdy>0.5f || pds<-0.5f || pds>0.5f) {
pdx = __fdividef(dx, dxx);
pdy = __fdividef(dy, dyy);
pds = __fdividef(ds, dss);
}
float dval = 0.5f*(dx*pdx + dy*pdy + ds*pds);
int maxPts = d_MaxNumPoints;
float sc = d_Scales[scale] * exp2f(pds*d_Factor);
if (sc>=lowestScale) {
unsigned int idx = atomicInc(d_PointCounter, 0x7fffffff);
idx = (idx>=maxPts ? maxPts-1 : idx);
d_Sift[idx].xpos = xpos + pdx;
d_Sift[idx].ypos = ypos + pdy;
d_Sift[idx].scale = sc;
d_Sift[idx].sharpness = val + dval;
d_Sift[idx].edgeness = edge;
d_Sift[idx].subsampling = subsampling;
}
}
}
}
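// The two Laplace* kernels below build one octave of the difference-of-
// Gaussians pyramid: each thread block blurs the same image row at LAPLACE_S
// scales (one scale per threadIdx.y) with separable 9-tap kernels taken from
// d_Kernel2, and writes the difference between adjacent scales, so plane s of
// d_Result holds blur(scale s) - blur(scale s+1). LaplaceMultiTex reads the
// source through a texture object, while LaplaceMultiMem reads plain global
// memory with clamped borders.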
__global__ void LaplaceMultiTex(hipTextureObject_t texObj, float *d_Result, int width, int pitch, int height)
{
__shared__ float data1[(LAPLACE_W + 2*LAPLACE_R)*LAPLACE_S];
__shared__ float data2[LAPLACE_W*LAPLACE_S];
const int tx = threadIdx.x;
const int xp = blockIdx.x*LAPLACE_W + tx;
const int yp = blockIdx.y;
const int scale = threadIdx.y;
float *kernel = d_Kernel2 + scale*16;
float *sdata1 = data1 + (LAPLACE_W + 2*LAPLACE_R)*scale;
float x = xp-3.5;
float y = yp+0.5;
sdata1[tx] = kernel[4]*tex2D<float>(texObj, x, y) +
kernel[3]*(tex2D<float>(texObj, x, y-1.0) + tex2D<float>(texObj, x, y+1.0)) +
kernel[2]*(tex2D<float>(texObj, x, y-2.0) + tex2D<float>(texObj, x, y+2.0)) +
kernel[1]*(tex2D<float>(texObj, x, y-3.0) + tex2D<float>(texObj, x, y+3.0)) +
kernel[0]*(tex2D<float>(texObj, x, y-4.0) + tex2D<float>(texObj, x, y+4.0));
__syncthreads();
float *sdata2 = data2 + LAPLACE_W*scale;
if (tx<LAPLACE_W) {
sdata2[tx] = kernel[4]*sdata1[tx+4] +
kernel[3]*(sdata1[tx+3] + sdata1[tx+5]) +
kernel[2]*(sdata1[tx+2] + sdata1[tx+6]) +
kernel[1]*(sdata1[tx+1] + sdata1[tx+7]) +
kernel[0]*(sdata1[tx+0] + sdata1[tx+8]);
}
__syncthreads();
if (tx<LAPLACE_W && scale<LAPLACE_S-1 && xp<width)
d_Result[scale*height*pitch + yp*pitch + xp] = sdata2[tx] - sdata2[tx+LAPLACE_W];
}
__global__ void LaplaceMultiMem(float *d_Image, float *d_Result, int width, int pitch, int height)
{
__shared__ float data1[(LAPLACE_W + 2*LAPLACE_R)*LAPLACE_S];
__shared__ float data2[LAPLACE_W*LAPLACE_S];
const int tx = threadIdx.x;
const int xp = blockIdx.x*LAPLACE_W + tx;
const int yp = blockIdx.y;
const int scale = threadIdx.y;
float *kernel = d_Kernel2 + scale*16;
float *sdata1 = data1 + (LAPLACE_W + 2*LAPLACE_R)*scale;
float *data = d_Image + max(min(xp - 4, width-1), 0);
int h = height-1;
sdata1[tx] = kernel[4]*data[min(yp, h)*pitch] +
kernel[3]*(data[max(0, min(yp-1, h))*pitch] + data[min(yp+1, h)*pitch]) +
kernel[2]*(data[max(0, min(yp-2, h))*pitch] + data[min(yp+2, h)*pitch]) +
kernel[1]*(data[max(0, min(yp-3, h))*pitch] + data[min(yp+3, h)*pitch]) +
kernel[0]*(data[max(0, min(yp-4, h))*pitch] + data[min(yp+4, h)*pitch]);
__syncthreads();
float *sdata2 = data2 + LAPLACE_W*scale;
if (tx<LAPLACE_W) {
sdata2[tx] = kernel[4]*sdata1[tx+4] +
kernel[3]*(sdata1[tx+3] + sdata1[tx+5]) + kernel[2]*(sdata1[tx+2] + sdata1[tx+6]) +
kernel[1]*(sdata1[tx+1] + sdata1[tx+7]) + kernel[0]*(sdata1[tx+0] + sdata1[tx+8]);
}
__syncthreads();
if (tx<LAPLACE_W && scale<LAPLACE_S-1 && xp<width)
d_Result[scale*height*pitch + yp*pitch + xp] = sdata2[tx] - sdata2[tx+LAPLACE_W];
}
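// LowPass applies a separable 9-tap filter whose taps come from the first row
// of d_Kernel2 to a single image, clamping reads at the borders and producing
// one LOWPASS_W x LOWPASS_H output tile per thread block.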
__global__ void LowPass(float *d_Image, float *d_Result, int width, int pitch, int height)
{
__shared__ float buffer[(LOWPASS_W + 2*LOWPASS_R)*LOWPASS_H];
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int xp = blockIdx.x*LOWPASS_W + tx;
const int yp = blockIdx.y*LOWPASS_H + ty;
float *kernel = d_Kernel2;
float *data = d_Image + max(min(xp - 4, width-1), 0);
float *buff = buffer + ty*(LOWPASS_W + 2*LOWPASS_R);
int h = height-1;
if (yp<height)
buff[tx] = kernel[4]*data[min(yp, h)*pitch] +
kernel[3]*(data[max(0, min(yp-1, h))*pitch] + data[min(yp+1, h)*pitch]) +
kernel[2]*(data[max(0, min(yp-2, h))*pitch] + data[min(yp+2, h)*pitch]) +
kernel[1]*(data[max(0, min(yp-3, h))*pitch] + data[min(yp+3, h)*pitch]) +
kernel[0]*(data[max(0, min(yp-4, h))*pitch] + data[min(yp+4, h)*pitch]);
__syncthreads();
if (tx<LOWPASS_W && xp<width && yp<height) {
d_Result[yp*pitch + xp] = kernel[4]*buff[tx+4] +
kernel[3]*(buff[tx+3] + buff[tx+5]) + kernel[2]*(buff[tx+2] + buff[tx+6]) +
kernel[1]*(buff[tx+1] + buff[tx+7]) + kernel[0]*(buff[tx+0] + buff[tx+8]);
}
}
| 98d3bc6e181186b07b131a6efb1f7fbe4a0412d7.cu | //********************************************************//
// CUDA SIFT extractor by Marten Bjorkman aka Celebrandil //
//********************************************************//
#include "cudautils.h"
#include "cudaSiftD.h"
#include "cudaSift.h"
///////////////////////////////////////////////////////////////////////////////
// Kernel configuration
///////////////////////////////////////////////////////////////////////////////
__constant__ float d_Threshold[2];
__constant__ float d_Scales[8], d_Factor;
__constant__ float d_EdgeLimit;
__constant__ int d_MaxNumPoints;
__device__ unsigned int d_PointCounter[1];
__constant__ float d_Kernel1[5];
__constant__ float d_Kernel2[12*16];
///////////////////////////////////////////////////////////////////////////////
// Lowpass filter and subsample image
///////////////////////////////////////////////////////////////////////////////
__global__ void ScaleDown(float *d_Result, float *d_Data, int width, int pitch, int height, int newpitch)
{
__shared__ float inrow[SCALEDOWN_W+4];
__shared__ float brow[5*(SCALEDOWN_W/2)];
__shared__ int yRead[SCALEDOWN_H+4];
__shared__ int yWrite[SCALEDOWN_H+4];
#define dx2 (SCALEDOWN_W/2)
const int tx = threadIdx.x;
const int tx0 = tx + 0*dx2;
const int tx1 = tx + 1*dx2;
const int tx2 = tx + 2*dx2;
const int tx3 = tx + 3*dx2;
const int tx4 = tx + 4*dx2;
const int xStart = blockIdx.x*SCALEDOWN_W;
const int yStart = blockIdx.y*SCALEDOWN_H;
const int xWrite = xStart/2 + tx;
const float *k = d_Kernel1;
if (tx<SCALEDOWN_H+4) {
int y = yStart + tx - 1;
y = (y<0 ? 0 : y);
y = (y>=height ? height-1 : y);
yRead[tx] = y*pitch;
yWrite[tx] = (yStart + tx - 4)/2 * newpitch;
}
__syncthreads();
int xRead = xStart + tx - 2;
xRead = (xRead<0 ? 0 : xRead);
xRead = (xRead>=width ? width-1 : xRead);
for (int dy=0;dy<SCALEDOWN_H+4;dy+=5) {
inrow[tx] = d_Data[yRead[dy+0] + xRead];
__syncthreads();
if (tx<dx2)
brow[tx0] = k[0]*(inrow[2*tx]+inrow[2*tx+4]) + k[1]*(inrow[2*tx+1]+inrow[2*tx+3]) + k[2]*inrow[2*tx+2];
__syncthreads();
if (tx<dx2 && dy>=4 && !(dy&1))
d_Result[yWrite[dy+0] + xWrite] = k[2]*brow[tx2] + k[0]*(brow[tx0]+brow[tx4]) + k[1]*(brow[tx1]+brow[tx3]);
if (dy<(SCALEDOWN_H+3)) {
inrow[tx] = d_Data[yRead[dy+1] + xRead];
__syncthreads();
if (tx<dx2)
brow[tx1] = k[0]*(inrow[2*tx]+inrow[2*tx+4]) + k[1]*(inrow[2*tx+1]+inrow[2*tx+3]) + k[2]*inrow[2*tx+2];
__syncthreads();
if (tx<dx2 && dy>=3 && (dy&1))
d_Result[yWrite[dy+1] + xWrite] = k[2]*brow[tx3] + k[0]*(brow[tx1]+brow[tx0]) + k[1]*(brow[tx2]+brow[tx4]);
}
if (dy<(SCALEDOWN_H+2)) {
inrow[tx] = d_Data[yRead[dy+2] + xRead];
__syncthreads();
if (tx<dx2)
brow[tx2] = k[0]*(inrow[2*tx]+inrow[2*tx+4]) + k[1]*(inrow[2*tx+1]+inrow[2*tx+3]) + k[2]*inrow[2*tx+2];
__syncthreads();
if (tx<dx2 && dy>=2 && !(dy&1))
d_Result[yWrite[dy+2] + xWrite] = k[2]*brow[tx4] + k[0]*(brow[tx2]+brow[tx1]) + k[1]*(brow[tx3]+brow[tx0]);
}
if (dy<(SCALEDOWN_H+1)) {
inrow[tx] = d_Data[yRead[dy+3] + xRead];
__syncthreads();
if (tx<dx2)
brow[tx3] = k[0]*(inrow[2*tx]+inrow[2*tx+4]) + k[1]*(inrow[2*tx+1]+inrow[2*tx+3]) + k[2]*inrow[2*tx+2];
__syncthreads();
if (tx<dx2 && dy>=1 && (dy&1))
d_Result[yWrite[dy+3] + xWrite] = k[2]*brow[tx0] + k[0]*(brow[tx3]+brow[tx2]) + k[1]*(brow[tx4]+brow[tx1]);
}
if (dy<SCALEDOWN_H) {
inrow[tx] = d_Data[yRead[dy+4] + xRead];
__syncthreads();
if (tx<dx2)
brow[tx4] = k[0]*(inrow[2*tx]+inrow[2*tx+4]) + k[1]*(inrow[2*tx+1]+inrow[2*tx+3]) + k[2]*inrow[2*tx+2];
__syncthreads();
if (tx<dx2 && !(dy&1))
d_Result[yWrite[dy+4] + xWrite] = k[2]*brow[tx1] + k[0]*(brow[tx4]+brow[tx3]) + k[1]*(brow[tx0]+brow[tx2]);
}
__syncthreads();
}
}
__global__ void ScaleUp(float *d_Result, float *d_Data, int width, int pitch, int height, int newpitch)
{
#define BW (SCALEUP_W/2 + 2)
#define BH (SCALEUP_H/2 + 2)
__shared__ float buffer[BW*BH];
const int tx = threadIdx.x;
const int ty = threadIdx.y;
if (tx<BW && ty<BH) {
int x = min(max(blockIdx.x*(SCALEUP_W/2) + tx - 1, 0), width-1);
int y = min(max(blockIdx.y*(SCALEUP_H/2) + ty - 1, 0), height-1);
buffer[ty*BW + tx] = d_Data[y*pitch + x];
}
__syncthreads();
int x = blockIdx.x*SCALEUP_W + tx;
int y = blockIdx.y*SCALEUP_H + ty;
if (x<2*width && y<2*height) {
int bx = (tx + 1)/2;
int by = (ty + 1)/2;
int bp = by*BW + bx;
float wx = 0.25f + (tx&1)*0.50f;
float wy = 0.25f + (ty&1)*0.50f;
d_Result[y*newpitch + x] = wy*(wx*buffer[bp] + (1.0f-wx)*buffer[bp+1]) +
(1.0f-wy)*(wx*buffer[bp+BW] + (1.0f-wx)*buffer[bp+BW+1]);
}
}
__global__ void ExtractSiftDescriptors(cudaTextureObject_t texObj, SiftPoint *d_sift, int fstPts, float subsampling)
{
__shared__ float gauss[16];
__shared__ float buffer[128];
__shared__ float sums[4];
const int tx = threadIdx.x; // 0 -> 16
const int ty = threadIdx.y; // 0 -> 8
const int idx = ty*16 + tx;
const int bx = blockIdx.x + fstPts; // 0 -> numPts
if (ty==0)
gauss[tx] = exp(-(tx-7.5f)*(tx-7.5f)/128.0f);
buffer[idx] = 0.0f;
__syncthreads();
// Compute angles and gradients
float theta = 2.0f*3.1415f/360.0f*d_sift[bx].orientation;
float sina = sinf(theta); // cosa -sina
float cosa = cosf(theta); // sina cosa
float scale = 12.0f/16.0f*d_sift[bx].scale;
float ssina = scale*sina;
float scosa = scale*cosa;
for (int y=ty;y<16;y+=8) {
float xpos = d_sift[bx].xpos + (tx-7.5f)*scosa - (y-7.5f)*ssina;
float ypos = d_sift[bx].ypos + (tx-7.5f)*ssina + (y-7.5f)*scosa;
float dx = tex2D<float>(texObj, xpos+cosa, ypos+sina) -
tex2D<float>(texObj, xpos-cosa, ypos-sina);
float dy = tex2D<float>(texObj, xpos-sina, ypos+cosa) -
tex2D<float>(texObj, xpos+sina, ypos-cosa);
float grad = gauss[y]*gauss[tx] * sqrtf(dx*dx + dy*dy);
float angf = 4.0f/3.1415f*atan2f(dy, dx) + 4.0f;
int hori = (tx + 2)/4 - 1; // Convert from (tx,y,angle) to bins
float horf = (tx - 1.5f)/4.0f - hori;
float ihorf = 1.0f - horf;
int veri = (y + 2)/4 - 1;
float verf = (y - 1.5f)/4.0f - veri;
float iverf = 1.0f - verf;
int angi = angf;
int angp = (angi<7 ? angi+1 : 0);
angf -= angi;
float iangf = 1.0f - angf;
int hist = 8*(4*veri + hori); // Each gradient measure is interpolated
int p1 = angi + hist; // in angles, xpos and ypos -> 8 stores
int p2 = angp + hist;
if (tx>=2) {
float grad1 = ihorf*grad;
if (y>=2) { // Upper left
float grad2 = iverf*grad1;
atomicAdd(buffer + p1, iangf*grad2);
atomicAdd(buffer + p2, angf*grad2);
}
if (y<=13) { // Lower left
float grad2 = verf*grad1;
atomicAdd(buffer + p1+32, iangf*grad2);
atomicAdd(buffer + p2+32, angf*grad2);
}
}
if (tx<=13) {
float grad1 = horf*grad;
if (y>=2) { // Upper right
float grad2 = iverf*grad1;
atomicAdd(buffer + p1+8, iangf*grad2);
atomicAdd(buffer + p2+8, angf*grad2);
}
if (y<=13) { // Lower right
float grad2 = verf*grad1;
atomicAdd(buffer + p1+40, iangf*grad2);
atomicAdd(buffer + p2+40, angf*grad2);
}
}
}
__syncthreads();
// Normalize twice and suppress peaks first time
float sum = buffer[idx]*buffer[idx];
for (int i=1;i<=16;i*=2)
sum += __shfl_xor(sum, i);
if ((idx&31)==0)
sums[idx/32] = sum;
__syncthreads();
float tsum1 = sums[0] + sums[1] + sums[2] + sums[3];
tsum1 = min(buffer[idx] * rsqrtf(tsum1), 0.2f);
sum = tsum1*tsum1;
for (int i=1;i<=16;i*=2)
sum += __shfl_xor(sum, i);
if ((idx&31)==0)
sums[idx/32] = sum;
__syncthreads();
float tsum2 = sums[0] + sums[1] + sums[2] + sums[3];
float *desc = d_sift[bx].data;
desc[idx] = tsum1 * rsqrtf(tsum2);
if (idx==0) {
d_sift[bx].xpos *= subsampling;
d_sift[bx].ypos *= subsampling;
d_sift[bx].scale *= subsampling;
}
}
__global__ void ExtractSiftDescriptorsOld(cudaTextureObject_t texObj, SiftPoint *d_sift, int fstPts, float subsampling)
{
__shared__ float gauss[16];
__shared__ float buffer[128];
__shared__ float sums[128];
const int tx = threadIdx.x; // 0 -> 16
const int ty = threadIdx.y; // 0 -> 8
const int idx = ty*16 + tx;
const int bx = blockIdx.x + fstPts; // 0 -> numPts
if (ty==0)
gauss[tx] = exp(-(tx-7.5f)*(tx-7.5f)/128.0f);
buffer[idx] = 0.0f;
__syncthreads();
// Compute angles and gradients
float theta = 2.0f*3.1415f/360.0f*d_sift[bx].orientation;
float sina = sinf(theta); // cosa -sina
float cosa = cosf(theta); // sina cosa
float scale = 12.0f/16.0f*d_sift[bx].scale;
float ssina = scale*sina;
float scosa = scale*cosa;
for (int y=ty;y<16;y+=8) {
float xpos = d_sift[bx].xpos + (tx-7.5f)*scosa - (y-7.5f)*ssina;
float ypos = d_sift[bx].ypos + (tx-7.5f)*ssina + (y-7.5f)*scosa;
float dx = tex2D<float>(texObj, xpos+cosa, ypos+sina) -
tex2D<float>(texObj, xpos-cosa, ypos-sina);
float dy = tex2D<float>(texObj, xpos-sina, ypos+cosa) -
tex2D<float>(texObj, xpos+sina, ypos-cosa);
float grad = gauss[y]*gauss[tx] * sqrtf(dx*dx + dy*dy);
float angf = 4.0f/3.1415f*atan2f(dy, dx) + 4.0f;
int hori = (tx + 2)/4 - 1; // Convert from (tx,y,angle) to bins
float horf = (tx - 1.5f)/4.0f - hori;
float ihorf = 1.0f - horf;
int veri = (y + 2)/4 - 1;
float verf = (y - 1.5f)/4.0f - veri;
float iverf = 1.0f - verf;
int angi = angf;
int angp = (angi<7 ? angi+1 : 0);
angf -= angi;
float iangf = 1.0f - angf;
int hist = 8*(4*veri + hori); // Each gradient measure is interpolated
int p1 = angi + hist; // in angles, xpos and ypos -> 8 stores
int p2 = angp + hist;
if (tx>=2) {
float grad1 = ihorf*grad;
if (y>=2) { // Upper left
float grad2 = iverf*grad1;
atomicAdd(buffer + p1, iangf*grad2);
atomicAdd(buffer + p2, angf*grad2);
}
if (y<=13) { // Lower left
float grad2 = verf*grad1;
atomicAdd(buffer + p1+32, iangf*grad2);
atomicAdd(buffer + p2+32, angf*grad2);
}
}
if (tx<=13) {
float grad1 = horf*grad;
if (y>=2) { // Upper right
float grad2 = iverf*grad1;
atomicAdd(buffer + p1+8, iangf*grad2);
atomicAdd(buffer + p2+8, angf*grad2);
}
if (y<=13) { // Lower right
float grad2 = verf*grad1;
atomicAdd(buffer + p1+40, iangf*grad2);
atomicAdd(buffer + p2+40, angf*grad2);
}
}
}
__syncthreads();
// Normalize twice and suppress peaks first time
if (idx<64)
sums[idx] = buffer[idx]*buffer[idx] + buffer[idx+64]*buffer[idx+64];
__syncthreads();
if (idx<32) sums[idx] = sums[idx] + sums[idx+32];
__syncthreads();
if (idx<16) sums[idx] = sums[idx] + sums[idx+16];
__syncthreads();
if (idx<8) sums[idx] = sums[idx] + sums[idx+8];
__syncthreads();
if (idx<4) sums[idx] = sums[idx] + sums[idx+4];
__syncthreads();
float tsum1 = sums[0] + sums[1] + sums[2] + sums[3];
buffer[idx] = buffer[idx] * rsqrtf(tsum1);
if (buffer[idx]>0.2f)
buffer[idx] = 0.2f;
__syncthreads();
if (idx<64)
sums[idx] = buffer[idx]*buffer[idx] + buffer[idx+64]*buffer[idx+64];
__syncthreads();
if (idx<32) sums[idx] = sums[idx] + sums[idx+32];
__syncthreads();
if (idx<16) sums[idx] = sums[idx] + sums[idx+16];
__syncthreads();
if (idx<8) sums[idx] = sums[idx] + sums[idx+8];
__syncthreads();
if (idx<4) sums[idx] = sums[idx] + sums[idx+4];
__syncthreads();
float tsum2 = sums[0] + sums[1] + sums[2] + sums[3];
float *desc = d_sift[bx].data;
desc[idx] = buffer[idx] * rsqrtf(tsum2);
if (idx==0) {
d_sift[bx].xpos *= subsampling;
d_sift[bx].ypos *= subsampling;
d_sift[bx].scale *= subsampling;
}
}
__global__ void RescalePositions(SiftPoint *d_sift, int numPts, float scale)
{
int num = blockIdx.x*blockDim.x + threadIdx.x;
if (num<numPts) {
d_sift[num].xpos *= scale;
d_sift[num].ypos *= scale;
d_sift[num].scale *= scale;
}
}
__global__ void ComputeOrientations(cudaTextureObject_t texObj, SiftPoint *d_Sift, int fstPts)
{
__shared__ float hist[64];
__shared__ float gauss[11];
const int tx = threadIdx.x;
const int bx = blockIdx.x + fstPts;
float i2sigma2 = -1.0f/(4.5f*d_Sift[bx].scale*d_Sift[bx].scale);
if (tx<11)
gauss[tx] = exp(i2sigma2*(tx-5)*(tx-5));
if (tx<64)
hist[tx] = 0.0f;
__syncthreads();
float xp = d_Sift[bx].xpos - 5.0f;
float yp = d_Sift[bx].ypos - 5.0f;
int yd = tx/11;
int xd = tx - yd*11;
float xf = xp + xd;
float yf = yp + yd;
if (yd<11) {
float dx = tex2D<float>(texObj, xf+1.0, yf) - tex2D<float>(texObj, xf-1.0, yf);
float dy = tex2D<float>(texObj, xf, yf+1.0) - tex2D<float>(texObj, xf, yf-1.0);
int bin = 16.0f*atan2f(dy, dx)/3.1416f + 16.5f;
if (bin>31)
bin = 0;
float grad = sqrtf(dx*dx + dy*dy);
atomicAdd(&hist[bin], grad*gauss[xd]*gauss[yd]);
}
__syncthreads();
int x1m = (tx>=1 ? tx-1 : tx+31);
int x1p = (tx<=30 ? tx+1 : tx-31);
if (tx<32) {
int x2m = (tx>=2 ? tx-2 : tx+30);
int x2p = (tx<=29 ? tx+2 : tx-30);
hist[tx+32] = 6.0f*hist[tx] + 4.0f*(hist[x1m] + hist[x1p]) + (hist[x2m] + hist[x2p]);
}
__syncthreads();
if (tx<32) {
float v = hist[32+tx];
hist[tx] = (v>hist[32+x1m] && v>=hist[32+x1p] ? v : 0.0f);
}
__syncthreads();
if (tx==0) {
float maxval1 = 0.0;
float maxval2 = 0.0;
int i1 = -1;
int i2 = -1;
for (int i=0;i<32;i++) {
float v = hist[i];
if (v>maxval1) {
maxval2 = maxval1;
maxval1 = v;
i2 = i1;
i1 = i;
} else if (v>maxval2) {
maxval2 = v;
i2 = i;
}
}
float val1 = hist[32+((i1+1)&31)];
float val2 = hist[32+((i1+31)&31)];
float peak = i1 + 0.5f*(val1-val2) / (2.0f*maxval1-val1-val2);
d_Sift[bx].orientation = 11.25f*(peak<0.0f ? peak+32.0f : peak);
if (maxval2>0.8f*maxval1 && false) {
float val1 = hist[32+((i2+1)&31)];
float val2 = hist[32+((i2+31)&31)];
float peak = i2 + 0.5f*(val1-val2) / (2.0f*maxval2-val1-val2);
unsigned int idx = atomicInc(d_PointCounter, 0x7fffffff);
if (idx<d_MaxNumPoints) {
d_Sift[idx].xpos = d_Sift[bx].xpos;
d_Sift[idx].ypos = d_Sift[bx].ypos;
d_Sift[idx].scale = d_Sift[bx].scale;
d_Sift[idx].sharpness = d_Sift[bx].sharpness;
d_Sift[idx].edgeness = d_Sift[bx].edgeness;
      d_Sift[idx].orientation = 11.25f*(peak<0.0f ? peak+32.0f : peak);
d_Sift[idx].subsampling = d_Sift[bx].subsampling;
}
}
}
}
///////////////////////////////////////////////////////////////////////////////
// Find scale-space extrema (minima/maxima) and refine them (multi-scale version)
///////////////////////////////////////////////////////////////////////////////
__global__ void FindPointsMulti(float *d_Data0, SiftPoint *d_Sift, int width, int pitch, int height, int nScales, float subsampling, float lowestScale)
{
#define MEMWID (MINMAX_W + 2)
__shared__ float ymin1[MEMWID], ymin2[MEMWID], ymin3[MEMWID];
__shared__ float ymax1[MEMWID], ymax2[MEMWID], ymax3[MEMWID];
__shared__ unsigned int cnt;
__shared__ unsigned short points[96];
int tx = threadIdx.x;
int block = blockIdx.x/nScales;
int scale = blockIdx.x - nScales*block;
int minx = block*MINMAX_W;
int maxx = min(minx + MINMAX_W, width);
int xpos = minx + tx;
int size = pitch*height;
int ptr = size*scale + max(min(xpos-1, width-1), 0);
if (tx==0)
cnt = 0;
__syncthreads();
int yloops = min(height - MINMAX_H*blockIdx.y, MINMAX_H);
for (int y=0;y<yloops;y++) {
int ypos = MINMAX_H*blockIdx.y + y;
int yptr0 = ptr + max(0,ypos-1)*pitch;
int yptr1 = ptr + ypos*pitch;
int yptr2 = ptr + min(height-1,ypos+1)*pitch;
{
float d10 = d_Data0[yptr0];
float d11 = d_Data0[yptr1];
float d12 = d_Data0[yptr2];
ymin1[tx] = fminf(fminf(d10, d11), d12);
ymax1[tx] = fmaxf(fmaxf(d10, d11), d12);
}
{
float d30 = d_Data0[yptr0 + 2*size];
float d31 = d_Data0[yptr1 + 2*size];
float d32 = d_Data0[yptr2 + 2*size];
ymin3[tx] = fminf(fminf(d30, d31), d32);
ymax3[tx] = fmaxf(fmaxf(d30, d31), d32);
}
float d20 = d_Data0[yptr0 + 1*size];
float d21 = d_Data0[yptr1 + 1*size];
float d22 = d_Data0[yptr2 + 1*size];
ymin2[tx] = fminf(fminf(ymin1[tx], fminf(fminf(d20, d21), d22)), ymin3[tx]);
ymax2[tx] = fmaxf(fmaxf(ymax1[tx], fmaxf(fmaxf(d20, d21), d22)), ymax3[tx]);
__syncthreads();
if (tx>0 && tx<MINMAX_W+1 && xpos<=maxx) {
if (d21<d_Threshold[1]) {
float minv = fminf(fminf(fminf(ymin2[tx-1], ymin2[tx+1]), ymin1[tx]), ymin3[tx]);
minv = fminf(fminf(minv, d20), d22);
if (d21<minv) {
int pos = atomicInc(&cnt, 31);
points[3*pos+0] = xpos - 1;
points[3*pos+1] = ypos;
points[3*pos+2] = scale;
}
}
if (d21>d_Threshold[0]) {
float maxv = fmaxf(fmaxf(fmaxf(ymax2[tx-1], ymax2[tx+1]), ymax1[tx]), ymax3[tx]);
maxv = fmaxf(fmaxf(maxv, d20), d22);
if (d21>maxv) {
int pos = atomicInc(&cnt, 31);
points[3*pos+0] = xpos - 1;
points[3*pos+1] = ypos;
points[3*pos+2] = scale;
}
}
}
__syncthreads();
}
if (tx<cnt) {
int xpos = points[3*tx+0];
int ypos = points[3*tx+1];
int scale = points[3*tx+2];
int ptr = xpos + (ypos + (scale+1)*height)*pitch;
float val = d_Data0[ptr];
float *data1 = &d_Data0[ptr];
float dxx = 2.0f*val - data1[-1] - data1[1];
float dyy = 2.0f*val - data1[-pitch] - data1[pitch];
float dxy = 0.25f*(data1[+pitch+1] + data1[-pitch-1] - data1[-pitch+1] - data1[+pitch-1]);
float tra = dxx + dyy;
float det = dxx*dyy - dxy*dxy;
if (tra*tra<d_EdgeLimit*det) {
float edge = __fdividef(tra*tra, det);
float dx = 0.5f*(data1[1] - data1[-1]);
float dy = 0.5f*(data1[pitch] - data1[-pitch]);
float *data0 = d_Data0 + ptr - height*pitch;
float *data2 = d_Data0 + ptr + height*pitch;
float ds = 0.5f*(data0[0] - data2[0]);
float dss = 2.0f*val - data2[0] - data0[0];
float dxs = 0.25f*(data2[1] + data0[-1] - data0[1] - data2[-1]);
float dys = 0.25f*(data2[pitch] + data0[-pitch] - data2[-pitch] - data0[pitch]);
float idxx = dyy*dss - dys*dys;
float idxy = dys*dxs - dxy*dss;
float idxs = dxy*dys - dyy*dxs;
float idet = __fdividef(1.0f, idxx*dxx + idxy*dxy + idxs*dxs);
float idyy = dxx*dss - dxs*dxs;
float idys = dxy*dxs - dxx*dys;
float idss = dxx*dyy - dxy*dxy;
float pdx = idet*(idxx*dx + idxy*dy + idxs*ds);
float pdy = idet*(idxy*dx + idyy*dy + idys*ds);
float pds = idet*(idxs*dx + idys*dy + idss*ds);
if (pdx<-0.5f || pdx>0.5f || pdy<-0.5f || pdy>0.5f || pds<-0.5f || pds>0.5f) {
pdx = __fdividef(dx, dxx);
pdy = __fdividef(dy, dyy);
pds = __fdividef(ds, dss);
}
float dval = 0.5f*(dx*pdx + dy*pdy + ds*pds);
int maxPts = d_MaxNumPoints;
float sc = d_Scales[scale] * exp2f(pds*d_Factor);
if (sc>=lowestScale) {
unsigned int idx = atomicInc(d_PointCounter, 0x7fffffff);
idx = (idx>=maxPts ? maxPts-1 : idx);
d_Sift[idx].xpos = xpos + pdx;
d_Sift[idx].ypos = ypos + pdy;
d_Sift[idx].scale = sc;
d_Sift[idx].sharpness = val + dval;
d_Sift[idx].edgeness = edge;
d_Sift[idx].subsampling = subsampling;
}
}
}
}
__global__ void LaplaceMultiTex(cudaTextureObject_t texObj, float *d_Result, int width, int pitch, int height)
{
__shared__ float data1[(LAPLACE_W + 2*LAPLACE_R)*LAPLACE_S];
__shared__ float data2[LAPLACE_W*LAPLACE_S];
const int tx = threadIdx.x;
const int xp = blockIdx.x*LAPLACE_W + tx;
const int yp = blockIdx.y;
const int scale = threadIdx.y;
float *kernel = d_Kernel2 + scale*16;
float *sdata1 = data1 + (LAPLACE_W + 2*LAPLACE_R)*scale;
float x = xp-3.5;
float y = yp+0.5;
sdata1[tx] = kernel[4]*tex2D<float>(texObj, x, y) +
kernel[3]*(tex2D<float>(texObj, x, y-1.0) + tex2D<float>(texObj, x, y+1.0)) +
kernel[2]*(tex2D<float>(texObj, x, y-2.0) + tex2D<float>(texObj, x, y+2.0)) +
kernel[1]*(tex2D<float>(texObj, x, y-3.0) + tex2D<float>(texObj, x, y+3.0)) +
kernel[0]*(tex2D<float>(texObj, x, y-4.0) + tex2D<float>(texObj, x, y+4.0));
__syncthreads();
float *sdata2 = data2 + LAPLACE_W*scale;
if (tx<LAPLACE_W) {
sdata2[tx] = kernel[4]*sdata1[tx+4] +
kernel[3]*(sdata1[tx+3] + sdata1[tx+5]) +
kernel[2]*(sdata1[tx+2] + sdata1[tx+6]) +
kernel[1]*(sdata1[tx+1] + sdata1[tx+7]) +
kernel[0]*(sdata1[tx+0] + sdata1[tx+8]);
}
__syncthreads();
if (tx<LAPLACE_W && scale<LAPLACE_S-1 && xp<width)
d_Result[scale*height*pitch + yp*pitch + xp] = sdata2[tx] - sdata2[tx+LAPLACE_W];
}
__global__ void LaplaceMultiMem(float *d_Image, float *d_Result, int width, int pitch, int height)
{
__shared__ float data1[(LAPLACE_W + 2*LAPLACE_R)*LAPLACE_S];
__shared__ float data2[LAPLACE_W*LAPLACE_S];
const int tx = threadIdx.x;
const int xp = blockIdx.x*LAPLACE_W + tx;
const int yp = blockIdx.y;
const int scale = threadIdx.y;
float *kernel = d_Kernel2 + scale*16;
float *sdata1 = data1 + (LAPLACE_W + 2*LAPLACE_R)*scale;
float *data = d_Image + max(min(xp - 4, width-1), 0);
int h = height-1;
sdata1[tx] = kernel[4]*data[min(yp, h)*pitch] +
kernel[3]*(data[max(0, min(yp-1, h))*pitch] + data[min(yp+1, h)*pitch]) +
kernel[2]*(data[max(0, min(yp-2, h))*pitch] + data[min(yp+2, h)*pitch]) +
kernel[1]*(data[max(0, min(yp-3, h))*pitch] + data[min(yp+3, h)*pitch]) +
kernel[0]*(data[max(0, min(yp-4, h))*pitch] + data[min(yp+4, h)*pitch]);
__syncthreads();
float *sdata2 = data2 + LAPLACE_W*scale;
if (tx<LAPLACE_W) {
sdata2[tx] = kernel[4]*sdata1[tx+4] +
kernel[3]*(sdata1[tx+3] + sdata1[tx+5]) + kernel[2]*(sdata1[tx+2] + sdata1[tx+6]) +
kernel[1]*(sdata1[tx+1] + sdata1[tx+7]) + kernel[0]*(sdata1[tx+0] + sdata1[tx+8]);
}
__syncthreads();
if (tx<LAPLACE_W && scale<LAPLACE_S-1 && xp<width)
d_Result[scale*height*pitch + yp*pitch + xp] = sdata2[tx] - sdata2[tx+LAPLACE_W];
}
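// LowPass: separable 9-tap low-pass filter. Each block processes a LOWPASS_W x LOWPASS_H tile;
// the vertical pass (with clamped image borders) is written to shared memory, and the
// horizontal pass over that buffer produces the output pixels.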
__global__ void LowPass(float *d_Image, float *d_Result, int width, int pitch, int height)
{
__shared__ float buffer[(LOWPASS_W + 2*LOWPASS_R)*LOWPASS_H];
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int xp = blockIdx.x*LOWPASS_W + tx;
const int yp = blockIdx.y*LOWPASS_H + ty;
float *kernel = d_Kernel2;
float *data = d_Image + max(min(xp - 4, width-1), 0);
float *buff = buffer + ty*(LOWPASS_W + 2*LOWPASS_R);
int h = height-1;
if (yp<height)
buff[tx] = kernel[4]*data[min(yp, h)*pitch] +
kernel[3]*(data[max(0, min(yp-1, h))*pitch] + data[min(yp+1, h)*pitch]) +
kernel[2]*(data[max(0, min(yp-2, h))*pitch] + data[min(yp+2, h)*pitch]) +
kernel[1]*(data[max(0, min(yp-3, h))*pitch] + data[min(yp+3, h)*pitch]) +
kernel[0]*(data[max(0, min(yp-4, h))*pitch] + data[min(yp+4, h)*pitch]);
__syncthreads();
if (tx<LOWPASS_W && xp<width && yp<height) {
d_Result[yp*pitch + xp] = kernel[4]*buff[tx+4] +
kernel[3]*(buff[tx+3] + buff[tx+5]) + kernel[2]*(buff[tx+2] + buff[tx+6]) +
kernel[1]*(buff[tx+1] + buff[tx+7]) + kernel[0]*(buff[tx+0] + buff[tx+8]);
}
}
|
4d9c331495f2a17cf81c099453c9fc309db59222.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//---- the work-list distribution of the interval queue for the ibba thread
//---- Describe the kernel functionality -----------------
/*---- 1. The IBBA thread works on the given interval set and updates the interval queue on each of its sequential iterations.
---- 2. When the interval queue size exceeds a certain threshold (t), the GPU kernel is invoked to reprioritize the rest of the queue (elements t+1 to N) while the IBBA thread keeps working on elements 0 to t of the queue.
---- 3. The kernel receives as input M intervals of the queue, each of p dimensions. Thus each element of the queue has p subintervals, one per dimension.
---- 4. Each interval Mi is evaluated in the following way:
       The p-dimensional interval Mi is broken down into k subintervals along a specific dimension.
       A sampling point along each of the other (fixed) dimensions is chosen and the function is evaluated at the k sample points. The maximum of these evaluations forms the priority value along that dimension.
       After traversing the k sampling points across each of the p dimensions, the interval has one priority value per dimension.
       The maximum of these values becomes the priority Pi of interval Mi.
       This evaluation is done for every interval in parallel, resulting in the new priority list of intervals P1, P2, ..., PM.
       This priority list is then sorted into decreasing order of priority.
       The sorted priority list is returned from the kernel to the CPU thread.
*/
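//---- Illustrative sketch only of the reprioritization kernel described above: one thread per
//---- interval, k midpoint samples per dimension, and the per-dimension maxima reduced to a
//---- single priority. The evaluator eval_at and the flat [M*p] interval layout are assumed
//---- placeholder names, not part of this program.
//
// __global__ void reprioritizeSketch(interval_gpu<float>* q, float* pri, int M, int p, int k)
// {
//     int m = blockIdx.x*blockDim.x + threadIdx.x;
//     if (m >= M) return;
//     float best = -1e30f;                          // effectively -infinity for this sketch
//     for (int d = 0; d < p; d++) {                 // split dimension d into k subintervals
//         interval_gpu<float> I = q[m*p + d];
//         float w = (I.upper() - I.lower()) / k;
//         for (int s = 0; s < k; s++) {
//             float x = I.lower() + (s + 0.5f)*w;   // midpoint of subinterval s
//             best = fmaxf(best, eval_at(q + m*p, p, d, x)); // other dimensions held at fixed sample points
//         }
//     }
//     pri[m] = best;                                // priority Pi of interval Mi
// }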
// vector<interval_gpu<float>> gpu_x ;
// #pragma omp parallel for
// for(int i=0; i<x.size()/dimension; i++) {
// for(int j=0; j<dimension; j++) {
// interval_gpu<float> ij(x[i*dimension+j].left(), x[i*dimension+j].right()) ;
// gpu_x.push_back(ij) ;
// }
// }
//
//---------- Coding the IBBA Thread using Gaol -----------------------------
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <gaol/gaol.h>
#include "helper_cuda.h"
#include "cuda_interval_lib.h"
#include <limits>
#include <vector>
#include <queue>
#include <deque>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include "omp.h"
#include <thread>
#define NUM_THREADS 4
#define CPU_THRESHOLD 2
//#include "cpu_interval.h"
using namespace std ;
//-------- Some experiments -------//
//1. Use the Gaol Library and check the access patterns
//---- Allocate some global shared memory here for writing by gpu and reading by cpu thread -----
//float fbestTag=0.0 ;
__host__ __device__ float returnMax( float a, float b) { if(a > b) return a ; else return b ; }
__host__ __device__ float returnMix( float a, float b) { if(a < b) return a ; else return b ; }
template <typename T> struct KernelArray
{
T* _array ;
int _size ;
} ;
template <typename T> KernelArray<T> convertToKernel(thrust::device_vector<T>& dvec)
{
KernelArray<T> kArray ;
kArray._array = thrust::raw_pointer_cast(&dvec[0]);
kArray._size = (int) dvec.size();
return kArray ;
} ;
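// Usage note: KernelArray<T> is a plain-old-data view that can be passed by value to a
// __global__ kernel (a thrust::device_vector itself cannot). The raw pointer obtained here
// stays valid only while the underlying device_vector is alive and has not been resized.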
void ibbaThread( deque<gaol::interval>& X ) {
cout << X[0].left() << endl ;
gaol::interval f(9,90);
X[0] = f ;
} ;
__global__ void cudaTest ( KernelArray<interval_gpu<float>> inArray, int dim )
{
int qsize = inArray._size/dim;
printf("Qsize= %d\n", qsize);
for(int i=0; i<qsize; i++) {
printf("LL = %f",inArray._array[i].lower()) ;
}
interval_gpu<float> gpu_local(7,8);
inArray._array[0] = gpu_local ;
}
void cudahandleThread ( KernelArray<interval_gpu<float>> inArray, int dim )
{
hipLaunchKernelGGL(( cudaTest), dim3(1),dim3(1), 0, 0, inArray, dim); //--modifies the device_vector directly
hipDeviceSynchronize() ;
}
// template <class Type> Type Func( const Type &x, const Type &y)
// {
// Type z ;
//
// Type z = (1 -
// }
//--- This comes from script ----
gaol::interval cpu_Func_expr ( gaol::interval x, gaol::interval y)
{
gaol::interval z ;
z = (1 - pow(x,2))*cos(5*y) ;
return z ;
}
__device__ float gpu_Func_expr ( float x, float y )
{
float z ;
z = (1 - pow(x,2))*cos(5*y) ;
return z ;
}
int main()
{
gaol::init();
omp_set_num_threads(NUM_THREADS);
thrust::device_vector<interval_gpu<float>> gpu_x ;
thrust::device_vector<float> gpu_pri_label ; //-- priority queue sent to the gpu
thrust::device_vector<float> fbestTag ; //--updated only by the gpu, read by cpu
int dimension = 2 ; //From script
//float fbestTag=0 ;
gaol::interval x_0(-5,7), x_1(0,5) ; //From script
deque<float> x_pri_label ; //From script
deque<gaol::interval> x; //From script
vector<gaol::interval> varMidPoints(dimension) ;
deque<thread> manageThreads ;
x.push_back(x_0) ; //From script //adding the default intervals
x.push_back(x_1) ; //From script
//---- get the priority of the initial interval set ----
for(int i=0; i<dimension; i++) {
gaol::interval temp((x[i].left() + x[i].right())/2);
varMidPoints[i] = temp ;
}
gaol::interval PriTemp = cpu_Func_expr(varMidPoints[0], varMidPoints[1]) ; // How to formalize ?
x_pri_label.push_back((PriTemp.left() + PriTemp.right())/2) ; //Average: although both left and right should be same
//---- Copy the current queue to the gpu_interval type list(overhead) -----
#pragma omp parallel for
for(int i=0; i<x.size()/dimension; i++) {
gpu_pri_label.push_back(x_pri_label[i]) ;
for(int j=0; j<dimension; j++) {
interval_gpu<float> ij(x[i*dimension+j].left(), x[i*dimension+j].right());
gpu_x.push_back(ij);
}
}
KernelArray<interval_gpu<float>> iArray = convertToKernel(gpu_x);
//-- create a handler thread for gpu call
manageThreads.push_back(thread(cudahandleThread, iArray, dimension));
// manageThreads.front().join() ;
//--- start the ibba thread here ---
float fbest = numeric_limits<float>::max();
cout << "fbest = " << fbest << endl ;
while(x.size() != 0) {
//----- Synchronization point -----
//-- 1. join the threads, 2. receive the sorted queue from gpu, 3. insert the updated queue from last iteration and merge
manageThreads.front().join();
//--- translate the device_vector to the current array set : gpu_x to x
for(int i=0; i<gpu_x.size()/dimension; i++) {
for(int j=0; j<dimension; j++) {
interval_gpu<float> ij_gpu = gpu_x[i*dimension+j] ;
gaol::interval ij(ij_gpu.lower(), ij_gpu.upper()) ;
x[i*dimension + j] = ij ;
}
x_pri_label[i] = gpu_pri_label[i]; //The priority
}
if(fbestTag.size()!=0)
fbest = returnMax(fbestTag.front(), fbest) ;//MAX from gpu kernel(highest priority value)
// fbest = Max(fbestTag, fbest) ;
gaol::interval curr_interval = x.front();
cout << curr_interval << endl ;
x.pop_front();
}
//--- structure of the array of gaol's interval queue ---
// x = [x_0, x_1, x_2, x_3, x_4,x_5, .....] guaranteed to be multiple of dimension
//--- get the translation of gaol to gpu-interval ---
// thrust::device_vector<interval_gpu<float>> gpu_x ;
#pragma omp parallel for
for(int i=0; i<x.size()/dimension; i++) {
for(int j=0; j<dimension; j++) {
interval_gpu<float> ij(x[i*dimension+j].left(), x[i*dimension+j].right()) ;
gpu_x.push_back(ij) ;
}
}
//KernelArray<interval_gpu<float>> iArray = convertToKernel(gpu_x);
hipLaunchKernelGGL(( cudaTest), dim3(1),dim3(1), 0, 0, iArray, dimension);
hipDeviceSynchronize();
interval_gpu<float> temp;
temp = gpu_x[0];
printf("Data-Pass Test = %f\n", temp.upper() );
//ibbaThread(x);
// cout << x[0].left() << endl ;
gaol::cleanup();
}
| 4d9c331495f2a17cf81c099453c9fc309db59222.cu | //---- the work-list distribution of the interval queue for the ibba thread
//---- Describe the kernel functionality -----------------
/*---- 1. The IBBA thread works on the given interval set and updates the interval queue on each of its sequential iterations.
---- 2. When the interval queue size exceeds a certain threshold (t), the GPU kernel is invoked to reprioritize the rest of the queue (elements t+1 to N) while the IBBA thread keeps working on elements 0 to t of the queue.
---- 3. The kernel receives as input M intervals of the queue, each of p dimensions. Thus each element of the queue has p subintervals, one per dimension.
---- 4. Each interval Mi is evaluated in the following way:
       The p-dimensional interval Mi is broken down into k subintervals along a specific dimension.
       A sampling point along each of the other (fixed) dimensions is chosen and the function is evaluated at the k sample points. The maximum of these evaluations forms the priority value along that dimension.
       After traversing the k sampling points across each of the p dimensions, the interval has one priority value per dimension.
       The maximum of these values becomes the priority Pi of interval Mi.
       This evaluation is done for every interval in parallel, resulting in the new priority list of intervals P1, P2, ..., PM.
       This priority list is then sorted into decreasing order of priority.
       The sorted priority list is returned from the kernel to the CPU thread.
*/
// vector<interval_gpu<float>> gpu_x ;
// #pragma omp parallel for
// for(int i=0; i<x.size()/dimension; i++) {
// for(int j=0; j<dimension; j++) {
// interval_gpu<float> ij(x[i*dimension+j].left(), x[i*dimension+j].right()) ;
// gpu_x.push_back(ij) ;
// }
// }
//
//---------- Coding the IBBA Thread using Gaol -----------------------------
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <gaol/gaol.h>
#include "helper_cuda.h"
#include "cuda_interval_lib.h"
#include <limits>
#include <vector>
#include <queue>
#include <deque>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include "omp.h"
#include <thread>
#define NUM_THREADS 4
#define CPU_THRESHOLD 2
//#include "cpu_interval.h"
using namespace std ;
//-------- Some experiments -------//
//1. Use the Gaol Library and check the access patterns
//---- Allocate some global shared memory here for writing by gpu and reading by cpu thread -----
//float fbestTag=0.0 ;
__host__ __device__ float returnMax( float a, float b) { if(a > b) return a ; else return b ; }
__host__ __device__ float returnMix( float a, float b) { if(a < b) return a ; else return b ; }
template <typename T> struct KernelArray
{
T* _array ;
int _size ;
} ;
template <typename T> KernelArray<T> convertToKernel(thrust::device_vector<T>& dvec)
{
KernelArray<T> kArray ;
kArray._array = thrust::raw_pointer_cast(&dvec[0]);
kArray._size = (int) dvec.size();
return kArray ;
} ;
void ibbaThread( deque<gaol::interval>& X ) {
cout << X[0].left() << endl ;
gaol::interval f(9,90);
X[0] = f ;
} ;
__global__ void cudaTest ( KernelArray<interval_gpu<float>> inArray, int dim )
{
int qsize = inArray._size/dim;
printf("Qsize= %d\n", qsize);
for(int i=0; i<qsize; i++) {
printf("LL = %f",inArray._array[i].lower()) ;
}
interval_gpu<float> gpu_local(7,8);
inArray._array[0] = gpu_local ;
}
void cudahandleThread ( KernelArray<interval_gpu<float>> inArray, int dim )
{
cudaTest<<<1,1>>>(inArray, dim); //--modifies the device_vector directly
cudaDeviceSynchronize() ;
}
// template <class Type> Type Func( const Type &x, const Type &y)
// {
// Type z ;
//
// Type z = (1 -
// }
//--- This comes from script ----
gaol::interval cpu_Func_expr ( gaol::interval x, gaol::interval y)
{
gaol::interval z ;
z = (1 - pow(x,2))*cos(5*y) ;
return z ;
}
__device__ float gpu_Func_expr ( float x, float y )
{
float z ;
z = (1 - pow(x,2))*cos(5*y) ;
return z ;
}
int main()
{
gaol::init();
omp_set_num_threads(NUM_THREADS);
thrust::device_vector<interval_gpu<float>> gpu_x ;
thrust::device_vector<float> gpu_pri_label ; //-- priority queue sent to the gpu
thrust::device_vector<float> fbestTag ; //--updated only by the gpu, read by cpu
int dimension = 2 ; //From script
//float fbestTag=0 ;
gaol::interval x_0(-5,7), x_1(0,5) ; //From script
deque<float> x_pri_label ; //From script
deque<gaol::interval> x; //From script
vector<gaol::interval> varMidPoints(dimension) ;
deque<thread> manageThreads ;
x.push_back(x_0) ; //From script //adding the default intervals
x.push_back(x_1) ; //From script
//---- get the priority of the initial interval set ----
for(int i=0; i<dimension; i++) {
gaol::interval temp((x[i].left() + x[i].right())/2);
varMidPoints[i] = temp ;
}
gaol::interval PriTemp = cpu_Func_expr(varMidPoints[0], varMidPoints[1]) ; // How to formalize ?
x_pri_label.push_back((PriTemp.left() + PriTemp.right())/2) ; //Average: although both left and right should be same
//---- Copy the current queue to the gpu_interval type list(overhead) -----
#pragma omp parallel for
for(int i=0; i<x.size()/dimension; i++) {
gpu_pri_label.push_back(x_pri_label[i]) ;
for(int j=0; j<dimension; j++) {
interval_gpu<float> ij(x[i*dimension+j].left(), x[i*dimension+j].right());
gpu_x.push_back(ij);
}
}
KernelArray<interval_gpu<float>> iArray = convertToKernel(gpu_x);
//-- create a handler thread for gpu call
manageThreads.push_back(thread(cudahandleThread, iArray, dimension));
// manageThreads.front().join() ;
//--- start the ibba thread here ---
float fbest = numeric_limits<float>::max();
cout << "fbest = " << fbest << endl ;
while(x.size() != 0) {
//----- Synchronization point -----
//-- 1. join the threads, 2. receive the sorted queue from gpu, 3. insert the updated queue from last iteration and merge
manageThreads.front().join();
//--- translate the device_vector to the current array set : gpu_x to x
for(int i=0; i<gpu_x.size()/dimension; i++) {
for(int j=0; j<dimension; j++) {
interval_gpu<float> ij_gpu = gpu_x[i*dimension+j] ;
gaol::interval ij(ij_gpu.lower(), ij_gpu.upper()) ;
x[i*dimension + j] = ij ;
}
x_pri_label[i] = gpu_pri_label[i]; //The priority
}
if(fbestTag.size()!=0)
fbest = returnMax(fbestTag.front(), fbest) ;//MAX from gpu kernel(highest priority value)
// fbest = Max(fbestTag, fbest) ;
gaol::interval curr_interval = x.front();
cout << curr_interval << endl ;
x.pop_front();
}
//--- structure of the array of gaol's interval queue ---
// x = [x_0, x_1, x_2, x_3, x_4,x_5, .....] guaranteed to be multiple of dimension
//--- get the translation of gaol to gpu-interval ---
// thrust::device_vector<interval_gpu<float>> gpu_x ;
#pragma omp parallel for
for(int i=0; i<x.size()/dimension; i++) {
for(int j=0; j<dimension; j++) {
interval_gpu<float> ij(x[i*dimension+j].left(), x[i*dimension+j].right()) ;
gpu_x.push_back(ij) ;
}
}
//KernelArray<interval_gpu<float>> iArray = convertToKernel(gpu_x);
cudaTest<<<1,1>>>(iArray, dimension);
cudaDeviceSynchronize();
interval_gpu<float> temp;
temp = gpu_x[0];
printf("Data-Pass Test = %f\n", temp.upper() );
//ibbaThread(x);
// cout << x[0].left() << endl ;
gaol::cleanup();
}
|
89050d5f698a590d2156fa4bc6126bd206d8c50b.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <hip/hip_runtime.h>
#include <rocblas.h>
#include <time.h>
// A typical 3*4 matrix
//
// a11 a12 a13 a14
// a21 a22 a23 a24
// a31 a32 a33 a34
//
// is normally stored (row-major) as
// a11 a12 a13 a14 a21 a22 a23 a24 a31 a32 a33 a34
//
// whereas cuBLAS stores it (column-major, Fortran order) as
//
// a11 a21 a31 a12 a22 a32 a13 a23 a33 a14 a24 a34
//
// In other words, normally the stride between rows is the number of columns,
// but in cuBLAS the stride between columns is the number of rows.
//
#define IDX2C(i,j,Id) (((j)*(Id))+(i)) // i -> row index, j -> column index, Id -> leading dimension (number of rows)
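// Worked example for the 3*4 matrix above: element a23 has row index i=1 and column index j=2
// (0-based) in a matrix with Id=3 rows, so IDX2C(1,2,3) = 2*3 + 1 = 7, which is exactly the
// position of a23 in the column-major list a11 a21 a31 a12 a22 a32 a13 a23 ...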
using namespace std;
bool ChoseGpuAvailable(int n)
{
int devicesCount;
hipGetDeviceCount(&devicesCount);
cout<<"devicesCount : "<<devicesCount<<endl;
for(int i = 0 ; i < devicesCount ; i++)
{
hipDeviceProp_t deviceProperties;
hipGetDeviceProperties(&deviceProperties,i);
cout<<"----- device "<<i<<" -----"<<endl;
cout<<"device name : "<<deviceProperties.name<<endl;
cout<<"maxThreadsPerBlock : "<<deviceProperties.maxThreadsPerBlock<<endl;
cout<<"warpSize : "<<deviceProperties.warpSize<<endl;
}
if(n > devicesCount && n < 0) return false;
else
{
hipSetDevice(n);
return true;
}
}
int main(int argc, char** argv)
{
unsigned int m = 6; //row
unsigned int n = 4; //column
unsigned int k = 5;
float *matrix1, *matrix2, *matrix3;
clock_t t;
int host2device_time, device2host_time,GPU_time;
matrix1 = new float[m*k];
matrix2 = new float[k*n];
matrix3 = new float[m*n];
hipError_t cudaStat;
hipblasStatus_t stat;
hipblasHandle_t handle;
ChoseGpuAvailable(1);
	// initialize data
int ind = 11;
for(int x = 0 ; x < k ; x++)
{
for(int y = 0 ; y < m ; y++)
{
matrix1[IDX2C(y,x,m)] = (float)ind++;
}
}
ind = 11;
for(int x = 0 ; x < n ; x++)
{
for(int y = 0 ; y < k ; y++)
{
matrix2[IDX2C(y,x,k)] = (float)ind++;
}
}
ind = 11;
for(int x = 0 ; x < n ; x++)
{
for(int y = 0 ; y < m ; y++)
{
matrix3[IDX2C(y,x,m)] = (float)ind++;
}
}
	cout<<"matrix1"<<endl;
for(int y = 0 ; y < m ; y++)
{
for(int x = 0 ; x < k ; x++)
{
cout<<matrix1[IDX2C(y,x,m)]<<" ";
}
cout<<endl;
}
	cout<<"matrix2"<<endl;
for(int y = 0 ; y < k ; y++)
{
for(int x = 0 ; x < n ; x++)
{
cout<<matrix2[IDX2C(y,x,k)]<<" ";
}
cout<<endl;
}
	cout<<"matrix3"<<endl;
for(int y = 0 ; y < m ; y++)
{
for(int x = 0 ; x < n ; x++)
{
cout<<matrix3[IDX2C(y,x,m)]<<" ";
}
cout<<endl;
}
cout<<endl;
	// allocate CUDA device memory
float *d_matrix1, *d_matrix2, *d_matrix3;
hipMalloc(&d_matrix1,m*k*sizeof(float));
hipMalloc(&d_matrix2,k*n*sizeof(float));
hipMalloc(&d_matrix3,m*n*sizeof(float));
// memory -> cuda memory
t = clock();
hipblasCreate(&handle);
hipblasSetMatrix(m,k,sizeof(float),matrix1,m,d_matrix1,m);
hipblasSetMatrix(k,n,sizeof(float),matrix2,k,d_matrix2,k);
hipblasSetMatrix(m,n,sizeof(float),matrix3,m,d_matrix3,m);
host2device_time = clock()-t;
	// computation (kernel execution)
float al=1.0f;
float bet=0.0f;
t = clock();
//stat = hipblasSgemv(handle, HIPBLAS_OP_N,m,n, &al, d_matrix1,m, d_vector1,1, &bet, d_vector2,1);
//
	// Offsetting the pointer moves the (0,0) position of the matrix,
	// and the 3rd and 4th parameters set the final size of the sub-matrix.
	//
	// HIPBLAS_OP_N (CUBLAS_OP_N) uses the matrix as is,
	// HIPBLAS_OP_T (CUBLAS_OP_T) uses its transpose.
//
//
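	// With m=6, n=4, k=5 the call below computes the 2x2 product C = A_sub^T * B_sub:
	// A_sub is the 2x2 block of d_matrix1 starting at its second column (the +m offset skips
	// one column of m elements, lda stays m), B_sub is the top-left 2x2 block of d_matrix2,
	// and since beta = 0 the result overwrites the top-left 2x2 block of d_matrix3 (ldc = m).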
stat = hipblasSgemm(handle, HIPBLAS_OP_T,HIPBLAS_OP_N,m-4,n-2,k-3, &al, d_matrix1+m,m, d_matrix2,k, &bet, d_matrix3,m);
GPU_time = clock() - t;
//cuda memory -> memory
t= clock();
hipblasGetMatrix(m,k,sizeof(float),d_matrix1,m,matrix1,m);
hipblasGetMatrix(k,n,sizeof(float),d_matrix2,k,matrix2,k);
hipblasGetMatrix(m,n,sizeof(float),d_matrix3,m,matrix3,m);
device2host_time = clock() - t;
	// check the results
	cout<<"matrix1"<<endl;
for(int y = 0 ; y < m ; y++)
{
for(int x = 0 ; x < k ; x++)
{
cout<<matrix1[IDX2C(y,x,m)]<<" ";
}
cout<<endl;
}
	cout<<"matrix2"<<endl;
for(int y = 0 ; y < k ; y++)
{
for(int x = 0 ; x < n ; x++)
{
cout<<matrix2[IDX2C(y,x,k)]<<" ";
}
cout<<endl;
}
	cout<<"matrix3"<<endl;
for(int y = 0 ; y < m ; y++)
{
for(int x = 0 ; x < n ; x++)
{
cout<<matrix3[IDX2C(y,x,m)]<<" ";
}
cout<<endl;
}
cout<<"host to device time : "<<host2device_time<<endl;
cout<<"GPU time : "<<GPU_time<<endl;
cout<<"device to host time : "<<device2host_time<<endl;
	// free CUDA device memory
hipFree(d_matrix1);
hipFree(d_matrix2);
hipFree(d_matrix3);
hipblasDestroy(handle);
	delete[] matrix1;
	delete[] matrix2;
	delete[] matrix3;
return 0;
}
| 89050d5f698a590d2156fa4bc6126bd206d8c50b.cu | #include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <cuda_runtime.h>
#include <cublas_v2.h>
#include <time.h>
// A typical 3*4 matrix
//
// a11 a12 a13 a14
// a21 a22 a23 a24
// a31 a32 a33 a34
//
// is normally stored (row-major) as
// a11 a12 a13 a14 a21 a22 a23 a24 a31 a32 a33 a34
//
// whereas cuBLAS stores it (column-major, Fortran order) as
//
// a11 a21 a31 a12 a22 a32 a13 a23 a33 a14 a24 a34
//
// In other words, normally the stride between rows is the number of columns,
// but in cuBLAS the stride between columns is the number of rows.
//
#define IDX2C(i,j,Id) (((j)*(Id))+(i)) // i -> row index, j -> column index, Id -> leading dimension (number of rows)
using namespace std;
bool ChoseGpuAvailable(int n)
{
int devicesCount;
cudaGetDeviceCount(&devicesCount);
cout<<"devicesCount : "<<devicesCount<<endl;
for(int i = 0 ; i < devicesCount ; i++)
{
cudaDeviceProp deviceProperties;
cudaGetDeviceProperties(&deviceProperties,i);
cout<<"----- device "<<i<<" -----"<<endl;
cout<<"device name : "<<deviceProperties.name<<endl;
cout<<"maxThreadsPerBlock : "<<deviceProperties.maxThreadsPerBlock<<endl;
cout<<"warpSize : "<<deviceProperties.warpSize<<endl;
}
if(n > devicesCount && n < 0) return false;
else
{
cudaSetDevice(n);
return true;
}
}
int main(int argc, char** argv)
{
unsigned int m = 6; //row
unsigned int n = 4; //column
unsigned int k = 5;
float *matrix1, *matrix2, *matrix3;
clock_t t;
int host2device_time, device2host_time,GPU_time;
matrix1 = new float[m*k];
matrix2 = new float[k*n];
matrix3 = new float[m*n];
cudaError_t cudaStat;
cublasStatus_t stat;
cublasHandle_t handle;
ChoseGpuAvailable(1);
	// initialize data
int ind = 11;
for(int x = 0 ; x < k ; x++)
{
for(int y = 0 ; y < m ; y++)
{
matrix1[IDX2C(y,x,m)] = (float)ind++;
}
}
ind = 11;
for(int x = 0 ; x < n ; x++)
{
for(int y = 0 ; y < k ; y++)
{
matrix2[IDX2C(y,x,k)] = (float)ind++;
}
}
ind = 11;
for(int x = 0 ; x < n ; x++)
{
for(int y = 0 ; y < m ; y++)
{
matrix3[IDX2C(y,x,m)] = (float)ind++;
}
}
	cout<<"matrix1"<<endl;
for(int y = 0 ; y < m ; y++)
{
for(int x = 0 ; x < k ; x++)
{
cout<<matrix1[IDX2C(y,x,m)]<<" ";
}
cout<<endl;
}
	cout<<"matrix2"<<endl;
for(int y = 0 ; y < k ; y++)
{
for(int x = 0 ; x < n ; x++)
{
cout<<matrix2[IDX2C(y,x,k)]<<" ";
}
cout<<endl;
}
	cout<<"matrix3"<<endl;
for(int y = 0 ; y < m ; y++)
{
for(int x = 0 ; x < n ; x++)
{
cout<<matrix3[IDX2C(y,x,m)]<<" ";
}
cout<<endl;
}
cout<<endl;
	// allocate CUDA device memory
float *d_matrix1, *d_matrix2, *d_matrix3;
cudaMalloc(&d_matrix1,m*k*sizeof(float));
cudaMalloc(&d_matrix2,k*n*sizeof(float));
cudaMalloc(&d_matrix3,m*n*sizeof(float));
// memory -> cuda memory
t = clock();
cublasCreate(&handle);
cublasSetMatrix(m,k,sizeof(float),matrix1,m,d_matrix1,m);
cublasSetMatrix(k,n,sizeof(float),matrix2,k,d_matrix2,k);
cublasSetMatrix(m,n,sizeof(float),matrix3,m,d_matrix3,m);
host2device_time = clock()-t;
	// computation (kernel execution)
float al=1.0f;
float bet=0.0f;
t = clock();
//stat = cublasSgemv(handle, CUBLAS_OP_N,m,n, &al, d_matrix1,m, d_vector1,1, &bet, d_vector2,1);
//
	// Offsetting the pointer moves the (0,0) position of the matrix,
	// and the 3rd and 4th parameters set the final size of the sub-matrix.
	//
	// CUBLAS_OP_N uses the matrix as is,
	// CUBLAS_OP_T uses its transpose.
//
//
stat = cublasSgemm(handle, CUBLAS_OP_T,CUBLAS_OP_N,m-4,n-2,k-3, &al, d_matrix1+m,m, d_matrix2,k, &bet, d_matrix3,m);
GPU_time = clock() - t;
//cuda memory -> memory
t= clock();
cublasGetMatrix(m,k,sizeof(float),d_matrix1,m,matrix1,m);
cublasGetMatrix(k,n,sizeof(float),d_matrix2,k,matrix2,k);
cublasGetMatrix(m,n,sizeof(float),d_matrix3,m,matrix3,m);
device2host_time = clock() - t;
	// check the results
	cout<<"matrix1"<<endl;
for(int y = 0 ; y < m ; y++)
{
for(int x = 0 ; x < k ; x++)
{
cout<<matrix1[IDX2C(y,x,m)]<<" ";
}
cout<<endl;
}
	cout<<"matrix2"<<endl;
for(int y = 0 ; y < k ; y++)
{
for(int x = 0 ; x < n ; x++)
{
cout<<matrix2[IDX2C(y,x,k)]<<" ";
}
cout<<endl;
}
	cout<<"matrix3"<<endl;
for(int y = 0 ; y < m ; y++)
{
for(int x = 0 ; x < n ; x++)
{
cout<<matrix3[IDX2C(y,x,m)]<<" ";
}
cout<<endl;
}
cout<<"host to device time : "<<host2device_time<<endl;
cout<<"GPU time : "<<GPU_time<<endl;
cout<<"device to host time : "<<device2host_time<<endl;
	// free CUDA device memory
cudaFree(d_matrix1);
cudaFree(d_matrix2);
cudaFree(d_matrix3);
cublasDestroy(handle);
	delete[] matrix1;
	delete[] matrix2;
	delete[] matrix3;
return 0;
}
|
53ef5bebac36e8c522778baf743bc599ad3f9465.hip | // !!! This is a file automatically generated by hipify!!!
#include "kernels.hh"
#include "ops.hh"
#include "../runtime/node.hh"
#include "simd_kernels.hh"
namespace cpu
{
namespace
{
void kernel_conv2d(rt::Node* node)
{
conv2d(node->in1, node->in2, node->out1, node->intconst,
node->int_cons1, node->int_cons2, node->sizes1, node->sizes2);
}
/*
void kernel_conv2d_bias_add(rt::Node* node)
{
conv2d_bias_add(node->in1, node->in2, node->out1, node->sizes1);
}
void kernel_conv2d_bias_add_grad(rt::Node* node)
{
conv2d_bias_add_grad(node->in1, node->sizes1, node->out1);
}
*/
void kernel_conv2d_input_grad(rt::Node* node)
{
conv2d_input_grad(node->in1, node->in2, node->intconst[0], node->sizes1, node->sizes2, node->out1, node->intconst2);
}
void kernel_conv2d_kernel_grad(rt::Node* node)
{
conv2d_kernel_grad(node->in1, node->in2, node->intconst[0], node->sizes1, node->sizes2, node->out1, node->intconst2);
}
void kernel_conv2d_transpose(rt::Node* node)
{
conv2d_transpose(node->in1, node->in2, node->sizes1, node->intconst[0], node->out1, node->sizes2, node->sizes3);
}
void kernel_conv2d_transpose_input_grad(rt::Node* node)
{
conv2d_transpose_input_grad(node->in1, node->in2, node->intconst[0], node->sizes1, node->sizes2, node->out1, node->intconst2);
}
void kernel_conv2d_transpose_kernel_grad(rt::Node* node)
{
conv2d_transpose_kernel_grad(node->in1, node->in2, node->intconst[0], node->sizes1, node->sizes2, node->out1);
}
void kernel_mat_mat_mul(rt::Node* node)
{
mm_mul(node->in1, node->in2, node->out1,
node->len1, node->len2, node->len3);
}
void kernel_mat_rvect_add(rt::Node* node)
{
mvrow_add(node->in1, node->in2, node->out1,
node->len1, node->len2);
}
void kernel_relu(rt::Node* node)
{
vect_relu(node->in1, node->out1, node->len1);
}
void kernel_relu_leaky(rt::Node* node)
{
vect_relu_leaky(node->in1, node->out1, node->len1, node->alpha_leaky);
}
void kernel_sigmoid(rt::Node* node)
{
vect_sigmoid(node->in1, node->out1, node->len1);
}
void kernel_mse(rt::Node* node)
{
*node->out1 = mse(node->in1, node->in2, node->len1, node->len2);
}
void kernel_softmax(rt::Node* node)
{
softmax(node->in1, node->out1, node->len1, node->len2);
}
void kernel_log_softmax(rt::Node* node)
{
log_softmax(node->in1, node->out1, node->len1, node->len2);
}
void kernel_softmax_cross_entropy(rt::Node* node)
{
*node->out1 = softmax_cross_entropy(node->in1, node->in2, node->len1, node->len2);
}
void kernel_tanh(rt::Node* node)
{
vect_tanh(node->in1, node->out1, node->len1);
}
void kernel_mse_grad(rt::Node* node)
{
vect_sub_coeff(node->in2, node->in1, 2. / node->len1, node->out1, node->len1);
}
void kernel_sigmoid_grad(rt::Node* node)
{
sigmoid_grad(node->in1, node->in2, node->out1, node->len1);
}
void kernel_mat_mul_add(rt::Node* node)
{
mat_mul_add(node->in1, node->in2, node->in3, node->out1,
node->len1, node->len2, node->len3);
}
void kernel_tmat_mat_mul(rt::Node* node)
{
tmm_mul(node->in1, node->in2, node->out1,
node->len1, node->len2, node->len3);
}
void kernel_mat_tmat_mul(rt::Node* node)
{
mtm_mul(node->in1, node->in2, node->out1,
node->len1, node->len2, node->len3);
}
void kernel_mat_sum_rows(rt::Node* node)
{
mat_sum_rows(node->in1, node->out1, node->len1, node->len2);
}
void kernel_mat_sum_cols(rt::Node* node)
{
mat_sum_cols(node->in1, node->out1, node->len1, node->len2);
}
void kernel_softmax_cross_entropy_grad(rt::Node* node)
{
softmax_cross_entropy_grad(node->in1, node->in2, node->out1, node->len1, node->len2);
}
void kernel_relu_grad(rt::Node* node)
{
relu_grad(node->in1, node->in2, node->out1, node->len1);
}
void kernel_update(rt::Node* node)
{
vect_update(node->in1, node->out1, *node->in2, node->len1);
}
void kernel_sigmoid_cross_entropy(rt::Node* node)
{
*(node->out1) = sigmoid_cross_entropy(node->in1, node->in2, node->len1);
}
void kernel_sigmoid_cross_entropy_grad(rt::Node* node)
{
sigmoid_cross_entropy_grad(node->in1, node->in2, node->out1, node->len1);
}
void kernel_tanh_grad(rt::Node* node)
{
tanh_grad(node->in1, node->in2, node->out1, node->len1);
}
void kernel_argmax_acc(rt::Node* node)
{
*(node->out1) = argmax_acc(node->in1, node->in2, node->len1, node->len2);
}
void kernel_moment_update(rt::Node* node)
{
moment_update(node->in1, node->out1, node->cons1, node->cons2, node->len1);
}
void kernel_moment_update2(rt::Node* node)
{
moment_update2(node->in1, node->out1, node->cons1, node->cons2, node->len1);
}
void kernel_adam_update(rt::Node* node)
{
dbl_t* t = node->out2;
dbl_t lr = node->cons1;
dbl_t beta1 = node->cons2;
dbl_t beta2 = node->cons3;
dbl_t eps = node->cons4;
++*t;
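        // Adam bias-corrected step size: lr_t = lr * sqrt(1 - beta2^t) / (1 - beta1^t)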
dbl_t lrt = lr * std::sqrt(1 - ::pow(beta2, *t))
/ (1 - ::pow(beta1, *t));
adam_update(node->in1, node->in2, node->out1, lrt, eps, node->len1);
}
void kernel_leaky_relu_grad(rt::Node* node)
{
leaky_relu_grad(node->in1, node->in2, node->out1, node->cons1, node->len1);
}
void kernel_add(rt::Node* node)
{
vect_add(node->in1, node->in2, node->out1, node->len1);
}
}
kernel_f kernels_list[512] = {
kernel_mat_mat_mul,
kernel_mat_rvect_add,
kernel_sigmoid,
kernel_mse,
kernel_softmax,
kernel_log_softmax,
kernel_softmax_cross_entropy,
kernel_conv2d,
kernel_relu,
kernel_relu_leaky,
kernel_tanh,
kernel_mse_grad,
kernel_sigmoid_grad,
kernel_mat_mul_add,
kernel_tmat_mat_mul,
kernel_mat_tmat_mul,
kernel_mat_sum_rows,
kernel_mat_sum_cols,
kernel_softmax_cross_entropy_grad,
kernel_relu_grad,
nullptr,//kernel_conv2d_bias_add,
kernel_update,
kernel_sigmoid_cross_entropy,
kernel_sigmoid_cross_entropy_grad,
kernel_conv2d_input_grad,
kernel_conv2d_kernel_grad,
kernel_argmax_acc,
kernel_moment_update,
kernel_moment_update2,
kernel_adam_update,
kernel_leaky_relu_grad,
nullptr,//kernel_conv2d_bias_add_grad,
kernel_tanh_grad,
kernel_conv2d_transpose,
kernel_conv2d_transpose_input_grad,
kernel_conv2d_transpose_kernel_grad,
kernel_add
};
void kernels_init()
{
for (std::size_t i = 0; i < 64; ++i)
kernels_list[KERNEL_SIMD_OFFSET + i] = simd_kernels_list[i];
}
}
| 53ef5bebac36e8c522778baf743bc599ad3f9465.cu | #include "kernels.hh"
#include "ops.hh"
#include "../runtime/node.hh"
#include "simd_kernels.hh"
namespace cpu
{
namespace
{
void kernel_conv2d(rt::Node* node)
{
conv2d(node->in1, node->in2, node->out1, node->intconst,
node->int_cons1, node->int_cons2, node->sizes1, node->sizes2);
}
/*
void kernel_conv2d_bias_add(rt::Node* node)
{
conv2d_bias_add(node->in1, node->in2, node->out1, node->sizes1);
}
void kernel_conv2d_bias_add_grad(rt::Node* node)
{
conv2d_bias_add_grad(node->in1, node->sizes1, node->out1);
}
*/
void kernel_conv2d_input_grad(rt::Node* node)
{
conv2d_input_grad(node->in1, node->in2, node->intconst[0], node->sizes1, node->sizes2, node->out1, node->intconst2);
}
void kernel_conv2d_kernel_grad(rt::Node* node)
{
conv2d_kernel_grad(node->in1, node->in2, node->intconst[0], node->sizes1, node->sizes2, node->out1, node->intconst2);
}
void kernel_conv2d_transpose(rt::Node* node)
{
conv2d_transpose(node->in1, node->in2, node->sizes1, node->intconst[0], node->out1, node->sizes2, node->sizes3);
}
void kernel_conv2d_transpose_input_grad(rt::Node* node)
{
conv2d_transpose_input_grad(node->in1, node->in2, node->intconst[0], node->sizes1, node->sizes2, node->out1, node->intconst2);
}
void kernel_conv2d_transpose_kernel_grad(rt::Node* node)
{
conv2d_transpose_kernel_grad(node->in1, node->in2, node->intconst[0], node->sizes1, node->sizes2, node->out1);
}
void kernel_mat_mat_mul(rt::Node* node)
{
mm_mul(node->in1, node->in2, node->out1,
node->len1, node->len2, node->len3);
}
void kernel_mat_rvect_add(rt::Node* node)
{
mvrow_add(node->in1, node->in2, node->out1,
node->len1, node->len2);
}
void kernel_relu(rt::Node* node)
{
vect_relu(node->in1, node->out1, node->len1);
}
void kernel_relu_leaky(rt::Node* node)
{
vect_relu_leaky(node->in1, node->out1, node->len1, node->alpha_leaky);
}
void kernel_sigmoid(rt::Node* node)
{
vect_sigmoid(node->in1, node->out1, node->len1);
}
void kernel_mse(rt::Node* node)
{
*node->out1 = mse(node->in1, node->in2, node->len1, node->len2);
}
void kernel_softmax(rt::Node* node)
{
softmax(node->in1, node->out1, node->len1, node->len2);
}
void kernel_log_softmax(rt::Node* node)
{
log_softmax(node->in1, node->out1, node->len1, node->len2);
}
void kernel_softmax_cross_entropy(rt::Node* node)
{
*node->out1 = softmax_cross_entropy(node->in1, node->in2, node->len1, node->len2);
}
void kernel_tanh(rt::Node* node)
{
vect_tanh(node->in1, node->out1, node->len1);
}
void kernel_mse_grad(rt::Node* node)
{
vect_sub_coeff(node->in2, node->in1, 2. / node->len1, node->out1, node->len1);
}
void kernel_sigmoid_grad(rt::Node* node)
{
sigmoid_grad(node->in1, node->in2, node->out1, node->len1);
}
void kernel_mat_mul_add(rt::Node* node)
{
mat_mul_add(node->in1, node->in2, node->in3, node->out1,
node->len1, node->len2, node->len3);
}
void kernel_tmat_mat_mul(rt::Node* node)
{
tmm_mul(node->in1, node->in2, node->out1,
node->len1, node->len2, node->len3);
}
void kernel_mat_tmat_mul(rt::Node* node)
{
mtm_mul(node->in1, node->in2, node->out1,
node->len1, node->len2, node->len3);
}
void kernel_mat_sum_rows(rt::Node* node)
{
mat_sum_rows(node->in1, node->out1, node->len1, node->len2);
}
void kernel_mat_sum_cols(rt::Node* node)
{
mat_sum_cols(node->in1, node->out1, node->len1, node->len2);
}
void kernel_softmax_cross_entropy_grad(rt::Node* node)
{
softmax_cross_entropy_grad(node->in1, node->in2, node->out1, node->len1, node->len2);
}
void kernel_relu_grad(rt::Node* node)
{
relu_grad(node->in1, node->in2, node->out1, node->len1);
}
void kernel_update(rt::Node* node)
{
vect_update(node->in1, node->out1, *node->in2, node->len1);
}
void kernel_sigmoid_cross_entropy(rt::Node* node)
{
*(node->out1) = sigmoid_cross_entropy(node->in1, node->in2, node->len1);
}
void kernel_sigmoid_cross_entropy_grad(rt::Node* node)
{
sigmoid_cross_entropy_grad(node->in1, node->in2, node->out1, node->len1);
}
void kernel_tanh_grad(rt::Node* node)
{
tanh_grad(node->in1, node->in2, node->out1, node->len1);
}
void kernel_argmax_acc(rt::Node* node)
{
*(node->out1) = argmax_acc(node->in1, node->in2, node->len1, node->len2);
}
void kernel_moment_update(rt::Node* node)
{
moment_update(node->in1, node->out1, node->cons1, node->cons2, node->len1);
}
void kernel_moment_update2(rt::Node* node)
{
moment_update2(node->in1, node->out1, node->cons1, node->cons2, node->len1);
}
void kernel_adam_update(rt::Node* node)
{
dbl_t* t = node->out2;
dbl_t lr = node->cons1;
dbl_t beta1 = node->cons2;
dbl_t beta2 = node->cons3;
dbl_t eps = node->cons4;
++*t;
dbl_t lrt = lr * std::sqrt(1 - std::pow(beta2, *t))
/ (1 - std::pow(beta1, *t));
adam_update(node->in1, node->in2, node->out1, lrt, eps, node->len1);
}
void kernel_leaky_relu_grad(rt::Node* node)
{
leaky_relu_grad(node->in1, node->in2, node->out1, node->cons1, node->len1);
}
void kernel_add(rt::Node* node)
{
vect_add(node->in1, node->in2, node->out1, node->len1);
}
}
kernel_f kernels_list[512] = {
kernel_mat_mat_mul,
kernel_mat_rvect_add,
kernel_sigmoid,
kernel_mse,
kernel_softmax,
kernel_log_softmax,
kernel_softmax_cross_entropy,
kernel_conv2d,
kernel_relu,
kernel_relu_leaky,
kernel_tanh,
kernel_mse_grad,
kernel_sigmoid_grad,
kernel_mat_mul_add,
kernel_tmat_mat_mul,
kernel_mat_tmat_mul,
kernel_mat_sum_rows,
kernel_mat_sum_cols,
kernel_softmax_cross_entropy_grad,
kernel_relu_grad,
nullptr,//kernel_conv2d_bias_add,
kernel_update,
kernel_sigmoid_cross_entropy,
kernel_sigmoid_cross_entropy_grad,
kernel_conv2d_input_grad,
kernel_conv2d_kernel_grad,
kernel_argmax_acc,
kernel_moment_update,
kernel_moment_update2,
kernel_adam_update,
kernel_leaky_relu_grad,
nullptr,//kernel_conv2d_bias_add_grad,
kernel_tanh_grad,
kernel_conv2d_transpose,
kernel_conv2d_transpose_input_grad,
kernel_conv2d_transpose_kernel_grad,
kernel_add
};
void kernels_init()
{
for (std::size_t i = 0; i < 64; ++i)
kernels_list[KERNEL_SIMD_OFFSET + i] = simd_kernels_list[i];
}
}
|
a53abf40c88b6da994660ba5efa63fb4ba5133e3.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "reduce1.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *g_idata = NULL;
hipMalloc(&g_idata, XSIZE*YSIZE);
int *g_odata = NULL;
hipMalloc(&g_odata, XSIZE*YSIZE);
int g_size = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
		hipFree(0);
		hipLaunchKernelGGL(reduce1, dim3(gridBlock), dim3(threadBlock), 0, 0, g_idata, g_odata, g_size);
hipDeviceSynchronize();
		for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
			hipLaunchKernelGGL(reduce1, dim3(gridBlock), dim3(threadBlock), 0, 0, g_idata, g_odata, g_size);
}
auto start = steady_clock::now();
		for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
			hipLaunchKernelGGL(reduce1, dim3(gridBlock), dim3(threadBlock), 0, 0, g_idata, g_odata, g_size);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | a53abf40c88b6da994660ba5efa63fb4ba5133e3.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "reduce1.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *g_idata = NULL;
cudaMalloc(&g_idata, XSIZE*YSIZE);
int *g_odata = NULL;
cudaMalloc(&g_odata, XSIZE*YSIZE);
int g_size = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
reduce1<<<gridBlock,threadBlock>>>(g_idata,g_odata,g_size);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
reduce1<<<gridBlock,threadBlock>>>(g_idata,g_odata,g_size);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
reduce1<<<gridBlock,threadBlock>>>(g_idata,g_odata,g_size);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
2f7d9d4976816505f34274e492e4a0f4bb1fc532.hip | // !!! This is a file automatically generated by hipify!!!
// #include <math_functions.h> // CUDA's, not caffe's, for fabs, signbit
#include <hip/hip_runtime_api.h> // lyq add ^^__^^
#include <thrust/device_vector.h>
#include <thrust/functional.h> // thrust::plus
#include <thrust/reduce.h>
#include <cmath>
#include "caffe/common.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <>
void caffe_gpu_gemm<float>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const float alpha, const float* A, const float* B, const float beta,
float* C) {
// Note that cublas follows fortran order.
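  // Caffe stores matrices row-major; a row-major MxN matrix occupies the same memory as a
  // column-major NxM matrix (its transpose). So instead of transposing anything we ask
  // cuBLAS for C^T = op(B)^T * op(A)^T, which is why B is passed before A below and the
  // leading dimensions are the row-major row widths.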
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
CUBLAS_CHECK(hipblasSgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void caffe_gpu_gemm<double>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const double alpha, const double* A, const double* B, const double beta,
double* C) {
// Note that cublas follows fortran order.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
CUBLAS_CHECK(hipblasDgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void caffe_gpu_gemv<float>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const float alpha, const float* A, const float* x,
const float beta, float* y) {
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
CUBLAS_CHECK(hipblasSgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_gemv<double>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const double alpha, const double* A, const double* x,
const double beta, double* y) {
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
CUBLAS_CHECK(hipblasDgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_axpy<float>(const int N, const float alpha, const float* X,
float* Y) {
CUBLAS_CHECK(hipblasSaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
template <>
void caffe_gpu_axpy<double>(const int N, const double alpha, const double* X,
double* Y) {
CUBLAS_CHECK(hipblasDaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
void caffe_gpu_memcpy(const size_t N, const void* X, void* Y) {
if (X != Y) {
CUDA_CHECK(hipMemcpy(Y, X, N, hipMemcpyDefault)); // NOLINT(caffe/alt_fn)
}
}
template <>
void caffe_gpu_scal<float>(const int N, const float alpha, float *X) {
CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
template <>
void caffe_gpu_scal<double>(const int N, const double alpha, double *X) {
CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
template <>
void caffe_gpu_scal<float>(const int N, const float alpha, float* X,
hipStream_t str) {
hipStream_t initial_stream;
CUBLAS_CHECK(hipblasGetStream(Caffe::cublas_handle(), &initial_stream));
CUBLAS_CHECK(hipblasSetStream(Caffe::cublas_handle(), str));
CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), N, &alpha, X, 1));
CUBLAS_CHECK(hipblasSetStream(Caffe::cublas_handle(), initial_stream));
}
template <>
void caffe_gpu_scal<double>(const int N, const double alpha, double* X,
hipStream_t str) {
hipStream_t initial_stream;
CUBLAS_CHECK(hipblasGetStream(Caffe::cublas_handle(), &initial_stream));
CUBLAS_CHECK(hipblasSetStream(Caffe::cublas_handle(), str));
CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(), N, &alpha, X, 1));
CUBLAS_CHECK(hipblasSetStream(Caffe::cublas_handle(), initial_stream));
}
template <>
void caffe_gpu_axpby<float>(const int N, const float alpha, const float* X,
const float beta, float* Y) {
caffe_gpu_scal<float>(N, beta, Y);
caffe_gpu_axpy<float>(N, alpha, X, Y);
}
template <>
void caffe_gpu_axpby<double>(const int N, const double alpha, const double* X,
const double beta, double* Y) {
caffe_gpu_scal<double>(N, beta, Y);
caffe_gpu_axpy<double>(N, alpha, X, Y);
}
template <>
void caffe_gpu_dot<float>(const int n, const float* x, const float* y,
float* out) {
CUBLAS_CHECK(hipblasSdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_dot<double>(const int n, const double* x, const double* y,
double * out) {
CUBLAS_CHECK(hipblasDdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_asum<float>(const int n, const float* x, float* y) {
CUBLAS_CHECK(hipblasSasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_asum<double>(const int n, const double* x, double* y) {
CUBLAS_CHECK(hipblasDasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_scale<float>(const int n, const float alpha, const float *x,
float* y) {
CUBLAS_CHECK(hipblasScopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
template <>
void caffe_gpu_scale<double>(const int n, const double alpha, const double *x,
double* y) {
CUBLAS_CHECK(hipblasDcopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
template <typename Dtype>
__global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = alpha;
}
}
template <typename Dtype>
void caffe_gpu_set(const int N, const Dtype alpha, Dtype* Y) {
if (alpha == 0) {
CUDA_CHECK(hipMemset(Y, 0, sizeof(Dtype) * N)); // NOLINT(caffe/alt_fn)
return;
}
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( set_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, alpha, Y);
}
template void caffe_gpu_set<int>(const int N, const int alpha, int* Y);
template void caffe_gpu_set<float>(const int N, const float alpha, float* Y);
template void caffe_gpu_set<double>(const int N, const double alpha, double* Y);
template <typename Dtype>
__global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] += alpha;
}
}
template <>
void caffe_gpu_add_scalar(const int N, const float alpha, float* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_scalar_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, alpha, Y);
}
template <>
void caffe_gpu_add_scalar(const int N, const double alpha, double* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_scalar_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, alpha, Y);
}
template <typename Dtype>
__global__ void add_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] + b[index];
}
}
template <>
void caffe_gpu_add<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_add<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void sub_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] - b[index];
}
}
template <>
void caffe_gpu_sub<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sub_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_sub<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sub_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void mul_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] * b[index];
}
}
template <>
void caffe_gpu_mul<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( mul_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_mul<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( mul_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void div_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] / b[index];
}
}
template <>
void caffe_gpu_div<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( div_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_div<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( div_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = abs(a[index]);
}
}
template <>
void caffe_gpu_abs<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( abs_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <>
void caffe_gpu_abs<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( abs_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <typename Dtype>
__global__ void exp_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = exp(a[index]);
}
}
template <>
void caffe_gpu_exp<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( exp_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <>
void caffe_gpu_exp<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( exp_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <typename Dtype>
__global__ void log_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = log(a[index]);
}
}
template <>
void caffe_gpu_log<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( log_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <>
void caffe_gpu_log<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( log_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <typename Dtype>
__global__ void powx_kernel(const int n, const Dtype* a,
const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = pow(a[index], alpha);
}
}
template <>
void caffe_gpu_powx<float>(const int N, const float* a,
const float alpha, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( powx_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, alpha, y);
}
template <>
void caffe_gpu_powx<double>(const int N, const double* a,
const double alpha, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( powx_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, alpha, y);
}
template <typename Dtype>
__global__ void sqrt_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = sqrt(a[index]);
}
}
template <>
void caffe_gpu_sqrt<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sqrt_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <>
void caffe_gpu_sqrt<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sqrt_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = (Dtype(0) < x[index])
- (x[index] < Dtype(0)));
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = signbit(x[index]));
void caffe_gpu_rng_uniform(const int n, unsigned int* r) {
CURAND_CHECK(hiprandGenerate(Caffe::curand_generator(), r, n));
}
template <>
void caffe_gpu_rng_uniform<float>(const int n, const float a, const float b,
float* r) {
CURAND_CHECK(hiprandGenerateUniform(Caffe::curand_generator(), r, n));
const float range = b - a;
if (range != static_cast<float>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<float>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
template <>
void caffe_gpu_rng_uniform<double>(const int n, const double a, const double b,
double* r) {
CURAND_CHECK(hiprandGenerateUniformDouble(Caffe::curand_generator(), r, n));
const double range = b - a;
if (range != static_cast<double>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<double>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
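// Note on the two caffe_gpu_rng_uniform specializations above: the generator
// fills r with uniform samples on the unit interval, so scaling by range = b - a
// and then shifting by a yields samples in [a, b]; the two conditionals simply
// skip these steps when they would be no-ops (range == 1, a == 0).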
template <>
void caffe_gpu_rng_gaussian(const int n, const float mu, const float sigma,
float* r) {
CURAND_CHECK(
hiprandGenerateNormal(Caffe::curand_generator(), r, n, mu, sigma));
}
template <>
void caffe_gpu_rng_gaussian(const int n, const double mu, const double sigma,
double* r) {
CURAND_CHECK(
hiprandGenerateNormalDouble(Caffe::curand_generator(), r, n, mu, sigma));
}
} // namespace caffe
| 2f7d9d4976816505f34274e492e4a0f4bb1fc532.cu | // #include <math_functions.h> // CUDA's, not caffe's, for fabs, signbit
#include <cuda_runtime_api.h> // lyq add ^^__^^
#include <thrust/device_vector.h>
#include <thrust/functional.h> // thrust::plus
#include <thrust/reduce.h>
#include <cmath>
#include "caffe/common.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <>
void caffe_gpu_gemm<float>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const float alpha, const float* A, const float* B, const float beta,
float* C) {
// Note that cublas follows fortran order.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
CUBLAS_CHECK(cublasSgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void caffe_gpu_gemm<double>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const double alpha, const double* A, const double* B, const double beta,
double* C) {
// Note that cublas follows fortran order.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
CUBLAS_CHECK(cublasDgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void caffe_gpu_gemv<float>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const float alpha, const float* A, const float* x,
const float beta, float* y) {
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N;
CUBLAS_CHECK(cublasSgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_gemv<double>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const double alpha, const double* A, const double* x,
const double beta, double* y) {
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N;
CUBLAS_CHECK(cublasDgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_axpy<float>(const int N, const float alpha, const float* X,
float* Y) {
CUBLAS_CHECK(cublasSaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
template <>
void caffe_gpu_axpy<double>(const int N, const double alpha, const double* X,
double* Y) {
CUBLAS_CHECK(cublasDaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
void caffe_gpu_memcpy(const size_t N, const void* X, void* Y) {
if (X != Y) {
CUDA_CHECK(cudaMemcpy(Y, X, N, cudaMemcpyDefault)); // NOLINT(caffe/alt_fn)
}
}
template <>
void caffe_gpu_scal<float>(const int N, const float alpha, float *X) {
CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
template <>
void caffe_gpu_scal<double>(const int N, const double alpha, double *X) {
CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
template <>
void caffe_gpu_scal<float>(const int N, const float alpha, float* X,
cudaStream_t str) {
cudaStream_t initial_stream;
CUBLAS_CHECK(cublasGetStream(Caffe::cublas_handle(), &initial_stream));
CUBLAS_CHECK(cublasSetStream(Caffe::cublas_handle(), str));
CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), N, &alpha, X, 1));
CUBLAS_CHECK(cublasSetStream(Caffe::cublas_handle(), initial_stream));
}
template <>
void caffe_gpu_scal<double>(const int N, const double alpha, double* X,
cudaStream_t str) {
cudaStream_t initial_stream;
CUBLAS_CHECK(cublasGetStream(Caffe::cublas_handle(), &initial_stream));
CUBLAS_CHECK(cublasSetStream(Caffe::cublas_handle(), str));
CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), N, &alpha, X, 1));
CUBLAS_CHECK(cublasSetStream(Caffe::cublas_handle(), initial_stream));
}
template <>
void caffe_gpu_axpby<float>(const int N, const float alpha, const float* X,
const float beta, float* Y) {
caffe_gpu_scal<float>(N, beta, Y);
caffe_gpu_axpy<float>(N, alpha, X, Y);
}
template <>
void caffe_gpu_axpby<double>(const int N, const double alpha, const double* X,
const double beta, double* Y) {
caffe_gpu_scal<double>(N, beta, Y);
caffe_gpu_axpy<double>(N, alpha, X, Y);
}
template <>
void caffe_gpu_dot<float>(const int n, const float* x, const float* y,
float* out) {
CUBLAS_CHECK(cublasSdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_dot<double>(const int n, const double* x, const double* y,
double * out) {
CUBLAS_CHECK(cublasDdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_asum<float>(const int n, const float* x, float* y) {
CUBLAS_CHECK(cublasSasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_asum<double>(const int n, const double* x, double* y) {
CUBLAS_CHECK(cublasDasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_scale<float>(const int n, const float alpha, const float *x,
float* y) {
CUBLAS_CHECK(cublasScopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
template <>
void caffe_gpu_scale<double>(const int n, const double alpha, const double *x,
double* y) {
CUBLAS_CHECK(cublasDcopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
template <typename Dtype>
__global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = alpha;
}
}
template <typename Dtype>
void caffe_gpu_set(const int N, const Dtype alpha, Dtype* Y) {
if (alpha == 0) {
CUDA_CHECK(cudaMemset(Y, 0, sizeof(Dtype) * N)); // NOLINT(caffe/alt_fn)
return;
}
// NOLINT_NEXT_LINE(whitespace/operators)
set_kernel<Dtype><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, alpha, Y);
}
template void caffe_gpu_set<int>(const int N, const int alpha, int* Y);
template void caffe_gpu_set<float>(const int N, const float alpha, float* Y);
template void caffe_gpu_set<double>(const int N, const double alpha, double* Y);
template <typename Dtype>
__global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] += alpha;
}
}
template <>
void caffe_gpu_add_scalar(const int N, const float alpha, float* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_scalar_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, alpha, Y);
}
template <>
void caffe_gpu_add_scalar(const int N, const double alpha, double* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_scalar_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, alpha, Y);
}
template <typename Dtype>
__global__ void add_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] + b[index];
}
}
template <>
void caffe_gpu_add<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_add<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void sub_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] - b[index];
}
}
template <>
void caffe_gpu_sub<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
sub_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_sub<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
sub_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void mul_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] * b[index];
}
}
template <>
void caffe_gpu_mul<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
mul_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_mul<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
mul_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void div_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] / b[index];
}
}
template <>
void caffe_gpu_div<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
div_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_div<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
div_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = abs(a[index]);
}
}
template <>
void caffe_gpu_abs<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
abs_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <>
void caffe_gpu_abs<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
abs_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <typename Dtype>
__global__ void exp_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = exp(a[index]);
}
}
template <>
void caffe_gpu_exp<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
exp_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <>
void caffe_gpu_exp<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
exp_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <typename Dtype>
__global__ void log_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = log(a[index]);
}
}
template <>
void caffe_gpu_log<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
log_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <>
void caffe_gpu_log<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
log_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <typename Dtype>
__global__ void powx_kernel(const int n, const Dtype* a,
const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = pow(a[index], alpha);
}
}
template <>
void caffe_gpu_powx<float>(const int N, const float* a,
const float alpha, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
powx_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, alpha, y);
}
template <>
void caffe_gpu_powx<double>(const int N, const double* a,
const double alpha, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
powx_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, alpha, y);
}
template <typename Dtype>
__global__ void sqrt_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = sqrt(a[index]);
}
}
template <>
void caffe_gpu_sqrt<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
sqrt_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <>
void caffe_gpu_sqrt<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
sqrt_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = (Dtype(0) < x[index])
- (x[index] < Dtype(0)));
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = signbit(x[index]));
void caffe_gpu_rng_uniform(const int n, unsigned int* r) {
CURAND_CHECK(curandGenerate(Caffe::curand_generator(), r, n));
}
template <>
void caffe_gpu_rng_uniform<float>(const int n, const float a, const float b,
float* r) {
CURAND_CHECK(curandGenerateUniform(Caffe::curand_generator(), r, n));
const float range = b - a;
if (range != static_cast<float>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<float>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
template <>
void caffe_gpu_rng_uniform<double>(const int n, const double a, const double b,
double* r) {
CURAND_CHECK(curandGenerateUniformDouble(Caffe::curand_generator(), r, n));
const double range = b - a;
if (range != static_cast<double>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<double>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
template <>
void caffe_gpu_rng_gaussian(const int n, const float mu, const float sigma,
float* r) {
CURAND_CHECK(
curandGenerateNormal(Caffe::curand_generator(), r, n, mu, sigma));
}
template <>
void caffe_gpu_rng_gaussian(const int n, const double mu, const double sigma,
double* r) {
CURAND_CHECK(
curandGenerateNormalDouble(Caffe::curand_generator(), r, n, mu, sigma));
}
} // namespace caffe
|
6049313a66c3413857a7390a4513868a1d3d83ad.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
//////////// Calculates the weighting for assembling a single element solution ///////////
// One weight is evaluated for each node
// Added back to global memory
__device__ void jacobi_iter( float *ue, float *up_glob, int *cells, float *temp1, int idx, int idy)
{
float ue_new;
int v;
int offset = 15*threadIdx.x;
/*
Le_shrd = &temp1[offset];
be_shrd = &temp1[offset + 9];
u_old = &temp1[offset + 12];
*/
v = cells[(idx*3) + idy];
ue_new = temp1[(offset + 9) + idy];
temp1[(offset + 12) + idy] = up_glob[v];
__syncthreads();
ue_new -= temp1[offset + (idy*3) + ((idy+1)%3) ] * temp1[(offset + 12) + (idy+1) % 3];
ue_new -= temp1[offset + (idy*3) + ((idy+2)%3) ] * temp1[(offset + 12) + (idy+2) % 3];
ue_new /= temp1[offset + (idy*3) + idy];
ue[(idx*3) + idy] = ue_new;
}
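// Per-thread shared-memory layout used above: temp1[offset .. offset+8] holds the
// 3x3 element matrix Le and temp1[offset+9 .. offset+11] the right-hand side be
// (both staged by elems_shared_cpy below), while temp1[offset+12 .. offset+14]
// caches the previous nodal values, so each (element, node) thread performs one
// Jacobi update: ue_i = (be_i - sum_{j != i} Le_ij * u_j) / Le_ii.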
__device__ void elems_shared_cpy(float *Le, float *be, float *temp1, int idx, int idy){
int offset = 15*threadIdx.x;
// Le_shrd = &temp1[offset];
// be_shrd = &temp1[offset + 9];
temp1[(offset + 9) + idy] = be[(idx*3) + idy];
for(int i=0; i<3; i++){
temp1[offset + (idy*3) + i] = Le[(idx*9) + (idy*3) + i];
}
}
__global__ void local_sols( float *Le, float *be, float *ue, float *up_glob, int *cells, int num_cells)
{
int idx = blockIdx.x*blockDim.x + threadIdx.x;
int idy = blockIdx.y*blockDim.y + threadIdx.y;
extern __shared__ float temp1[];
if(idx < num_cells && idy < blockDim.y){
elems_shared_cpy(Le, be, temp1, idx, idy);
__syncthreads();
jacobi_iter(ue, up_glob, cells, temp1, idx, idy);
}
} | 6049313a66c3413857a7390a4513868a1d3d83ad.cu | #include "includes.h"
//////////// Calculates the weighting for assembling a single element solution ///////////
// One weight is evaluated for each node
// Added back to global memory
__device__ void jacobi_iter( float *ue, float *up_glob, int *cells, float *temp1, int idx, int idy)
{
float ue_new;
int v;
int offset = 15*threadIdx.x;
/*
Le_shrd = &temp1[offset];
be_shrd = &temp1[offset + 9];
u_old = &temp1[offset + 12];
*/
v = cells[(idx*3) + idy];
ue_new = temp1[(offset + 9) + idy];
temp1[(offset + 12) + idy] = up_glob[v];
__syncthreads();
ue_new -= temp1[offset + (idy*3) + ((idy+1)%3) ] * temp1[(offset + 12) + (idy+1) % 3];
ue_new -= temp1[offset + (idy*3) + ((idy+2)%3) ] * temp1[(offset + 12) + (idy+2) % 3];
ue_new /= temp1[offset + (idy*3) + idy];
ue[(idx*3) + idy] = ue_new;
}
__device__ void elems_shared_cpy(float *Le, float *be, float *temp1, int idx, int idy){
int offset = 15*threadIdx.x;
// Le_shrd = &temp1[offset];
// be_shrd = &temp1[offset + 9];
temp1[(offset + 9) + idy] = be[(idx*3) + idy];
for(int i=0; i<3; i++){
temp1[offset + (idy*3) + i] = Le[(idx*9) + (idy*3) + i];
}
}
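// The kernel below appears to expect a 2-D block of (elements x nodes) threads,
// i.e. blockDim.y == 3, launched with dynamic shared memory of
// 15 * blockDim.x * sizeof(float) so that every element gets the 9 + 3 + 3
// floats used by the helper functions above.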
__global__ void local_sols( float *Le, float *be, float *ue, float *up_glob, int *cells, int num_cells)
{
int idx = blockIdx.x*blockDim.x + threadIdx.x;
int idy = blockIdx.y*blockDim.y + threadIdx.y;
extern __shared__ float temp1[];
if(idx < num_cells && idy < blockDim.y){
elems_shared_cpy(Le, be, temp1, idx, idy);
__syncthreads();
jacobi_iter(ue, up_glob, cells, temp1, idx, idy);
}
} |
2370efa36a3fab96798444b0c5d0ba41481fb1bc.hip | // !!! This is a file automatically generated by hipify!!!
//
// by Jan Eric Kyprianidis <www.kyprianidis.com>
// Copyright (C) 2010-2012 Computer Graphics Systems Group at the
// Hasso-Plattner-Institut, Potsdam, Germany <www.hpi3d.de>
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
#include <oz/stbf3.h>
#include <oz/st.h>
#include <oz/st_util.h>
#include <oz/stintrk2.h>
#include <oz/filter_bf.h>
#include <oz/generate.h>
#include <oz/gpu_sampler2.h>
namespace oz {
template<typename T, int order, bool ustep, bool src_linear, class SRC, class ST> struct StBf3Filter : generator<T> {
unsigned w_, h_;
const SRC src_;
const ST st_;
float sigma_r_, sigma_d_, precision_;
float step_size_;
StBf3Filter( unsigned w, unsigned h, const SRC& src, const ST& st,
float sigma_d, float sigma_r, float precision, float step_size )
: w_(w), h_(h), src_(src), st_(st), sigma_d_(sigma_d),
sigma_r_(sigma_r), precision_(precision), step_size_(step_size) {}
inline __device__ T operator()( int ix, int iy ) const {
float2 p0 = make_float2(ix + 0.5f, iy + 0.5f);
if (ustep) {
filter_bf_trapez<T,SRC,src_linear> f(src_, src_(p0.x, p0.y), sigma_d_, sigma_r_, precision_);
st3_int_ustep<ST,filter_bf_trapez<T,SRC,src_linear>,order>(p0, st_, f, w_, h_, step_size_);
return f.result();
} else {
filter_bf<T,SRC> f(src_, src_(p0.x, p0.y), sigma_d_, sigma_r_, precision_);
st3_int<ST,filter_bf<T,SRC>,order,false>(p0, st_, f, w_, h_, step_size_);
return f.result();
}
}
};
template<typename T, int order>
gpu_image filterTO( const oz::gpu_image& src, bool src_linear,
const oz::gpu_image& st, bool st_linear,
float sigma_d, float sigma_r, float precision,
bool ustep,
float step_size )
{
if (ustep) {
if (src_linear) {
return generate(src.size(), StBf3Filter<T, order, true, true, gpu_sampler<T,0>, gpu_sampler<float3,1> >(
src.w(), src.h(),
gpu_sampler<T,0>(src, hipFilterModeLinear),
gpu_sampler<float3,1>(st, st_linear? hipFilterModeLinear : hipFilterModePoint),
sigma_d, sigma_r, precision, step_size));
} else {
return generate(src.size(), StBf3Filter<T, order, true, false, gpu_sampler<T,0>, gpu_sampler<float3,1> >(
src.w(), src.h(),
gpu_sampler<T,0>(src, hipFilterModePoint),
gpu_sampler<float3,1>(st, st_linear? hipFilterModeLinear : hipFilterModePoint),
sigma_d, sigma_r, precision, step_size));
}
} else {
return generate(src.size(), StBf3Filter<T, order, false, false, gpu_sampler<T,0>, gpu_sampler<float3,1> >(
src.w(), src.h(),
gpu_sampler<T,0>(src, src_linear? hipFilterModeLinear : hipFilterModePoint),
gpu_sampler<float3,1>(st, st_linear? hipFilterModeLinear : hipFilterModePoint),
sigma_d, sigma_r, precision, step_size));
}
}
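    // The template booleans above are resolved at compile time: 'ustep' selects
    // the trapezoidal accumulator filter_bf_trapez driven by st3_int_ustep,
    // otherwise the plain filter_bf accumulator with st3_int is used, while
    // src_linear / st_linear only switch the samplers between
    // hipFilterModeLinear and hipFilterModePoint.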
template<typename T>
gpu_image filterT( const oz::gpu_image& src, bool src_linear,
const oz::gpu_image& st, bool st_linear,
float sigma_d, float sigma_r, float precision,
bool ustep, int order, float step_size )
{
switch (order) {
case 1: return filterTO<T,1>(src, src_linear, st, st_linear, sigma_d, sigma_r, precision, ustep, step_size);
case 2: return filterTO<T,2>(src, src_linear, st, st_linear, sigma_d, sigma_r, precision, ustep, step_size);
default:
OZ_X() << "Invalid order!";
}
}
gpu_image stbf3_filter_( const gpu_image& src, const gpu_image& st,
float sigma_d, float sigma_r, float precision,
bool src_linear, bool st_linear, bool ustep, int order, float step_size )
{
if (sigma_d <= 0) return src;
if (src.size() != st.size()) OZ_X() << "Sizes must match!";
switch (src.format()) {
case FMT_FLOAT: return filterT<float >(src, src_linear, st, st_linear, sigma_d, sigma_r, precision, ustep, order, step_size);
case FMT_FLOAT3: return filterT<float3>(src, src_linear, st, st_linear, sigma_d, sigma_r, precision, ustep, order, step_size);
default:
OZ_INVALID_FORMAT();
}
}
}
| 2370efa36a3fab96798444b0c5d0ba41481fb1bc.cu | //
// by Jan Eric Kyprianidis <www.kyprianidis.com>
// Copyright (C) 2010-2012 Computer Graphics Systems Group at the
// Hasso-Plattner-Institut, Potsdam, Germany <www.hpi3d.de>
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
#include <oz/stbf3.h>
#include <oz/st.h>
#include <oz/st_util.h>
#include <oz/stintrk2.h>
#include <oz/filter_bf.h>
#include <oz/generate.h>
#include <oz/gpu_sampler2.h>
namespace oz {
template<typename T, int order, bool ustep, bool src_linear, class SRC, class ST> struct StBf3Filter : generator<T> {
unsigned w_, h_;
const SRC src_;
const ST st_;
float sigma_r_, sigma_d_, precision_;
float step_size_;
StBf3Filter( unsigned w, unsigned h, const SRC& src, const ST& st,
float sigma_d, float sigma_r, float precision, float step_size )
: w_(w), h_(h), src_(src), st_(st), sigma_d_(sigma_d),
sigma_r_(sigma_r), precision_(precision), step_size_(step_size) {}
inline __device__ T operator()( int ix, int iy ) const {
float2 p0 = make_float2(ix + 0.5f, iy + 0.5f);
if (ustep) {
filter_bf_trapez<T,SRC,src_linear> f(src_, src_(p0.x, p0.y), sigma_d_, sigma_r_, precision_);
st3_int_ustep<ST,filter_bf_trapez<T,SRC,src_linear>,order>(p0, st_, f, w_, h_, step_size_);
return f.result();
} else {
filter_bf<T,SRC> f(src_, src_(p0.x, p0.y), sigma_d_, sigma_r_, precision_);
st3_int<ST,filter_bf<T,SRC>,order,false>(p0, st_, f, w_, h_, step_size_);
return f.result();
}
}
};
template<typename T, int order>
gpu_image filterTO( const oz::gpu_image& src, bool src_linear,
const oz::gpu_image& st, bool st_linear,
float sigma_d, float sigma_r, float precision,
bool ustep,
float step_size )
{
if (ustep) {
if (src_linear) {
return generate(src.size(), StBf3Filter<T, order, true, true, gpu_sampler<T,0>, gpu_sampler<float3,1> >(
src.w(), src.h(),
gpu_sampler<T,0>(src, cudaFilterModeLinear),
gpu_sampler<float3,1>(st, st_linear? cudaFilterModeLinear : cudaFilterModePoint),
sigma_d, sigma_r, precision, step_size));
} else {
return generate(src.size(), StBf3Filter<T, order, true, false, gpu_sampler<T,0>, gpu_sampler<float3,1> >(
src.w(), src.h(),
gpu_sampler<T,0>(src, cudaFilterModePoint),
gpu_sampler<float3,1>(st, st_linear? cudaFilterModeLinear : cudaFilterModePoint),
sigma_d, sigma_r, precision, step_size));
}
} else {
return generate(src.size(), StBf3Filter<T, order, false, false, gpu_sampler<T,0>, gpu_sampler<float3,1> >(
src.w(), src.h(),
gpu_sampler<T,0>(src, src_linear? cudaFilterModeLinear : cudaFilterModePoint),
gpu_sampler<float3,1>(st, st_linear? cudaFilterModeLinear : cudaFilterModePoint),
sigma_d, sigma_r, precision, step_size));
}
}
template<typename T>
gpu_image filterT( const oz::gpu_image& src, bool src_linear,
const oz::gpu_image& st, bool st_linear,
float sigma_d, float sigma_r, float precision,
bool ustep, int order, float step_size )
{
switch (order) {
case 1: return filterTO<T,1>(src, src_linear, st, st_linear, sigma_d, sigma_r, precision, ustep, step_size);
case 2: return filterTO<T,2>(src, src_linear, st, st_linear, sigma_d, sigma_r, precision, ustep, step_size);
default:
OZ_X() << "Invalid order!";
}
}
gpu_image stbf3_filter_( const gpu_image& src, const gpu_image& st,
float sigma_d, float sigma_r, float precision,
bool src_linear, bool st_linear, bool ustep, int order, float step_size )
{
if (sigma_d <= 0) return src;
if (src.size() != st.size()) OZ_X() << "Sizes must match!";
switch (src.format()) {
case FMT_FLOAT: return filterT<float >(src, src_linear, st, st_linear, sigma_d, sigma_r, precision, ustep, order, step_size);
case FMT_FLOAT3: return filterT<float3>(src, src_linear, st, st_linear, sigma_d, sigma_r, precision, ustep, order, step_size);
default:
OZ_INVALID_FORMAT();
}
}
}
|
e62d69300dd896b33c44ca7edc49a51e952164d7.hip | // !!! This is a file automatically generated by hipify!!!
/*
* The MIT License
*
* Copyright (c) 1997-2023 The University of Utah
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <time.h>
#include <random>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <MersenneTwister.h>
#define BLKWIDTH 32
//______________________________________________________________________
//
//
// The following compares the random number generation on the CPU vs GPU
//
//
//______________________________________________________________________
//______________________________________________________________________
//
inline int RoundUp(double d)
{
if(d>=0){
if((d-(int)d) == 0){
return (int)d;
} else{
return (int)(d+1);
}
} else {
return (int)d;
}
}
//______________________________________________________________________
//
void stopwatch( std::string message, time_t start)
{
double secs;
time_t stop; /* timing variables */
stop = time(nullptr);
secs = difftime(stop, start);
fprintf(stdout," %.f [s] %s \n",secs, message.c_str());
}
//______________________________________________________________________
// CPU-based random number generation
void randCPU( double *M, int nRandNums)
{
unsigned int size = nRandNums;
unsigned int Imem_size = sizeof(unsigned int) * size;
unsigned int Dmem_size = sizeof(double) * size;
int* org_randInt = (int*)malloc(Imem_size);
int* new_randInt = (int*)malloc(Imem_size);
double* org_randDbl = (double*)malloc(Dmem_size);
double* new_randDbl = (double*)malloc(Dmem_size);
//__________________________________
// Original implementation
MTRand mTwister;
for (int i = 0; i< nRandNums; i++){
mTwister.seed(i);
org_randDbl[i] = mTwister.rand();
org_randInt[i] = mTwister.randInt();
}
//__________________________________
// C++11
std::mt19937 mTwist;
std::uniform_real_distribution<double> D_dist(0.0,1.0);
std::uniform_int_distribution<int> I_dist; //
mTwist.seed(1234ULL);
for (int i = 0; i< nRandNums; i++){
new_randDbl[i] = D_dist( mTwist );
new_randInt[i] = I_dist( mTwist );
}
for (int i = 0; i< nRandNums; i++){
M[i] = new_randDbl[i];
}
for (int i = 0; i< nRandNums; i++){
printf( "%i org_randDbl: %g new_randDbl: %g org_randInt: %i, new_randInt: %i\n",i, org_randDbl[i], new_randDbl[i], org_randInt[i], new_randInt[i]);
}
free( org_randInt );
free( new_randInt );
free( org_randDbl );
free( new_randDbl );
}
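// Note: the two code paths above are seeded differently (MTRand is re-seeded
// with i on every iteration, std::mt19937 once with 1234ULL), so the printed
// sequences are not expected to agree; only new_randDbl is copied into M.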
//______________________________________________________________________
// Determine device properties
void deviceProperties( int &maxThreadsPerBlock )
{
// Number of CUDA devices
int devCount;
hipGetDeviceCount(&devCount);
// Iterate through devices
for (int deviceNum = 0; deviceNum < devCount; ++deviceNum){
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, deviceNum);
// printDevProp(deviceProp);
maxThreadsPerBlock = deviceProp.maxThreadsPerBlock;
}
}
//______________________________________________________________________
// This is the host-side random number generation using CUDA
void randGPU_V1( double *M, int nRandNums)
{
int size = nRandNums* sizeof(double);
double* Md;
//__________________________________
// allocate device memory and copy memory to the device
hipMalloc( (void**)&Md, size);
hipMemcpy( Md, M, size, hipMemcpyHostToDevice );
//__________________________________
// Create pseudo-random number generator
// set the seed
// generate the numbers
hiprandGenerator_t randGen;
// hiprandCreateGenerator(&randGen, HIPRAND_RNG_PSEUDO_DEFAULT);
hiprandCreateGenerator(&randGen, HIPRAND_RNG_PSEUDO_MT19937);
hiprandSetPseudoRandomGeneratorSeed(randGen, 1234ULL);
hiprandGenerateUniformDouble(randGen, Md, nRandNums);
//__________________________________
// copy from device memory and free device matrices
hipMemcpy( M, Md, size, hipMemcpyDeviceToHost );
hipFree( Md );
hiprandDestroyGenerator(randGen);
}
//______________________________________________________________________
// Returns a random number
__device__ double randDevice(hiprandState_t* globalState, const int tid)
{
hiprandState_t localState = globalState[tid];
double val = hiprand(&localState);
globalState[tid] = localState;
return (double)val * (1.0/4294967295.0);
}
//______________________________________________________________________
// Returns a random number excluding 0 & 1.0. See MersenneTwister.h
//
__device__ double randDblExcDevice(hiprandState_t* globalState, const int tid)
{
hiprandState_t localState = globalState[tid];
double val = hiprand(&localState);
globalState[tid] = localState;
return ( double(val) + 0.5 ) * (1.0/4294967296.0);
}
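// hiprand() returns a 32-bit unsigned integer, so randDevice maps it onto [0,1]
// by dividing by 2^32 - 1 (4294967295), while randDblExcDevice adds 0.5 and
// divides by 2^32 (4294967296) to land strictly inside (0,1), matching the
// rand()/randDblExc() conventions of MersenneTwister.h.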
//______________________________________________________________________
//
__global__ void setup_kernel(hiprandState_t* randNumStates)
{
int tID = threadIdx.x + blockDim.x * threadIdx.y + (blockDim.x * blockDim.y) * threadIdx.z;
/* Each thread gets same seed, a different sequence number, no offset */
hiprand_init(1234, tID, 0, &randNumStates[tID]);
}
//______________________________________________________________________
// Kernel:
__global__ void randNumKernel( hiprandState_t* randNumStates, double* M, double* N, int nRandNums )
{
int tID = threadIdx.x + blockDim.x * threadIdx.y + (blockDim.x * blockDim.y) * threadIdx.z;
// for (int k = 0; k < nRandNums; ++k){
M[tID] = randDblExcDevice( randNumStates, tID);
N[tID] = randDevice( randNumStates, tID );
// }
}
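// Note: every launched thread writes one entry of M and N, and with the block
// and grid sizes chosen in randGPU_V2 the thread count can exceed nRandNums,
// so a guard such as if (tID < nRandNums) would be needed before the stores to
// stay within the allocations.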
//______________________________________________________________________
// Device side random number generator
void randGPU_V2( double *M, double *N,int nRandNums)
{
int size = nRandNums* sizeof(double);
double* Md;
double* Nd;
//__________________________________
// allocate device memory and copy memory to the device
hipMalloc( (void**)&Md, size);
hipMalloc( (void**)&Nd, size);
//__________________________________
// copy host memory -> device
hipMemcpy( Md, M, size, hipMemcpyHostToDevice );
hipMemcpy( Nd, N, size, hipMemcpyHostToDevice );
//__________________________________
//
int maxThreadsPerBlock = 0;
deviceProperties( maxThreadsPerBlock );
int xMaxThreadsPerBlock = BLKWIDTH;
int yMaxThreadsPerBlock = BLKWIDTH;
maxThreadsPerBlock = xMaxThreadsPerBlock * yMaxThreadsPerBlock; // hardwired for now
int threadsPerBlock = min(maxThreadsPerBlock, nRandNums);
int xBlocks = 0;
int yBlocks = 0;
if( nRandNums > maxThreadsPerBlock){
int nBlocks = RoundUp( nRandNums/sqrt(maxThreadsPerBlock) );
xBlocks = RoundUp( nRandNums/xMaxThreadsPerBlock );
yBlocks = RoundUp( nRandNums/yMaxThreadsPerBlock );
}else{
xBlocks = 1; // if matrix is smaller than 1 block
yBlocks = 1;
}
int nBlocks = xBlocks = yBlocks; // Assumption that
int me = xBlocks * yBlocks * threadsPerBlock;
fprintf(stdout, " xBlocks: %d, yBlocks: %d, nRandNums: %d BLKWIDTH: %d, threadsPerBlock %d ",xBlocks, yBlocks, nRandNums, BLKWIDTH, threadsPerBlock);
fprintf(stdout, " number of threads: %d\n",me);
//__________________________________
// Kernel invocation
dim3 dimBlock(BLKWIDTH, BLKWIDTH, 1);
dim3 dimGrid( xBlocks, yBlocks, 1);
// setup random number generator states on the device, 1 for each thread
hiprandState_t* randNumStates;
int numStates = dimGrid.x * dimGrid.y * dimBlock.x * dimBlock.y * dimBlock.z;
hipMalloc((void**)&randNumStates, numStates * sizeof(hiprandState_t));
//__________________________________
// Global Memory Kernel
time_t start = time(nullptr);
hipLaunchKernelGGL(( setup_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, randNumStates );
stopwatch(" randDeviceGPU setup_kernel: ", start);
start = time(nullptr);
hipLaunchKernelGGL(( randNumKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, randNumStates, Md, Nd, nRandNums );
stopwatch(" randDeviceGPU randNumKernel: ", start);
//__________________________________
// copy from device memory and free memory
start = time(nullptr);
hipMemcpy( M, Md, size, hipMemcpyDeviceToHost );
hipMemcpy( N, Nd, size, hipMemcpyDeviceToHost );
stopwatch(" randDeviceGPU memcopy: ", start);
start = time(nullptr);
hipFree( Md );
hipFree( Nd );
hipFree(randNumStates) ;
stopwatch(" randDeviceGPU free memory: ", start);
}
//______________________________________________________________________
int main( int argc, char** argv)
{
FILE *fp;
fp = fopen("randomNumbers.dat", "w");
for(int power = 0; power<2; ++power) {
//int nRandNums = pow(10,power);
int nRandNums = 8;
fprintf(stdout,"__________________________________\n");
fprintf(stdout," nRand %d \n", nRandNums);
//__________________________________
// allocate memory
unsigned int size = nRandNums;
unsigned int mem_size = sizeof(double) * size;
double* rand_CPU = (double*)malloc(mem_size);
double* rand_GPU_L = (double*)malloc(mem_size);
double* rand_GPU_M = (double*)malloc(mem_size);
double* rand_GPU_N = (double*)malloc(mem_size);
time_t start;
start = time(nullptr);
//__________________________________
// Compute the random numbers
randCPU( rand_CPU, nRandNums );
stopwatch(" randCPU: ", start);
start = time(nullptr);
randGPU_V1( rand_GPU_L, nRandNums);
stopwatch(" randGPU_V1: ", start);
start = time(nullptr);
randGPU_V2( rand_GPU_M, rand_GPU_N, nRandNums);
stopwatch(" randGPU_V2: ", start);
//__________________________________
// Output data
fprintf( fp, " #CPU, GPU_V1, GPU_dblExc, GPU_dblInc\n");
for (int i = 0; i< nRandNums; i++){
fprintf( fp, "%i:%i, %16.15E, %16.15E, %16.15E, %16.15E\n",power,i, rand_CPU[i], rand_GPU_L[i], rand_GPU_M[i], rand_GPU_N[i] );
//printf( "%i, %16.15E, %16.15E, %16.15E, %16.15E\n",i, rand_CPU[i], rand_GPU_L[i], rand_GPU_M[i], rand_GPU_N[i] );
}
//__________________________________
//Free memory
free( rand_CPU );
free( rand_GPU_L );
free( rand_GPU_M );
free( rand_GPU_N );
} // loop
fclose(fp);
}
| e62d69300dd896b33c44ca7edc49a51e952164d7.cu | /*
* The MIT License
*
* Copyright (c) 1997-2023 The University of Utah
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <time.h>
#include <random>
#include <cuda.h>
#include <cuda_runtime.h>
#include <curand.h>
#include <curand_kernel.h>
#include <MersenneTwister.h>
#define BLKWIDTH 32
//______________________________________________________________________
//
//
// The following compares the random number generation on the CPU vs GPU
//
//
//______________________________________________________________________
//______________________________________________________________________
//
inline int RoundUp(double d)
{
if(d>=0){
if((d-(int)d) == 0){
return (int)d;
} else{
return (int)(d+1);
}
} else {
return (int)d;
}
}
//______________________________________________________________________
//
void stopwatch( std::string message, time_t start)
{
double secs;
time_t stop; /* timing variables */
stop = time(nullptr);
secs = difftime(stop, start);
fprintf(stdout," %.f [s] %s \n",secs, message.c_str());
}
//______________________________________________________________________
// CPU-based random number generation
void randCPU( double *M, int nRandNums)
{
unsigned int size = nRandNums;
unsigned int Imem_size = sizeof(unsigned int) * size;
unsigned int Dmem_size = sizeof(double) * size;
int* org_randInt = (int*)malloc(Imem_size);
int* new_randInt = (int*)malloc(Imem_size);
double* org_randDbl = (double*)malloc(Dmem_size);
double* new_randDbl = (double*)malloc(Dmem_size);
//__________________________________
// Original implementation
MTRand mTwister;
for (int i = 0; i< nRandNums; i++){
mTwister.seed(i);
org_randDbl[i] = mTwister.rand();
org_randInt[i] = mTwister.randInt();
}
//__________________________________
// C++11
std::mt19937 mTwist;
std::uniform_real_distribution<double> D_dist(0.0,1.0);
std::uniform_int_distribution<int> I_dist; //
mTwist.seed(1234ULL);
for (int i = 0; i< nRandNums; i++){
new_randDbl[i] = D_dist( mTwist );
new_randInt[i] = I_dist( mTwist );
}
for (int i = 0; i< nRandNums; i++){
M[i] = new_randDbl[i];
}
for (int i = 0; i< nRandNums; i++){
printf( "%i org_randDbl: %g new_randDbl: %g org_randInt: %i, new_randInt: %i\n",i, org_randDbl[i], new_randDbl[i], org_randInt[i], new_randInt[i]);
}
free( org_randInt );
free( new_randInt );
free( org_randDbl );
free( new_randDbl );
}
//______________________________________________________________________
// Determine device properties
void deviceProperties( int &maxThreadsPerBlock )
{
// Number of CUDA devices
int devCount;
cudaGetDeviceCount(&devCount);
// Iterate through devices
for (int deviceNum = 0; deviceNum < devCount; ++deviceNum){
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, deviceNum);
// printDevProp(deviceProp);
maxThreadsPerBlock = deviceProp.maxThreadsPerBlock;
}
}
//______________________________________________________________________
// This is the host-side random number generation using CUDA
void randGPU_V1( double *M, int nRandNums)
{
int size = nRandNums* sizeof(double);
double* Md;
//__________________________________
// allocate device memory and copy memory to the device
cudaMalloc( (void**)&Md, size);
cudaMemcpy( Md, M, size, cudaMemcpyHostToDevice );
//__________________________________
// Create pseudo-random number generator
// set the seed
// generate the numbers
curandGenerator_t randGen;
// curandCreateGenerator(&randGen, CURAND_RNG_PSEUDO_DEFAULT);
curandCreateGenerator(&randGen, CURAND_RNG_PSEUDO_MT19937);
curandSetPseudoRandomGeneratorSeed(randGen, 1234ULL);
curandGenerateUniformDouble(randGen, Md, nRandNums);
//__________________________________
// copy from device memory and free device matrices
cudaMemcpy( M, Md, size, cudaMemcpyDeviceToHost );
cudaFree( Md );
curandDestroyGenerator(randGen);
}
//______________________________________________________________________
// Returns a random number
__device__ double randDevice(curandState* globalState, const int tid)
{
curandState localState = globalState[tid];
double val = curand(&localState);
globalState[tid] = localState;
return (double)val * (1.0/4294967295.0);
}
//______________________________________________________________________
// Returns a random number excluding 0 & 1.0. See MersenneTwister.h
//
__device__ double randDblExcDevice(curandState* globalState, const int tid)
{
curandState localState = globalState[tid];
double val = curand(&localState);
globalState[tid] = localState;
return ( double(val) + 0.5 ) * (1.0/4294967296.0);
}
//______________________________________________________________________
//
__global__ void setup_kernel(curandState* randNumStates)
{
int tID = threadIdx.x + blockDim.x * threadIdx.y + (blockDim.x * blockDim.y) * threadIdx.z;
/* Each thread gets same seed, a different sequence number, no offset */
curand_init(1234, tID, 0, &randNumStates[tID]);
}
//______________________________________________________________________
// Kernel:
__global__ void randNumKernel( curandState* randNumStates, double* M, double* N, int nRandNums )
{
int tID = threadIdx.x + blockDim.x * threadIdx.y + (blockDim.x * blockDim.y) * threadIdx.z;
// for (int k = 0; k < nRandNums; ++k){
M[tID] = randDblExcDevice( randNumStates, tID);
N[tID] = randDevice( randNumStates, tID );
// }
}
//______________________________________________________________________
// Device side random number generator
void randGPU_V2( double *M, double *N,int nRandNums)
{
int size = nRandNums* sizeof(double);
double* Md;
double* Nd;
//__________________________________
// allocate device memory and copy memory to the device
cudaMalloc( (void**)&Md, size);
cudaMalloc( (void**)&Nd, size);
//__________________________________
// copy host memory -> device
cudaMemcpy( Md, M, size, cudaMemcpyHostToDevice );
cudaMemcpy( Nd, N, size, cudaMemcpyHostToDevice );
//__________________________________
//
int maxThreadsPerBlock = 0;
deviceProperties( maxThreadsPerBlock );
int xMaxThreadsPerBlock = BLKWIDTH;
int yMaxThreadsPerBlock = BLKWIDTH;
maxThreadsPerBlock = xMaxThreadsPerBlock * yMaxThreadsPerBlock; // hardwired for now
int threadsPerBlock = min(maxThreadsPerBlock, nRandNums);
int xBlocks = 0;
int yBlocks = 0;
if( nRandNums > maxThreadsPerBlock){
int nBlocks = RoundUp( nRandNums/sqrt(maxThreadsPerBlock) );
xBlocks = RoundUp( nRandNums/xMaxThreadsPerBlock );
yBlocks = RoundUp( nRandNums/yMaxThreadsPerBlock );
}else{
xBlocks = 1; // if matrix is smaller than 1 block
yBlocks = 1;
}
int nBlocks = xBlocks = yBlocks; // Assumption that
int me = xBlocks * yBlocks * threadsPerBlock;
fprintf(stdout, " xBlocks: %d, yBlocks: %d, nRandNums: %d BLKWIDTH: %d, threadsPerBlock %d ",xBlocks, yBlocks, nRandNums, BLKWIDTH, threadsPerBlock);
fprintf(stdout, " number of threads: %d\n",me);
//__________________________________
// Kernel invocation
dim3 dimBlock(BLKWIDTH, BLKWIDTH, 1);
dim3 dimGrid( xBlocks, yBlocks, 1);
// setup random number generator states on the device, 1 for each thread
curandState* randNumStates;
int numStates = dimGrid.x * dimGrid.y * dimBlock.x * dimBlock.y * dimBlock.z;
cudaMalloc((void**)&randNumStates, numStates * sizeof(curandState));
//__________________________________
// Global Memory Kernel
time_t start = time(nullptr);
setup_kernel<<<dimGrid, dimBlock>>>( randNumStates );
stopwatch(" randDeviceGPU setup_kernel: ", start);
start = time(nullptr);
randNumKernel<<<dimGrid, dimBlock>>>( randNumStates, Md, Nd, nRandNums );
stopwatch(" randDeviceGPU randNumKernel: ", start);
//__________________________________
// copy from device memory and free memory
start = time(nullptr);
cudaMemcpy( M, Md, size, cudaMemcpyDeviceToHost );
cudaMemcpy( N, Nd, size, cudaMemcpyDeviceToHost );
stopwatch(" randDeviceGPU memcopy: ", start);
start = time(nullptr);
cudaFree( Md );
cudaFree( Nd );
cudaFree(randNumStates) ;
stopwatch(" randDeviceGPU free memory: ", start);
}
//______________________________________________________________________
int main( int argc, char** argv)
{
FILE *fp;
fp = fopen("randomNumbers.dat", "w");
for(int power = 0; power<2; ++power) {
//int nRandNums = pow(10,power);
int nRandNums = 8;
fprintf(stdout,"__________________________________\n");
fprintf(stdout," nRand %d \n", nRandNums);
//__________________________________
// allocate memory
unsigned int size = nRandNums;
unsigned int mem_size = sizeof(double) * size;
double* rand_CPU = (double*)malloc(mem_size);
double* rand_GPU_L = (double*)malloc(mem_size);
double* rand_GPU_M = (double*)malloc(mem_size);
double* rand_GPU_N = (double*)malloc(mem_size);
time_t start;
start = time(nullptr);
//__________________________________
// Compute the random numbers
randCPU( rand_CPU, nRandNums );
stopwatch(" randCPU: ", start);
start = time(nullptr);
randGPU_V1( rand_GPU_L, nRandNums);
stopwatch(" randGPU_V1: ", start);
start = time(nullptr);
randGPU_V2( rand_GPU_M, rand_GPU_N, nRandNums);
stopwatch(" randGPU_V2: ", start);
//__________________________________
// Output data
fprintf( fp, " #CPU, GPU_V1, GPU_dblExc, GPU_dblInc\n");
for (int i = 0; i< nRandNums; i++){
fprintf( fp, "%i:%i, %16.15E, %16.15E, %16.15E, %16.15E\n",power,i, rand_CPU[i], rand_GPU_L[i], rand_GPU_M[i], rand_GPU_N[i] );
//printf( "%i, %16.15E, %16.15E, %16.15E, %16.15E\n",i, rand_CPU[i], rand_GPU_L[i], rand_GPU_M[i], rand_GPU_N[i] );
}
//__________________________________
//Free memory
free( rand_CPU );
free( rand_GPU_L );
free( rand_GPU_M );
free( rand_GPU_N );
} // loop
fclose(fp);
}
|
81db5379589c57838a15d421db531462f3209d8a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.0.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date February 2016
@precisions normal z -> c d s
@author Hartwig Anzt
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 256
#define PRECISION_z
// These routines merge multiple kernels from zmergebicgstab into one
// This is the code used for the ASHES2014 paper
// "Accelerating Krylov Subspace Solvers on Graphics Processing Units".
// notice that only CSR format is supported so far.
// accelerated reduction for one vector
__global__ void
magma_zreduce_kernel_spmv1(
int Gs,
int n,
magmaDoubleComplex * vtmp,
magmaDoubleComplex * vtmp2 )
{
extern __shared__ magmaDoubleComplex temp[];
int Idx = threadIdx.x;
int blockSize = 128;
int gridSize = blockSize * 2 * gridDim.x;
temp[Idx] = MAGMA_Z_MAKE( 0.0, 0.0);
int i = blockIdx.x * ( blockSize * 2 ) + Idx;
while (i < Gs ) {
temp[ Idx ] += vtmp[ i ];
temp[ Idx ] += ( i + blockSize < Gs ) ? vtmp[ i + blockSize ]
: MAGMA_Z_MAKE( 0.0, 0.0);
i += gridSize;
}
__syncthreads();
if ( Idx < 64 ){
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile double *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ){
vtmp2[ blockIdx.x ] = temp[ 0 ];
}
}
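// The host wrapper magma_zbicgmerge_spmv1 below calls this kernel repeatedly:
// each pass sums pairs of partial results with a grid-stride loop and a
// shared-memory tree reduction, ping-ponging between the d1/d2 buffers until a
// single value remains.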
__global__ void
magma_zbicgmerge_spmv1_kernel(
int n,
magmaDoubleComplex * dval,
magma_index_t * drowptr,
magma_index_t * dcolind,
magmaDoubleComplex * p,
magmaDoubleComplex * r,
magmaDoubleComplex * v,
magmaDoubleComplex * vtmp)
{
extern __shared__ magmaDoubleComplex temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
if( i<n ){
magmaDoubleComplex dot = MAGMA_Z_ZERO;
int start = drowptr[ i ];
int end = drowptr[ i+1 ];
for( j=start; j<end; j++)
dot += dval[ j ] * p[ dcolind[j] ];
v[ i ] = dot;
}
__syncthreads();
temp[ Idx ] = ( i < n ) ? v[ i ] * r[ i ] : MAGMA_Z_MAKE( 0.0, 0.0);
__syncthreads();
if ( Idx < 128 ){
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ){
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile double *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ){
vtmp[ blockIdx.x ] = temp[ 0 ];
}
}
__global__ void
magma_zbicgstab_alphakernel(
magmaDoubleComplex * skp ){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if( i==0 ){
magmaDoubleComplex tmp = skp[0];
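        // alpha = rho / (r,v): skp[0] currently holds the dot product (v,r)
        // reduced by the host wrapper, and skp[4] is presumed to hold rho from
        // an earlier step of the merged BiCGSTAB iteration.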
skp[0] = skp[4]/tmp;
}
}
/**
Purpose
-------
Merges the first SpmV using CSR with the dot product
and the computation of alpha
Arguments
---------
@param[in]
A magma_z_matrix
system matrix
@param[in]
d1 magmaDoubleComplex_ptr
temporary vector
@param[in]
d2 magmaDoubleComplex_ptr
temporary vector
@param[in]
dp magmaDoubleComplex_ptr
input vector p
@param[in]
dr magmaDoubleComplex_ptr
input vector r
@param[in]
dv magmaDoubleComplex_ptr
output vector v
@param[in,out]
skp magmaDoubleComplex_ptr
array for parameters ( skp[0]=alpha )
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C" magma_int_t
magma_zbicgmerge_spmv1(
magma_z_matrix A,
magmaDoubleComplex_ptr d1,
magmaDoubleComplex_ptr d2,
magmaDoubleComplex_ptr dp,
magmaDoubleComplex_ptr dr,
magmaDoubleComplex_ptr dv,
magmaDoubleComplex_ptr skp,
magma_queue_t queue )
{
int n = A.num_rows;
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( magma_ceildiv( n, local_block_size ) );
dim3 Gs_next;
int Ms = local_block_size * sizeof( magmaDoubleComplex );
magmaDoubleComplex_ptr aux1 = d1, aux2 = d2;
int b = 1;
if ( A.storage_type == Magma_CSR)
hipLaunchKernelGGL(( magma_zbicgmerge_spmv1_kernel), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream(),
n, A.dval, A.drow, A.dcol, dp, dr, dv, d1 );
else
printf("error: only CSR format supported.\n");
while( Gs.x > 1 ) {
Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
if ( Gs_next.x == 1 ) Gs_next.x = 2;
hipLaunchKernelGGL(( magma_zreduce_kernel_spmv1), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2, queue->cuda_stream(),
Gs.x, n, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
magma_zcopyvector( 1, aux1, 1, skp, 1, queue );
dim3 Bs2( 2 );
dim3 Gs2( 1 );
hipLaunchKernelGGL(( magma_zbicgstab_alphakernel), dim3(Gs2), dim3(Bs2), 0, queue->cuda_stream(), skp );
return MAGMA_SUCCESS;
}
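/* Illustrative call sequence (hypothetical variable names, not part of this
   file): with d1/d2 sized to hold at least one partial sum per thread block, a
   BiCGSTAB iteration could invoke
       magma_zbicgmerge_spmv1( A, d1, d2, dp, dr, dv, skp, queue );
   which leaves v = A*p in dv and the updated alpha in skp[0]. */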
/* -------------------------------------------------------------------------- */
// accelerated block reduction for multiple vectors
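// the two vectors being reduced are stored back to back in vtmp with stride n
// (entries [0,n) and [n,2n)); shared memory mirrors this layout with stride blockSize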
__global__ void
magma_zreduce_kernel_spmv2(
int Gs,
int n,
magmaDoubleComplex * vtmp,
magmaDoubleComplex * vtmp2 )
{
extern __shared__ magmaDoubleComplex temp[];
int Idx = threadIdx.x;
int blockSize = 128;
int gridSize = blockSize * 2 * gridDim.x;
int j;
for( j=0; j<2; j++){
int i = blockIdx.x * ( blockSize * 2 ) + Idx;
temp[Idx+j*(blockSize)] = MAGMA_Z_MAKE( 0.0, 0.0);
while (i < Gs ) {
temp[ Idx+j*(blockSize) ] += vtmp[ i+j*n ];
temp[ Idx+j*(blockSize) ] +=
( i + (blockSize) < Gs ) ? vtmp[ i+j*n + (blockSize) ]
: MAGMA_Z_MAKE( 0.0, 0.0);
i += gridSize;
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<2; j++){
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 64 ];
}
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 32 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 16 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 8 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 4 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 2 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 1 ];
__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile double *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ];
}
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ];
}
}
#endif
if ( Idx == 0 ){
for( j=0; j<2; j++){
vtmp2[ blockIdx.x+j*n ] = temp[ j*(blockSize) ];
}
}
}
__global__ void
magma_zbicgmerge_spmv2_kernel(
int n,
magmaDoubleComplex * dval,
magma_index_t * drowptr,
magma_index_t * dcolind,
magmaDoubleComplex * s,
magmaDoubleComplex * t,
magmaDoubleComplex * vtmp )
{
extern __shared__ magmaDoubleComplex temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
if( i<n ){
magmaDoubleComplex dot = MAGMA_Z_ZERO;
int start = drowptr[ i ];
int end = drowptr[ i+1 ];
for( j=start; j<end; j++)
dot += dval[ j ] * s[ dcolind[j] ];
t[ i ] = dot;
}
__syncthreads();
// 2 vectors
if (i<n){
magmaDoubleComplex tmp2 = t[i];
temp[Idx] = s[i] * tmp2;
temp[Idx+blockDim.x] = tmp2 * tmp2;
}
else {
for( j=0; j<2; j++)
temp[Idx+j*blockDim.x] = MAGMA_Z_MAKE( 0.0, 0.0);
}
__syncthreads();
if ( Idx < 128 ){
for( j=0; j<2; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ];
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<2; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ];
}
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ];
__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile double *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
if ( Idx == 0 ){
for( j=0; j<2; j++){
vtmp[ blockIdx.x+j*n ] = temp[ j*blockDim.x ];
}
}
}
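// single-thread helper: skp[6] = <s,t> and skp[7] = <t,t> have just been copied in by
// magma_zbicgmerge_spmv2, so this sets omega = skp[2] = <s,t>/<t,t> and saves the old
// skp[4] into skp[3], where the beta kernel later reads it as the previous rho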
__global__ void
magma_zbicgstab_omegakernel(
magmaDoubleComplex * skp ){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if( i==0 ){
skp[2] = skp[6]/skp[7];
skp[3] = skp[4];
}
}
/**
Purpose
-------
Merges the second SpmV using CSR with the dot product
and the computation of omega
Arguments
---------
@param[in]
A magma_z_matrix
input matrix
@param[in]
d1 magmaDoubleComplex_ptr
temporary vector
@param[in]
d2 magmaDoubleComplex_ptr
temporary vector
@param[in]
ds magmaDoubleComplex_ptr
input vector s
@param[in]
dt magmaDoubleComplex_ptr
output vector t
@param[in,out]
skp magmaDoubleComplex_ptr
array for parameters
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C" magma_int_t
magma_zbicgmerge_spmv2(
magma_z_matrix A,
magmaDoubleComplex_ptr d1,
magmaDoubleComplex_ptr d2,
magmaDoubleComplex_ptr ds,
magmaDoubleComplex_ptr dt,
magmaDoubleComplex_ptr skp,
magma_queue_t queue )
{
int n = A.num_rows;
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( magma_ceildiv( n, local_block_size ) );
dim3 Gs_next;
int Ms = 2*local_block_size * sizeof( magmaDoubleComplex );
magmaDoubleComplex_ptr aux1 = d1, aux2 = d2;
int b = 1;
if ( A.storage_type == Magma_CSR)
hipLaunchKernelGGL(( magma_zbicgmerge_spmv2_kernel), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream(),
n, A.dval, A.drow, A.dcol, ds, dt, d1 );
else
printf("error: only CSR format supported.\n");
while( Gs.x > 1 ) {
Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
if ( Gs_next.x == 1 ) Gs_next.x = 2;
hipLaunchKernelGGL(( magma_zreduce_kernel_spmv2), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2, queue->cuda_stream(),
Gs.x, n, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
magma_zcopyvector( 1, aux1, 1, skp+6, 1, queue );
magma_zcopyvector( 1, aux1+n, 1, skp+7, 1, queue );
dim3 Bs2( 2 );
dim3 Gs2( 1 );
hipLaunchKernelGGL(( magma_zbicgstab_omegakernel), dim3(Gs2), dim3(Bs2), 0, queue->cuda_stream(), skp );
return MAGMA_SUCCESS;
}
/* -------------------------------------------------------------------------- */
__global__ void
magma_zbicgmerge_xrbeta_kernel(
int n,
magmaDoubleComplex * rr,
magmaDoubleComplex * r,
magmaDoubleComplex * p,
magmaDoubleComplex * s,
magmaDoubleComplex * t,
magmaDoubleComplex * x,
magmaDoubleComplex * skp,
magmaDoubleComplex * vtmp )
{
extern __shared__ magmaDoubleComplex temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
magmaDoubleComplex alpha=skp[0];
magmaDoubleComplex omega=skp[2];
if( i<n ){
magmaDoubleComplex sl;
sl = s[i];
x[i] = x[i] + alpha * p[i] + omega * sl;
r[i] = sl - omega * t[i];
}
__syncthreads();
// 2 vectors
if (i<n){
magmaDoubleComplex tmp2 = r[i];
temp[Idx] = rr[i] * tmp2;
temp[Idx+blockDim.x] = tmp2 * tmp2;
}
else {
for( j=0; j<2; j++)
temp[Idx+j*blockDim.x] = MAGMA_Z_MAKE( 0.0, 0.0);
}
__syncthreads();
if ( Idx < 128 ){
for( j=0; j<2; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ];
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<2; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ];
}
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ];
__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile double *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
if ( Idx == 0 ){
for( j=0; j<2; j++){
vtmp[ blockIdx.x+j*n ] = temp[ j*blockDim.x ];
}
}
}
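// single-thread helper: with skp[4] = <rr,r> and skp[5] = <r,r> freshly reduced by the
// kernel above, this computes beta = skp[1] = (skp[4]/skp[3]) * (skp[0]/skp[2]),
// i.e. the usual BiCGStab update beta = (rho_new/rho_old) * (alpha/omega)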
__global__ void
magma_zbicgstab_betakernel(
magmaDoubleComplex * skp )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if( i==0 ){
magmaDoubleComplex tmp1 = skp[4]/skp[3];
magmaDoubleComplex tmp2 = skp[0] / skp[2];
skp[1] = tmp1*tmp2;
}
}
/**
Purpose
-------
Merges the vector updates x = x + alpha*p + omega*s and r = s - omega*t
with the dot products needed for the computation of beta
Arguments
---------
@param[in]
n int
dimension n
@param[in]
d1 magmaDoubleComplex_ptr
temporary vector
@param[in]
d2 magmaDoubleComplex_ptr
temporary vector
@param[in]
rr magmaDoubleComplex_ptr
input vector rr
@param[in]
r magmaDoubleComplex_ptr
input/output vector r
@param[in]
p magmaDoubleComplex_ptr
input vector p
@param[in]
s magmaDoubleComplex_ptr
input vector s
@param[in]
t magmaDoubleComplex_ptr
input vector t
@param[out]
x magmaDoubleComplex_ptr
output vector x
@param[in]
skp magmaDoubleComplex_ptr
array for parameters
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C" magma_int_t
magma_zbicgmerge_xrbeta(
magma_int_t n,
magmaDoubleComplex_ptr d1,
magmaDoubleComplex_ptr d2,
magmaDoubleComplex_ptr rr,
magmaDoubleComplex_ptr r,
magmaDoubleComplex_ptr p,
magmaDoubleComplex_ptr s,
magmaDoubleComplex_ptr t,
magmaDoubleComplex_ptr x,
magmaDoubleComplex_ptr skp,
magma_queue_t queue )
{
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( magma_ceildiv( n, local_block_size ) );
dim3 Gs_next;
int Ms = 2*local_block_size * sizeof( magmaDoubleComplex );
magmaDoubleComplex_ptr aux1 = d1, aux2 = d2;
int b = 1;
hipLaunchKernelGGL(( magma_zbicgmerge_xrbeta_kernel), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream(),
n, rr, r, p, s, t, x, skp, d1);
while( Gs.x > 1 ) {
Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
if ( Gs_next.x == 1 ) Gs_next.x = 2;
hipLaunchKernelGGL(( magma_zreduce_kernel_spmv2), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2, queue->cuda_stream(),
Gs.x, n, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
magma_zcopyvector( 1, aux1, 1, skp+4, 1, queue );
magma_zcopyvector( 1, aux1+n, 1, skp+5, 1, queue );
dim3 Bs2( 2 );
dim3 Gs2( 1 );
hipLaunchKernelGGL(( magma_zbicgstab_betakernel), dim3(Gs2), dim3(Bs2), 0, queue->cuda_stream(), skp );
return MAGMA_SUCCESS;
}
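/*
    Illustrative sketch (not part of MAGMA): one possible ordering of the three merged
    routines above inside a single BiCGStab iteration. It assumes the surrounding driver
    forms s (e.g. s = r - alpha*v) between the calls and has staged the current rho in
    skp[4]; the function name and the omitted error handling are placeholders.
*/
#if 0
static magma_int_t
zbicgmerge_iteration_sketch(
    magma_z_matrix A,
    magmaDoubleComplex_ptr d1, magmaDoubleComplex_ptr d2,
    magmaDoubleComplex_ptr rr, magmaDoubleComplex_ptr r,
    magmaDoubleComplex_ptr p,  magmaDoubleComplex_ptr s,
    magmaDoubleComplex_ptr t,  magmaDoubleComplex_ptr v,
    magmaDoubleComplex_ptr x,  magmaDoubleComplex_ptr skp,
    magma_queue_t queue )
{
    // v = A*p, then skp[0] = alpha = skp[4] / <v,r>
    magma_zbicgmerge_spmv1( A, d1, d2, p, r, v, skp, queue );
    // ... driver updates s = r - alpha*v here ...
    // t = A*s, then skp[2] = omega = <s,t>/<t,t>, skp[3] = old rho
    magma_zbicgmerge_spmv2( A, d1, d2, s, t, skp, queue );
    // x += alpha*p + omega*s, r = s - omega*t, skp[4] = <rr,r>, skp[1] = beta
    magma_zbicgmerge_xrbeta( A.num_rows, d1, d2, rr, r, p, s, t, x, skp, queue );
    return MAGMA_SUCCESS;
}
#endif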
/* -------------------------------------------------------------------------- */
| 81db5379589c57838a15d421db531462f3209d8a.cu |
/*
-- MAGMA (version 2.0.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date February 2016
@precisions normal z -> c d s
@author Hartwig Anzt
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 256
#define PRECISION_z
// These routines merge multiple kernels from zmergebicgstab into one
// This is the code used for the ASHES2014 paper
// "Accelerating Krylov Subspace Solvers on Graphics Processing Units".
// notice that only CSR format is supported so far.
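// Summary of the merged routines in this file:
//   magma_zbicgmerge_spmv1  : v = A*p, reduces <v,r>, then sets skp[0] = alpha = skp[4]/<v,r>
//   magma_zbicgmerge_spmv2  : t = A*s, reduces <s,t> and <t,t> into skp[6],skp[7],
//                             then sets skp[2] = omega = <s,t>/<t,t> and skp[3] = skp[4]
//   magma_zbicgmerge_xrbeta : x += alpha*p + omega*s, r = s - omega*t,
//                             reduces <rr,r>,<r,r> into skp[4],skp[5],
//                             then sets skp[1] = beta = (skp[4]/skp[3])*(skp[0]/skp[2])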
// accelerated reduction for one vector
__global__ void
magma_zreduce_kernel_spmv1(
int Gs,
int n,
magmaDoubleComplex * vtmp,
magmaDoubleComplex * vtmp2 )
{
extern __shared__ magmaDoubleComplex temp[];
int Idx = threadIdx.x;
int blockSize = 128;
int gridSize = blockSize * 2 * gridDim.x;
temp[Idx] = MAGMA_Z_MAKE( 0.0, 0.0);
int i = blockIdx.x * ( blockSize * 2 ) + Idx;
while (i < Gs ) {
temp[ Idx ] += vtmp[ i ];
temp[ Idx ] += ( i + blockSize < Gs ) ? vtmp[ i + blockSize ]
: MAGMA_Z_MAKE( 0.0, 0.0);
i += gridSize;
}
__syncthreads();
if ( Idx < 64 ){
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile double *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ){
vtmp2[ blockIdx.x ] = temp[ 0 ];
}
}
__global__ void
magma_zbicgmerge_spmv1_kernel(
int n,
magmaDoubleComplex * dval,
magma_index_t * drowptr,
magma_index_t * dcolind,
magmaDoubleComplex * p,
magmaDoubleComplex * r,
magmaDoubleComplex * v,
magmaDoubleComplex * vtmp)
{
extern __shared__ magmaDoubleComplex temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
if( i<n ){
magmaDoubleComplex dot = MAGMA_Z_ZERO;
int start = drowptr[ i ];
int end = drowptr[ i+1 ];
for( j=start; j<end; j++)
dot += dval[ j ] * p[ dcolind[j] ];
v[ i ] = dot;
}
__syncthreads();
temp[ Idx ] = ( i < n ) ? v[ i ] * r[ i ] : MAGMA_Z_MAKE( 0.0, 0.0);
__syncthreads();
if ( Idx < 128 ){
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ){
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile double *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ){
vtmp[ blockIdx.x ] = temp[ 0 ];
}
}
__global__ void
magma_zbicgstab_alphakernel(
magmaDoubleComplex * skp ){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if( i==0 ){
magmaDoubleComplex tmp = skp[0];
skp[0] = skp[4]/tmp;
}
}
/**
Purpose
-------
Merges the first SpmV using CSR with the dot product
and the computation of alpha
Arguments
---------
@param[in]
A magma_z_matrix
system matrix
@param[in]
d1 magmaDoubleComplex_ptr
temporary vector
@param[in]
d2 magmaDoubleComplex_ptr
temporary vector
@param[in]
dp magmaDoubleComplex_ptr
input vector p
@param[in]
dr magmaDoubleComplex_ptr
input vector r
@param[in]
dv magmaDoubleComplex_ptr
output vector v
@param[in,out]
skp magmaDoubleComplex_ptr
array for parameters ( skp[0]=alpha )
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C" magma_int_t
magma_zbicgmerge_spmv1(
magma_z_matrix A,
magmaDoubleComplex_ptr d1,
magmaDoubleComplex_ptr d2,
magmaDoubleComplex_ptr dp,
magmaDoubleComplex_ptr dr,
magmaDoubleComplex_ptr dv,
magmaDoubleComplex_ptr skp,
magma_queue_t queue )
{
int n = A.num_rows;
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( magma_ceildiv( n, local_block_size ) );
dim3 Gs_next;
int Ms = local_block_size * sizeof( magmaDoubleComplex );
magmaDoubleComplex_ptr aux1 = d1, aux2 = d2;
int b = 1;
if ( A.storage_type == Magma_CSR)
magma_zbicgmerge_spmv1_kernel<<< Gs, Bs, Ms, queue->cuda_stream()>>>
( n, A.dval, A.drow, A.dcol, dp, dr, dv, d1 );
else
printf("error: only CSR format supported.\n");
while( Gs.x > 1 ) {
Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
if ( Gs_next.x == 1 ) Gs_next.x = 2;
magma_zreduce_kernel_spmv1<<< Gs_next.x/2, Bs.x/2, Ms/2, queue->cuda_stream()>>>
( Gs.x, n, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
magma_zcopyvector( 1, aux1, 1, skp, 1, queue );
dim3 Bs2( 2 );
dim3 Gs2( 1 );
magma_zbicgstab_alphakernel<<< Gs2, Bs2, 0, queue->cuda_stream()>>>( skp );
return MAGMA_SUCCESS;
}
/* -------------------------------------------------------------------------- */
// accelerated block reduction for multiple vectors
__global__ void
magma_zreduce_kernel_spmv2(
int Gs,
int n,
magmaDoubleComplex * vtmp,
magmaDoubleComplex * vtmp2 )
{
extern __shared__ magmaDoubleComplex temp[];
int Idx = threadIdx.x;
int blockSize = 128;
int gridSize = blockSize * 2 * gridDim.x;
int j;
for( j=0; j<2; j++){
int i = blockIdx.x * ( blockSize * 2 ) + Idx;
temp[Idx+j*(blockSize)] = MAGMA_Z_MAKE( 0.0, 0.0);
while (i < Gs ) {
temp[ Idx+j*(blockSize) ] += vtmp[ i+j*n ];
temp[ Idx+j*(blockSize) ] +=
( i + (blockSize) < Gs ) ? vtmp[ i+j*n + (blockSize) ]
: MAGMA_Z_MAKE( 0.0, 0.0);
i += gridSize;
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<2; j++){
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 64 ];
}
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 32 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 16 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 8 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 4 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 2 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 1 ];
__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile double *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ];
}
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ];
}
}
#endif
if ( Idx == 0 ){
for( j=0; j<2; j++){
vtmp2[ blockIdx.x+j*n ] = temp[ j*(blockSize) ];
}
}
}
__global__ void
magma_zbicgmerge_spmv2_kernel(
int n,
magmaDoubleComplex * dval,
magma_index_t * drowptr,
magma_index_t * dcolind,
magmaDoubleComplex * s,
magmaDoubleComplex * t,
magmaDoubleComplex * vtmp )
{
extern __shared__ magmaDoubleComplex temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
if( i<n ){
magmaDoubleComplex dot = MAGMA_Z_ZERO;
int start = drowptr[ i ];
int end = drowptr[ i+1 ];
for( j=start; j<end; j++)
dot += dval[ j ] * s[ dcolind[j] ];
t[ i ] = dot;
}
__syncthreads();
// 2 vectors
if (i<n){
magmaDoubleComplex tmp2 = t[i];
temp[Idx] = s[i] * tmp2;
temp[Idx+blockDim.x] = tmp2 * tmp2;
}
else {
for( j=0; j<2; j++)
temp[Idx+j*blockDim.x] = MAGMA_Z_MAKE( 0.0, 0.0);
}
__syncthreads();
if ( Idx < 128 ){
for( j=0; j<2; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ];
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<2; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ];
}
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ];
__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile double *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
if ( Idx == 0 ){
for( j=0; j<2; j++){
vtmp[ blockIdx.x+j*n ] = temp[ j*blockDim.x ];
}
}
}
__global__ void
magma_zbicgstab_omegakernel(
magmaDoubleComplex * skp ){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if( i==0 ){
skp[2] = skp[6]/skp[7];
skp[3] = skp[4];
}
}
/**
Purpose
-------
Merges the second SpmV using CSR with the dot product
and the computation of omega
Arguments
---------
@param[in]
A magma_z_matrix
input matrix
@param[in]
d1 magmaDoubleComplex_ptr
temporary vector
@param[in]
d2 magmaDoubleComplex_ptr
temporary vector
@param[in]
ds magmaDoubleComplex_ptr
input vector s
@param[in]
dt magmaDoubleComplex_ptr
output vector t
@param[in,out]
skp magmaDoubleComplex_ptr
array for parameters
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C" magma_int_t
magma_zbicgmerge_spmv2(
magma_z_matrix A,
magmaDoubleComplex_ptr d1,
magmaDoubleComplex_ptr d2,
magmaDoubleComplex_ptr ds,
magmaDoubleComplex_ptr dt,
magmaDoubleComplex_ptr skp,
magma_queue_t queue )
{
int n = A.num_rows;
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( magma_ceildiv( n, local_block_size ) );
dim3 Gs_next;
int Ms = 2*local_block_size * sizeof( magmaDoubleComplex );
magmaDoubleComplex_ptr aux1 = d1, aux2 = d2;
int b = 1;
if ( A.storage_type == Magma_CSR)
magma_zbicgmerge_spmv2_kernel<<< Gs, Bs, Ms, queue->cuda_stream()>>>
( n, A.dval, A.drow, A.dcol, ds, dt, d1 );
else
printf("error: only CSR format supported.\n");
while( Gs.x > 1 ) {
Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
if ( Gs_next.x == 1 ) Gs_next.x = 2;
magma_zreduce_kernel_spmv2<<< Gs_next.x/2, Bs.x/2, Ms/2, queue->cuda_stream()>>>
( Gs.x, n, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
magma_zcopyvector( 1, aux1, 1, skp+6, 1, queue );
magma_zcopyvector( 1, aux1+n, 1, skp+7, 1, queue );
dim3 Bs2( 2 );
dim3 Gs2( 1 );
magma_zbicgstab_omegakernel<<< Gs2, Bs2, 0, queue->cuda_stream()>>>( skp );
return MAGMA_SUCCESS;
}
/* -------------------------------------------------------------------------- */
__global__ void
magma_zbicgmerge_xrbeta_kernel(
int n,
magmaDoubleComplex * rr,
magmaDoubleComplex * r,
magmaDoubleComplex * p,
magmaDoubleComplex * s,
magmaDoubleComplex * t,
magmaDoubleComplex * x,
magmaDoubleComplex * skp,
magmaDoubleComplex * vtmp )
{
extern __shared__ magmaDoubleComplex temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
magmaDoubleComplex alpha=skp[0];
magmaDoubleComplex omega=skp[2];
if( i<n ){
magmaDoubleComplex sl;
sl = s[i];
x[i] = x[i] + alpha * p[i] + omega * sl;
r[i] = sl - omega * t[i];
}
__syncthreads();
// 2 vectors
if (i<n){
magmaDoubleComplex tmp2 = r[i];
temp[Idx] = rr[i] * tmp2;
temp[Idx+blockDim.x] = tmp2 * tmp2;
}
else {
for( j=0; j<2; j++)
temp[Idx+j*blockDim.x] = MAGMA_Z_MAKE( 0.0, 0.0);
}
__syncthreads();
if ( Idx < 128 ){
for( j=0; j<2; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ];
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<2; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ];
}
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ];
__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile double *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
if ( Idx == 0 ){
for( j=0; j<2; j++){
vtmp[ blockIdx.x+j*n ] = temp[ j*blockDim.x ];
}
}
}
__global__ void
magma_zbicgstab_betakernel(
magmaDoubleComplex * skp )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if( i==0 ){
magmaDoubleComplex tmp1 = skp[4]/skp[3];
magmaDoubleComplex tmp2 = skp[0] / skp[2];
skp[1] = tmp1*tmp2;
}
}
/**
Purpose
-------
Merges the vector updates x = x + alpha*p + omega*s and r = s - omega*t
with the dot products needed for the computation of beta
Arguments
---------
@param[in]
n int
dimension n
@param[in]
d1 magmaDoubleComplex_ptr
temporary vector
@param[in]
d2 magmaDoubleComplex_ptr
temporary vector
@param[in]
rr magmaDoubleComplex_ptr
input vector rr
@param[in]
r magmaDoubleComplex_ptr
input/output vector r
@param[in]
p magmaDoubleComplex_ptr
input vector p
@param[in]
s magmaDoubleComplex_ptr
input vector s
@param[in]
t magmaDoubleComplex_ptr
input vector t
@param[out]
x magmaDoubleComplex_ptr
output vector x
@param[in]
skp magmaDoubleComplex_ptr
array for parameters
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C" magma_int_t
magma_zbicgmerge_xrbeta(
magma_int_t n,
magmaDoubleComplex_ptr d1,
magmaDoubleComplex_ptr d2,
magmaDoubleComplex_ptr rr,
magmaDoubleComplex_ptr r,
magmaDoubleComplex_ptr p,
magmaDoubleComplex_ptr s,
magmaDoubleComplex_ptr t,
magmaDoubleComplex_ptr x,
magmaDoubleComplex_ptr skp,
magma_queue_t queue )
{
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( magma_ceildiv( n, local_block_size ) );
dim3 Gs_next;
int Ms = 2*local_block_size * sizeof( magmaDoubleComplex );
magmaDoubleComplex_ptr aux1 = d1, aux2 = d2;
int b = 1;
magma_zbicgmerge_xrbeta_kernel<<< Gs, Bs, Ms, queue->cuda_stream()>>>
( n, rr, r, p, s, t, x, skp, d1);
while( Gs.x > 1 ) {
Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
if ( Gs_next.x == 1 ) Gs_next.x = 2;
magma_zreduce_kernel_spmv2<<< Gs_next.x/2, Bs.x/2, Ms/2, queue->cuda_stream()>>>
( Gs.x, n, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
magma_zcopyvector( 1, aux1, 1, skp+4, 1, queue );
magma_zcopyvector( 1, aux1+n, 1, skp+5, 1, queue );
dim3 Bs2( 2 );
dim3 Gs2( 1 );
magma_zbicgstab_betakernel<<< Gs2, Bs2, 0, queue->cuda_stream()>>>( skp );
return MAGMA_SUCCESS;
}
/* -------------------------------------------------------------------------- */
|
77284a13bf01b2916a9b6f8000aa29ec1323bfe5.hip |
// !!! This is a file automatically generated by hipify!!!
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "cm.h"
#include <thrust/iterator/discard_iterator.h>
void select(queue<string> op_type, queue<string> op_value, queue<int_type> op_nums, queue<float_type> op_nums_f, CudaSet* a, CudaSet* b, int_type old_reccount)
{
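// The op_* queues encode the select expressions in postfix (RPN) order: operands
// (column names, integer and float literals) and operators/aggregates are consumed
// one token at a time and evaluated on the typed stacks declared below; the marker
// "emit sel_name" closes one output column. Aggregates reduce by key over a->grp,
// the boolean head-flag vector that marks the start of each group.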
stack<string> exe_type;
stack<string> exe_value;
stack<int_type*> exe_vectors;
stack<int_type> exe_nums;
string s1, s2, s1_val, s2_val;
int_type n1, n2, res;
unsigned int colCount = 0;
stack<int> col_type;
string grp_type;
stack<string> grp_type1;
stack<string> col_val;
int_type res_size = 0;
//thrust::equal_to<bool> binary_pred_l;
stack<string> exe_value1;
stack<int_type*> exe_vectors1;
stack<float_type*> exe_vectors1_d;
stack<float_type*> exe_vectors_d;
stack<int_type> exe_nums1;
stack<float_type*> exe_vectors_f;
stack<float_type> exe_nums_f;
float_type n1_f, n2_f, res_f;
bool one_line = 0;
if (a->columnGroups.empty() && a->mRecCount != 0)
one_line = 1;
thrust::device_ptr<bool> d_di(a->grp);
if (!(a->columnGroups).empty() && (a->mRecCount != 0))
res_size = a->grp_count;
for(int i=0; !op_type.empty(); ++i, op_type.pop()) {
string ss = op_type.front();
if(ss.compare("emit sel_name") != 0) {
grp_type = "NULL";
if (ss.compare("COUNT") == 0 || ss.compare("SUM") == 0 || ss.compare("AVG") == 0 || ss.compare("MIN") == 0 || ss.compare("MAX") == 0) {
if (ss.compare("COUNT") == 0) {
grp_type = "COUNT";
s1 = exe_type.top();
exe_type.pop();
s1_val = exe_value.top();
exe_value.pop();
if (!a->columnGroups.empty()) {
thrust::device_ptr<int_type> count_diff = thrust::device_malloc<int_type>(res_size);
thrust::reduce_by_key(d_di, d_di+(a->mRecCount), thrust::constant_iterator<int_type>(1),
thrust::make_discard_iterator(), count_diff,
head_flag_predicate<bool>(),thrust::plus<int_type>());
exe_vectors.push(thrust::raw_pointer_cast(count_diff));
}
else {
thrust::device_ptr<int_type> dest = thrust::device_malloc<int_type>(1);
dest[0] = a->mRecCount;
exe_vectors.push(thrust::raw_pointer_cast(dest));
}
}
else if (ss.compare("SUM") == 0) {
grp_type = "SUM";
s1 = exe_type.top();
exe_type.pop();
if (s1.compare("VECTOR F") == 0) {
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
if (!a->columnGroups.empty()) {
thrust::device_ptr<float_type> source((float_type*)(s3));
thrust::device_ptr<float_type> count_diff = thrust::device_malloc<float_type>(res_size);
thrust::reduce_by_key(d_di, d_di+(a->mRecCount), source,
thrust::make_discard_iterator(), count_diff,
head_flag_predicate<bool>(),thrust::plus<float_type>());
exe_vectors_f.push(thrust::raw_pointer_cast(count_diff));
exe_type.push("VECTOR F");
}
else {
thrust::device_ptr<float_type> source((float_type*)(s3));
thrust::device_ptr<float_type> count_diff = thrust::device_malloc<float_type>(1);
count_diff[0] = thrust::reduce(source, source+a->mRecCount);
exe_vectors_f.push(thrust::raw_pointer_cast(count_diff));
exe_type.push("VECTOR F");
};
hipFree(s3);
}
if (s1.compare("VECTOR") == 0) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
if (!a->columnGroups.empty()) {
thrust::device_ptr<int_type> source((int_type*)(s3));
thrust::device_ptr<int_type> count_diff = thrust::device_malloc<int_type>(res_size);
thrust::reduce_by_key(d_di, d_di+(a->mRecCount), source,
thrust::make_discard_iterator(), count_diff,
head_flag_predicate<bool>(),thrust::plus<int_type>());
exe_vectors.push(thrust::raw_pointer_cast(count_diff));
exe_type.push("VECTOR");
}
else {
thrust::device_ptr<int_type> source((int_type*)(s3));
thrust::device_ptr<int_type> count_diff = thrust::device_malloc<int_type>(1);
count_diff[0] = thrust::reduce(source, source+a->mRecCount);
exe_vectors.push(thrust::raw_pointer_cast(count_diff));
exe_type.push("VECTOR");
};
hipFree(s3);
}
else if (s1.compare("NAME") == 0) {
s1_val = exe_value.top();
exe_value.pop();
unsigned int colIndex = (a->columnNames).find(s1_val)->second;
if (!a->columnGroups.empty()) {
if((a->type)[colIndex] == 0) {
thrust::device_ptr<int_type> source((int_type*)(a->d_columns)[colIndex]);
thrust::device_ptr<int_type> count_diff = thrust::device_malloc<int_type>(res_size);
thrust::reduce_by_key(d_di, d_di+(a->mRecCount), source,
thrust::make_discard_iterator(), count_diff,
head_flag_predicate<bool>(),thrust::plus<int_type>());
exe_vectors.push(thrust::raw_pointer_cast(count_diff));
exe_type.push("VECTOR");
}
else if((a->type)[colIndex] == 1) {
thrust::device_ptr<float_type> source((float_type*)(a->d_columns)[colIndex]);
thrust::device_ptr<float_type> count_diff = thrust::device_malloc<float_type>(res_size);
thrust::reduce_by_key(d_di, d_di+(a->mRecCount), source,
thrust::make_discard_iterator(), count_diff,
head_flag_predicate<bool>(),thrust::plus<float_type>());
exe_vectors_f.push(thrust::raw_pointer_cast(count_diff));
exe_type.push("VECTOR F");
}
}
else {
if((a->type)[colIndex] == 0) {
thrust::device_ptr<int_type> source((int_type*)(a->d_columns)[colIndex]);
thrust::device_ptr<int_type> dest;
int_type cc = thrust::reduce(source, source+a->mRecCount);
if (one_line) {
dest = thrust::device_malloc<int_type>(1);
dest[0] = cc;
}
else {
dest = thrust::device_malloc<int_type>(a->mRecCount);
thrust::sequence(dest, dest+(a->mRecCount), cc, (int_type)0);
};
exe_vectors.push(thrust::raw_pointer_cast(dest));
exe_type.push("VECTOR");
}
else if((a->type)[colIndex] == 1) {
thrust::device_ptr<float_type> source((float_type*)(a->d_columns)[colIndex]);
thrust::device_ptr<float_type> dest;
float_type cc = thrust::reduce(source, source+a->mRecCount);
if (one_line) {
dest = thrust::device_malloc<float_type>(1);
dest[0] = cc;
}
else {
dest = thrust::device_malloc<float_type>(a->mRecCount);
thrust::sequence(dest, dest+a->mRecCount, cc, (float_type)0);
};
exe_vectors_f.push(thrust::raw_pointer_cast(dest));
exe_type.push("VECTOR F");
};
};
}
}
else if (ss.compare("MIN") == 0) {
grp_type = "MIN";
s1 = exe_type.top();
exe_type.pop();
s1_val = exe_value.top();
exe_value.pop();
unsigned int colIndex = (a->columnNames).find(s1_val)->second;
if((a->type)[colIndex] == 0) {
thrust::device_ptr<int_type> source((int_type*)(a->d_columns)[colIndex]);
thrust::device_ptr<int_type> count_diff = thrust::device_malloc<int_type>(res_size);
thrust::reduce_by_key(d_di, d_di+(a->mRecCount), source,
thrust::make_discard_iterator(), count_diff,
head_flag_predicate<bool>(),thrust::minimum<int_type>());
exe_vectors.push(thrust::raw_pointer_cast(count_diff));
}
else if((a->type)[colIndex] == 1) {
thrust::device_ptr<float_type> source((float_type*)(a->d_columns)[colIndex]);
thrust::device_ptr<float_type> count_diff = thrust::device_malloc<float_type>(res_size);
thrust::reduce_by_key(d_di, d_di+(a->mRecCount), source,
thrust::make_discard_iterator(), count_diff,
head_flag_predicate<bool>(),thrust::minimum<float_type>());
exe_vectors_f.push(thrust::raw_pointer_cast(count_diff));
}
}
else if (ss.compare("AVG") == 0) {
grp_type = "AVG";
s1 = exe_type.top();
exe_type.pop();
s1_val = exe_value.top();
exe_value.pop();
unsigned int colIndex = (a->columnNames).find(s1_val)->second;
if((a->type)[colIndex] == 0) {
thrust::device_ptr<int_type> source((int_type*)(a->d_columns)[colIndex]);
thrust::device_ptr<int_type> count_diff = thrust::device_malloc<int_type>(res_size);
thrust::reduce_by_key(d_di, d_di+(a->mRecCount), source,
thrust::make_discard_iterator(), count_diff,
head_flag_predicate<bool>(),thrust::plus<int_type>());
exe_vectors.push(thrust::raw_pointer_cast(count_diff));
exe_type.push("VECTOR");
}
else if((a->type)[colIndex] == 1) {
thrust::device_ptr<float_type> source((float_type*)(a->d_columns)[colIndex]);
thrust::device_ptr<float_type> count_diff = thrust::device_malloc<float_type>(res_size);
thrust::reduce_by_key(d_di, d_di+(a->mRecCount), source,
thrust::make_discard_iterator(), count_diff,
head_flag_predicate<bool>(),thrust::plus<float_type>());
exe_vectors_f.push(thrust::raw_pointer_cast(count_diff));
exe_type.push("VECTOR F");
}
};
//op_value.pop();
};
if (ss.compare("NAME") == 0 || ss.compare("NUMBER") == 0 || ss.compare("VECTOR") == 0 || ss.compare("VECTOR F") == 0) {
exe_type.push(ss);
if (ss.compare("NUMBER") == 0) {
exe_nums.push(op_nums.front());
op_nums.pop();
}
else if (ss.compare("NAME") == 0) {
exe_value.push(op_value.front());
op_value.pop();
}
}
else {
if (ss.compare("MUL") == 0 || ss.compare("ADD") == 0 || ss.compare("DIV") == 0 || ss.compare("MINUS") == 0) {
// get 2 values from the stack
s1 = exe_type.top();
exe_type.pop();
s2 = exe_type.top();
exe_type.pop();
if (s1.compare("NUMBER") == 0 && s2.compare("NUMBER") == 0) {
n1 = exe_nums.top();
exe_nums.pop();
n2 = exe_nums.top();
exe_nums.pop();
if (ss.compare("ADD") == 0 )
res = n1+n2;
else if (ss.compare("MUL") == 0 )
res = n1*n2;
else if (ss.compare("DIV") == 0 )
res = n1/n2;
else
res = n1-n2;
thrust::device_ptr<int_type> p = thrust::device_malloc<int_type>(a->mRecCount);
thrust::sequence(p, p+(a->mRecCount),res,(int_type)0);
exe_type.push("VECTOR");
exe_vectors.push(thrust::raw_pointer_cast(p));
}
else if (s1.compare("FLOAT") == 0 && s2.compare("FLOAT") == 0) {
n1_f = exe_nums_f.top();
exe_nums_f.pop();
n2_f = exe_nums_f.top();
exe_nums_f.pop();
if (ss.compare("ADD") == 0 )
res_f = n1_f+n2_f;
else if (ss.compare("MUL") == 0 )
res_f = n1_f*n2_f;
else if (ss.compare("DIV") == 0 )
res_f = n1_f/n2_f;
else
res_f = n1_f-n2_f;
thrust::device_ptr<float_type> p = thrust::device_malloc<float_type>(a->mRecCount);
thrust::sequence(p, p+(a->mRecCount),res_f,(float_type)0);
exe_type.push("VECTOR F");
exe_vectors_f.push(thrust::raw_pointer_cast(p));
}
else if (s1.compare("NAME") == 0 && s2.compare("FLOAT") == 0) {
s1_val = exe_value.top();
exe_value.pop();
n1_f = exe_nums_f.top();
exe_nums_f.pop();
exe_type.push("VECTOR F");
if (a->type[(a->columnNames)[s1_val]] == 1) {
float_type* t = a->get_float_type_by_name(s1_val);
exe_vectors_f.push(a->op(t,n1_f,ss,1));
}
else {
int_type* t = a->get_int_by_name(s1_val);
exe_vectors_f.push(a->op(t,n1_f,ss,1));
};
}
else if (s1.compare("FLOAT") == 0 && s2.compare("NAME") == 0) {
n1_f = exe_nums_f.top();
exe_nums_f.pop();
s2_val = exe_value.top();
exe_value.pop();
exe_type.push("VECTOR F");
if (a->type[(a->columnNames)[s2_val]] == 1) {
float_type* t = a->get_float_type_by_name(s2_val);
exe_vectors_f.push(a->op(t,n1_f,ss,0));
}
else {
int_type* t = a->get_int_by_name(s2_val);
exe_vectors_f.push(a->op(t,n1_f,ss,0));
};
}
else if (s1.compare("NAME") == 0 && s2.compare("NUMBER") == 0) {
s1_val = exe_value.top();
exe_value.pop();
n1 = exe_nums.top();
exe_nums.pop();
if (a->type[(a->columnNames)[s1_val]] == 1) {
float_type* t = a->get_float_type_by_name(s1_val);
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(t,(float_type)n1,ss,1));
}
else {
int_type* t = a->get_int_by_name(s1_val);
exe_type.push("VECTOR");
exe_vectors.push(a->op(t,n1,ss,1));
};
}
else if (s1.compare("NUMBER") == 0 && s2.compare("NAME") == 0) {
n1 = exe_nums.top();
exe_nums.pop();
s2_val = exe_value.top();
exe_value.pop();
if (a->type[(a->columnNames)[s2_val]] == 1) {
float_type* t = a->get_float_type_by_name(s2_val);
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(t,(float_type)n1,ss,0));
}
else {
int_type* t = a->get_int_by_name(s2_val);
exe_type.push("VECTOR");
exe_vectors.push(a->op(t,n1,ss,0));
};
}
else if (s1.compare("NAME") == 0 && s2.compare("NAME") == 0) {
s1_val = exe_value.top();
exe_value.pop();
s2_val = exe_value.top();
exe_value.pop();
if (a->type[(a->columnNames)[s1_val]] == 0) {
int_type* t1 = a->get_int_by_name(s1_val);
if (a->type[(a->columnNames)[s2_val]] == 0) {
int_type* t = a->get_int_by_name(s2_val);
exe_type.push("VECTOR");
exe_vectors.push(a->op(t1,t,ss,0));
}
else {
float_type* t = a->get_float_type_by_name(s2_val);
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(t1,t,ss,0));
};
}
else {
float_type* t = a->get_float_type_by_name(s1_val);
if (a->type[(a->columnNames)[s2_val]] == 0) {
int_type* t1 = a->get_int_by_name(s2_val);
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(t1,t,ss,0));
}
else {
float_type* t1 = a->get_float_type_by_name(s2_val);
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(t1,t,ss,0));
};
}
}
else if ((s1.compare("VECTOR") == 0 || s1.compare("VECTOR F") == 0 ) && s2.compare("NAME") == 0) {
s2_val = exe_value.top();
exe_value.pop();
if (a->type[(a->columnNames)[s2_val]] == 0) {
int_type* t = a->get_int_by_name(s2_val);
if (s1.compare("VECTOR") == 0 ) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
exe_type.push("VECTOR");
exe_vectors.push(a->op(t,s3,ss,0));
//free s3
hipFree(s3);
}
else {
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(t,s3,ss,0));
hipFree(s3);
}
}
else {
float_type* t = a->get_float_type_by_name(s2_val);
if (s1.compare("VECTOR") == 0 ) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(s3,t, ss,0));
hipFree(s3);
}
else {
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(t,s3,ss,0));
hipFree(s3);
}
};
}
else if ((s2.compare("VECTOR") == 0 || s2.compare("VECTOR F") == 0 ) && s1.compare("NAME") == 0) {
s1_val = exe_value.top();
exe_value.pop();
if (a->type[(a->columnNames)[s1_val]] == 0) {
int_type* t = a->get_int_by_name(s1_val);
if (s2.compare("VECTOR") == 0 ) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
exe_type.push("VECTOR");
exe_vectors.push(a->op(t,s3,ss,1));
hipFree(s3);
}
else {
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(t,s3,ss,1));
hipFree(s3);
}
}
else {
float_type* t = a->get_float_type_by_name(s1_val);
if (s2.compare("VECTOR") == 0 ) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(s3,t,ss,1));
hipFree(s3);
}
else {
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(t,s3,ss,1));
hipFree(s3);
}
};
}
else if ((s1.compare("VECTOR") == 0 || s1.compare("VECTOR F") == 0) && s2.compare("NUMBER") == 0) {
n1 = exe_nums.top();
exe_nums.pop();
if (s1.compare("VECTOR") == 0 ) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
exe_type.push("VECTOR");
exe_vectors.push(a->op(s3,n1, ss,1));
hipFree(s3);
}
else {
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(s3,n1, ss,1));
hipFree(s3);
}
}
else if (s1.compare("NUMBER") == 0 && (s2.compare("VECTOR") || s2.compare("VECTOR F") == 0)) {
n1 = exe_nums.top();
exe_nums.pop();
if (s2.compare("VECTOR") == 0 ) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
exe_type.push("VECTOR");
exe_vectors.push(a->op(s3,n1, ss,0));
hipFree(s3);
}
else {
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(s3,n1, ss,0));
hipFree(s3);
}
}
else if ((s1.compare("VECTOR") == 0 || s1.compare("VECTOR F") == 0) && s2.compare("FLOAT") == 0) {
n1_f = exe_nums_f.top();
exe_nums_f.pop();
if (s1.compare("VECTOR") == 0 ) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(s3,n1_f, ss,1));
hipFree(s3);
}
else {
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(s3,n1_f, ss,1));
hipFree(s3);
}
}
else if (s1.compare("FLOAT") == 0 && s2.compare("VECTOR") == 0) {
n1_f = exe_nums.top();
exe_nums.pop();
if (s2.compare("VECTOR") == 0 ) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(s3,n1_f, ss,0));
hipFree(s3);
}
else {
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(s3,n1_f, ss,0));
hipFree(s3);
}
}
else if (s1.compare("VECTOR") == 0 && s2.compare("VECTOR") == 0) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
int_type* s4 = exe_vectors.top();
exe_vectors.pop();
exe_type.push("VECTOR");
exe_vectors.push(a->op(s3, s4,ss,0));
hipFree(s3);
hipFree(s4);
}
else if(s1.compare("VECTOR") == 0 && s2.compare("VECTOR F") == 0) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
float_type* s4 = exe_vectors_f.top();
exe_vectors_f.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(s3, s4,ss,1));
hipFree(s3);
hipFree(s4);
}
else if(s1.compare("VECTOR F") == 0 && s2.compare("VECTOR") == 0) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
float_type* s4 = exe_vectors_f.top();
exe_vectors_f.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(s3, s4,ss,0));
hipFree(s3);
hipFree(s4);
}
else if(s1.compare("VECTOR F") == 0 && s2.compare("VECTOR F") == 0) {
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
float_type* s4 = exe_vectors_f.top();
exe_vectors_f.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(s3, s4,ss,1));
hipFree(s3);
hipFree(s4);
}
}
}
} //
else {
// here we need to save what is where
col_val.push(op_value.front());
op_value.pop();
grp_type1.push(grp_type);
if(!exe_nums.empty()) { //number
col_type.push(0);
exe_nums1.push(exe_nums.top());
exe_nums.pop();
};
if(!exe_value.empty()) { //field name
col_type.push(1);
exe_value1.push(exe_value.top());
exe_value.pop();
};
if(!exe_vectors.empty()) { //vector int
exe_vectors1.push(exe_vectors.top());
exe_vectors.pop();
col_type.push(2);
};
if(!exe_vectors_f.empty()) { //vector float
exe_vectors1_d.push(exe_vectors_f.top());
exe_vectors_f.pop();
col_type.push(3);
};
colCount++;
};
};
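// second pass: pop the per-column stacks (filled above in evaluation order) back to
// front and materialize each result column of b, compacting through the group flags
// when a grouping is present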
b->grp_type = new unsigned int[colCount];
for(int j=0; j<colCount; j++) {
if ((grp_type1.top()).compare("COUNT") == 0 )
b->grp_type[colCount-j-1] = 0;
else if ((grp_type1.top()).compare("AVG") == 0 )
b->grp_type[colCount-j-1] = 1;
else if ((grp_type1.top()).compare("SUM") == 0 )
b->grp_type[colCount-j-1] = 2;
else if ((grp_type1.top()).compare("NULL") == 0 )
b->grp_type[colCount-j-1] = 3;
else if ((grp_type1.top()).compare("MIN") == 0 )
b->grp_type[colCount-j-1] = 4;
else if ((grp_type1.top()).compare("MAX") == 0 )
b->grp_type[colCount-j-1] = 5;
if(col_type.top() == 0) {
// create a vector
thrust::device_ptr<int_type> s = thrust::device_malloc<int_type>(a->mRecCount);
thrust::sequence(s, s+(a->mRecCount), (int)exe_nums1.top(), 0);
if (!(a->columnGroups).empty()) {
thrust::device_ptr<int_type> count_diff = thrust::device_malloc<int_type>(res_size);
thrust::device_ptr<bool> d_grp(a->grp);
thrust::copy_if(s,s+(a->mRecCount), d_grp, count_diff, nz<bool>());
b->addDeviceColumn(thrust::raw_pointer_cast(count_diff) , colCount-j-1, col_val.top(), res_size);
thrust::device_free(count_diff);
}
else
b->addHostColumn(thrust::raw_pointer_cast(s) , colCount-j-1, col_val.top(), a->mRecCount, old_reccount, one_line);
exe_nums1.pop();
};
if(col_type.top() == 1) {
unsigned int colIndex = (a->columnNames).find(exe_value1.top())->second;
if(a->type[colIndex] == 0) {
thrust::device_ptr<int_type> s((int_type*)(a->d_columns)[colIndex]);
//modify what we push there in case of a grouping
if (!(a->columnGroups).empty()) {
thrust::device_ptr<int_type> count_diff = thrust::device_malloc<int_type>(res_size);
thrust::device_ptr<bool> d_grp(a->grp);
thrust::copy_if(s,s+(a->mRecCount), d_grp, count_diff, nz<bool>());
b->addDeviceColumn(thrust::raw_pointer_cast(count_diff) , colCount-j-1, col_val.top(), res_size);
thrust::device_free(count_diff);
}
else
b->addHostColumn(thrust::raw_pointer_cast(s) , colCount-j-1, col_val.top(), a->mRecCount, old_reccount, one_line);
}
else if(a->type[colIndex] == 1) {
thrust::device_ptr<float_type> s((float_type*)(a->d_columns)[colIndex]);
//modify what we push there in case of a grouping
if (!(a->columnGroups).empty()) {
thrust::device_ptr<float_type> count_diff = thrust::device_malloc<float_type>(res_size);
thrust::device_ptr<bool> d_grp(a->grp);
thrust::copy_if(s,s+(a->mRecCount), d_grp, count_diff, nz<bool>());
b->addDeviceColumn(thrust::raw_pointer_cast(count_diff) , colCount-j-1, col_val.top(), res_size);
thrust::device_free(count_diff);
}
else
b->addHostColumn(thrust::raw_pointer_cast(s) , colCount-j-1, col_val.top(), a->mRecCount, old_reccount, one_line);
}
else if(a->type[colIndex] == 2) { //varchar
if (b->columnNames.find(col_val.top()) == b->columnNames.end()) {
CudaChar* n = new CudaChar(((CudaChar*)(a->h_columns)[colIndex])->mColumnCount, old_reccount);
b->h_columns[colCount-j-1] = n;
b->columnNames[col_val.top()] = colCount-j-1;
b->type[colCount-j-1] = 2;
};
CudaChar *cc = (CudaChar*)(a->h_columns)[colIndex];
CudaChar* nn = (CudaChar*)b->h_columns[colCount-j-1];
//modify what we push there in case of a grouping
if (!(a->columnGroups).empty()) {
nn->allocOnDevice(a->mRecCount);
thrust::device_ptr<bool> d_grp(a->grp);
for(unsigned int k=0; k < (nn->mColumnCount); k++) {
thrust::device_ptr<char> sr((cc->d_columns)[k]);
thrust::device_ptr<char> de((nn->d_columns)[k]);
thrust::copy_if(sr,sr+(a->mRecCount), d_grp, de, nz<bool>());
};
}
else {
//copy the cc to new
for(unsigned int k=0; k < (nn->mColumnCount); k++)
hipMemcpy((void *) (nn->h_columns[k] + b->mRecCount), (void *) (cc->d_columns)[k], a->mRecCount, hipMemcpyDeviceToHost);
}
}
exe_value1.pop();
};
if(col_type.top() == 2) { // int
if (!(a->columnGroups).empty())
b->addDeviceColumn(exe_vectors1.top() , colCount-j-1, col_val.top(), res_size);
else
b->addHostColumn(exe_vectors1.top() , colCount-j-1, col_val.top(), a->mRecCount, old_reccount, one_line);
hipFree(exe_vectors1.top());
exe_vectors1.pop();
}
if(col_type.top() == 3) { //float
if (!(a->columnGroups).empty()) {
b->addDeviceColumn(exe_vectors1_d.top() , colCount-j-1, col_val.top(), res_size);
}
else
b->addHostColumn(exe_vectors1_d.top() , colCount-j-1, col_val.top(), a->mRecCount, old_reccount, one_line);
hipFree(exe_vectors1_d.top());
exe_vectors1_d.pop();
};
col_type.pop();
col_val.pop();
grp_type1.pop();
};
if ((a->columnGroups).empty()) {
if ( !one_line)
b->mRecCount = b->mRecCount + a->mRecCount;
else
b->mRecCount = b->mRecCount + 1;
}
else
b->mRecCount = res_size;
}
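/*
   Example encoding (inferred from the evaluator above, not taken from the parser; the
   column names "price" and "rev" are purely illustrative): an expression such as
       SUM(price * 2) AS rev
   would arrive roughly as
       op_type  : NAME, NUMBER, MUL, SUM, emit sel_name
       op_value : price, rev
       op_nums  : 2
   NAME/NUMBER push operands, MUL combines them into a VECTOR, SUM reduces that vector
   (per group when a->grp is set), and "emit sel_name" pops the result together with
   its output name.
*/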
| 77284a13bf01b2916a9b6f8000aa29ec1323bfe5.cu |
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "cm.h"
#include <thrust/iterator/discard_iterator.h>
void select(queue<string> op_type, queue<string> op_value, queue<int_type> op_nums, queue<float_type> op_nums_f, CudaSet* a, CudaSet* b, int_type old_reccount)
{
stack<string> exe_type;
stack<string> exe_value;
stack<int_type*> exe_vectors;
stack<int_type> exe_nums;
string s1, s2, s1_val, s2_val;
int_type n1, n2, res;
unsigned int colCount = 0;
stack<int> col_type;
string grp_type;
stack<string> grp_type1;
stack<string> col_val;
int_type res_size = 0;
//thrust::equal_to<bool> binary_pred_l;
stack<string> exe_value1;
stack<int_type*> exe_vectors1;
stack<float_type*> exe_vectors1_d;
stack<float_type*> exe_vectors_d;
stack<int_type> exe_nums1;
stack<float_type*> exe_vectors_f;
stack<float_type> exe_nums_f;
float_type n1_f, n2_f, res_f;
bool one_line = 0;
if (a->columnGroups.empty() && a->mRecCount != 0)
one_line = 1;
thrust::device_ptr<bool> d_di(a->grp);
if (!(a->columnGroups).empty() && (a->mRecCount != 0))
res_size = a->grp_count;
for(int i=0; !op_type.empty(); ++i, op_type.pop()) {
string ss = op_type.front();
if(ss.compare("emit sel_name") != 0) {
grp_type = "NULL";
if (ss.compare("COUNT") == 0 || ss.compare("SUM") == 0 || ss.compare("AVG") == 0 || ss.compare("MIN") == 0 || ss.compare("MAX") == 0) {
if (ss.compare("COUNT") == 0) {
grp_type = "COUNT";
s1 = exe_type.top();
exe_type.pop();
s1_val = exe_value.top();
exe_value.pop();
if (!a->columnGroups.empty()) {
thrust::device_ptr<int_type> count_diff = thrust::device_malloc<int_type>(res_size);
thrust::reduce_by_key(d_di, d_di+(a->mRecCount), thrust::constant_iterator<int_type>(1),
thrust::make_discard_iterator(), count_diff,
head_flag_predicate<bool>(),thrust::plus<int_type>());
exe_vectors.push(thrust::raw_pointer_cast(count_diff));
}
else {
thrust::device_ptr<int_type> dest = thrust::device_malloc<int_type>(1);
dest[0] = a->mRecCount;
exe_vectors.push(thrust::raw_pointer_cast(dest));
}
}
else if (ss.compare("SUM") == 0) {
grp_type = "SUM";
s1 = exe_type.top();
exe_type.pop();
if (s1.compare("VECTOR F") == 0) {
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
if (!a->columnGroups.empty()) {
thrust::device_ptr<float_type> source((float_type*)(s3));
thrust::device_ptr<float_type> count_diff = thrust::device_malloc<float_type>(res_size);
thrust::reduce_by_key(d_di, d_di+(a->mRecCount), source,
thrust::make_discard_iterator(), count_diff,
head_flag_predicate<bool>(),thrust::plus<float_type>());
exe_vectors_f.push(thrust::raw_pointer_cast(count_diff));
exe_type.push("VECTOR F");
}
else {
thrust::device_ptr<float_type> source((float_type*)(s3));
thrust::device_ptr<float_type> count_diff = thrust::device_malloc<float_type>(1);
count_diff[0] = thrust::reduce(source, source+a->mRecCount);
exe_vectors_f.push(thrust::raw_pointer_cast(count_diff));
exe_type.push("VECTOR F");
};
cudaFree(s3);
}
if (s1.compare("VECTOR") == 0) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
if (!a->columnGroups.empty()) {
thrust::device_ptr<int_type> source((int_type*)(s3));
thrust::device_ptr<int_type> count_diff = thrust::device_malloc<int_type>(res_size);
thrust::reduce_by_key(d_di, d_di+(a->mRecCount), source,
thrust::make_discard_iterator(), count_diff,
head_flag_predicate<bool>(),thrust::plus<int_type>());
exe_vectors.push(thrust::raw_pointer_cast(count_diff));
exe_type.push("VECTOR");
}
else {
thrust::device_ptr<int_type> source((int_type*)(s3));
thrust::device_ptr<int_type> count_diff = thrust::device_malloc<int_type>(1);
count_diff[0] = thrust::reduce(source, source+a->mRecCount);
exe_vectors.push(thrust::raw_pointer_cast(count_diff));
exe_type.push("VECTOR");
};
cudaFree(s3);
}
else if (s1.compare("NAME") == 0) {
s1_val = exe_value.top();
exe_value.pop();
unsigned int colIndex = (a->columnNames).find(s1_val)->second;
if (!a->columnGroups.empty()) {
if((a->type)[colIndex] == 0) {
thrust::device_ptr<int_type> source((int_type*)(a->d_columns)[colIndex]);
thrust::device_ptr<int_type> count_diff = thrust::device_malloc<int_type>(res_size);
thrust::reduce_by_key(d_di, d_di+(a->mRecCount), source,
thrust::make_discard_iterator(), count_diff,
head_flag_predicate<bool>(),thrust::plus<int_type>());
exe_vectors.push(thrust::raw_pointer_cast(count_diff));
exe_type.push("VECTOR");
}
else if((a->type)[colIndex] == 1) {
thrust::device_ptr<float_type> source((float_type*)(a->d_columns)[colIndex]);
thrust::device_ptr<float_type> count_diff = thrust::device_malloc<float_type>(res_size);
thrust::reduce_by_key(d_di, d_di+(a->mRecCount), source,
thrust::make_discard_iterator(), count_diff,
head_flag_predicate<bool>(),thrust::plus<float_type>());
exe_vectors_f.push(thrust::raw_pointer_cast(count_diff));
exe_type.push("VECTOR F");
}
}
else {
if((a->type)[colIndex] == 0) {
thrust::device_ptr<int_type> source((int_type*)(a->d_columns)[colIndex]);
thrust::device_ptr<int_type> dest;
int_type cc = thrust::reduce(source, source+a->mRecCount);
if (one_line) {
dest = thrust::device_malloc<int_type>(1);
dest[0] = cc;
}
else {
dest = thrust::device_malloc<int_type>(a->mRecCount);
thrust::sequence(dest, dest+(a->mRecCount), cc, (int_type)0);
};
exe_vectors.push(thrust::raw_pointer_cast(dest));
exe_type.push("VECTOR");
}
else if((a->type)[colIndex] == 1) {
thrust::device_ptr<float_type> source((float_type*)(a->d_columns)[colIndex]);
thrust::device_ptr<float_type> dest;
float_type cc = thrust::reduce(source, source+a->mRecCount);
if (one_line) {
dest = thrust::device_malloc<float_type>(1);
dest[0] = cc;
}
else {
dest = thrust::device_malloc<float_type>(a->mRecCount);
thrust::sequence(dest, dest+a->mRecCount, cc, (float_type)0);
};
exe_vectors_f.push(thrust::raw_pointer_cast(dest));
exe_type.push("VECTOR F");
};
};
}
}
else if (ss.compare("MIN") == 0) {
grp_type = "MIN";
s1 = exe_type.top();
exe_type.pop();
s1_val = exe_value.top();
exe_value.pop();
unsigned int colIndex = (a->columnNames).find(s1_val)->second;
if((a->type)[colIndex] == 0) {
thrust::device_ptr<int_type> source((int_type*)(a->d_columns)[colIndex]);
thrust::device_ptr<int_type> count_diff = thrust::device_malloc<int_type>(res_size);
thrust::reduce_by_key(d_di, d_di+(a->mRecCount), source,
thrust::make_discard_iterator(), count_diff,
head_flag_predicate<bool>(),thrust::minimum<int_type>());
exe_vectors.push(thrust::raw_pointer_cast(count_diff));
}
else if((a->type)[colIndex] == 1) {
thrust::device_ptr<float_type> source((float_type*)(a->d_columns)[colIndex]);
thrust::device_ptr<float_type> count_diff = thrust::device_malloc<float_type>(res_size);
thrust::reduce_by_key(d_di, d_di+(a->mRecCount), source,
thrust::make_discard_iterator(), count_diff,
head_flag_predicate<bool>(),thrust::minimum<float_type>());
exe_vectors_f.push(thrust::raw_pointer_cast(count_diff));
}
}
else if (ss.compare("AVG") == 0) {
grp_type = "AVG";
s1 = exe_type.top();
exe_type.pop();
s1_val = exe_value.top();
exe_value.pop();
unsigned int colIndex = (a->columnNames).find(s1_val)->second;
if((a->type)[colIndex] == 0) {
thrust::device_ptr<int_type> source((int_type*)(a->d_columns)[colIndex]);
thrust::device_ptr<int_type> count_diff = thrust::device_malloc<int_type>(res_size);
thrust::reduce_by_key(d_di, d_di+(a->mRecCount), source,
thrust::make_discard_iterator(), count_diff,
head_flag_predicate<bool>(),thrust::plus<int_type>());
exe_vectors.push(thrust::raw_pointer_cast(count_diff));
exe_type.push("VECTOR");
}
else if((a->type)[colIndex] == 1) {
thrust::device_ptr<float_type> source((float_type*)(a->d_columns)[colIndex]);
thrust::device_ptr<float_type> count_diff = thrust::device_malloc<float_type>(res_size);
thrust::reduce_by_key(d_di, d_di+(a->mRecCount), source,
thrust::make_discard_iterator(), count_diff,
head_flag_predicate<bool>(),thrust::plus<float_type>());
exe_vectors_f.push(thrust::raw_pointer_cast(count_diff));
exe_type.push("VECTOR F");
}
};
//op_value.pop();
};
if (ss.compare("NAME") == 0 || ss.compare("NUMBER") == 0 || ss.compare("VECTOR") == 0 || ss.compare("VECTOR F") == 0) {
exe_type.push(ss);
if (ss.compare("NUMBER") == 0) {
exe_nums.push(op_nums.front());
op_nums.pop();
}
else if (ss.compare("NAME") == 0) {
exe_value.push(op_value.front());
op_value.pop();
}
}
else {
if (ss.compare("MUL") == 0 || ss.compare("ADD") == 0 || ss.compare("DIV") == 0 || ss.compare("MINUS") == 0) {
// get 2 values from the stack
s1 = exe_type.top();
exe_type.pop();
s2 = exe_type.top();
exe_type.pop();
if (s1.compare("NUMBER") == 0 && s2.compare("NUMBER") == 0) {
n1 = exe_nums.top();
exe_nums.pop();
n2 = exe_nums.top();
exe_nums.pop();
if (ss.compare("ADD") == 0 )
res = n1+n2;
else if (ss.compare("MUL") == 0 )
res = n1*n2;
else if (ss.compare("DIV") == 0 )
res = n1/n2;
else
res = n1-n2;
thrust::device_ptr<int_type> p = thrust::device_malloc<int_type>(a->mRecCount);
thrust::sequence(p, p+(a->mRecCount),res,(int_type)0);
exe_type.push("VECTOR");
exe_vectors.push(thrust::raw_pointer_cast(p));
}
else if (s1.compare("FLOAT") == 0 && s2.compare("FLOAT") == 0) {
n1_f = exe_nums_f.top();
exe_nums_f.pop();
n2_f = exe_nums_f.top();
exe_nums_f.pop();
if (ss.compare("ADD") == 0 )
res_f = n1_f+n2_f;
else if (ss.compare("MUL") == 0 )
res_f = n1_f*n2_f;
else if (ss.compare("DIV") == 0 )
res_f = n1_f/n2_f;
else
res_f = n1_f-n2_f;
thrust::device_ptr<float_type> p = thrust::device_malloc<float_type>(a->mRecCount);
thrust::sequence(p, p+(a->mRecCount),res_f,(float_type)0);
exe_type.push("VECTOR F");
exe_vectors_f.push(thrust::raw_pointer_cast(p));
}
else if (s1.compare("NAME") == 0 && s2.compare("FLOAT") == 0) {
s1_val = exe_value.top();
exe_value.pop();
n1_f = exe_nums_f.top();
exe_nums_f.pop();
exe_type.push("VECTOR F");
if (a->type[(a->columnNames)[s1_val]] == 1) {
float_type* t = a->get_float_type_by_name(s1_val);
exe_vectors_f.push(a->op(t,n1_f,ss,1));
}
else {
int_type* t = a->get_int_by_name(s1_val);
exe_vectors_f.push(a->op(t,n1_f,ss,1));
};
}
else if (s1.compare("FLOAT") == 0 && s2.compare("NAME") == 0) {
n1_f = exe_nums_f.top();
exe_nums_f.pop();
s2_val = exe_value.top();
exe_value.pop();
exe_type.push("VECTOR F");
if (a->type[(a->columnNames)[s2_val]] == 1) {
float_type* t = a->get_float_type_by_name(s2_val);
exe_vectors_f.push(a->op(t,n1_f,ss,0));
}
else {
int_type* t = a->get_int_by_name(s2_val);
exe_vectors_f.push(a->op(t,n1_f,ss,0));
};
}
else if (s1.compare("NAME") == 0 && s2.compare("NUMBER") == 0) {
s1_val = exe_value.top();
exe_value.pop();
n1 = exe_nums.top();
exe_nums.pop();
if (a->type[(a->columnNames)[s1_val]] == 1) {
float_type* t = a->get_float_type_by_name(s1_val);
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(t,(float_type)n1,ss,1));
}
else {
int_type* t = a->get_int_by_name(s1_val);
exe_type.push("VECTOR");
exe_vectors.push(a->op(t,n1,ss,1));
};
}
else if (s1.compare("NUMBER") == 0 && s2.compare("NAME") == 0) {
n1 = exe_nums.top();
exe_nums.pop();
s2_val = exe_value.top();
exe_value.pop();
if (a->type[(a->columnNames)[s2_val]] == 1) {
float_type* t = a->get_float_type_by_name(s2_val);
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(t,(float_type)n1,ss,0));
}
else {
int_type* t = a->get_int_by_name(s2_val);
exe_type.push("VECTOR");
exe_vectors.push(a->op(t,n1,ss,0));
};
}
else if (s1.compare("NAME") == 0 && s2.compare("NAME") == 0) {
s1_val = exe_value.top();
exe_value.pop();
s2_val = exe_value.top();
exe_value.pop();
if (a->type[(a->columnNames)[s1_val]] == 0) {
int_type* t1 = a->get_int_by_name(s1_val);
if (a->type[(a->columnNames)[s2_val]] == 0) {
int_type* t = a->get_int_by_name(s2_val);
exe_type.push("VECTOR");
exe_vectors.push(a->op(t1,t,ss,0));
}
else {
float_type* t = a->get_float_type_by_name(s2_val);
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(t1,t,ss,0));
};
}
else {
float_type* t = a->get_float_type_by_name(s1_val);
if (a->type[(a->columnNames)[s2_val]] == 0) {
int_type* t1 = a->get_int_by_name(s2_val);
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(t1,t,ss,0));
}
else {
float_type* t1 = a->get_float_type_by_name(s2_val);
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(t1,t,ss,0));
};
}
}
else if ((s1.compare("VECTOR") == 0 || s1.compare("VECTOR F") == 0 ) && s2.compare("NAME") == 0) {
s2_val = exe_value.top();
exe_value.pop();
if (a->type[(a->columnNames)[s2_val]] == 0) {
int_type* t = a->get_int_by_name(s2_val);
if (s1.compare("VECTOR") == 0 ) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
exe_type.push("VECTOR");
exe_vectors.push(a->op(t,s3,ss,0));
//free s3
cudaFree(s3);
}
else {
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(t,s3,ss,0));
cudaFree(s3);
}
}
else {
float_type* t = a->get_float_type_by_name(s2_val);
if (s1.compare("VECTOR") == 0 ) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(s3,t, ss,0));
cudaFree(s3);
}
else {
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(t,s3,ss,0));
cudaFree(s3);
}
};
}
else if ((s2.compare("VECTOR") == 0 || s2.compare("VECTOR F") == 0 ) && s1.compare("NAME") == 0) {
s1_val = exe_value.top();
exe_value.pop();
if (a->type[(a->columnNames)[s1_val]] == 0) {
int_type* t = a->get_int_by_name(s1_val);
if (s2.compare("VECTOR") == 0 ) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
exe_type.push("VECTOR");
exe_vectors.push(a->op(t,s3,ss,1));
cudaFree(s3);
}
else {
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(t,s3,ss,1));
cudaFree(s3);
}
}
else {
float_type* t = a->get_float_type_by_name(s1_val);
if (s2.compare("VECTOR") == 0 ) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(s3,t,ss,1));
cudaFree(s3);
}
else {
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(t,s3,ss,1));
cudaFree(s3);
}
};
}
else if ((s1.compare("VECTOR") == 0 || s1.compare("VECTOR F") == 0) && s2.compare("NUMBER") == 0) {
n1 = exe_nums.top();
exe_nums.pop();
if (s1.compare("VECTOR") == 0 ) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
exe_type.push("VECTOR");
exe_vectors.push(a->op(s3,n1, ss,1));
cudaFree(s3);
}
else {
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(s3,n1, ss,1));
cudaFree(s3);
}
}
else if (s1.compare("NUMBER") == 0 && (s2.compare("VECTOR") == 0 || s2.compare("VECTOR F") == 0)) {
n1 = exe_nums.top();
exe_nums.pop();
if (s2.compare("VECTOR") == 0 ) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
exe_type.push("VECTOR");
exe_vectors.push(a->op(s3,n1, ss,0));
cudaFree(s3);
}
else {
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(s3,n1, ss,0));
cudaFree(s3);
}
}
else if ((s1.compare("VECTOR") == 0 || s1.compare("VECTOR F") == 0) && s2.compare("FLOAT") == 0) {
n1_f = exe_nums_f.top();
exe_nums_f.pop();
if (s1.compare("VECTOR") == 0 ) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(s3,n1_f, ss,1));
cudaFree(s3);
}
else {
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(s3,n1_f, ss,1));
cudaFree(s3);
}
}
else if (s1.compare("FLOAT") == 0 && s2.compare("VECTOR") == 0) {
n1_f = exe_nums_f.top();
exe_nums_f.pop();
if (s2.compare("VECTOR") == 0 ) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(s3,n1_f, ss,0));
cudaFree(s3);
}
else {
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(s3,n1_f, ss,0));
cudaFree(s3);
}
}
else if (s1.compare("VECTOR") == 0 && s2.compare("VECTOR") == 0) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
int_type* s4 = exe_vectors.top();
exe_vectors.pop();
exe_type.push("VECTOR");
exe_vectors.push(a->op(s3, s4,ss,0));
cudaFree(s3);
cudaFree(s4);
}
else if(s1.compare("VECTOR") == 0 && s2.compare("VECTOR F") == 0) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
float_type* s4 = exe_vectors_f.top();
exe_vectors_f.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(s3, s4,ss,1));
cudaFree(s3);
cudaFree(s4);
}
else if(s1.compare("VECTOR F") == 0 && s2.compare("VECTOR") == 0) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
float_type* s4 = exe_vectors_f.top();
exe_vectors_f.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(s3, s4,ss,0));
cudaFree(s3);
cudaFree(s4);
}
else if(s1.compare("VECTOR F") == 0 && s2.compare("VECTOR F") == 0) {
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
float_type* s4 = exe_vectors_f.top();
exe_vectors_f.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(s3, s4,ss,1));
cudaFree(s3);
cudaFree(s4);
}
}
}
} // end of handling for non-"emit sel_name" tokens
else {
// here we need to save what is where
col_val.push(op_value.front());
op_value.pop();
grp_type1.push(grp_type);
if(!exe_nums.empty()) { //number
col_type.push(0);
exe_nums1.push(exe_nums.top());
exe_nums.pop();
};
if(!exe_value.empty()) { //field name
col_type.push(1);
exe_value1.push(exe_value.top());
exe_value.pop();
};
if(!exe_vectors.empty()) { //vector int
exe_vectors1.push(exe_vectors.top());
exe_vectors.pop();
col_type.push(2);
};
if(!exe_vectors_f.empty()) { //vector float
exe_vectors1_d.push(exe_vectors_f.top());
exe_vectors_f.pop();
col_type.push(3);
};
colCount++;
};
};
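// Second pass: walk the collected per-column stacks (col_type / col_val / grp_type1)
// in reverse and materialize each output column into `b`. With a GROUP BY active the
// values are compacted with thrust::copy_if on the group-head flags (a->grp);
// otherwise they are appended to the host-side result via addHostColumn.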
b->grp_type = new unsigned int[colCount];
for(int j=0; j<colCount; j++) {
if ((grp_type1.top()).compare("COUNT") == 0 )
b->grp_type[colCount-j-1] = 0;
else if ((grp_type1.top()).compare("AVG") == 0 )
b->grp_type[colCount-j-1] = 1;
else if ((grp_type1.top()).compare("SUM") == 0 )
b->grp_type[colCount-j-1] = 2;
else if ((grp_type1.top()).compare("NULL") == 0 )
b->grp_type[colCount-j-1] = 3;
else if ((grp_type1.top()).compare("MIN") == 0 )
b->grp_type[colCount-j-1] = 4;
else if ((grp_type1.top()).compare("MAX") == 0 )
b->grp_type[colCount-j-1] = 5;
if(col_type.top() == 0) {
// create a vector
thrust::device_ptr<int_type> s = thrust::device_malloc<int_type>(a->mRecCount);
thrust::sequence(s, s+(a->mRecCount), (int)exe_nums1.top(), 0);
if (!(a->columnGroups).empty()) {
thrust::device_ptr<int_type> count_diff = thrust::device_malloc<int_type>(res_size);
thrust::device_ptr<bool> d_grp(a->grp);
thrust::copy_if(s,s+(a->mRecCount), d_grp, count_diff, nz<bool>());
b->addDeviceColumn(thrust::raw_pointer_cast(count_diff) , colCount-j-1, col_val.top(), res_size);
thrust::device_free(count_diff);
}
else
b->addHostColumn(thrust::raw_pointer_cast(s) , colCount-j-1, col_val.top(), a->mRecCount, old_reccount, one_line);
exe_nums1.pop();
};
if(col_type.top() == 1) {
unsigned int colIndex = (a->columnNames).find(exe_value1.top())->second;
if(a->type[colIndex] == 0) {
thrust::device_ptr<int_type> s((int_type*)(a->d_columns)[colIndex]);
//modify what we push there in case of a grouping
if (!(a->columnGroups).empty()) {
thrust::device_ptr<int_type> count_diff = thrust::device_malloc<int_type>(res_size);
thrust::device_ptr<bool> d_grp(a->grp);
thrust::copy_if(s,s+(a->mRecCount), d_grp, count_diff, nz<bool>());
b->addDeviceColumn(thrust::raw_pointer_cast(count_diff) , colCount-j-1, col_val.top(), res_size);
thrust::device_free(count_diff);
}
else
b->addHostColumn(thrust::raw_pointer_cast(s) , colCount-j-1, col_val.top(), a->mRecCount, old_reccount, one_line);
}
else if(a->type[colIndex] == 1) {
thrust::device_ptr<float_type> s((float_type*)(a->d_columns)[colIndex]);
//modify what we push there in case of a grouping
if (!(a->columnGroups).empty()) {
thrust::device_ptr<float_type> count_diff = thrust::device_malloc<float_type>(res_size);
thrust::device_ptr<bool> d_grp(a->grp);
thrust::copy_if(s,s+(a->mRecCount), d_grp, count_diff, nz<bool>());
b->addDeviceColumn(thrust::raw_pointer_cast(count_diff) , colCount-j-1, col_val.top(), res_size);
thrust::device_free(count_diff);
}
else
b->addHostColumn(thrust::raw_pointer_cast(s) , colCount-j-1, col_val.top(), a->mRecCount, old_reccount, one_line);
}
else if(a->type[colIndex] == 2) { //varchar
if (b->columnNames.find(col_val.top()) == b->columnNames.end()) {
CudaChar* n = new CudaChar(((CudaChar*)(a->h_columns)[colIndex])->mColumnCount, old_reccount);
b->h_columns[colCount-j-1] = n;
b->columnNames[col_val.top()] = colCount-j-1;
b->type[colCount-j-1] = 2;
};
CudaChar *cc = (CudaChar*)(a->h_columns)[colIndex];
CudaChar* nn = (CudaChar*)b->h_columns[colCount-j-1];
//modify what we push there in case of a grouping
if (!(a->columnGroups).empty()) {
nn->allocOnDevice(a->mRecCount);
thrust::device_ptr<bool> d_grp(a->grp);
for(unsigned int k=0; k < (nn->mColumnCount); k++) {
thrust::device_ptr<char> sr((cc->d_columns)[k]);
thrust::device_ptr<char> de((nn->d_columns)[k]);
thrust::copy_if(sr,sr+(a->mRecCount), d_grp, de, nz<bool>());
};
}
else {
//copy the cc to new
for(unsigned int k=0; k < (nn->mColumnCount); k++)
cudaMemcpy((void *) (nn->h_columns[k] + b->mRecCount), (void *) (cc->d_columns)[k], a->mRecCount, cudaMemcpyDeviceToHost);
}
}
exe_value1.pop();
};
if(col_type.top() == 2) { // int
if (!(a->columnGroups).empty())
b->addDeviceColumn(exe_vectors1.top() , colCount-j-1, col_val.top(), res_size);
else
b->addHostColumn(exe_vectors1.top() , colCount-j-1, col_val.top(), a->mRecCount, old_reccount, one_line);
cudaFree(exe_vectors1.top());
exe_vectors1.pop();
}
if(col_type.top() == 3) { //float
if (!(a->columnGroups).empty()) {
b->addDeviceColumn(exe_vectors1_d.top() , colCount-j-1, col_val.top(), res_size);
}
else
b->addHostColumn(exe_vectors1_d.top() , colCount-j-1, col_val.top(), a->mRecCount, old_reccount, one_line);
cudaFree(exe_vectors1_d.top());
exe_vectors1_d.pop();
};
col_type.pop();
col_val.pop();
grp_type1.pop();
};
if ((a->columnGroups).empty()) {
if ( !one_line)
b->mRecCount = b->mRecCount + a->mRecCount;
else
b->mRecCount = b->mRecCount + 1;
}
else
b->mRecCount = res_size;
}
|
d3cf996503f77f3a6c0b2dee33539273d30dbbf4.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/iterator.cuh>
#include <cudf/aggregation.hpp>
#include <cudf/types.hpp>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/iterator/discard_iterator.h>
namespace cudf {
namespace experimental {
namespace groupby {
namespace detail {
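/**
 * Computes the per-group count of valid (non-null) elements of `values`.
 * `group_labels` maps every row to its group index, so a single reduce_by_key
 * over the labels sums one per row (or one per valid row when the column has a
 * null mask, using the transformed validity iterator). Returns a size_type
 * column with one entry per group.
 */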
std::unique_ptr<column> group_count_valid(
column_view const& values,
rmm::device_vector<size_type> const& group_labels,
size_type num_groups,
rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
CUDF_EXPECTS(num_groups >= 0, "number of groups cannot be negative");
CUDF_EXPECTS(static_cast<size_t>(values.size()) == group_labels.size(),
"Size of values column should be same as that of group labels");
auto result = make_numeric_column(data_type(type_to_id<size_type>()),
num_groups, mask_state::UNALLOCATED, stream, mr);
if (num_groups == 0) {
return result;
}
if (values.nullable()) {
auto values_view = column_device_view::create(values);
// make_validity_iterator returns a boolean iterator, and summing booleans
// saturates (1 + 1 == 1), so we transform it to cast each validity flag to an
// integer type before reducing
auto bitmask_iterator = thrust::make_transform_iterator(
experimental::detail::make_validity_iterator(*values_view),
[] __device__ (auto b) { return static_cast<size_type>(b); });
thrust::reduce_by_key(rmm::exec_policy(stream)->on(stream),
group_labels.begin(),
group_labels.end(),
bitmask_iterator,
thrust::make_discard_iterator(),
result->mutable_view().begin<size_type>());
} else {
thrust::reduce_by_key(rmm::exec_policy(stream)->on(stream),
group_labels.begin(),
group_labels.end(),
thrust::make_constant_iterator(1),
thrust::make_discard_iterator(),
result->mutable_view().begin<size_type>());
}
return result;
}
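/**
 * Computes the total number of rows in each group, ignoring validity.
 * Since `group_offsets` holds the start offset of every group (plus the end
 * sentinel), the counts are simply the adjacent differences of the offsets.
 */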
std::unique_ptr<column> group_count_all(
rmm::device_vector<size_type> const& group_offsets,
size_type num_groups,
rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
CUDF_EXPECTS(num_groups >= 0, "number of groups cannot be negative");
auto result = make_numeric_column(data_type(type_to_id<size_type>()),
num_groups, mask_state::UNALLOCATED, stream, mr);
if (num_groups == 0) {
return result;
}
thrust::adjacent_difference(rmm::exec_policy(stream)->on(stream),
group_offsets.begin() + 1,
group_offsets.end(),
result->mutable_view().begin<size_type>());
return result;
}
} // namespace detail
} // namespace groupby
} // namespace experimental
} // namespace cudf
| d3cf996503f77f3a6c0b2dee33539273d30dbbf4.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/iterator.cuh>
#include <cudf/aggregation.hpp>
#include <cudf/types.hpp>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/iterator/discard_iterator.h>
namespace cudf {
namespace experimental {
namespace groupby {
namespace detail {
std::unique_ptr<column> group_count_valid(
column_view const& values,
rmm::device_vector<size_type> const& group_labels,
size_type num_groups,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream)
{
CUDF_EXPECTS(num_groups >= 0, "number of groups cannot be negative");
CUDF_EXPECTS(static_cast<size_t>(values.size()) == group_labels.size(),
"Size of values column should be same as that of group labels");
auto result = make_numeric_column(data_type(type_to_id<size_type>()),
num_groups, mask_state::UNALLOCATED, stream, mr);
if (num_groups == 0) {
return result;
}
if (values.nullable()) {
auto values_view = column_device_view::create(values);
// make_validity_iterator returns a boolean iterator, and summing booleans
// saturates (1 + 1 == 1), so we transform it to cast each validity flag to an
// integer type before reducing
auto bitmask_iterator = thrust::make_transform_iterator(
experimental::detail::make_validity_iterator(*values_view),
[] __device__ (auto b) { return static_cast<size_type>(b); });
thrust::reduce_by_key(rmm::exec_policy(stream)->on(stream),
group_labels.begin(),
group_labels.end(),
bitmask_iterator,
thrust::make_discard_iterator(),
result->mutable_view().begin<size_type>());
} else {
thrust::reduce_by_key(rmm::exec_policy(stream)->on(stream),
group_labels.begin(),
group_labels.end(),
thrust::make_constant_iterator(1),
thrust::make_discard_iterator(),
result->mutable_view().begin<size_type>());
}
return result;
}
std::unique_ptr<column> group_count_all(
rmm::device_vector<size_type> const& group_offsets,
size_type num_groups,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream)
{
CUDF_EXPECTS(num_groups >= 0, "number of groups cannot be negative");
auto result = make_numeric_column(data_type(type_to_id<size_type>()),
num_groups, mask_state::UNALLOCATED, stream, mr);
if (num_groups == 0) {
return result;
}
thrust::adjacent_difference(rmm::exec_policy(stream)->on(stream),
group_offsets.begin() + 1,
group_offsets.end(),
result->mutable_view().begin<size_type>());
return result;
}
} // namespace detail
} // namespace groupby
} // namespace experimental
} // namespace cudf
|
4207f301b03b13797f5ee63d2e7260cc9ff952c1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "kernel.h"
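// Depth-image-based rendering helpers:
// - CUDA_PixelShifting shifts each source pixel left by a disparity derived from
//   the 8-bit depth map (scaled to at most 70 pixels) into a 16-bit destination.
// - CUDA_ImagePainting fills the holes left behind by the shift.
// - CUDA_Concate packs two views side by side at half horizontal resolution.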
__global__ void CUDA_PixelShifting(PtrStep<unsigned char> src, PtrStep<unsigned char> depth, PtrStep<signed short> dst,
int rows, int cols, int channels) {
int col = threadIdx.x + blockIdx.x * blockDim.x;
int row = threadIdx.y + blockIdx.y * blockDim.y;
if (col < cols && row < rows) {
int src_offset = (row * src.step + channels * col);
int dst_offset = (row * dst.step / sizeof(signed short) + channels * col);
int depth_offset = (row * depth.step + col);
int dis = (int)(depth[depth_offset] * 70. / 255.);
//int dis = (int)(depth[depth_offset] * 70.);
if (col > dis) {
dst[dst_offset - (dis * channels) + 0] = src[src_offset + 0];
dst[dst_offset - (dis * channels) + 1] = src[src_offset + 1];
dst[dst_offset - (dis * channels) + 2] = src[src_offset + 2];
}
}
}
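// Hole filling: pixels still holding the -1 sentinel (never written by the shift)
// are replaced by the nearest valid pixel found while scanning up to 70 columns to
// the left and right on the same row.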
__global__ void CUDA_ImagePainting(PtrStep<signed short> img, int rows, int cols, int channels) {
int col = threadIdx.x + blockIdx.x * blockDim.x;
int row = threadIdx.y + blockIdx.y * blockDim.y;
if (col < cols && row < rows) {
int rgb_offset = (row * img.step / sizeof(signed short) + channels * col);
if (img[rgb_offset + 0] == -1 && img[rgb_offset + 1] == -1 && img[rgb_offset + 2] == -1) {
for (int offset = 1; offset < 70; offset++) {
if (col - offset >= 0 && img[rgb_offset - (offset * channels) + 0] != -1 && img[rgb_offset - (offset * channels) + 1] != -1 && img[rgb_offset - (offset * channels) + 2] != -1) {
img[rgb_offset + 0] = img[rgb_offset - (offset * channels) + 0];
img[rgb_offset + 1] = img[rgb_offset - (offset * channels) + 1];
img[rgb_offset + 2] = img[rgb_offset - (offset * channels) + 2];
break;
}
if (col + offset < cols && img[rgb_offset + (offset * channels) + 0] != -1 && img[rgb_offset + (offset * channels) + 1] != -1 && img[rgb_offset + (offset * channels) + 2] != -1) {
img[rgb_offset + 0] = img[rgb_offset + (offset * channels) + 0];
img[rgb_offset + 1] = img[rgb_offset + (offset * channels) + 1];
img[rgb_offset + 2] = img[rgb_offset + (offset * channels) + 2];
break;
}
}
}
}
}
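// Side-by-side packing: every second column of src1 goes into the left half of dst
// and the matching column of src2 into the right half, halving the horizontal
// resolution of each view.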
__global__ void CUDA_Concate(PtrStep<unsigned char> src1, PtrStep<unsigned char> src2, PtrStep<unsigned char> dst,
int rows, int cols, int channels) {
int col = threadIdx.x + blockIdx.x * blockDim.x;
int row = threadIdx.y + blockIdx.y * blockDim.y;
if (col < cols && row < rows && col % 2 == 0) {
int src_rgb_offset = (row * src1.step + channels * col);
if (col <= cols) {
int dst_rgb_offset = (row * src1.step + channels * col / 2);
dst[dst_rgb_offset + 0] = src1[src_rgb_offset + 0];
dst[dst_rgb_offset + 1] = src1[src_rgb_offset + 1];
dst[dst_rgb_offset + 2] = src1[src_rgb_offset + 2];
dst_rgb_offset += channels * (int)ceil(cols / 2.);
dst[dst_rgb_offset + 0] = src2[src_rgb_offset + 0];
dst[dst_rgb_offset + 1] = src2[src_rgb_offset + 1];
dst[dst_rgb_offset + 2] = src2[src_rgb_offset + 2];
}
}
}
void PixelShifting(PtrStep<unsigned char> src, PtrStep<unsigned char> depth, PtrStep<signed short> dst,
int height, int width, int channels) {
const dim3 dimGrid((int)ceil(width / 16.), (int)ceil(height / 16.));
const dim3 dimBlock(16, 16);
CUDA_PixelShifting << <dimGrid, dimBlock >> > (src, depth, dst, height, width, channels);
}
void ImagePainting(PtrStep<signed short> img, int height, int width, int channels) {
const dim3 dimGrid((int)ceil(width / 16.), (int)ceil(height / 16.));
const dim3 dimBlock(16, 16);
CUDA_ImagePainting << <dimGrid, dimBlock >> > (img, height, width, channels);
}
void ImageConcate(PtrStep<unsigned char> src1, PtrStep<unsigned char> src2, PtrStep<unsigned char> dst, int height, int width,
int channels) {
const dim3 dimGrid((int)ceil(width / 16.), (int)ceil(height / 16.));
const dim3 dimBlock(16, 16);
CUDA_Concate << <dimGrid, dimBlock >> > (src1, src2, dst, height, width, channels);
}
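/*
 * Host-side usage sketch (illustrative only). It assumes PtrStep is OpenCV's
 * cv::cuda::PtrStep, as the signatures suggest, so a cv::cuda::GpuMat can be
 * passed directly; the matrix names and formats below are assumptions, not part
 * of this file:
 *
 *   cv::cuda::GpuMat d_color(h, w, CV_8UC3), d_depth(h, w, CV_8UC1); // inputs
 *   cv::cuda::GpuMat d_shift(h, w, CV_16SC3, cv::Scalar::all(-1));   // -1 marks holes
 *   PixelShifting(d_color, d_depth, d_shift, h, w, 3);
 *   ImagePainting(d_shift, h, w, 3);
 */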
| 4207f301b03b13797f5ee63d2e7260cc9ff952c1.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "kernel.h"
__global__ void CUDA_PixelShifting(PtrStep<unsigned char> src, PtrStep<unsigned char> depth, PtrStep<signed short> dst,
int rows, int cols, int channels) {
int col = threadIdx.x + blockIdx.x * blockDim.x;
int row = threadIdx.y + blockIdx.y * blockDim.y;
if (col < cols && row < rows) {
int src_offset = (row * src.step + channels * col);
int dst_offset = (row * dst.step / sizeof(signed short) + channels * col);
int depth_offset = (row * depth.step + col);
int dis = (int)(depth[depth_offset] * 70. / 255.);
//int dis = (int)(depth[depth_offset] * 70.);
if (col > dis) {
dst[dst_offset - (dis * channels) + 0] = src[src_offset + 0];
dst[dst_offset - (dis * channels) + 1] = src[src_offset + 1];
dst[dst_offset - (dis * channels) + 2] = src[src_offset + 2];
}
}
}
__global__ void CUDA_ImagePainting(PtrStep<signed short> img, int rows, int cols, int channels) {
int col = threadIdx.x + blockIdx.x * blockDim.x;
int row = threadIdx.y + blockIdx.y * blockDim.y;
if (col < cols && row < rows) {
int rgb_offset = (row * img.step / sizeof(signed short) + channels * col);
if (img[rgb_offset + 0] == -1 && img[rgb_offset + 1] == -1 && img[rgb_offset + 2] == -1) {
for (int offset = 1; offset < 70; offset++) {
if (col - offset >= 0 && img[rgb_offset - (offset * channels) + 0] != -1 && img[rgb_offset - (offset * channels) + 1] != -1 && img[rgb_offset - (offset * channels) + 2] != -1) {
img[rgb_offset + 0] = img[rgb_offset - (offset * channels) + 0];
img[rgb_offset + 1] = img[rgb_offset - (offset * channels) + 1];
img[rgb_offset + 2] = img[rgb_offset - (offset * channels) + 2];
break;
}
if (col + offset < cols && img[rgb_offset + (offset * channels) + 0] != -1 && img[rgb_offset + (offset * channels) + 1] != -1 && img[rgb_offset + (offset * channels) + 2] != -1) {
img[rgb_offset + 0] = img[rgb_offset + (offset * channels) + 0];
img[rgb_offset + 1] = img[rgb_offset + (offset * channels) + 1];
img[rgb_offset + 2] = img[rgb_offset + (offset * channels) + 2];
break;
}
}
}
}
}
__global__ void CUDA_Concate(PtrStep<unsigned char> src1, PtrStep<unsigned char> src2, PtrStep<unsigned char> dst,
int rows, int cols, int channels) {
int col = threadIdx.x + blockIdx.x * blockDim.x;
int row = threadIdx.y + blockIdx.y * blockDim.y;
if (col < cols && row < rows && col % 2 == 0) {
int src_rgb_offset = (row * src1.step + channels * col);
if (col <= cols) {
int dst_rgb_offset = (row * src1.step + channels * col / 2);
dst[dst_rgb_offset + 0] = src1[src_rgb_offset + 0];
dst[dst_rgb_offset + 1] = src1[src_rgb_offset + 1];
dst[dst_rgb_offset + 2] = src1[src_rgb_offset + 2];
dst_rgb_offset += channels * (int)ceil(cols / 2.);
dst[dst_rgb_offset + 0] = src2[src_rgb_offset + 0];
dst[dst_rgb_offset + 1] = src2[src_rgb_offset + 1];
dst[dst_rgb_offset + 2] = src2[src_rgb_offset + 2];
}
}
}
void PixelShifting(PtrStep<unsigned char> src, PtrStep<unsigned char> depth, PtrStep<signed short> dst,
int height, int width, int channels) {
const dim3 dimGrid((int)ceil(width / 16.), (int)ceil(height / 16.));
const dim3 dimBlock(16, 16);
CUDA_PixelShifting << <dimGrid, dimBlock >> > (src, depth, dst, height, width, channels);
}
void ImagePainting(PtrStep<signed short> img, int height, int width, int channels) {
const dim3 dimGrid((int)ceil(width / 16.), (int)ceil(height / 16.));
const dim3 dimBlock(16, 16);
CUDA_ImagePainting << <dimGrid, dimBlock >> > (img, height, width, channels);
}
void ImageConcate(PtrStep<unsigned char> src1, PtrStep<unsigned char> src2, PtrStep<unsigned char> dst, int height, int width,
int channels) {
const dim3 dimGrid((int)ceil(width / 16.), (int)ceil(height / 16.));
const dim3 dimBlock(16, 16);
CUDA_Concate << <dimGrid, dimBlock >> > (src1, src2, dst, height, width, channels);
}
|
025246acf2934d748bfa0fb1d3d411f380580800.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <algorithm>
#include <basic_types.h>
#include <util.h>
#include <error.h>
#include <types.h>
#include <matrix_coloring/min_max_2ring.h>
#include <cusp/format.h>
#include <cusp/copy.h>
#include <cusp/detail/random.h>
#include <thrust/count.h>
#include <thrust/extrema.h>
#include <sm_utils.inl>
#define COLORING_DEBUG 1
// Pseudo-random number generator
namespace amgx
{
static __host__ __device__ unsigned int hash_function(unsigned int a, unsigned int seed, unsigned int rows = 0)
{
a ^= seed;
a = (a + 0x7ed55d16) + (a << 12);
a = (a ^ 0xc761c23c) + (a >> 19);
a = (a + 0x165667b1) + (a << 5);
a = (a ^ 0xd3a2646c) + (a << 9);
a = (a + 0xfd7046c5) + (a << 3);
a = (a ^ 0xb55a4f09) + (a >> 16);
return a;
}
struct is_zero
{
__host__ __device__
bool operator()(int x)
{
return x == 0;
}
};
// ---------------------------
// Kernels
// ---------------------------
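// count_gtlt_kernel: one warp per row. For every row that is still uncolored it
// hashes the row id and counts how many of its uncolored neighbors hash strictly
// greater (gt) and strictly smaller (lt), then packs both 16-bit counts into a
// single int as (gt << 16) | lt (e.g. gt = 3, lt = 5 packs to 0x00030005).
// Rows that already have a color store -1.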
template< int CTA_SIZE, int WARP_SIZE >
__global__
void count_gtlt_kernel( const int A_num_rows,
const int *__restrict A_rows,
const int *__restrict A_cols,
const int *__restrict A_colors,
int *A_gtlt_count,
const int seed)
{
const int NUM_WARPS_PER_CTA = CTA_SIZE / WARP_SIZE;
const int NUM_WARPS_PER_GRID = gridDim.x * NUM_WARPS_PER_CTA;
// Thread coordinates.
const int warp_id = utils::warp_id();
const int lane_id = utils::lane_id();
// Row identifier.
int row_id = blockIdx.x * NUM_WARPS_PER_CTA + warp_id;
// Iterate over the rows of the matrix.
for ( ; row_id < A_num_rows ; row_id += NUM_WARPS_PER_GRID )
{
int row_color = A_colors[row_id];
if ( row_color != 0 )
{
if ( lane_id == 0 )
{
A_gtlt_count[row_id] = -1;
}
continue;
}
// Hash my row id.
int row_hash = hash_function( row_id, seed );
// The number of vertices that are greater/smaller than me.
int gt_count = 0, lt_count = 0;
// Iterators over my row.
int row_begin = A_rows[row_id ];
int row_end = A_rows[row_id + 1];
for ( ; row_begin < row_end ; row_begin += WARP_SIZE )
{
// Iterator.
int row_it = row_begin + lane_id;
// Get the column index (if the iterator is valid).
int col_id = -1;
if ( row_it < row_end )
{
col_id = A_cols[row_it];
}
// Each thread hashes its column id.
int col_hash = hash_function( col_id, seed );
// Get the color of the column.
int col_color = -1;
if ( row_it < row_end && col_id < A_num_rows)
{
col_color = A_colors[col_id];
}
// Threads determine if they are greater than the row hash.
int gt_pred = col_color == 0 && col_hash > row_hash;
int lt_pred = col_color == 0 && col_hash < row_hash;
// Count greater/smaller neighbors.
gt_count += __popc( utils::ballot( gt_pred ) );
lt_count += __popc( utils::ballot( lt_pred ) );
}
// The warp leader stores the result.
int my_gtlt_count = (gt_count << 16) | lt_count;
if ( lane_id == 0 )
{
A_gtlt_count[row_id] = my_gtlt_count;
}
}
}
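// color_kernel: one warp per row. An uncolored row takes `current_color` when its
// gt count is the smallest among its uncolored neighbors (ties broken by row id)
// and `current_color + 1` when its lt count is, so two independent sets are colored
// per sweep. `weakness_bound` caps how large the gt/lt counts of a candidate may be,
// and with LATE_REJECTION a candidate is rejected if a neighbor already received one
// of the two colors earlier in the same sweep.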
template< int CTA_SIZE, int WARP_SIZE, bool LATE_REJECTION >
__global__
void color_kernel( const int A_num_rows, const int *A_rows, const int *A_cols, const int *A_gtlt_count, const int current_color, const int weakness_bound, int *A_colors )
{
const int NUM_WARPS_PER_CTA = CTA_SIZE / WARP_SIZE;
const int NUM_WARPS_PER_GRID = gridDim.x * NUM_WARPS_PER_CTA;
// Thread coordinates.
const int warp_id = utils::warp_id();
const int lane_id = utils::lane_id();
// Row identifier.
int row_id = blockIdx.x * NUM_WARPS_PER_CTA + warp_id;
// Iterate over the rows of the matrix.
for ( ; row_id < A_num_rows ; row_id += NUM_WARPS_PER_GRID )
{
int row_color = A_colors[row_id];
if ( row_color != 0 ) // Already colored!!!
{
continue;
}
// The number of vertices that are greater/smaller than me.
int row_gtlt_count = A_gtlt_count[row_id];
// Split gtlt_count into 2.
int row_gt_count = row_gtlt_count >> 16;
int row_lt_count = row_gtlt_count & 0xffff;
// Min-max algorithm.
if ( row_gt_count == 0 && row_lt_count == 0 )
{
if ( lane_id == 0 )
{
A_colors[row_id] = current_color + (row_id & 1);
}
continue;
}
if ( row_gt_count == 0 )
{
if ( lane_id == 0 )
{
A_colors[row_id] = current_color;
}
continue;
}
if ( row_lt_count == 0 )
{
if ( lane_id == 0 )
{
A_colors[row_id] = current_color + 1;
}
continue;
}
// Do we skip it.
int candidate = 1;
// Predicates. Is a vertex min/max.
int is_max_vertex = row_gt_count <= weakness_bound;
int is_min_vertex = row_lt_count <= weakness_bound;
// Iterators over my row.
int row_begin = A_rows[row_id ];
int row_end = A_rows[row_id + 1];
for ( ; row_begin < row_end ; row_begin += WARP_SIZE )
{
// Iterator.
int row_it = row_begin + lane_id;
// Get the column index (if the iterator is valid).
int col_id = -1;
if ( row_it < row_end )
{
col_id = A_cols[row_it];
}
// Get the color of the column (it could help late rejection).
if ( LATE_REJECTION )
{
int col_color = -1;
if ( row_it < row_end && col_id < A_num_rows)
{
col_color = A_colors[col_id];
}
// Late rejection test.
if ( col_color == current_color )
{
is_max_vertex = 0;
}
if ( col_color == current_color + 1 )
{
is_min_vertex = 0;
}
}
// Get the gt/lt count.
int col_gtlt_count = -1;
if ( row_it < row_end && col_id < A_num_rows )
{
col_gtlt_count = A_gtlt_count[col_id];
}
// Split gtlt_count into 2. col_gt/lt_count == -1 if already colored.
int col_gt_count = col_gtlt_count >> 16;
int col_lt_count = col_gtlt_count & 0xffff;
// Threads determine if they are greater than the row hash.
if ( col_gtlt_count != -1 )
{
is_max_vertex &= col_gt_count > row_gt_count || (col_gt_count == row_gt_count && col_id <= row_id);
is_min_vertex &= col_lt_count > row_lt_count || (col_lt_count == row_lt_count && col_id >= row_id);
}
}
// The warp leader stores the result.
if ( candidate && utils::all( is_max_vertex ) )
{
if ( lane_id == 0 )
{
A_colors[row_id] = current_color;
}
continue;
}
if ( candidate && utils::all( is_min_vertex ) )
{
if ( lane_id == 0 )
{
A_colors[row_id] = current_color + 1;
}
continue;
}
}
}
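// dbg_check_coloring_kernel: debug-only validation. Flags an error (and counts the
// offending pairs in error_found[1]) whenever two distinct neighboring rows carry
// the same non-zero color.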
template< int CTA_SIZE, int WARP_SIZE >
__global__
void
dbg_check_coloring_kernel( const int A_num_rows, const int *A_rows, const int *A_cols, const int *A_colors, const int *A_gtlt_count, int *error_found )
{
const int NUM_WARPS_PER_CTA = CTA_SIZE / WARP_SIZE;
const int NUM_WARPS_PER_GRID = gridDim.x * NUM_WARPS_PER_CTA;
// Thread coordinates.
const int warp_id = utils::warp_id();
const int lane_id = utils::lane_id();
// Row identifier.
int row_id = blockIdx.x * NUM_WARPS_PER_CTA + warp_id;
// Iterate over the rows of the matrix.
for ( ; row_id < A_num_rows ; row_id += NUM_WARPS_PER_GRID )
{
int row_color = A_colors[row_id];
// Iterators over my row.
int row_begin = A_rows[row_id ];
int row_end = A_rows[row_id + 1];
for ( ; row_begin < row_end ; row_begin += WARP_SIZE )
{
// Iterator.
int row_it = row_begin + lane_id;
// Get the column index (if the iterator is valid).
int col_id = -1;
if ( row_it < row_end )
{
col_id = A_cols[row_it];
}
// Get the color of the column.
int col_color = -1;
if ( row_it < row_end && col_id < A_num_rows)
{
col_color = A_colors[col_id];
}
// Is there something wrong ??
if ( row_id != col_id && row_color == col_color && row_color != 0)
{
if ( A_gtlt_count != NULL && !error_found[0] )
{
//printf( "row_id=%d, row_color=%d, col_id=%d, col_color=%d\n", row_id, row_color, col_id, col_color );
/*
// The number of vertices that are greater/smaller than me.
int row_gtlt_count = A_gtlt_count[row_id];
int row_gt_count = row_gtlt_count >> 16;
int row_lt_count = row_gtlt_count & 0xffff;
printf( "row_gt_count=%d, row_gt_count=%d\n", row_gt_count, row_lt_count );
int col_gtlt_count = A_gtlt_count[col_id];
int col_gt_count = col_gtlt_count >> 16;
int col_lt_count = col_gtlt_count & 0xffff;
printf( "col_gt_count=%d, col_gt_count=%d\n", col_gt_count, col_lt_count ); */
}
error_found[0] = 1;
atomicAdd(error_found + 1, 1);
}
}
}
}
// ---------------------------
// Methods
// ---------------------------
template< class T_Config >
Min_Max_2Ring_Matrix_Coloring_Base<T_Config>::Min_Max_2Ring_Matrix_Coloring_Base( AMG_Config &cfg, const std::string &cfg_scope) : MatrixColoring<T_Config>(cfg, cfg_scope)
{
if ( this->m_coloring_level != 1 )
{
FatalError( "Not implemented for coloring_level != 1", AMGX_ERR_NOT_SUPPORTED_TARGET );
}
if ( cfg.AMG_Config::getParameter<IndexType>("determinism_flag", "default") )
{
m_uncolored_fraction = 0.0;
}
else
{
m_uncolored_fraction = cfg.AMG_Config::getParameter<double>("max_uncolored_percentage", cfg_scope);
}
m_weakness_bound = cfg.AMG_Config::getParameter<int>( "weakness_bound", cfg_scope );
m_late_rejection = cfg.AMG_Config::getParameter<int>( "late_rejection", cfg_scope ) != 0;
}
#if !NEW_COLORER_TESTS
// Block version
template< AMGX_VecPrecision V, AMGX_MatPrecision M, AMGX_IndPrecision I >
void
Min_Max_2Ring_Matrix_Coloring<TemplateConfig<AMGX_device, V, M, I> >::colorMatrix( Matrix_d &A )
{
ViewType oldView = A.currentView();
this->m_row_colors.resize( A.row_offsets.size() - 1 );
if (this->m_halo_coloring == SYNC_COLORS) { A.setView(ALL); }
else { A.setViewExterior(); }
const int num_rows = A.get_num_rows();
const int max_uncolored_rows = static_cast<int>( this->m_uncolored_fraction * num_rows );
const int CTA_SIZE = 128;
const int NUM_WARPS_PER_CTA = CTA_SIZE / 32;
const int GRID_SIZE = ::min( 2048, (num_rows + NUM_WARPS_PER_CTA - 1) / NUM_WARPS_PER_CTA );
this->m_num_colors = 1;
thrust::fill( this->m_row_colors.begin(), this->m_row_colors.end(), 0 );
cudaCheckError();
device_vector_alloc<int> gtlt_count( num_rows );
for ( int num_uncolored = num_rows ; num_uncolored > max_uncolored_rows ; )
{
hipLaunchKernelGGL(( count_gtlt_kernel<CTA_SIZE, 32>) , dim3(GRID_SIZE), dim3(CTA_SIZE), 0, 0,
num_rows,
A.row_offsets.raw(),
A.col_indices.raw(),
this->m_row_colors.raw(),
thrust::raw_pointer_cast( >lt_count.front() ), 0);
cudaCheckError();
if ( this->m_late_rejection )
hipLaunchKernelGGL(( color_kernel<CTA_SIZE, 32, true>) , dim3(GRID_SIZE), dim3(CTA_SIZE), 0, 0,
num_rows,
A.row_offsets.raw(),
A.col_indices.raw(),
thrust::raw_pointer_cast( >lt_count.front() ),
this->m_num_colors,
this->m_weakness_bound,
this->m_row_colors.raw() );
else
hipLaunchKernelGGL(( color_kernel<CTA_SIZE, 32, false>) , dim3(GRID_SIZE), dim3(CTA_SIZE), 0, 0,
num_rows,
A.row_offsets.raw(),
A.col_indices.raw(),
thrust::raw_pointer_cast( >lt_count.front() ),
this->m_num_colors,
this->m_weakness_bound,
this->m_row_colors.raw() );
cudaCheckError();
#if 0
device_vector_alloc<int> error_found( 1, 0 );
hipLaunchKernelGGL(( dbg_check_coloring_kernel<CTA_SIZE, 32>) , dim3(GRID_SIZE), dim3(CTA_SIZE), 0, 0,
num_rows,
A.row_offsets.raw(),
A.col_indices.raw(),
this->m_row_colors.raw(),
thrust::raw_pointer_cast( >lt_count.front() ),
thrust::raw_pointer_cast( &error_found.front() ) );
cudaCheckError();
#endif
this->m_num_colors += 2;
num_uncolored = (int) thrust::count_if( this->m_row_colors.begin(), this->m_row_colors.begin() + num_rows, is_zero() );
cudaCheckError();
}
this->m_num_colors = thrust_wrapper::reduce( this->m_row_colors.begin(), this->m_row_colors.begin() + num_rows, 0, thrust::maximum<int>() ) + 1;
cudaCheckError();
#if 0
device_vector_alloc<int> error_found( 1, 0 );
hipLaunchKernelGGL(( dbg_check_coloring_kernel<CTA_SIZE, 32>) , dim3(GRID_SIZE), dim3(CTA_SIZE), 0, 0,
num_rows,
A.row_offsets.raw(),
A.col_indices.raw(),
this->m_row_colors.raw(),
NULL,
thrust::raw_pointer_cast( &error_found.front() ) );
cudaCheckError();
if ( error_found[0] != 0 )
{
std::cout << "INVALID COLORING !!! Two neighbors have the same color!!!" << std::endl;
}
#endif
A.setView(oldView);
}
#else
template< int CTA_SIZE, int WARP_SIZE >
__global__
void
dbg_coloring_histogram_kernel( int *colors_count, const int A_num_rows, const int *A_colors, const int n_colors )
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ unsigned int color_counts[256];
if (threadIdx.x < n_colors)
{
color_counts[threadIdx.x] = 0;
}
__syncthreads();
for (; tid < A_num_rows; tid += blockDim.x * gridDim.x)
{
int col = A_colors[tid];
atomicAdd(color_counts + col, 1);
}
__syncthreads();
if (threadIdx.x < n_colors)
{
atomicAdd(colors_count + threadIdx.x, color_counts[threadIdx.x]);
}
}
template< AMGX_VecPrecision V, AMGX_MatPrecision M, AMGX_IndPrecision I >
void
Min_Max_2Ring_Matrix_Coloring<TemplateConfig<AMGX_device, V, M, I> >::debug_coloring(Matrix_d &A, int step)
{
#if COLORING_DEBUG
const int num_rows = A.get_num_rows();
const int CTA_SIZE = 128;
const int NUM_WARPS_PER_CTA = CTA_SIZE / 32;
const int GRID_SIZE = ::min( 2048, (num_rows + NUM_WARPS_PER_CTA - 1) / NUM_WARPS_PER_CTA );
int num_uncolored = (int) thrust::count_if( this->m_row_colors.begin(), this->m_row_colors.begin() + num_rows, is_zero() );
cudaCheckError();
int maxr = A.row_offsets[1] - A.row_offsets[0];
/*for(int i=2;i<=num_rows;i++)
{
int d=A.row_offsets[i]-A.row_offsets[i-1];
if(d>maxr)
{
maxr=d;
}
}*/
device_vector_alloc<int> error_found( 2, 0 );
error_found[0] = 0;
error_found[1] = 0;
hipLaunchKernelGGL(( dbg_check_coloring_kernel<CTA_SIZE, 32>) , dim3(GRID_SIZE), dim3(CTA_SIZE), 0, 0,
num_rows,
A.row_offsets.raw(),
A.col_indices.raw(),
this->m_row_colors.raw(),
0,//thrust::raw_pointer_cast( >lt_count.front() ),
thrust::raw_pointer_cast( &error_found.front() ) );
cudaCheckError();
{
device_vector_alloc<int> color_histogram(this->m_num_colors + 1);
for (int i = 0; i < color_histogram.size(); i++)
{
color_histogram[i] = 0;
}
hipLaunchKernelGGL(( dbg_coloring_histogram_kernel<CTA_SIZE, 32>) , dim3(GRID_SIZE), dim3(CTA_SIZE), 0, 0, thrust::raw_pointer_cast(&color_histogram.front()), num_rows, this->m_row_colors.raw(), this->m_num_colors + 1);
cudaCheckError();
for (int i = 0; i < color_histogram.size(); i++)
{
std::cout << step << "\t" << "H[" << i << "] = " << color_histogram[i] << std::endl;
}
std::cout << step << "\t" << "Errors=" << error_found[1] << std::endl;
std::cout << step << "\t" << "Uncolored=" << num_uncolored << std::endl;
std::cout << step << "\t" << "Num colors=" << this->m_num_colors << "/" << maxr << std::endl;
}
#endif
}
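// color_kernel_greedy: greedy first-fit variant used when NEW_COLORER_TESTS is
// enabled. Each still-uncolored row gathers the colors already used in its 1-ring
// into a 64-bit mask (bit 64 - c set for color c), OR-reduces that mask across the
// warp with shuffles, and picks the smallest color whose bit is still clear.
// Worked example: if the neighbors already use colors {1, 2, 4}, bits 63, 62 and 60
// are set, the highest set bit of ~mask is bit 61, utils::bfind returns 61 and
// my_color = 64 - 61 = 3, the smallest free color. The min/max test on the gt/lt
// counts decides which rows are allowed to color themselves in this sweep.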
template< int CTA_SIZE, int WARP_SIZE, bool LATE_REJECTION >
__global__
void color_kernel_greedy( const int A_num_rows, const int *A_rows, const int *A_cols, const int *A_gtlt_count, const int current_color, const int weakness_bound, int *A_colors )
{
const int NUM_WARPS_PER_CTA = CTA_SIZE / WARP_SIZE;
const int NUM_WARPS_PER_GRID = gridDim.x * NUM_WARPS_PER_CTA;
// Thread coordinates.
const int warp_id = utils::warp_id();
const int lane_id = utils::lane_id();
// Row identifier.
int row_id = blockIdx.x * NUM_WARPS_PER_CTA + warp_id;
// Iterate over the rows of the matrix.
for ( ; row_id < A_num_rows ; row_id += NUM_WARPS_PER_GRID )
{
int dice = hash_function(row_id, 19881988, 0);
int row_color = A_colors[row_id];
if ( row_color != 0 ) // Already colored!!!
{
continue;
}
// The number of vertices that are greater/smaller than me.
int row_gtlt_count = A_gtlt_count[row_id];
// Split gtlt_count into 2.
int row_gt_count = row_gtlt_count >> 16;
int row_lt_count = row_gtlt_count & 0xffff;
// Do we skip it.
int candidate = 1;
// Predicates. Is a vertex min/max.
int is_max_vertex = true;//row_gt_count <= weakness_bound;
int is_min_vertex = true;//row_lt_count <= weakness_bound;
// Iterators over my row.
int row_begin = A_rows[row_id ];
int row_end = A_rows[row_id + 1];
unsigned long long used_colors = 0ull;
for ( ; row_begin < row_end ; row_begin += WARP_SIZE )
{
// Iterator.
int row_it = row_begin + lane_id;
// Get the column index (if the iterator is valid).
int col_id = -1;
if ( row_it < row_end )
{
col_id = A_cols[row_it];
}
if ( row_it < row_end && col_id < A_num_rows )
{
int col_color = A_colors[col_id];
if ( col_color > 0 )
{
used_colors |= (1ull << (64 - col_color));
}
}
// Get the gt/lt count.
int col_gtlt_count = -1;
if ( row_it < row_end && col_id < A_num_rows )
{
col_gtlt_count = A_gtlt_count[col_id];
}
// Split gtlt_count into 2. col_gt/lt_count == -1 if already colored.
int col_gt_count = col_gtlt_count >> 16;
int col_lt_count = col_gtlt_count & 0xffff;
// Threads determine if they are greater than the row hash.
//if( col_gt_count != -1 )
// is_max_vertex &= col_gt_count < row_gt_count || (col_gt_count == row_gt_count && col_id < row_id);
//if( col_lt_count != -1 )
// is_min_vertex &= col_lt_count /*>*/ < row_lt_count || (col_lt_count == row_lt_count && col_id > row_id);
if ( col_gtlt_count != -1 )
{
is_max_vertex &= col_gt_count > row_gt_count || (col_gt_count == row_gt_count && col_id >= row_id);
is_min_vertex &= col_lt_count > row_lt_count || (col_lt_count == row_lt_count && col_id >= row_id);
}
}
is_min_vertex = false;
//reduce used colors bit by bit.
#pragma unroll
for (int i = WARP_SIZE / 2; i >= 1; i /= 2)
{
int tmp_hi = __double2hiint( __longlong_as_double( used_colors ) );
int tmp_lo = __double2loint( __longlong_as_double( used_colors ) );
tmp_hi = utils::shfl_xor(tmp_hi, i, WARP_SIZE);
tmp_lo = utils::shfl_xor(tmp_lo, i, WARP_SIZE);
long long tmp = __double_as_longlong(__hiloint2double(tmp_hi, tmp_lo));
used_colors |= tmp;
}
int my_color = 64 - utils::bfind( ~used_colors );
if (my_color <= 0) { my_color = 1; }
/*int sets=0;
for(int c=1; c<=64; c++)
{
unsigned long long int b = (1ull << (64-c));
if((~used_colors & b) && sets < (dice % 3)-1)
{
sets++;
my_color = c;
}
}*/
// The warp leader stores the result.
if ( candidate && utils::all( is_max_vertex ) )
{
if ( lane_id == 0 )
{
A_colors[row_id] = my_color;
}
continue;
}
if ( candidate && utils::all( is_min_vertex ) )
{
if ( lane_id == 0 )
{
A_colors[row_id] = my_color + 1;
}
continue;
}
}
}
template< int CTA_SIZE, int WARP_SIZE>
__global__
void color_kernel_reassign_tail_thread( const int A_num_rows, const int *A_rows, const int *A_cols, int num_colors, int first_tail_color, int *A_colors )
{
const int NUM_THREADS_PER_GRID = gridDim.x * CTA_SIZE;
// Row identifier.
int row_id = blockIdx.x * CTA_SIZE + threadIdx.x;
// Iterate over rows.
for ( ; row_id < A_num_rows ; row_id += NUM_THREADS_PER_GRID )
{
unsigned long long used_colors = 0ull;
int row_color = A_colors[row_id];
if ( row_color < first_tail_color ) // Already colored!!!
{
continue;
}
int row_hash = hash_function(row_id, 198719871987L, 0);
int row_begin = A_rows[row_id ];
int row_end = A_rows[row_id + 1];
for ( int row_it = row_begin; row_it < row_end ; row_it++)
{
int col_id = A_cols[row_it];
if (col_id >= A_num_rows) { continue; }
int col_hash = hash_function( col_id, 1987, 0 );
int col_color = A_colors[col_id];
}
}
}
__global__ void unassign_color(int *A_colors, int num_rows)
{
for (int tid = threadIdx.x + blockIdx.x * blockDim.x; tid < num_rows; tid += gridDim.x * blockDim.x)
{
if (tid % 9 == 0)
{
A_colors[tid] = 0;
}
}
}
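// color_step: one greedy coloring pass. It recomputes the gt/lt hash counts with the
// given seed and runs color_kernel_greedy repeatedly until the number of uncolored
// rows drops below the configured max_uncolored_percentage, then sets m_num_colors
// to the maximum assigned color plus one.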
// Block version
template< AMGX_VecPrecision V, AMGX_MatPrecision M, AMGX_IndPrecision I >
void
Min_Max_2Ring_Matrix_Coloring<TemplateConfig<AMGX_device, V, M, I> >::color_step( Matrix_d &A, int seed )
{
ViewType oldView = A.currentView();
this->m_row_colors.resize( A.row_offsets.size() - 1 );
if (this->m_halo_coloring == SYNC_COLORS) { A.setView(ALL); }
else { A.setViewExterior(); }
const int num_rows = A.get_num_rows();
const int max_uncolored_rows = static_cast<int>( this->m_uncolored_fraction * num_rows );
const int CTA_SIZE = 128;
const int NUM_WARPS_PER_CTA = CTA_SIZE / 32;
const int GRID_SIZE = ::min( 2048, (num_rows + NUM_WARPS_PER_CTA - 1) / NUM_WARPS_PER_CTA );
device_vector_alloc<int> gtlt_count( num_rows );
for ( int num_uncolored = num_rows ; num_uncolored > max_uncolored_rows ; )
{
hipLaunchKernelGGL(( count_gtlt_kernel<CTA_SIZE, 32>) , dim3(GRID_SIZE), dim3(CTA_SIZE), 0, 0,
num_rows,
A.row_offsets.raw(),
A.col_indices.raw(),
this->m_row_colors.raw(),
thrust::raw_pointer_cast( >lt_count.front() ),
seed);
cudaCheckError();
if ( this->m_late_rejection )
hipLaunchKernelGGL(( color_kernel_greedy<CTA_SIZE, 32, true>) , dim3(GRID_SIZE), dim3(CTA_SIZE), 0, 0,
num_rows,
A.row_offsets.raw(),
A.col_indices.raw(),
thrust::raw_pointer_cast( >lt_count.front() ),
this->m_num_colors,
this->m_weakness_bound,
this->m_row_colors.raw() );
else
hipLaunchKernelGGL(( color_kernel_greedy<CTA_SIZE, 32, false>) , dim3(GRID_SIZE), dim3(CTA_SIZE), 0, 0,
num_rows,
A.row_offsets.raw(),
A.col_indices.raw(),
thrust::raw_pointer_cast( >lt_count.front() ),
this->m_num_colors,
this->m_weakness_bound,
this->m_row_colors.raw() );
cudaCheckError();
this->m_num_colors += 2;
num_uncolored = (int) thrust::count_if( this->m_row_colors.begin(), this->m_row_colors.begin() + num_rows, is_zero() );
cudaCheckError();
}
this->m_num_colors = thrust_wrapper::reduce( this->m_row_colors.begin(), this->m_row_colors.begin() + num_rows, 0, thrust::maximum<int>() ) + 1;
}
// Block version
template< AMGX_VecPrecision V, AMGX_MatPrecision M, AMGX_IndPrecision I >
void
Min_Max_2Ring_Matrix_Coloring<TemplateConfig<AMGX_device, V, M, I> >::color_matrix( Matrix_d &A )
{
ViewType oldView = A.currentView();
this->m_row_colors.resize( A.row_offsets.size() - 1 );
if (this->m_halo_coloring == SYNC_COLORS) { A.setView(ALL); }
else { A.setViewExterior(); }
this->m_num_colors = 1;
thrust::fill( this->m_row_colors.begin(), this->m_row_colors.end(), 0 );
cudaCheckError();
color_step(A, 0);
A.setView(oldView);
}
#endif
#define AMGX_CASE_LINE(CASE) template class Min_Max_2Ring_Matrix_Coloring_Base<TemplateMode<CASE>::Type>;
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
#define AMGX_CASE_LINE(CASE) template class Min_Max_2Ring_Matrix_Coloring<TemplateMode<CASE>::Type>;
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
} // end namespace amgx
| 025246acf2934d748bfa0fb1d3d411f380580800.cu | /* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <algorithm>
#include <basic_types.h>
#include <util.h>
#include <error.h>
#include <types.h>
#include <matrix_coloring/min_max_2ring.h>
#include <cusp/format.h>
#include <cusp/copy.h>
#include <cusp/detail/random.h>
#include <thrust/count.h>
#include <thrust/extrema.h>
#include <sm_utils.inl>
#define COLORING_DEBUG 1
// Pseudo-random number generator
namespace amgx
{
static __host__ __device__ unsigned int hash_function(unsigned int a, unsigned int seed, unsigned int rows = 0)
{
a ^= seed;
a = (a + 0x7ed55d16) + (a << 12);
a = (a ^ 0xc761c23c) + (a >> 19);
a = (a + 0x165667b1) + (a << 5);
a = (a ^ 0xd3a2646c) + (a << 9);
a = (a + 0xfd7046c5) + (a << 3);
a = (a ^ 0xb55a4f09) + (a >> 16);
return a;
}
struct is_zero
{
__host__ __device__
bool operator()(int x)
{
return x == 0;
}
};
// ---------------------------
// Kernels
// ---------------------------
template< int CTA_SIZE, int WARP_SIZE >
__global__
void count_gtlt_kernel( const int A_num_rows,
const int *__restrict A_rows,
const int *__restrict A_cols,
const int *__restrict A_colors,
int *A_gtlt_count,
const int seed)
{
const int NUM_WARPS_PER_CTA = CTA_SIZE / WARP_SIZE;
const int NUM_WARPS_PER_GRID = gridDim.x * NUM_WARPS_PER_CTA;
// Thread coordinates.
const int warp_id = utils::warp_id();
const int lane_id = utils::lane_id();
// Row identifier.
int row_id = blockIdx.x * NUM_WARPS_PER_CTA + warp_id;
// Iterate over the rows of the matrix.
for ( ; row_id < A_num_rows ; row_id += NUM_WARPS_PER_GRID )
{
int row_color = A_colors[row_id];
if ( row_color != 0 )
{
if ( lane_id == 0 )
{
A_gtlt_count[row_id] = -1;
}
continue;
}
// Hash my row id.
int row_hash = hash_function( row_id, seed );
// The number of vertices that are greater/smaller than me.
int gt_count = 0, lt_count = 0;
// Iterators over my row.
int row_begin = A_rows[row_id ];
int row_end = A_rows[row_id + 1];
for ( ; row_begin < row_end ; row_begin += WARP_SIZE )
{
// Iterator.
int row_it = row_begin + lane_id;
// Get the column index (if the iterator is valid).
int col_id = -1;
if ( row_it < row_end )
{
col_id = A_cols[row_it];
}
// Each thread hashes its column id.
int col_hash = hash_function( col_id, seed );
// Get the color of the column.
int col_color = -1;
if ( row_it < row_end && col_id < A_num_rows)
{
col_color = A_colors[col_id];
}
// Threads determine if they are greater than the row hash.
int gt_pred = col_color == 0 && col_hash > row_hash;
int lt_pred = col_color == 0 && col_hash < row_hash;
// Count greater/smaller neighbors.
gt_count += __popc( utils::ballot( gt_pred ) );
lt_count += __popc( utils::ballot( lt_pred ) );
}
// The warp leader stores the result.
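// Pack both counts into a single int: greater-than count in the high 16 bits, less-than count in the low 16 bits.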
int my_gtlt_count = (gt_count << 16) | lt_count;
if ( lane_id == 0 )
{
A_gtlt_count[row_id] = my_gtlt_count;
}
}
}
template< int CTA_SIZE, int WARP_SIZE, bool LATE_REJECTION >
__global__
void color_kernel( const int A_num_rows, const int *A_rows, const int *A_cols, const int *A_gtlt_count, const int current_color, const int weakness_bound, int *A_colors )
{
const int NUM_WARPS_PER_CTA = CTA_SIZE / WARP_SIZE;
const int NUM_WARPS_PER_GRID = gridDim.x * NUM_WARPS_PER_CTA;
// Thread coordinates.
const int warp_id = utils::warp_id();
const int lane_id = utils::lane_id();
// Row identifier.
int row_id = blockIdx.x * NUM_WARPS_PER_CTA + warp_id;
// Iterate over the rows of the matrix.
for ( ; row_id < A_num_rows ; row_id += NUM_WARPS_PER_GRID )
{
int row_color = A_colors[row_id];
if ( row_color != 0 ) // Already colored!!!
{
continue;
}
// The number of vertices that are greater/smaller than me.
int row_gtlt_count = A_gtlt_count[row_id];
// Split gtlt_count into 2.
int row_gt_count = row_gtlt_count >> 16;
int row_lt_count = row_gtlt_count & 0xffff;
// Min-max algorithm.
if ( row_gt_count == 0 && row_lt_count == 0 )
{
if ( lane_id == 0 )
{
A_colors[row_id] = current_color + (row_id & 1);
}
continue;
}
if ( row_gt_count == 0 )
{
if ( lane_id == 0 )
{
A_colors[row_id] = current_color;
}
continue;
}
if ( row_lt_count == 0 )
{
if ( lane_id == 0 )
{
A_colors[row_id] = current_color + 1;
}
continue;
}
// Do we skip it.
int candidate = 1;
// Predicates. Is a vertex min/max.
int is_max_vertex = row_gt_count <= weakness_bound;
int is_min_vertex = row_lt_count <= weakness_bound;
// Iterators over my row.
int row_begin = A_rows[row_id ];
int row_end = A_rows[row_id + 1];
for ( ; row_begin < row_end ; row_begin += WARP_SIZE )
{
// Iterator.
int row_it = row_begin + lane_id;
// Get the column index (if the iterator is valid).
int col_id = -1;
if ( row_it < row_end )
{
col_id = A_cols[row_it];
}
// Get the color of the column (it could help late rejection).
if ( LATE_REJECTION )
{
int col_color = -1;
if ( row_it < row_end && col_id < A_num_rows)
{
col_color = A_colors[col_id];
}
// Late rejection test.
if ( col_color == current_color )
{
is_max_vertex = 0;
}
if ( col_color == current_color + 1 )
{
is_min_vertex = 0;
}
}
// Get the gt/lt count.
int col_gtlt_count = -1;
if ( row_it < row_end && col_id < A_num_rows )
{
col_gtlt_count = A_gtlt_count[col_id];
}
// Split gtlt_count into 2. col_gt/lt_count == -1 if already colored.
int col_gt_count = col_gtlt_count >> 16;
int col_lt_count = col_gtlt_count & 0xffff;
// Threads determine if they are greater than the row hash.
if ( col_gtlt_count != -1 )
{
is_max_vertex &= col_gt_count > row_gt_count || (col_gt_count == row_gt_count && col_id <= row_id);
is_min_vertex &= col_lt_count > row_lt_count || (col_lt_count == row_lt_count && col_id >= row_id);
}
}
// The warp leader stores the result.
if ( candidate && utils::all( is_max_vertex ) )
{
if ( lane_id == 0 )
{
A_colors[row_id] = current_color;
}
continue;
}
if ( candidate && utils::all( is_min_vertex ) )
{
if ( lane_id == 0 )
{
A_colors[row_id] = current_color + 1;
}
continue;
}
}
}
template< int CTA_SIZE, int WARP_SIZE >
__global__
void
dbg_check_coloring_kernel( const int A_num_rows, const int *A_rows, const int *A_cols, const int *A_colors, const int *A_gtlt_count, int *error_found )
{
const int NUM_WARPS_PER_CTA = CTA_SIZE / WARP_SIZE;
const int NUM_WARPS_PER_GRID = gridDim.x * NUM_WARPS_PER_CTA;
// Thread coordinates.
const int warp_id = utils::warp_id();
const int lane_id = utils::lane_id();
// Row identifier.
int row_id = blockIdx.x * NUM_WARPS_PER_CTA + warp_id;
// Iterate over the rows of the matrix.
for ( ; row_id < A_num_rows ; row_id += NUM_WARPS_PER_GRID )
{
int row_color = A_colors[row_id];
// Iterators over my row.
int row_begin = A_rows[row_id ];
int row_end = A_rows[row_id + 1];
for ( ; row_begin < row_end ; row_begin += WARP_SIZE )
{
// Iterator.
int row_it = row_begin + lane_id;
// Get the column index (if the iterator is valid).
int col_id = -1;
if ( row_it < row_end )
{
col_id = A_cols[row_it];
}
// Get the color of the column.
int col_color = -1;
if ( row_it < row_end && col_id < A_num_rows)
{
col_color = A_colors[col_id];
}
// Is there something wrong ??
if ( row_id != col_id && row_color == col_color && row_color != 0)
{
if ( A_gtlt_count != NULL && !error_found[0] )
{
//printf( "row_id=%d, row_color=%d, col_id=%d, col_color=%d\n", row_id, row_color, col_id, col_color );
/*
// The number of vertices that are greater/smaller than me.
int row_gtlt_count = A_gtlt_count[row_id];
int row_gt_count = row_gtlt_count >> 16;
int row_lt_count = row_gtlt_count & 0xffff;
printf( "row_gt_count=%d, row_gt_count=%d\n", row_gt_count, row_lt_count );
int col_gtlt_count = A_gtlt_count[col_id];
int col_gt_count = col_gtlt_count >> 16;
int col_lt_count = col_gtlt_count & 0xffff;
printf( "col_gt_count=%d, col_gt_count=%d\n", col_gt_count, col_lt_count ); */
}
error_found[0] = 1;
atomicAdd(error_found + 1, 1);
}
}
}
}
// ---------------------------
// Methods
// ---------------------------
template< class T_Config >
Min_Max_2Ring_Matrix_Coloring_Base<T_Config>::Min_Max_2Ring_Matrix_Coloring_Base( AMG_Config &cfg, const std::string &cfg_scope) : MatrixColoring<T_Config>(cfg, cfg_scope)
{
if ( this->m_coloring_level != 1 )
{
FatalError( "Not implemented for coloring_level != 1", AMGX_ERR_NOT_SUPPORTED_TARGET );
}
if ( cfg.AMG_Config::getParameter<IndexType>("determinism_flag", "default") )
{
m_uncolored_fraction = 0.0;
}
else
{
m_uncolored_fraction = cfg.AMG_Config::getParameter<double>("max_uncolored_percentage", cfg_scope);
}
m_weakness_bound = cfg.AMG_Config::getParameter<int>( "weakness_bound", cfg_scope );
m_late_rejection = cfg.AMG_Config::getParameter<int>( "late_rejection", cfg_scope ) != 0;
}
#if !NEW_COLORER_TESTS
// Block version
template< AMGX_VecPrecision V, AMGX_MatPrecision M, AMGX_IndPrecision I >
void
Min_Max_2Ring_Matrix_Coloring<TemplateConfig<AMGX_device, V, M, I> >::colorMatrix( Matrix_d &A )
{
ViewType oldView = A.currentView();
this->m_row_colors.resize( A.row_offsets.size() - 1 );
if (this->m_halo_coloring == SYNC_COLORS) { A.setView(ALL); }
else { A.setViewExterior(); }
const int num_rows = A.get_num_rows();
const int max_uncolored_rows = static_cast<int>( this->m_uncolored_fraction * num_rows );
const int CTA_SIZE = 128;
const int NUM_WARPS_PER_CTA = CTA_SIZE / 32;
const int GRID_SIZE = std::min( 2048, (num_rows + NUM_WARPS_PER_CTA - 1) / NUM_WARPS_PER_CTA );
this->m_num_colors = 1;
thrust::fill( this->m_row_colors.begin(), this->m_row_colors.end(), 0 );
cudaCheckError();
device_vector_alloc<int> gtlt_count( num_rows );
for ( int num_uncolored = num_rows ; num_uncolored > max_uncolored_rows ; )
{
count_gtlt_kernel<CTA_SIZE, 32> <<< GRID_SIZE, CTA_SIZE>>>(
num_rows,
A.row_offsets.raw(),
A.col_indices.raw(),
this->m_row_colors.raw(),
thrust::raw_pointer_cast( &gtlt_count.front() ), 0);
cudaCheckError();
if ( this->m_late_rejection )
color_kernel<CTA_SIZE, 32, true> <<< GRID_SIZE, CTA_SIZE>>>(
num_rows,
A.row_offsets.raw(),
A.col_indices.raw(),
thrust::raw_pointer_cast( &gtlt_count.front() ),
this->m_num_colors,
this->m_weakness_bound,
this->m_row_colors.raw() );
else
color_kernel<CTA_SIZE, 32, false> <<< GRID_SIZE, CTA_SIZE>>>(
num_rows,
A.row_offsets.raw(),
A.col_indices.raw(),
thrust::raw_pointer_cast( &gtlt_count.front() ),
this->m_num_colors,
this->m_weakness_bound,
this->m_row_colors.raw() );
cudaCheckError();
#if 0
device_vector_alloc<int> error_found( 1, 0 );
dbg_check_coloring_kernel<CTA_SIZE, 32> <<< GRID_SIZE, CTA_SIZE>>>(
num_rows,
A.row_offsets.raw(),
A.col_indices.raw(),
this->m_row_colors.raw(),
thrust::raw_pointer_cast( &gtlt_count.front() ),
thrust::raw_pointer_cast( &error_found.front() ) );
cudaCheckError();
#endif
this->m_num_colors += 2;
num_uncolored = (int) thrust::count_if( this->m_row_colors.begin(), this->m_row_colors.begin() + num_rows, is_zero() );
cudaCheckError();
}
this->m_num_colors = thrust_wrapper::reduce( this->m_row_colors.begin(), this->m_row_colors.begin() + num_rows, 0, thrust::maximum<int>() ) + 1;
cudaCheckError();
#if 0
device_vector_alloc<int> error_found( 1, 0 );
dbg_check_coloring_kernel<CTA_SIZE, 32> <<< GRID_SIZE, CTA_SIZE>>>(
num_rows,
A.row_offsets.raw(),
A.col_indices.raw(),
this->m_row_colors.raw(),
NULL,
thrust::raw_pointer_cast( &error_found.front() ) );
cudaCheckError();
if ( error_found[0] != 0 )
{
std::cout << "INVALID COLORING !!! Two neighbors have the same color!!!" << std::endl;
}
#endif
A.setView(oldView);
}
#else
template< int CTA_SIZE, int WARP_SIZE >
__global__
void
dbg_coloring_histogram_kernel( int *colors_count, const int A_num_rows, const int *A_colors, const int n_colors )
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ unsigned int color_counts[256];
if (threadIdx.x < n_colors)
{
color_counts[threadIdx.x] = 0;
}
__syncthreads();
for (; tid < A_num_rows; tid += blockDim.x * gridDim.x)
{
int col = A_colors[tid];
atomicAdd(color_counts + col, 1);
}
__syncthreads();
if (threadIdx.x < n_colors)
{
atomicAdd(colors_count + threadIdx.x, color_counts[threadIdx.x]);
}
}
template< AMGX_VecPrecision V, AMGX_MatPrecision M, AMGX_IndPrecision I >
void
Min_Max_2Ring_Matrix_Coloring<TemplateConfig<AMGX_device, V, M, I> >::debug_coloring(Matrix_d &A, int step)
{
#if COLORING_DEBUG
const int num_rows = A.get_num_rows();
const int CTA_SIZE = 128;
const int NUM_WARPS_PER_CTA = CTA_SIZE / 32;
const int GRID_SIZE = std::min( 2048, (num_rows + NUM_WARPS_PER_CTA - 1) / NUM_WARPS_PER_CTA );
int num_uncolored = (int) thrust::count_if( this->m_row_colors.begin(), this->m_row_colors.begin() + num_rows, is_zero() );
cudaCheckError();
int maxr = A.row_offsets[1] - A.row_offsets[0];
/*for(int i=2;i<=num_rows;i++)
{
int d=A.row_offsets[i]-A.row_offsets[i-1];
if(d>maxr)
{
maxr=d;
}
}*/
device_vector_alloc<int> error_found( 2, 0 );
error_found[0] = 0;
error_found[1] = 0;
dbg_check_coloring_kernel<CTA_SIZE, 32> <<< GRID_SIZE, CTA_SIZE>>>(
num_rows,
A.row_offsets.raw(),
A.col_indices.raw(),
this->m_row_colors.raw(),
0,//thrust::raw_pointer_cast( &gtlt_count.front() ),
thrust::raw_pointer_cast( &error_found.front() ) );
cudaCheckError();
{
device_vector_alloc<int> color_histogram(this->m_num_colors + 1);
for (int i = 0; i < color_histogram.size(); i++)
{
color_histogram[i] = 0;
}
dbg_coloring_histogram_kernel<CTA_SIZE, 32> <<< GRID_SIZE, CTA_SIZE>>>( thrust::raw_pointer_cast(&color_histogram.front()), num_rows, this->m_row_colors.raw(), this->m_num_colors + 1);
cudaCheckError();
for (int i = 0; i < color_histogram.size(); i++)
{
std::cout << step << "\t" << "H[" << i << "] = " << color_histogram[i] << std::endl;
}
std::cout << step << "\t" << "Errors=" << error_found[1] << std::endl;
std::cout << step << "\t" << "Uncolored=" << num_uncolored << std::endl;
std::cout << step << "\t" << "Num colors=" << this->m_num_colors << "/" << maxr << std::endl;
}
#endif
}
template< int CTA_SIZE, int WARP_SIZE, bool LATE_REJECTION >
__global__
void color_kernel_greedy( const int A_num_rows, const int *A_rows, const int *A_cols, const int *A_gtlt_count, const int current_color, const int weakness_bound, int *A_colors )
{
const int NUM_WARPS_PER_CTA = CTA_SIZE / WARP_SIZE;
const int NUM_WARPS_PER_GRID = gridDim.x * NUM_WARPS_PER_CTA;
// Thread coordinates.
const int warp_id = utils::warp_id();
const int lane_id = utils::lane_id();
// Row identifier.
int row_id = blockIdx.x * NUM_WARPS_PER_CTA + warp_id;
// Iterate over the rows of the matrix.
for ( ; row_id < A_num_rows ; row_id += NUM_WARPS_PER_GRID )
{
int dice = hash_function(row_id, 19881988, 0);
int row_color = A_colors[row_id];
if ( row_color != 0 ) // Already colored!!!
{
continue;
}
// The number of vertices that are greater/smaller than me.
int row_gtlt_count = A_gtlt_count[row_id];
// Split gtlt_count into 2.
int row_gt_count = row_gtlt_count >> 16;
int row_lt_count = row_gtlt_count & 0xffff;
// Do we skip it.
int candidate = 1;
// Predicates. Is a vertex min/max.
int is_max_vertex = true;//row_gt_count <= weakness_bound;
int is_min_vertex = true;//row_lt_count <= weakness_bound;
// Iterators over my row.
int row_begin = A_rows[row_id ];
int row_end = A_rows[row_id + 1];
unsigned long long used_colors = 0ull;
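// Bitmask of colors already taken by colored neighbors: a neighbor with color c sets bit (64 - c).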
for ( ; row_begin < row_end ; row_begin += WARP_SIZE )
{
// Iterator.
int row_it = row_begin + lane_id;
// Get the column index (if the iterator is valid).
int col_id = -1;
if ( row_it < row_end )
{
col_id = A_cols[row_it];
}
if ( row_it < row_end && col_id < A_num_rows )
{
int col_color = A_colors[col_id];
if ( col_color > 0 )
{
used_colors |= (1ull << (64 - col_color));
}
}
// Get the gt/lt count.
int col_gtlt_count = -1;
if ( row_it < row_end && col_id < A_num_rows )
{
col_gtlt_count = A_gtlt_count[col_id];
}
// Split gtlt_count into 2. col_gt/lt_count == -1 if already colored.
int col_gt_count = col_gtlt_count >> 16;
int col_lt_count = col_gtlt_count & 0xffff;
// Threads determine if they are greater than the row hash.
//if( col_gt_count != -1 )
// is_max_vertex &= col_gt_count < row_gt_count || (col_gt_count == row_gt_count && col_id < row_id);
//if( col_lt_count != -1 )
// is_min_vertex &= col_lt_count /*>*/ < row_lt_count || (col_lt_count == row_lt_count && col_id > row_id);
if ( col_gtlt_count != -1 )
{
is_max_vertex &= col_gt_count > row_gt_count || (col_gt_count == row_gt_count && col_id >= row_id);
is_min_vertex &= col_lt_count > row_lt_count || (col_lt_count == row_lt_count && col_id >= row_id);
}
}
is_min_vertex = false;
//reduce used colors bit by bit.
#pragma unroll
for (int i = WARP_SIZE / 2; i >= 1; i /= 2)
{
int tmp_hi = __double2hiint( __longlong_as_double( used_colors ) );
int tmp_lo = __double2loint( __longlong_as_double( used_colors ) );
tmp_hi = utils::shfl_xor(tmp_hi, i, WARP_SIZE);
tmp_lo = utils::shfl_xor(tmp_lo, i, WARP_SIZE);
long long tmp = __double_as_longlong(__hiloint2double(tmp_hi, tmp_lo));
used_colors |= tmp;
}
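// bfind returns the most-significant set bit of ~used_colors, i.e. the smallest color index not used by any neighbor.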
int my_color = 64 - utils::bfind( ~used_colors );
if (my_color <= 0) { my_color = 1; }
/*int sets=0;
for(int c=1; c<=64; c++)
{
unsigned long long int b = (1ull << (64-c));
if((~used_colors & b) && sets < (dice % 3)-1)
{
sets++;
my_color = c;
}
}*/
// The warp leader stores the result.
if ( candidate && utils::all( is_max_vertex ) )
{
if ( lane_id == 0 )
{
A_colors[row_id] = my_color;
}
continue;
}
if ( candidate && utils::all( is_min_vertex ) )
{
if ( lane_id == 0 )
{
A_colors[row_id] = my_color + 1;
}
continue;
}
}
}
template< int CTA_SIZE, int WARP_SIZE>
__global__
void color_kernel_reassign_tail_thread( const int A_num_rows, const int *A_rows, const int *A_cols, int num_colors, int first_tail_color, int *A_colors )
{
const int NUM_THREADS_PER_GRID = gridDim.x * CTA_SIZE;
// Row identifier.
int row_id = blockIdx.x * CTA_SIZE + threadIdx.x;
// Iterate over rows.
for ( ; row_id < A_num_rows ; row_id += NUM_THREADS_PER_GRID )
{
unsigned long long used_colors = 0ull;
int row_color = A_colors[row_id];
if ( row_color < first_tail_color ) // Already colored!!!
{
continue;
}
int row_hash = hash_function(row_id, 198719871987L, 0);
int row_begin = A_rows[row_id ];
int row_end = A_rows[row_id + 1];
for ( int row_it = row_begin; row_it < row_end ; row_it++)
{
int col_id = A_cols[row_it];
if (col_id >= A_num_rows) { continue; }
int col_hash = hash_function( col_id, 1987, 0 );
int col_color = A_colors[col_id];
}
}
}
__global__ void unassign_color(int *A_colors, int num_rows)
{
for (int tid = threadIdx.x + blockIdx.x * blockDim.x; tid < num_rows; tid += gridDim.x * blockDim.x)
{
if (tid % 9 == 0)
{
A_colors[tid] = 0;
}
}
}
// Block version
template< AMGX_VecPrecision V, AMGX_MatPrecision M, AMGX_IndPrecision I >
void
Min_Max_2Ring_Matrix_Coloring<TemplateConfig<AMGX_device, V, M, I> >::color_step( Matrix_d &A, int seed )
{
ViewType oldView = A.currentView();
this->m_row_colors.resize( A.row_offsets.size() - 1 );
if (this->m_halo_coloring == SYNC_COLORS) { A.setView(ALL); }
else { A.setViewExterior(); }
const int num_rows = A.get_num_rows();
const int max_uncolored_rows = static_cast<int>( this->m_uncolored_fraction * num_rows );
const int CTA_SIZE = 128;
const int NUM_WARPS_PER_CTA = CTA_SIZE / 32;
const int GRID_SIZE = std::min( 2048, (num_rows + NUM_WARPS_PER_CTA - 1) / NUM_WARPS_PER_CTA );
device_vector_alloc<int> gtlt_count( num_rows );
for ( int num_uncolored = num_rows ; num_uncolored > max_uncolored_rows ; )
{
count_gtlt_kernel<CTA_SIZE, 32> <<< GRID_SIZE, CTA_SIZE>>>(
num_rows,
A.row_offsets.raw(),
A.col_indices.raw(),
this->m_row_colors.raw(),
thrust::raw_pointer_cast( &gtlt_count.front() ),
seed);
cudaCheckError();
if ( this->m_late_rejection )
color_kernel_greedy<CTA_SIZE, 32, true> <<< GRID_SIZE, CTA_SIZE>>>(
num_rows,
A.row_offsets.raw(),
A.col_indices.raw(),
thrust::raw_pointer_cast( &gtlt_count.front() ),
this->m_num_colors,
this->m_weakness_bound,
this->m_row_colors.raw() );
else
color_kernel_greedy<CTA_SIZE, 32, false> <<< GRID_SIZE, CTA_SIZE>>>(
num_rows,
A.row_offsets.raw(),
A.col_indices.raw(),
thrust::raw_pointer_cast( &gtlt_count.front() ),
this->m_num_colors,
this->m_weakness_bound,
this->m_row_colors.raw() );
cudaCheckError();
this->m_num_colors += 2;
num_uncolored = (int) thrust::count_if( this->m_row_colors.begin(), this->m_row_colors.begin() + num_rows, is_zero() );
cudaCheckError();
}
this->m_num_colors = thrust_wrapper::reduce( this->m_row_colors.begin(), this->m_row_colors.begin() + num_rows, 0, thrust::maximum<int>() ) + 1;
}
// Block version
template< AMGX_VecPrecision V, AMGX_MatPrecision M, AMGX_IndPrecision I >
void
Min_Max_2Ring_Matrix_Coloring<TemplateConfig<AMGX_device, V, M, I> >::color_matrix( Matrix_d &A )
{
ViewType oldView = A.currentView();
this->m_row_colors.resize( A.row_offsets.size() - 1 );
if (this->m_halo_coloring == SYNC_COLORS) { A.setView(ALL); }
else { A.setViewExterior(); }
this->m_num_colors = 1;
thrust::fill( this->m_row_colors.begin(), this->m_row_colors.end(), 0 );
cudaCheckError();
color_step(A, 0);
A.setView(oldView);
}
#endif
#define AMGX_CASE_LINE(CASE) template class Min_Max_2Ring_Matrix_Coloring_Base<TemplateMode<CASE>::Type>;
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
#define AMGX_CASE_LINE(CASE) template class Min_Max_2Ring_Matrix_Coloring<TemplateMode<CASE>::Type>;
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
} // end namespace amgx
|
10b5126c9a6126984896da3016cc4f8185a5e716.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "_A_mul_Bs_32.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int mx = 1;
int ns = 1;
float *x = NULL;
hipMalloc(&x, XSIZE*YSIZE);
float *sval = NULL;
hipMalloc(&sval, XSIZE*YSIZE);
int *srow = NULL;
hipMalloc(&srow, XSIZE*YSIZE);
int *scol = NULL;
hipMalloc(&scol, XSIZE*YSIZE);
float *k = NULL;
hipMalloc(&k, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(_A_mul_Bs_32, dim3(gridBlock), dim3(threadBlock), 0, 0, mx, ns, x, sval, srow, scol, k);
hipDeviceSynchronize();
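// warm-up launches so the timed loop below does not include one-time startup cost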
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(_A_mul_Bs_32, dim3(gridBlock), dim3(threadBlock), 0, 0, mx, ns, x, sval, srow, scol, k);
}
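// timed region: 1000 back-to-back launches (no device synchronize before the clock is stopped)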
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(_A_mul_Bs_32, dim3(gridBlock), dim3(threadBlock), 0, 0, mx, ns, x, sval, srow, scol, k);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 10b5126c9a6126984896da3016cc4f8185a5e716.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "_A_mul_Bs_32.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int mx = 1;
int ns = 1;
float *x = NULL;
cudaMalloc(&x, XSIZE*YSIZE);
float *sval = NULL;
cudaMalloc(&sval, XSIZE*YSIZE);
int *srow = NULL;
cudaMalloc(&srow, XSIZE*YSIZE);
int *scol = NULL;
cudaMalloc(&scol, XSIZE*YSIZE);
float *k = NULL;
cudaMalloc(&k, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
_A_mul_Bs_32<<<gridBlock,threadBlock>>>(mx,ns,x,sval,srow,scol,k);
cudaDeviceSynchronize();
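// warm-up launches so the timed loop below does not include one-time startup cost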
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
_A_mul_Bs_32<<<gridBlock,threadBlock>>>(mx,ns,x,sval,srow,scol,k);
}
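// timed region: 1000 back-to-back launches (no device synchronize before the clock is stopped)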
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
_A_mul_Bs_32<<<gridBlock,threadBlock>>>(mx,ns,x,sval,srow,scol,k);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
441b262127193bb8caa9df71a08964abd1553f79.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>
#include "image_template.h"
// helper function for printing out the matrix
void print_matrix(float* matrix, int length)
{
printf("Matrix: ");
for(int i=0; i<length; i++)
{
printf("%f ", matrix[i]);
}
printf("\n");
}
__host__ __device__ int in_bounds(int x, int y, int w, int h)
{
if(x>=0 && x<w && y>=0 && y<h)
return 1;
else
return 0;
}
// generate the gaussian kernel masks
void gen_gaussians(float** gaus, float** gaus_dv, int* width, float sigma)
{
float a = round(2.5*sigma-0.5);
*width = 2*a+1;
float sum=0;
(*gaus) = (float*)malloc((*width)*sizeof(float));
(*gaus_dv) = (float*)malloc((*width)*sizeof(float));
for(int i=0; i<(*width); i++)
{
(*gaus)[i] = exp((-1*(i-a)*(i-a)) / (2*sigma*sigma));
sum += (*gaus)[i];
}
for(int i=0; i<(*width); i++)
(*gaus)[i] /= sum;
sum = 0;
for(int i=0; i<(*width); i++)
{
(*gaus_dv)[i] = -1*(i-a)*exp((-1*(i-a)*(i-a))/(2*sigma*sigma));
sum += -i*(*gaus_dv)[i];
}
for(int i=0; i<(*width); i++)
(*gaus_dv)[i] /= sum;
}
__global__
void convolve(float* gradient, float* image, int img_w, int img_h, float* kernel, int ker_w, int ker_h)
{
// one thread per output pixel: i indexes rows, j indexes columns
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
if(i < img_h && j < img_w)
{
float sum = 0;
for(int k=0; k<ker_h; k++)
{
for(int m=0; m<ker_w; m++)
{
// offsets center the kernel on the current pixel
int offseti = -1*(ker_h/2)+k;
int offsetj = -1*(ker_w/2)+m;
if(in_bounds(j+offsetj, i+offseti, img_w, img_h))
sum += image[(i+offseti)*img_w+(j+offsetj)]*kernel[k*ker_w+m];
}
}
gradient[i*img_w+j] = sum;
}
}
int main(int argc, char** argv)
{
//CPU buffer for original image
float* org_img;
//GPU device buffer for orig image
float* d_org_img;
//CPU buffers for final output
float* horizontal_gradient, *vertical_gradient;
//GPU buffers for final output
float* d_horizontal_gradient, *d_vertical_gradient;
//GPU buffers to hold intermediate convolution results
float* d_temp_horizontal, *d_temp_vertical;
//CPU buffers to hold convolution masks
float* gaussian_kernel, *gaussian_dv_kernel;
//GPU device buffers to store the convolution masks
float* d_gaussian_kernel, *d_gaussian_dv_kernel;
if(argc != 3)
{
printf("./exec <full image path> <sigma:float>");
exit(1);
}
char* img_name = argv[1];
float sigma = atof(argv[2]);
char name[20];
int ker_width;
int width, height;
gen_gaussians(&gaussian_kernel, &gaussian_dv_kernel, &ker_width, sigma);
read_image_template<float>(img_name, &org_img, &width, &height);
hipMalloc((void **)&d_org_img, sizeof(float)*width*height);
hipMalloc((void **)&d_temp_horizontal, sizeof(float)*width*height);
hipMalloc((void **)&d_temp_vertical, sizeof(float)*width*height);
hipMalloc((void **)&d_horizontal_gradient, sizeof(float)*width*height);
hipMalloc((void **)&d_vertical_gradient, sizeof(float)*width*height);
hipMalloc((void **)&d_gaussian_kernel, sizeof(float)*ker_width);
hipMalloc((void **)&d_gaussian_dv_kernel, sizeof(float)*ker_width);
//offload all of the data to GPU device for convolution
hipMemcpy(d_org_img, org_img, sizeof(float)*width*height, hipMemcpyHostToDevice);
hipMemcpy(d_gaussian_kernel, gaussian_kernel, sizeof(float)*ker_width, hipMemcpyHostToDevice);
hipMemcpy(d_gaussian_dv_kernel, gaussian_dv_kernel, sizeof(float)*ker_width, hipMemcpyHostToDevice);
int block_dim = 16;
dim3 dimGrid((height + block_dim - 1) / block_dim, (width + block_dim - 1) / block_dim, 1);
dim3 dimBlock(block_dim, block_dim, 1);
hipLaunchKernelGGL(( convolve), dim3(dimGrid), dim3(dimBlock), 0, 0, d_temp_horizontal, d_org_img, width, height, d_gaussian_kernel, ker_width, 1);
hipLaunchKernelGGL(( convolve), dim3(dimGrid), dim3(dimBlock), 0, 0, d_horizontal_gradient, d_temp_horizontal, width, height, d_gaussian_dv_kernel, 1, ker_width);
hipLaunchKernelGGL(( convolve), dim3(dimGrid), dim3(dimBlock), 0, 0, d_temp_vertical, d_org_img, width, height, d_gaussian_kernel, 1, ker_width);
hipLaunchKernelGGL(( convolve), dim3(dimGrid), dim3(dimBlock), 0, 0, d_vertical_gradient, d_temp_vertical, width, height, d_gaussian_dv_kernel, 1, ker_width);
horizontal_gradient = (float*)malloc(sizeof(float)*width*height);
vertical_gradient = (float*)malloc(sizeof(float)*width*height);
hipMemcpy(horizontal_gradient, d_horizontal_gradient, sizeof(float)*width*height, hipMemcpyDeviceToHost);
hipMemcpy(vertical_gradient, d_vertical_gradient, sizeof(float)*width*height, hipMemcpyDeviceToHost);
hipDeviceSynchronize();
write_image_template<float>((char *)("horizontal_gradient.pgm"), horizontal_gradient, width, height);
write_image_template<float>((char *)("vertical_gradient.pgm"), vertical_gradient, width, height);
return 0;
}
| 441b262127193bb8caa9df71a08964abd1553f79.cu | #include <stdio.h>
#include <cuda.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>
#include "image_template.h"
// helper function for printing out the matrix
void print_matrix(float* matrix, int length)
{
printf("Matrix: ");
for(int i=0; i<length; i++)
{
printf("%f ", matrix[i]);
}
printf("\n");
}
__host__ __device__ int in_bounds(int x, int y, int w, int h)
{
if(x>=0 && x<w && y>=0 && y<h)
return 1;
else
return 0;
}
// generate the gaussian kernel masks
void gen_gaussians(float** gaus, float** gaus_dv, int* width, float sigma)
{
float a = round(2.5*sigma-0.5);
*width = 2*a+1;
float sum=0;
(*gaus) = (float*)malloc((*width)*sizeof(float));
(*gaus_dv) = (float*)malloc((*width)*sizeof(float));
for(int i=0; i<(*width); i++)
{
(*gaus)[i] = exp((-1*(i-a)*(i-a)) / (2*sigma*sigma));
sum += (*gaus)[i];
}
for(int i=0; i<(*width); i++)
(*gaus)[i] /= sum;
sum = 0;
for(int i=0; i<(*width); i++)
{
(*gaus_dv)[i] = -1*(i-a)*exp((-1*(i-a)*(i-a))/(2*sigma*sigma));
sum += -i*(*gaus_dv)[i];
}
for(int i=0; i<(*width); i++)
(*gaus_dv)[i] /= sum;
}
__global__
void convolve(float* gradient, float* image, int img_w, int img_h, float* kernel, int ker_w, int ker_h)
{
// one thread per output pixel: i indexes rows, j indexes columns
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
if(i < img_h && j < img_w)
{
float sum = 0;
for(int k=0; k<ker_h; k++)
{
for(int m=0; m<ker_w; m++)
{
// offsets center the kernel on the current pixel
int offseti = -1*(ker_h/2)+k;
int offsetj = -1*(ker_w/2)+m;
if(in_bounds(j+offsetj, i+offseti, img_w, img_h))
sum += image[(i+offseti)*img_w+(j+offsetj)]*kernel[k*ker_w+m];
}
}
gradient[i*img_w+j] = sum;
}
}
int main(int argc, char** argv)
{
//CPU buffer for original image
float* org_img;
//GPU device buffer for orig image
float* d_org_img;
//CPU buffers for final output
float* horizontal_gradient, *vertical_gradient;
//GPU buffers for final output
float* d_horizontal_gradient, *d_vertical_gradient;
//GPU buffers to hold intermediate convolution results
float* d_temp_horizontal, *d_temp_vertical;
//CPU buffers to hold convolution masks
float* gaussian_kernel, *gaussian_dv_kernel;
//GPU device buffers to store the convolution masks
float* d_gaussian_kernel, *d_gaussian_dv_kernel;
if(argc != 3)
{
printf("./exec <full image path> <sigma:float>");
exit(1);
}
char* img_name = argv[1];
float sigma = atof(argv[2]);
char name[20];
int ker_width;
int width, height;
gen_gaussians(&gaussian_kernel, &gaussian_dv_kernel, &ker_width, sigma);
read_image_template<float>(img_name, &org_img, &width, &height);
cudaMalloc((void **)&d_org_img, sizeof(float)*width*height);
cudaMalloc((void **)&d_temp_horizontal, sizeof(float)*width*height);
cudaMalloc((void **)&d_temp_vertical, sizeof(float)*width*height);
cudaMalloc((void **)&d_horizontal_gradient, sizeof(float)*width*height);
cudaMalloc((void **)&d_vertical_gradient, sizeof(float)*width*height);
cudaMalloc((void **)&d_gaussian_kernel, sizeof(float)*ker_width);
cudaMalloc((void **)&d_gaussian_dv_kernel, sizeof(float)*ker_width);
//offload all of the data to GPU device for convolution
cudaMemcpy(d_org_img, org_img, sizeof(float)*width*height, cudaMemcpyHostToDevice);
cudaMemcpy(d_gaussian_kernel, gaussian_kernel, sizeof(float)*ker_width, cudaMemcpyHostToDevice);
cudaMemcpy(d_gaussian_dv_kernel, gaussian_dv_kernel, sizeof(float)*ker_width, cudaMemcpyHostToDevice);
int block_dim = 16;
dim3 dimGrid((height + block_dim - 1) / block_dim, (width + block_dim - 1) / block_dim, 1);
dim3 dimBlock(block_dim, block_dim, 1);
convolve<<<dimGrid, dimBlock>>>(d_temp_horizontal, d_org_img, width, height, d_gaussian_kernel, ker_width, 1);
convolve<<<dimGrid, dimBlock>>>(d_horizontal_gradient, d_temp_horizontal, width, height, d_gaussian_dv_kernel, 1, ker_width);
convolve<<<dimGrid, dimBlock>>>(d_temp_vertical, d_org_img, width, height, d_gaussian_kernel, 1, ker_width);
convolve<<<dimGrid, dimBlock>>>(d_vertical_gradient, d_temp_vertical, width, height, d_gaussian_dv_kernel, 1, ker_width);
horizontal_gradient = (float*)malloc(sizeof(float)*width*height);
vertical_gradient = (float*)malloc(sizeof(float)*width*height);
cudaMemcpy(horizontal_gradient, d_horizontal_gradient, sizeof(float)*width*height, cudaMemcpyDeviceToHost);
cudaMemcpy(vertical_gradient, d_vertical_gradient, sizeof(float)*width*height, cudaMemcpyDeviceToHost);
cudaThreadSynchronize();
write_image_template<float>((char *)("horizontal_gradient.pgm"), horizontal_gradient, width, height);
write_image_template<float>((char *)("vertical_gradient.pgm"), vertical_gradient, width, height);
return 0;
}
|
f21e41249b11534c2f8b11485351a03b65b588ca.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <stdio.h>
#define CUDA_DEVICE (0)
#define NUM_THREADS (1<<13)
#define BLOCK_DIM (64)
#define GRID_DIM (NUM_THREADS/BLOCK_DIM)
#define NUM_BYTES (NUM_THREADS*4*sizeof(float))
// Compile and run with the commands:
// nvcc float4_test.cu
// ./a.out
//
// Failure occurs on my Tesla C870 card when KERNEL_INVOCATIONS is a large
// number (e.g. 100), and TEST_KERNEL is 1 or 3. Kernels 1 and 3 are those
// which write float4 values to device memory. Failure does not occur on
// my Quadro NVS 290 card for any of the kernels.
//
#define KERNEL_INVOCATIONS (100)
#define TEST_KERNEL (1)
__global__ void testKernel1(float4* g_out, float4* g_in) {
const int idx = blockDim.x*blockIdx.x + threadIdx.x;
g_out[idx] = g_in[idx];
}
__global__ void testKernel2(float* g_out, float4* g_in) {
const int idx = BLOCK_DIM*blockIdx.x + threadIdx.x;
float4 f4 = g_in[idx];
g_out[4*idx+0] = f4.x;
g_out[4*idx+1] = f4.y;
g_out[4*idx+2] = f4.z;
g_out[4*idx+3] = f4.w;
}
__global__ void testKernel3(float4* g_out, float* g_in) {
const int idx = BLOCK_DIM*blockIdx.x + threadIdx.x;
float x = g_in[4*idx+0];
float y = g_in[4*idx+1];
float z = g_in[4*idx+2];
float w = g_in[4*idx+3];
g_out[idx] = make_float4(x, y, z, w);
}
__global__ void testKernel4(float* g_out, float* g_in) {
const int idx = BLOCK_DIM*blockIdx.x + threadIdx.x;
g_out[NUM_THREADS*0 + idx] = g_in[NUM_THREADS*0 + idx];
g_out[NUM_THREADS*1 + idx] = g_in[NUM_THREADS*1 + idx];
g_out[NUM_THREADS*2 + idx] = g_in[NUM_THREADS*2 + idx];
g_out[NUM_THREADS*3 + idx] = g_in[NUM_THREADS*3 + idx];
}
int main( int argc, char** argv) {
hipSetDevice(CUDA_DEVICE);
float *input = (float *)malloc(NUM_BYTES);
float *output = (float *)malloc(NUM_BYTES);
void* d_input;
void* d_output;
hipMalloc(&d_input, NUM_BYTES);
hipMalloc(&d_output, NUM_BYTES);
for (int i = 0; i < NUM_THREADS*4; i++) {
input[i] = i;
}
hipMemcpy(d_input, input, NUM_BYTES, hipMemcpyHostToDevice);
dim3 gridDim(GRID_DIM, 1, 1);
dim3 blockDim(BLOCK_DIM, 1, 1);
for (int i = 0; i < KERNEL_INVOCATIONS; i++) {
switch (TEST_KERNEL) {
case 1:
hipLaunchKernelGGL(( testKernel1) , dim3(gridDim), dim3(blockDim), 0, 0, (float4 *)d_output, (float4 *)d_input);
break;
case 2:
hipLaunchKernelGGL(( testKernel2) , dim3(gridDim), dim3(blockDim), 0, 0, (float *)d_output, (float4 *)d_input);
break;
case 3:
hipLaunchKernelGGL(( testKernel3) , dim3(gridDim), dim3(blockDim), 0, 0, (float4 *)d_output, (float *)d_input);
break;
case 4:
hipLaunchKernelGGL(( testKernel4) , dim3(gridDim), dim3(blockDim), 0, 0, (float *)d_output, (float *)d_input);
break;
}
hipDeviceSynchronize();
}
hipMemcpy(output, d_output, NUM_BYTES, hipMemcpyDeviceToHost);
for (int i = 0; i < NUM_THREADS*4; i++) {
if (output[i] != i) {
printf("KERNEL=%d FAILED: elem #%d = %f\n", TEST_KERNEL, i, output[i]);
}
}
free(input);
free(output);
hipFree(d_input);
hipFree(d_output);
}
| f21e41249b11534c2f8b11485351a03b65b588ca.cu | #include <stdlib.h>
#include <stdio.h>
#define CUDA_DEVICE (0)
#define NUM_THREADS (1<<13)
#define BLOCK_DIM (64)
#define GRID_DIM (NUM_THREADS/BLOCK_DIM)
#define NUM_BYTES (NUM_THREADS*4*sizeof(float))
// Compile and run with the commands:
// nvcc float4_test.cu
// ./a.out
//
// Failure occurs on my Tesla C870 card when KERNEL_INVOCATIONS is a large
// number (e.g. 100), and TEST_KERNEL is 1 or 3. Kernels 1 and 3 are those
// which write float4 values to device memory. Failure does not occur on
// my Quadro NVS 290 card for any of the kernels.
//
#define KERNEL_INVOCATIONS (100)
#define TEST_KERNEL (1)
__global__ void testKernel1(float4* g_out, float4* g_in) {
const int idx = blockDim.x*blockIdx.x + threadIdx.x;
g_out[idx] = g_in[idx];
}
__global__ void testKernel2(float* g_out, float4* g_in) {
const int idx = BLOCK_DIM*blockIdx.x + threadIdx.x;
float4 f4 = g_in[idx];
g_out[4*idx+0] = f4.x;
g_out[4*idx+1] = f4.y;
g_out[4*idx+2] = f4.z;
g_out[4*idx+3] = f4.w;
}
__global__ void testKernel3(float4* g_out, float* g_in) {
const int idx = BLOCK_DIM*blockIdx.x + threadIdx.x;
float x = g_in[4*idx+0];
float y = g_in[4*idx+1];
float z = g_in[4*idx+2];
float w = g_in[4*idx+3];
g_out[idx] = make_float4(x, y, z, w);
}
__global__ void testKernel4(float* g_out, float* g_in) {
const int idx = BLOCK_DIM*blockIdx.x + threadIdx.x;
g_out[NUM_THREADS*0 + idx] = g_in[NUM_THREADS*0 + idx];
g_out[NUM_THREADS*1 + idx] = g_in[NUM_THREADS*1 + idx];
g_out[NUM_THREADS*2 + idx] = g_in[NUM_THREADS*2 + idx];
g_out[NUM_THREADS*3 + idx] = g_in[NUM_THREADS*3 + idx];
}
int main( int argc, char** argv) {
cudaSetDevice(CUDA_DEVICE);
float *input = (float *)malloc(NUM_BYTES);
float *output = (float *)malloc(NUM_BYTES);
void* d_input;
void* d_output;
cudaMalloc(&d_input, NUM_BYTES);
cudaMalloc(&d_output, NUM_BYTES);
for (int i = 0; i < NUM_THREADS*4; i++) {
input[i] = i;
}
cudaMemcpy(d_input, input, NUM_BYTES, cudaMemcpyHostToDevice);
dim3 gridDim(GRID_DIM, 1, 1);
dim3 blockDim(BLOCK_DIM, 1, 1);
for (int i = 0; i < KERNEL_INVOCATIONS; i++) {
switch (TEST_KERNEL) {
case 1:
testKernel1 <<<gridDim, blockDim>>> ((float4 *)d_output, (float4 *)d_input);
break;
case 2:
testKernel2 <<<gridDim, blockDim>>> ((float *)d_output, (float4 *)d_input);
break;
case 3:
testKernel3 <<<gridDim, blockDim>>> ((float4 *)d_output, (float *)d_input);
break;
case 4:
testKernel4 <<<gridDim, blockDim>>> ((float *)d_output, (float *)d_input);
break;
}
cudaThreadSynchronize();
}
cudaMemcpy(output, d_output, NUM_BYTES, cudaMemcpyDeviceToHost);
for (int i = 0; i < NUM_THREADS*4; i++) {
if (output[i] != i) {
printf("KERNEL=%d FAILED: elem #%d = %f\n", TEST_KERNEL, i, output[i]);
}
}
free(input);
free(output);
cudaFree(d_input);
cudaFree(d_output);
}
|
cd7df919fdf6ab6ddc21b9da9baa02572c86db8b.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "common.h"
#include "efficient.h"
#include <iostream>
namespace StreamCompaction {
namespace Efficient {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
__global__ void kernScan1(int n, int d, int* in) {
int k = (blockIdx.x * blockDim.x) + threadIdx.x;
int pow_d_1 = 1 << (d + 1);
int pow_d = 1 << d;
if (k >= n / pow_d_1) {
return;
}
k = k * pow_d_1;
in[k + pow_d_1 - 1] += in[k + pow_d - 1]; // 1 += 0
return;
}
__global__ void kernScan2(int n, int d, int* in) {
int k = (blockIdx.x * blockDim.x) + threadIdx.x;
int pow_d_1 = 1 << (d + 1);
int pow_d = 1 << d;
if (k >= n / pow_d_1) {
return;
}
k = k * pow_d_1;
int t = in[k + pow_d - 1];
in[k + pow_d - 1] = in[k + pow_d_1 - 1];
in[k + pow_d_1 - 1] += t;
return;
}
__global__ void kernPadZero(int idx, int roundup, int* in) {
int k = (blockIdx.x * blockDim.x) + threadIdx.x;
if (k >= idx && k < roundup) {
in[k] = 0;
}
return;
}
void scan(int n, int *odata, const int *idata) {
int blockSize = 128;
int roundup_n = pow(2, ilog2ceil(n));
int* in;
hipMalloc((void**)&in, roundup_n * sizeof(int));
hipMemcpy(in, idata, sizeof(int) * n, hipMemcpyHostToDevice);
timer().startGpuTimer();
dim3 blockPerGrid((roundup_n + blockSize - 1) / blockSize);
kernPadZero << <blockPerGrid, blockSize >> > (n, roundup_n, in);
int num = 0;
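// up-sweep (reduce) phase of the work-efficient Blelloch scan: build partial sums in place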
for (int d = 0; d <= ilog2ceil(n) - 1; d++) {
num = roundup_n / pow(2, d + 1);
dim3 blockPerGridLoop1((num + blockSize - 1) / blockSize);
kernScan1 << <blockPerGridLoop1, blockSize >> > (roundup_n, d, in);
}
//kernPadZero << <blockPerGrid, roundup_n >> > (roundup_n - 1, roundup_n, in);
hipMemset(in + roundup_n - 1, 0, sizeof(int));
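// down-sweep phase: with the last element zeroed, propagate sums back down to produce an exclusive scan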
for (int d = ilog2ceil(n) - 1; d >= 0; d--) {
num = roundup_n / (1 << (d + 1));
dim3 blockPerGridLoop2((num + blockSize - 1) / blockSize);
kernScan2 << <blockPerGridLoop2, blockSize >> > (roundup_n, d, in);
}
timer().endGpuTimer();
hipMemcpy(odata, in, sizeof(int) * n, hipMemcpyDeviceToHost);
hipFree(in);
}
/**
* Performs stream compaction on idata, storing the result into odata.
* All zeroes are discarded.
*
* @param n The number of elements in idata.
* @param odata The array into which to store elements.
* @param idata The array of elements to compact.
* @returns The number of elements remaining after compaction.
*/
int compact(int n, int *odata, const int *idata) {
int blockSize = 128;
int roundup_n = pow(2, ilog2ceil(n));
int* in;
hipMalloc((void**)&in, n * sizeof(int));
int* out;
hipMalloc((void**)&out, n * sizeof(int));
int* scan_res;
hipMalloc((void**)&scan_res, n * sizeof(int));
int* bools;
hipMalloc((void**)&bools, n * sizeof(int));
hipMemcpy(in, idata, sizeof(int) * n, hipMemcpyHostToDevice);
int ctr = 0;
//timer().startGpuTimer();
dim3 blockPerGrid((n + blockSize - 1) / blockSize);
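// compaction pipeline: flag non-zero elements, exclusive-scan the flags to get output positions, then scatter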
StreamCompaction::Common::kernMapToBoolean << <blockPerGrid ,blockSize>> > (n, bools, in);
scan(n, scan_res, bools);
StreamCompaction::Common::kernScatter << <blockPerGrid, blockSize>> > (n, out, in, bools, scan_res);
//timer().endGpuTimer();
int* bools_last = new int[1];
hipMemcpy(bools_last, bools + n - 1, sizeof(int), hipMemcpyDeviceToHost);
int* scan_res_last = new int[1];
hipMemcpy(scan_res_last, scan_res + n - 1, sizeof(int), hipMemcpyDeviceToHost);
if (bools_last[0] == 1) {
ctr = scan_res_last[0] + 1;
}
else {
ctr = scan_res_last[0];
}
hipMemcpy(odata, out, sizeof(int) * n, hipMemcpyDeviceToHost);
hipFree(in);
hipFree(out);
hipFree(scan_res);
hipFree(bools);
delete[] bools_last;
delete[] scan_res_last;
return ctr;
}
}
}
| cd7df919fdf6ab6ddc21b9da9baa02572c86db8b.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include "common.h"
#include "efficient.h"
#include <iostream>
namespace StreamCompaction {
namespace Efficient {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
__global__ void kernScan1(int n, int d, int* in) {
int k = (blockIdx.x * blockDim.x) + threadIdx.x;
int pow_d_1 = 1 << (d + 1);
int pow_d = 1 << d;
if (k >= n / pow_d_1) {
return;
}
k = k * pow_d_1;
in[k + pow_d_1 - 1] += in[k + pow_d - 1]; // 1 += 0
return;
}
__global__ void kernScan2(int n, int d, int* in) {
int k = (blockIdx.x * blockDim.x) + threadIdx.x;
int pow_d_1 = 1 << (d + 1);
int pow_d = 1 << d;
if (k >= n / pow_d_1) {
return;
}
k = k * pow_d_1;
int t = in[k + pow_d - 1];
in[k + pow_d - 1] = in[k + pow_d_1 - 1];
in[k + pow_d_1 - 1] += t;
return;
}
__global__ void kernPadZero(int idx, int roundup, int* in) {
int k = (blockIdx.x * blockDim.x) + threadIdx.x;
if (k >= idx && k < roundup) {
in[k] = 0;
}
return;
}
void scan(int n, int *odata, const int *idata) {
int blockSize = 128;
int roundup_n = pow(2, ilog2ceil(n));
int* in;
cudaMalloc((void**)&in, roundup_n * sizeof(int));
cudaMemcpy(in, idata, sizeof(int) * n, cudaMemcpyHostToDevice);
timer().startGpuTimer();
dim3 blockPerGrid((roundup_n + blockSize - 1) / blockSize);
kernPadZero << <blockPerGrid, blockSize >> > (n, roundup_n, in);
int num = 0;
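// up-sweep (reduce) phase of the work-efficient Blelloch scan: build partial sums in place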
for (int d = 0; d <= ilog2ceil(n) - 1; d++) {
num = roundup_n / pow(2, d + 1);
dim3 blockPerGridLoop1((num + blockSize - 1) / blockSize);
kernScan1 << <blockPerGridLoop1, blockSize >> > (roundup_n, d, in);
}
//kernPadZero << <blockPerGrid, roundup_n >> > (roundup_n - 1, roundup_n, in);
cudaMemset(in + roundup_n - 1, 0, sizeof(int));
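// down-sweep phase: with the last element zeroed, propagate sums back down to produce an exclusive scan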
for (int d = ilog2ceil(n) - 1; d >= 0; d--) {
num = roundup_n / (1 << (d + 1));
dim3 blockPerGridLoop2((num + blockSize - 1) / blockSize);
kernScan2 << <blockPerGridLoop2, blockSize >> > (roundup_n, d, in);
}
timer().endGpuTimer();
cudaMemcpy(odata, in, sizeof(int) * n, cudaMemcpyDeviceToHost);
cudaFree(in);
}
/**
* Performs stream compaction on idata, storing the result into odata.
* All zeroes are discarded.
*
* @param n The number of elements in idata.
* @param odata The array into which to store elements.
* @param idata The array of elements to compact.
* @returns The number of elements remaining after compaction.
*/
int compact(int n, int *odata, const int *idata) {
int blockSize = 128;
int roundup_n = pow(2, ilog2ceil(n));
int* in;
cudaMalloc((void**)&in, n * sizeof(int));
int* out;
cudaMalloc((void**)&out, n * sizeof(int));
int* scan_res;
cudaMalloc((void**)&scan_res, n * sizeof(int));
int* bools;
cudaMalloc((void**)&bools, n * sizeof(int));
cudaMemcpy(in, idata, sizeof(int) * n, cudaMemcpyHostToDevice);
int ctr = 0;
//timer().startGpuTimer();
dim3 blockPerGrid((n + blockSize - 1) / blockSize);
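// compaction pipeline: flag non-zero elements, exclusive-scan the flags to get output positions, then scatter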
StreamCompaction::Common::kernMapToBoolean << <blockPerGrid ,blockSize>> > (n, bools, in);
scan(n, scan_res, bools);
StreamCompaction::Common::kernScatter << <blockPerGrid, blockSize>> > (n, out, in, bools, scan_res);
//timer().endGpuTimer();
int* bools_last = new int[1];
cudaMemcpy(bools_last, bools + n - 1, sizeof(int), cudaMemcpyDeviceToHost);
int* scan_res_last = new int[1];
cudaMemcpy(scan_res_last, scan_res + n - 1, sizeof(int), cudaMemcpyDeviceToHost);
if (bools_last[0] == 1) {
ctr = scan_res_last[0] + 1;
}
else {
ctr = scan_res_last[0];
}
cudaMemcpy(odata, out, sizeof(int) * n, cudaMemcpyDeviceToHost);
cudaFree(in);
cudaFree(out);
cudaFree(scan_res);
cudaFree(bools);
delete[] bools_last;
delete[] scan_res_last;
return ctr;
}
}
}
|
12ec0a2e58ae0038c1cd2ea4b010d2159545495f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__device__ void updateCMax(const int nbrOfGrids, const double *d_u1, const double *d_u2, const double *d_u3, const double *d_gama, double *d_cMax)
{
*d_cMax = 0; int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
double ro, p, u;
double c;
for (int i = index; i < nbrOfGrids; i += stride){
if (d_u1[i] == 0)
continue;
ro = d_u1[i];
u = d_u2[i] / ro;
p = (d_u3[i] - ro * u * u / 2) * (*d_gama - 1);
c = sqrt(*d_gama * abs(p) / ro);
if (*d_cMax < c + abs(u))
*d_cMax = c + abs(u);
}
}
__global__ void updateTau(const int nbrOfGrids, const double *d_u1, const double *d_u2, const double *d_u3, const double *d_gama, double *d_cMax, const double *d_h, const double *d_cfl, double *d_tau) {
updateCMax(nbrOfGrids, d_u1, d_u2, d_u3, d_gama, d_cMax);
*d_tau = *d_cfl * *d_h / *d_cMax;
} | 12ec0a2e58ae0038c1cd2ea4b010d2159545495f.cu | #include "includes.h"
__device__ void updateCMax(const int nbrOfGrids, const double *d_u1, const double *d_u2, const double *d_u3, const double *d_gama, double *d_cMax)
{
*d_cMax = 0; int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
double ro, p, u;
double c;
for (int i = index; i < nbrOfGrids; i += stride){
if (d_u1[i] == 0)
continue;
ro = d_u1[i];
u = d_u2[i] / ro;
p = (d_u3[i] - ro * u * u / 2) * (*d_gama - 1);
c = sqrt(*d_gama * abs(p) / ro);
if (*d_cMax < c + abs(u))
*d_cMax = c + abs(u);
}
}
__global__ void updateTau(const int nbrOfGrids, const double *d_u1, const double *d_u2, const double *d_u3, const double *d_gama, double *d_cMax, const double *d_h, const double *d_cfl, double *d_tau) {
updateCMax(nbrOfGrids, d_u1, d_u2, d_u3, d_gama, d_cMax);
*d_tau = *d_cfl * *d_h / *d_cMax;
} |
5cb32f18f8fbac34e18e77787263e04dc67c9368.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2011, Duane Merrill
* Copyright (c) 2011-2018, NVIDIA CORPORATION
* Copyright (c) 2020 Savely Pototsky (SavaLione)
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/******************************************************************************
* Test of DeviceSelect::If and DevicePartition::If utilities
******************************************************************************/
// Ensure printing of CUDA runtime errors to console
#define CUB_STDERR
#include <stdio.h>
#include <typeinfo>
#include <thrust/device_ptr.h>
#include <thrust/copy.h>
#include <thrust/partition.h>
#include <thrust/iterator/reverse_iterator.h>
#include <newcub/util_allocator.cuh>
#include <newcub/device/device_select.cuh>
#include <newcub/device/device_partition.cuh>
#include <newcub/iterator/counting_input_iterator.cuh>
#include <test_util.h>
using namespace cub;
//---------------------------------------------------------------------
// Globals, constants and typedefs
//---------------------------------------------------------------------
bool g_verbose = false;
int g_timing_iterations = 0;
int g_repeat = 0;
float g_device_giga_bandwidth;
CachingDeviceAllocator g_allocator(true);
// Dispatch types
enum Backend
{
CUB, // CUB method
THRUST, // Thrust method
CDP, // GPU-based (dynamic parallelism) dispatch to CUB method
};
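// The Int2Type<> tag arguments in the Dispatch overloads below select the entrypoint (select vs. partition, flagged vs. predicate) at compile time via overload resolution.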
// Selection functor type
template <typename T>
struct LessThan
{
T compare;
__host__ __device__ __forceinline__
LessThan(T compare) : compare(compare) {}
__host__ __device__ __forceinline__
bool operator()(const T &a) const {
return (a < compare);
}
};
//---------------------------------------------------------------------
// Dispatch to different CUB DeviceSelect entrypoints
//---------------------------------------------------------------------
/**
* Dispatch to select if entrypoint
*/
template <typename InputIteratorT, typename FlagIteratorT, typename SelectOpT, typename OutputIteratorT, typename NumSelectedIteratorT, typename OffsetT>
CUB_RUNTIME_FUNCTION __forceinline__
hipError_t Dispatch(
Int2Type<CUB> dispatch_to,
Int2Type<false> is_flagged,
Int2Type<false> is_partition,
int timing_timing_iterations,
size_t* d_temp_storage_bytes,
hipError_t* d_cdp_error,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
FlagIteratorT d_flags,
OutputIteratorT d_out,
NumSelectedIteratorT d_num_selected_out,
OffsetT num_items,
SelectOpT select_op,
hipStream_t stream,
bool debug_synchronous)
{
hipError_t error = hipSuccess;
for (int i = 0; i < timing_timing_iterations; ++i)
{
error = DeviceSelect::If(d_temp_storage, temp_storage_bytes, d_in, d_out, d_num_selected_out, num_items, select_op, stream, debug_synchronous);
}
return error;
}
/**
* Dispatch to partition if entrypoint
*/
template <typename InputIteratorT, typename FlagIteratorT, typename SelectOpT, typename OutputIteratorT, typename NumSelectedIteratorT, typename OffsetT>
CUB_RUNTIME_FUNCTION __forceinline__
hipError_t Dispatch(
Int2Type<CUB> dispatch_to,
Int2Type<false> is_flagged,
Int2Type<true> is_partition,
int timing_timing_iterations,
size_t* d_temp_storage_bytes,
hipError_t* d_cdp_error,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
FlagIteratorT d_flags,
OutputIteratorT d_out,
NumSelectedIteratorT d_num_selected_out,
OffsetT num_items,
SelectOpT select_op,
hipStream_t stream,
bool debug_synchronous)
{
hipError_t error = hipSuccess;
for (int i = 0; i < timing_timing_iterations; ++i)
{
error = DevicePartition::If(d_temp_storage, temp_storage_bytes, d_in, d_out, d_num_selected_out, num_items, select_op, stream, debug_synchronous);
}
return error;
}
/**
* Dispatch to select flagged entrypoint
*/
template <typename InputIteratorT, typename FlagIteratorT, typename SelectOpT, typename OutputIteratorT, typename NumSelectedIteratorT, typename OffsetT>
CUB_RUNTIME_FUNCTION __forceinline__
hipError_t Dispatch(
Int2Type<CUB> dispatch_to,
Int2Type<true> is_flagged,
Int2Type<false> partition,
int timing_timing_iterations,
size_t* d_temp_storage_bytes,
hipError_t* d_cdp_error,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
FlagIteratorT d_flags,
OutputIteratorT d_out,
NumSelectedIteratorT d_num_selected_out,
OffsetT num_items,
SelectOpT select_op,
hipStream_t stream,
bool debug_synchronous)
{
hipError_t error = hipSuccess;
for (int i = 0; i < timing_timing_iterations; ++i)
{
error = DeviceSelect::Flagged(d_temp_storage, temp_storage_bytes, d_in, d_flags, d_out, d_num_selected_out, num_items, stream, debug_synchronous);
}
return error;
}
/**
* Dispatch to partition flagged entrypoint
*/
template <typename InputIteratorT, typename FlagIteratorT, typename SelectOpT, typename OutputIteratorT, typename NumSelectedIteratorT, typename OffsetT>
CUB_RUNTIME_FUNCTION __forceinline__
hipError_t Dispatch(
Int2Type<CUB> dispatch_to,
Int2Type<true> is_flagged,
Int2Type<true> partition,
int timing_timing_iterations,
size_t* d_temp_storage_bytes,
hipError_t* d_cdp_error,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
FlagIteratorT d_flags,
OutputIteratorT d_out,
NumSelectedIteratorT d_num_selected_out,
OffsetT num_items,
SelectOpT select_op,
hipStream_t stream,
bool debug_synchronous)
{
hipError_t error = hipSuccess;
for (int i = 0; i < timing_timing_iterations; ++i)
{
error = DevicePartition::Flagged(d_temp_storage, temp_storage_bytes, d_in, d_flags, d_out, d_num_selected_out, num_items, stream, debug_synchronous);
}
return error;
}
//---------------------------------------------------------------------
// Dispatch to different Thrust entrypoints
//---------------------------------------------------------------------
/**
* Dispatch to select if entrypoint
*/
template <typename InputIteratorT, typename FlagIteratorT, typename SelectOpT, typename OutputIteratorT, typename NumSelectedIteratorT, typename OffsetT>
__host__ __forceinline__
hipError_t Dispatch(
Int2Type<THRUST> dispatch_to,
Int2Type<false> is_flagged,
Int2Type<false> is_partition,
int timing_timing_iterations,
size_t* d_temp_storage_bytes,
hipError_t* d_cdp_error,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
FlagIteratorT d_flags,
OutputIteratorT d_out,
NumSelectedIteratorT d_num_selected_out,
OffsetT num_items,
SelectOpT select_op,
hipStream_t stream,
bool debug_synchronous)
{
// The input value type
typedef typename std::iterator_traits<InputIteratorT>::value_type InputT;
// The output value type
typedef typename If<(Equals<typename std::iterator_traits<OutputIteratorT>::value_type, void>::VALUE), // OutputT = (if output iterator's value type is void) ?
typename std::iterator_traits<InputIteratorT>::value_type, // ... then the input iterator's value type,
typename std::iterator_traits<OutputIteratorT>::value_type>::Type OutputT; // ... else the output iterator's value type
if (d_temp_storage == 0)
{
temp_storage_bytes = 1;
}
else
{
thrust::device_ptr<OutputT> d_out_wrapper_end;
thrust::device_ptr<InputT> d_in_wrapper(d_in);
thrust::device_ptr<OutputT> d_out_wrapper(d_out);
for (int i = 0; i < timing_timing_iterations; ++i)
{
d_out_wrapper_end = thrust::copy_if(d_in_wrapper, d_in_wrapper + num_items, d_out_wrapper, select_op);
}
OffsetT num_selected = OffsetT(d_out_wrapper_end - d_out_wrapper);
CubDebugExit(hipMemcpy(d_num_selected_out, &num_selected, sizeof(OffsetT), hipMemcpyHostToDevice));
}
return hipSuccess;
}
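// Note on the Thrust baseline above: thrust::copy_if returns an iterator one past the
// last element written, so the selected count is recovered as the iterator difference
// and copied into d_num_selected_out to mirror the CUB interface. A tiny illustrative
// case (assumed values): input {3, 9, 1, 7} with select_op(x) = (x < 5) writes {3, 1}
// to d_out and returns d_out + 2, i.e. num_selected == 2.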
/**
* Dispatch to partition if entrypoint
*/
template <typename InputIteratorT, typename FlagIteratorT, typename SelectOpT, typename OutputIteratorT, typename NumSelectedIteratorT, typename OffsetT>
__host__ __forceinline__
hipError_t Dispatch(
Int2Type<THRUST> dispatch_to,
Int2Type<false> is_flagged,
Int2Type<true> is_partition,
int timing_timing_iterations,
size_t* d_temp_storage_bytes,
hipError_t* d_cdp_error,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
FlagIteratorT d_flags,
OutputIteratorT d_out,
NumSelectedIteratorT d_num_selected_out,
OffsetT num_items,
SelectOpT select_op,
hipStream_t stream,
bool debug_synchronous)
{
// The input value type
typedef typename std::iterator_traits<InputIteratorT>::value_type InputT;
// The output value type
typedef typename If<(Equals<typename std::iterator_traits<OutputIteratorT>::value_type, void>::VALUE), // OutputT = (if output iterator's value type is void) ?
typename std::iterator_traits<InputIteratorT>::value_type, // ... then the input iterator's value type,
typename std::iterator_traits<OutputIteratorT>::value_type>::Type OutputT; // ... else the output iterator's value type
typedef thrust::reverse_iterator<thrust::device_ptr<OutputT> > ReverseOutputIteratorT;
if (d_temp_storage == 0)
{
temp_storage_bytes = 1;
}
else
{
thrust::pair<thrust::device_ptr<OutputT>, ReverseOutputIteratorT> d_out_wrapper_end;
thrust::device_ptr<InputT> d_in_wrapper(d_in);
thrust::device_ptr<OutputT> d_out_wrapper(d_out);
ReverseOutputIteratorT d_out_unselected(d_out_wrapper + num_items);
for (int i = 0; i < timing_timing_iterations; ++i)
{
d_out_wrapper_end = thrust::partition_copy(
d_in_wrapper,
d_in_wrapper + num_items,
d_out_wrapper,
d_out_unselected,
select_op);
}
OffsetT num_selected = OffsetT(d_out_wrapper_end.first - d_out_wrapper);
CubDebugExit(hipMemcpy(d_num_selected_out, &num_selected, sizeof(OffsetT), hipMemcpyHostToDevice));
}
return hipSuccess;
}
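// How this Thrust partition baseline matches DevicePartition's layout: selected items
// are written forward from d_out, while rejected items go through a reverse_iterator
// based at d_out + num_items, so the first rejected item lands in d_out[num_items - 1],
// the second in d_out[num_items - 2], and so on. That is exactly the layout (rejected
// items at the back, in reverse encounter order) that the host reference built by
// Solve() below expects.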
/**
* Dispatch to select flagged entrypoint
*/
template <typename InputIteratorT, typename FlagIteratorT, typename SelectOpT, typename OutputIteratorT, typename NumSelectedIteratorT, typename OffsetT>
__host__ __forceinline__
hipError_t Dispatch(
Int2Type<THRUST> dispatch_to,
Int2Type<true> is_flagged,
Int2Type<false> is_partition,
int timing_timing_iterations,
size_t* d_temp_storage_bytes,
hipError_t* d_cdp_error,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
FlagIteratorT d_flags,
OutputIteratorT d_out,
NumSelectedIteratorT d_num_selected_out,
OffsetT num_items,
SelectOpT select_op,
hipStream_t stream,
bool debug_synchronous)
{
// The flag type
typedef typename std::iterator_traits<FlagIteratorT>::value_type FlagT;
// The input value type
typedef typename std::iterator_traits<InputIteratorT>::value_type InputT;
// The output value type
typedef typename If<(Equals<typename std::iterator_traits<OutputIteratorT>::value_type, void>::VALUE), // OutputT = (if output iterator's value type is void) ?
typename std::iterator_traits<InputIteratorT>::value_type, // ... then the input iterator's value type,
typename std::iterator_traits<OutputIteratorT>::value_type>::Type OutputT; // ... else the output iterator's value type
if (d_temp_storage == 0)
{
temp_storage_bytes = 1;
}
else
{
thrust::device_ptr<OutputT> d_out_wrapper_end;
thrust::device_ptr<InputT> d_in_wrapper(d_in);
thrust::device_ptr<OutputT> d_out_wrapper(d_out);
thrust::device_ptr<FlagT> d_flags_wrapper(d_flags);
for (int i = 0; i < timing_timing_iterations; ++i)
{
d_out_wrapper_end = thrust::copy_if(d_in_wrapper, d_in_wrapper + num_items, d_flags_wrapper, d_out_wrapper, CastOp<bool>());
}
OffsetT num_selected = OffsetT(d_out_wrapper_end - d_out_wrapper);
CubDebugExit(hipMemcpy(d_num_selected_out, &num_selected, sizeof(OffsetT), hipMemcpyHostToDevice));
}
return hipSuccess;
}
/**
* Dispatch to partition flagged entrypoint
*/
template <typename InputIteratorT, typename FlagIteratorT, typename SelectOpT, typename OutputIteratorT, typename NumSelectedIteratorT, typename OffsetT>
__host__ __forceinline__
hipError_t Dispatch(
Int2Type<THRUST> dispatch_to,
Int2Type<true> is_flagged,
Int2Type<true> is_partition,
int timing_timing_iterations,
size_t* d_temp_storage_bytes,
hipError_t* d_cdp_error,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
FlagIteratorT d_flags,
OutputIteratorT d_out,
NumSelectedIteratorT d_num_selected_out,
OffsetT num_items,
SelectOpT select_op,
hipStream_t stream,
bool debug_synchronous)
{
// The flag type
typedef typename std::iterator_traits<FlagIteratorT>::value_type FlagT;
// The input value type
typedef typename std::iterator_traits<InputIteratorT>::value_type InputT;
// The output value type
typedef typename If<(Equals<typename std::iterator_traits<OutputIteratorT>::value_type, void>::VALUE), // OutputT = (if output iterator's value type is void) ?
typename std::iterator_traits<InputIteratorT>::value_type, // ... then the input iterator's value type,
typename std::iterator_traits<OutputIteratorT>::value_type>::Type OutputT; // ... else the output iterator's value type
typedef thrust::reverse_iterator<thrust::device_ptr<OutputT> > ReverseOutputIteratorT;
if (d_temp_storage == 0)
{
temp_storage_bytes = 1;
}
else
{
thrust::pair<thrust::device_ptr<OutputT>, ReverseOutputIteratorT> d_out_wrapper_end;
thrust::device_ptr<InputT> d_in_wrapper(d_in);
thrust::device_ptr<OutputT> d_out_wrapper(d_out);
thrust::device_ptr<FlagT> d_flags_wrapper(d_flags);
ReverseOutputIteratorT d_out_unselected(d_out_wrapper + num_items);
for (int i = 0; i < timing_timing_iterations; ++i)
{
d_out_wrapper_end = thrust::partition_copy(
d_in_wrapper,
d_in_wrapper + num_items,
d_flags_wrapper,
d_out_wrapper,
d_out_unselected,
CastOp<bool>());
}
OffsetT num_selected = OffsetT(d_out_wrapper_end.first - d_out_wrapper);
CubDebugExit(hipMemcpy(d_num_selected_out, &num_selected, sizeof(OffsetT), hipMemcpyHostToDevice));
}
return hipSuccess;
}
//---------------------------------------------------------------------
// CUDA Nested Parallelism Test Kernel
//---------------------------------------------------------------------
/**
* Simple wrapper kernel to invoke DeviceSelect
*/
template <typename InputIteratorT, typename FlagIteratorT, typename SelectOpT, typename OutputIteratorT, typename NumSelectedIteratorT, typename OffsetT, typename IsFlaggedTag, typename IsPartitionTag>
__global__ void CnpDispatchKernel(
IsFlaggedTag is_flagged,
IsPartitionTag is_partition,
int timing_timing_iterations,
size_t* d_temp_storage_bytes,
hipError_t* d_cdp_error,
void* d_temp_storage,
size_t temp_storage_bytes,
InputIteratorT d_in,
FlagIteratorT d_flags,
OutputIteratorT d_out,
NumSelectedIteratorT d_num_selected_out,
OffsetT num_items,
SelectOpT select_op,
bool debug_synchronous)
{
#ifndef CUB_CDP
*d_cdp_error = hipErrorNotSupported;
#else
*d_cdp_error = Dispatch(Int2Type<CUB>(), is_flagged, is_partition, timing_timing_iterations, d_temp_storage_bytes, d_cdp_error,
d_temp_storage, temp_storage_bytes, d_in, d_flags, d_out, d_num_selected_out, num_items, select_op, 0, debug_synchronous);
*d_temp_storage_bytes = temp_storage_bytes;
#endif
}
/**
* Dispatch to CDP kernel
*/
template <typename InputIteratorT, typename FlagIteratorT, typename SelectOpT, typename OutputIteratorT, typename NumSelectedIteratorT, typename OffsetT, typename IsFlaggedTag, typename IsPartitionTag>
hipError_t Dispatch(
Int2Type<CDP> dispatch_to,
IsFlaggedTag is_flagged,
IsPartitionTag is_partition,
int timing_timing_iterations,
size_t* d_temp_storage_bytes,
hipError_t* d_cdp_error,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
FlagIteratorT d_flags,
OutputIteratorT d_out,
NumSelectedIteratorT d_num_selected_out,
OffsetT num_items,
SelectOpT select_op,
hipStream_t stream,
bool debug_synchronous)
{
// Invoke kernel to invoke device-side dispatch
hipLaunchKernelGGL(CnpDispatchKernel, dim3(1), dim3(1), 0, 0, is_flagged, is_partition, timing_timing_iterations, d_temp_storage_bytes, d_cdp_error,
d_temp_storage, temp_storage_bytes, d_in, d_flags, d_out, d_num_selected_out, num_items, select_op, debug_synchronous);

// Copy out temp_storage_bytes
CubDebugExit(hipMemcpy(&temp_storage_bytes, d_temp_storage_bytes, sizeof(size_t) * 1, hipMemcpyDeviceToHost));
// Copy out error
hipError_t retval;
CubDebugExit(hipMemcpy(&retval, d_cdp_error, sizeof(hipError_t) * 1, hipMemcpyDeviceToHost));
return retval;
}
//---------------------------------------------------------------------
// Test generation
//---------------------------------------------------------------------
/**
* Initialize problem
*/
template <typename T>
void Initialize(
T* h_in,
int num_items)
{
for (int i = 0; i < num_items; ++i)
{
// Initialize each item to a randomly selected value from [0..126]
unsigned int value;
RandomBits(value, 0, 0, 7);
if (value == 127)
value = 126;
InitValue(INTEGER_SEED, h_in[i], value);
}
if (g_verbose)
{
printf("Input:\n");
DisplayResults(h_in, num_items);
printf("\n\n");
}
}
/**
* Solve selection problem (and set corresponding flags)
*/
template <
typename InputIteratorT,
typename FlagIteratorT,
typename SelectOpT,
typename T>
int Solve(
InputIteratorT h_in,
SelectOpT select_op,
T* h_reference,
FlagIteratorT h_flags,
int num_items)
{
int num_selected = 0;
for (int i = 0; i < num_items; ++i)
{
if ((h_flags[i] = select_op(h_in[i])))
{
h_reference[num_selected] = h_in[i];
num_selected++;
}
else
{
h_reference[num_items - (i - num_selected) - 1] = h_in[i];
}
}
return num_selected;
}
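// Worked example of the reference layout (assumed values): h_in = {3, 9, 1, 7} with
// select_op(x) = (x < 5) yields h_flags = {1, 0, 1, 0}, num_selected = 2, and
// h_reference = {3, 1, 7, 9}: selected items packed at the front in input order,
// rejected items at the back in reverse input order. DeviceSelect is checked against
// the front num_selected entries only, DevicePartition against the full array.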
/**
* Test DeviceSelect for a given problem input
*/
template <
Backend BACKEND,
bool IS_FLAGGED,
bool IS_PARTITION,
typename DeviceInputIteratorT,
typename FlagT,
typename SelectOpT,
typename T>
void Test(
DeviceInputIteratorT d_in,
FlagT* h_flags,
SelectOpT select_op,
T* h_reference,
int num_selected,
int num_items)
{
// Allocate device flags, output, and num-selected
FlagT* d_flags = NULL;
T* d_out = NULL;
int* d_num_selected_out = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_flags, sizeof(FlagT) * num_items));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_out, sizeof(T) * num_items));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_num_selected_out, sizeof(int)));
// Allocate CDP device arrays
size_t* d_temp_storage_bytes = NULL;
hipError_t* d_cdp_error = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_temp_storage_bytes, sizeof(size_t) * 1));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_cdp_error, sizeof(hipError_t) * 1));
// Allocate temporary storage
void *d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
CubDebugExit(Dispatch(Int2Type<BACKEND>(), Int2Type<IS_FLAGGED>(), Int2Type<IS_PARTITION>(), 1, d_temp_storage_bytes, d_cdp_error,
d_temp_storage, temp_storage_bytes, d_in, d_flags, d_out, d_num_selected_out, num_items, select_op, 0, true));
CubDebugExit(g_allocator.DeviceAllocate(&d_temp_storage, temp_storage_bytes));
// Copy flags and clear device output array
CubDebugExit(hipMemcpy(d_flags, h_flags, sizeof(FlagT) * num_items, hipMemcpyHostToDevice));
CubDebugExit(hipMemset(d_out, 0, sizeof(T) * num_items));
CubDebugExit(hipMemset(d_num_selected_out, 0, sizeof(int)));
// Run warmup/correctness iteration
CubDebugExit(Dispatch(Int2Type<BACKEND>(), Int2Type<IS_FLAGGED>(), Int2Type<IS_PARTITION>(), 1, d_temp_storage_bytes, d_cdp_error,
d_temp_storage, temp_storage_bytes, d_in, d_flags, d_out, d_num_selected_out, num_items, select_op, 0, true));
// Check for correctness (and display results, if specified)
int compare1 = (IS_PARTITION) ?
CompareDeviceResults(h_reference, d_out, num_items, true, g_verbose) :
CompareDeviceResults(h_reference, d_out, num_selected, true, g_verbose);
printf("\t Data %s\n", compare1 ? "FAIL" : "PASS");
int compare2 = CompareDeviceResults(&num_selected, d_num_selected_out, 1, true, g_verbose);
printf("\t Count %s\n", compare2 ? "FAIL" : "PASS");
// Flush any stdout/stderr
fflush(stdout);
fflush(stderr);
// Performance
GpuTimer gpu_timer;
gpu_timer.Start();
CubDebugExit(Dispatch(Int2Type<BACKEND>(), Int2Type<IS_FLAGGED>(), Int2Type<IS_PARTITION>(), g_timing_iterations, d_temp_storage_bytes, d_cdp_error,
d_temp_storage, temp_storage_bytes, d_in, d_flags, d_out, d_num_selected_out, num_items, select_op, 0, false));
gpu_timer.Stop();
float elapsed_millis = gpu_timer.ElapsedMillis();
// Display performance
if (g_timing_iterations > 0)
{
float avg_millis = elapsed_millis / g_timing_iterations;
float giga_rate = float(num_items) / avg_millis / 1000.0f / 1000.0f;
int num_output_items = (IS_PARTITION) ? num_items : num_selected;
int num_flag_items = (IS_FLAGGED) ? num_items : 0;
size_t num_bytes = sizeof(T) * (num_items + num_output_items) + sizeof(FlagT) * num_flag_items;
float giga_bandwidth = float(num_bytes) / avg_millis / 1000.0f / 1000.0f;
printf(", %.3f avg ms, %.3f billion items/s, %.3f logical GB/s, %.1f%% peak", avg_millis, giga_rate, giga_bandwidth, giga_bandwidth / g_device_giga_bandwidth * 100.0);
}
printf("\n\n");
// Flush any stdout/stderr
fflush(stdout);
fflush(stderr);
// Cleanup
if (d_flags) CubDebugExit(g_allocator.DeviceFree(d_flags));
if (d_out) CubDebugExit(g_allocator.DeviceFree(d_out));
if (d_num_selected_out) CubDebugExit(g_allocator.DeviceFree(d_num_selected_out));
if (d_temp_storage_bytes) CubDebugExit(g_allocator.DeviceFree(d_temp_storage_bytes));
if (d_cdp_error) CubDebugExit(g_allocator.DeviceFree(d_cdp_error));
if (d_temp_storage) CubDebugExit(g_allocator.DeviceFree(d_temp_storage));
// Correctness asserts
AssertEquals(0, compare1 | compare2);
}
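// Rough arithmetic behind the "logical GB/s" figure above (illustrative numbers):
// for select-if on 32,000,000 int items with about 50% selected, num_bytes =
// 4 * (32e6 + 16e6) = 192 MB per iteration, so an average of 0.5 ms per iteration
// reports 192e6 / 0.5 / 1e6 = 384 logical GB/s. The flag bytes are counted only for
// the flagged variants, and partition always counts num_items output items.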
/**
* Test on pointer type
*/
template <
Backend BACKEND,
bool IS_FLAGGED,
bool IS_PARTITION,
typename T>
void TestPointer(
int num_items,
float select_ratio)
{
typedef char FlagT;
// Allocate host arrays
T* h_in = new T[num_items];
FlagT* h_flags = new FlagT[num_items];
T* h_reference = new T[num_items];
// Initialize input
Initialize(h_in, num_items);
// Select a comparison value that is select_ratio through the space of [0,127]
T compare;
if (select_ratio <= 0.0)
InitValue(INTEGER_SEED, compare, 0); // select none
else if (select_ratio >= 1.0)
InitValue(INTEGER_SEED, compare, 127); // select all
else
InitValue(INTEGER_SEED, compare, int(double(double(127) * select_ratio)));
LessThan<T> select_op(compare);
int num_selected = Solve(h_in, select_op, h_reference, h_flags, num_items);
if (g_verbose) std::cout << "\nComparison item: " << compare << "\n";
printf("\nPointer %s hipcub::%s::%s %d items, %d selected (select ratio %.3f), %s %d-byte elements\n",
(IS_PARTITION) ? "DevicePartition" : "DeviceSelect",
(IS_FLAGGED) ? "Flagged" : "If",
(BACKEND == CDP) ? "CDP CUB" : (BACKEND == THRUST) ? "Thrust" : "CUB",
num_items, num_selected, float(num_selected) / num_items, typeid(T).name(), (int) sizeof(T));
fflush(stdout);
// Allocate problem device arrays
T *d_in = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_in, sizeof(T) * num_items));
// Initialize device input
CubDebugExit(hipMemcpy(d_in, h_in, sizeof(T) * num_items, hipMemcpyHostToDevice));
// Run Test
Test<BACKEND, IS_FLAGGED, IS_PARTITION>(d_in, h_flags, select_op, h_reference, num_selected, num_items);
// Cleanup
if (h_in) delete[] h_in;
if (h_reference) delete[] h_reference;
if (h_flags) delete[] h_flags;
if (d_in) CubDebugExit(g_allocator.DeviceFree(d_in));
}
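// Note on the comparison value: inputs are drawn uniformly from [0..126], so with
// e.g. select_ratio = 0.5 the compare value becomes int(127 * 0.5) = 63 and
// LessThan<T>(63) keeps roughly half of the items; ratios of 0.0 and 1.0 degenerate
// to "select none" and "select all", which are handled explicitly above.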
/**
* Test on iterator type
*/
template <
Backend BACKEND,
bool IS_FLAGGED,
bool IS_PARTITION,
typename T>
void TestIterator(
int num_items,
float select_ratio)
{
typedef char FlagT;
// Allocate host arrays
T* h_reference = new T[num_items];
FlagT* h_flags = new FlagT[num_items];
// Use counting iterator as the input
CountingInputIterator<T, int> h_in(0);
// Select a comparison value that is select_ratio through the space of [0,127]
T compare;
if (select_ratio <= 0.0)
InitValue(INTEGER_SEED, compare, 0); // select none
else if (select_ratio >= 1.0)
InitValue(INTEGER_SEED, compare, 127); // select all
else
InitValue(INTEGER_SEED, compare, int(double(double(127) * select_ratio)));
LessThan<T> select_op(compare);
int num_selected = Solve(h_in, select_op, h_reference, h_flags, num_items);
if (g_verbose) std::cout << "\nComparison item: " << compare << "\n";
printf("\nIterator %s hipcub::%s::%s %d items, %d selected (select ratio %.3f), %s %d-byte elements\n",
(IS_PARTITION) ? "DevicePartition" : "DeviceSelect",
(IS_FLAGGED) ? "Flagged" : "If",
(BACKEND == CDP) ? "CDP CUB" : (BACKEND == THRUST) ? "Thrust" : "CUB",
num_items, num_selected, float(num_selected) / num_items, typeid(T).name(), (int) sizeof(T));
fflush(stdout);
// Run Test
Test<BACKEND, IS_FLAGGED, IS_PARTITION>(h_in, h_flags, select_op, h_reference, num_selected, num_items);
// Cleanup
if (h_reference) delete[] h_reference;
if (h_flags) delete[] h_flags;
}
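// The iterator variant feeds the device dispatch a CountingInputIterator, which simply
// yields 0, 1, 2, ... on demand, so h_in[i] == i and the same host-side Solve() loop
// can build the reference without ever materializing an input array on the host or
// on the device.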
/**
* Test different selection ratios
*/
template <
Backend BACKEND,
bool IS_FLAGGED,
bool IS_PARTITION,
typename T>
void Test(
int num_items)
{
for (float select_ratio = 0.0f; select_ratio <= 1.0f; select_ratio += 0.2f)
{
TestPointer<BACKEND, IS_FLAGGED, IS_PARTITION, T>(num_items, select_ratio);
}
}
/**
* Test (select vs. partition) and (flagged vs. functor)
*/
template <
Backend BACKEND,
typename T>
void TestMethod(
int num_items)
{
// Functor
Test<BACKEND, false, false, T>(num_items);
Test<BACKEND, false, true, T>(num_items);
// Flagged
Test<BACKEND, true, false, T>(num_items);
Test<BACKEND, true, true, T>(num_items);
}
/**
* Test different dispatch
*/
template <
typename T>
void TestOp(
int num_items)
{
TestMethod<CUB, T>(num_items);
#ifdef CUB_CDP
TestMethod<CDP, T>(num_items);
#endif
}
/**
* Test different input sizes
*/
template <typename T>
void Test(
int num_items)
{
if (num_items < 0)
{
TestOp<T>(0);
TestOp<T>(1);
TestOp<T>(100);
TestOp<T>(10000);
TestOp<T>(1000000);
}
else
{
TestOp<T>(num_items);
}
}
/**
* Test select/partition on pointer types
*/
template <typename T>
void ComparePointer(
int num_items,
float select_ratio)
{
printf("-- Select-if ----------------------------\n");
TestPointer<CUB, false, false, T>(num_items, select_ratio);
TestPointer<THRUST, false, false, T>(num_items, select_ratio);
printf("-- Partition-if ----------------------------\n");
TestPointer<CUB, false, true, T>(num_items, select_ratio);
TestPointer<THRUST, false, true, T>(num_items, select_ratio);
printf("-- Select-flagged ----------------------------\n");
TestPointer<CUB, true, false, T>(num_items, select_ratio);
TestPointer<THRUST, true, false, T>(num_items, select_ratio);
printf("-- Partition-flagged ----------------------------\n");
TestPointer<CUB, true, true, T>(num_items, select_ratio);
TestPointer<THRUST, true, true, T>(num_items, select_ratio);
}
//---------------------------------------------------------------------
// Main
//---------------------------------------------------------------------
/**
* Main
*/
int main(int argc, char** argv)
{
int num_items = -1;
float select_ratio = 0.5;
// Initialize command line
CommandLineArgs args(argc, argv);
g_verbose = args.CheckCmdLineFlag("v");
args.GetCmdLineArgument("n", num_items);
args.GetCmdLineArgument("i", g_timing_iterations);
args.GetCmdLineArgument("repeat", g_repeat);
args.GetCmdLineArgument("ratio", select_ratio);
// Print usage
if (args.CheckCmdLineFlag("help"))
{
printf("%s "
"[--n=<input items> "
"[--i=<timing iterations> "
"[--device=<device-id>] "
"[--ratio=<selection ratio, default 0.5>] "
"[--repeat=<repetitions of entire test suite>] "
"[--v] "
"[--cdp] "
"\n", argv[0]);
exit(0);
}
// Initialize device
CubDebugExit(args.DeviceInit());
g_device_giga_bandwidth = args.device_giga_bandwidth;
printf("\n");
#ifdef QUICKER_TEST
// Compile/run basic CUB test
if (num_items < 0) num_items = 32000000;
printf("-- Select-if ----------------------------\n");
TestPointer<CUB, false, false, int>(num_items, select_ratio);
printf("-- Partition-if ----------------------------\n");
TestPointer<CUB, false, true, int>(num_items, select_ratio);
printf("-- Select-flagged ----------------------------\n");
TestPointer<CUB, true, false, int>(num_items, select_ratio);
printf("-- Partition-flagged ----------------------------\n");
TestPointer<CUB, true, true, int>(num_items, select_ratio);
#elif defined(QUICK_TEST)
// Get device ordinal
int device_ordinal;
CubDebugExit(hipGetDevice(&device_ordinal));
// Get device SM version
int sm_version;
CubDebugExit(SmVersion(sm_version, device_ordinal));
// Compile/run quick tests
if (num_items < 0) num_items = 32000000;
printf("-- Iterator ----------------------------\n");
TestIterator<CUB, false, false, int>(num_items, select_ratio);
ComparePointer<char>( num_items * ((sm_version <= 130) ? 1 : 4), select_ratio);
ComparePointer<short>( num_items * ((sm_version <= 130) ? 1 : 2), select_ratio);
ComparePointer<int>( num_items, select_ratio);
ComparePointer<long long>( num_items / 2, select_ratio);
ComparePointer<TestFoo>( num_items / 4, select_ratio);
#else
// Compile/run thorough tests
for (int i = 0; i <= g_repeat; ++i)
{
// Test different input types
Test<unsigned char>(num_items);
Test<unsigned short>(num_items);
Test<unsigned int>(num_items);
Test<unsigned long long>(num_items);
Test<uchar2>(num_items);
Test<ushort2>(num_items);
Test<uint2>(num_items);
Test<ulonglong2>(num_items);
Test<uchar4>(num_items);
Test<ushort4>(num_items);
Test<uint4>(num_items);
Test<ulonglong4>(num_items);
Test<TestFoo>(num_items);
Test<TestBar>(num_items);
}
#endif
return 0;
}
| 5cb32f18f8fbac34e18e77787263e04dc67c9368.cu | /*
* Copyright (c) 2011, Duane Merrill
* Copyright (c) 2011-2018, NVIDIA CORPORATION
* Copyright (c) 2020 Savely Pototsky (SavaLione)
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/******************************************************************************
* Test of DeviceSelect::If and DevicePartition::If utilities
******************************************************************************/
// Ensure printing of CUDA runtime errors to console
#define CUB_STDERR
#include <stdio.h>
#include <typeinfo>
#include <thrust/device_ptr.h>
#include <thrust/copy.h>
#include <thrust/partition.h>
#include <thrust/iterator/reverse_iterator.h>
#include <newcub/util_allocator.cuh>
#include <newcub/device/device_select.cuh>
#include <newcub/device/device_partition.cuh>
#include <newcub/iterator/counting_input_iterator.cuh>
#include <test_util.h>
using namespace cub;
//---------------------------------------------------------------------
// Globals, constants and typedefs
//---------------------------------------------------------------------
bool g_verbose = false;
int g_timing_iterations = 0;
int g_repeat = 0;
float g_device_giga_bandwidth;
CachingDeviceAllocator g_allocator(true);
// Dispatch types
enum Backend
{
CUB, // CUB method
THRUST, // Thrust method
CDP, // GPU-based (dynamic parallelism) dispatch to CUB method
};
// Selection functor type
template <typename T>
struct LessThan
{
T compare;
__host__ __device__ __forceinline__
LessThan(T compare) : compare(compare) {}
__host__ __device__ __forceinline__
bool operator()(const T &a) const {
return (a < compare);
}
};
//---------------------------------------------------------------------
// Dispatch to different CUB DeviceSelect entrypoints
//---------------------------------------------------------------------
/**
* Dispatch to select if entrypoint
*/
template <typename InputIteratorT, typename FlagIteratorT, typename SelectOpT, typename OutputIteratorT, typename NumSelectedIteratorT, typename OffsetT>
CUB_RUNTIME_FUNCTION __forceinline__
cudaError_t Dispatch(
Int2Type<CUB> dispatch_to,
Int2Type<false> is_flagged,
Int2Type<false> is_partition,
int timing_timing_iterations,
size_t* d_temp_storage_bytes,
cudaError_t* d_cdp_error,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
FlagIteratorT d_flags,
OutputIteratorT d_out,
NumSelectedIteratorT d_num_selected_out,
OffsetT num_items,
SelectOpT select_op,
cudaStream_t stream,
bool debug_synchronous)
{
cudaError_t error = cudaSuccess;
for (int i = 0; i < timing_timing_iterations; ++i)
{
error = DeviceSelect::If(d_temp_storage, temp_storage_bytes, d_in, d_out, d_num_selected_out, num_items, select_op, stream, debug_synchronous);
}
return error;
}
/**
* Dispatch to partition if entrypoint
*/
template <typename InputIteratorT, typename FlagIteratorT, typename SelectOpT, typename OutputIteratorT, typename NumSelectedIteratorT, typename OffsetT>
CUB_RUNTIME_FUNCTION __forceinline__
cudaError_t Dispatch(
Int2Type<CUB> dispatch_to,
Int2Type<false> is_flagged,
Int2Type<true> is_partition,
int timing_timing_iterations,
size_t* d_temp_storage_bytes,
cudaError_t* d_cdp_error,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
FlagIteratorT d_flags,
OutputIteratorT d_out,
NumSelectedIteratorT d_num_selected_out,
OffsetT num_items,
SelectOpT select_op,
cudaStream_t stream,
bool debug_synchronous)
{
cudaError_t error = cudaSuccess;
for (int i = 0; i < timing_timing_iterations; ++i)
{
error = DevicePartition::If(d_temp_storage, temp_storage_bytes, d_in, d_out, d_num_selected_out, num_items, select_op, stream, debug_synchronous);
}
return error;
}
/**
* Dispatch to select flagged entrypoint
*/
template <typename InputIteratorT, typename FlagIteratorT, typename SelectOpT, typename OutputIteratorT, typename NumSelectedIteratorT, typename OffsetT>
CUB_RUNTIME_FUNCTION __forceinline__
cudaError_t Dispatch(
Int2Type<CUB> dispatch_to,
Int2Type<true> is_flagged,
Int2Type<false> partition,
int timing_timing_iterations,
size_t* d_temp_storage_bytes,
cudaError_t* d_cdp_error,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
FlagIteratorT d_flags,
OutputIteratorT d_out,
NumSelectedIteratorT d_num_selected_out,
OffsetT num_items,
SelectOpT select_op,
cudaStream_t stream,
bool debug_synchronous)
{
cudaError_t error = cudaSuccess;
for (int i = 0; i < timing_timing_iterations; ++i)
{
error = DeviceSelect::Flagged(d_temp_storage, temp_storage_bytes, d_in, d_flags, d_out, d_num_selected_out, num_items, stream, debug_synchronous);
}
return error;
}
/**
* Dispatch to partition flagged entrypoint
*/
template <typename InputIteratorT, typename FlagIteratorT, typename SelectOpT, typename OutputIteratorT, typename NumSelectedIteratorT, typename OffsetT>
CUB_RUNTIME_FUNCTION __forceinline__
cudaError_t Dispatch(
Int2Type<CUB> dispatch_to,
Int2Type<true> is_flagged,
Int2Type<true> partition,
int timing_timing_iterations,
size_t* d_temp_storage_bytes,
cudaError_t* d_cdp_error,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
FlagIteratorT d_flags,
OutputIteratorT d_out,
NumSelectedIteratorT d_num_selected_out,
OffsetT num_items,
SelectOpT select_op,
cudaStream_t stream,
bool debug_synchronous)
{
cudaError_t error = cudaSuccess;
for (int i = 0; i < timing_timing_iterations; ++i)
{
error = DevicePartition::Flagged(d_temp_storage, temp_storage_bytes, d_in, d_flags, d_out, d_num_selected_out, num_items, stream, debug_synchronous);
}
return error;
}
//---------------------------------------------------------------------
// Dispatch to different Thrust entrypoints
//---------------------------------------------------------------------
/**
* Dispatch to select if entrypoint
*/
template <typename InputIteratorT, typename FlagIteratorT, typename SelectOpT, typename OutputIteratorT, typename NumSelectedIteratorT, typename OffsetT>
__host__ __forceinline__
cudaError_t Dispatch(
Int2Type<THRUST> dispatch_to,
Int2Type<false> is_flagged,
Int2Type<false> is_partition,
int timing_timing_iterations,
size_t* d_temp_storage_bytes,
cudaError_t* d_cdp_error,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
FlagIteratorT d_flags,
OutputIteratorT d_out,
NumSelectedIteratorT d_num_selected_out,
OffsetT num_items,
SelectOpT select_op,
cudaStream_t stream,
bool debug_synchronous)
{
// The input value type
typedef typename std::iterator_traits<InputIteratorT>::value_type InputT;
// The output value type
typedef typename If<(Equals<typename std::iterator_traits<OutputIteratorT>::value_type, void>::VALUE), // OutputT = (if output iterator's value type is void) ?
typename std::iterator_traits<InputIteratorT>::value_type, // ... then the input iterator's value type,
typename std::iterator_traits<OutputIteratorT>::value_type>::Type OutputT; // ... else the output iterator's value type
if (d_temp_storage == 0)
{
temp_storage_bytes = 1;
}
else
{
thrust::device_ptr<OutputT> d_out_wrapper_end;
thrust::device_ptr<InputT> d_in_wrapper(d_in);
thrust::device_ptr<OutputT> d_out_wrapper(d_out);
for (int i = 0; i < timing_timing_iterations; ++i)
{
d_out_wrapper_end = thrust::copy_if(d_in_wrapper, d_in_wrapper + num_items, d_out_wrapper, select_op);
}
OffsetT num_selected = OffsetT(d_out_wrapper_end - d_out_wrapper);
CubDebugExit(cudaMemcpy(d_num_selected_out, &num_selected, sizeof(OffsetT), cudaMemcpyHostToDevice));
}
return cudaSuccess;
}
/**
* Dispatch to partition if entrypoint
*/
template <typename InputIteratorT, typename FlagIteratorT, typename SelectOpT, typename OutputIteratorT, typename NumSelectedIteratorT, typename OffsetT>
__host__ __forceinline__
cudaError_t Dispatch(
Int2Type<THRUST> dispatch_to,
Int2Type<false> is_flagged,
Int2Type<true> is_partition,
int timing_timing_iterations,
size_t* d_temp_storage_bytes,
cudaError_t* d_cdp_error,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
FlagIteratorT d_flags,
OutputIteratorT d_out,
NumSelectedIteratorT d_num_selected_out,
OffsetT num_items,
SelectOpT select_op,
cudaStream_t stream,
bool debug_synchronous)
{
// The input value type
typedef typename std::iterator_traits<InputIteratorT>::value_type InputT;
// The output value type
typedef typename If<(Equals<typename std::iterator_traits<OutputIteratorT>::value_type, void>::VALUE), // OutputT = (if output iterator's value type is void) ?
typename std::iterator_traits<InputIteratorT>::value_type, // ... then the input iterator's value type,
typename std::iterator_traits<OutputIteratorT>::value_type>::Type OutputT; // ... else the output iterator's value type
typedef thrust::reverse_iterator<thrust::device_ptr<OutputT> > ReverseOutputIteratorT;
if (d_temp_storage == 0)
{
temp_storage_bytes = 1;
}
else
{
thrust::pair<thrust::device_ptr<OutputT>, ReverseOutputIteratorT> d_out_wrapper_end;
thrust::device_ptr<InputT> d_in_wrapper(d_in);
thrust::device_ptr<OutputT> d_out_wrapper(d_out);
ReverseOutputIteratorT d_out_unselected(d_out_wrapper + num_items);
for (int i = 0; i < timing_timing_iterations; ++i)
{
d_out_wrapper_end = thrust::partition_copy(
d_in_wrapper,
d_in_wrapper + num_items,
d_out_wrapper,
d_out_unselected,
select_op);
}
OffsetT num_selected = OffsetT(d_out_wrapper_end.first - d_out_wrapper);
CubDebugExit(cudaMemcpy(d_num_selected_out, &num_selected, sizeof(OffsetT), cudaMemcpyHostToDevice));
}
return cudaSuccess;
}
/**
* Dispatch to select flagged entrypoint
*/
template <typename InputIteratorT, typename FlagIteratorT, typename SelectOpT, typename OutputIteratorT, typename NumSelectedIteratorT, typename OffsetT>
__host__ __forceinline__
cudaError_t Dispatch(
Int2Type<THRUST> dispatch_to,
Int2Type<true> is_flagged,
Int2Type<false> is_partition,
int timing_timing_iterations,
size_t* d_temp_storage_bytes,
cudaError_t* d_cdp_error,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
FlagIteratorT d_flags,
OutputIteratorT d_out,
NumSelectedIteratorT d_num_selected_out,
OffsetT num_items,
SelectOpT select_op,
cudaStream_t stream,
bool debug_synchronous)
{
// The flag type
typedef typename std::iterator_traits<FlagIteratorT>::value_type FlagT;
// The input value type
typedef typename std::iterator_traits<InputIteratorT>::value_type InputT;
// The output value type
typedef typename If<(Equals<typename std::iterator_traits<OutputIteratorT>::value_type, void>::VALUE), // OutputT = (if output iterator's value type is void) ?
typename std::iterator_traits<InputIteratorT>::value_type, // ... then the input iterator's value type,
typename std::iterator_traits<OutputIteratorT>::value_type>::Type OutputT; // ... else the output iterator's value type
if (d_temp_storage == 0)
{
temp_storage_bytes = 1;
}
else
{
thrust::device_ptr<OutputT> d_out_wrapper_end;
thrust::device_ptr<InputT> d_in_wrapper(d_in);
thrust::device_ptr<OutputT> d_out_wrapper(d_out);
thrust::device_ptr<FlagT> d_flags_wrapper(d_flags);
for (int i = 0; i < timing_timing_iterations; ++i)
{
d_out_wrapper_end = thrust::copy_if(d_in_wrapper, d_in_wrapper + num_items, d_flags_wrapper, d_out_wrapper, CastOp<bool>());
}
OffsetT num_selected = OffsetT(d_out_wrapper_end - d_out_wrapper);
CubDebugExit(cudaMemcpy(d_num_selected_out, &num_selected, sizeof(OffsetT), cudaMemcpyHostToDevice));
}
return cudaSuccess;
}
/**
* Dispatch to partition flagged entrypoint
*/
template <typename InputIteratorT, typename FlagIteratorT, typename SelectOpT, typename OutputIteratorT, typename NumSelectedIteratorT, typename OffsetT>
__host__ __forceinline__
cudaError_t Dispatch(
Int2Type<THRUST> dispatch_to,
Int2Type<true> is_flagged,
Int2Type<true> is_partition,
int timing_timing_iterations,
size_t* d_temp_storage_bytes,
cudaError_t* d_cdp_error,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
FlagIteratorT d_flags,
OutputIteratorT d_out,
NumSelectedIteratorT d_num_selected_out,
OffsetT num_items,
SelectOpT select_op,
cudaStream_t stream,
bool debug_synchronous)
{
// The flag type
typedef typename std::iterator_traits<FlagIteratorT>::value_type FlagT;
// The input value type
typedef typename std::iterator_traits<InputIteratorT>::value_type InputT;
// The output value type
typedef typename If<(Equals<typename std::iterator_traits<OutputIteratorT>::value_type, void>::VALUE), // OutputT = (if output iterator's value type is void) ?
typename std::iterator_traits<InputIteratorT>::value_type, // ... then the input iterator's value type,
typename std::iterator_traits<OutputIteratorT>::value_type>::Type OutputT; // ... else the output iterator's value type
typedef thrust::reverse_iterator<thrust::device_ptr<OutputT> > ReverseOutputIteratorT;
if (d_temp_storage == 0)
{
temp_storage_bytes = 1;
}
else
{
thrust::pair<thrust::device_ptr<OutputT>, ReverseOutputIteratorT> d_out_wrapper_end;
thrust::device_ptr<InputT> d_in_wrapper(d_in);
thrust::device_ptr<OutputT> d_out_wrapper(d_out);
thrust::device_ptr<FlagT> d_flags_wrapper(d_flags);
ReverseOutputIteratorT d_out_unselected(d_out_wrapper + num_items);
for (int i = 0; i < timing_timing_iterations; ++i)
{
d_out_wrapper_end = thrust::partition_copy(
d_in_wrapper,
d_in_wrapper + num_items,
d_flags_wrapper,
d_out_wrapper,
d_out_unselected,
CastOp<bool>());
}
OffsetT num_selected = OffsetT(d_out_wrapper_end.first - d_out_wrapper);
CubDebugExit(cudaMemcpy(d_num_selected_out, &num_selected, sizeof(OffsetT), cudaMemcpyHostToDevice));
}
return cudaSuccess;
}
//---------------------------------------------------------------------
// CUDA Nested Parallelism Test Kernel
//---------------------------------------------------------------------
/**
* Simple wrapper kernel to invoke DeviceSelect
*/
template <typename InputIteratorT, typename FlagIteratorT, typename SelectOpT, typename OutputIteratorT, typename NumSelectedIteratorT, typename OffsetT, typename IsFlaggedTag, typename IsPartitionTag>
__global__ void CnpDispatchKernel(
IsFlaggedTag is_flagged,
IsPartitionTag is_partition,
int timing_timing_iterations,
size_t* d_temp_storage_bytes,
cudaError_t* d_cdp_error,
void* d_temp_storage,
size_t temp_storage_bytes,
InputIteratorT d_in,
FlagIteratorT d_flags,
OutputIteratorT d_out,
NumSelectedIteratorT d_num_selected_out,
OffsetT num_items,
SelectOpT select_op,
bool debug_synchronous)
{
#ifndef CUB_CDP
*d_cdp_error = cudaErrorNotSupported;
#else
*d_cdp_error = Dispatch(Int2Type<CUB>(), is_flagged, is_partition, timing_timing_iterations, d_temp_storage_bytes, d_cdp_error,
d_temp_storage, temp_storage_bytes, d_in, d_flags, d_out, d_num_selected_out, num_items, select_op, 0, debug_synchronous);
*d_temp_storage_bytes = temp_storage_bytes;
#endif
}
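// The kernel above exercises device-side dispatch (CDP): launched <<<1,1>>>, it
// re-enters the CUB Dispatch overloads from device code. Building that path typically
// requires defining CUB_CDP and compiling with relocatable device code, e.g.
// (assumed flags and file name, shown only as a sketch):
//
//     nvcc -arch=sm_70 -rdc=true -DCUB_CDP test_device_select_if.cu -lcudadevrt
//
// Without CUB_CDP the kernel simply reports cudaErrorNotSupported.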
/**
* Dispatch to CDP kernel
*/
template <typename InputIteratorT, typename FlagIteratorT, typename SelectOpT, typename OutputIteratorT, typename NumSelectedIteratorT, typename OffsetT, typename IsFlaggedTag, typename IsPartitionTag>
cudaError_t Dispatch(
Int2Type<CDP> dispatch_to,
IsFlaggedTag is_flagged,
IsPartitionTag is_partition,
int timing_timing_iterations,
size_t* d_temp_storage_bytes,
cudaError_t* d_cdp_error,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
FlagIteratorT d_flags,
OutputIteratorT d_out,
NumSelectedIteratorT d_num_selected_out,
OffsetT num_items,
SelectOpT select_op,
cudaStream_t stream,
bool debug_synchronous)
{
// Invoke kernel to invoke device-side dispatch
CnpDispatchKernel<<<1,1>>>(is_flagged, is_partition, timing_timing_iterations, d_temp_storage_bytes, d_cdp_error,
d_temp_storage, temp_storage_bytes, d_in, d_flags, d_out, d_num_selected_out, num_items, select_op, debug_synchronous);
// Copy out temp_storage_bytes
CubDebugExit(cudaMemcpy(&temp_storage_bytes, d_temp_storage_bytes, sizeof(size_t) * 1, cudaMemcpyDeviceToHost));
// Copy out error
cudaError_t retval;
CubDebugExit(cudaMemcpy(&retval, d_cdp_error, sizeof(cudaError_t) * 1, cudaMemcpyDeviceToHost));
return retval;
}
//---------------------------------------------------------------------
// Test generation
//---------------------------------------------------------------------
/**
* Initialize problem
*/
template <typename T>
void Initialize(
T* h_in,
int num_items)
{
for (int i = 0; i < num_items; ++i)
{
// Initialize each item to a randomly selected value from [0..126]
unsigned int value;
RandomBits(value, 0, 0, 7);
if (value == 127)
value = 126;
InitValue(INTEGER_SEED, h_in[i], value);
}
if (g_verbose)
{
printf("Input:\n");
DisplayResults(h_in, num_items);
printf("\n\n");
}
}
/**
* Solve selection problem (and set corresponding flags)
*/
template <
typename InputIteratorT,
typename FlagIteratorT,
typename SelectOpT,
typename T>
int Solve(
InputIteratorT h_in,
SelectOpT select_op,
T* h_reference,
FlagIteratorT h_flags,
int num_items)
{
int num_selected = 0;
for (int i = 0; i < num_items; ++i)
{
if ((h_flags[i] = select_op(h_in[i])))
{
h_reference[num_selected] = h_in[i];
num_selected++;
}
else
{
h_reference[num_items - (i - num_selected) - 1] = h_in[i];
}
}
return num_selected;
}
/**
* Test DeviceSelect for a given problem input
*/
template <
Backend BACKEND,
bool IS_FLAGGED,
bool IS_PARTITION,
typename DeviceInputIteratorT,
typename FlagT,
typename SelectOpT,
typename T>
void Test(
DeviceInputIteratorT d_in,
FlagT* h_flags,
SelectOpT select_op,
T* h_reference,
int num_selected,
int num_items)
{
// Allocate device flags, output, and num-selected
FlagT* d_flags = NULL;
T* d_out = NULL;
int* d_num_selected_out = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_flags, sizeof(FlagT) * num_items));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_out, sizeof(T) * num_items));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_num_selected_out, sizeof(int)));
// Allocate CDP device arrays
size_t* d_temp_storage_bytes = NULL;
cudaError_t* d_cdp_error = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_temp_storage_bytes, sizeof(size_t) * 1));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_cdp_error, sizeof(cudaError_t) * 1));
// Allocate temporary storage
void *d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
CubDebugExit(Dispatch(Int2Type<BACKEND>(), Int2Type<IS_FLAGGED>(), Int2Type<IS_PARTITION>(), 1, d_temp_storage_bytes, d_cdp_error,
d_temp_storage, temp_storage_bytes, d_in, d_flags, d_out, d_num_selected_out, num_items, select_op, 0, true));
CubDebugExit(g_allocator.DeviceAllocate(&d_temp_storage, temp_storage_bytes));
// Copy flags and clear device output array
CubDebugExit(cudaMemcpy(d_flags, h_flags, sizeof(FlagT) * num_items, cudaMemcpyHostToDevice));
CubDebugExit(cudaMemset(d_out, 0, sizeof(T) * num_items));
CubDebugExit(cudaMemset(d_num_selected_out, 0, sizeof(int)));
// Run warmup/correctness iteration
CubDebugExit(Dispatch(Int2Type<BACKEND>(), Int2Type<IS_FLAGGED>(), Int2Type<IS_PARTITION>(), 1, d_temp_storage_bytes, d_cdp_error,
d_temp_storage, temp_storage_bytes, d_in, d_flags, d_out, d_num_selected_out, num_items, select_op, 0, true));
// Check for correctness (and display results, if specified)
int compare1 = (IS_PARTITION) ?
CompareDeviceResults(h_reference, d_out, num_items, true, g_verbose) :
CompareDeviceResults(h_reference, d_out, num_selected, true, g_verbose);
printf("\t Data %s\n", compare1 ? "FAIL" : "PASS");
int compare2 = CompareDeviceResults(&num_selected, d_num_selected_out, 1, true, g_verbose);
printf("\t Count %s\n", compare2 ? "FAIL" : "PASS");
// Flush any stdout/stderr
fflush(stdout);
fflush(stderr);
// Performance
GpuTimer gpu_timer;
gpu_timer.Start();
CubDebugExit(Dispatch(Int2Type<BACKEND>(), Int2Type<IS_FLAGGED>(), Int2Type<IS_PARTITION>(), g_timing_iterations, d_temp_storage_bytes, d_cdp_error,
d_temp_storage, temp_storage_bytes, d_in, d_flags, d_out, d_num_selected_out, num_items, select_op, 0, false));
gpu_timer.Stop();
float elapsed_millis = gpu_timer.ElapsedMillis();
// Display performance
if (g_timing_iterations > 0)
{
float avg_millis = elapsed_millis / g_timing_iterations;
float giga_rate = float(num_items) / avg_millis / 1000.0f / 1000.0f;
int num_output_items = (IS_PARTITION) ? num_items : num_selected;
int num_flag_items = (IS_FLAGGED) ? num_items : 0;
size_t num_bytes = sizeof(T) * (num_items + num_output_items) + sizeof(FlagT) * num_flag_items;
float giga_bandwidth = float(num_bytes) / avg_millis / 1000.0f / 1000.0f;
printf(", %.3f avg ms, %.3f billion items/s, %.3f logical GB/s, %.1f%% peak", avg_millis, giga_rate, giga_bandwidth, giga_bandwidth / g_device_giga_bandwidth * 100.0);
}
printf("\n\n");
// Flush any stdout/stderr
fflush(stdout);
fflush(stderr);
// Cleanup
if (d_flags) CubDebugExit(g_allocator.DeviceFree(d_flags));
if (d_out) CubDebugExit(g_allocator.DeviceFree(d_out));
if (d_num_selected_out) CubDebugExit(g_allocator.DeviceFree(d_num_selected_out));
if (d_temp_storage_bytes) CubDebugExit(g_allocator.DeviceFree(d_temp_storage_bytes));
if (d_cdp_error) CubDebugExit(g_allocator.DeviceFree(d_cdp_error));
if (d_temp_storage) CubDebugExit(g_allocator.DeviceFree(d_temp_storage));
// Correctness asserts
AssertEquals(0, compare1 | compare2);
}
/**
* Test on pointer type
*/
template <
Backend BACKEND,
bool IS_FLAGGED,
bool IS_PARTITION,
typename T>
void TestPointer(
int num_items,
float select_ratio)
{
typedef char FlagT;
// Allocate host arrays
T* h_in = new T[num_items];
FlagT* h_flags = new FlagT[num_items];
T* h_reference = new T[num_items];
// Initialize input
Initialize(h_in, num_items);
// Select a comparison value that is select_ratio through the space of [0,127]
T compare;
if (select_ratio <= 0.0)
InitValue(INTEGER_SEED, compare, 0); // select none
else if (select_ratio >= 1.0)
InitValue(INTEGER_SEED, compare, 127); // select all
else
InitValue(INTEGER_SEED, compare, int(double(double(127) * select_ratio)));
LessThan<T> select_op(compare);
int num_selected = Solve(h_in, select_op, h_reference, h_flags, num_items);
if (g_verbose) std::cout << "\nComparison item: " << compare << "\n";
printf("\nPointer %s cub::%s::%s %d items, %d selected (select ratio %.3f), %s %d-byte elements\n",
(IS_PARTITION) ? "DevicePartition" : "DeviceSelect",
(IS_FLAGGED) ? "Flagged" : "If",
(BACKEND == CDP) ? "CDP CUB" : (BACKEND == THRUST) ? "Thrust" : "CUB",
num_items, num_selected, float(num_selected) / num_items, typeid(T).name(), (int) sizeof(T));
fflush(stdout);
// Allocate problem device arrays
T *d_in = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_in, sizeof(T) * num_items));
// Initialize device input
CubDebugExit(cudaMemcpy(d_in, h_in, sizeof(T) * num_items, cudaMemcpyHostToDevice));
// Run Test
Test<BACKEND, IS_FLAGGED, IS_PARTITION>(d_in, h_flags, select_op, h_reference, num_selected, num_items);
// Cleanup
if (h_in) delete[] h_in;
if (h_reference) delete[] h_reference;
if (h_flags) delete[] h_flags;
if (d_in) CubDebugExit(g_allocator.DeviceFree(d_in));
}
/**
* Test on iterator type
*/
template <
Backend BACKEND,
bool IS_FLAGGED,
bool IS_PARTITION,
typename T>
void TestIterator(
int num_items,
float select_ratio)
{
typedef char FlagT;
// Allocate host arrays
T* h_reference = new T[num_items];
FlagT* h_flags = new FlagT[num_items];
// Use counting iterator as the input
CountingInputIterator<T, int> h_in(0);
// Select a comparison value that is select_ratio through the space of [0,127]
T compare;
if (select_ratio <= 0.0)
InitValue(INTEGER_SEED, compare, 0); // select none
else if (select_ratio >= 1.0)
InitValue(INTEGER_SEED, compare, 127); // select all
else
InitValue(INTEGER_SEED, compare, int(double(double(127) * select_ratio)));
LessThan<T> select_op(compare);
int num_selected = Solve(h_in, select_op, h_reference, h_flags, num_items);
if (g_verbose) std::cout << "\nComparison item: " << compare << "\n";
printf("\nIterator %s cub::%s::%s %d items, %d selected (select ratio %.3f), %s %d-byte elements\n",
(IS_PARTITION) ? "DevicePartition" : "DeviceSelect",
(IS_FLAGGED) ? "Flagged" : "If",
(BACKEND == CDP) ? "CDP CUB" : (BACKEND == THRUST) ? "Thrust" : "CUB",
num_items, num_selected, float(num_selected) / num_items, typeid(T).name(), (int) sizeof(T));
fflush(stdout);
// Run Test
Test<BACKEND, IS_FLAGGED, IS_PARTITION>(h_in, h_flags, select_op, h_reference, num_selected, num_items);
// Cleanup
if (h_reference) delete[] h_reference;
if (h_flags) delete[] h_flags;
}
/**
* Test different selection ratios
*/
template <
Backend BACKEND,
bool IS_FLAGGED,
bool IS_PARTITION,
typename T>
void Test(
int num_items)
{
for (float select_ratio = 0.0f; select_ratio <= 1.0f; select_ratio += 0.2f)
{
TestPointer<BACKEND, IS_FLAGGED, IS_PARTITION, T>(num_items, select_ratio);
}
}
/**
* Test (select vs. partition) and (flagged vs. functor)
*/
template <
Backend BACKEND,
typename T>
void TestMethod(
int num_items)
{
// Functor
Test<BACKEND, false, false, T>(num_items);
Test<BACKEND, false, true, T>(num_items);
// Flagged
Test<BACKEND, true, false, T>(num_items);
Test<BACKEND, true, true, T>(num_items);
}
/**
* Test different dispatch
*/
template <
typename T>
void TestOp(
int num_items)
{
TestMethod<CUB, T>(num_items);
#ifdef CUB_CDP
TestMethod<CDP, T>(num_items);
#endif
}
/**
* Test different input sizes
*/
template <typename T>
void Test(
int num_items)
{
if (num_items < 0)
{
TestOp<T>(0);
TestOp<T>(1);
TestOp<T>(100);
TestOp<T>(10000);
TestOp<T>(1000000);
}
else
{
TestOp<T>(num_items);
}
}
/**
* Test select/partition on pointer types
*/
template <typename T>
void ComparePointer(
int num_items,
float select_ratio)
{
printf("-- Select-if ----------------------------\n");
TestPointer<CUB, false, false, T>(num_items, select_ratio);
TestPointer<THRUST, false, false, T>(num_items, select_ratio);
printf("-- Partition-if ----------------------------\n");
TestPointer<CUB, false, true, T>(num_items, select_ratio);
TestPointer<THRUST, false, true, T>(num_items, select_ratio);
printf("-- Select-flagged ----------------------------\n");
TestPointer<CUB, true, false, T>(num_items, select_ratio);
TestPointer<THRUST, true, false, T>(num_items, select_ratio);
printf("-- Partition-flagged ----------------------------\n");
TestPointer<CUB, true, true, T>(num_items, select_ratio);
TestPointer<THRUST, true, true, T>(num_items, select_ratio);
}
//---------------------------------------------------------------------
// Main
//---------------------------------------------------------------------
/**
* Main
*/
int main(int argc, char** argv)
{
int num_items = -1;
float select_ratio = 0.5;
// Initialize command line
CommandLineArgs args(argc, argv);
g_verbose = args.CheckCmdLineFlag("v");
args.GetCmdLineArgument("n", num_items);
args.GetCmdLineArgument("i", g_timing_iterations);
args.GetCmdLineArgument("repeat", g_repeat);
args.GetCmdLineArgument("ratio", select_ratio);
// Print usage
if (args.CheckCmdLineFlag("help"))
{
printf("%s "
"[--n=<input items> "
"[--i=<timing iterations> "
"[--device=<device-id>] "
"[--ratio=<selection ratio, default 0.5>] "
"[--repeat=<repetitions of entire test suite>] "
"[--v] "
"[--cdp] "
"\n", argv[0]);
exit(0);
}
// Initialize device
CubDebugExit(args.DeviceInit());
g_device_giga_bandwidth = args.device_giga_bandwidth;
printf("\n");
#ifdef QUICKER_TEST
// Compile/run basic CUB test
if (num_items < 0) num_items = 32000000;
printf("-- Select-if ----------------------------\n");
TestPointer<CUB, false, false, int>(num_items, select_ratio);
printf("-- Partition-if ----------------------------\n");
TestPointer<CUB, false, true, int>(num_items, select_ratio);
printf("-- Select-flagged ----------------------------\n");
TestPointer<CUB, true, false, int>(num_items, select_ratio);
printf("-- Partition-flagged ----------------------------\n");
TestPointer<CUB, true, true, int>(num_items, select_ratio);
#elif defined(QUICK_TEST)
// Get device ordinal
int device_ordinal;
CubDebugExit(cudaGetDevice(&device_ordinal));
// Get device SM version
int sm_version;
CubDebugExit(SmVersion(sm_version, device_ordinal));
// Compile/run quick tests
if (num_items < 0) num_items = 32000000;
printf("-- Iterator ----------------------------\n");
TestIterator<CUB, false, false, int>(num_items, select_ratio);
ComparePointer<char>( num_items * ((sm_version <= 130) ? 1 : 4), select_ratio);
ComparePointer<short>( num_items * ((sm_version <= 130) ? 1 : 2), select_ratio);
ComparePointer<int>( num_items, select_ratio);
ComparePointer<long long>( num_items / 2, select_ratio);
ComparePointer<TestFoo>( num_items / 4, select_ratio);
#else
// Compile/run thorough tests
for (int i = 0; i <= g_repeat; ++i)
{
// Test different input types
Test<unsigned char>(num_items);
Test<unsigned short>(num_items);
Test<unsigned int>(num_items);
Test<unsigned long long>(num_items);
Test<uchar2>(num_items);
Test<ushort2>(num_items);
Test<uint2>(num_items);
Test<ulonglong2>(num_items);
Test<uchar4>(num_items);
Test<ushort4>(num_items);
Test<uint4>(num_items);
Test<ulonglong4>(num_items);
Test<TestFoo>(num_items);
Test<TestBar>(num_items);
}
#endif
return 0;
}
|
ee0e1112b465f14ef1746c8b11457df521c37ca2.hip | // !!! This is a file automatically generated by hipify!!!
#include "async_utils.cuh"
#include "cuda_utils.h"
#include "handle_utils.h"
#include "matrix_utils.h"
#include "pinned_host_vector.h"
#include "preprocessor.h"
#include "stream_allocator.h"
#include <cuml/manifold/umapparams.h>
#include <thrust/async/copy.h>
#include <thrust/device_vector.h>
#include <cuml/manifold/umap.hpp>
#include <Rcpp.h>
#include <cmath>
#include <memory>
#include <type_traits>
#include <vector>
namespace {
/*
 * The 'ML::UMAPParams::target_weights' parameter was renamed to 'target_weight'
 * at some point, so we use SFINAE here to stay compatible with both versions of
 * the 'ML::UMAPParams' definition.
*/
// for cuML v21.06 or above
template <typename T>
__host__ void set_target_weight(
T& params,
typename std::remove_reference<decltype(T::target_weight)>::type const w) {
params.target_weight = w;
}
// for earlier versions of cuML
template <typename T>
__host__ void set_target_weight(
T& params,
typename std::remove_reference<decltype(T::target_weights)>::type const w) {
params.target_weights = w;
}
} // namespace
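// In practice the two overloads above are mutually exclusive: for a given cuML version
// only one of decltype(T::target_weight) / decltype(T::target_weights) is well-formed,
// so substitution failure removes the other overload and the call resolves
// unambiguously. Illustrative call (the code below does exactly this):
//
//     set_target_weight(*params, target_weight);  // picks whichever member exists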
namespace cuml4r {
__host__ Rcpp::List umap_fit(
Rcpp::NumericMatrix const& x, Rcpp::NumericVector const& y,
int const n_neighbors, int const n_components, int const n_epochs,
float const learning_rate, float const min_dist, float const spread,
float const set_op_mix_ratio, int const local_connectivity,
float const repulsion_strength, int const negative_sample_rate,
float const transform_queue_size, int const verbosity, float const a,
float const b, int const init, int const target_n_neighbors,
int const target_metric, float const target_weight,
uint64_t const random_state, bool const deterministic) {
Rcpp::List model;
auto const m_x = cuml4r::Matrix<float>(x, /*transpose=*/false);
auto const n_samples = m_x.numRows;
auto const n_features = m_x.numCols;
auto stream_view = cuml4r::stream_allocator::getOrCreateStream();
raft::handle_t handle;
cuml4r::handle_utils::initializeHandle(handle, stream_view.value());
auto params = std::make_unique<ML::UMAPParams>();
params->n_neighbors = n_neighbors;
params->n_components = n_components;
params->n_epochs = n_epochs;
params->learning_rate = learning_rate;
params->min_dist = min_dist;
params->spread = spread;
params->set_op_mix_ratio = set_op_mix_ratio;
params->local_connectivity = local_connectivity;
params->repulsion_strength = repulsion_strength;
params->negative_sample_rate = negative_sample_rate;
params->transform_queue_size = transform_queue_size;
params->verbosity = verbosity;
if (std::isnan(a) || std::isnan(b)) {
ML::UMAP::find_ab(handle, params.get());
} else {
params->a = a;
params->b = b;
}
params->init = init;
params->target_n_neighbors = target_n_neighbors;
params->target_metric =
static_cast<ML::UMAPParams::MetricType>(target_metric);
set_target_weight(*params, target_weight);
params->random_state = random_state;
params->deterministic = deterministic;
// UMAP input
auto const& h_x = m_x.values;
thrust::device_vector<float> d_x(h_x.size());
auto CUML4R_ANONYMOUS_VARIABLE(x_h2d) = cuml4r::async_copy(
stream_view.value(), h_x.cbegin(), h_x.cend(), d_x.begin());
thrust::device_vector<float> d_y;
cuml4r::unique_marker y_h2d;
if (y.size() > 0) {
auto const h_y = Rcpp::as<cuml4r::pinned_host_vector<float>>(y);
d_y.resize(y.size());
y_h2d = cuml4r::async_copy(stream_view.value(), h_y.cbegin(), h_y.cend(),
d_y.begin());
}
// UMAP output
thrust::device_vector<float> d_embedding(n_samples * n_components);
ML::UMAP::fit(handle, /*X=*/d_x.data().get(),
/*y=*/(y.size() > 0 ? d_y.data().get() : nullptr),
/*n=*/n_samples,
/*d=*/n_features,
/*knn_indices=*/nullptr,
/*knn_dists=*/nullptr,
/*params=*/params.get(),
/*embeddings=*/d_embedding.data().get());
cuml4r::pinned_host_vector<float> h_embedding(d_embedding.size());
auto CUML4R_ANONYMOUS_VARIABLE(embedding_d2h) =
cuml4r::async_copy(stream_view.value(), d_embedding.cbegin(),
d_embedding.cend(), h_embedding.begin());
CUDA_RT_CALL(hipStreamSynchronize(stream_view.value()));
model["umap_params"] = Rcpp::XPtr<ML::UMAPParams>(params.release());
model["embedding"] = Rcpp::transpose(
Rcpp::NumericMatrix(n_components, n_samples, h_embedding.begin()));
model["n_samples"] = n_samples;
model["x"] = x;
return model;
}
__host__ Rcpp::NumericMatrix umap_transform(Rcpp::List const& model,
Rcpp::NumericMatrix const& x) {
auto const m_x = cuml4r::Matrix<float>(x, /*transpose=*/false);
auto const n_samples = m_x.numRows;
auto const n_features = m_x.numCols;
auto const m_orig = cuml4r::Matrix<float>(model["x"], /*transpose=*/false);
auto const m_embedding =
cuml4r::Matrix<float>(model["embedding"], /*transpose=*/false);
Rcpp::XPtr<ML::UMAPParams> params = model["umap_params"];
auto stream_view = cuml4r::stream_allocator::getOrCreateStream();
raft::handle_t handle;
cuml4r::handle_utils::initializeHandle(handle, stream_view.value());
// UMAP transform input
auto const& h_x = m_x.values;
thrust::device_vector<float> d_x(h_x.size());
auto CUML4R_ANONYMOUS_VARIABLE(x_h2d) = cuml4r::async_copy(
stream_view.value(), h_x.cbegin(), h_x.cend(), d_x.begin());
auto const& h_orig_x = m_orig.values;
thrust::device_vector<float> d_orig_x(h_orig_x.size());
auto CUML4R_ANONYMOUS_VARIABLE(orig_x_h2d) = cuml4r::async_copy(
stream_view.value(), h_orig_x.cbegin(), h_orig_x.cend(), d_orig_x.begin());
auto const& h_embedding = m_embedding.values;
thrust::device_vector<float> d_embedding(h_embedding.size());
  auto CUML4R_ANONYMOUS_VARIABLE(embedding_h2d) =
cuml4r::async_copy(stream_view.value(), h_embedding.cbegin(),
h_embedding.cend(), d_embedding.begin());
// UMAP transform output
thrust::device_vector<float> d_transformed(n_samples * m_embedding.numCols);
ML::UMAP::transform(
handle, /*X=*/d_x.data().get(), /*n=*/n_samples, /*d=*/n_features,
/*knn_indices=*/nullptr, /*knn_dists=*/nullptr,
/*orig_x=*/d_orig_x.data().get(),
/*orig_n=*/m_orig.numRows, /*embedding=*/d_embedding.data().get(),
/*embedding_n=*/m_embedding.numRows,
/*params=*/params.get(), /*transformed=*/d_transformed.data().get());
cuml4r::pinned_host_vector<float> h_transformed(d_transformed.size());
auto CUML4R_ANONYMOUS_VARIABLE(transformed_d2h) =
cuml4r::async_copy(stream_view.value(), d_transformed.cbegin(),
d_transformed.cend(), h_transformed.begin());
CUDA_RT_CALL(hipStreamSynchronize(stream_view.value()));
return Rcpp::transpose(
Rcpp::NumericMatrix(m_embedding.numCols, n_samples, h_transformed.begin()));
}
__host__ Rcpp::List umap_get_state(Rcpp::List const& model) {
Rcpp::List state;
{
Rcpp::List umap_params_list;
Rcpp::XPtr<ML::UMAPParams const> const umap_params = model["umap_params"];
umap_params_list["n_neighbors"] = umap_params->n_neighbors;
umap_params_list["n_components"] = umap_params->n_components;
umap_params_list["n_epochs"] = umap_params->n_epochs;
umap_params_list["learning_rate"] = umap_params->learning_rate;
umap_params_list["min_dist"] = umap_params->min_dist;
umap_params_list["spread"] = umap_params->spread;
umap_params_list["set_op_mix_ratio"] = umap_params->set_op_mix_ratio;
umap_params_list["local_connectivity"] = umap_params->local_connectivity;
umap_params_list["repulsion_strength"] = umap_params->repulsion_strength;
umap_params_list["negative_sample_rate"] =
umap_params->negative_sample_rate;
umap_params_list["transform_queue_size"] =
umap_params->transform_queue_size;
umap_params_list["verbosity"] = umap_params->verbosity;
umap_params_list["a"] = umap_params->a;
umap_params_list["b"] = umap_params->b;
umap_params_list["init"] = umap_params->init;
umap_params_list["target_n_neighbors"] = umap_params->target_n_neighbors;
umap_params_list["target_metric"] =
static_cast<int>(umap_params->target_metric);
umap_params_list["target_weight"] =
static_cast<int>(umap_params->target_weight);
umap_params_list["random_state"] = umap_params->random_state;
umap_params_list["deterministic"] = umap_params->deterministic;
state["umap_params"] = std::move(umap_params_list);
}
state["embedding"] = model["embedding"];
state["n_samples"] = model["n_samples"];
state["x"] = model["x"];
return state;
}
__host__ Rcpp::List umap_set_state(Rcpp::List const& state) {
Rcpp::List model;
{
auto umap_params = std::make_unique<ML::UMAPParams>();
Rcpp::List const& umap_params_list = state["umap_params"];
umap_params->n_neighbors = umap_params_list["n_neighbors"];
umap_params->n_components = umap_params_list["n_components"];
umap_params->n_epochs = umap_params_list["n_epochs"];
umap_params->learning_rate = umap_params_list["learning_rate"];
umap_params->min_dist = umap_params_list["min_dist"];
umap_params->spread = umap_params_list["spread"];
umap_params->set_op_mix_ratio = umap_params_list["set_op_mix_ratio"];
umap_params->local_connectivity = umap_params_list["local_connectivity"];
umap_params->repulsion_strength = umap_params_list["repulsion_strength"];
umap_params->negative_sample_rate =
umap_params_list["negative_sample_rate"];
umap_params->transform_queue_size =
umap_params_list["transform_queue_size"];
umap_params->verbosity = umap_params_list["verbosity"];
umap_params->a = umap_params_list["a"];
umap_params->b = umap_params_list["b"];
umap_params->init = umap_params_list["init"];
umap_params->target_n_neighbors = umap_params_list["target_n_neighbors"];
umap_params->target_metric = static_cast<ML::UMAPParams::MetricType>(
Rcpp::as<int>(umap_params_list["target_metric"]));
umap_params->target_weight = umap_params_list["target_weight"];
umap_params->random_state = umap_params_list["random_state"];
umap_params->deterministic = umap_params_list["deterministic"];
model["umap_params"] = Rcpp::XPtr<ML::UMAPParams>(umap_params.release());
}
model["embedding"] = state["embedding"];
model["n_samples"] = state["n_samples"];
model["x"] = state["x"];
return model;
}
} // namespace cuml4r
| ee0e1112b465f14ef1746c8b11457df521c37ca2.cu | #include "async_utils.cuh"
#include "cuda_utils.h"
#include "handle_utils.h"
#include "matrix_utils.h"
#include "pinned_host_vector.h"
#include "preprocessor.h"
#include "stream_allocator.h"
#include <cuml/manifold/umapparams.h>
#include <thrust/async/copy.h>
#include <thrust/device_vector.h>
#include <cuml/manifold/umap.hpp>
#include <Rcpp.h>
#include <cmath>
#include <memory>
#include <type_traits>
#include <vector>
namespace {
/*
 * The 'ML::UMAPParams::target_weights' parameter was renamed to 'target_weight'
 * at some point, so SFINAE is used here to stay compatible with both versions
 * of the 'ML::UMAPParams' definition.
*/
// for cuML v21.06 or above
template <typename T>
__host__ void set_target_weight(
T& params,
typename std::remove_reference<decltype(T::target_weight)>::type const w) {
params.target_weight = w;
}
// for earlier versions of cuML
template <typename T>
__host__ void set_target_weight(
T& params,
typename std::remove_reference<decltype(T::target_weights)>::type const w) {
params.target_weights = w;
}
} // namespace
namespace cuml4r {
__host__ Rcpp::List umap_fit(
Rcpp::NumericMatrix const& x, Rcpp::NumericVector const& y,
int const n_neighbors, int const n_components, int const n_epochs,
float const learning_rate, float const min_dist, float const spread,
float const set_op_mix_ratio, int const local_connectivity,
float const repulsion_strength, int const negative_sample_rate,
float const transform_queue_size, int const verbosity, float const a,
float const b, int const init, int const target_n_neighbors,
int const target_metric, float const target_weight,
uint64_t const random_state, bool const deterministic) {
Rcpp::List model;
auto const m_x = cuml4r::Matrix<float>(x, /*transpose=*/false);
auto const n_samples = m_x.numRows;
auto const n_features = m_x.numCols;
auto stream_view = cuml4r::stream_allocator::getOrCreateStream();
raft::handle_t handle;
cuml4r::handle_utils::initializeHandle(handle, stream_view.value());
auto params = std::make_unique<ML::UMAPParams>();
params->n_neighbors = n_neighbors;
params->n_components = n_components;
params->n_epochs = n_epochs;
params->learning_rate = learning_rate;
params->min_dist = min_dist;
params->spread = spread;
params->set_op_mix_ratio = set_op_mix_ratio;
params->local_connectivity = local_connectivity;
params->repulsion_strength = repulsion_strength;
params->negative_sample_rate = negative_sample_rate;
params->transform_queue_size = transform_queue_size;
params->verbosity = verbosity;
if (std::isnan(a) || std::isnan(b)) {
ML::UMAP::find_ab(handle, params.get());
} else {
params->a = a;
params->b = b;
}
params->init = init;
params->target_n_neighbors = target_n_neighbors;
params->target_metric =
static_cast<ML::UMAPParams::MetricType>(target_metric);
set_target_weight(*params, target_weight);
params->random_state = random_state;
params->deterministic = deterministic;
// UMAP input
auto const& h_x = m_x.values;
thrust::device_vector<float> d_x(h_x.size());
auto CUML4R_ANONYMOUS_VARIABLE(x_h2d) = cuml4r::async_copy(
stream_view.value(), h_x.cbegin(), h_x.cend(), d_x.begin());
thrust::device_vector<float> d_y;
cuml4r::unique_marker y_h2d;
if (y.size() > 0) {
auto const h_y = Rcpp::as<cuml4r::pinned_host_vector<float>>(y);
d_y.resize(y.size());
y_h2d = cuml4r::async_copy(stream_view.value(), h_y.cbegin(), h_y.cend(),
d_y.begin());
}
// UMAP output
thrust::device_vector<float> d_embedding(n_samples * n_components);
ML::UMAP::fit(handle, /*X=*/d_x.data().get(),
/*y=*/(y.size() > 0 ? d_y.data().get() : nullptr),
/*n=*/n_samples,
/*d=*/n_features,
/*knn_indices=*/nullptr,
/*knn_dists=*/nullptr,
/*params=*/params.get(),
/*embeddings=*/d_embedding.data().get());
cuml4r::pinned_host_vector<float> h_embedding(d_embedding.size());
auto CUML4R_ANONYMOUS_VARIABLE(embedding_d2h) =
cuml4r::async_copy(stream_view.value(), d_embedding.cbegin(),
d_embedding.cend(), h_embedding.begin());
CUDA_RT_CALL(cudaStreamSynchronize(stream_view.value()));
model["umap_params"] = Rcpp::XPtr<ML::UMAPParams>(params.release());
model["embedding"] = Rcpp::transpose(
Rcpp::NumericMatrix(n_components, n_samples, h_embedding.begin()));
model["n_samples"] = n_samples;
model["x"] = x;
return model;
}
__host__ Rcpp::NumericMatrix umap_transform(Rcpp::List const& model,
Rcpp::NumericMatrix const& x) {
auto const m_x = cuml4r::Matrix<float>(x, /*transpose=*/false);
auto const n_samples = m_x.numRows;
auto const n_features = m_x.numCols;
auto const m_orig = cuml4r::Matrix<float>(model["x"], /*transpose=*/false);
auto const m_embedding =
cuml4r::Matrix<float>(model["embedding"], /*transpose=*/false);
Rcpp::XPtr<ML::UMAPParams> params = model["umap_params"];
auto stream_view = cuml4r::stream_allocator::getOrCreateStream();
raft::handle_t handle;
cuml4r::handle_utils::initializeHandle(handle, stream_view.value());
// UMAP transform input
auto const& h_x = m_x.values;
thrust::device_vector<float> d_x(h_x.size());
auto CUML4R_ANONYMOUS_VARIABLE(x_h2d) = cuml4r::async_copy(
stream_view.value(), h_x.cbegin(), h_x.cend(), d_x.begin());
auto const& h_orig_x = m_orig.values;
thrust::device_vector<float> d_orig_x(h_orig_x.size());
auto CUML4R_ANONYMOUS_VARIABLE(orig_x_h2d) = cuml4r::async_copy(
stream_view.value(), h_orig_x.cbegin(), h_orig_x.cend(), d_orig_x.begin());
auto const& h_embedding = m_embedding.values;
thrust::device_vector<float> d_embedding(h_embedding.size());
  auto CUML4R_ANONYMOUS_VARIABLE(embedding_h2d) =
cuml4r::async_copy(stream_view.value(), h_embedding.cbegin(),
h_embedding.cend(), d_embedding.begin());
// UMAP transform output
thrust::device_vector<float> d_transformed(n_samples * m_embedding.numCols);
ML::UMAP::transform(
handle, /*X=*/d_x.data().get(), /*n=*/n_samples, /*d=*/n_features,
/*knn_indices=*/nullptr, /*knn_dists=*/nullptr,
/*orig_x=*/d_orig_x.data().get(),
/*orig_n=*/m_orig.numRows, /*embedding=*/d_embedding.data().get(),
/*embedding_n=*/m_embedding.numRows,
/*params=*/params.get(), /*transformed=*/d_transformed.data().get());
cuml4r::pinned_host_vector<float> h_transformed(d_transformed.size());
auto CUML4R_ANONYMOUS_VARIABLE(transformed_d2h) =
cuml4r::async_copy(stream_view.value(), d_transformed.cbegin(),
d_transformed.cend(), h_transformed.begin());
CUDA_RT_CALL(cudaStreamSynchronize(stream_view.value()));
return Rcpp::transpose(
Rcpp::NumericMatrix(m_embedding.numCols, n_samples, h_transformed.begin()));
}
__host__ Rcpp::List umap_get_state(Rcpp::List const& model) {
Rcpp::List state;
{
Rcpp::List umap_params_list;
Rcpp::XPtr<ML::UMAPParams const> const umap_params = model["umap_params"];
umap_params_list["n_neighbors"] = umap_params->n_neighbors;
umap_params_list["n_components"] = umap_params->n_components;
umap_params_list["n_epochs"] = umap_params->n_epochs;
umap_params_list["learning_rate"] = umap_params->learning_rate;
umap_params_list["min_dist"] = umap_params->min_dist;
umap_params_list["spread"] = umap_params->spread;
umap_params_list["set_op_mix_ratio"] = umap_params->set_op_mix_ratio;
umap_params_list["local_connectivity"] = umap_params->local_connectivity;
umap_params_list["repulsion_strength"] = umap_params->repulsion_strength;
umap_params_list["negative_sample_rate"] =
umap_params->negative_sample_rate;
umap_params_list["transform_queue_size"] =
umap_params->transform_queue_size;
umap_params_list["verbosity"] = umap_params->verbosity;
umap_params_list["a"] = umap_params->a;
umap_params_list["b"] = umap_params->b;
umap_params_list["init"] = umap_params->init;
umap_params_list["target_n_neighbors"] = umap_params->target_n_neighbors;
umap_params_list["target_metric"] =
static_cast<int>(umap_params->target_metric);
umap_params_list["target_weight"] =
static_cast<int>(umap_params->target_weight);
umap_params_list["random_state"] = umap_params->random_state;
umap_params_list["deterministic"] = umap_params->deterministic;
state["umap_params"] = std::move(umap_params_list);
}
state["embedding"] = model["embedding"];
state["n_samples"] = model["n_samples"];
state["x"] = model["x"];
return state;
}
__host__ Rcpp::List umap_set_state(Rcpp::List const& state) {
Rcpp::List model;
{
auto umap_params = std::make_unique<ML::UMAPParams>();
Rcpp::List const& umap_params_list = state["umap_params"];
umap_params->n_neighbors = umap_params_list["n_neighbors"];
umap_params->n_components = umap_params_list["n_components"];
umap_params->n_epochs = umap_params_list["n_epochs"];
umap_params->learning_rate = umap_params_list["learning_rate"];
umap_params->min_dist = umap_params_list["min_dist"];
umap_params->spread = umap_params_list["spread"];
umap_params->set_op_mix_ratio = umap_params_list["set_op_mix_ratio"];
umap_params->local_connectivity = umap_params_list["local_connectivity"];
umap_params->repulsion_strength = umap_params_list["repulsion_strength"];
umap_params->negative_sample_rate =
umap_params_list["negative_sample_rate"];
umap_params->transform_queue_size =
umap_params_list["transform_queue_size"];
umap_params->verbosity = umap_params_list["verbosity"];
umap_params->a = umap_params_list["a"];
umap_params->b = umap_params_list["b"];
umap_params->init = umap_params_list["init"];
umap_params->target_n_neighbors = umap_params_list["target_n_neighbors"];
umap_params->target_metric = static_cast<ML::UMAPParams::MetricType>(
Rcpp::as<int>(umap_params_list["target_metric"]));
umap_params->target_weight = umap_params_list["target_weight"];
umap_params->random_state = umap_params_list["random_state"];
umap_params->deterministic = umap_params_list["deterministic"];
model["umap_params"] = Rcpp::XPtr<ML::UMAPParams>(umap_params.release());
}
model["embedding"] = state["embedding"];
model["n_samples"] = state["n_samples"];
model["x"] = state["x"];
return model;
}
} // namespace cuml4r
|
e8f028faeaf37543ea9965b66c81438325533c4f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#define BLOCK_SIZE 32
inline hipError_t cudaCheckError_inline(hipError_t result) {
if (result != hipSuccess) {
fprintf(stderr, "CUDA error %s:%d: '%s'\n", __FILE__, __LINE__, hipGetErrorString(result));
assert(result == hipSuccess);
}
return result;
}
__global__ void matrixMultiplication(int* dev_a, int* dev_b, int* dev_c, int row_a, int col_a, int col_b) {
int row = threadIdx.y + blockIdx.y * blockDim.y;
int col = threadIdx.x + blockIdx.x * blockDim.x;
// each (row, col) pair will match on element in resulting matrix -> with shape (row_a, col_b)
int ret=0;
if (row < row_a && col < col_b) {
for(int i=0; i<col_a; ++i) {
ret += dev_a[row * col_a + i] * dev_b[i * col_b + col];//original b
      //ret += dev_a[row*col_a + i] * dev_b[col*col_b + i];//transposed b, but NOT sped up on the GPU
}
dev_c[row*col_b + col] = ret;
}
}
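// Editor's sketch (not part of the original file, and not launched anywhere below):
// a shared-memory tiled variant of the kernel above. Each block stages
// BLOCK_SIZE x BLOCK_SIZE tiles of dev_a and dev_b so global loads are coalesced
// and reused, which is the usual way to speed this multiplication up on the GPU;
// the transposed-b trick in the comments above mainly helps the CPU cache.
// Assumes the launch uses BLOCK_SIZE x BLOCK_SIZE thread blocks, as main() does.
__global__ void matrixMultiplicationTiled(const int* dev_a, const int* dev_b, int* dev_c,
                                          int row_a, int col_a, int col_b) {
  __shared__ int tile_a[BLOCK_SIZE][BLOCK_SIZE];
  __shared__ int tile_b[BLOCK_SIZE][BLOCK_SIZE];
  int row = threadIdx.y + blockIdx.y * blockDim.y;
  int col = threadIdx.x + blockIdx.x * blockDim.x;
  int ret = 0;
  for (int t = 0; t < (col_a + BLOCK_SIZE - 1) / BLOCK_SIZE; ++t) {
    int a_col = t * BLOCK_SIZE + threadIdx.x;
    int b_row = t * BLOCK_SIZE + threadIdx.y;
    // out-of-range threads load zeros so they can still take part in the barriers
    tile_a[threadIdx.y][threadIdx.x] = (row < row_a && a_col < col_a) ? dev_a[row * col_a + a_col] : 0;
    tile_b[threadIdx.y][threadIdx.x] = (b_row < col_a && col < col_b) ? dev_b[b_row * col_b + col] : 0;
    __syncthreads();
    for (int k = 0; k < BLOCK_SIZE; ++k) {
      ret += tile_a[threadIdx.y][k] * tile_b[k][threadIdx.x];
    }
    __syncthreads();
  }
  if (row < row_a && col < col_b) {
    dev_c[row * col_b + col] = ret;
  }
}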
__global__ void matrixTranspose(int* in_mat, int* out_mat, int dim_rows, int dim_cols) {
int row = threadIdx.y + blockIdx.y * blockDim.y;
int col = threadIdx.x + blockIdx.x * blockDim.x;
if (row < dim_rows && col < dim_cols) {
    // out_mat has dim_cols rows of length dim_rows after the transpose
    unsigned int new_pos = col * dim_rows + row;
out_mat[new_pos] = in_mat[row*dim_cols + col];
}
}
void matrixTranspose_cpu(int* in_mat, int* out_mat, int dim_rows, int dim_cols) {
for (int i=0; i<dim_rows; ++i) {
for (int j=0; j<dim_cols; ++j) {
      unsigned int new_pos = j*dim_rows + i; // out_mat has dim_cols rows of length dim_rows
out_mat[new_pos] = in_mat[i*dim_cols+j];
}
}
}
void matrixMultiplication_cpu(int* host_a, int* host_b, int* host_c, int row_a, int col_a, int col_b) {
for (int i=0; i<row_a; ++i) {
for (int j=0; j<col_b; ++j) {
int tmp=0;
for (int k=0; k<col_a; ++k) {
//tmp += host_a[i*col_a+k] * host_b[k*col_b+j];//original b
        tmp += host_a[i*col_a+k] * host_b[j*col_b+k];//transposed b for sped-up cpu multiplication
}
host_c[i*col_b + j] = tmp;
}
}
}
// i-k-j loop order: the innermost loop walks host_b and host_c row by row, so
// memory accesses stay sequential and CPU cache lines are fully reused.
void matrixMultiplication_cpu_cache_friendly(int* host_a, int* host_b, int* host_c, int row_a, int col_a, int col_b) {
for (int i=0; i<row_a; ++i) {
for(int k=0; k<col_a; ++k) {
int tmp = host_a[i*col_a + k];
for(int j=0; j<col_b; ++j) {
host_c[i*col_b+j] += tmp * host_b[k*col_b+j];
}
}
}
}
bool verifyResult(int* h_c, int* h_c_result, int rows, int cols) {
for(int i=0; i<rows; ++i) {
for(int j=0; j<cols; ++j) {
if(h_c[i*cols + j] != h_c_result[i*cols + j]){
printf("Host: %d, Device: %d\n", h_c[i*cols + j], h_c_result[i*cols + j]);
return false;
}
}
}
return true;
}
int main(int argc, char* argv[]) {
if(argc != 4){
fprintf(stderr, "%s", "Usage: ./a.out $row_A $col_A $col_B $thread_count_in_block in 1Dim direction\n");
exit(-1);
}
int row_a = atoi(argv[1]);
int col_a = atoi(argv[2]);
int col_b = atoi(argv[3]);
int deviceId;
hipGetDevice(&deviceId);
float gpu_elapsed_time_ms;
//float cpu_elapsed_time_ms;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
// Start counting execution time of device computation
hipEventRecord(start, 0);
int* h_a, *h_b, *h_c, *h_c_result;
cudaCheckError_inline(hipMallocManaged(&h_a, sizeof(int)*row_a*col_a));
cudaCheckError_inline(hipMallocManaged(&h_b, sizeof(int)*col_a*col_b));
cudaCheckError_inline(hipMallocManaged(&h_c, sizeof(int)*row_a*col_b));
cudaCheckError_inline(hipMallocManaged(&h_c_result, sizeof(int)*row_a*col_b));
//Random initialized matrix a on host
for(int i=0; i<row_a; ++i) {
for(int j=0; j<col_a; ++j) {
h_a[i*col_a+j] = rand() % 1024;
}
}
//Random initialized matrix b on host
for(int i=0; i<col_a; ++i) {
for(int j=0; j<col_b; ++j) {
h_b[i*col_b+j] = rand() % 1024;
}
}
cudaCheckError_inline(hipMemPrefetchAsync(h_a, sizeof(int)*row_a*col_a, deviceId));
cudaCheckError_inline(hipMemPrefetchAsync(h_b, sizeof(int)*col_a*col_b, deviceId));
cudaCheckError_inline(hipMemPrefetchAsync(h_c, sizeof(int)*row_a*col_b, deviceId));
int grid_row = (row_a + BLOCK_SIZE - 1) / BLOCK_SIZE;
int grid_col = (col_b + BLOCK_SIZE - 1) / BLOCK_SIZE;
dim3 dimGrid(grid_col, grid_row);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
hipLaunchKernelGGL(( matrixMultiplication), dim3(dimGrid), dim3(dimBlock), 0, 0, h_a, h_b, h_c, row_a, col_a, col_b);
cudaCheckError_inline(hipDeviceSynchronize());
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&gpu_elapsed_time_ms, start, stop);
printf("Time elapsed on matrix multiplication of %dx%d . %dx%d on GPU: %f ms.\n", row_a, col_a, col_a, col_b, gpu_elapsed_time_ms);
/*
//Start counting execution time of cpu computation
hipEventRecord(start, 0);
matrixMultiplication_cpu(h_a, h_b, h_c_result, row_a, col_a, col_b);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&cpu_elapsed_time_ms, start, stop);
printf("Time elapsed on matrix multiplication of %dx%d . %dx%d on CPU: %f ms.\n", row_a, col_a, col_a, col_b, cpu_elapsed_time_ms);
bool check = verifyResult(h_c, h_c_result, row_a, col_b);
if(!check) {
fprintf(stderr, "Error, result not matched.\n");
exit(4);
}else{
printf("Congratulations, results match !!\n");
}
float speedups = cpu_elapsed_time_ms / gpu_elapsed_time_ms;
printf("Overall speedup = %f\n", speedups);
*/
  cudaCheckError_inline(hipFree(h_a));
  cudaCheckError_inline(hipFree(h_b));
  cudaCheckError_inline(hipFree(h_c));
  cudaCheckError_inline(hipFree(h_c_result));
}
| e8f028faeaf37543ea9965b66c81438325533c4f.cu | #include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#define BLOCK_SIZE 32
inline cudaError_t cudaCheckError_inline(cudaError_t result) {
if (result != cudaSuccess) {
fprintf(stderr, "CUDA error %s:%d: '%s'\n", __FILE__, __LINE__, cudaGetErrorString(result));
assert(result == cudaSuccess);
}
return result;
}
__global__ void matrixMultiplication(int* dev_a, int* dev_b, int* dev_c, int row_a, int col_a, int col_b) {
int row = threadIdx.y + blockIdx.y * blockDim.y;
int col = threadIdx.x + blockIdx.x * blockDim.x;
// each (row, col) pair will match on element in resulting matrix -> with shape (row_a, col_b)
int ret=0;
if (row < row_a && col < col_b) {
for(int i=0; i<col_a; ++i) {
ret += dev_a[row * col_a + i] * dev_b[i * col_b + col];//original b
      //ret += dev_a[row*col_a + i] * dev_b[col*col_b + i];//transposed b, but NOT sped up on the GPU
}
dev_c[row*col_b + col] = ret;
}
}
__global__ void matrixTranspose(int* in_mat, int* out_mat, int dim_rows, int dim_cols) {
int row = threadIdx.y + blockIdx.y * blockDim.y;
int col = threadIdx.x + blockIdx.x * blockDim.x;
if (row < dim_rows && col < dim_cols) {
    // out_mat has dim_cols rows of length dim_rows after the transpose
    unsigned int new_pos = col * dim_rows + row;
out_mat[new_pos] = in_mat[row*dim_cols + col];
}
}
void matrixTranspose_cpu(int* in_mat, int* out_mat, int dim_rows, int dim_cols) {
for (int i=0; i<dim_rows; ++i) {
for (int j=0; j<dim_cols; ++j) {
      unsigned int new_pos = j*dim_rows + i; // out_mat has dim_cols rows of length dim_rows
out_mat[new_pos] = in_mat[i*dim_cols+j];
}
}
}
void matrixMultiplication_cpu(int* host_a, int* host_b, int* host_c, int row_a, int col_a, int col_b) {
for (int i=0; i<row_a; ++i) {
for (int j=0; j<col_b; ++j) {
int tmp=0;
for (int k=0; k<col_a; ++k) {
//tmp += host_a[i*col_a+k] * host_b[k*col_b+j];//original b
        tmp += host_a[i*col_a+k] * host_b[j*col_b+k];//transposed b for sped-up cpu multiplication
}
host_c[i*col_b + j] = tmp;
}
}
}
// i-k-j loop order: the innermost loop walks host_b and host_c row by row, so
// memory accesses stay sequential and CPU cache lines are fully reused.
void matrixMultiplication_cpu_cache_friendly(int* host_a, int* host_b, int* host_c, int row_a, int col_a, int col_b) {
for (int i=0; i<row_a; ++i) {
for(int k=0; k<col_a; ++k) {
int tmp = host_a[i*col_a + k];
for(int j=0; j<col_b; ++j) {
host_c[i*col_b+j] += tmp * host_b[k*col_b+j];
}
}
}
}
bool verifyResult(int* h_c, int* h_c_result, int rows, int cols) {
for(int i=0; i<rows; ++i) {
for(int j=0; j<cols; ++j) {
if(h_c[i*cols + j] != h_c_result[i*cols + j]){
printf("Host: %d, Device: %d\n", h_c[i*cols + j], h_c_result[i*cols + j]);
return false;
}
}
}
return true;
}
int main(int argc, char* argv[]) {
if(argc != 4){
fprintf(stderr, "%s", "Usage: ./a.out $row_A $col_A $col_B $thread_count_in_block in 1Dim direction\n");
exit(-1);
}
int row_a = atoi(argv[1]);
int col_a = atoi(argv[2]);
int col_b = atoi(argv[3]);
int deviceId;
cudaGetDevice(&deviceId);
float gpu_elapsed_time_ms;
//float cpu_elapsed_time_ms;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// Start counting execution time of device computation
cudaEventRecord(start, 0);
int* h_a, *h_b, *h_c, *h_c_result;
cudaCheckError_inline(cudaMallocManaged(&h_a, sizeof(int)*row_a*col_a));
cudaCheckError_inline(cudaMallocManaged(&h_b, sizeof(int)*col_a*col_b));
cudaCheckError_inline(cudaMallocManaged(&h_c, sizeof(int)*row_a*col_b));
cudaCheckError_inline(cudaMallocManaged(&h_c_result, sizeof(int)*row_a*col_b));
//Random initialized matrix a on host
for(int i=0; i<row_a; ++i) {
for(int j=0; j<col_a; ++j) {
h_a[i*col_a+j] = rand() % 1024;
}
}
//Random initialized matrix b on host
for(int i=0; i<col_a; ++i) {
for(int j=0; j<col_b; ++j) {
h_b[i*col_b+j] = rand() % 1024;
}
}
cudaCheckError_inline(cudaMemPrefetchAsync(h_a, sizeof(int)*row_a*col_a, deviceId));
cudaCheckError_inline(cudaMemPrefetchAsync(h_b, sizeof(int)*col_a*col_b, deviceId));
cudaCheckError_inline(cudaMemPrefetchAsync(h_c, sizeof(int)*row_a*col_b, deviceId));
int grid_row = (row_a + BLOCK_SIZE - 1) / BLOCK_SIZE;
int grid_col = (col_b + BLOCK_SIZE - 1) / BLOCK_SIZE;
dim3 dimGrid(grid_col, grid_row);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
matrixMultiplication<<<dimGrid, dimBlock>>>(h_a, h_b, h_c, row_a, col_a, col_b);
cudaCheckError_inline(cudaDeviceSynchronize());
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&gpu_elapsed_time_ms, start, stop);
printf("Time elapsed on matrix multiplication of %dx%d . %dx%d on GPU: %f ms.\n", row_a, col_a, col_a, col_b, gpu_elapsed_time_ms);
/*
//Start counting execution time of cpu computation
cudaEventRecord(start, 0);
matrixMultiplication_cpu(h_a, h_b, h_c_result, row_a, col_a, col_b);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&cpu_elapsed_time_ms, start, stop);
printf("Time elapsed on matrix multiplication of %dx%d . %dx%d on CPU: %f ms.\n", row_a, col_a, col_a, col_b, cpu_elapsed_time_ms);
bool check = verifyResult(h_c, h_c_result, row_a, col_b);
if(!check) {
fprintf(stderr, "Error, result not matched.\n");
exit(4);
}else{
printf("Congratulations, results match !!\n");
}
float speedups = cpu_elapsed_time_ms / gpu_elapsed_time_ms;
printf("Overall speedup = %f\n", speedups);
*/
  cudaCheckError_inline(cudaFree(h_a));
  cudaCheckError_inline(cudaFree(h_b));
  cudaCheckError_inline(cudaFree(h_c));
  cudaCheckError_inline(cudaFree(h_c_result));
}
|
ad46bc5dc4b80c6efd99f20d46cdc4b8b750ab09.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdio>
#include <cstdint>
#include <cstdlib>
#include "SyncedMemory.h"
#include "lab1.h"
using namespace std;
#define CHECK {\
auto e = hipDeviceSynchronize();\
if (e != hipSuccess) {\
printf("At " __FILE__ ":%d, %s\n", __LINE__, hipGetErrorString(e));\
abort();\
}\
}
int main(int argc, char **argv)
{
Lab1VideoGenerator g;
Lab1VideoInfo i;
g.get_info(i);
if (i.w == 0 or i.h == 0 or i.n_frame == 0 or i.fps_n == 0 or i.fps_d == 0) {
puts("Cannot be zero");
abort();
} else if (i.w%2 != 0 or i.h%2 != 0) {
puts("Only even frame size is supported");
abort();
}
	unsigned FRAME_SIZE = i.w*i.h*3/2; // YUV 4:2:0 (C420): full-size Y plane plus two quarter-size chroma planes
MemoryBuffer<uint8_t> frameb(FRAME_SIZE);
auto frames = frameb.CreateSync(FRAME_SIZE);
FILE *fp = fopen("result.y4m", "wb");
fprintf(fp, "YUV4MPEG2 W%d H%d F%d:%d Ip A1:1 C420\n", i.w, i.h, i.fps_n, i.fps_d);
for (unsigned j = 0; j < i.n_frame; ++j) {
fputs("FRAME\n", fp);
g.Generate(frames.get_gpu_wo());
fwrite(frames.get_cpu_ro(), sizeof(uint8_t), FRAME_SIZE, fp);
}
fclose(fp);
return 0;
}
| ad46bc5dc4b80c6efd99f20d46cdc4b8b750ab09.cu | #include <cstdio>
#include <cstdint>
#include <cstdlib>
#include "SyncedMemory.h"
#include "lab1.h"
using namespace std;
#define CHECK {\
auto e = cudaDeviceSynchronize();\
if (e != cudaSuccess) {\
printf("At " __FILE__ ":%d, %s\n", __LINE__, cudaGetErrorString(e));\
abort();\
}\
}
int main(int argc, char **argv)
{
Lab1VideoGenerator g;
Lab1VideoInfo i;
g.get_info(i);
if (i.w == 0 or i.h == 0 or i.n_frame == 0 or i.fps_n == 0 or i.fps_d == 0) {
puts("Cannot be zero");
abort();
} else if (i.w%2 != 0 or i.h%2 != 0) {
puts("Only even frame size is supported");
abort();
}
	unsigned FRAME_SIZE = i.w*i.h*3/2; // YUV 4:2:0 (C420): full-size Y plane plus two quarter-size chroma planes
MemoryBuffer<uint8_t> frameb(FRAME_SIZE);
auto frames = frameb.CreateSync(FRAME_SIZE);
FILE *fp = fopen("result.y4m", "wb");
fprintf(fp, "YUV4MPEG2 W%d H%d F%d:%d Ip A1:1 C420\n", i.w, i.h, i.fps_n, i.fps_d);
for (unsigned j = 0; j < i.n_frame; ++j) {
fputs("FRAME\n", fp);
g.Generate(frames.get_gpu_wo());
fwrite(frames.get_cpu_ro(), sizeof(uint8_t), FRAME_SIZE, fp);
}
fclose(fp);
return 0;
}
|
decc28eb5716a10fc5b2ef9ab1a9ebbcd75ed67e.hip | // !!! This is a file automatically generated by hipify!!!
// Generated by Hybridizer version 1.2.10484
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#if defined(__HIPCC__)
#ifndef hyb_device
#define hyb_inline __forceinline__
#define hyb_constant __constant__
#if defined(HYBRIDIZER_NO_HOST)
#define hyb_host
#define hyb_device __device__
#else
#define hyb_host __host__
#define hyb_device __device__
#endif
#endif
#else
#ifndef hyb_device
#define hyb_inline inline
#define hyb_device
#define hyb_constant
#endif
#endif
#if defined _WIN32 || defined _WIN64 || defined __CYGWIN__
#define BUILDING_DLL
#ifdef BUILDING_DLL
#ifdef __GNUC__
#define DLL_PUBLIC __attribute__ ((dllexport))
#else
      #define DLL_PUBLIC __declspec(dllexport) // Note: actually gcc seems to also support this syntax.
#endif
#else
#ifdef __GNUC__
#define DLL_PUBLIC __attribute__ ((dllimport))
#else
      #define DLL_PUBLIC __declspec(dllimport) // Note: actually gcc seems to also support this syntax.
#endif
#endif
#define DLL_LOCAL
#else
#if __GNUC__ >= 4
#define DLL_PUBLIC __attribute__ ((visibility ("default")))
#define DLL_LOCAL __attribute__ ((visibility ("hidden")))
#else
#define DLL_PUBLIC
#define DLL_LOCAL
#endif
#endif
#if CUDART_VERSION >= 9000
#include <hip/hip_cooperative_groups.h>
#endif
// hybridizer core types
#include <cstdint>
namespace hybridizer { struct hybridobject ; }
namespace hybridizer { struct runtime ; }
#pragma region defined enums and types
#if defined(__cplusplus) || defined(__HIPCC__)
namespace Stim {
struct Program ;
} // Leaving namespace
namespace System {
struct Console ;
} // Leaving namespace
// Intrinsic type TextWriter used
#define __TYPE_DECL_FILE___
namespace Stim {
struct Program___c__DisplayClass1_0 ;
} // Leaving namespace
namespace System { namespace Threading { namespace Tasks {
struct Parallel ;
} } } // Leaving namespace
// Intrinsic type Nullable`1 used
#define __TYPE_DECL_hybridizer_nullable__int64_t____
namespace System { namespace Threading { namespace Tasks {
struct ParallelLoopResult ;
} } } // Leaving namespace
// Intrinsic type Action`1 used
#define __TYPE_DECL_hybridizer_action__int____
#endif
#pragma endregion
extern "C" void* __hybridizer_init_basic_runtime();
#include <cstdio>
// generating GetTypeID function
#include <cstring> // for strcmp
extern "C" DLL_PUBLIC int HybridizerGetTypeID( const char* fullTypeName)
{
if (strcmp (fullTypeName, "Stim.Program") == 0) return 1000000 ;
if (strcmp (fullTypeName, "Stim.Program+<>c__DisplayClass1_0") == 0) return 1000001 ;
if (strcmp (fullTypeName, "System.Action<System.Int32>") == 0) return 1000002 ;
if (strcmp (fullTypeName, "System.Console") == 0) return 1000003 ;
if (strcmp (fullTypeName, "System.IFormatProvider") == 0) return 1000004 ;
if (strcmp (fullTypeName, "System.IO.TextWriter") == 0) return 1000005 ;
if (strcmp (fullTypeName, "System.Nullable<System.Int64>") == 0) return 1000006 ;
if (strcmp (fullTypeName, "System.Object") == 0) return 1000007 ;
if (strcmp (fullTypeName, "System.String") == 0) return 1000008 ;
if (strcmp (fullTypeName, "System.Threading.Tasks.Parallel") == 0) return 1000009 ;
if (strcmp (fullTypeName, "System.Threading.Tasks.ParallelLoopResult") == 0) return 1000010 ;
return 0 ;
}
extern "C" DLL_PUBLIC const char* HybridizerGetTypeFromID( const int typeId)
{
if (typeId == 1000000) return "Stim.Program" ;
if (typeId == 1000001) return "Stim.Program+<>c__DisplayClass1_0" ;
if (typeId == 1000002) return "System.Action<System.Int32>" ;
if (typeId == 1000003) return "System.Console" ;
if (typeId == 1000004) return "System.IFormatProvider" ;
if (typeId == 1000005) return "System.IO.TextWriter" ;
if (typeId == 1000006) return "System.Nullable<System.Int64>" ;
if (typeId == 1000007) return "System.Object" ;
if (typeId == 1000008) return "System.String" ;
if (typeId == 1000009) return "System.Threading.Tasks.Parallel" ;
if (typeId == 1000010) return "System.Threading.Tasks.ParallelLoopResult" ;
return "" ;
}
extern "C" DLL_PUBLIC int HybridizerGetShallowSize (const char* fullTypeName)
{
#ifdef __TYPE_DECL__Stim_Program___
if (strcmp (fullTypeName, "Stim.Program") == 0) return 8 ;
#endif
#ifdef __TYPE_DECL__Stim_Program___c__DisplayClass1_0__
if (strcmp (fullTypeName, "Stim.Program+<>c__DisplayClass1_0") == 0) return 24 ;
#endif
#ifdef __TYPE_DECL_hybridizer_action__T____
if (strcmp (fullTypeName, "System.Action<System.Int32>") == 0) return 24 ;
#endif
#ifdef __TYPE_DECL_hybridizer_nullable__T____
if (strcmp (fullTypeName, "System.Nullable<System.Int64>") == 0) return 16 ;
#endif
#ifdef __TYPE_DECL_hybridizer_hybridobject___
if (strcmp (fullTypeName, "System.Object") == 0) return 8 ;
#endif
#ifdef __TYPE_DECL_hybridizer_string__
if (strcmp (fullTypeName, "System.String") == 0) return 16 ;
#endif
#ifdef __TYPE_DECL__System_Threading_Tasks_ParallelLoopResult__
if (strcmp (fullTypeName, "System.Threading.Tasks.ParallelLoopResult") == 0) return 24 ;
#endif
return 0 ;
}
// Get various Hybridizer properties at runtime
struct __hybridizer_properties {
int32_t UseHybridArrays;
int32_t Flavor;
int32_t CompatibilityMode;
int32_t DelegateSupport;
int32_t _dummy;
};
extern "C" DLL_PUBLIC __hybridizer_properties __HybridizerGetProperties () {
__hybridizer_properties res;
res.UseHybridArrays = 0;
res.Flavor = 1;
res.DelegateSupport = 0;
res.CompatibilityMode = 0;
return res ;
}
#include <hip/hip_runtime.h>
struct HybridModule
{
void* module_data ;
hipModule_t module ;
} ;
extern char __hybridizer_cubin_module_data [] ;
static HybridModule __hybridizer__gs_module = { 0 };
static int __hybridizer_initialized = 0;
// error code translation: driver-module result codes to runtime error codes (both are hipError_t after hipify)
namespace hybridizer {
hipError_t translateCUresult(int cures)
{
switch (cures)
{
case hipSuccess: return hipSuccess ;
case hipErrorInvalidValue: return hipErrorInvalidValue ;
case hipErrorLaunchFailure: return hipErrorLaunchFailure ;
case hipErrorNotSupported: return hipErrorNotSupported ;
case hipErrorIllegalInstruction : return hipErrorLaunchFailure ;
default: return hipErrorUnknown ;
}
}
} // namespace hybridizer
#pragma region Wrappers definitions
extern "C" DLL_PUBLIC int Stimx46Programx46Hello_OccupancyCalculator_MaxActiveBlocksPerSM(int* numBlocks, int blockSize, int sharedMemSize)
{
if (0 == __hybridizer_initialized)
{
hipDeviceSynchronize();
__hybridizer_initialized = 1;
}
hipError_t cures;
if (__hybridizer__gs_module.module_data == 0)
{
cures = hipModuleLoadData(&(__hybridizer__gs_module.module), __hybridizer_cubin_module_data);
__hybridizer__gs_module.module_data = (void*)__hybridizer_cubin_module_data;
if (cures != hipSuccess) return hybridizer::translateCUresult((int)cures);
}
hipFunction_t __hybridizer__cufunc;
cures = hipModuleGetFunction(&__hybridizer__cufunc, __hybridizer__gs_module.module, "Stimx46Programx46Hello");
if (cures != hipSuccess) return hybridizer::translateCUresult((int)cures);
return hipModuleOccupancyMaxActiveBlocksPerMultiprocessor(numBlocks, __hybridizer__cufunc, blockSize, sharedMemSize);
}
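// Editor's sketch (not generated code): one way a host program might combine the
// occupancy query above with the device's SM count to pick a grid size. The
// helper name and the default block size of 256 are illustrative assumptions.
inline int Stimx46Programx46Hello_SuggestedGridSize(int blockSize = 256)
{
	int blocksPerSM = 0;
	if (Stimx46Programx46Hello_OccupancyCalculator_MaxActiveBlocksPerSM(&blocksPerSM, blockSize, 0) != 0 || blocksPerSM <= 0)
	{
		return 1; // fall back to a single block if the query fails
	}
	hipDeviceProp_t prop;
	if (hipGetDeviceProperties(&prop, 0) != hipSuccess)
	{
		return blocksPerSM;
	}
	return blocksPerSM * prop.multiProcessorCount;
}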
extern "C" DLL_PUBLIC int Stimx46Programx46Hello_ExternCWrapper_CUDA( int gridDim_x, int gridDim_y, int blockDim_x, int blockDim_y, int blockDim_z, int shared)
{
if (0 == __hybridizer_initialized) {
hipDeviceSynchronize();
__hybridizer_initialized = 1;
}
hipError_t cures ;
if (__hybridizer__gs_module.module_data == 0)
{
cures = hipModuleLoadData (&(__hybridizer__gs_module.module), __hybridizer_cubin_module_data) ;
__hybridizer__gs_module.module_data = (void*) __hybridizer_cubin_module_data ;
if (cures != hipSuccess) return hybridizer::translateCUresult((int)cures) ;
}
hipFunction_t __hybridizer__cufunc ;
cures = hipModuleGetFunction (&__hybridizer__cufunc, __hybridizer__gs_module.module, "Stimx46Programx46Hello") ;
if (cures != hipSuccess) return hybridizer::translateCUresult((int)cures) ;
hybridizer::runtime* __hybridizer_runtime = (hybridizer::runtime*) __hybridizer_init_basic_runtime();
void* __hybridizer_launch_config[2] =
{
(void*)&__hybridizer_runtime,
(void*)0
} ;
shared += 16 ; if (shared > 48*1024) shared = 48*1024 ;
cures = hipModuleLaunchKernel (__hybridizer__cufunc, gridDim_x, gridDim_y, 1, blockDim_x, blockDim_y, blockDim_z, shared, 0, __hybridizer_launch_config, 0) ;
if (cures != hipSuccess) return hybridizer::translateCUresult((int)cures) ;
int cudaLaunchRes = (int)::hipPeekAtLastError ();
if (cudaLaunchRes != 0) return cudaLaunchRes;
int __synchronizeRes = (int)::hipDeviceSynchronize () ;
return __synchronizeRes ;
}
extern "C" DLL_PUBLIC int Stimx46Programx46Hello_ExternCWrapperStream_CUDA( int gridDim_x, int gridDim_y, int blockDim_x, int blockDim_y, int blockDim_z, int shared, hipStream_t st)
{
if (0 == __hybridizer_initialized) {
hipDeviceSynchronize();
__hybridizer_initialized = 1;
}
hipError_t cures ;
if (__hybridizer__gs_module.module_data == 0)
{
cures = hipModuleLoadData (&(__hybridizer__gs_module.module), __hybridizer_cubin_module_data) ;
__hybridizer__gs_module.module_data = (void*) __hybridizer_cubin_module_data ;
if (cures != hipSuccess) return hybridizer::translateCUresult((int)cures) ;
}
hipFunction_t __hybridizer__cufunc ;
cures = hipModuleGetFunction (&__hybridizer__cufunc, __hybridizer__gs_module.module, "Stimx46Programx46Hello") ;
if (cures != hipSuccess) return hybridizer::translateCUresult((int)cures) ;
hybridizer::runtime* __hybridizer_runtime = (hybridizer::runtime*) __hybridizer_init_basic_runtime();
void* __hybridizer_launch_config[3] =
{
(void*)&__hybridizer_runtime,
(void*)0
} ;
shared += 16 ; if (shared > 48*1024) shared = 48*1024 ;
cures = hipModuleLaunchKernel (__hybridizer__cufunc, gridDim_x, gridDim_y, 1, blockDim_x, blockDim_y, blockDim_z, shared, st, __hybridizer_launch_config, 0) ;
if (cures != hipSuccess) return hybridizer::translateCUresult((int)cures) ;
int cudaLaunchRes = (int)::hipPeekAtLastError ();
if (cudaLaunchRes != 0) return cudaLaunchRes;
return cudaLaunchRes;
}
#if CUDART_VERSION >= 9000
extern "C" DLL_PUBLIC int Stimx46Programx46Hello_ExternCWrapperGridSync_CUDA( int gridDim_x, int gridDim_y, int blockDim_x, int blockDim_y, int blockDim_z, int shared)
{
if (0 == __hybridizer_initialized) {
hipDeviceSynchronize();
__hybridizer_initialized = 1;
}
hipError_t cures ;
if (__hybridizer__gs_module.module_data == 0)
{
cures = hipModuleLoadData (&(__hybridizer__gs_module.module), __hybridizer_cubin_module_data) ;
__hybridizer__gs_module.module_data = (void*) __hybridizer_cubin_module_data ;
if (cures != hipSuccess) return hybridizer::translateCUresult((int)cures) ;
}
hipFunction_t __hybridizer__cufunc ;
cures = hipModuleGetFunction (&__hybridizer__cufunc, __hybridizer__gs_module.module, "Stimx46Programx46Hello") ;
if (cures != hipSuccess) return hybridizer::translateCUresult((int)cures) ;
hybridizer::runtime* __hybridizer_runtime = (hybridizer::runtime*) __hybridizer_init_basic_runtime();
void* __hybridizer_launch_config[2] =
{
(void*)&__hybridizer_runtime,
(void*)0
} ;
shared += 16 ; if (shared > 48*1024) shared = 48*1024 ;
cures = hipModuleLaunchCooperativeKernel (__hybridizer__cufunc, gridDim_x, gridDim_y, 1, blockDim_x, blockDim_y, blockDim_z, shared, 0, __hybridizer_launch_config) ;
if (cures != hipSuccess) return hybridizer::translateCUresult((int)cures) ;
int cudaLaunchRes = (int)::hipPeekAtLastError ();
if (cudaLaunchRes != 0) return cudaLaunchRes;
int __synchronizeRes = (int)::hipDeviceSynchronize () ;
return __synchronizeRes ;
}
#endif
#if CUDART_VERSION >= 9000
extern "C" DLL_PUBLIC int Stimx46Programx46Hello_ExternCWrapperStreamGridSync_CUDA( int gridDim_x, int gridDim_y, int blockDim_x, int blockDim_y, int blockDim_z, int shared, hipStream_t st)
{
if (0 == __hybridizer_initialized) {
hipDeviceSynchronize();
__hybridizer_initialized = 1;
}
hipError_t cures ;
if (__hybridizer__gs_module.module_data == 0)
{
cures = hipModuleLoadData (&(__hybridizer__gs_module.module), __hybridizer_cubin_module_data) ;
__hybridizer__gs_module.module_data = (void*) __hybridizer_cubin_module_data ;
if (cures != hipSuccess) return hybridizer::translateCUresult((int)cures) ;
}
hipFunction_t __hybridizer__cufunc ;
cures = hipModuleGetFunction (&__hybridizer__cufunc, __hybridizer__gs_module.module, "Stimx46Programx46Hello") ;
if (cures != hipSuccess) return hybridizer::translateCUresult((int)cures) ;
hybridizer::runtime* __hybridizer_runtime = (hybridizer::runtime*) __hybridizer_init_basic_runtime();
void* __hybridizer_launch_config[3] =
{
(void*)&__hybridizer_runtime,
(void*)0
} ;
shared += 16 ; if (shared > 48*1024) shared = 48*1024 ;
cures = hipModuleLaunchCooperativeKernel (__hybridizer__cufunc, gridDim_x, gridDim_y, 1, blockDim_x, blockDim_y, blockDim_z, shared, st, __hybridizer_launch_config) ;
if (cures != hipSuccess) return hybridizer::translateCUresult((int)cures) ;
int cudaLaunchRes = (int)::hipPeekAtLastError ();
if (cudaLaunchRes != 0) return cudaLaunchRes;
return cudaLaunchRes;
}
#endif
extern "C" DLL_PUBLIC int Stimx46Programx46Add_OccupancyCalculator_MaxActiveBlocksPerSM(int* numBlocks, int blockSize, int sharedMemSize)
{
if (0 == __hybridizer_initialized)
{
hipDeviceSynchronize();
__hybridizer_initialized = 1;
}
hipError_t cures;
if (__hybridizer__gs_module.module_data == 0)
{
cures = hipModuleLoadData(&(__hybridizer__gs_module.module), __hybridizer_cubin_module_data);
__hybridizer__gs_module.module_data = (void*)__hybridizer_cubin_module_data;
if (cures != hipSuccess) return hybridizer::translateCUresult((int)cures);
}
hipFunction_t __hybridizer__cufunc;
cures = hipModuleGetFunction(&__hybridizer__cufunc, __hybridizer__gs_module.module, "Stimx46Programx46Add");
if (cures != hipSuccess) return hybridizer::translateCUresult((int)cures);
return hipModuleOccupancyMaxActiveBlocksPerMultiprocessor(numBlocks, __hybridizer__cufunc, blockSize, sharedMemSize);
}
extern "C" DLL_PUBLIC int Stimx46Programx46Add_ExternCWrapper_CUDA( int gridDim_x, int gridDim_y, int blockDim_x, int blockDim_y, int blockDim_z, int shared, float* const a, float* const b, int N)
{
if (0 == __hybridizer_initialized) {
hipDeviceSynchronize();
__hybridizer_initialized = 1;
}
hipError_t cures ;
if (__hybridizer__gs_module.module_data == 0)
{
cures = hipModuleLoadData (&(__hybridizer__gs_module.module), __hybridizer_cubin_module_data) ;
__hybridizer__gs_module.module_data = (void*) __hybridizer_cubin_module_data ;
if (cures != hipSuccess) return hybridizer::translateCUresult((int)cures) ;
}
hipFunction_t __hybridizer__cufunc ;
cures = hipModuleGetFunction (&__hybridizer__cufunc, __hybridizer__gs_module.module, "Stimx46Programx46Add") ;
if (cures != hipSuccess) return hybridizer::translateCUresult((int)cures) ;
hybridizer::runtime* __hybridizer_runtime = (hybridizer::runtime*) __hybridizer_init_basic_runtime();
void* __hybridizer_launch_config[5] =
{
(void*)&__hybridizer_runtime,
(void*)&a,
(void*)&b,
(void*)&N,
(void*)0
} ;
shared += 16 ; if (shared > 48*1024) shared = 48*1024 ;
cures = hipModuleLaunchKernel (__hybridizer__cufunc, gridDim_x, gridDim_y, 1, blockDim_x, blockDim_y, blockDim_z, shared, 0, __hybridizer_launch_config, 0) ;
if (cures != hipSuccess) return hybridizer::translateCUresult((int)cures) ;
int cudaLaunchRes = (int)::hipPeekAtLastError ();
if (cudaLaunchRes != 0) return cudaLaunchRes;
int __synchronizeRes = (int)::hipDeviceSynchronize () ;
return __synchronizeRes ;
}
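// Editor's sketch (not generated code): a minimal host-side call into the
// extern "C" wrapper above. It assumes 'a' and 'b' are device pointers of
// length N, as the wrapper signature suggests; the 128-thread block size is an
// illustrative choice and the buffers are left uninitialized, since this only
// demonstrates the launch path.
inline int Stimx46Programx46Add_Example(int N)
{
	float* d_a = 0;
	float* d_b = 0;
	if (hipMalloc((void**)&d_a, N * sizeof(float)) != hipSuccess) return (int)hipErrorMemoryAllocation;
	if (hipMalloc((void**)&d_b, N * sizeof(float)) != hipSuccess) { hipFree(d_a); return (int)hipErrorMemoryAllocation; }
	int gridDim_x = (N + 127) / 128;
	int err = Stimx46Programx46Add_ExternCWrapper_CUDA(gridDim_x, 1, 128, 1, 1, /*shared=*/0, d_a, d_b, N);
	hipFree(d_a);
	hipFree(d_b);
	return err;
}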
extern "C" DLL_PUBLIC int Stimx46Programx46Add_ExternCWrapperStream_CUDA( int gridDim_x, int gridDim_y, int blockDim_x, int blockDim_y, int blockDim_z, int shared, hipStream_t st, float* const a, float* const b, int N)
{
if (0 == __hybridizer_initialized) {
hipDeviceSynchronize();
__hybridizer_initialized = 1;
}
hipError_t cures ;
if (__hybridizer__gs_module.module_data == 0)
{
cures = hipModuleLoadData (&(__hybridizer__gs_module.module), __hybridizer_cubin_module_data) ;
__hybridizer__gs_module.module_data = (void*) __hybridizer_cubin_module_data ;
if (cures != hipSuccess) return hybridizer::translateCUresult((int)cures) ;
}
hipFunction_t __hybridizer__cufunc ;
cures = hipModuleGetFunction (&__hybridizer__cufunc, __hybridizer__gs_module.module, "Stimx46Programx46Add") ;
if (cures != hipSuccess) return hybridizer::translateCUresult((int)cures) ;
hybridizer::runtime* __hybridizer_runtime = (hybridizer::runtime*) __hybridizer_init_basic_runtime();
void* __hybridizer_launch_config[6] =
{
(void*)&__hybridizer_runtime,
(void*)&a,
(void*)&b,
(void*)&N,
(void*)0
} ;
shared += 16 ; if (shared > 48*1024) shared = 48*1024 ;
cures = hipModuleLaunchKernel (__hybridizer__cufunc, gridDim_x, gridDim_y, 1, blockDim_x, blockDim_y, blockDim_z, shared, st, __hybridizer_launch_config, 0) ;
if (cures != hipSuccess) return hybridizer::translateCUresult((int)cures) ;
int cudaLaunchRes = (int)::hipPeekAtLastError ();
if (cudaLaunchRes != 0) return cudaLaunchRes;
return cudaLaunchRes;
}
#if CUDART_VERSION >= 9000
extern "C" DLL_PUBLIC int Stimx46Programx46Add_ExternCWrapperGridSync_CUDA( int gridDim_x, int gridDim_y, int blockDim_x, int blockDim_y, int blockDim_z, int shared, float* const a, float* const b, int N)
{
if (0 == __hybridizer_initialized) {
hipDeviceSynchronize();
__hybridizer_initialized = 1;
}
hipError_t cures ;
if (__hybridizer__gs_module.module_data == 0)
{
cures = hipModuleLoadData (&(__hybridizer__gs_module.module), __hybridizer_cubin_module_data) ;
__hybridizer__gs_module.module_data = (void*) __hybridizer_cubin_module_data ;
if (cures != hipSuccess) return hybridizer::translateCUresult((int)cures) ;
}
hipFunction_t __hybridizer__cufunc ;
cures = hipModuleGetFunction (&__hybridizer__cufunc, __hybridizer__gs_module.module, "Stimx46Programx46Add") ;
if (cures != hipSuccess) return hybridizer::translateCUresult((int)cures) ;
hybridizer::runtime* __hybridizer_runtime = (hybridizer::runtime*) __hybridizer_init_basic_runtime();
void* __hybridizer_launch_config[5] =
{
(void*)&__hybridizer_runtime,
(void*)&a,
(void*)&b,
(void*)&N,
(void*)0
} ;
shared += 16 ; if (shared > 48*1024) shared = 48*1024 ;
cures = hipModuleLaunchCooperativeKernel (__hybridizer__cufunc, gridDim_x, gridDim_y, 1, blockDim_x, blockDim_y, blockDim_z, shared, 0, __hybridizer_launch_config) ;
if (cures != hipSuccess) return hybridizer::translateCUresult((int)cures) ;
int cudaLaunchRes = (int)::hipPeekAtLastError ();
if (cudaLaunchRes != 0) return cudaLaunchRes;
int __synchronizeRes = (int)::hipDeviceSynchronize () ;
return __synchronizeRes ;
}
#endif
#if CUDART_VERSION >= 9000
extern "C" DLL_PUBLIC int Stimx46Programx46Add_ExternCWrapperStreamGridSync_CUDA( int gridDim_x, int gridDim_y, int blockDim_x, int blockDim_y, int blockDim_z, int shared, hipStream_t st, float* const a, float* const b, int N)
{
if (0 == __hybridizer_initialized) {
hipDeviceSynchronize();
__hybridizer_initialized = 1;
}
hipError_t cures ;
if (__hybridizer__gs_module.module_data == 0)
{
cures = hipModuleLoadData (&(__hybridizer__gs_module.module), __hybridizer_cubin_module_data) ;
__hybridizer__gs_module.module_data = (void*) __hybridizer_cubin_module_data ;
if (cures != hipSuccess) return hybridizer::translateCUresult((int)cures) ;
}
hipFunction_t __hybridizer__cufunc ;
cures = hipModuleGetFunction (&__hybridizer__cufunc, __hybridizer__gs_module.module, "Stimx46Programx46Add") ;
if (cures != hipSuccess) return hybridizer::translateCUresult((int)cures) ;
hybridizer::runtime* __hybridizer_runtime = (hybridizer::runtime*) __hybridizer_init_basic_runtime();
void* __hybridizer_launch_config[6] =
{
(void*)&__hybridizer_runtime,
(void*)&a,
(void*)&b,
(void*)&N,
(void*)0
} ;
shared += 16 ; if (shared > 48*1024) shared = 48*1024 ;
cures = hipModuleLaunchCooperativeKernel (__hybridizer__cufunc, gridDim_x, gridDim_y, 1, blockDim_x, blockDim_y, blockDim_z, shared, st, __hybridizer_launch_config) ;
if (cures != hipSuccess) return hybridizer::translateCUresult((int)cures) ;
int cudaLaunchRes = (int)::hipPeekAtLastError ();
if (cudaLaunchRes != 0) return cudaLaunchRes;
return cudaLaunchRes;
}
#endif
#pragma endregion
| decc28eb5716a10fc5b2ef9ab1a9ebbcd75ed67e.cu | // Generated by Hybridizer version 1.2.10484
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#if defined(__CUDACC__)
#ifndef hyb_device
#define hyb_inline __forceinline__
#define hyb_constant __constant__
#if defined(HYBRIDIZER_NO_HOST)
#define hyb_host
#define hyb_device __device__
#else
#define hyb_host __host__
#define hyb_device __device__
#endif
#endif
#else
#ifndef hyb_device
#define hyb_inline inline
#define hyb_device
#define hyb_constant
#endif
#endif
#if defined _WIN32 || defined _WIN64 || defined __CYGWIN__
#define BUILDING_DLL
#ifdef BUILDING_DLL
#ifdef __GNUC__
#define DLL_PUBLIC __attribute__ ((dllexport))
#else
      #define DLL_PUBLIC __declspec(dllexport) // Note: actually gcc seems to also support this syntax.
#endif
#else
#ifdef __GNUC__
#define DLL_PUBLIC __attribute__ ((dllimport))
#else
      #define DLL_PUBLIC __declspec(dllimport) // Note: actually gcc seems to also support this syntax.
#endif
#endif
#define DLL_LOCAL
#else
#if __GNUC__ >= 4
#define DLL_PUBLIC __attribute__ ((visibility ("default")))
#define DLL_LOCAL __attribute__ ((visibility ("hidden")))
#else
#define DLL_PUBLIC
#define DLL_LOCAL
#endif
#endif
#if CUDART_VERSION >= 9000
#include <cooperative_groups.h>
#endif
// hybridizer core types
#include <cstdint>
namespace hybridizer { struct hybridobject ; }
namespace hybridizer { struct runtime ; }
#pragma region defined enums and types
#if defined(__cplusplus) || defined(__CUDACC__)
namespace Stim {
struct Program ;
} // Leaving namespace
namespace System {
struct Console ;
} // Leaving namespace
// Intrinsic type TextWriter used
#define __TYPE_DECL_FILE___
namespace Stim {
struct Program___c__DisplayClass1_0 ;
} // Leaving namespace
namespace System { namespace Threading { namespace Tasks {
struct Parallel ;
} } } // Leaving namespace
// Intrinsic type Nullable`1 used
#define __TYPE_DECL_hybridizer_nullable__int64_t____
namespace System { namespace Threading { namespace Tasks {
struct ParallelLoopResult ;
} } } // Leaving namespace
// Intrinsic type Action`1 used
#define __TYPE_DECL_hybridizer_action__int____
#endif
#pragma endregion
extern "C" void* __hybridizer_init_basic_runtime();
#include <cstdio>
// generating GetTypeID function
#include <cstring> // for strcmp
extern "C" DLL_PUBLIC int HybridizerGetTypeID( const char* fullTypeName)
{
if (strcmp (fullTypeName, "Stim.Program") == 0) return 1000000 ;
if (strcmp (fullTypeName, "Stim.Program+<>c__DisplayClass1_0") == 0) return 1000001 ;
if (strcmp (fullTypeName, "System.Action<System.Int32>") == 0) return 1000002 ;
if (strcmp (fullTypeName, "System.Console") == 0) return 1000003 ;
if (strcmp (fullTypeName, "System.IFormatProvider") == 0) return 1000004 ;
if (strcmp (fullTypeName, "System.IO.TextWriter") == 0) return 1000005 ;
if (strcmp (fullTypeName, "System.Nullable<System.Int64>") == 0) return 1000006 ;
if (strcmp (fullTypeName, "System.Object") == 0) return 1000007 ;
if (strcmp (fullTypeName, "System.String") == 0) return 1000008 ;
if (strcmp (fullTypeName, "System.Threading.Tasks.Parallel") == 0) return 1000009 ;
if (strcmp (fullTypeName, "System.Threading.Tasks.ParallelLoopResult") == 0) return 1000010 ;
return 0 ;
}
extern "C" DLL_PUBLIC const char* HybridizerGetTypeFromID( const int typeId)
{
if (typeId == 1000000) return "Stim.Program" ;
if (typeId == 1000001) return "Stim.Program+<>c__DisplayClass1_0" ;
if (typeId == 1000002) return "System.Action<System.Int32>" ;
if (typeId == 1000003) return "System.Console" ;
if (typeId == 1000004) return "System.IFormatProvider" ;
if (typeId == 1000005) return "System.IO.TextWriter" ;
if (typeId == 1000006) return "System.Nullable<System.Int64>" ;
if (typeId == 1000007) return "System.Object" ;
if (typeId == 1000008) return "System.String" ;
if (typeId == 1000009) return "System.Threading.Tasks.Parallel" ;
if (typeId == 1000010) return "System.Threading.Tasks.ParallelLoopResult" ;
return "" ;
}
extern "C" DLL_PUBLIC int HybridizerGetShallowSize (const char* fullTypeName)
{
#ifdef __TYPE_DECL__Stim_Program___
if (strcmp (fullTypeName, "Stim.Program") == 0) return 8 ;
#endif
#ifdef __TYPE_DECL__Stim_Program___c__DisplayClass1_0__
if (strcmp (fullTypeName, "Stim.Program+<>c__DisplayClass1_0") == 0) return 24 ;
#endif
#ifdef __TYPE_DECL_hybridizer_action__T____
if (strcmp (fullTypeName, "System.Action<System.Int32>") == 0) return 24 ;
#endif
#ifdef __TYPE_DECL_hybridizer_nullable__T____
if (strcmp (fullTypeName, "System.Nullable<System.Int64>") == 0) return 16 ;
#endif
#ifdef __TYPE_DECL_hybridizer_hybridobject___
if (strcmp (fullTypeName, "System.Object") == 0) return 8 ;
#endif
#ifdef __TYPE_DECL_hybridizer_string__
if (strcmp (fullTypeName, "System.String") == 0) return 16 ;
#endif
#ifdef __TYPE_DECL__System_Threading_Tasks_ParallelLoopResult__
if (strcmp (fullTypeName, "System.Threading.Tasks.ParallelLoopResult") == 0) return 24 ;
#endif
return 0 ;
}
// Get various Hybridizer properties at runtime
struct __hybridizer_properties {
int32_t UseHybridArrays;
int32_t Flavor;
int32_t CompatibilityMode;
int32_t DelegateSupport;
int32_t _dummy;
};
extern "C" DLL_PUBLIC __hybridizer_properties __HybridizerGetProperties () {
__hybridizer_properties res;
res.UseHybridArrays = 0;
res.Flavor = 1;
res.DelegateSupport = 0;
res.CompatibilityMode = 0;
return res ;
}
#include <cuda.h>
struct HybridModule
{
void* module_data ;
CUmodule module ;
} ;
extern char __hybridizer_cubin_module_data [] ;
static HybridModule __hybridizer__gs_module = { 0 };
static int __hybridizer_initialized = 0;
// error code translation from CUresult to cudaError_t
namespace hybridizer {
cudaError_t translateCUresult(int cures)
{
switch (cures)
{
case CUDA_SUCCESS: return cudaSuccess ;
case CUDA_ERROR_INVALID_VALUE: return cudaErrorInvalidValue ;
case CUDA_ERROR_LAUNCH_FAILED: return cudaErrorLaunchFailure ;
case CUDA_ERROR_NOT_SUPPORTED: return cudaErrorNotSupported ;
case CUDA_ERROR_ILLEGAL_INSTRUCTION : return cudaErrorLaunchFailure ;
default: return cudaErrorUnknown ;
}
}
} // namespace hybridizer
#pragma region Wrappers definitions
extern "C" DLL_PUBLIC int Stimx46Programx46Hello_OccupancyCalculator_MaxActiveBlocksPerSM(int* numBlocks, int blockSize, int sharedMemSize)
{
if (0 == __hybridizer_initialized)
{
cudaDeviceSynchronize();
__hybridizer_initialized = 1;
}
CUresult cures;
if (__hybridizer__gs_module.module_data == 0)
{
cures = cuModuleLoadData(&(__hybridizer__gs_module.module), __hybridizer_cubin_module_data);
__hybridizer__gs_module.module_data = (void*)__hybridizer_cubin_module_data;
if (cures != CUDA_SUCCESS) return hybridizer::translateCUresult((int)cures);
}
CUfunction __hybridizer__cufunc;
cures = cuModuleGetFunction(&__hybridizer__cufunc, __hybridizer__gs_module.module, "Stimx46Programx46Hello");
if (cures != CUDA_SUCCESS) return hybridizer::translateCUresult((int)cures);
return cuOccupancyMaxActiveBlocksPerMultiprocessor(numBlocks, __hybridizer__cufunc, blockSize, sharedMemSize);
}
extern "C" DLL_PUBLIC int Stimx46Programx46Hello_ExternCWrapper_CUDA( int gridDim_x, int gridDim_y, int blockDim_x, int blockDim_y, int blockDim_z, int shared)
{
if (0 == __hybridizer_initialized) {
cudaDeviceSynchronize();
__hybridizer_initialized = 1;
}
CUresult cures ;
if (__hybridizer__gs_module.module_data == 0)
{
cures = cuModuleLoadData (&(__hybridizer__gs_module.module), __hybridizer_cubin_module_data) ;
__hybridizer__gs_module.module_data = (void*) __hybridizer_cubin_module_data ;
if (cures != CUDA_SUCCESS) return hybridizer::translateCUresult((int)cures) ;
}
CUfunction __hybridizer__cufunc ;
cures = cuModuleGetFunction (&__hybridizer__cufunc, __hybridizer__gs_module.module, "Stimx46Programx46Hello") ;
if (cures != CUDA_SUCCESS) return hybridizer::translateCUresult((int)cures) ;
hybridizer::runtime* __hybridizer_runtime = (hybridizer::runtime*) __hybridizer_init_basic_runtime();
void* __hybridizer_launch_config[2] =
{
(void*)&__hybridizer_runtime,
(void*)0
} ;
shared += 16 ; if (shared > 48*1024) shared = 48*1024 ;
cures = cuLaunchKernel (__hybridizer__cufunc, gridDim_x, gridDim_y, 1, blockDim_x, blockDim_y, blockDim_z, shared, 0, __hybridizer_launch_config, 0) ;
if (cures != CUDA_SUCCESS) return hybridizer::translateCUresult((int)cures) ;
int cudaLaunchRes = (int)::cudaPeekAtLastError ();
if (cudaLaunchRes != 0) return cudaLaunchRes;
int __synchronizeRes = (int)::cudaDeviceSynchronize () ;
return __synchronizeRes ;
}
extern "C" DLL_PUBLIC int Stimx46Programx46Hello_ExternCWrapperStream_CUDA( int gridDim_x, int gridDim_y, int blockDim_x, int blockDim_y, int blockDim_z, int shared, cudaStream_t st)
{
if (0 == __hybridizer_initialized) {
cudaDeviceSynchronize();
__hybridizer_initialized = 1;
}
CUresult cures ;
if (__hybridizer__gs_module.module_data == 0)
{
cures = cuModuleLoadData (&(__hybridizer__gs_module.module), __hybridizer_cubin_module_data) ;
__hybridizer__gs_module.module_data = (void*) __hybridizer_cubin_module_data ;
if (cures != CUDA_SUCCESS) return hybridizer::translateCUresult((int)cures) ;
}
CUfunction __hybridizer__cufunc ;
cures = cuModuleGetFunction (&__hybridizer__cufunc, __hybridizer__gs_module.module, "Stimx46Programx46Hello") ;
if (cures != CUDA_SUCCESS) return hybridizer::translateCUresult((int)cures) ;
hybridizer::runtime* __hybridizer_runtime = (hybridizer::runtime*) __hybridizer_init_basic_runtime();
void* __hybridizer_launch_config[3] =
{
(void*)&__hybridizer_runtime,
(void*)0
} ;
shared += 16 ; if (shared > 48*1024) shared = 48*1024 ;
cures = cuLaunchKernel (__hybridizer__cufunc, gridDim_x, gridDim_y, 1, blockDim_x, blockDim_y, blockDim_z, shared, st, __hybridizer_launch_config, 0) ;
if (cures != CUDA_SUCCESS) return hybridizer::translateCUresult((int)cures) ;
int cudaLaunchRes = (int)::cudaPeekAtLastError ();
if (cudaLaunchRes != 0) return cudaLaunchRes;
return cudaLaunchRes;
}
#if CUDART_VERSION >= 9000
extern "C" DLL_PUBLIC int Stimx46Programx46Hello_ExternCWrapperGridSync_CUDA( int gridDim_x, int gridDim_y, int blockDim_x, int blockDim_y, int blockDim_z, int shared)
{
if (0 == __hybridizer_initialized) {
cudaDeviceSynchronize();
__hybridizer_initialized = 1;
}
CUresult cures ;
if (__hybridizer__gs_module.module_data == 0)
{
cures = cuModuleLoadData (&(__hybridizer__gs_module.module), __hybridizer_cubin_module_data) ;
__hybridizer__gs_module.module_data = (void*) __hybridizer_cubin_module_data ;
if (cures != CUDA_SUCCESS) return hybridizer::translateCUresult((int)cures) ;
}
CUfunction __hybridizer__cufunc ;
cures = cuModuleGetFunction (&__hybridizer__cufunc, __hybridizer__gs_module.module, "Stimx46Programx46Hello") ;
if (cures != CUDA_SUCCESS) return hybridizer::translateCUresult((int)cures) ;
hybridizer::runtime* __hybridizer_runtime = (hybridizer::runtime*) __hybridizer_init_basic_runtime();
void* __hybridizer_launch_config[2] =
{
(void*)&__hybridizer_runtime,
(void*)0
} ;
shared += 16 ; if (shared > 48*1024) shared = 48*1024 ;
cures = cuLaunchCooperativeKernel (__hybridizer__cufunc, gridDim_x, gridDim_y, 1, blockDim_x, blockDim_y, blockDim_z, shared, 0, __hybridizer_launch_config) ;
if (cures != CUDA_SUCCESS) return hybridizer::translateCUresult((int)cures) ;
int cudaLaunchRes = (int)::cudaPeekAtLastError ();
if (cudaLaunchRes != 0) return cudaLaunchRes;
int __synchronizeRes = (int)::cudaDeviceSynchronize () ;
return __synchronizeRes ;
}
#endif
#if CUDART_VERSION >= 9000
extern "C" DLL_PUBLIC int Stimx46Programx46Hello_ExternCWrapperStreamGridSync_CUDA( int gridDim_x, int gridDim_y, int blockDim_x, int blockDim_y, int blockDim_z, int shared, cudaStream_t st)
{
if (0 == __hybridizer_initialized) {
cudaDeviceSynchronize();
__hybridizer_initialized = 1;
}
CUresult cures ;
if (__hybridizer__gs_module.module_data == 0)
{
cures = cuModuleLoadData (&(__hybridizer__gs_module.module), __hybridizer_cubin_module_data) ;
__hybridizer__gs_module.module_data = (void*) __hybridizer_cubin_module_data ;
if (cures != CUDA_SUCCESS) return hybridizer::translateCUresult((int)cures) ;
}
CUfunction __hybridizer__cufunc ;
cures = cuModuleGetFunction (&__hybridizer__cufunc, __hybridizer__gs_module.module, "Stimx46Programx46Hello") ;
if (cures != CUDA_SUCCESS) return hybridizer::translateCUresult((int)cures) ;
hybridizer::runtime* __hybridizer_runtime = (hybridizer::runtime*) __hybridizer_init_basic_runtime();
void* __hybridizer_launch_config[3] =
{
(void*)&__hybridizer_runtime,
(void*)0
} ;
shared += 16 ; if (shared > 48*1024) shared = 48*1024 ;
cures = cuLaunchCooperativeKernel (__hybridizer__cufunc, gridDim_x, gridDim_y, 1, blockDim_x, blockDim_y, blockDim_z, shared, st, __hybridizer_launch_config) ;
if (cures != CUDA_SUCCESS) return hybridizer::translateCUresult((int)cures) ;
int cudaLaunchRes = (int)::cudaPeekAtLastError ();
if (cudaLaunchRes != 0) return cudaLaunchRes;
return cudaLaunchRes;
}
#endif
extern "C" DLL_PUBLIC int Stimx46Programx46Add_OccupancyCalculator_MaxActiveBlocksPerSM(int* numBlocks, int blockSize, int sharedMemSize)
{
if (0 == __hybridizer_initialized)
{
cudaDeviceSynchronize();
__hybridizer_initialized = 1;
}
CUresult cures;
if (__hybridizer__gs_module.module_data == 0)
{
cures = cuModuleLoadData(&(__hybridizer__gs_module.module), __hybridizer_cubin_module_data);
__hybridizer__gs_module.module_data = (void*)__hybridizer_cubin_module_data;
if (cures != CUDA_SUCCESS) return hybridizer::translateCUresult((int)cures);
}
CUfunction __hybridizer__cufunc;
cures = cuModuleGetFunction(&__hybridizer__cufunc, __hybridizer__gs_module.module, "Stimx46Programx46Add");
if (cures != CUDA_SUCCESS) return hybridizer::translateCUresult((int)cures);
return cuOccupancyMaxActiveBlocksPerMultiprocessor(numBlocks, __hybridizer__cufunc, blockSize, sharedMemSize);
}
extern "C" DLL_PUBLIC int Stimx46Programx46Add_ExternCWrapper_CUDA( int gridDim_x, int gridDim_y, int blockDim_x, int blockDim_y, int blockDim_z, int shared, float* const a, float* const b, int N)
{
if (0 == __hybridizer_initialized) {
cudaDeviceSynchronize();
__hybridizer_initialized = 1;
}
CUresult cures ;
if (__hybridizer__gs_module.module_data == 0)
{
cures = cuModuleLoadData (&(__hybridizer__gs_module.module), __hybridizer_cubin_module_data) ;
__hybridizer__gs_module.module_data = (void*) __hybridizer_cubin_module_data ;
if (cures != CUDA_SUCCESS) return hybridizer::translateCUresult((int)cures) ;
}
CUfunction __hybridizer__cufunc ;
cures = cuModuleGetFunction (&__hybridizer__cufunc, __hybridizer__gs_module.module, "Stimx46Programx46Add") ;
if (cures != CUDA_SUCCESS) return hybridizer::translateCUresult((int)cures) ;
hybridizer::runtime* __hybridizer_runtime = (hybridizer::runtime*) __hybridizer_init_basic_runtime();
void* __hybridizer_launch_config[5] =
{
(void*)&__hybridizer_runtime,
(void*)&a,
(void*)&b,
(void*)&N,
(void*)0
} ;
shared += 16 ; if (shared > 48*1024) shared = 48*1024 ;
cures = cuLaunchKernel (__hybridizer__cufunc, gridDim_x, gridDim_y, 1, blockDim_x, blockDim_y, blockDim_z, shared, 0, __hybridizer_launch_config, 0) ;
if (cures != CUDA_SUCCESS) return hybridizer::translateCUresult((int)cures) ;
int cudaLaunchRes = (int)::cudaPeekAtLastError ();
if (cudaLaunchRes != 0) return cudaLaunchRes;
int __synchronizeRes = (int)::cudaDeviceSynchronize () ;
return __synchronizeRes ;
}
extern "C" DLL_PUBLIC int Stimx46Programx46Add_ExternCWrapperStream_CUDA( int gridDim_x, int gridDim_y, int blockDim_x, int blockDim_y, int blockDim_z, int shared, cudaStream_t st, float* const a, float* const b, int N)
{
if (0 == __hybridizer_initialized) {
cudaDeviceSynchronize();
__hybridizer_initialized = 1;
}
CUresult cures ;
if (__hybridizer__gs_module.module_data == 0)
{
cures = cuModuleLoadData (&(__hybridizer__gs_module.module), __hybridizer_cubin_module_data) ;
__hybridizer__gs_module.module_data = (void*) __hybridizer_cubin_module_data ;
if (cures != CUDA_SUCCESS) return hybridizer::translateCUresult((int)cures) ;
}
CUfunction __hybridizer__cufunc ;
cures = cuModuleGetFunction (&__hybridizer__cufunc, __hybridizer__gs_module.module, "Stimx46Programx46Add") ;
if (cures != CUDA_SUCCESS) return hybridizer::translateCUresult((int)cures) ;
hybridizer::runtime* __hybridizer_runtime = (hybridizer::runtime*) __hybridizer_init_basic_runtime();
void* __hybridizer_launch_config[6] =
{
(void*)&__hybridizer_runtime,
(void*)&a,
(void*)&b,
(void*)&N,
(void*)0
} ;
shared += 16 ; if (shared > 48*1024) shared = 48*1024 ;
cures = cuLaunchKernel (__hybridizer__cufunc, gridDim_x, gridDim_y, 1, blockDim_x, blockDim_y, blockDim_z, shared, st, __hybridizer_launch_config, 0) ;
if (cures != CUDA_SUCCESS) return hybridizer::translateCUresult((int)cures) ;
int cudaLaunchRes = (int)::cudaPeekAtLastError ();
if (cudaLaunchRes != 0) return cudaLaunchRes;
return cudaLaunchRes;
}
#if CUDART_VERSION >= 9000
extern "C" DLL_PUBLIC int Stimx46Programx46Add_ExternCWrapperGridSync_CUDA( int gridDim_x, int gridDim_y, int blockDim_x, int blockDim_y, int blockDim_z, int shared, float* const a, float* const b, int N)
{
if (0 == __hybridizer_initialized) {
cudaDeviceSynchronize();
__hybridizer_initialized = 1;
}
CUresult cures ;
if (__hybridizer__gs_module.module_data == 0)
{
cures = cuModuleLoadData (&(__hybridizer__gs_module.module), __hybridizer_cubin_module_data) ;
__hybridizer__gs_module.module_data = (void*) __hybridizer_cubin_module_data ;
if (cures != CUDA_SUCCESS) return hybridizer::translateCUresult((int)cures) ;
}
CUfunction __hybridizer__cufunc ;
cures = cuModuleGetFunction (&__hybridizer__cufunc, __hybridizer__gs_module.module, "Stimx46Programx46Add") ;
if (cures != CUDA_SUCCESS) return hybridizer::translateCUresult((int)cures) ;
hybridizer::runtime* __hybridizer_runtime = (hybridizer::runtime*) __hybridizer_init_basic_runtime();
void* __hybridizer_launch_config[5] =
{
(void*)&__hybridizer_runtime,
(void*)&a,
(void*)&b,
(void*)&N,
(void*)0
} ;
shared += 16 ; if (shared > 48*1024) shared = 48*1024 ;
cures = cuLaunchCooperativeKernel (__hybridizer__cufunc, gridDim_x, gridDim_y, 1, blockDim_x, blockDim_y, blockDim_z, shared, 0, __hybridizer_launch_config) ;
if (cures != CUDA_SUCCESS) return hybridizer::translateCUresult((int)cures) ;
int cudaLaunchRes = (int)::cudaPeekAtLastError ();
if (cudaLaunchRes != 0) return cudaLaunchRes;
int __synchronizeRes = (int)::cudaDeviceSynchronize () ;
return __synchronizeRes ;
}
#endif
#if CUDART_VERSION >= 9000
extern "C" DLL_PUBLIC int Stimx46Programx46Add_ExternCWrapperStreamGridSync_CUDA( int gridDim_x, int gridDim_y, int blockDim_x, int blockDim_y, int blockDim_z, int shared, cudaStream_t st, float* const a, float* const b, int N)
{
if (0 == __hybridizer_initialized) {
cudaDeviceSynchronize();
__hybridizer_initialized = 1;
}
CUresult cures ;
if (__hybridizer__gs_module.module_data == 0)
{
cures = cuModuleLoadData (&(__hybridizer__gs_module.module), __hybridizer_cubin_module_data) ;
__hybridizer__gs_module.module_data = (void*) __hybridizer_cubin_module_data ;
if (cures != CUDA_SUCCESS) return hybridizer::translateCUresult((int)cures) ;
}
CUfunction __hybridizer__cufunc ;
cures = cuModuleGetFunction (&__hybridizer__cufunc, __hybridizer__gs_module.module, "Stimx46Programx46Add") ;
if (cures != CUDA_SUCCESS) return hybridizer::translateCUresult((int)cures) ;
hybridizer::runtime* __hybridizer_runtime = (hybridizer::runtime*) __hybridizer_init_basic_runtime();
void* __hybridizer_launch_config[6] =
{
(void*)&__hybridizer_runtime,
(void*)&a,
(void*)&b,
(void*)&N,
(void*)0
} ;
shared += 16 ; if (shared > 48*1024) shared = 48*1024 ;
cures = cuLaunchCooperativeKernel (__hybridizer__cufunc, gridDim_x, gridDim_y, 1, blockDim_x, blockDim_y, blockDim_z, shared, st, __hybridizer_launch_config) ;
if (cures != CUDA_SUCCESS) return hybridizer::translateCUresult((int)cures) ;
int cudaLaunchRes = (int)::cudaPeekAtLastError ();
if (cudaLaunchRes != 0) return cudaLaunchRes;
return cudaLaunchRes;
}
#endif
#pragma endregion
|
7cb01a743f5890f279b58fd687bbad7dd257145b.hip | // !!! This is a file automatically generated by hipify!!!
//#define _glibcxx_use_cxx11_abi 0
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/opencv.hpp>
#include <iostream>
#include <hip/hip_runtime.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include "include/blur.h"
#include "include/greyscale.h"
#include "include/edge_detect.h"
#include "include/img_load.h"
#include <string>
using namespace std;
size_t num_rows, num_cols;
int main(int argc, char **argv) {
if(argc < 3) {
printf("Usage: ./image_proc <image_file> <filter>");
exit(1);
}
uchar4* h_out = NULL;
  uchar4* h_image, *d_in, *d_in2 = NULL; // NULL-init so hipFree(d_in2) is a safe no-op when the edge path never ran
string arg = string(argv[2]);
string file = string(argv[1]);
string output_file = "output.jpg";
load_img(file, &h_image, &num_rows, &num_cols);
hipMalloc((void **) &d_in, num_rows * num_cols * sizeof(uchar4));
hipMemcpy(d_in, h_image, num_rows * num_cols * sizeof(uchar4), hipMemcpyHostToDevice);
if(arg == "-b" || arg == "-blur") {
h_out = blur(d_in, num_rows, num_cols, 9);
}
else if(arg == "-g" || arg == "-grey" || arg == "-gray") {
h_out = greyscale(d_in, num_rows, num_cols);
}
else if(arg == "-e" || arg == "-edge") {
d_in2 = greyscale(d_in, num_rows, num_cols);
h_out = edge_detect(d_in2, num_rows, num_cols);
}
else {
printf("INCORRECT ARGUMENTS\n");
exit(1);
}
free(h_image);
save_img(h_out, output_file, num_rows, num_cols);
hipFree(d_in);
hipFree(d_in2);
}
| 7cb01a743f5890f279b58fd687bbad7dd257145b.cu | //#define _glibcxx_use_cxx11_abi 0
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/opencv.hpp>
#include <iostream>
#include <cuda_runtime.h>
#include <stdlib.h>
#include <cuda.h>
#include "include/blur.h"
#include "include/greyscale.h"
#include "include/edge_detect.h"
#include "include/img_load.h"
#include <string>
using namespace std;
size_t num_rows, num_cols;
int main(int argc, char **argv) {
if(argc < 3) {
printf("Usage: ./image_proc <image_file> <filter>");
exit(1);
}
uchar4* h_out = NULL;
  uchar4* h_image, *d_in, *d_in2 = NULL; // NULL-init so cudaFree(d_in2) is a safe no-op when the edge path never ran
string arg = string(argv[2]);
string file = string(argv[1]);
string output_file = "output.jpg";
load_img(file, &h_image, &num_rows, &num_cols);
cudaMalloc((void **) &d_in, num_rows * num_cols * sizeof(uchar4));
cudaMemcpy(d_in, h_image, num_rows * num_cols * sizeof(uchar4), cudaMemcpyHostToDevice);
if(arg == "-b" || arg == "-blur") {
h_out = blur(d_in, num_rows, num_cols, 9);
}
else if(arg == "-g" || arg == "-grey" || arg == "-gray") {
h_out = greyscale(d_in, num_rows, num_cols);
}
else if(arg == "-e" || arg == "-edge") {
d_in2 = greyscale(d_in, num_rows, num_cols);
h_out = edge_detect(d_in2, num_rows, num_cols);
}
else {
printf("INCORRECT ARGUMENTS\n");
exit(1);
}
free(h_image);
save_img(h_out, output_file, num_rows, num_cols);
cudaFree(d_in);
cudaFree(d_in2);
}
|
c0dd5d3525b0511b70bae2b5fd372f28703d55ee.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "Kmeans_Cuda.h"
#include <time.h>
//Calling device reset
const char* stopCuda() {
hipError_t cudaStatus;
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess)
return "hipDeviceReset failed";
return NULL;
}
//Setting the cuda device (0)
const char* initCuda() {
hipError_t cudaStatus;
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess)
return "hipSetDevice failed! Do you have a CUDA-capable GPU installed?";
return NULL;
}
__global__ void updateTimeKernel(Point *dev_pointArr, double timeInterval, int numOfPoints) {
// Starting from the thread's id
// increasing the point index by grid size
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < numOfPoints; i += blockDim.x * gridDim.x)
{
dev_pointArr[i].position.x += timeInterval * dev_pointArr[i].velocity.vx;
dev_pointArr[i].position.y += timeInterval * dev_pointArr[i].velocity.vy;
dev_pointArr[i].position.z += timeInterval * dev_pointArr[i].velocity.vz;
}
}
// Increases time for some of the points
const char* updateTimeCudaStart(Point* points, int numOfPoints, double dt, int moment, Point** gpu_points) {
double timeInterval = dt * moment;
Point *dev_points = 0;
int numOfBlocks;
hipError_t cudaStatus;
numOfBlocks = (numOfPoints + BLOCK_SIZE - 1) / BLOCK_SIZE;
if (numOfBlocks > MAX_BLOCKS)
numOfBlocks = MAX_BLOCKS;
if (*gpu_points == NULL) {
// Allocate GPU buffers for the points array
cudaStatus = hipMalloc((void**)&dev_points, numOfPoints * sizeof(Point));
if (cudaStatus != hipSuccess)
return "hipMalloc failed!";
*gpu_points = dev_points;
}
else
dev_points = *gpu_points;
// Copy data array from host memory to GPU buffers
cudaStatus = hipMemcpy(dev_points, points, numOfPoints * sizeof(Point), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
hipFree(dev_points);
return "hipMemcpy failed!";
}
	// Launch a kernel on the GPU that advances one part of the points in time
updateTimeKernel << <numOfBlocks, BLOCK_SIZE >> >(dev_points, timeInterval, numOfPoints);
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
hipFree(dev_points);
return "countKernel launch failed";
}
return NULL;
}
const char* updateTimeCudaEnd(Point* dev_points, Point* pointsArr, int numOfPoints) {
hipError_t cudaStatus;
// hipDeviceSynchronize waits for the kernel to finish, and returns any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
hipFree(dev_points);
return "hipDeviceSynchronize returned error code after launching countKernel!";
}
	// Copy the updated points array from the GPU buffer back to host memory
cudaStatus = hipMemcpy(pointsArr, dev_points, numOfPoints * sizeof(Point), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
hipFree(dev_points);
return "hipMemcpy failed!";
}
return NULL;
}
const char* freeCuda(Point* gpu_points) {
if (hipFree(gpu_points) != hipSuccess)
return "hipFree error";
return NULL;
}
| c0dd5d3525b0511b70bae2b5fd372f28703d55ee.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "Kmeans_Cuda.h"
#include <time.h>
//Calling device reset
const char* stopCuda() {
cudaError_t cudaStatus;
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess)
return "cudaDeviceReset failed";
return NULL;
}
//Setting the cuda device (0)
const char* initCuda() {
cudaError_t cudaStatus;
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess)
return "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?";
return NULL;
}
__global__ void updateTimeKernel(Point *dev_pointArr, double timeInterval, int numOfPoints) {
// Starting from the thread's id
// increasing the point index by grid size
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < numOfPoints; i += blockDim.x * gridDim.x)
{
dev_pointArr[i].position.x += timeInterval * dev_pointArr[i].velocity.vx;
dev_pointArr[i].position.y += timeInterval * dev_pointArr[i].velocity.vy;
dev_pointArr[i].position.z += timeInterval * dev_pointArr[i].velocity.vz;
}
}
// Increases time for some of the points
const char* updateTimeCudaStart(Point* points, int numOfPoints, double dt, int moment, Point** gpu_points) {
double timeInterval = dt * moment;
Point *dev_points = 0;
int numOfBlocks;
cudaError_t cudaStatus;
numOfBlocks = (numOfPoints + BLOCK_SIZE - 1) / BLOCK_SIZE;
if (numOfBlocks > MAX_BLOCKS)
numOfBlocks = MAX_BLOCKS;
if (*gpu_points == NULL) {
// Allocate GPU buffers for the points array
cudaStatus = cudaMalloc((void**)&dev_points, numOfPoints * sizeof(Point));
if (cudaStatus != cudaSuccess)
return "cudaMalloc failed!";
*gpu_points = dev_points;
}
else
dev_points = *gpu_points;
// Copy data array from host memory to GPU buffers
cudaStatus = cudaMemcpy(dev_points, points, numOfPoints * sizeof(Point), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
cudaFree(dev_points);
return "cudaMemcpy failed!";
}
	// Launch a kernel on the GPU that advances one part of the points in time
updateTimeKernel << <numOfBlocks, BLOCK_SIZE >> >(dev_points, timeInterval, numOfPoints);
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
cudaFree(dev_points);
return "countKernel launch failed";
}
return NULL;
}
const char* updateTimeCudaEnd(Point* dev_points, Point* pointsArr, int numOfPoints) {
cudaError_t cudaStatus;
// cudaDeviceSynchronize waits for the kernel to finish, and returns any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
cudaFree(dev_points);
return "cudaDeviceSynchronize returned error code after launching countKernel!";
}
	// Copy the updated points array from the GPU buffer back to host memory
cudaStatus = cudaMemcpy(pointsArr, dev_points, numOfPoints * sizeof(Point), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
cudaFree(dev_points);
return "cudaMemcpy failed!";
}
return NULL;
}
const char* freeCuda(Point* gpu_points) {
if (cudaFree(gpu_points) != cudaSuccess)
return "cudaFree error";
return NULL;
}
|
a9bd0d9b4cabd97a7953893a4d39fe87a3c975a9.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdio>
#include <cstdlib>
#include <hip/hip_runtime.h>
#include <omp.h>
static const int E = 50000;
static const int N = 10;
static const int B = 512;
// Deprecated GPU initialization
//__global__
//void k_init_input(double *u, double *gxyz, int N) {
// int ielt = blockIdx.x;
// int sz_elt = N * N * N;
// int elt_offset = sz_elt*ielt;
//
// for(int it = threadIdx.x ; it < sz_elt ; it += blockDim.x)
// {
// int j=it/N; int i=it-j*N; int k=j/N; j-=k*N;
// u[it+elt_offset] = 0.1*((i+1)+(j+1)+(k+1))+(ielt+1)*100;
// for(int p=0;p<6;p++)
// gxyz[p+6*(it+elt_offset)]=(p+1)+(i+1)+(j+1)+(k+1)+(ielt+1)*1000;
// }
//}
// Original CUDA kernel
//__global__ void k_nekbone_ax(double *w, double *u, double *gxyz,
// double *dxm1, double *dxtm1, int np)
//{
// int sz_elt=np*np*np;
// int elt_offset=sz_elt*blockIdx.x; // directly map block id to element
// __shared__ double ur[1024];
// __shared__ double us[1024];
// __shared__ double ut[1024];
// __shared__ double ul[1024]; // 25% speedup
// int it,i,j,k,p,gi0;
// double s,wr,ws,wt;
//
// for(it=threadIdx.x ; it<sz_elt ; it+=blockDim.x)
// ul[it]=u[it+elt_offset];
//
// __syncthreads();
//
// for(it=threadIdx.x ; it<sz_elt ; it+=blockDim.x)
// {
// j=it/np; i=it-j*np; k=j/np; j-=k*np;
// wr=0.; ws=0.; wt=0.;
// for(p=0;p<np;p++)
// {
// wr+=dxtm1[p+i*np]*ul[p+np*(j+np*k)];
// ws+=dxtm1[p+j*np]*ul[i+np*(p+np*k)];
// wt+=dxtm1[p+k*np]*ul[i+np*(j+np*p)];
// }
// gi0=6*(it+elt_offset);
// ur[it] = gxyz[0+gi0]*wr + gxyz[1+gi0]*ws + gxyz[2+gi0]*wt;
// us[it] = gxyz[1+gi0]*wr + gxyz[3+gi0]*ws + gxyz[4+gi0]*wt;
// ut[it] = gxyz[2+gi0]*wr + gxyz[4+gi0]*ws + gxyz[5+gi0]*wt;
// }
//
// __syncthreads();
//
// for(it=threadIdx.x ; it<sz_elt ; it+=blockDim.x)
// {
// j=it/np; i=it-j*np; k=j/np; j-=k*np;
// s=0.;
// for(p=0;p<np;p++)
// s+=dxm1[p+i*np]*ur[p+np*(j+np*k)]
// +dxm1[p+j*np]*us[i+np*(p+np*k)]
// +dxm1[p+k*np]*ut[i+np*(j+np*p)];
// w[it+elt_offset] = s;
// }
//}
void init(double *u, double *g, double *d, double *dt) {
for(int j = 0; j < N; j++) {
for(int i = 0; i < N; i++) {
dt[i * N + j] = d[j * N + i] = (i + 1) * (i + 1) + (j + 1);
}
}
#pragma omp parallel for
for (size_t e = 0; e < E; ++e) {
size_t e_offset = e * N * N * N;
for (size_t k = 0; k < N; ++k) {
for (size_t j = 0; j < N; ++j) {
for (size_t i = 0; i < N; ++i) {
size_t offset = k * N * N + j * N + i;
u[e_offset + offset] = 0.1 * ((i + 1) + (j + 1) + (k + 1)) + (e + 1) * 100;
for (size_t p = 0; p < 6; ++p) {
#ifdef CUDA1
g[6 * (e_offset + offset) + p] = (p + 1) + (i + 1) + (j + 1) + (k + 1) + (e + 1) * 1000;
#elif defined CUDA2
g[6 * (e_offset + offset) + p] = (p + 1) + (i + 1) + (j + 1) + (k + 1) + (e + 1) * 1000;
#elif defined CUDA3
g[6 * e_offset + p * N * N * N + offset] = (p + 1) + (i + 1) + (j + 1) + (k + 1) + (e + 1) * 1000;
#elif defined CUDA4
g[6 * e_offset + p * N * N * N + offset] = (p + 1) + (i + 1) + (j + 1) + (k + 1) + (e + 1) * 1000;
#endif
}
}
}
}
}
}
#ifdef CUDA1
#include "cuda1.cu"
#elif defined CUDA2
#include "cuda2.cu"
#elif defined CUDA3
#include "cuda3.cu"
#elif defined CUDA4
#include "cuda4.cu"
#endif
int main() {
double *w, *d, *dt, *g, *u;
double *w_d, *d_d, *dt_d, *g_d, *u_d;
g = (double *)calloc(6 * N * N * N * E, sizeof(double));
u = (double *)calloc(N * N * N * E, sizeof(double));
w = (double *)calloc(N * N * N * E, sizeof(double));
d = (double *)calloc(N * N, sizeof(double));
dt = (double *)calloc(N * N, sizeof(double));
hipMalloc<double>(&g_d, sizeof(double) * 6 * N * N * N * E);
hipMalloc<double>(&u_d, sizeof(double) * N * N * N * E);
hipMalloc<double>(&w_d, sizeof(double) * N * N * N * E);
hipMalloc<double>(&d_d, sizeof(double) * N * N);
hipMalloc<double>(&dt_d, sizeof(double) * N * N);
init(u, g, d, dt);
hipMemcpy(g_d, g, sizeof(double) * 6 * N * N * N * E, hipMemcpyHostToDevice);
hipMemcpy(u_d, u, sizeof(double) * N * N * N * E, hipMemcpyHostToDevice);
hipMemcpy(d_d, d, sizeof(double) * N * N, hipMemcpyHostToDevice);
hipMemcpy(dt_d, dt, sizeof(double) * N * N, hipMemcpyHostToDevice);
float elapsed_time = 0.0;
hipEvent_t event_start, event_end;
hipEventCreate(&event_start);
hipEventCreate(&event_end);
hipEventRecord(event_start);
const float RN = 1.0 / N;
#ifdef CUDA1
hipLaunchKernelGGL(( nekbone), dim3(E), dim3(B), 0, 0, w_d, u_d, g_d, d_d, dt_d, N);
#elif defined CUDA2
hipLaunchKernelGGL(( nekbone), dim3(E), dim3(B), 0, 0, w_d, u_d, g_d, d_d, N);
#elif defined CUDA3
hipLaunchKernelGGL(( nekbone), dim3(E), dim3(B), 0, 0, w_d, u_d, g_d, d_d, N);
#elif defined CUDA4
hipLaunchKernelGGL(( nekbone), dim3(E), dim3(B), 0, 0, w_d, u_d, g_d, d_d, N, RN);
#endif
hipEventRecord(event_end);
hipEventSynchronize(event_end);
hipEventElapsedTime(&elapsed_time, event_start, event_end);
elapsed_time /= 1000.0;
printf("kernel time (s): %f\n", elapsed_time);
hipMemcpy(w, w_d, sizeof(double) * N * N * N * E, hipMemcpyDeviceToHost);
printf("First 5 sums:\n");
//2.143933e+14 8.402399e+14 1.877629e+15 3.326562e+15 5.187038e+15
int it = 0;
int it_next = 0;
for(size_t i = 0; i < 5; ++i) {
it_next += N * N * N;
double s = 0.0;
for(; it < it_next; it++) {
s += w[it];
}
printf("%14.6e",s);
}
printf("\n");
free(g);
free(u);
free(w);
free(d);
free(dt);
hipFree(g_d);
hipFree(u_d);
hipFree(w_d);
hipFree(d_d);
hipFree(dt_d);
return 0;
}
| a9bd0d9b4cabd97a7953893a4d39fe87a3c975a9.cu | #include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>
#include <omp.h>
static const int E = 50000;
static const int N = 10;
static const int B = 512;
// Deprecated GPU initialization
//__global__
//void k_init_input(double *u, double *gxyz, int N) {
// int ielt = blockIdx.x;
// int sz_elt = N * N * N;
// int elt_offset = sz_elt*ielt;
//
// for(int it = threadIdx.x ; it < sz_elt ; it += blockDim.x)
// {
// int j=it/N; int i=it-j*N; int k=j/N; j-=k*N;
// u[it+elt_offset] = 0.1*((i+1)+(j+1)+(k+1))+(ielt+1)*100;
// for(int p=0;p<6;p++)
// gxyz[p+6*(it+elt_offset)]=(p+1)+(i+1)+(j+1)+(k+1)+(ielt+1)*1000;
// }
//}
// Original CUDA kernel
//__global__ void k_nekbone_ax(double *w, double *u, double *gxyz,
// double *dxm1, double *dxtm1, int np)
//{
// int sz_elt=np*np*np;
// int elt_offset=sz_elt*blockIdx.x; // directly map block id to element
// __shared__ double ur[1024];
// __shared__ double us[1024];
// __shared__ double ut[1024];
// __shared__ double ul[1024]; // 25% speedup
// int it,i,j,k,p,gi0;
// double s,wr,ws,wt;
//
// for(it=threadIdx.x ; it<sz_elt ; it+=blockDim.x)
// ul[it]=u[it+elt_offset];
//
// __syncthreads();
//
// for(it=threadIdx.x ; it<sz_elt ; it+=blockDim.x)
// {
// j=it/np; i=it-j*np; k=j/np; j-=k*np;
// wr=0.; ws=0.; wt=0.;
// for(p=0;p<np;p++)
// {
// wr+=dxtm1[p+i*np]*ul[p+np*(j+np*k)];
// ws+=dxtm1[p+j*np]*ul[i+np*(p+np*k)];
// wt+=dxtm1[p+k*np]*ul[i+np*(j+np*p)];
// }
// gi0=6*(it+elt_offset);
// ur[it] = gxyz[0+gi0]*wr + gxyz[1+gi0]*ws + gxyz[2+gi0]*wt;
// us[it] = gxyz[1+gi0]*wr + gxyz[3+gi0]*ws + gxyz[4+gi0]*wt;
// ut[it] = gxyz[2+gi0]*wr + gxyz[4+gi0]*ws + gxyz[5+gi0]*wt;
// }
//
// __syncthreads();
//
// for(it=threadIdx.x ; it<sz_elt ; it+=blockDim.x)
// {
// j=it/np; i=it-j*np; k=j/np; j-=k*np;
// s=0.;
// for(p=0;p<np;p++)
// s+=dxm1[p+i*np]*ur[p+np*(j+np*k)]
// +dxm1[p+j*np]*us[i+np*(p+np*k)]
// +dxm1[p+k*np]*ut[i+np*(j+np*p)];
// w[it+elt_offset] = s;
// }
//}
void init(double *u, double *g, double *d, double *dt) {
for(int j = 0; j < N; j++) {
for(int i = 0; i < N; i++) {
dt[i * N + j] = d[j * N + i] = (i + 1) * (i + 1) + (j + 1);
}
}
#pragma omp parallel for
for (size_t e = 0; e < E; ++e) {
size_t e_offset = e * N * N * N;
for (size_t k = 0; k < N; ++k) {
for (size_t j = 0; j < N; ++j) {
for (size_t i = 0; i < N; ++i) {
size_t offset = k * N * N + j * N + i;
u[e_offset + offset] = 0.1 * ((i + 1) + (j + 1) + (k + 1)) + (e + 1) * 100;
for (size_t p = 0; p < 6; ++p) {
#ifdef CUDA1
g[6 * (e_offset + offset) + p] = (p + 1) + (i + 1) + (j + 1) + (k + 1) + (e + 1) * 1000;
#elif defined CUDA2
g[6 * (e_offset + offset) + p] = (p + 1) + (i + 1) + (j + 1) + (k + 1) + (e + 1) * 1000;
#elif defined CUDA3
g[6 * e_offset + p * N * N * N + offset] = (p + 1) + (i + 1) + (j + 1) + (k + 1) + (e + 1) * 1000;
#elif defined CUDA4
g[6 * e_offset + p * N * N * N + offset] = (p + 1) + (i + 1) + (j + 1) + (k + 1) + (e + 1) * 1000;
#endif
}
}
}
}
}
}
#ifdef CUDA1
#include "cuda1.cu"
#elif defined CUDA2
#include "cuda2.cu"
#elif defined CUDA3
#include "cuda3.cu"
#elif defined CUDA4
#include "cuda4.cu"
#endif
int main() {
double *w, *d, *dt, *g, *u;
double *w_d, *d_d, *dt_d, *g_d, *u_d;
g = (double *)calloc(6 * N * N * N * E, sizeof(double));
u = (double *)calloc(N * N * N * E, sizeof(double));
w = (double *)calloc(N * N * N * E, sizeof(double));
d = (double *)calloc(N * N, sizeof(double));
dt = (double *)calloc(N * N, sizeof(double));
cudaMalloc<double>(&g_d, sizeof(double) * 6 * N * N * N * E);
cudaMalloc<double>(&u_d, sizeof(double) * N * N * N * E);
cudaMalloc<double>(&w_d, sizeof(double) * N * N * N * E);
cudaMalloc<double>(&d_d, sizeof(double) * N * N);
cudaMalloc<double>(&dt_d, sizeof(double) * N * N);
init(u, g, d, dt);
cudaMemcpy(g_d, g, sizeof(double) * 6 * N * N * N * E, cudaMemcpyHostToDevice);
cudaMemcpy(u_d, u, sizeof(double) * N * N * N * E, cudaMemcpyHostToDevice);
cudaMemcpy(d_d, d, sizeof(double) * N * N, cudaMemcpyHostToDevice);
cudaMemcpy(dt_d, dt, sizeof(double) * N * N, cudaMemcpyHostToDevice);
float elapsed_time = 0.0;
cudaEvent_t event_start, event_end;
cudaEventCreate(&event_start);
cudaEventCreate(&event_end);
cudaEventRecord(event_start);
const float RN = 1.0 / N;
#ifdef CUDA1
nekbone<<<E, B>>>(w_d, u_d, g_d, d_d, dt_d, N);
#elif defined CUDA2
nekbone<<<E, B>>>(w_d, u_d, g_d, d_d, N);
#elif defined CUDA3
nekbone<<<E, B>>>(w_d, u_d, g_d, d_d, N);
#elif defined CUDA4
nekbone<<<E, B>>>(w_d, u_d, g_d, d_d, N, RN);
#endif
cudaEventRecord(event_end);
cudaEventSynchronize(event_end);
cudaEventElapsedTime(&elapsed_time, event_start, event_end);
elapsed_time /= 1000.0;
printf("kernel time (s): %f\n", elapsed_time);
cudaMemcpy(w, w_d, sizeof(double) * N * N * N * E, cudaMemcpyDeviceToHost);
printf("First 5 sums:\n");
//2.143933e+14 8.402399e+14 1.877629e+15 3.326562e+15 5.187038e+15
int it = 0;
int it_next = 0;
for(size_t i = 0; i < 5; ++i) {
it_next += N * N * N;
double s = 0.0;
for(; it < it_next; it++) {
s += w[it];
}
printf("%14.6e",s);
}
printf("\n");
free(g);
free(u);
free(w);
free(d);
free(dt);
cudaFree(g_d);
cudaFree(u_d);
cudaFree(w_d);
cudaFree(d_d);
cudaFree(dt_d);
return 0;
}
|
d4f9488b0afdc28ee4ebf9c231618613c41168a7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "erl_nif.h"
#include "rocblas.h"
#include "stdio.h"
#include "time.h"
#define IDX2C(i,j,ld) (((j)*(ld))+(i))
#define IDX3C(c,i,j,in_h,in_w) ((c)*((in_h)*(in_w)) + (i)*(in_w) +(j))
#define IDX4C(n,c,i,j,in_c,in_h,in_w) ((n)*((in_c)*(in_h)*(in_w)) + (c)*((in_h)*(in_w)) + (i)*(in_w) +(j))
#define IDX5C(t,n,c,i,j,in_n,in_c,in_h,in_w) ((t)*((in_n)*(in_c)*(in_h)*(in_w)) + (n)*((in_c)*(in_h)*(in_w)) + (c)*((in_h)*(in_w)) + (i)*(in_w) +(j))
#define BREAK return(enif_make_int(env, 0));
#define PI 3.14159265358979323846
#define SIGMOID(x) (1 / (1+exp(-1*x)))
#define CHECK(call) \
{ \
const hipError_t error = call; \
if (error != hipSuccess) \
{ \
return enif_make_int(env,10000+(int)error); \
} \
}
#define CUBLAS(call) \
{ \
  const rocblas_status error = call;                     \
  if (error != rocblas_status_success)                   \
{ \
return enif_make_int(env,11000+(int)error); \
} \
}
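/*
  Usage note (illustrative): both macros return an Erlang integer to the caller
  instead of raising on failure, e.g.
    CHECK(hipMalloc((void**)&dev_a, n * sizeof(float)));
  returns 10000 + the runtime error value (a status of 1 surfaces as 10001),
  while CUBLAS(...) returns 11000 + the BLAS status value.
*/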
__global__ void pooling_kernel(float *a, float *b, float *c, int st_h, int st_w, int in_c, int in_h, int in_w)
{
int bid = blockIdx.x;
int tid = threadIdx.x;
int n1,c1,h1,w1,h2,w2,in_h2,in_w2,start_h1,end_h1,start_w1,end_w1,max_h,max_w;
float max,fmax_h,fmax_w;
n1 = bid;
c1 = tid;
in_h2 = in_h / st_h;
in_w2 = in_w / st_w;
for(w2=0;w2<in_w2;w2++){
for(h2=0;h2<in_h2;h2++){
max = -999999999.0;
start_h1 = st_h*h2;
end_h1 = st_h*(h2+1);
start_w1 = st_w*w2;
end_w1 = st_w*(w2+1);
for(h1=start_h1;h1<end_h1;h1++){
for(w1=start_w1;w1<end_w1;w1++){
if(a[IDX4C(n1,c1,h1,w1,in_c,in_h,in_w)] >= max){
max = a[IDX4C(n1,c1,h1,w1,in_c,in_h,in_w)];
max_h = h1;
max_w = w1;
}
}
}
b[IDX4C(n1,c1,h2,w2,in_c,in_h2,in_w2)] = max;
fmax_h = (float)max_h;
fmax_w = (float)max_w;
c[IDX4C(n1,c1,h2,w2,in_c,in_h2,in_w2)] = fmax_h * 1000.0 + fmax_w;
}
}
}
/*
1st arg in_n of tensor
2nd arg in_c of tensor
3rd arg in_h of tensor
4th arg in_w of tensor
5th arg binary of tensor
6th arg stride
return list [ts1,ts2]
ts1 is result data for forward
ts2 is result data dor backward. this is sparse matrix
e.g.
|0.1,0.2,0.3,0.4|
|0.5,0.6,0.7,0.8|
|0.9,1.0,1.1,1.2|
|1.3,1.4,1.5,1.6|
ts1
|0.6,0.8|
|1.4,1.6|
ts2
each element is row*1000+col
|1.0*1000+1.0,1.0*1000*3.0|
|3.0*1000+1.0,3.0*1000+3.0|
*/
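/*
  Illustrative decode of the packed index above: each element of ts2 stores
  max_row*1000.0 + max_col, so 3.0*1000.0+1.0 = 3001.0 decodes back as
    row = floor(3001.0/1000.0) = 3,  col = fmod(3001.0,1000.0) = 1,
  which is how unpooling_kernel recovers (max_h, max_w). This packing assumes
  the spatial width is below 1000.
*/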
static ERL_NIF_TERM
pooling1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM b_bin,c_bin,tuple;
int in_n,in_c,in_h,in_w,st_h,st_w, n1, n2;
float *a,*b, *c;
float *dev_a, *dev_b, *dev_c;
if (!enif_get_int(env, argv[0], &in_n)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &in_c)) return enif_make_int(env,2);
if (!enif_get_int(env, argv[2], &in_h)) return enif_make_int(env,3);
if (!enif_get_int(env, argv[3], &in_w)) return enif_make_int(env,4);
if (!enif_inspect_binary(env, argv[4], &a_bin )) return enif_make_int(env,5);
if (!enif_get_int(env, argv[5], &st_h)) return enif_make_int(env,6);
if (!enif_get_int(env, argv[6], &st_w)) return enif_make_int(env,7);
n1 = in_n * in_c * in_h * in_w;
n2 = in_n * in_c * (in_h / st_h) * (in_w / st_w);
a = (float *) a_bin.data;
b = (float *) enif_make_new_binary(env, n2 * sizeof(float), &b_bin);
c = (float *) enif_make_new_binary(env, n2 * sizeof(float), &c_bin);
// Allocate for GPU
CHECK(hipMalloc((void**)&dev_a, n1 * sizeof(float)));
CHECK(hipMalloc((void**)&dev_b, n2 * sizeof(float)));
CHECK(hipMalloc((void**)&dev_c, n2 * sizeof(float)));
// copy from host a,b to GPU dev_a, dev_b
CHECK(hipMemcpy(dev_a, a, n1 * sizeof(float), hipMemcpyHostToDevice));
CHECK(hipMemcpy(dev_b, b, n2 * sizeof(float), hipMemcpyHostToDevice));
CHECK(hipMemcpy(dev_c, c, n2 * sizeof(float), hipMemcpyHostToDevice));
dim3 blocks(in_n,1,1);
dim3 threads(in_c,1,1);
hipLaunchKernelGGL(( pooling_kernel) , dim3(blocks), dim3(threads), 0, 0, dev_a, dev_b, dev_c, st_h, st_w, in_c, in_h, in_w);
// copy to host b,c from GPU dev_b,dev_c
CHECK(hipMemcpy(b, dev_b, n2 * sizeof(float), hipMemcpyDeviceToHost));
CHECK(hipMemcpy(c, dev_c, n2 * sizeof(float), hipMemcpyDeviceToHost));
// return forward data and backward data with tuple {b_bin,c_bin}
tuple = enif_make_tuple2(env,b_bin,c_bin);
// free
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
return(tuple);
}
__global__ void unpooling_kernel(float *a, float *b, float *c, int st_h, int st_w, int in_c, int in_h, int in_w)
{
int bid = blockIdx.x;
int tid = threadIdx.x;
int n1,c1,h1,w1,h2,w2,start_h1,end_h1,start_w1,end_w1,max_h,max_w,in_h1,in_w1;
float loss,elt;
n1 = bid;
c1 = tid;
in_h1 = in_h * st_h;
in_w1 = in_w * st_w;
for(h2=0;h2<in_h;h2++){
for(w2=0;w2<in_w;w2++){
start_h1 = st_h*h2;
end_h1 = st_h*(h2+1);
start_w1 = st_w*w2;
end_w1 = st_w*(w2+1);
elt = a[IDX4C(n1,c1,h2,w2,in_c,in_h,in_w)];
loss = b[IDX4C(n1,c1,h2,w2,in_c,in_h,in_w)];
max_h = (int) floor(elt / 1000.0);
max_w = (int) fmodf(elt,1000.0);
for(h1=start_h1;h1<end_h1;h1++){
for(w1=start_w1;w1<end_w1;w1++){
if(h1 == max_h && w1 == max_w){
c[IDX4C(n1,c1,h1,w1,in_c,in_h1,in_w1)] = loss;
}
else{
c[IDX4C(n1,c1,h1,w1,in_c,in_h1,in_w1)] = 0.0;
}
}
}
}
}
}
/*
1st arg in_n of sparse-tensor
2nd arg in_c of sparse-tensor
3rd arg in_h of sparse-tensor
4th arg in_w of sparse-tensor
5th arg binary of sparse-tensor
6th arg binary of loss-tensor
7th arg stride
return gradiate tensor
e.g.
ts1 index-tensor
each element is row*1000+col
|1.0*1000+1.0,1.0*1000*3.0|
|3.0*1000+1.0,3.0*1000+3.0|
ts2 loss-tensor
|0.1,0.2|
|0.3,0.4|
return
|0.0,0.0,0.0,0.0|
|0.0,0.1,0.0,0.2|
|0.0,0.0,0.0,0.0|
|0.0,3.4,0.0,0.4|
*/
static ERL_NIF_TERM
unpooling1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin,b_bin;
ERL_NIF_TERM c_bin;
int in_n,in_c,in_h,in_w,st_h,st_w, n1, n2;
float *a,*b, *c;
float *dev_a, *dev_b, *dev_c;
if (!enif_get_int(env, argv[0], &in_n)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &in_c)) return enif_make_int(env,2);
if (!enif_get_int(env, argv[2], &in_h)) return enif_make_int(env,3);
if (!enif_get_int(env, argv[3], &in_w)) return enif_make_int(env,4);
if (!enif_inspect_binary(env, argv[4], &a_bin )) return enif_make_int(env,5);
if (!enif_inspect_binary(env, argv[5], &b_bin )) return enif_make_int(env,6);
if (!enif_get_int(env, argv[6], &st_h)) return enif_make_int(env,7);
if (!enif_get_int(env, argv[7], &st_w)) return enif_make_int(env,8);
n1 = in_n * in_c * in_h * in_w;
n2 = in_n * in_c * (in_h * st_h) * (in_w * st_w);
a = (float *) a_bin.data;
b = (float *) b_bin.data;
c = (float *) enif_make_new_binary(env, n2 * sizeof(float), &c_bin);
// Allocate for GPU
CHECK(hipMalloc((void**)&dev_a, n1 * sizeof(float)));
CHECK(hipMalloc((void**)&dev_b, n1 * sizeof(float)));
CHECK(hipMalloc((void**)&dev_c, n2 * sizeof(float)));
// copy from host a,b to GPU dev_a, dev_b
CHECK(hipMemcpy(dev_a, a, n1 * sizeof(float), hipMemcpyHostToDevice));
CHECK(hipMemcpy(dev_b, b, n1 * sizeof(float), hipMemcpyHostToDevice));
CHECK(hipMemcpy(dev_c, c, n2 * sizeof(float), hipMemcpyHostToDevice));
dim3 blocks(in_n,1,1);
dim3 threads(in_c,1,1);
hipLaunchKernelGGL(( unpooling_kernel) , dim3(blocks), dim3(threads), 0, 0, dev_a, dev_b, dev_c, st_h, st_w, in_c, in_h, in_w);
// copy to host d from GPU dev_d
CHECK(hipMemcpy(c, dev_c, n2 * sizeof(float), hipMemcpyDeviceToHost));
// free
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
return(c_bin);
}
__global__ void convolute1_kernel(float *a, float *b, float *c, int filt_n, int filt_c, int filt_h, int filt_w,
int st_h, int st_w, int pad, int in_c, int in_h, int in_w, int oh, int ow)
{
int bid = blockIdx.x;
int tid = threadIdx.x;
int n1,c1,c2,h1,w1,h2,w2,start_h1,end_h1,start_w1,end_w1;
float sum,elt1,elt2;
n1 = bid;
c2 = tid;
for(w2=0;w2<ow;w2++){
for(h2=0;h2<oh;h2++){
sum = 0.0;
start_h1 = st_h*h2-pad;
end_h1 = start_h1 + filt_h;
start_w1 = st_w*w2-pad;
end_w1 = start_w1 + filt_w;
for(c1=0;c1<in_c;c1++){
for(h1=start_h1;h1<end_h1;h1++){
for(w1=start_w1;w1<end_w1;w1++){
if(h1 >= 0 && h1 < in_h && w1 >= 0 && w1 < in_w){
elt1 = a[IDX4C(n1,c1,h1,w1,in_c,in_h,in_w)];
elt2 = b[IDX4C(c2,c1,h1-start_h1,w1-start_w1,filt_c,filt_h,filt_w)];
sum = sum + elt1*elt2;
}
}
}
}
c[IDX4C(n1,c2,h2,w2,filt_n,oh,ow)] = sum;
}
}
}
/*
1st arg in_n of input tensor
2nd arg in_c of input tensor
3rd arg in_h of input tensor
4th arg in_w of input tensor
5th arg filt_n of filter tensor
6th arg filt_c of filter tensor
7th arg filt_h of filter tensor
8th arg filt_w of filter tensor
9th arg binary of input tensor
10th arg binary of filter tensor
11th arg stride
12th arg padding
*/
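/*
  Worked size example (illustrative numbers): the output spatial size computed
  below is oh = (in_h + 2*pad - filt_h)/st_h + 1, e.g. a 28x28 input with a
  5x5 filter, pad=0, stride=1 gives oh = (28 + 0 - 5)/1 + 1 = 24, and with
  pad=2 it gives oh = (28 + 4 - 5)/1 + 1 = 28 (same-size output).
*/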
static ERL_NIF_TERM
convolute1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin,b_bin;
ERL_NIF_TERM c_bin;
int in_n,in_c,in_h,in_w, filt_n,filt_c,filt_h,filt_w, st_h,st_w,pad, n1, n2, n3, oh, ow;
float *a,*b, *c;
float *dev_a, *dev_b, *dev_c;
if (!enif_get_int(env, argv[0], &in_n)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &in_c)) return enif_make_int(env,2);
if (!enif_get_int(env, argv[2], &in_h)) return enif_make_int(env,3);
if (!enif_get_int(env, argv[3], &in_w)) return enif_make_int(env,4);
if (!enif_get_int(env, argv[4], &filt_n)) return enif_make_int(env,5);
if (!enif_get_int(env, argv[5], &filt_c)) return enif_make_int(env,6);
if (!enif_get_int(env, argv[6], &filt_h)) return enif_make_int(env,7);
if (!enif_get_int(env, argv[7], &filt_w)) return enif_make_int(env,8);
if (!enif_inspect_binary(env, argv[8], &a_bin )) return enif_make_int(env,9);
if (!enif_inspect_binary(env, argv[9], &b_bin )) return enif_make_int(env,10);
if (!enif_get_int(env, argv[10], &st_h)) return enif_make_int(env,11);
if (!enif_get_int(env, argv[11], &st_w)) return enif_make_int(env,12);
if (!enif_get_int(env, argv[12], &pad)) return enif_make_int(env,13);
n1 = in_n * in_c * in_h * in_w;
n2 = filt_n * filt_c * filt_h * filt_w;
oh = (in_h+2*pad-filt_h)/st_h + 1;
ow = (in_w+2*pad-filt_w)/st_w + 1;
n3 = in_n * filt_n * oh * ow; // n of filter generate n channel
a = (float *) a_bin.data;
b = (float *) b_bin.data;
c = (float *) enif_make_new_binary(env, n3 * sizeof(float), &c_bin);
// Allocate for GPU
CHECK(hipMalloc((void**)&dev_a, n1 * sizeof(float)));
CHECK(hipMalloc((void**)&dev_b, n2 * sizeof(float)));
CHECK(hipMalloc((void**)&dev_c, n3 * sizeof(float)));
// copy from host a,b,c to GPU dev_a, dev_b, dev_c
CHECK(hipMemcpy(dev_a, a, n1 * sizeof(float), hipMemcpyHostToDevice));
CHECK(hipMemcpy(dev_b, b, n2 * sizeof(float), hipMemcpyHostToDevice));
CHECK(hipMemcpy(dev_c, c, n3 * sizeof(float), hipMemcpyHostToDevice));
dim3 blocks(in_n,1,1);
dim3 threads(filt_n,1,1);
hipLaunchKernelGGL(( convolute1_kernel) , dim3(blocks), dim3(threads), 0, 0, dev_a, dev_b, dev_c, filt_n, filt_c, filt_h, filt_w, st_h, st_w, pad, in_c, in_h, in_w, oh, ow);
// copy to host c from GPU dev_c
CHECK(hipMemcpy(c, dev_c, n3 * sizeof(float), hipMemcpyDeviceToHost));
// free
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
return(c_bin);
}
__global__ void deconvolute1_kernel(float *a, float *b, float *c, int filt_n, int filt_c, int filt_h, int filt_w,
int st_h, int st_w, int pad1, int pad, int in_c, int in_h, int in_w, int oh, int ow, int oh1, int ow1)
{
int bid = blockIdx.x;
int tid = threadIdx.x;
int n1,c1,c2,h1,w1,h2,w2,start_h1,end_h1,start_w1,end_w1;
float sum,elt1,elt2;
n1 = bid;
c2 = tid;
//full convolute. stride=1 always
for(w2=0;w2<ow;w2++){
for(h2=0;h2<oh;h2++){
start_h1 = h2-pad1;
end_h1 = start_h1 + filt_h;
start_w1 = w2-pad1;
end_w1 = start_w1 + filt_w;
sum = 0.0;
for(h1=start_h1;h1<end_h1;h1++){
for(w1=start_w1;w1<end_w1;w1++){
for(c1=0;c1<filt_n;c1++){
if(h1 >= 0 && h1 < in_h && w1 >= 0 && w1 < in_w){
elt1 = a[IDX4C(n1,c1,h1,w1,in_c,in_h,in_w)]; //loss tensor
elt2 = b[IDX4C(c1,c2,h1-start_h1,w1-start_w1,filt_c,filt_h,filt_w)]; //filter tensor
sum = sum + elt1*elt2;
}
}
}
}
if(h2-pad >=0 && h2-pad < oh1 && w2-pad >= 0 && w2-pad < ow1){
c[IDX4C(n1,c2,h2-pad,w2-pad,filt_c,oh1,ow1)] = sum;
}
}
}
}
/*
1st arg in_n of input tensor
2nd arg in_c of input tensor
3rd arg in_h of input tensor
4th arg in_w of input tensor
5th arg filt_n of filter tensor
6th arg filt_c of filter tensor
7th arg filt_h of filter tensor
8th arg filt_w of filter tensor
9th arg binary of input loss tensor
10th arg binary of filter tensor
11th arg stride hight
12th arg stride width
13th arg padding
memo
e.g. padding = 1
loss 4*4
filter 2*2
forward input 3*3 with padding=1:
(3-2+2*1)/1 + 1 = 4
deconvolute computes 5*5 (the 3*3 input with padding=1) and saves only the 3*3 result range
*/
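/*
  Worked example for the memo above (illustrative): with loss 4*4, filter 2*2,
  stride 1 and original padding pad=1,
    pad1 = filt_h - 1 = 1
    oh   = (4 + 2*1 - 2)/1 + 1 = 5        (full deconvolution size)
    oh1  = (4 + 2*(1-1) - 2)/1 + 1 = 3    (stored result size)
  so the kernel computes a 5*5 map and writes only the central 3*3 region.
*/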
static ERL_NIF_TERM
deconvolute1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin,b_bin;
ERL_NIF_TERM c_bin;
int in_n,in_c,in_h,in_w, filt_n,filt_c,filt_h,filt_w, st_h,st_w,pad, pad1, n1, n2, n3, oh, ow, oh1, ow1, i,j,k,l;
float *a,*b, *b1, *c;
float *dev_a, *dev_b, *dev_c;
if (!enif_get_int(env, argv[0], &in_n)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &in_c)) return enif_make_int(env,2);
if (!enif_get_int(env, argv[2], &in_h)) return enif_make_int(env,3);
if (!enif_get_int(env, argv[3], &in_w)) return enif_make_int(env,4);
if (!enif_get_int(env, argv[4], &filt_n)) return enif_make_int(env,5);
if (!enif_get_int(env, argv[5], &filt_c)) return enif_make_int(env,6);
if (!enif_get_int(env, argv[6], &filt_h)) return enif_make_int(env,7);
if (!enif_get_int(env, argv[7], &filt_w)) return enif_make_int(env,8);
if (!enif_inspect_binary(env, argv[8], &a_bin )) return enif_make_int(env,9);
if (!enif_inspect_binary(env, argv[9], &b_bin )) return enif_make_int(env,10);
if (!enif_get_int(env, argv[10], &st_h)) return enif_make_int(env,11);
if (!enif_get_int(env, argv[11], &st_w)) return enif_make_int(env,12);
if (!enif_get_int(env, argv[12], &pad)) return enif_make_int(env,13);
n1 = in_n * in_c * in_h * in_w;
n2 = filt_n * filt_c * filt_h * filt_w;
pad1 = filt_h - 1;
// pad1 = filt_h -1, pad is original padding size
oh = (in_h+2*pad1-filt_h)/st_h + 1;
ow = (in_w+2*pad1-filt_w)/st_w + 1;
oh1 = (in_h+2*(pad1-pad)-filt_h)/st_h + 1;
ow1 = (in_w+2*(pad1-pad)-filt_w)/st_w + 1;
n3 = in_n * filt_c * oh1 * ow1; // channel of filter generate same channel input tensor
a = (float *) a_bin.data;
b = (float *) b_bin.data;
b1 = (float *) enif_alloc(n2 * sizeof(float));
c = (float *) enif_make_new_binary(env, n3 * sizeof(float), &c_bin);
//rotate 180 degree
for(i=0;i<filt_n;i++){
for(j=0;j<filt_c;j++){
for(k=0;k<filt_h;k++){
for(l=0;l<filt_w;l++){
b1[IDX4C(i,j,filt_h-k-1,filt_w-l-1,filt_c,filt_h,filt_w)] = b[IDX4C(i,j,k,l,filt_c,filt_h,filt_w)];
}
}
}
}
// Allocate for GPU
CHECK(hipMalloc((void**)&dev_a, n1 * sizeof(float)));
CHECK(hipMalloc((void**)&dev_b, n2 * sizeof(float)));
CHECK(hipMalloc((void**)&dev_c, n3 * sizeof(float)));
// copy from host a,b1,c to GPU dev_a, dev_b, dev_c
CHECK(hipMemcpy(dev_a, a, n1 * sizeof(float), hipMemcpyHostToDevice));
CHECK(hipMemcpy(dev_b, b1, n2 * sizeof(float), hipMemcpyHostToDevice));
CHECK(hipMemcpy(dev_c, c, n3 * sizeof(float), hipMemcpyHostToDevice));
dim3 blocks(in_n,1,1);
dim3 threads(filt_c,1,1);
hipLaunchKernelGGL(( deconvolute1_kernel) , dim3(blocks), dim3(threads), 0, 0, dev_a, dev_b, dev_c, filt_n, filt_c, filt_h, filt_w, st_h, st_w, pad1, pad, in_c, in_h, in_w, oh, ow, oh1, ow1);
// copy to host c from GPU dev_c
CHECK(hipMemcpy(c, dev_c, n3 * sizeof(float), hipMemcpyDeviceToHost));
// free
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
enif_free(b1);
return(c_bin);
}
__global__ void deconvolute2_kernel(float *a1, float *a, float *b, float *c, int filt_n, int filt_c,int filt_h, int filt_w,
int st_h, int st_w, int pad, int in_c, int in_h, int in_w, int loss_h, int loss_w)
{
int bid = blockIdx.x;
int tid = threadIdx.x;
int n1,c1,c2,h1,w1,h2,w2,oh,ow,start_h1,end_h1,start_w1,end_w1;
int j,k,l,k1,l1;
float sum,elt1,elt2;
n1 = bid;
c2 = tid;
// caution! stride=1
oh = (in_h+2*pad-filt_h) + 1;
ow = (in_w+2*pad-filt_w) + 1;
//dilate loss tensor.
for(j=0;j<filt_n;j++){
for(k=0;k<loss_h;k++){
for(l=0;l<loss_w;l++){
elt1 = a[IDX4C(n1,j,k,l,in_c,loss_h,loss_w)];
k1 = st_h*k;
l1 = st_w*l;
a1[IDX4C(n1,j,k1,l1,in_c,in_h,in_w)] = elt1;
}
}
}
    //full convolute. stride=1
for(c2=0;c2<filt_c;c2++){
for(w2=0;w2<ow;w2++){
for(h2=0;h2<oh;h2++){
start_h1 = h2-pad;
end_h1 = start_h1 + filt_h;
start_w1 = w2-pad;
end_w1 = start_w1 + filt_w;
sum = 0.0;
for(h1=start_h1;h1<end_h1;h1++){
for(w1=start_w1;w1<end_w1;w1++){
for(c1=0;c1<filt_n;c1++){
if(h1 >= 0 && h1 < in_h && w1 >= 0 && w1 < in_w){
elt1 = a1[IDX4C(n1,c1,h1,w1,in_c,in_h,in_w)]; //loss tensor
elt2 = b[IDX4C(c1,c2,h1-start_h1,w1-start_w1,filt_c,filt_h,filt_w)]; //filter tensor
sum = sum + elt1*elt2;
}
}
}
}
c[IDX4C(n1,c2,h2,w2,filt_c,oh,ow)] = sum;
}
}
}
}
/*
dilate loss tensor
e.g.
|1.0,2.0|
|3.0,4.0|
dilated stride=2
|1.0,0.0,2.0|
|0.0,0.0,0.0|
|3.0,0.0,4.0|
*/
/*
1st arg in_n of input loss tensor
2nd arg in_c of input loss tensor
3rd arg in_h of input loss tensor
4th arg in_w of input loss tensor
5th arg filt_n of filter tensor
6th arg filt_c of filter tensor
7th arg filt_h of filter tensor
8th arg filt_w of filter tensor
9th arg binary of input loss tensor
10th arg binary of filter tensor
11th arg stride hight
12th arg stride width
13th arg padding
*/
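/*
  Worked size example (illustrative): the loss tensor is first dilated to
    in_h = loss_h + (loss_h - 1)*(st_h - 1), e.g. loss_h=2, st_h=2 -> 3,
  matching the 3*3 dilated matrix shown above. With a 2*2 filter and pad=0 the
  full convolution (stride 1, pad1 = (filt_h-1)+pad = 1) then yields
    oh = (3 + 2*1 - 2) + 1 = 4,
  recovering the spatial size of a 4*4 forward input convolved at stride 2.
*/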
static ERL_NIF_TERM
deconvolute2(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin,b_bin;
ERL_NIF_TERM c_bin;
int in_n,in_c,in_h,in_w,filt_n,filt_c,filt_h, filt_w, st_h, st_w,pad, pad1, n1, n2, n3, oh, ow, i,j,k,l, loss_h, loss_w;
float *a, *a1, *b, *b1, *c;
float *dev_a, *dev_a1, *dev_b, *dev_c;
if (!enif_get_int(env, argv[0], &in_n)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &in_c)) return enif_make_int(env,2);
if (!enif_get_int(env, argv[2], &loss_h)) return enif_make_int(env,3);
if (!enif_get_int(env, argv[3], &loss_w)) return enif_make_int(env,4);
if (!enif_get_int(env, argv[4], &filt_n)) return enif_make_int(env,5);
if (!enif_get_int(env, argv[5], &filt_c)) return enif_make_int(env,6);
if (!enif_get_int(env, argv[6], &filt_h)) return enif_make_int(env,7);
if (!enif_get_int(env, argv[7], &filt_w)) return enif_make_int(env,8);
if (!enif_inspect_binary(env, argv[8], &a_bin )) return enif_make_int(env,9);
if (!enif_inspect_binary(env, argv[9], &b_bin )) return enif_make_int(env,10);
if (!enif_get_int(env, argv[10], &st_h)) return enif_make_int(env,11);
if (!enif_get_int(env, argv[11], &st_w)) return enif_make_int(env,12);
if (!enif_get_int(env, argv[12], &pad)) return enif_make_int(env,13);
// size for dilate
in_h = loss_h + (loss_h - 1)*(st_h - 1);
in_w = loss_w + (loss_w - 1)*(st_w - 1);
n1 = in_n * in_c * in_h * in_w; //loss tensor size
n2 = filt_n * filt_c * filt_h * filt_w; //filter tensor size
pad1 = (filt_h - 1) + pad; //padding size with dilate
oh = (in_h+2*pad1-filt_h) + 1; //output deconvolute tensor size. caution stride=1.
ow = (in_w+2*pad1-filt_w) + 1; //
n3 = in_n * filt_c * oh * ow; //
a = (float *) a_bin.data;
b = (float *) b_bin.data;
a1 = (float *) enif_alloc(n1 * sizeof(float));
b1 = (float *) enif_alloc(n2 * sizeof(float));
c = (float *) enif_make_new_binary(env, n3 * sizeof(float), &c_bin);
//rotate 180 degree
for(i=0;i<filt_n;i++){
for(j=0;j<filt_c;j++){
for(k=0;k<filt_h;k++){
for(l=0;l<filt_w;l++){
b1[IDX4C(i,j,filt_h-k-1,filt_w-l-1,filt_c,filt_h,filt_w)] = b[IDX4C(i,j,k,l,filt_c,filt_h,filt_w)];
}
}
}
}
// dilate
for(i=0;i<n1;i++){
a1[i] = 0.0;
}
CHECK(hipMalloc((void**)&dev_a1, n1 * sizeof(float)));
CHECK(hipMalloc((void**)&dev_a, in_n*1*loss_h*loss_w * sizeof(float)));
CHECK(hipMalloc((void**)&dev_b, n2 * sizeof(float)));
CHECK(hipMalloc((void**)&dev_c, n3 * sizeof(float)));
CHECK(hipMemcpy(dev_a1, a1, n1 * sizeof(float), hipMemcpyHostToDevice));
CHECK(hipMemcpy(dev_a, a, in_n*1*loss_h*loss_w * sizeof(float), hipMemcpyHostToDevice));
CHECK(hipMemcpy(dev_b, b1, n2 * sizeof(float), hipMemcpyHostToDevice));
CHECK(hipMemcpy(dev_c, c, n3 * sizeof(float), hipMemcpyHostToDevice));
dim3 blocks(in_n,1,1);
dim3 threads(filt_c,1,1);
    hipLaunchKernelGGL(( deconvolute2_kernel), dim3(blocks), dim3(threads), 0, 0, dev_a1, dev_a, dev_b, dev_c, filt_n, filt_c, filt_h, filt_w, st_h, st_w, pad1, in_c, in_h, in_w, loss_h, loss_w);
// copy to host c from GPU dev_c
CHECK(hipMemcpy(c, dev_c, n3 * sizeof(float), hipMemcpyDeviceToHost));
// free
hipFree(dev_a);
hipFree(dev_a1);
hipFree(dev_b);
hipFree(dev_c);
enif_free(a1);
enif_free(b1);
return(c_bin);
}
__global__ void gradfilter1_kernel(float *a, float *b, float *c, int filt_n, int filt_c, int filt_h, int filt_w, int loss_c, int loss_h, int loss_w, int st_h, int st_w, int pad, int in_c, int in_h, int in_w, int n)
{
int bid = blockIdx.x;
int tid = threadIdx.x;
int n1,c1,c2,h1,w1,h2,w2,h3,w3;
float sum,elt1,elt2;
n1 = bid;
c2 = tid;
for(c1=0;c1<filt_c;c1++){
//h1,w1 is index of filter
for(h1=0;h1<filt_h;h1++){
for(w1=0;w1<filt_w;w1++){
//h2,w2 is index of loss tensor
sum = 0.0;
for(h2=0;h2<loss_h;h2++){
for(w2=0;w2<loss_w;w2++){
//h3,w3 is index of input tensor
h3 = h1 - pad + h2;
w3 = w1 - pad + w2;
if(h3>=0 && h3<in_h && w3>=0 && w3<in_w){
elt1 = a[IDX4C(n1,c1,h3,w3,in_c,in_h,in_w)]; //input tensor
elt2 = b[IDX4C(n1,c2,h2,w2,loss_c,loss_h,loss_w)]; //loss tensor
sum = sum + elt1*elt2;
}
}
}
//set filter tensor
c[IDX5C(n1,c2,c1,h1,w1,filt_n,filt_c,filt_h,filt_w)] = sum;
}
}
}
}
/*
1st arg in_n of input tensor
2nd arg in_c of input tensor
3rd arg in_h of input tensor
4th arg in_w of input tensor
5th arg filt_n of filter tensor
6th arg filt_c of filter tensor
7th arg filt_h of filter tensor
8th arg filt_w of filter tensor
9th arg loss_c of loss tensor
10th arg loss_h of loss tensor
11th arg loss_w of loss tensor
12th arg binary of filter tensor
13th arg binary of loss tensor
14th arg stride hight
15th arg stride width
16th arg padding
*/
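/*
  In effect the kernel below computes, per mini-batch sample n (stride 1 case),
    c[n][f][c1][h1][w1] = sum over (h2,w2) of
        input[n][c1][h1 - pad + h2][w1 - pad + w2] * loss[n][f][h2][w2]
  and the host code then averages c over the in_n samples to produce the
  filter gradient d.
*/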
static ERL_NIF_TERM
gradfilter1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin,b_bin;
ERL_NIF_TERM c_bin,d_bin;
int in_n,in_c,in_h,in_w,filt_n,filt_c,filt_h,filt_w,loss_c,loss_h,loss_w,st_h,st_w,pad,n1,n2,n3,n4,i,j,k,l,m;
float *a,*b,*c,*d;
float *dev_a, *dev_b, *dev_c;
float elt;
if (!enif_get_int(env, argv[0], &in_n)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &in_c)) return enif_make_int(env,2);
if (!enif_get_int(env, argv[2], &in_h)) return enif_make_int(env,3);
if (!enif_get_int(env, argv[3], &in_w)) return enif_make_int(env,4);
if (!enif_get_int(env, argv[4], &filt_n)) return enif_make_int(env,5);
if (!enif_get_int(env, argv[5], &filt_c)) return enif_make_int(env,6);
if (!enif_get_int(env, argv[6], &filt_h)) return enif_make_int(env,7);
if (!enif_get_int(env, argv[7], &filt_w)) return enif_make_int(env,8);
if (!enif_get_int(env, argv[8], &loss_c)) return enif_make_int(env,9);
if (!enif_get_int(env, argv[9], &loss_h)) return enif_make_int(env,10);
if (!enif_get_int(env, argv[10], &loss_w)) return enif_make_int(env,11);
if (!enif_inspect_binary(env, argv[11], &a_bin )) return enif_make_int(env,12);
if (!enif_inspect_binary(env, argv[12], &b_bin )) return enif_make_int(env,13);
if (!enif_get_int(env, argv[13], &st_h)) return enif_make_int(env,14);
if (!enif_get_int(env, argv[14], &st_w)) return enif_make_int(env,15);
if (!enif_get_int(env, argv[15], &pad)) return enif_make_int(env,16);
n1 = in_n * in_c * in_h * in_w;
n2 = in_n * loss_c * loss_h * loss_w;
n3 = in_n * filt_n * filt_c * filt_h * filt_w;
n4 = filt_n * filt_c * filt_h * filt_w;
a = (float *) a_bin.data;
b = (float *) b_bin.data;
c = (float *) enif_make_new_binary(env, n3 * sizeof(float), &c_bin);
d = (float *) enif_make_new_binary(env, n4 * sizeof(float), &d_bin);
//initialize c
for(i=0;i<n3;i++){
c[i] = 0.0;
}
// Allocate for GPU
CHECK(hipMalloc((void**)&dev_a, n1 * sizeof(float)));
CHECK(hipMalloc((void**)&dev_b, n2 * sizeof(float)));
CHECK(hipMalloc((void**)&dev_c, n3 * sizeof(float)));
// copy from host a,b,c to GPU dev_a, dev_b, dev_c
CHECK(hipMemcpy(dev_a, a, n1 * sizeof(float), hipMemcpyHostToDevice));
CHECK(hipMemcpy(dev_b, b, n2 * sizeof(float), hipMemcpyHostToDevice));
CHECK(hipMemcpy(dev_c, c, n3 * sizeof(float), hipMemcpyHostToDevice));
dim3 blocks(in_n,1,1);
dim3 threads(filt_n,1,1);
    hipLaunchKernelGGL(( gradfilter1_kernel), dim3(blocks), dim3(threads), 0, 0, dev_a, dev_b, dev_c, filt_n, filt_c, filt_h, filt_w, loss_c, loss_h, loss_w, st_h, st_w, pad, in_c, in_h, in_w, in_n);
// copy to host c from GPU dev_c
CHECK(hipMemcpy(c, dev_c, n3 * sizeof(float), hipMemcpyDeviceToHost));
//average
// clear d
for(i=0;i<n4;i++){
d[i] = 0.0;
}
// copy from c to d and compute sum
for(i=0;i<in_n;i++){
for(j=0;j<filt_n;j++){
for(k=0;k<filt_c;k++){
for(l=0;l<filt_h;l++){
for(m=0;m<filt_w;m++){
elt = c[IDX5C(i,j,k,l,m,filt_n,filt_c,filt_h,filt_w)];
d[IDX4C(j,k,l,m,filt_c,filt_h,filt_w)] = d[IDX4C(j,k,l,m,filt_c,filt_h,filt_w)] + elt;
}
}
}
}
}
// average
for(i=0;i<n4;i++){
d[i] = d[i] / (float)in_n;
}
// free
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
return(d_bin);
}
__global__ void gradfilter2_kernel(float *a, float *b1, float *b, float *c, int filt_n, int filt_c, int filt_h, int filt_w, int loss_c, int loss_h, int loss_w, int st_h, int st_w, int pad, int in_c, int in_h, int in_w, int n)
{
int bid = blockIdx.x;
int tid = threadIdx.x;
int n1,c1,c2,h1,w1,h2,w2,h3,w3,loss_h1,loss_w1,j,k,l,k1,l1;
float sum,elt1,elt2;
n1 = bid;
c2 = tid;
//dilated loss tensor size
loss_h1 = loss_h+(loss_h-1)*(st_h-1);
loss_w1 = loss_w+(loss_w-1)*(st_w-1);
//dilate loss tensor.
for(j=0;j<loss_c;j++){
for(k=0;k<loss_h;k++){
for(l=0;l<loss_w;l++){
elt1 = b[IDX4C(n1,j,k,l,loss_c,loss_h,loss_w)];
k1 = st_h*k;
l1 = st_w*l;
b1[IDX4C(n1,j,k1,l1,loss_c,loss_h1,loss_w1)] = elt1;
}
}
}
    //convolute input tensor with dilated loss tensor. caution: stride is always 1.
for(c1=0;c1<filt_c;c1++){
//h1,w1 is index of filter
for(h1=0;h1<filt_h;h1++){
for(w1=0;w1<filt_w;w1++){
//h2,w2 is index of loss tensor
sum = 0.0;
for(h2=0;h2<loss_h1;h2++){
for(w2=0;w2<loss_w1;w2++){
//h3,w3 is index of input tensor
h3 = h1 - pad + h2;
w3 = w1 - pad + w2;
if(h3>=0 && h3<in_h && w3>=0 && w3<in_w){
elt1 = a[IDX4C(n1,c1,h3,w3,in_c,in_h,in_w)]; //input tensor
elt2 = b1[IDX4C(n1,c2,h2,w2,loss_c,loss_h1,loss_w1)]; //loss tensor
sum = sum + elt1*elt2;
}
}
}
//set filter tensor
                c[IDX5C(n1,c2,c1,h1,w1,filt_n,filt_c,filt_h,filt_w)] = sum;
}
}
}
}
/*
dilate loss tensor
e.g.
|1.0,2.0|
|3.0,4.0|
dilated stride=2
|1.0,0.0,2.0|
|0.0,0.0,0.0|
|3.0,0.0,4.0|
*/
/*
gradfilter2 is for stride >= 2. This one requires dilate
1st arg in_n of input tensor
2nd arg in_c of input tensor
3rd arg in_h of input tensor
4th arg in_w of input tensor
5th arg filt_n of filter tensor
6th arg filt_c of filter tensor
7th arg filt_h of filter tensor
8th arg filt_w of filter tensor
9th arg loss_c of loss tensor
10th arg loss_h of loss tensor
11th arg loss_w of loss tensor
12th arg binary of filter tensor
13th arg binary of loss tensor
14th arg stride hight
15th arg stride width
16th arg padding
*/
static ERL_NIF_TERM
gradfilter2(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin,b_bin;
ERL_NIF_TERM c_bin,d_bin;
int in_n,in_c,in_h,in_w,filt_n,filt_c,filt_h,filt_w,loss_c,loss_h,loss_w,st_h,st_w,pad,n1,n2,n3,n4,n5,i,j,k,l,m;
float *a,*b,*b1,*c,*d;
float *dev_a, *dev_b, *dev_b1, *dev_c;
float elt;
if (!enif_get_int(env, argv[0], &in_n)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &in_c)) return enif_make_int(env,2);
if (!enif_get_int(env, argv[2], &in_h)) return enif_make_int(env,3);
if (!enif_get_int(env, argv[3], &in_w)) return enif_make_int(env,4);
if (!enif_get_int(env, argv[4], &filt_n)) return enif_make_int(env,5);
if (!enif_get_int(env, argv[5], &filt_c)) return enif_make_int(env,6);
if (!enif_get_int(env, argv[6], &filt_h)) return enif_make_int(env,7);
if (!enif_get_int(env, argv[7], &filt_w)) return enif_make_int(env,8);
if (!enif_get_int(env, argv[8], &loss_c)) return enif_make_int(env,9);
if (!enif_get_int(env, argv[9], &loss_h)) return enif_make_int(env,10);
if (!enif_get_int(env, argv[10], &loss_w)) return enif_make_int(env,11);
if (!enif_inspect_binary(env, argv[11], &a_bin )) return enif_make_int(env,12);
if (!enif_inspect_binary(env, argv[12], &b_bin )) return enif_make_int(env,13);
if (!enif_get_int(env, argv[13], &st_h)) return enif_make_int(env,14);
if (!enif_get_int(env, argv[14], &st_w)) return enif_make_int(env,15);
if (!enif_get_int(env, argv[15], &pad)) return enif_make_int(env,16);
n1 = in_n * in_c * in_h * in_w;
n2 = in_n * loss_c * loss_h * loss_w;
n3 = in_n * filt_n * filt_c * filt_h * filt_w;
n4 = filt_n * filt_c * filt_h * filt_w;
n5 = in_n * loss_c * (loss_h+(loss_h-1)*(st_h-1)) * (loss_w+(loss_w-1)*(st_w-1)); // dilated loss tensor size
a = (float *) a_bin.data;
b = (float *) b_bin.data;
b1 = (float *) enif_alloc(n5 * sizeof(float)); // dilate loss tensor area
c = (float *) enif_make_new_binary(env, n3 * sizeof(float), &c_bin);
d = (float *) enif_make_new_binary(env, n4 * sizeof(float), &d_bin);
//initialize c
for(i=0;i<n3;i++){
c[i] = 0.0;
}
//initialize b1
for(i=0;i<n5;i++){
b1[i] = 0.0;
}
// Allocate for GPU
CHECK(hipMalloc((void**)&dev_a, n1 * sizeof(float)));
CHECK(hipMalloc((void**)&dev_b, n2 * sizeof(float)));
CHECK(hipMalloc((void**)&dev_b1, n5 * sizeof(float)));
CHECK(hipMalloc((void**)&dev_c, n3 * sizeof(float)));
// copy from host a,b,c to GPU dev_a, dev_b, dev_c
CHECK(hipMemcpy(dev_a, a, n1 * sizeof(float), hipMemcpyHostToDevice));
CHECK(hipMemcpy(dev_b, b, n2 * sizeof(float), hipMemcpyHostToDevice));
CHECK(hipMemcpy(dev_b1, b1, n5 * sizeof(float), hipMemcpyHostToDevice));
CHECK(hipMemcpy(dev_c, c, n3 * sizeof(float), hipMemcpyHostToDevice));
dim3 blocks(in_n,1,1);
dim3 threads(filt_n,1,1);
    gradfilter2_kernel<<<blocks, threads>>>(dev_a, dev_b1, dev_b, dev_c, filt_n, filt_c, filt_h, filt_w, loss_c, loss_h, loss_w, st_h, st_w, pad, in_c, in_h, in_w, in_n);
// copy to host c from GPU dev_c
CHECK(hipMemcpy(c, dev_c, n3 * sizeof(float), hipMemcpyDeviceToHost));
//average
// clear d
for(i=0;i<n4;i++){
d[i] = 0.0;
}
// copy from c to d and compute sum
for(i=0;i<in_n;i++){
for(j=0;j<filt_n;j++){
for(k=0;k<filt_c;k++){
for(l=0;l<filt_h;l++){
for(m=0;m<filt_w;m++){
elt = c[IDX5C(i,j,k,l,m,filt_n,filt_c,filt_h,filt_w)];
d[IDX4C(j,k,l,m,filt_c,filt_h,filt_w)] = d[IDX4C(j,k,l,m,filt_c,filt_h,filt_w)] + elt;
}
}
}
}
}
// average
for(i=0;i<n4;i++){
d[i] = d[i] / (float)in_n;
}
// free
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_b1);
hipFree(dev_c);
enif_free(b1);
return(d_bin);
}
__global__ void full_kernel(float *a, float *b, int in_n, int in_c, int in_h, int in_w, int n)
{
int tid = threadIdx.x;
int n1,i,j,k;
float elt;
if(tid < n)
{
n1 = tid;
for(i=0;i<in_c;i++){
for(j=0;j<in_h;j++){
for(k=0;k<in_w;k++){
elt = a[IDX4C(n1,i,j,k,in_c,in_h,in_w)];
b[IDX2C(n1,i*in_h*in_w + j*in_w + k,in_n)] = elt;
}
}
}
}
}
/*
1st arg in_n of input tensor 4DIM
2nd arg in_c of input tensor
3rd arg in_h of input tensor
4th arg in_w of input tensor
5th arg binary of input tensor
*/
static ERL_NIF_TERM
full1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM b_bin;
int in_n,in_c,in_h,in_w,n1,n;
float *a,*b;
float *dev_a, *dev_b;
if (!enif_get_int(env, argv[0], &in_n)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &in_c)) return enif_make_int(env,2);
if (!enif_get_int(env, argv[2], &in_h)) return enif_make_int(env,3);
if (!enif_get_int(env, argv[3], &in_w)) return enif_make_int(env,4);
if (!enif_inspect_binary(env, argv[4], &a_bin )) return enif_make_int(env,5);
n1 = in_n * in_c * in_h * in_w;
a = (float *) a_bin.data;
b = (float *) enif_make_new_binary(env, n1 * sizeof(float), &b_bin);
n = in_n;
// Allocate for GPU
CHECK(hipMalloc((void**)&dev_a, n1 * sizeof(float)));
CHECK(hipMalloc((void**)&dev_b, n1 * sizeof(float)));
// copy from host a,b to GPU dev_a, dev_b
CHECK(hipMemcpy(dev_a, a, n1 * sizeof(float), hipMemcpyHostToDevice));
CHECK(hipMemcpy(dev_b, b, n1 * sizeof(float), hipMemcpyHostToDevice));
    full_kernel<<<1, n>>>(dev_a, dev_b, in_n, in_c, in_h, in_w, n);
// copy to host d from GPU dev_d
CHECK(hipMemcpy(b, dev_b, n1 * sizeof(float), hipMemcpyDeviceToHost));
// free
hipFree(dev_a);
hipFree(dev_b);
return(b_bin);
}
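/*
  CPU reference for full_kernel above: it flattens an NCHW 4-D tensor into an
  in_n x (in_c*in_h*in_w) matrix stored column-major (IDX2C), one row per sample.
  Note that the <<<1, in_n>>> launch in full1 assumes in_n does not exceed the
  device limit on threads per block (typically 1024). Sketch for reference only.
*/
static void full_ref(const float *a, float *b, int in_n, int in_c, int in_h, int in_w)
{
    int n1, i, j, k;
    for (n1 = 0; n1 < in_n; n1++) {
        for (i = 0; i < in_c; i++) {
            for (j = 0; j < in_h; j++) {
                for (k = 0; k < in_w; k++) {
                    b[IDX2C(n1, i * in_h * in_w + j * in_w + k, in_n)] =
                        a[IDX4C(n1, i, j, k, in_c, in_h, in_w)];
                }
            }
        }
    }
}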
__global__ void unfull_kernel(float *a, float *b, int in_n, int in_c, int in_h, int in_w, int n)
{
int tid = threadIdx.x;
int n1,i,j,k;
float elt;
if(tid < n)
{
n1 = tid;
for(i=0;i<in_c;i++){
for(j=0;j<in_h;j++){
for(k=0;k<in_w;k++){
elt = a[IDX2C(n1,i*in_h*in_w + j*in_w + k,in_n)];
b[IDX4C(n1,i,j,k,in_c,in_h,in_w)] = elt;
}
}
}
}
}
/*
1st arg in_n of input tensor 4DIM
2nd arg in_c of input tensor
3rd arg in_h of input tensor
4th arg in_w of input tensor
5th arg binary of input tensor
*/
static ERL_NIF_TERM
unfull1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM b_bin;
int in_n,in_c,in_h,in_w,n1,n;
float *a,*b;
float *dev_a, *dev_b;
if (!enif_get_int(env, argv[0], &in_n)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &in_c)) return enif_make_int(env,2);
if (!enif_get_int(env, argv[2], &in_h)) return enif_make_int(env,3);
if (!enif_get_int(env, argv[3], &in_w)) return enif_make_int(env,4);
if (!enif_inspect_binary(env, argv[4], &a_bin )) return enif_make_int(env,5);
n1 = in_n * in_c * in_h * in_w;
a = (float *) a_bin.data;
b = (float *) enif_make_new_binary(env, n1 * sizeof(float), &b_bin);
n = in_n;
// Allocate for GPU
CHECK(hipMalloc((void**)&dev_a, n1 * sizeof(float)));
CHECK(hipMalloc((void**)&dev_b, n1 * sizeof(float)));
// copy from host a,b1,c to GPU dev_a, dev_b, dev_c
CHECK(hipMemcpy(dev_a, a, n1 * sizeof(float), hipMemcpyHostToDevice));
CHECK(hipMemcpy(dev_b, b, n1 * sizeof(float), hipMemcpyHostToDevice));
    unfull_kernel<<<1, n>>>(dev_a, dev_b, in_n, in_c, in_h, in_w, n);
// copy to host d from GPU dev_d
CHECK(hipMemcpy(b, dev_b, n1 * sizeof(float), hipMemcpyDeviceToHost));
// free
hipFree(dev_a);
hipFree(dev_b);
return(b_bin);
}
static ERL_NIF_TERM
new1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
int n,i;
ERL_NIF_TERM a_bin;
float *a;
double d;
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
if (!enif_get_double(env, argv[1], &d)) return enif_make_int(env,2);
a = (float *) enif_make_new_binary(env, n * sizeof(float), &a_bin);
// Set matrix data
for(i=0;i<n;i++){
a[i] = (float)d;
}
return(a_bin);
}
static ERL_NIF_TERM
new2(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
int r1,c1,i,j;
ERL_NIF_TERM head, list, a_bin;
float *a;
double d;
if (!enif_get_int(env, argv[0], &r1)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c1)) return enif_make_int(env,2);
a = (float *) enif_make_new_binary(env, r1 * c1 * sizeof(float), &a_bin);
// Set matrix data
list = argv[2]; /* matrix1 */
for(i=0;i<r1;i++){
for(j=0;j<c1;j++){
enif_get_list_cell(env, list, &head, &list);
enif_get_double(env,head,&d);
a[IDX2C(i,j,r1)] = (float)d;
}
}
return(a_bin);
}
static ERL_NIF_TERM
new3(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
int c,h,w,i,j,k;
ERL_NIF_TERM head, list, a_bin;
float *a;
double d;
if (!enif_get_int(env, argv[0], &c)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &h)) return enif_make_int(env,2);
if (!enif_get_int(env, argv[2], &w)) return enif_make_int(env,3);
a = (float *) enif_make_new_binary(env, c * h * w * sizeof(float), &a_bin);
// Set matrix data
list = argv[3]; /* matrix1 */
for(i=0;i<c;i++){
for(j=0;j<h;j++){
for(k=0;k<w;k++){
enif_get_list_cell(env, list, &head, &list);
enif_get_double(env,head,&d);
a[IDX3C(i,j,k,h,w)] = (float)d;
}
}
}
return(a_bin);
}
static ERL_NIF_TERM
new4(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
int n,c,h,w,i,j,k,l;
ERL_NIF_TERM head, list, a_bin;
float *a;
double d;
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c)) return enif_make_int(env,2);
if (!enif_get_int(env, argv[2], &h)) return enif_make_int(env,3);
if (!enif_get_int(env, argv[3], &w)) return enif_make_int(env,4);
a = (float *) enif_make_new_binary(env, n * c * h * w * sizeof(float), &a_bin);
// Set matrix data
list = argv[4]; /* matrix1 */
for(i=0;i<n;i++){
for(j=0;j<c;j++){
for(k=0;k<h;k++){
for(l=0;l<w;l++){
enif_get_list_cell(env, list, &head, &list);
enif_get_double(env,head,&d);
a[IDX4C(i,j,k,l,c,h,w)] = (float)d;
}
}
}
}
return(a_bin);
}
static ERL_NIF_TERM
rand1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
int n,i;
float x,y,val;
float *result_data;
ERL_NIF_TERM result;
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
result_data = (float *) enif_make_new_binary(env, n * sizeof(float), &result);
srand((unsigned) time(NULL));
for(i=0;i<n;i++){
        //Box-Muller transform: turn two uniform samples into one standard normal sample
x = (float)rand()/(float)RAND_MAX;
y = (float)rand()/(float)RAND_MAX;
val = sqrt(-2.0 * log(x)) * cos(2.0 * PI * y);
result_data[i] = val;
}
return(result);
}
static ERL_NIF_TERM
mult1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin, b_bin;
ERL_NIF_TERM c_bin;
int r1, c1, r2, c2, n, i, j;
float *a,*b,*c;
float* devPtrA;
float* devPtrB;
float* devPtrC;
if (!enif_get_int(env, argv[0], &r1)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c1)) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &a_bin )) return enif_make_int(env,3);
if (!enif_get_int(env, argv[3], &r2)) return enif_make_int(env,4);
if (!enif_get_int(env, argv[4], &c2)) return enif_make_int(env,5);
if (!enif_inspect_binary(env, argv[5], &b_bin)) return enif_make_int(env,6);
n = r1*c2;
a = (float *) a_bin.data;
b = (float *) b_bin.data;
c = (float *) enif_make_new_binary(env, n * sizeof(float), &c_bin);
for(j=0;j<c2;j++)
for(i=0;i<r1;i++)
c[IDX2C(i,j,r1)] = 0.0;
// Initialize CUBLAS
hipblasInit();
CUBLAS(hipblasAlloc (r1*c1, sizeof(*a), (void**)&devPtrA));
CUBLAS(hipblasAlloc (r2*c2, sizeof(*b), (void**)&devPtrB));
CUBLAS(hipblasAlloc (r1*c2, sizeof(*c), (void**)&devPtrC));
CUBLAS(hipblasSetMatrix (r1, c1, sizeof(*a), a, r1, devPtrA, r1));
CUBLAS(hipblasSetMatrix (r2, c2, sizeof(*b), b, r2, devPtrB, r2));
CUBLAS(hipblasSetMatrix (r1, c2, sizeof(*c), c, r1, devPtrC, r1));
//Sgemm
hipblasSgemm('N', 'N', r1, c2, c1, 1.0, devPtrA, r1, devPtrB, r2, 0.0, devPtrC, r1);
CUBLAS(hipblasGetMatrix (r1, c2, sizeof(*c), devPtrC, r1, c, r1));
// Shutdown CUBLAS
hipblasFree(devPtrA);
hipblasFree(devPtrB);
hipblasFree(devPtrC);
hipblasShutdown();
return(c_bin);
}
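/*
  Layout note for mult1 above: cuBLAS expects column-major storage, which is why
  elements are addressed with IDX2C(i,j,rows) and the leading dimension passed to
  cublasSetMatrix/cublasSgemm is the row count. The sketch below only illustrates
  how a host matrix has to be filled for that layout; it is not used by the NIFs.
*/
static void fill_column_major_example(float *a, int rows, int cols)
{
    int i, j;
    for (i = 0; i < rows; i++) {
        for (j = 0; j < cols; j++) {
            // element (i,j) lives at offset j*rows + i in column-major order
            a[IDX2C(i, j, rows)] = (float)(i * cols + j);
        }
    }
}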
__global__ void add1_kernel(float *a, float *b, float *c, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n)
{
c[tid] = a[tid] + b[tid];
tid += blockDim.x * gridDim.x;
}
}
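/*
  add1_kernel above, and the other element-wise kernels in this file, use a
  grid-stride loop: each thread starts at its global index and then advances by
  blockDim.x * gridDim.x, so a fixed <<<128, 128>>> launch still covers every
  element even when n is larger than 128*128.
*/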
static ERL_NIF_TERM
add1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin, b_bin;
ERL_NIF_TERM c_bin;
int n;
float *a,*b,*c;
float *dev_a, *dev_b, *dev_c;
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
if (!enif_inspect_binary(env, argv[1], &a_bin )) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &b_bin)) return enif_make_int(env,3);
a = (float *) a_bin.data;
b = (float *) b_bin.data;
c = (float *) enif_make_new_binary(env, n * sizeof(float), &c_bin);
// Allocate for GPU
CHECK(hipMalloc((void**)&dev_a, n * sizeof(float)));
CHECK(hipMalloc((void**)&dev_b, n * sizeof(float)));
CHECK(hipMalloc((void**)&dev_c, n * sizeof(float)));
// copy from host a,b to GPU dev_a, dev_b
CHECK(hipMemcpy(dev_a, a, n * sizeof(float), hipMemcpyHostToDevice));
CHECK(hipMemcpy(dev_b, b, n * sizeof(float), hipMemcpyHostToDevice));
    add1_kernel<<<128, 128>>>(dev_a, dev_b, dev_c, n);
// copy to host c from GPU dev_c
CHECK(hipMemcpy(c, dev_c, n * sizeof(float), hipMemcpyDeviceToHost));
// free
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
return(c_bin);
}
__global__ void sub1_kernel(float *a, float *b, float *c, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n)
{
c[tid] = a[tid] - b[tid];
tid += blockDim.x * gridDim.x;
}
}
static ERL_NIF_TERM
sub1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin, b_bin;
ERL_NIF_TERM c_bin;
int n;
float *a,*b,*c;
float *dev_a, *dev_b, *dev_c;
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
if (!enif_inspect_binary(env, argv[1], &a_bin )) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &b_bin)) return enif_make_int(env,3);
a = (float *) a_bin.data;
b = (float *) b_bin.data;
c = (float *) enif_make_new_binary(env, n * sizeof(float), &c_bin);
// Allocate for GPU
CHECK(hipMalloc((void**)&dev_a, n * sizeof(float)));
CHECK(hipMalloc((void**)&dev_b, n * sizeof(float)));
CHECK(hipMalloc((void**)&dev_c, n * sizeof(float)));
// copy from host a,b to GPU dev_a, dev_b
CHECK(hipMemcpy(dev_a, a, n * sizeof(float), hipMemcpyHostToDevice));
CHECK(hipMemcpy(dev_b, b, n * sizeof(float), hipMemcpyHostToDevice));
    sub1_kernel<<<128, 128>>>(dev_a, dev_b, dev_c, n);
// copy to host c from GPU dev_c
CHECK(hipMemcpy(c, dev_c, n * sizeof(float), hipMemcpyDeviceToHost));
// free
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
return(c_bin);
}
__global__ void emult1_kernel(float *a, float *b, float *c, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n)
{
c[tid] = a[tid] * b[tid];
tid += blockDim.x * gridDim.x;
}
}
static ERL_NIF_TERM
emult1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin, b_bin;
ERL_NIF_TERM c_bin;
int n;
float *a,*b,*c;
float *dev_a, *dev_b, *dev_c;
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
if (!enif_inspect_binary(env, argv[1], &a_bin )) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &b_bin)) return enif_make_int(env,3);
a = (float *) a_bin.data;
b = (float *) b_bin.data;
c = (float *) enif_make_new_binary(env, n * sizeof(float), &c_bin);
// Allocate for GPU
CHECK(hipMalloc((void**)&dev_a, n * sizeof(float)));
CHECK(hipMalloc((void**)&dev_b, n * sizeof(float)));
CHECK(hipMalloc((void**)&dev_c, n * sizeof(float)));
// copy from host a,b to GPU dev_a, dev_b
CHECK(hipMemcpy(dev_a, a, n * sizeof(float), hipMemcpyHostToDevice));
CHECK(hipMemcpy(dev_b, b, n * sizeof(float), hipMemcpyHostToDevice));
    emult1_kernel<<<128, 128>>>(dev_a, dev_b, dev_c, n);
// copy to host c from GPU dev_c
CHECK(hipMemcpy(c, dev_c, n * sizeof(float), hipMemcpyDeviceToHost));
// free
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
return(c_bin);
}
static ERL_NIF_TERM
transpose1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM b_bin;
int r1, c1, n, i, j;
float *a,*b;
if (!enif_get_int(env, argv[0], &r1)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c1)) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &a_bin )) return enif_make_int(env,3);
n = r1*c1;
a = (float *) a_bin.data;
b = (float *) enif_make_new_binary(env, n * sizeof(float), &b_bin);
for(i=0;i<r1;i++){
for(j=0;j<c1;j++){
b[IDX2C(j,i,c1)] = a[IDX2C(i,j,r1)];
}
}
return(b_bin);
}
static ERL_NIF_TERM
ident1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
int n,i,j;
ERL_NIF_TERM a_bin;
float *a;
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
a = (float *) enif_make_new_binary(env, n * n * sizeof(float), &a_bin);
// Set matrix data
for(i=0;i<n;i++){
for(j=0;j<n;j++){
if(i==j)
a[IDX2C(i,j,n)] = 1.0;
else
a[IDX2C(i,j,n)] = 0.0;
}
}
return(a_bin);
}
__global__ void sigmoid_kernel(float *a, float *b, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n)
{
b[tid] = SIGMOID(a[tid]);
tid += blockDim.x * gridDim.x;
}
}
static ERL_NIF_TERM
activate_sigmoid(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM b_bin;
int n;
float *a,*b;
float *dev_a, *dev_b;
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
if (!enif_inspect_binary(env, argv[1], &a_bin )) return enif_make_int(env,2);
a = (float *) a_bin.data;
b = (float *) enif_make_new_binary(env, n * sizeof(float), &b_bin);
// Allocate for GPU
CHECK(hipMalloc((void**)&dev_a, n * sizeof(float)));
CHECK(hipMalloc((void**)&dev_b, n * sizeof(float)));
// copy from host a,b to GPU dev_a, dev_b
CHECK(hipMemcpy(dev_a, a, n * sizeof(float), hipMemcpyHostToDevice));
CHECK(hipMemcpy(dev_b, b, n * sizeof(float), hipMemcpyHostToDevice));
    sigmoid_kernel<<<128, 128>>>(dev_a, dev_b, n);
// copy to host c from GPU dev_c
CHECK(hipMemcpy(b, dev_b, n * sizeof(float), hipMemcpyDeviceToHost));
// free
hipFree(dev_a);
hipFree(dev_b);
return(b_bin);
}
__global__ void tanh_kernel(float *a, float *b, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n)
{
b[tid] = tanh(a[tid]);
tid += blockDim.x * gridDim.x;
}
}
static ERL_NIF_TERM
activate_tanh(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM b_bin;
int n;
float *a,*b;
float *dev_a, *dev_b;
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
if (!enif_inspect_binary(env, argv[1], &a_bin )) return enif_make_int(env,2);
a = (float *) a_bin.data;
b = (float *) enif_make_new_binary(env, n * sizeof(float), &b_bin);
// Allocate for GPU
CHECK(hipMalloc((void**)&dev_a, n * sizeof(float)));
CHECK(hipMalloc((void**)&dev_b, n * sizeof(float)));
// copy from host a,b to GPU dev_a, dev_b
CHECK(hipMemcpy(dev_a, a, n * sizeof(float), hipMemcpyHostToDevice));
CHECK(hipMemcpy(dev_b, b, n * sizeof(float), hipMemcpyHostToDevice));
    tanh_kernel<<<128, 128>>>(dev_a, dev_b, n);
// copy to host c from GPU dev_c
CHECK(hipMemcpy(b, dev_b, n * sizeof(float), hipMemcpyDeviceToHost));
// free
hipFree(dev_a);
hipFree(dev_b);
return(b_bin);
}
__global__ void relu_kernel(float *a, float *b, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n)
{
if(a[tid] >= 0)
b[tid] = a[tid];
else
b[tid] = 0.0;
tid += blockDim.x * gridDim.x;
}
}
static ERL_NIF_TERM
activate_relu(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM b_bin;
int n;
float *a,*b;
float *dev_a, *dev_b;
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
if (!enif_inspect_binary(env, argv[1], &a_bin )) return enif_make_int(env,3);
a = (float *) a_bin.data;
b = (float *) enif_make_new_binary(env, n * sizeof(float), &b_bin);
// Allocate for GPU
CHECK(hipMalloc((void**)&dev_a, n * sizeof(float)));
CHECK(hipMalloc((void**)&dev_b, n * sizeof(float)));
// copy from host a,b to GPU dev_a, dev_b
CHECK(hipMemcpy(dev_a, a, n * sizeof(float), hipMemcpyHostToDevice));
CHECK(hipMemcpy(dev_b, b, n * sizeof(float), hipMemcpyHostToDevice));
    relu_kernel<<<128, 128>>>(dev_a, dev_b, n);
// copy to host c from GPU dev_c
CHECK(hipMemcpy(b, dev_b, n * sizeof(float), hipMemcpyDeviceToHost));
// free
hipFree(dev_a);
hipFree(dev_b);
return(b_bin);
}
static ERL_NIF_TERM
activate_softmax(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM b_bin;
int r1, c1, n, i, j, k;
float *a,*b;
float max,sum,delta;
if (!enif_get_int(env, argv[0], &r1)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c1)) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &a_bin )) return enif_make_int(env,3);
n = r1*c1;
a = (float *) a_bin.data;
b = (float *) enif_make_new_binary(env, n * sizeof(float), &b_bin);
    //calculate softmax row-wise; the row maximum is subtracted before exp for numerical stability, and delta keeps the denominator away from zero
delta = 0.01;
for(i=0;i<r1;i++){
for(j=0;j<c1;j++){
max = -3.402823e38;
for(k=0;k<c1;k++){
if(a[IDX2C(i,k,r1)] > max)
max = a[IDX2C(i,k,r1)];
}
sum = 0.0;
for(k=0;k<c1;k++){
sum = sum + exp(a[IDX2C(i,k,r1)] - max);
}
b[IDX2C(i,j,r1)] = exp(a[IDX2C(i,j,r1)] - max) / (sum+delta);
}
}
return(b_bin);
}
__global__ void differ_sigmoid_kernel(float *a, float *b, float *c, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n)
{
c[tid] = a[tid] * ((1 - SIGMOID(b[tid])) * SIGMOID(b[tid]));
tid += blockDim.x * gridDim.x;
}
}
static ERL_NIF_TERM
differ_sigmoid(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin, b_bin;
ERL_NIF_TERM c_bin;
int n;
float *a,*b,*c;
float *dev_a, *dev_b, *dev_c;
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
if (!enif_inspect_binary(env, argv[1], &a_bin )) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &b_bin)) return enif_make_int(env,3);
a = (float *) a_bin.data;
b = (float *) b_bin.data;
c = (float *) enif_make_new_binary(env, n * sizeof(float), &c_bin);
// Allocate for GPU
CHECK(hipMalloc((void**)&dev_a, n * sizeof(float)));
CHECK(hipMalloc((void**)&dev_b, n * sizeof(float)));
CHECK(hipMalloc((void**)&dev_c, n * sizeof(float)));
// copy from host a,b to GPU dev_a, dev_b
CHECK(hipMemcpy(dev_a, a, n * sizeof(float), hipMemcpyHostToDevice));
CHECK(hipMemcpy(dev_b, b, n * sizeof(float), hipMemcpyHostToDevice));
    differ_sigmoid_kernel<<<128, 128>>>(dev_a, dev_b, dev_c, n);
// copy to host c from GPU dev_c
CHECK(hipMemcpy(c, dev_c, n * sizeof(float), hipMemcpyDeviceToHost));
// free
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
return(c_bin);
}
__global__ void differ_tanh_kernel(float *a, float *b, float *c, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n)
{
c[tid] = a[tid] * (1/(cosh(b[tid]) * cosh(b[tid])));
tid += blockDim.x * gridDim.x;
}
}
static ERL_NIF_TERM
differ_tanh(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin, b_bin;
ERL_NIF_TERM c_bin;
int n;
float *a,*b,*c;
float *dev_a, *dev_b, *dev_c;
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
if (!enif_inspect_binary(env, argv[1], &a_bin )) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &b_bin)) return enif_make_int(env,3);
a = (float *) a_bin.data;
b = (float *) b_bin.data;
c = (float *) enif_make_new_binary(env, n * sizeof(float), &c_bin);
// Allocate for GPU
CHECK(hipMalloc((void**)&dev_a, n * sizeof(float)));
CHECK(hipMalloc((void**)&dev_b, n * sizeof(float)));
CHECK(hipMalloc((void**)&dev_c, n * sizeof(float)));
// copy from host a,b to GPU dev_a, dev_b
CHECK(hipMemcpy(dev_a, a, n * sizeof(float), hipMemcpyHostToDevice));
CHECK(hipMemcpy(dev_b, b, n * sizeof(float), hipMemcpyHostToDevice));
    differ_tanh_kernel<<<128, 128>>>(dev_a, dev_b, dev_c, n);
// copy to host c from GPU dev_c
CHECK(hipMemcpy(c, dev_c, n * sizeof(float), hipMemcpyDeviceToHost));
// free
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
return(c_bin);
}
__global__ void differ_relu_kernel(float *a, float *b, float *c, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n)
{
if(b[tid] >= 0)
c[tid] = a[tid];
else
c[tid] = 0.0;
tid += blockDim.x * gridDim.x;
}
}
static ERL_NIF_TERM
differ_relu(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin, b_bin;
ERL_NIF_TERM c_bin;
int n;
float *a,*b,*c;
float *dev_a, *dev_b, *dev_c;
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
if (!enif_inspect_binary(env, argv[1], &a_bin )) return enif_make_int(env,3);
if (!enif_inspect_binary(env, argv[2], &b_bin)) return enif_make_int(env,4);
a = (float *) a_bin.data;
b = (float *) b_bin.data;
c = (float *) enif_make_new_binary(env, n * sizeof(float), &c_bin);
// Allocate for GPU
CHECK(hipMalloc((void**)&dev_a, n * sizeof(float)));
CHECK(hipMalloc((void**)&dev_b, n * sizeof(float)));
CHECK(hipMalloc((void**)&dev_c, n * sizeof(float)));
// copy from host a,b to GPU dev_a, dev_b
CHECK(hipMemcpy(dev_a, a, n * sizeof(float), hipMemcpyHostToDevice));
CHECK(hipMemcpy(dev_b, b, n * sizeof(float), hipMemcpyHostToDevice));
    differ_relu_kernel<<<128, 128>>>(dev_a, dev_b, dev_c, n);
// copy to host c from GPU dev_c
CHECK(hipMemcpy(c, dev_c, n * sizeof(float), hipMemcpyDeviceToHost));
// free
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
return(c_bin);
}
__global__ void smult_kernel(float d, float *a, float *b, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n)
{
b[tid] = d * a[tid];
tid += blockDim.x * gridDim.x;
}
}
static ERL_NIF_TERM
smult1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM b_bin;
int n;
float *a,*b;
float *dev_a, *dev_b;
double s;
if (!enif_get_double(env, argv[0], &s)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &n)) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &a_bin )) return enif_make_int(env,3);
a = (float *) a_bin.data;
b = (float *) enif_make_new_binary(env, n * sizeof(float), &b_bin);
// Allocate for GPU
CHECK(hipMalloc((void**)&dev_a, n * sizeof(float)));
CHECK(hipMalloc((void**)&dev_b, n * sizeof(float)));
// copy from host a,b to GPU dev_a, dev_b
CHECK(hipMemcpy(dev_a, a, n * sizeof(float), hipMemcpyHostToDevice));
CHECK(hipMemcpy(dev_b, b, n * sizeof(float), hipMemcpyHostToDevice));
    smult_kernel<<<128, 128>>>((float)s, dev_a, dev_b, n);
// copy to host c from GPU dev_c
CHECK(hipMemcpy(b, dev_b, n * sizeof(float), hipMemcpyDeviceToHost));
// free
hipFree(dev_a);
hipFree(dev_b);
return(b_bin);
}
static ERL_NIF_TERM
trace1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM result;
int r1, c1, i, j;
float *a;
float trace;
if (!enif_get_int(env, argv[0], &r1)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c1)) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &a_bin )) return enif_make_int(env,3);
a = (float *) a_bin.data;
trace = 0.0;
for(i=0;i<r1;i++){
for(j=0;j<c1;j++){
if(i==j)
trace = trace + a[IDX2C(i,j,r1)];
}
}
result = enif_make_double(env,trace);
return(result);
}
static ERL_NIF_TERM
mean_square(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin,b_bin;
ERL_NIF_TERM result;
int r1, c1, i, j;
float *a, *b;
float d,s;
if (!enif_get_int(env, argv[0], &r1)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c1)) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &a_bin )) return enif_make_int(env,3);
if (!enif_inspect_binary(env, argv[3], &b_bin )) return enif_make_int(env,4);
a = (float *) a_bin.data;
b = (float *) b_bin.data;
s = 0.0;
for(i=0;i<r1;i++){
for (j=0;j<c1;j++){
d = a[IDX2C(i,j,r1)] - b[IDX2C(i,j,r1)];
s = s + d*d;
}
}
s = s / (2.0*(float(r1)));
result = enif_make_double(env,s);
return(result);
}
static ERL_NIF_TERM
cross_entropy(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin,b_bin;
ERL_NIF_TERM result;
int r1, c1, i, j;
float *a, *b;
float d,s,delta;
if (!enif_get_int(env, argv[0], &r1)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c1)) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &a_bin )) return enif_make_int(env,3);
if (!enif_inspect_binary(env, argv[3], &b_bin )) return enif_make_int(env,4);
a = (float *) a_bin.data;
b = (float *) b_bin.data;
delta = 1e-7;
s = 0.0;
for(i=0;i<r1;i++){
for (j=0;j<c1;j++){
d = a[IDX2C(i,j,r1)] + delta;
s = s + b[IDX2C(i,j,r1)] * log(d);
}
}
s = -1.0 * s / (float)r1;
result = enif_make_double(env,s);
return(result);
}
static ERL_NIF_TERM
elt1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM result;
int r1, c1, i, j;
float *a;
if (!enif_get_int(env, argv[0], &r1)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c1)) return enif_make_int(env,2);
    if (!enif_get_int(env, argv[2], &i)) return enif_make_int(env,3);
if (!enif_get_int(env, argv[3], &j)) return enif_make_int(env,4);
if (!enif_inspect_binary(env, argv[4], &a_bin )) return enif_make_int(env,5);
a = (float *) a_bin.data;
result = enif_make_double(env,(double)a[IDX2C(i,j,r1)]);
return(result);
}
static ERL_NIF_TERM
set1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM b_bin;
int r1, c1, n, i, j, x, y;
float *a,*b;
double val;
if (!enif_get_int(env, argv[0], &r1)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c1)) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &a_bin )) return enif_make_int(env,3);
if (!enif_get_int(env, argv[3], &x)) return enif_make_int(env,4);
if (!enif_get_int(env, argv[4], &y)) return enif_make_int(env,5);
if (!enif_get_double(env, argv[5], &val)) return enif_make_int(env,6);
n = r1*c1;
a = (float *) a_bin.data;
b = (float *) enif_make_new_binary(env, n * sizeof(float), &b_bin);
for(i=0;i<r1;i++){
for(j=0;j<c1;j++){
if(i==x && j==y)
b[IDX2C(i,j,r1)] = (float)val;
else
b[IDX2C(i,j,r1)] = a[IDX2C(i,j,r1)];
}
}
return(b_bin);
}
static ERL_NIF_TERM
add_diff1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM b_bin;
int r1, c1, n, i, j, x, y;
float *a,*b;
double val;
if (!enif_get_int(env, argv[0], &r1)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c1)) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &a_bin )) return enif_make_int(env,3);
if (!enif_get_int(env, argv[3], &x)) return enif_make_int(env,4);
if (!enif_get_int(env, argv[4], &y)) return enif_make_int(env,5);
if (!enif_get_double(env, argv[5], &val)) return enif_make_int(env,6);
n = r1*c1;
a = (float *) a_bin.data;
b = (float *) enif_make_new_binary(env, n * sizeof(float), &b_bin);
for(i=0;i<r1;i++){
for(j=0;j<c1;j++){
if(i==x && j==y)
b[IDX2C(i,j,r1)] = a[IDX2C(i,j,r1)] + (float)val;
else
b[IDX2C(i,j,r1)] = a[IDX2C(i,j,r1)];
}
}
return(b_bin);
}
static ERL_NIF_TERM
add_diff2(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM b_bin;
int n1, c1, h1, w1, n, i, j, k, l, n2, c2, h2, w2;
float *a,*b;
double val;
if (!enif_get_int(env, argv[0], &n1)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c1)) return enif_make_int(env,2);
if (!enif_get_int(env, argv[2], &h1)) return enif_make_int(env,3);
if (!enif_get_int(env, argv[3], &w1)) return enif_make_int(env,4);
if (!enif_inspect_binary(env, argv[4], &a_bin )) return enif_make_int(env,5);
if (!enif_get_int(env, argv[5], &n2)) return enif_make_int(env,6);
if (!enif_get_int(env, argv[6], &c2)) return enif_make_int(env,7);
if (!enif_get_int(env, argv[7], &h2)) return enif_make_int(env,8);
if (!enif_get_int(env, argv[8], &w2)) return enif_make_int(env,9);
if (!enif_get_double(env, argv[9], &val)) return enif_make_int(env,10);
n = n1*c1*h1*w1;
a = (float *) a_bin.data;
b = (float *) enif_make_new_binary(env, n * sizeof(float), &b_bin);
for(i=0;i<n1;i++){
for(j=0;j<c1;j++){
for(k=0;k<h1;k++){
for(l=0;l<w1;l++){
if(i==n2 && j==c2 && k==h2 && l==w2){
b[IDX4C(i,j,k,l,c1,h1,w1)] = a[IDX4C(i,j,k,l,c1,h1,w1)] + (float)val;
}
else {
b[IDX4C(i,j,k,l,c1,h1,w1)] = a[IDX4C(i,j,k,l,c1,h1,w1)];
}
}
}
}
}
return(b_bin);
}
static ERL_NIF_TERM
average1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM b_bin;
int r1, c1, i, j;
float *a,*b;
float sum;
if (!enif_get_int(env, argv[0], &r1)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c1)) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &a_bin )) return enif_make_int(env,3);
a = (float *) a_bin.data;
b = (float *) enif_make_new_binary(env, c1 * sizeof(float), &b_bin);
for(j=0;j<c1;j++){
sum = 0.0;
for(i=0;i<r1;i++){
sum = sum + a[IDX2C(i,j,r1)];
}
b[j] = sum / (float)r1;
}
return(b_bin);
}
/*
1st arg row-size of matrix
2nd arg col-size of matrix
3rd arg matrix data binary
*/
static ERL_NIF_TERM
sum1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM result;
int r1, c1, i, j;
float *a;
float sum;
if (!enif_get_int(env, argv[0], &r1)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c1)) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &a_bin )) return enif_make_int(env,3);
a = (float *) a_bin.data;
sum = 0.0;
for(i=0;i<r1;i++){
for(j=0;j<c1;j++){
sum = sum + a[IDX2C(i,j,r1)];
}
}
result = enif_make_double(env,sum);
return(result);
}
/*
transfer 2-dim matrix to list
*/
static ERL_NIF_TERM
to_list1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM head,list;
int r1, c1, i, j;
float *a;
if (!enif_get_int(env, argv[0], &r1)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c1)) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &a_bin )) return enif_make_int(env,3);
a = (float *) a_bin.data;
list = enif_make_list(env, 0);
for(i=r1-1;i>=0;i--){
for(j=c1-1;j>=0;j--){
head = enif_make_double(env,(double)a[IDX2C(i,j,r1)]);
list = enif_make_list_cell(env,head,list);
}
}
return(list);
}
/*
transfer 3-dim matrix to list
*/
static ERL_NIF_TERM
to_list2(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM head,list;
int c, h, w, i, j, k;
float *a;
if (!enif_get_int(env, argv[0], &c)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &h)) return enif_make_int(env,2);
if (!enif_get_int(env, argv[2], &w)) return enif_make_int(env,3);
if (!enif_inspect_binary(env, argv[3], &a_bin )) return enif_make_int(env,4);
a = (float *) a_bin.data;
list = enif_make_list(env, 0);
for(i=c-1;i>=0;i--){
for(j=h-1;j>=0;j--){
for(k=w-1;k>=0;k--){
head = enif_make_double(env,(double)a[IDX3C(i,j,k,h,w)]);
list = enif_make_list_cell(env,head,list);
}
}
}
return(list);
}
/*
transfer 4-dim matrix to list
*/
static ERL_NIF_TERM
to_list3(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM head,list;
int n, c, h, w, i, j, k, l;
float *a;
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c)) return enif_make_int(env,2);
if (!enif_get_int(env, argv[2], &h)) return enif_make_int(env,3);
if (!enif_get_int(env, argv[3], &w)) return enif_make_int(env,4);
if (!enif_inspect_binary(env, argv[4], &a_bin )) return enif_make_badarg(env);
a = (float *) a_bin.data;
list = enif_make_list(env, 0);
for(i=n-1;i>=0;i--){
for(j=c-1;j>=0;j--){
for(k=h-1;k>=0;k--){
for(l=w-1;l>=0;l--){
head = enif_make_double(env,(double)a[IDX4C(i,j,k,l,c,h,w)]);
list = enif_make_list_cell(env,head,list);
}
}
}
}
return(list);
}
__global__ void dropout1_kernel(float *a, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n)
{
a[tid] = 1.0;
tid += blockDim.x * gridDim.x;
}
}
/*
1st arg size of mask tensor
2nd arg rate of dropout
return mask tensor
each element of the mask tensor is 1.0 by default.
elements selected according to the dropout rate are set to 0.0.
during the forward and backward passes, take the Hadamard product with this mask tensor.
*/
static ERL_NIF_TERM
dropout1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ERL_NIF_TERM a_bin;
int n,count,i,j;
float *a,*dev_a;
double dropout_rate;
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
if (!enif_get_double(env, argv[1], &dropout_rate)) return enif_make_int(env,2);
a = (float *) enif_make_new_binary(env, n * sizeof(float), &a_bin);
// Allocate for GPU
CHECK(hipMalloc((void**)&dev_a, n * sizeof(float)));
// copy from host a,b to GPU dev_a, dev_b
CHECK(hipMemcpy(dev_a, a, n * sizeof(float), hipMemcpyHostToDevice));
    dropout1_kernel<<<128, 128>>>(dev_a, n);
// copy to host c from GPU dev_c
CHECK(hipMemcpy(a, dev_a, n * sizeof(float), hipMemcpyDeviceToHost));
// dropout
count = (int)(double(n)*dropout_rate);
for(i=0;i<count;i++){
j = rand() % n;
a[j] = 0.0;
}
// free
hipFree(dev_a);
return(a_bin);
}
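/*
  The mask produced by dropout1 above is intended to be combined with an
  activation tensor by an element-wise (Hadamard) product, e.g. via emult1.
  Host-side sketch of that application (reference only). Because the dropped
  indices are drawn with replacement, the realized dropout fraction can be
  slightly smaller than the requested rate.
*/
static void apply_dropout_mask_ref(const float *activation, const float *mask, float *out, int n)
{
    int i;
    for (i = 0; i < n; i++) {
        out[i] = activation[i] * mask[i];
    }
}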
__global__ void sgd1_kernel(float *a, float *b, float *c, float lr, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n)
{
c[tid] = a[tid] - b[tid]*lr;
tid += blockDim.x * gridDim.x;
}
}
/*
w - g*lr
w is weight matrix.
g is gradient matrix.
note: the kernel updates every element unconditionally; any dropout mask must be applied separately (see dropout1).
return updated weight matrix.
1st arg is size of vectorized matrix
2nd arg is weight matrix or tensor
3rd arg is gradient matrix or tensor
4th arg is learning rate
*/
static ERL_NIF_TERM
sgd1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin, b_bin;
ERL_NIF_TERM c_bin;
int n;
float *a,*b,*c,*dev_a, *dev_b, *dev_c;
float lr;
double learning_rate;
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
if (!enif_inspect_binary(env, argv[1], &a_bin )) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &b_bin)) return enif_make_int(env,3);
if (!enif_get_double(env, argv[3], &learning_rate)) return enif_make_int(env,4);
a = (float *) a_bin.data;
b = (float *) b_bin.data;
c = (float *) enif_make_new_binary(env, n * sizeof(float), &c_bin);
lr = (float) learning_rate;
// Allocate for GPU
CHECK(hipMalloc((void**)&dev_a, n * sizeof(float)));
CHECK(hipMalloc((void**)&dev_b, n * sizeof(float)));
CHECK(hipMalloc((void**)&dev_c, n * sizeof(float)));
// copy from host a,b to GPU dev_a, dev_b
CHECK(hipMemcpy(dev_a, a, n * sizeof(float), hipMemcpyHostToDevice));
CHECK(hipMemcpy(dev_b, b, n * sizeof(float), hipMemcpyHostToDevice));
    sgd1_kernel<<<128, 128>>>(dev_a, dev_b, dev_c, lr, n);
// copy to host c from GPU dev_c
CHECK(hipMemcpy(c, dev_c, n * sizeof(float), hipMemcpyDeviceToHost));
// free
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
return(c_bin);
}
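/*
  sgd1_kernel above updates every element unconditionally. If weights that were
  zeroed by a dropout mask are supposed to stay at zero, a mask-respecting
  variant would be needed; the kernel below is only a sketch of that assumed
  behaviour and is not registered as a NIF.
*/
__global__ void sgd1_masked_kernel_sketch(float *w, float *g, float *out, float lr, int n)
{
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    while (tid < n)
    {
        if (w[tid] != 0.0f)
            out[tid] = w[tid] - g[tid] * lr;
        else
            out[tid] = 0.0f;   // keep dropped weights at zero
        tid += blockDim.x * gridDim.x;
    }
}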
/*
Elixir reference (note: the kernel below uses a momentum coefficient of 0.9):
def momentum(v, g, lr) do
  Matrex.apply(v, g, fn v, g -> 0.5 * v - lr * g end)
end
*/
__global__ void momentum_kernel(float *a, float *b, float *c, float *d, float *e, float lr, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n)
{
d[tid] = ((0.9 * b[tid]) - (lr * c[tid]));
e[tid] = a[tid] + d[tid];
tid += blockDim.x * gridDim.x;
}
}
/*
1st arg size of each vectorized matrix
2nd arg weight matrix (a)
3rd arg v-matrix (b)
4th arg gradient matrix (c)
5th arg learning rate
return tuple {next_v-matrix, weight-matrix}
*/
static ERL_NIF_TERM
momentum1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin,b_bin,c_bin;
ERL_NIF_TERM d_bin,e_bin,tuple;
int n;
float *a,*b,*c,*d,*e;
float *dev_a, *dev_b, *dev_c ,*dev_d, *dev_e;
float lr;
double learning_rate;
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
if (!enif_inspect_binary(env, argv[1], &a_bin)) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &b_bin )) return enif_make_int(env,3);
if (!enif_inspect_binary(env, argv[3], &c_bin )) return enif_make_int(env,4);
if (!enif_get_double(env, argv[4], &learning_rate)) return enif_make_int(env,5);
a = (float *) a_bin.data;
b = (float *) b_bin.data;
c = (float *) c_bin.data;
d = (float *) enif_make_new_binary(env, n * sizeof(float), &d_bin);
e = (float *) enif_make_new_binary(env, n * sizeof(float), &e_bin);
lr = (float) learning_rate;
// Allocate for GPU
CHECK(hipMalloc((void**)&dev_a, n * sizeof(float)));
CHECK(hipMalloc((void**)&dev_b, n * sizeof(float)));
CHECK(hipMalloc((void**)&dev_c, n * sizeof(float)));
CHECK(hipMalloc((void**)&dev_d, n * sizeof(float)));
CHECK(hipMalloc((void**)&dev_e, n * sizeof(float)));
// copy from host a,b to GPU dev_a, dev_b
CHECK(hipMemcpy(dev_a, a, n * sizeof(float), hipMemcpyHostToDevice));
CHECK(hipMemcpy(dev_b, b, n * sizeof(float), hipMemcpyHostToDevice));
CHECK(hipMemcpy(dev_c, c, n * sizeof(float), hipMemcpyHostToDevice));
CHECK(hipMemcpy(dev_d, d, n * sizeof(float), hipMemcpyHostToDevice));
CHECK(hipMemcpy(dev_e, e, n * sizeof(float), hipMemcpyHostToDevice));
    momentum_kernel<<<128, 128>>>(dev_a, dev_b, dev_c, dev_d, dev_e, lr, n);
// copy to host d from GPU dev_d
CHECK(hipMemcpy(d, dev_d, n * sizeof(float), hipMemcpyDeviceToHost));
CHECK(hipMemcpy(e, dev_e, n * sizeof(float), hipMemcpyDeviceToHost));
// free
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
hipFree(dev_d);
hipFree(dev_e);
tuple = enif_make_tuple2(env,d_bin,e_bin);
return(tuple);
}
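/*
  CPU reference for one momentum step, matching momentum_kernel above
  (coefficient 0.9): v1 = 0.9*v - lr*g, w1 = w + v1. Sketch for reference only.
*/
static void momentum_step_ref(const float *w, const float *v, const float *g,
                              float *v1, float *w1, float lr, int n)
{
    int i;
    for (i = 0; i < n; i++) {
        v1[i] = 0.9f * v[i] - lr * g[i];
        w1[i] = w[i] + v1[i];
    }
}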
/* ADAGRAD
h1 = h + grad*grad
lr1 = lr/(sqrt(h1))
w1 = w - lr1 * grad
a[] = w
b[] = h
c[] = grad
d[] = h1
e[] = w1
*/
__global__ void adagrad_kernel(float *a, float *b, float *c, float *d, float *e, float lr, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
float lr1;
while (tid < n)
{
d[tid] = b[tid] + c[tid]*c[tid];
if(d[tid] != 0.0)
lr1 = lr/(sqrt(d[tid]));
else
lr1 = lr;
e[tid] = a[tid] - lr1 * c[tid];
tid += blockDim.x * gridDim.x;
}
}
/*
1st arg size of each vectorized matrix
2nd arg weight matrix (a_bin)
3rd arg h-matrix (b_bin)
4th arg grad-matrix (c_bin)
5th arg learning rate
return tuple {new-h,new-w}
*/
static ERL_NIF_TERM
adagrad1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin,b_bin,c_bin;
ERL_NIF_TERM d_bin,e_bin,tuple;
int n;
float *a,*b,*c,*d,*e;
float *dev_a, *dev_b, *dev_c, *dev_d, *dev_e;
float lr;
double learning_rate;
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
if (!enif_inspect_binary(env, argv[1], &a_bin)) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &b_bin)) return enif_make_int(env,3);
if (!enif_inspect_binary(env, argv[3], &c_bin)) return enif_make_int(env,4);
if (!enif_get_double(env, argv[4], &learning_rate)) return enif_make_int(env,5);
a = (float *) a_bin.data;
b = (float *) b_bin.data;
c = (float *) c_bin.data;
d = (float *) enif_make_new_binary(env, n * sizeof(float), &d_bin);
e = (float *) enif_make_new_binary(env, n * sizeof(float), &e_bin);
lr = (float) learning_rate;
// Allocate for GPU
CHECK(hipMalloc((void**)&dev_a, n * sizeof(float)));
CHECK(hipMalloc((void**)&dev_b, n * sizeof(float)));
CHECK(hipMalloc((void**)&dev_c, n * sizeof(float)));
CHECK(hipMalloc((void**)&dev_d, n * sizeof(float)));
CHECK(hipMalloc((void**)&dev_e, n * sizeof(float)));
// copy from host a,b to GPU dev_a, dev_b
CHECK(hipMemcpy(dev_a, a, n * sizeof(float), hipMemcpyHostToDevice));
CHECK(hipMemcpy(dev_b, b, n * sizeof(float), hipMemcpyHostToDevice));
CHECK(hipMemcpy(dev_c, c, n * sizeof(float), hipMemcpyHostToDevice));
CHECK(hipMemcpy(dev_d, d, n * sizeof(float), hipMemcpyHostToDevice));
CHECK(hipMemcpy(dev_e, e, n * sizeof(float), hipMemcpyHostToDevice));
    adagrad_kernel<<<128, 128>>>(dev_a, dev_b, dev_c, dev_d, dev_e, lr, n);
// copy to host d,e from GPU dev_d,dev_e
CHECK(hipMemcpy(d, dev_d, n * sizeof(float), hipMemcpyDeviceToHost));
CHECK(hipMemcpy(e, dev_e, n * sizeof(float), hipMemcpyDeviceToHost));
// free
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
hipFree(dev_d);
hipFree(dev_e);
tuple = enif_make_tuple2(env,d_bin,e_bin);
return(tuple);
}
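/*
  adagrad_kernel above falls back to the raw learning rate when the accumulated
  squared gradient is exactly zero. The more common formulation instead adds a
  small epsilon to the denominator; the host-side sketch below shows that
  variant for comparison only (the epsilon value is an assumption).
*/
static void adagrad_step_eps_ref(const float *w, const float *h, const float *g,
                                 float *h1, float *w1, float lr, int n)
{
    int i;
    float epsilon = 1.0e-7f;
    for (i = 0; i < n; i++) {
        h1[i] = h[i] + g[i] * g[i];
        w1[i] = w[i] - lr * g[i] / (sqrtf(h1[i]) + epsilon);
    }
}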
/* RMSprop
h1 = alpha * h + (1 - alpha) * grad*grad
lr1 = lr /(sqrt(h) + epsilon)
w1 = w - lr1 * grad
a[] = w
b[] = h
c[] = grad
d[] = h1
e[] = w1
*/
__global__ void rms_kernel(float *a, float *b, float *c, float *d, float *e, float lr, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
float lr1,alpha,epsilon;
alpha = 0.99;
epsilon = 10.0e-7;
while (tid < n)
{
d[tid] = alpha * b[tid] + (1-alpha)*c[tid]*c[tid];
lr1 = lr/(sqrt(d[tid])+epsilon);
e[tid] = a[tid] - lr1*c[tid];
tid += blockDim.x * gridDim.x;
}
}
/*
1st arg size of each vectorized matrix
2nd arg weight matrix (a_bin)
3rd arg h-matrix (b_bin)
4th arg grad-matrix (c_bin)
5th arg learning rate
return tuple {new-h,new-w}
*/
static ERL_NIF_TERM
rms1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin,b_bin,c_bin;
ERL_NIF_TERM d_bin,e_bin,tuple;
int n;
float *a,*b,*c,*d,*e;
float *dev_a, *dev_b, *dev_c, *dev_d, *dev_e;
float lr;
double learning_rate;
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
if (!enif_inspect_binary(env, argv[1], &a_bin)) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &b_bin)) return enif_make_int(env,3);
if (!enif_inspect_binary(env, argv[3], &c_bin)) return enif_make_int(env,4);
if (!enif_get_double(env, argv[4], &learning_rate)) return enif_make_int(env,5);
a = (float *) a_bin.data;
b = (float *) b_bin.data;
c = (float *) c_bin.data;
d = (float *) enif_make_new_binary(env, n * sizeof(float), &d_bin);
e = (float *) enif_make_new_binary(env, n * sizeof(float), &e_bin);
lr = (float) learning_rate;
// Allocate for GPU
CHECK(hipMalloc((void**)&dev_a, n * sizeof(float)));
CHECK(hipMalloc((void**)&dev_b, n * sizeof(float)));
CHECK(hipMalloc((void**)&dev_c, n * sizeof(float)));
CHECK(hipMalloc((void**)&dev_d, n * sizeof(float)));
CHECK(hipMalloc((void**)&dev_e, n * sizeof(float)));
// copy from host a,b to GPU dev_a, dev_b
CHECK(hipMemcpy(dev_a, a, n * sizeof(float), hipMemcpyHostToDevice));
CHECK(hipMemcpy(dev_b, b, n * sizeof(float), hipMemcpyHostToDevice));
CHECK(hipMemcpy(dev_c, c, n * sizeof(float), hipMemcpyHostToDevice));
CHECK(hipMemcpy(dev_d, d, n * sizeof(float), hipMemcpyHostToDevice));
CHECK(hipMemcpy(dev_e, e, n * sizeof(float), hipMemcpyHostToDevice));
    rms_kernel<<<128, 128>>>(dev_a, dev_b, dev_c, dev_d, dev_e, lr, n);
// copy to host d,e from GPU dev_d,dev_e
CHECK(hipMemcpy(d, dev_d, n * sizeof(float), hipMemcpyDeviceToHost));
CHECK(hipMemcpy(e, dev_e, n * sizeof(float), hipMemcpyDeviceToHost));
// free
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
hipFree(dev_d);
hipFree(dev_e);
tuple = enif_make_tuple2(env,d_bin,e_bin);
return(tuple);
}
/* ADAM
beta1 = 0.9
beta2 = 0.999
epsilon = 10.0e-7
alpha = 0.001
m1 = beta1 * m + (1 - beta1) * grad
v1 = beta2 * v + (1 - beta2) * grad^2
m2 = m1/(1 - beta1)
v2 = v1/(1 - beta2)
w1 = w - alpha * m2/(sqrt(v2)+epsilon)
a[] is w
b[] is m
c[] is v
d[] is grad
e[] is m1
f[] is v1
g[] is w1
*/
__global__ void adam_kernel(float *a, float *b, float *c, float *d, float *e, float *f, float *g, float lr, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
float beta1,beta2,epsilon,m2,v2;
beta1 = 0.9;
beta2 = 0.999;
epsilon = 10.0e-7;
//alpha = 0.001;
while (tid < n){
e[tid] = beta1 * b[tid] + (1 - beta1) * d[tid];
f[tid] = beta2 * c[tid] + (1 - beta2) * d[tid]*d[tid];
m2 = e[tid]/(1-beta1);
v2 = f[tid]/(1-beta2);
g[tid] = a[tid] - lr * (m2/(sqrt(v2)+epsilon));
tid += blockDim.x * gridDim.x;
}
}
/*
1st arg size of each vectorized matrix
2nd arg w-matrix (a_bin)
3rd arg m-matrix (b_bin)
4th arg v-matrix (c_bin)
5th arg grad-matrix (d_bin)
6th arg learning rate
return tuple {m1,v1,w1}
*/
static ERL_NIF_TERM
adam1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin,b_bin,c_bin,d_bin;
ERL_NIF_TERM e_bin,f_bin,g_bin,tuple;
int n;
float *a,*b,*c,*d,*e,*f,*g;
float *dev_a, *dev_b, *dev_c, *dev_d, *dev_e, *dev_f, *dev_g;
float lr;
double learning_rate;
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
if (!enif_inspect_binary(env, argv[1], &a_bin)) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &b_bin)) return enif_make_int(env,3);
if (!enif_inspect_binary(env, argv[3], &c_bin)) return enif_make_int(env,4);
if (!enif_inspect_binary(env, argv[4], &d_bin)) return enif_make_int(env,5);
if (!enif_get_double(env, argv[5], &learning_rate)) return enif_make_int(env,6);
a = (float *) a_bin.data;
b = (float *) b_bin.data;
c = (float *) c_bin.data;
d = (float *) d_bin.data;
e = (float *) enif_make_new_binary(env, n * sizeof(float), &e_bin);
f = (float *) enif_make_new_binary(env, n * sizeof(float), &f_bin);
g = (float *) enif_make_new_binary(env, n * sizeof(float), &g_bin);
lr = (float) learning_rate;
// Allocate for GPU
CHECK(hipMalloc((void**)&dev_a, n * sizeof(float)));
CHECK(hipMalloc((void**)&dev_b, n * sizeof(float)));
CHECK(hipMalloc((void**)&dev_c, n * sizeof(float)));
CHECK(hipMalloc((void**)&dev_d, n * sizeof(float)));
CHECK(hipMalloc((void**)&dev_e, n * sizeof(float)));
CHECK(hipMalloc((void**)&dev_f, n * sizeof(float)));
CHECK(hipMalloc((void**)&dev_g, n * sizeof(float)));
// copy from host a,b to GPU dev_a, dev_b
CHECK(hipMemcpy(dev_a, a, n * sizeof(float), hipMemcpyHostToDevice));
CHECK(hipMemcpy(dev_b, b, n * sizeof(float), hipMemcpyHostToDevice));
CHECK(hipMemcpy(dev_c, c, n * sizeof(float), hipMemcpyHostToDevice));
CHECK(hipMemcpy(dev_d, d, n * sizeof(float), hipMemcpyHostToDevice));
CHECK(hipMemcpy(dev_e, e, n * sizeof(float), hipMemcpyHostToDevice));
CHECK(hipMemcpy(dev_f, f, n * sizeof(float), hipMemcpyHostToDevice));
CHECK(hipMemcpy(dev_g, g, n * sizeof(float), hipMemcpyHostToDevice));
    adam_kernel<<<128, 128>>>(dev_a, dev_b, dev_c, dev_d, dev_e, dev_f, dev_g, lr, n);
// copy to host d,e from GPU dev_d,dev_e
CHECK(hipMemcpy(e, dev_e, n * sizeof(float), hipMemcpyDeviceToHost));
CHECK(hipMemcpy(f, dev_f, n * sizeof(float), hipMemcpyDeviceToHost));
CHECK(hipMemcpy(g, dev_g, n * sizeof(float), hipMemcpyDeviceToHost));
// free
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
hipFree(dev_d);
hipFree(dev_e);
hipFree(dev_f);
hipFree(dev_g);
tuple = enif_make_tuple3(env,e_bin,f_bin,g_bin);
return(tuple);
}
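/*
  CPU reference for one step of adam_kernel above, with the same constants
  (beta1=0.9, beta2=0.999, epsilon=10.0e-7). Note that the kernel's bias
  correction divides by (1-beta1) and (1-beta2) rather than (1-beta1^t) and
  (1-beta2^t); this sketch mirrors that behaviour for cross-checking only.
*/
static void adam_step_ref(const float *w, const float *m, const float *v, const float *g,
                          float *m1, float *v1, float *w1, float lr, int n)
{
    int i;
    float beta1 = 0.9f, beta2 = 0.999f, epsilon = 10.0e-7f, m2, v2;
    for (i = 0; i < n; i++) {
        m1[i] = beta1 * m[i] + (1.0f - beta1) * g[i];
        v1[i] = beta2 * v[i] + (1.0f - beta2) * g[i] * g[i];
        m2 = m1[i] / (1.0f - beta1);
        v2 = v1[i] / (1.0f - beta2);
        w1[i] = w[i] - lr * (m2 / (sqrtf(v2) + epsilon));
    }
}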
/*
1st arg row-size of matrix
2nd arg col-size of matrix
3rd arg predicted matrix
4th arg list of label. each element is integer
return accuracy rate
*/
static ERL_NIF_TERM
accuracy1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM head,list,result;
int r1, c1, i, j, n, index,sum;
float *a;
double max,rate;
if (!enif_get_int(env, argv[0], &r1)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c1)) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &a_bin )) return enif_make_int(env,3);
a = (float *) a_bin.data;
// calculate accuracy
sum = 0;
list = argv[3];
for(i=0;i<r1;i++){
    max = 0.0;
    index = 0;
enif_get_list_cell(env, list, &head, &list);
enif_get_int(env,head,&n);
for(j=0;j<c1;j++){
if(a[IDX2C(i,j,r1)] > max){
max = a[IDX2C(i,j,r1)];
index = j;
}
}
if(index == n)
sum++;
}
rate = (double)sum / (double)r1;
result = enif_make_double(env,rate);
return(result);
}
/*
1st arg row-size of matrix
2nd arg col-size of matrix
3rd arg predicted matrix
4th arg list of label. each element is integer
return correct number
*/
static ERL_NIF_TERM
correct1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM head,list,result;
int r1, c1, i, j, n, index,sum;
float *a;
float max;
if (!enif_get_int(env, argv[0], &r1)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c1)) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &a_bin )) return enif_make_int(env,3);
a = (float *) a_bin.data;
// calculate correct number
sum = 0;
list = argv[3];
for(i=0;i<r1;i++){
    max = 0.0;
    index = 0;
enif_get_list_cell(env, list, &head, &list);
enif_get_int(env,head,&n);
for(j=0;j<c1;j++){
if(a[IDX2C(i,j,r1)] > max){
max = a[IDX2C(i,j,r1)];
index = j;
}
}
if(index == n)
sum++;
}
result = enif_make_double(env,(double)sum);
return(result);
}
static ERL_NIF_TERM
random_select1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin,b_bin;
ERL_NIF_TERM c_bin,d_bin,tuple;
int r1, c1, r2, c2, i, j, n, r;
float *a, *b, *c, *d;
if (!enif_get_int(env, argv[0], &r1)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c1)) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &a_bin )) return enif_make_int(env,3);
if (!enif_get_int(env, argv[3], &r2)) return enif_make_int(env,4);
if (!enif_get_int(env, argv[4], &c2)) return enif_make_int(env,5);
if (!enif_inspect_binary(env, argv[5], &b_bin )) return enif_make_int(env,6);
if (!enif_get_int(env, argv[6], &n)) return enif_make_int(env,7);
a = (float *) a_bin.data;
b = (float *) b_bin.data;
c = (float *) enif_make_new_binary(env, n*c1 * sizeof(float), &c_bin);
d = (float *) enif_make_new_binary(env, n*c2 * sizeof(float), &d_bin);
// random-select
for(i=0;i<n;i++){
r = rand() % r1;
for(j=0;j<c1;j++){
c[IDX2C(i,j,n)] = a[IDX2C(r,j,r1)];
}
for(j=0;j<c2;j++){
d[IDX2C(i,j,n)] = b[IDX2C(r,j,r2)];
}
}
tuple = enif_make_tuple2(env,c_bin,d_bin);
return(tuple);
}
static ERL_NIF_TERM
random_select2(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin,b_bin;
ERL_NIF_TERM c_bin,d_bin,tuple;
int n1,c1,h1,w1,r2,c2, i, j, k, l, n, r;
float *a, *b, *c, *d;
if (!enif_get_int(env, argv[0], &n1)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c1)) return enif_make_int(env,2);
if (!enif_get_int(env, argv[2], &h1)) return enif_make_int(env,3);
if (!enif_get_int(env, argv[3], &w1)) return enif_make_int(env,4);
if (!enif_inspect_binary(env, argv[4], &a_bin )) return enif_make_int(env,5);
if (!enif_get_int(env, argv[5], &r2)) return enif_make_int(env,6);
if (!enif_get_int(env, argv[6], &c2)) return enif_make_int(env,7);
if (!enif_inspect_binary(env, argv[7], &b_bin )) return enif_make_int(env,8);
if (!enif_get_int(env, argv[8], &n)) return enif_make_int(env,9);
a = (float *) a_bin.data;
b = (float *) b_bin.data;
c = (float *) enif_make_new_binary(env, n*c1*h1*w1 * sizeof(float), &c_bin);
d = (float *) enif_make_new_binary(env, n*r2*c2 * sizeof(float), &d_bin);
// random-select
for(i=0;i<n;i++){
r = rand() % n1;
for(j=0;j<c1;j++){
for(k=0;k<h1;k++){
for(l=0;l<w1;l++){
c[IDX4C(i,j,k,l,c1,h1,w1)] = a[IDX4C(r,j,k,l,c1,h1,w1)];
}
}
}
for(j=0;j<c2;j++){
d[IDX2C(i,j,n)] = b[IDX2C(r,j,r2)];
}
}
tuple = enif_make_tuple2(env,c_bin,d_bin);
return(tuple);
}
static ERL_NIF_TERM
is_near1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin,b_bin;
int i, n, sw;
float *a, *b;
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
if (!enif_inspect_binary(env, argv[1], &a_bin )) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &b_bin )) return enif_make_int(env,3);
a = (float *) a_bin.data;
b = (float *) b_bin.data;
// near check
sw = 0;
for(i=0;i<n;i++){
if(fabsf(a[i]) > fabsf(b[i])*1.15 || fabsf(a[i]) < fabsf(b[i])*0.85){
printf("%f %f \r\n", a[i], b[i]);
sw = 1;
}
}
if(sw == 0)
return enif_make_int(env,1); //true
else
return enif_make_int(env,0); //false
}
static ERL_NIF_TERM
is_equal1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin,b_bin;
int i, n;
float *a, *b;
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
if (!enif_inspect_binary(env, argv[1], &a_bin )) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &b_bin )) return enif_make_int(env,3);
a = (float *) a_bin.data;
b = (float *) b_bin.data;
// equal check
for(i=0;i<n;i++){
if(a[i] != b[i]){
return enif_make_int(env,0); //false
}
}
return enif_make_int(env,1); //true
}
static ERL_NIF_TERM
analizer1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
int i, n, id;
float *a;
float max,min,sum;
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
if (!enif_inspect_binary(env, argv[1], &a_bin )) return enif_make_int(env,2);
if (!enif_get_int(env, argv[2], &id)) return enif_make_int(env,3);
a = (float *) a_bin.data;
    // check for NaN / Inf
for(i=0;i<n;i++){
if(isnan(a[i])){
return enif_make_int(env,9999);
}
if(isinf(a[i])){
return enif_make_int(env,9998);
}
}
    //find max, min, and average
max = -999999999;
min = 999999999;
sum = 0;
for(i=0;i<n;i++){
if(a[i] > max)
max = a[i];
if(a[i] < min)
min = a[i];
sum = sum+a[i];
}
printf("id max min average\r\n");
printf("%d %f %f %f \r\n", id, max, min, sum/(float)n);
return enif_make_int(env,1);
}
/*
1st arg in_n of tensor
2nd arg in_c of tensor
3rd arg in_h of tensor
4th arg in_w of tensor
5th arg binary of tensor
note: subtracts each sample's mean over (c,h,w); it does not scale by the standard deviation
*/
static ERL_NIF_TERM
standardize1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM b_bin;
int in_n,in_c,in_h,in_w,n1,i,c1,h1,w1,count;
float *a,*b;
float sum,average;
if (!enif_get_int(env, argv[0], &in_n)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &in_c)) return enif_make_int(env,2);
if (!enif_get_int(env, argv[2], &in_h)) return enif_make_int(env,3);
if (!enif_get_int(env, argv[3], &in_w)) return enif_make_int(env,4);
if (!enif_inspect_binary(env, argv[4], &a_bin )) return enif_make_int(env,5);
n1 = in_n * in_c * in_h * in_w;
a = (float *) a_bin.data;
b = (float *) enif_make_new_binary(env, n1 * sizeof(float), &b_bin);
for(i=0;i<in_n;i++){
sum = 0.0;
for(c1=0;c1<in_c;c1++){
for(h1=0;h1<in_h;h1++){
for(w1=0;w1<in_w;w1++){
sum = sum + a[IDX4C(i,c1,h1,w1,in_c,in_h,in_w)];
}
}
}
count = in_c * in_h * in_w;
average = sum / (float)count;
for(c1=0;c1<in_c;c1++){
for(h1=0;h1<in_h;h1++){
for(w1=0;w1<in_w;w1++){
b[IDX4C(i,c1,h1,w1,in_c,in_h,in_w)] = a[IDX4C(i,c1,h1,w1,in_c,in_h,in_w)] - average;
}
}
}
}
return(b_bin);
}
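/*
  standardize1 above only centers each sample (subtracts its mean over c,h,w).
  If full standardization is wanted, the per-sample standard deviation would
  also be needed; the helper below sketches that variant (the epsilon guard is
  an assumption) and is not registered as a NIF.
*/
static void standardize_full_ref(const float *a, float *b, int in_n, int in_c, int in_h, int in_w)
{
    int i, c1, h1, w1;
    int count = in_c * in_h * in_w;
    for (i = 0; i < in_n; i++) {
        float sum = 0.0f, sqsum = 0.0f, mean, sd;
        // per-sample mean
        for (c1 = 0; c1 < in_c; c1++)
            for (h1 = 0; h1 < in_h; h1++)
                for (w1 = 0; w1 < in_w; w1++)
                    sum += a[IDX4C(i, c1, h1, w1, in_c, in_h, in_w)];
        mean = sum / (float)count;
        // per-sample standard deviation
        for (c1 = 0; c1 < in_c; c1++)
            for (h1 = 0; h1 < in_h; h1++)
                for (w1 = 0; w1 < in_w; w1++) {
                    float dv = a[IDX4C(i, c1, h1, w1, in_c, in_h, in_w)] - mean;
                    sqsum += dv * dv;
                }
        sd = sqrtf(sqsum / (float)count) + 1.0e-7f;
        // center and scale
        for (c1 = 0; c1 < in_c; c1++)
            for (h1 = 0; h1 < in_h; h1++)
                for (w1 = 0; w1 < in_w; w1++)
                    b[IDX4C(i, c1, h1, w1, in_c, in_h, in_w)] =
                        (a[IDX4C(i, c1, h1, w1, in_c, in_h, in_w)] - mean) / sd;
    }
}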
/*
1st arg in_n of 3D tensor
2nd arg in_row of 3D tensor
3rd arg in_col of 3D tensor
4th arg binary of tensor
5th arg nth row index of the 3D tensor to pick up
*/
static ERL_NIF_TERM
pickup1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM b_bin;
int in_n,in_row,in_col,nth,n1,i,j;
float *a,*b;
if (!enif_get_int(env, argv[0], &in_n)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &in_row)) return enif_make_int(env,2);
if (!enif_get_int(env, argv[2], &in_col)) return enif_make_int(env,3);
if (!enif_inspect_binary(env, argv[3], &a_bin )) return enif_make_int(env,4);
if (!enif_get_int(env, argv[4], &nth)) return enif_make_int(env,5);
n1 = in_n * in_col;
a = (float *) a_bin.data;
b = (float *) enif_make_new_binary(env, n1 * sizeof(float), &b_bin);
for(i=0;i<in_n;i++){
for(j=0;j<in_col;j++){
b[IDX2C(i,j,in_n)] = a[IDX3C(i,nth,j,in_row,in_col)];
}
}
return(b_bin);
}
// define the array of ErlNifFunc
static ErlNifFunc nif_funcs[] = {
// {erl_function_name, erl_function_arity, c_function}
{"mult1", 6, mult1},
{"new1", 2, new1},
{"new2", 3, new2},
{"new3", 4, new3},
{"new4", 5, new4},
{"rand1", 1, rand1},
{"add1", 3, add1},
{"sub1", 3, sub1},
{"emult1", 3, emult1},
{"transpose1", 3, transpose1},
{"ident1", 1, ident1},
{"activate_sigmoid", 2 ,activate_sigmoid},
{"activate_tanh", 2 , activate_tanh},
{"activate_relu", 2, activate_relu},
{"activate_softmax", 3, activate_softmax},
{"differ_sigmoid", 3, differ_sigmoid},
{"differ_tanh", 3, differ_tanh},
{"differ_relu", 3, differ_relu},
{"smult1", 3, smult1},
{"trace1", 3, trace1},
{"mean_square", 4, mean_square},
{"cross_entropy", 4, cross_entropy},
{"elt1", 5, elt1},
{"set1", 6, set1},
{"add_diff1", 6, add_diff1},
{"add_diff2", 10, add_diff2},
{"average1", 3, average1},
{"sum1", 3, sum1},
{"to_list1", 3, to_list1},
{"to_list2", 4, to_list2},
{"to_list3", 5, to_list3},
{"dropout1", 2 , dropout1},
{"sgd1", 4, sgd1},
{"momentum1", 5, momentum1},
{"adagrad1", 5, adagrad1},
{"rms1", 5, rms1},
{"adam1", 6, adam1},
{"accuracy1", 4, accuracy1},
{"correct1", 4, correct1},
{"pooling1", 7, pooling1},
{"unpooling1", 8, unpooling1},
{"convolute1", 13, convolute1},
{"deconvolute1", 13, deconvolute1},
{"deconvolute2", 13, deconvolute2},
{"gradfilter1", 16, gradfilter1},
{"gradfilter2", 16, gradfilter2},
{"full1", 5, full1},
{"unfull1", 5, unfull1},
{"random_select1", 7, random_select1},
{"random_select2", 9, random_select2},
{"is_near1", 3, is_near1},
{"is_equal1", 3, is_equal1},
{"analizer1", 3, analizer1},
{"standardize1", 5, standardize1},
{"pickup1", 5, pickup1}
};
ERL_NIF_INIT(Elixir.Cumatrix, nif_funcs, NULL, NULL, NULL, NULL)
| d4f9488b0afdc28ee4ebf9c231618613c41168a7.cu | #include "erl_nif.h"
#include "cublas.h"
#include "stdio.h"
#include "time.h"
#define IDX2C(i,j,ld) (((j)*(ld))+(i))
#define IDX3C(c,i,j,in_h,in_w) ((c)*((in_h)*(in_w)) + (i)*(in_w) +(j))
#define IDX4C(n,c,i,j,in_c,in_h,in_w) ((n)*((in_c)*(in_h)*(in_w)) + (c)*((in_h)*(in_w)) + (i)*(in_w) +(j))
#define IDX5C(t,n,c,i,j,in_n,in_c,in_h,in_w) ((t)*((in_n)*(in_c)*(in_h)*(in_w)) + (n)*((in_c)*(in_h)*(in_w)) + (c)*((in_h)*(in_w)) + (i)*(in_w) +(j))
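/* indexing conventions: IDX2C is column-major (cuBLAS style, ld = number of rows),
   while IDX3C/IDX4C/IDX5C address row-major CHW / NCHW / TNCHW layouts */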
#define BREAK return(enif_make_int(env, 0));
#define PI 3.14159265358979323846
#define SIGMOID(x) (1 / (1+exp(-1*x)))
#define CHECK(call) \
{ \
const cudaError_t error = call; \
if (error != cudaSuccess) \
{ \
return enif_make_int(env,10000+(int)error); \
} \
}
#define CUBLAS(call) \
{ \
const cublasStatus error = call; \
if (error != CUBLAS_STATUS_SUCCESS) \
{ \
return enif_make_int(env,11000+(int)error); \
} \
}
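/* CHECK/CUBLAS do not abort on failure; they make the NIF return an integer
   error code to Erlang (10000 + cudaError_t, 11000 + cublasStatus) */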
__global__ void pooling_kernel(float *a, float *b, float *c, int st_h, int st_w, int in_c, int in_h, int in_w)
{
int bid = blockIdx.x;
int tid = threadIdx.x;
int n1,c1,h1,w1,h2,w2,in_h2,in_w2,start_h1,end_h1,start_w1,end_w1,max_h,max_w;
float max,fmax_h,fmax_w;
n1 = bid;
c1 = tid;
in_h2 = in_h / st_h;
in_w2 = in_w / st_w;
for(w2=0;w2<in_w2;w2++){
for(h2=0;h2<in_h2;h2++){
max = -999999999.0;
start_h1 = st_h*h2;
end_h1 = st_h*(h2+1);
start_w1 = st_w*w2;
end_w1 = st_w*(w2+1);
for(h1=start_h1;h1<end_h1;h1++){
for(w1=start_w1;w1<end_w1;w1++){
if(a[IDX4C(n1,c1,h1,w1,in_c,in_h,in_w)] >= max){
max = a[IDX4C(n1,c1,h1,w1,in_c,in_h,in_w)];
max_h = h1;
max_w = w1;
}
}
}
b[IDX4C(n1,c1,h2,w2,in_c,in_h2,in_w2)] = max;
fmax_h = (float)max_h;
fmax_w = (float)max_w;
c[IDX4C(n1,c1,h2,w2,in_c,in_h2,in_w2)] = fmax_h * 1000.0 + fmax_w;
}
}
}
/*
1st arg in_n of tensor
2nd arg in_c of tensor
3rd arg in_h of tensor
4th arg in_w of tensor
5th arg binary of tensor
6th arg stride height
7th arg stride width
return tuple {ts1,ts2}
ts1 is result data for forward
ts2 is result data for backward. this is a sparse index matrix
e.g.
|0.1,0.2,0.3,0.4|
|0.5,0.6,0.7,0.8|
|0.9,1.0,1.1,1.2|
|1.3,1.4,1.5,1.6|
ts1
|0.6,0.8|
|1.4,1.6|
ts2
each element is row*1000+col
|1.0*1000+1.0,1.0*1000*3.0|
|3.0*1000+1.0,3.0*1000+3.0|
*/
static ERL_NIF_TERM
pooling1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM b_bin,c_bin,tuple;
int in_n,in_c,in_h,in_w,st_h,st_w, n1, n2;
float *a,*b, *c;
float *dev_a, *dev_b, *dev_c;
if (!enif_get_int(env, argv[0], &in_n)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &in_c)) return enif_make_int(env,2);
if (!enif_get_int(env, argv[2], &in_h)) return enif_make_int(env,3);
if (!enif_get_int(env, argv[3], &in_w)) return enif_make_int(env,4);
if (!enif_inspect_binary(env, argv[4], &a_bin )) return enif_make_int(env,5);
if (!enif_get_int(env, argv[5], &st_h)) return enif_make_int(env,6);
if (!enif_get_int(env, argv[6], &st_w)) return enif_make_int(env,7);
n1 = in_n * in_c * in_h * in_w;
n2 = in_n * in_c * (in_h / st_h) * (in_w / st_w);
a = (float *) a_bin.data;
b = (float *) enif_make_new_binary(env, n2 * sizeof(float), &b_bin);
c = (float *) enif_make_new_binary(env, n2 * sizeof(float), &c_bin);
// Allocate for GPU
CHECK(cudaMalloc((void**)&dev_a, n1 * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_b, n2 * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_c, n2 * sizeof(float)));
// copy from host a,b to GPU dev_a, dev_b
CHECK(cudaMemcpy(dev_a, a, n1 * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_b, b, n2 * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_c, c, n2 * sizeof(float), cudaMemcpyHostToDevice));
dim3 blocks(in_n,1,1);
dim3 threads(in_c,1,1);
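// launch mapping: one block per sample (in_n) and one thread per channel (in_c)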
pooling_kernel <<<blocks, threads>>>(dev_a, dev_b, dev_c, st_h, st_w, in_c, in_h, in_w);
// copy to host b,c from GPU dev_b,dev_c
CHECK(cudaMemcpy(b, dev_b, n2 * sizeof(float), cudaMemcpyDeviceToHost));
CHECK(cudaMemcpy(c, dev_c, n2 * sizeof(float), cudaMemcpyDeviceToHost));
// return forward data and backward data with tuple {b_bin,c_bin}
tuple = enif_make_tuple2(env,b_bin,c_bin);
// free
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
return(tuple);
}
__global__ void unpooling_kernel(float *a, float *b, float *c, int st_h, int st_w, int in_c, int in_h, int in_w)
{
int bid = blockIdx.x;
int tid = threadIdx.x;
int n1,c1,h1,w1,h2,w2,start_h1,end_h1,start_w1,end_w1,max_h,max_w,in_h1,in_w1;
float loss,elt;
n1 = bid;
c1 = tid;
in_h1 = in_h * st_h;
in_w1 = in_w * st_w;
for(h2=0;h2<in_h;h2++){
for(w2=0;w2<in_w;w2++){
start_h1 = st_h*h2;
end_h1 = st_h*(h2+1);
start_w1 = st_w*w2;
end_w1 = st_w*(w2+1);
elt = a[IDX4C(n1,c1,h2,w2,in_c,in_h,in_w)];
loss = b[IDX4C(n1,c1,h2,w2,in_c,in_h,in_w)];
max_h = (int) floor(elt / 1000.0);
max_w = (int) fmodf(elt,1000.0);
for(h1=start_h1;h1<end_h1;h1++){
for(w1=start_w1;w1<end_w1;w1++){
if(h1 == max_h && w1 == max_w){
c[IDX4C(n1,c1,h1,w1,in_c,in_h1,in_w1)] = loss;
}
else{
c[IDX4C(n1,c1,h1,w1,in_c,in_h1,in_w1)] = 0.0;
}
}
}
}
}
}
/*
1st arg in_n of sparse-tensor
2nd arg in_c of sparse-tensor
3rd arg in_h of sparse-tensor
4th arg in_w of sparse-tensor
5th arg binary of sparse-tensor
6th arg binary of loss-tensor
7th arg stride height
8th arg stride width
return gradient tensor
e.g.
ts1 index-tensor
each element is row*1000+col
|1.0*1000+1.0,1.0*1000*3.0|
|3.0*1000+1.0,3.0*1000+3.0|
ts2 loss-tensor
|0.1,0.2|
|0.3,0.4|
return
|0.0,0.0,0.0,0.0|
|0.0,0.1,0.0,0.2|
|0.0,0.0,0.0,0.0|
|0.0,3.4,0.0,0.4|
*/
static ERL_NIF_TERM
unpooling1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin,b_bin;
ERL_NIF_TERM c_bin;
int in_n,in_c,in_h,in_w,st_h,st_w, n1, n2;
float *a,*b, *c;
float *dev_a, *dev_b, *dev_c;
if (!enif_get_int(env, argv[0], &in_n)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &in_c)) return enif_make_int(env,2);
if (!enif_get_int(env, argv[2], &in_h)) return enif_make_int(env,3);
if (!enif_get_int(env, argv[3], &in_w)) return enif_make_int(env,4);
if (!enif_inspect_binary(env, argv[4], &a_bin )) return enif_make_int(env,5);
if (!enif_inspect_binary(env, argv[5], &b_bin )) return enif_make_int(env,6);
if (!enif_get_int(env, argv[6], &st_h)) return enif_make_int(env,7);
if (!enif_get_int(env, argv[7], &st_w)) return enif_make_int(env,8);
n1 = in_n * in_c * in_h * in_w;
n2 = in_n * in_c * (in_h * st_h) * (in_w * st_w);
a = (float *) a_bin.data;
b = (float *) b_bin.data;
c = (float *) enif_make_new_binary(env, n2 * sizeof(float), &c_bin);
// Allocate for GPU
CHECK(cudaMalloc((void**)&dev_a, n1 * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_b, n1 * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_c, n2 * sizeof(float)));
// copy from host a,b to GPU dev_a, dev_b
CHECK(cudaMemcpy(dev_a, a, n1 * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_b, b, n1 * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_c, c, n2 * sizeof(float), cudaMemcpyHostToDevice));
dim3 blocks(in_n,1,1);
dim3 threads(in_c,1,1);
unpooling_kernel <<<blocks, threads>>>(dev_a, dev_b, dev_c, st_h, st_w, in_c, in_h, in_w);
// copy to host c from GPU dev_c
CHECK(cudaMemcpy(c, dev_c, n2 * sizeof(float), cudaMemcpyDeviceToHost));
// free
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
return(c_bin);
}
__global__ void convolute1_kernel(float *a, float *b, float *c, int filt_n, int filt_c, int filt_h, int filt_w,
int st_h, int st_w, int pad, int in_c, int in_h, int in_w, int oh, int ow)
{
int bid = blockIdx.x;
int tid = threadIdx.x;
int n1,c1,c2,h1,w1,h2,w2,start_h1,end_h1,start_w1,end_w1;
float sum,elt1,elt2;
n1 = bid;
c2 = tid;
for(w2=0;w2<ow;w2++){
for(h2=0;h2<oh;h2++){
sum = 0.0;
start_h1 = st_h*h2-pad;
end_h1 = start_h1 + filt_h;
start_w1 = st_w*w2-pad;
end_w1 = start_w1 + filt_w;
for(c1=0;c1<in_c;c1++){
for(h1=start_h1;h1<end_h1;h1++){
for(w1=start_w1;w1<end_w1;w1++){
if(h1 >= 0 && h1 < in_h && w1 >= 0 && w1 < in_w){
elt1 = a[IDX4C(n1,c1,h1,w1,in_c,in_h,in_w)];
elt2 = b[IDX4C(c2,c1,h1-start_h1,w1-start_w1,filt_c,filt_h,filt_w)];
sum = sum + elt1*elt2;
}
}
}
}
c[IDX4C(n1,c2,h2,w2,filt_n,oh,ow)] = sum;
}
}
}
/*
1st arg in_n of input tensor
2nd arg in_c of input tensor
3rd arg in_h of input tensor
4th arg in_w of input tensor
5th arg filt_n of filter tensor
6th arg filt_c of filter tensor
7th arg filt_h of filter tensor
8th arg filt_w of filter tensor
9th arg binary of input tensor
10th arg binary of filter tensor
11th arg stride height
12th arg stride width
13th arg padding
*/
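/*
output size follows oh = (in_h + 2*pad - filt_h)/st_h + 1 (likewise for ow).
worked example with hypothetical values: in_h=5, filt_h=3, pad=1, st_h=1 -> oh = 5.
*/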
static ERL_NIF_TERM
convolute1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin,b_bin;
ERL_NIF_TERM c_bin;
int in_n,in_c,in_h,in_w, filt_n,filt_c,filt_h,filt_w, st_h,st_w,pad, n1, n2, n3, oh, ow;
float *a,*b, *c;
float *dev_a, *dev_b, *dev_c;
if (!enif_get_int(env, argv[0], &in_n)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &in_c)) return enif_make_int(env,2);
if (!enif_get_int(env, argv[2], &in_h)) return enif_make_int(env,3);
if (!enif_get_int(env, argv[3], &in_w)) return enif_make_int(env,4);
if (!enif_get_int(env, argv[4], &filt_n)) return enif_make_int(env,5);
if (!enif_get_int(env, argv[5], &filt_c)) return enif_make_int(env,6);
if (!enif_get_int(env, argv[6], &filt_h)) return enif_make_int(env,7);
if (!enif_get_int(env, argv[7], &filt_w)) return enif_make_int(env,8);
if (!enif_inspect_binary(env, argv[8], &a_bin )) return enif_make_int(env,9);
if (!enif_inspect_binary(env, argv[9], &b_bin )) return enif_make_int(env,10);
if (!enif_get_int(env, argv[10], &st_h)) return enif_make_int(env,11);
if (!enif_get_int(env, argv[11], &st_w)) return enif_make_int(env,12);
if (!enif_get_int(env, argv[12], &pad)) return enif_make_int(env,13);
n1 = in_n * in_c * in_h * in_w;
n2 = filt_n * filt_c * filt_h * filt_w;
oh = (in_h+2*pad-filt_h)/st_h + 1;
ow = (in_w+2*pad-filt_w)/st_w + 1;
n3 = in_n * filt_n * oh * ow; // n of filter generate n channel
a = (float *) a_bin.data;
b = (float *) b_bin.data;
c = (float *) enif_make_new_binary(env, n3 * sizeof(float), &c_bin);
// Allocate for GPU
CHECK(cudaMalloc((void**)&dev_a, n1 * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_b, n2 * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_c, n3 * sizeof(float)));
// copy from host a,b,c to GPU dev_a, dev_b, dev_c
CHECK(cudaMemcpy(dev_a, a, n1 * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_b, b, n2 * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_c, c, n3 * sizeof(float), cudaMemcpyHostToDevice));
dim3 blocks(in_n,1,1);
dim3 threads(filt_n,1,1);
convolute1_kernel <<<blocks, threads>>>(dev_a, dev_b, dev_c, filt_n, filt_c, filt_h, filt_w, st_h, st_w, pad, in_c, in_h, in_w, oh, ow);
// copy to host c from GPU dev_c
CHECK(cudaMemcpy(c, dev_c, n3 * sizeof(float), cudaMemcpyDeviceToHost));
// free
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
return(c_bin);
}
__global__ void deconvolute1_kernel(float *a, float *b, float *c, int filt_n, int filt_c, int filt_h, int filt_w,
int st_h, int st_w, int pad1, int pad, int in_c, int in_h, int in_w, int oh, int ow, int oh1, int ow1)
{
int bid = blockIdx.x;
int tid = threadIdx.x;
int n1,c1,c2,h1,w1,h2,w2,start_h1,end_h1,start_w1,end_w1;
float sum,elt1,elt2;
n1 = bid;
c2 = tid;
//full convolute. stride=1 always
for(w2=0;w2<ow;w2++){
for(h2=0;h2<oh;h2++){
start_h1 = h2-pad1;
end_h1 = start_h1 + filt_h;
start_w1 = w2-pad1;
end_w1 = start_w1 + filt_w;
sum = 0.0;
for(h1=start_h1;h1<end_h1;h1++){
for(w1=start_w1;w1<end_w1;w1++){
for(c1=0;c1<filt_n;c1++){
if(h1 >= 0 && h1 < in_h && w1 >= 0 && w1 < in_w){
elt1 = a[IDX4C(n1,c1,h1,w1,in_c,in_h,in_w)]; //loss tensor
elt2 = b[IDX4C(c1,c2,h1-start_h1,w1-start_w1,filt_c,filt_h,filt_w)]; //filter tensor
sum = sum + elt1*elt2;
}
}
}
}
if(h2-pad >=0 && h2-pad < oh1 && w2-pad >= 0 && w2-pad < ow1){
c[IDX4C(n1,c2,h2-pad,w2-pad,filt_c,oh1,ow1)] = sum;
}
}
}
}
/*
1st arg in_n of input tensor
2nd arg in_c of input tensor
3rd arg in_h of input tensor
4th arg in_w of input tensor
5th arg filt_n of filter tensor
6th arg filt_c of filter tensor
7th arg filt_h of filter tensor
8th arg filt_w of filter tensor
9th arg binary of input loss tensor
10th arg binary of filter tensor
11th arg stride hight
12th arg stride width
13th arg padding
memo
ex padding = 1
loss 4*4
filter 2*2
input 3*3 padding=1
(3-2+2*1)/1 + 1 = 4
deconvolute computes over 5*5 (3*3 with padding=1) and saves only the 3*3 result range
*/
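/*
implementation note: the filter is rotated 180 degrees on the host and a full
convolution (pad1 = filt_h - 1) of the loss tensor is run on the device,
i.e. the backward pass of a stride-1 convolution.
*/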
static ERL_NIF_TERM
deconvolute1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin,b_bin;
ERL_NIF_TERM c_bin;
int in_n,in_c,in_h,in_w, filt_n,filt_c,filt_h,filt_w, st_h,st_w,pad, pad1, n1, n2, n3, oh, ow, oh1, ow1, i,j,k,l;
float *a,*b, *b1, *c;
float *dev_a, *dev_b, *dev_c;
if (!enif_get_int(env, argv[0], &in_n)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &in_c)) return enif_make_int(env,2);
if (!enif_get_int(env, argv[2], &in_h)) return enif_make_int(env,3);
if (!enif_get_int(env, argv[3], &in_w)) return enif_make_int(env,4);
if (!enif_get_int(env, argv[4], &filt_n)) return enif_make_int(env,5);
if (!enif_get_int(env, argv[5], &filt_c)) return enif_make_int(env,6);
if (!enif_get_int(env, argv[6], &filt_h)) return enif_make_int(env,7);
if (!enif_get_int(env, argv[7], &filt_w)) return enif_make_int(env,8);
if (!enif_inspect_binary(env, argv[8], &a_bin )) return enif_make_int(env,9);
if (!enif_inspect_binary(env, argv[9], &b_bin )) return enif_make_int(env,10);
if (!enif_get_int(env, argv[10], &st_h)) return enif_make_int(env,11);
if (!enif_get_int(env, argv[11], &st_w)) return enif_make_int(env,12);
if (!enif_get_int(env, argv[12], &pad)) return enif_make_int(env,13);
n1 = in_n * in_c * in_h * in_w;
n2 = filt_n * filt_c * filt_h * filt_w;
pad1 = filt_h - 1;
// pad1 = filt_h -1, pad is original padding size
oh = (in_h+2*pad1-filt_h)/st_h + 1;
ow = (in_w+2*pad1-filt_w)/st_w + 1;
oh1 = (in_h+2*(pad1-pad)-filt_h)/st_h + 1;
ow1 = (in_w+2*(pad1-pad)-filt_w)/st_w + 1;
n3 = in_n * filt_c * oh1 * ow1; // channel of filter generate same channel input tensor
a = (float *) a_bin.data;
b = (float *) b_bin.data;
b1 = (float *) enif_alloc(n2 * sizeof(float));
c = (float *) enif_make_new_binary(env, n3 * sizeof(float), &c_bin);
//rotate 180 degree
for(i=0;i<filt_n;i++){
for(j=0;j<filt_c;j++){
for(k=0;k<filt_h;k++){
for(l=0;l<filt_w;l++){
b1[IDX4C(i,j,filt_h-k-1,filt_w-l-1,filt_c,filt_h,filt_w)] = b[IDX4C(i,j,k,l,filt_c,filt_h,filt_w)];
}
}
}
}
// Allocate for GPU
CHECK(cudaMalloc((void**)&dev_a, n1 * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_b, n2 * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_c, n3 * sizeof(float)));
// copy from host a,b1,c to GPU dev_a, dev_b, dev_c
CHECK(cudaMemcpy(dev_a, a, n1 * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_b, b1, n2 * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_c, c, n3 * sizeof(float), cudaMemcpyHostToDevice));
dim3 blocks(in_n,1,1);
dim3 threads(filt_c,1,1);
deconvolute1_kernel <<<blocks, threads>>>(dev_a, dev_b, dev_c, filt_n, filt_c, filt_h, filt_w, st_h, st_w, pad1, pad, in_c, in_h, in_w, oh, ow, oh1, ow1);
// copy to host c from GPU dev_c
CHECK(cudaMemcpy(c, dev_c, n3 * sizeof(float), cudaMemcpyDeviceToHost));
// free
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
enif_free(b1);
return(c_bin);
}
__global__ void deconvolute2_kernel(float *a1, float *a, float *b, float *c, int filt_n, int filt_c,int filt_h, int filt_w,
int st_h, int st_w, int pad, int in_c, int in_h, int in_w, int loss_h, int loss_w)
{
int bid = blockIdx.x;
int tid = threadIdx.x;
int n1,c1,c2,h1,w1,h2,w2,oh,ow,start_h1,end_h1,start_w1,end_w1;
int j,k,l,k1,l1;
float sum,elt1,elt2;
n1 = bid;
c2 = tid;
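// note: c2 is reassigned by the loop below, so every thread in the block
// recomputes all filt_c output channels (redundant work, but the result is unchanged)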
// caution! stride=1
oh = (in_h+2*pad-filt_h) + 1;
ow = (in_w+2*pad-filt_w) + 1;
//dilate loss tensor.
for(j=0;j<filt_n;j++){
for(k=0;k<loss_h;k++){
for(l=0;l<loss_w;l++){
elt1 = a[IDX4C(n1,j,k,l,in_c,loss_h,loss_w)];
k1 = st_h*k;
l1 = st_w*l;
a1[IDX4C(n1,j,k1,l1,in_c,in_h,in_w)] = elt1;
}
}
}
//full convolute. stride=1
for(c2=0;c2<filt_c;c2++){
for(w2=0;w2<ow;w2++){
for(h2=0;h2<oh;h2++){
start_h1 = h2-pad;
end_h1 = start_h1 + filt_h;
start_w1 = w2-pad;
end_w1 = start_w1 + filt_w;
sum = 0.0;
for(h1=start_h1;h1<end_h1;h1++){
for(w1=start_w1;w1<end_w1;w1++){
for(c1=0;c1<filt_n;c1++){
if(h1 >= 0 && h1 < in_h && w1 >= 0 && w1 < in_w){
elt1 = a1[IDX4C(n1,c1,h1,w1,in_c,in_h,in_w)]; //loss tensor
elt2 = b[IDX4C(c1,c2,h1-start_h1,w1-start_w1,filt_c,filt_h,filt_w)]; //filter tensor
sum = sum + elt1*elt2;
}
}
}
}
c[IDX4C(n1,c2,h2,w2,filt_c,oh,ow)] = sum;
}
}
}
}
/*
dilate loss tensor
e.g.
|1.0,2.0|
|3.0,4.0|
dilated stride=2
|1.0,0.0,2.0|
|0.0,0.0,0.0|
|3.0,0.0,4.0|
*/
/*
1st arg in_n of input loss tensor
2nd arg in_c of input loss tensor
3rd arg in_h of input loss tensor
4th arg in_w of input loss tensor
5th arg filt_n of filter tensor
6th arg filt_c of filter tensor
7th arg filt_h of filter tensor
8th arg filt_w of filter tensor
9th arg binary of input loss tensor
10th arg binary of filter tensor
11th arg stride hight
12th arg stride width
13th arg padding
*/
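/*
the loss tensor is first dilated to in_h = loss_h + (loss_h-1)*(st_h-1).
worked example with hypothetical values: loss_h=2, st_h=2 -> in_h=3,
matching the 3x3 dilation example above.
*/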
static ERL_NIF_TERM
deconvolute2(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin,b_bin;
ERL_NIF_TERM c_bin;
int in_n,in_c,in_h,in_w,filt_n,filt_c,filt_h, filt_w, st_h, st_w,pad, pad1, n1, n2, n3, oh, ow, i,j,k,l, loss_h, loss_w;
float *a, *a1, *b, *b1, *c;
float *dev_a, *dev_a1, *dev_b, *dev_c;
if (!enif_get_int(env, argv[0], &in_n)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &in_c)) return enif_make_int(env,2);
if (!enif_get_int(env, argv[2], &loss_h)) return enif_make_int(env,3);
if (!enif_get_int(env, argv[3], &loss_w)) return enif_make_int(env,4);
if (!enif_get_int(env, argv[4], &filt_n)) return enif_make_int(env,5);
if (!enif_get_int(env, argv[5], &filt_c)) return enif_make_int(env,6);
if (!enif_get_int(env, argv[6], &filt_h)) return enif_make_int(env,7);
if (!enif_get_int(env, argv[7], &filt_w)) return enif_make_int(env,8);
if (!enif_inspect_binary(env, argv[8], &a_bin )) return enif_make_int(env,9);
if (!enif_inspect_binary(env, argv[9], &b_bin )) return enif_make_int(env,10);
if (!enif_get_int(env, argv[10], &st_h)) return enif_make_int(env,11);
if (!enif_get_int(env, argv[11], &st_w)) return enif_make_int(env,12);
if (!enif_get_int(env, argv[12], &pad)) return enif_make_int(env,13);
// size for dilate
in_h = loss_h + (loss_h - 1)*(st_h - 1);
in_w = loss_w + (loss_w - 1)*(st_w - 1);
n1 = in_n * in_c * in_h * in_w; //loss tensor size
n2 = filt_n * filt_c * filt_h * filt_w; //filter tensor size
pad1 = (filt_h - 1) + pad; //padding size with dilate
oh = (in_h+2*pad1-filt_h) + 1; //output deconvolute tensor size. caution stride=1.
ow = (in_w+2*pad1-filt_w) + 1; //
n3 = in_n * filt_c * oh * ow; //
a = (float *) a_bin.data;
b = (float *) b_bin.data;
a1 = (float *) enif_alloc(n1 * sizeof(float));
b1 = (float *) enif_alloc(n2 * sizeof(float));
c = (float *) enif_make_new_binary(env, n3 * sizeof(float), &c_bin);
//rotate 180 degree
for(i=0;i<filt_n;i++){
for(j=0;j<filt_c;j++){
for(k=0;k<filt_h;k++){
for(l=0;l<filt_w;l++){
b1[IDX4C(i,j,filt_h-k-1,filt_w-l-1,filt_c,filt_h,filt_w)] = b[IDX4C(i,j,k,l,filt_c,filt_h,filt_w)];
}
}
}
}
// dilate
for(i=0;i<n1;i++){
a1[i] = 0.0;
}
CHECK(cudaMalloc((void**)&dev_a1, n1 * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_a, in_n*1*loss_h*loss_w * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_b, n2 * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_c, n3 * sizeof(float)));
CHECK(cudaMemcpy(dev_a1, a1, n1 * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_a, a, in_n*1*loss_h*loss_w * sizeof(float), cudaMemcpyHostToDevice));
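// note: the allocation and copy above cover in_n*1*loss_h*loss_w floats of the
// loss tensor, i.e. a single loss channel per sample is assumed here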
CHECK(cudaMemcpy(dev_b, b1, n2 * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_c, c, n3 * sizeof(float), cudaMemcpyHostToDevice));
dim3 blocks(in_n,1,1);
dim3 threads(filt_c,1,1);
deconvolute2_kernel <<<blocks, threads>>>(dev_a1, dev_a, dev_b, dev_c, filt_n, filt_c, filt_h, filt_w, st_h, st_w, pad1, in_c, in_h, in_w, loss_h, loss_w);
// copy to host c from GPU dev_c
CHECK(cudaMemcpy(c, dev_c, n3 * sizeof(float), cudaMemcpyDeviceToHost));
// free
cudaFree(dev_a);
cudaFree(dev_a1);
cudaFree(dev_b);
cudaFree(dev_c);
enif_free(a1);
enif_free(b1);
return(c_bin);
}
__global__ void gradfilter1_kernel(float *a, float *b, float *c, int filt_n, int filt_c, int filt_h, int filt_w, int loss_c, int loss_h, int loss_w, int st_h, int st_w, int pad, int in_c, int in_h, int in_w, int n)
{
int bid = blockIdx.x;
int tid = threadIdx.x;
int n1,c1,c2,h1,w1,h2,w2,h3,w3;
float sum,elt1,elt2;
n1 = bid;
c2 = tid;
for(c1=0;c1<filt_c;c1++){
//h1,w1 is index of filter
for(h1=0;h1<filt_h;h1++){
for(w1=0;w1<filt_w;w1++){
//h2,w2 is index of loss tensor
sum = 0.0;
for(h2=0;h2<loss_h;h2++){
for(w2=0;w2<loss_w;w2++){
//h3,w3 is index of input tensor
h3 = h1 - pad + h2;
w3 = w1 - pad + w2;
if(h3>=0 && h3<in_h && w3>=0 && w3<in_w){
elt1 = a[IDX4C(n1,c1,h3,w3,in_c,in_h,in_w)]; //input tensor
elt2 = b[IDX4C(n1,c2,h2,w2,loss_c,loss_h,loss_w)]; //loss tensor
sum = sum + elt1*elt2;
}
}
}
//set filter tensor
c[IDX5C(n1,c2,c1,h1,w1,filt_n,filt_c,filt_h,filt_w)] = sum;
}
}
}
}
/*
1st arg in_n of input tensor
2nd arg in_c of input tensor
3rd arg in_h of input tensor
4th arg in_w of input tensor
5th arg filt_n of filter tensor
6th arg filt_c of filter tensor
7th arg filt_h of filter tensor
8th arg filt_w of filter tensor
9th arg loss_c of loss tensor
10th arg loss_h of loss tensor
11th arg loss_w of loss tensor
12th arg binary of input tensor
13th arg binary of loss tensor
14th arg stride hight
15th arg stride width
16th arg padding
*/
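/*
the filter gradient is the correlation of each input sample with its loss map;
per-sample results are accumulated into c on the device and averaged over the
batch on the host after the kernel finishes.
*/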
static ERL_NIF_TERM
gradfilter1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin,b_bin;
ERL_NIF_TERM c_bin,d_bin;
int in_n,in_c,in_h,in_w,filt_n,filt_c,filt_h,filt_w,loss_c,loss_h,loss_w,st_h,st_w,pad,n1,n2,n3,n4,i,j,k,l,m;
float *a,*b,*c,*d;
float *dev_a, *dev_b, *dev_c;
float elt;
if (!enif_get_int(env, argv[0], &in_n)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &in_c)) return enif_make_int(env,2);
if (!enif_get_int(env, argv[2], &in_h)) return enif_make_int(env,3);
if (!enif_get_int(env, argv[3], &in_w)) return enif_make_int(env,4);
if (!enif_get_int(env, argv[4], &filt_n)) return enif_make_int(env,5);
if (!enif_get_int(env, argv[5], &filt_c)) return enif_make_int(env,6);
if (!enif_get_int(env, argv[6], &filt_h)) return enif_make_int(env,7);
if (!enif_get_int(env, argv[7], &filt_w)) return enif_make_int(env,8);
if (!enif_get_int(env, argv[8], &loss_c)) return enif_make_int(env,9);
if (!enif_get_int(env, argv[9], &loss_h)) return enif_make_int(env,10);
if (!enif_get_int(env, argv[10], &loss_w)) return enif_make_int(env,11);
if (!enif_inspect_binary(env, argv[11], &a_bin )) return enif_make_int(env,12);
if (!enif_inspect_binary(env, argv[12], &b_bin )) return enif_make_int(env,13);
if (!enif_get_int(env, argv[13], &st_h)) return enif_make_int(env,14);
if (!enif_get_int(env, argv[14], &st_w)) return enif_make_int(env,15);
if (!enif_get_int(env, argv[15], &pad)) return enif_make_int(env,16);
n1 = in_n * in_c * in_h * in_w;
n2 = in_n * loss_c * loss_h * loss_w;
n3 = in_n * filt_n * filt_c * filt_h * filt_w;
n4 = filt_n * filt_c * filt_h * filt_w;
a = (float *) a_bin.data;
b = (float *) b_bin.data;
c = (float *) enif_make_new_binary(env, n3 * sizeof(float), &c_bin);
d = (float *) enif_make_new_binary(env, n4 * sizeof(float), &d_bin);
//initialize c
for(i=0;i<n3;i++){
c[i] = 0.0;
}
// Allocate for GPU
CHECK(cudaMalloc((void**)&dev_a, n1 * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_b, n2 * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_c, n3 * sizeof(float)));
// copy from host a,b,c to GPU dev_a, dev_b, dev_c
CHECK(cudaMemcpy(dev_a, a, n1 * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_b, b, n2 * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_c, c, n3 * sizeof(float), cudaMemcpyHostToDevice));
dim3 blocks(in_n,1,1);
dim3 threads(filt_n,1,1);
gradfilter1_kernel <<<blocks, threads>>>(dev_a, dev_b, dev_c, filt_n, filt_c, filt_h, filt_w, loss_c, loss_h, loss_w, st_h, st_w, pad, in_c, in_h, in_w, in_n);
// copy to host c from GPU dev_c
CHECK(cudaMemcpy(c, dev_c, n3 * sizeof(float), cudaMemcpyDeviceToHost));
//average
// clear d
for(i=0;i<n4;i++){
d[i] = 0.0;
}
// copy from c to d and compute sum
for(i=0;i<in_n;i++){
for(j=0;j<filt_n;j++){
for(k=0;k<filt_c;k++){
for(l=0;l<filt_h;l++){
for(m=0;m<filt_w;m++){
elt = c[IDX5C(i,j,k,l,m,filt_n,filt_c,filt_h,filt_w)];
d[IDX4C(j,k,l,m,filt_c,filt_h,filt_w)] = d[IDX4C(j,k,l,m,filt_c,filt_h,filt_w)] + elt;
}
}
}
}
}
// average
for(i=0;i<n4;i++){
d[i] = d[i] / (float)in_n;
}
// free
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
return(d_bin);
}
__global__ void gradfilter2_kernel(float *a, float *b1, float *b, float *c, int filt_n, int filt_c, int filt_h, int filt_w, int loss_c, int loss_h, int loss_w, int st_h, int st_w, int pad, int in_c, int in_h, int in_w, int n)
{
int bid = blockIdx.x;
int tid = threadIdx.x;
int n1,c1,c2,h1,w1,h2,w2,h3,w3,loss_h1,loss_w1,j,k,l,k1,l1;
float sum,elt1,elt2;
n1 = bid;
c2 = tid;
//dilated loss tensor size
loss_h1 = loss_h+(loss_h-1)*(st_h-1);
loss_w1 = loss_w+(loss_w-1)*(st_w-1);
//dilate loss tensor.
for(j=0;j<loss_c;j++){
for(k=0;k<loss_h;k++){
for(l=0;l<loss_w;l++){
elt1 = b[IDX4C(n1,j,k,l,loss_c,loss_h,loss_w)];
k1 = st_h*k;
l1 = st_w*l;
b1[IDX4C(n1,j,k1,l1,loss_c,loss_h1,loss_w1)] = elt1;
}
}
}
//convolute input tensor with dilated loss tensor. caution: stride is always 1.
for(c1=0;c1<filt_c;c1++){
//h1,w1 is index of filter
for(h1=0;h1<filt_h;h1++){
for(w1=0;w1<filt_w;w1++){
//h2,w2 is index of loss tensor
sum = 0.0;
for(h2=0;h2<loss_h1;h2++){
for(w2=0;w2<loss_w1;w2++){
//h3,w3 is index of input tensor
h3 = h1 - pad + h2;
w3 = w1 - pad + w2;
if(h3>=0 && h3<in_h && w3>=0 && w3<in_w){
elt1 = a[IDX4C(n1,c1,h3,w3,in_c,in_h,in_w)]; //input tensor
elt2 = b1[IDX4C(n1,c2,h2,w2,loss_c,loss_h1,loss_w1)]; //loss tensor
sum = sum + elt1*elt2;
}
}
}
//set filter tensor
c[IDX5C(n1,c2,c1,h1,w1,filt_n,filt_c,filt_h,filt_w)] = sum;
}
}
}
}
/*
dilate loss tensor
e.g.
|1.0,2.0|
|3.0,4.0|
dilated stride=2
|1.0,0.0,2.0|
|0.0,0.0,0.0|
|3.0,0.0,4.0|
*/
/*
gradfilter2 is for stride >= 2. It requires dilating the loss tensor first.
1st arg in_n of input tensor
2nd arg in_c of input tensor
3rd arg in_h of input tensor
4th arg in_w of input tensor
5th arg filt_n of filter tensor
6th arg filt_c of filter tensor
7th arg filt_h of filter tensor
8th arg filt_w of filter tensor
9th arg loss_c of loss tensor
10th arg loss_h of loss tensor
11th arg loss_w of loss tensor
12th arg binary of input tensor
13th arg binary of loss tensor
14th arg stride hight
15th arg stride width
16th arg padding
*/
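/*
gradfilter2 dilates the loss tensor to loss_h + (loss_h-1)*(st_h-1) (likewise for the width)
and then runs the same stride-1 correlation as gradfilter1.
*/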
static ERL_NIF_TERM
gradfilter2(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin,b_bin;
ERL_NIF_TERM c_bin,d_bin;
int in_n,in_c,in_h,in_w,filt_n,filt_c,filt_h,filt_w,loss_c,loss_h,loss_w,st_h,st_w,pad,n1,n2,n3,n4,n5,i,j,k,l,m;
float *a,*b,*b1,*c,*d;
float *dev_a, *dev_b, *dev_b1, *dev_c;
float elt;
if (!enif_get_int(env, argv[0], &in_n)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &in_c)) return enif_make_int(env,2);
if (!enif_get_int(env, argv[2], &in_h)) return enif_make_int(env,3);
if (!enif_get_int(env, argv[3], &in_w)) return enif_make_int(env,4);
if (!enif_get_int(env, argv[4], &filt_n)) return enif_make_int(env,5);
if (!enif_get_int(env, argv[5], &filt_c)) return enif_make_int(env,6);
if (!enif_get_int(env, argv[6], &filt_h)) return enif_make_int(env,7);
if (!enif_get_int(env, argv[7], &filt_w)) return enif_make_int(env,8);
if (!enif_get_int(env, argv[8], &loss_c)) return enif_make_int(env,9);
if (!enif_get_int(env, argv[9], &loss_h)) return enif_make_int(env,10);
if (!enif_get_int(env, argv[10], &loss_w)) return enif_make_int(env,11);
if (!enif_inspect_binary(env, argv[11], &a_bin )) return enif_make_int(env,12);
if (!enif_inspect_binary(env, argv[12], &b_bin )) return enif_make_int(env,13);
if (!enif_get_int(env, argv[13], &st_h)) return enif_make_int(env,14);
if (!enif_get_int(env, argv[14], &st_w)) return enif_make_int(env,15);
if (!enif_get_int(env, argv[15], &pad)) return enif_make_int(env,16);
n1 = in_n * in_c * in_h * in_w;
n2 = in_n * loss_c * loss_h * loss_w;
n3 = in_n * filt_n * filt_c * filt_h * filt_w;
n4 = filt_n * filt_c * filt_h * filt_w;
n5 = in_n * loss_c * (loss_h+(loss_h-1)*(st_h-1)) * (loss_w+(loss_w-1)*(st_w-1)); // dilated loss tensor size
a = (float *) a_bin.data;
b = (float *) b_bin.data;
b1 = (float *) enif_alloc(n5 * sizeof(float)); // dilate loss tensor area
c = (float *) enif_make_new_binary(env, n3 * sizeof(float), &c_bin);
d = (float *) enif_make_new_binary(env, n4 * sizeof(float), &d_bin);
//initialize c
for(i=0;i<n3;i++){
c[i] = 0.0;
}
//initialize b1
for(i=0;i<n5;i++){
b1[i] = 0.0;
}
// Allocate for GPU
CHECK(cudaMalloc((void**)&dev_a, n1 * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_b, n2 * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_b1, n5 * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_c, n3 * sizeof(float)));
// copy from host a,b,c to GPU dev_a, dev_b, dev_c
CHECK(cudaMemcpy(dev_a, a, n1 * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_b, b, n2 * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_b1, b1, n5 * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_c, c, n3 * sizeof(float), cudaMemcpyHostToDevice));
dim3 blocks(in_n,1,1);
dim3 threads(filt_n,1,1);
gradfilter2_kernel <<<blocks, threads>>>(dev_a, dev_b1, dev_b, dev_c, filt_n, filt_c, filt_h, filt_w, loss_c, loss_h, loss_w, st_h, st_w, pad, in_c, in_h, in_w, in_n);
// copy to host c from GPU dev_c
CHECK(cudaMemcpy(c, dev_c, n3 * sizeof(float), cudaMemcpyDeviceToHost));
//average
// clear d
for(i=0;i<n4;i++){
d[i] = 0.0;
}
// copy from c to d and compute sum
for(i=0;i<in_n;i++){
for(j=0;j<filt_n;j++){
for(k=0;k<filt_c;k++){
for(l=0;l<filt_h;l++){
for(m=0;m<filt_w;m++){
elt = c[IDX5C(i,j,k,l,m,filt_n,filt_c,filt_h,filt_w)];
d[IDX4C(j,k,l,m,filt_c,filt_h,filt_w)] = d[IDX4C(j,k,l,m,filt_c,filt_h,filt_w)] + elt;
}
}
}
}
}
// average
for(i=0;i<n4;i++){
d[i] = d[i] / (float)in_n;
}
// free
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_b1);
cudaFree(dev_c);
enif_free(b1);
return(d_bin);
}
__global__ void full_kernel(float *a, float *b, int in_n, int in_c, int in_h, int in_w, int n)
{
int tid = threadIdx.x;
int n1,i,j,k;
float elt;
if(tid < n)
{
n1 = tid;
for(i=0;i<in_c;i++){
for(j=0;j<in_h;j++){
for(k=0;k<in_w;k++){
elt = a[IDX4C(n1,i,j,k,in_c,in_h,in_w)];
b[IDX2C(n1,i*in_h*in_w + j*in_w + k,in_n)] = elt;
}
}
}
}
}
/*
1st arg in_n of 4D input tensor
2nd arg in_c of input tensor
3rd arg in_h of input tensor
4th arg in_w of input tensor
5th arg binary of input tensor
*/
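/*
full1 flattens each NCHW sample into one row of an in_n x (in_c*in_h*in_w)
column-major matrix; unfull1 below performs the inverse reshape.
*/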
static ERL_NIF_TERM
full1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM b_bin;
int in_n,in_c,in_h,in_w,n1,n;
float *a,*b;
float *dev_a, *dev_b;
if (!enif_get_int(env, argv[0], &in_n)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &in_c)) return enif_make_int(env,2);
if (!enif_get_int(env, argv[2], &in_h)) return enif_make_int(env,3);
if (!enif_get_int(env, argv[3], &in_w)) return enif_make_int(env,4);
if (!enif_inspect_binary(env, argv[4], &a_bin )) return enif_make_int(env,5);
n1 = in_n * in_c * in_h * in_w;
a = (float *) a_bin.data;
b = (float *) enif_make_new_binary(env, n1 * sizeof(float), &b_bin);
n = in_n;
// Allocate for GPU
CHECK(cudaMalloc((void**)&dev_a, n1 * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_b, n1 * sizeof(float)));
// copy from host a,b to GPU dev_a, dev_b
CHECK(cudaMemcpy(dev_a, a, n1 * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_b, b, n1 * sizeof(float), cudaMemcpyHostToDevice));
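// note: a single block with in_n threads is launched, so in_n must not exceed
// the device's max threads per block (typically 1024)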
full_kernel <<<1, n>>>(dev_a, dev_b, in_n, in_c, in_h, in_w, n);
// copy to host b from GPU dev_b
CHECK(cudaMemcpy(b, dev_b, n1 * sizeof(float), cudaMemcpyDeviceToHost));
// free
cudaFree(dev_a);
cudaFree(dev_b);
return(b_bin);
}
__global__ void unfull_kernel(float *a, float *b, int in_n, int in_c, int in_h, int in_w, int n)
{
int tid = threadIdx.x;
int n1,i,j,k;
float elt;
if(tid < n)
{
n1 = tid;
for(i=0;i<in_c;i++){
for(j=0;j<in_h;j++){
for(k=0;k<in_w;k++){
elt = a[IDX2C(n1,i*in_h*in_w + j*in_w + k,in_n)];
b[IDX4C(n1,i,j,k,in_c,in_h,in_w)] = elt;
}
}
}
}
}
/*
1st arg in_n of 4D input tensor
2nd arg in_c of input tensor
3rd arg in_h of input tensor
4th arg in_w of input tensor
5th arg binary of input tensor
*/
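/*
unfull1 is the inverse of full1: it reshapes each row of the in_n x (in_c*in_h*in_w)
column-major matrix back into an NCHW sample.
*/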
static ERL_NIF_TERM
unfull1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM b_bin;
int in_n,in_c,in_h,in_w,n1,n;
float *a,*b;
float *dev_a, *dev_b;
if (!enif_get_int(env, argv[0], &in_n)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &in_c)) return enif_make_int(env,2);
if (!enif_get_int(env, argv[2], &in_h)) return enif_make_int(env,3);
if (!enif_get_int(env, argv[3], &in_w)) return enif_make_int(env,4);
if (!enif_inspect_binary(env, argv[4], &a_bin )) return enif_make_int(env,5);
n1 = in_n * in_c * in_h * in_w;
a = (float *) a_bin.data;
b = (float *) enif_make_new_binary(env, n1 * sizeof(float), &b_bin);
n = in_n;
// Allocate for GPU
CHECK(cudaMalloc((void**)&dev_a, n1 * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_b, n1 * sizeof(float)));
// copy from host a,b to GPU dev_a, dev_b
CHECK(cudaMemcpy(dev_a, a, n1 * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_b, b, n1 * sizeof(float), cudaMemcpyHostToDevice));
unfull_kernel <<<1, n>>>(dev_a, dev_b, in_n, in_c, in_h, in_w, n);
// copy to host b from GPU dev_b
CHECK(cudaMemcpy(b, dev_b, n1 * sizeof(float), cudaMemcpyDeviceToHost));
// free
cudaFree(dev_a);
cudaFree(dev_b);
return(b_bin);
}
static ERL_NIF_TERM
new1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
int n,i;
ERL_NIF_TERM a_bin;
float *a;
double d;
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
if (!enif_get_double(env, argv[1], &d)) return enif_make_int(env,2);
a = (float *) enif_make_new_binary(env, n * sizeof(float), &a_bin);
// Set matrix data
for(i=0;i<n;i++){
a[i] = (float)d;
}
return(a_bin);
}
static ERL_NIF_TERM
new2(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
int r1,c1,i,j;
ERL_NIF_TERM head, list, a_bin;
float *a;
double d;
if (!enif_get_int(env, argv[0], &r1)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c1)) return enif_make_int(env,2);
a = (float *) enif_make_new_binary(env, r1 * c1 * sizeof(float), &a_bin);
// Set matrix data
list = argv[2]; /* matrix1 */
for(i=0;i<r1;i++){
for(j=0;j<c1;j++){
enif_get_list_cell(env, list, &head, &list);
enif_get_double(env,head,&d);
a[IDX2C(i,j,r1)] = (float)d;
}
}
return(a_bin);
}
static ERL_NIF_TERM
new3(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
int c,h,w,i,j,k;
ERL_NIF_TERM head, list, a_bin;
float *a;
double d;
if (!enif_get_int(env, argv[0], &c)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &h)) return enif_make_int(env,2);
if (!enif_get_int(env, argv[2], &w)) return enif_make_int(env,3);
a = (float *) enif_make_new_binary(env, c * h * w * sizeof(float), &a_bin);
// Set matrix data
list = argv[3]; /* matrix1 */
for(i=0;i<c;i++){
for(j=0;j<h;j++){
for(k=0;k<w;k++){
enif_get_list_cell(env, list, &head, &list);
enif_get_double(env,head,&d);
a[IDX3C(i,j,k,h,w)] = (float)d;
}
}
}
return(a_bin);
}
static ERL_NIF_TERM
new4(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
int n,c,h,w,i,j,k,l;
ERL_NIF_TERM head, list, a_bin;
float *a;
double d;
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c)) return enif_make_int(env,2);
if (!enif_get_int(env, argv[2], &h)) return enif_make_int(env,3);
if (!enif_get_int(env, argv[3], &w)) return enif_make_int(env,4);
a = (float *) enif_make_new_binary(env, n * c * h * w * sizeof(float), &a_bin);
// Set matrix data
list = argv[4]; /* matrix1 */
for(i=0;i<n;i++){
for(j=0;j<c;j++){
for(k=0;k<h;k++){
for(l=0;l<w;l++){
enif_get_list_cell(env, list, &head, &list);
enif_get_double(env,head,&d);
a[IDX4C(i,j,k,l,c,h,w)] = (float)d;
}
}
}
}
return(a_bin);
}
static ERL_NIF_TERM
rand1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
int n,i;
float x,y,val;
float *result_data;
ERL_NIF_TERM result;
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
result_data = (float *) enif_make_new_binary(env, n * sizeof(float), &result);
srand((unsigned) time(NULL));
for(i=0;i<n;i++){
//box_muller
x = ((float)rand() + 1.0f)/((float)RAND_MAX + 1.0f); // shift away from 0 so log(x) below stays finite
y = (float)rand()/(float)RAND_MAX;
val = sqrt(-2.0 * log(x)) * cos(2.0 * PI * y);
result_data[i] = val;
}
return(result);
}
static ERL_NIF_TERM
mult1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin, b_bin;
ERL_NIF_TERM c_bin;
int r1, c1, r2, c2, n, i, j;
float *a,*b,*c;
float* devPtrA;
float* devPtrB;
float* devPtrC;
if (!enif_get_int(env, argv[0], &r1)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c1)) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &a_bin )) return enif_make_int(env,3);
if (!enif_get_int(env, argv[3], &r2)) return enif_make_int(env,4);
if (!enif_get_int(env, argv[4], &c2)) return enif_make_int(env,5);
if (!enif_inspect_binary(env, argv[5], &b_bin)) return enif_make_int(env,6);
n = r1*c2;
a = (float *) a_bin.data;
b = (float *) b_bin.data;
c = (float *) enif_make_new_binary(env, n * sizeof(float), &c_bin);
for(j=0;j<c2;j++)
for(i=0;i<r1;i++)
c[IDX2C(i,j,r1)] = 0.0;
// Initialize CUBLAS
cublasInit();
CUBLAS(cublasAlloc (r1*c1, sizeof(*a), (void**)&devPtrA));
CUBLAS(cublasAlloc (r2*c2, sizeof(*b), (void**)&devPtrB));
CUBLAS(cublasAlloc (r1*c2, sizeof(*c), (void**)&devPtrC));
CUBLAS(cublasSetMatrix (r1, c1, sizeof(*a), a, r1, devPtrA, r1));
CUBLAS(cublasSetMatrix (r2, c2, sizeof(*b), b, r2, devPtrB, r2));
CUBLAS(cublasSetMatrix (r1, c2, sizeof(*c), c, r1, devPtrC, r1));
//Sgemm
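// column-major GEMM: C(r1 x c2) = A(r1 x c1) * B(r2 x c2); leading dimensions are the row counts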
cublasSgemm('N', 'N', r1, c2, c1, 1.0, devPtrA, r1, devPtrB, r2, 0.0, devPtrC, r1);
CUBLAS(cublasGetMatrix (r1, c2, sizeof(*c), devPtrC, r1, c, r1));
// Shutdown CUBLAS
cublasFree(devPtrA);
cublasFree(devPtrB);
cublasFree(devPtrC);
cublasShutdown();
return(c_bin);
}
__global__ void add1_kernel(float *a, float *b, float *c, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n)
{
c[tid] = a[tid] + b[tid];
tid += blockDim.x * gridDim.x;
}
}
static ERL_NIF_TERM
add1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin, b_bin;
ERL_NIF_TERM c_bin;
int n;
float *a,*b,*c;
float *dev_a, *dev_b, *dev_c;
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
if (!enif_inspect_binary(env, argv[1], &a_bin )) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &b_bin)) return enif_make_int(env,3);
a = (float *) a_bin.data;
b = (float *) b_bin.data;
c = (float *) enif_make_new_binary(env, n * sizeof(float), &c_bin);
// Allocate for GPU
CHECK(cudaMalloc((void**)&dev_a, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_b, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_c, n * sizeof(float)));
// copy from host a,b to GPU dev_a, dev_b
CHECK(cudaMemcpy(dev_a, a, n * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_b, b, n * sizeof(float), cudaMemcpyHostToDevice));
add1_kernel <<<128, 128>>>(dev_a, dev_b, dev_c, n);
// copy to host c from GPU dev_c
CHECK(cudaMemcpy(c, dev_c, n * sizeof(float), cudaMemcpyDeviceToHost));
// free
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
return(c_bin);
}
__global__ void sub1_kernel(float *a, float *b, float *c, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n)
{
c[tid] = a[tid] - b[tid];
tid += blockDim.x * gridDim.x;
}
}
static ERL_NIF_TERM
sub1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin, b_bin;
ERL_NIF_TERM c_bin;
int n;
float *a,*b,*c;
float *dev_a, *dev_b, *dev_c;
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
if (!enif_inspect_binary(env, argv[1], &a_bin )) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &b_bin)) return enif_make_int(env,3);
a = (float *) a_bin.data;
b = (float *) b_bin.data;
c = (float *) enif_make_new_binary(env, n * sizeof(float), &c_bin);
// Allocate for GPU
CHECK(cudaMalloc((void**)&dev_a, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_b, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_c, n * sizeof(float)));
// copy from host a,b to GPU dev_a, dev_b
CHECK(cudaMemcpy(dev_a, a, n * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_b, b, n * sizeof(float), cudaMemcpyHostToDevice));
sub1_kernel <<<128, 128>>>(dev_a, dev_b, dev_c, n);
// copy to host c from GPU dev_c
CHECK(cudaMemcpy(c, dev_c, n * sizeof(float), cudaMemcpyDeviceToHost));
// free
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
return(c_bin);
}
__global__ void emult1_kernel(float *a, float *b, float *c, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n)
{
c[tid] = a[tid] * b[tid];
tid += blockDim.x * gridDim.x;
}
}
static ERL_NIF_TERM
emult1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin, b_bin;
ERL_NIF_TERM c_bin;
int n;
float *a,*b,*c;
float *dev_a, *dev_b, *dev_c;
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
if (!enif_inspect_binary(env, argv[1], &a_bin )) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &b_bin)) return enif_make_int(env,3);
a = (float *) a_bin.data;
b = (float *) b_bin.data;
c = (float *) enif_make_new_binary(env, n * sizeof(float), &c_bin);
// Allocate for GPU
CHECK(cudaMalloc((void**)&dev_a, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_b, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_c, n * sizeof(float)));
// copy from host a,b to GPU dev_a, dev_b
CHECK(cudaMemcpy(dev_a, a, n * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_b, b, n * sizeof(float), cudaMemcpyHostToDevice));
emult1_kernel <<<128, 128>>>(dev_a, dev_b, dev_c, n);
// copy to host c from GPU dev_c
CHECK(cudaMemcpy(c, dev_c, n * sizeof(float), cudaMemcpyDeviceToHost));
// free
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
return(c_bin);
}
static ERL_NIF_TERM
transpose1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM b_bin;
int r1, c1, n, i, j;
float *a,*b;
if (!enif_get_int(env, argv[0], &r1)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c1)) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &a_bin )) return enif_make_int(env,3);
n = r1*c1;
a = (float *) a_bin.data;
b = (float *) enif_make_new_binary(env, n * sizeof(float), &b_bin);
for(i=0;i<r1;i++){
for(j=0;j<c1;j++){
b[IDX2C(j,i,c1)] = a[IDX2C(i,j,r1)];
}
}
return(b_bin);
}
static ERL_NIF_TERM
ident1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
int n,i,j;
ERL_NIF_TERM a_bin;
float *a;
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
a = (float *) enif_make_new_binary(env, n * n * sizeof(float), &a_bin);
// Set matrix data
for(i=0;i<n;i++){
for(j=0;j<n;j++){
if(i==j)
a[IDX2C(i,j,n)] = 1.0;
else
a[IDX2C(i,j,n)] = 0.0;
}
}
return(a_bin);
}
__global__ void sigmoid_kernel(float *a, float *b, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n)
{
b[tid] = SIGMOID(a[tid]);
tid += blockDim.x * gridDim.x;
}
}
static ERL_NIF_TERM
activate_sigmoid(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM b_bin;
int n;
float *a,*b;
float *dev_a, *dev_b;
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
if (!enif_inspect_binary(env, argv[1], &a_bin )) return enif_make_int(env,2);
a = (float *) a_bin.data;
b = (float *) enif_make_new_binary(env, n * sizeof(float), &b_bin);
// Allocate for GPU
CHECK(cudaMalloc((void**)&dev_a, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_b, n * sizeof(float)));
// copy from host a,b to GPU dev_a, dev_b
CHECK(cudaMemcpy(dev_a, a, n * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_b, b, n * sizeof(float), cudaMemcpyHostToDevice));
sigmoid_kernel <<<128, 128>>>(dev_a, dev_b, n);
// copy to host b from GPU dev_b
CHECK(cudaMemcpy(b, dev_b, n * sizeof(float), cudaMemcpyDeviceToHost));
// free
cudaFree(dev_a);
cudaFree(dev_b);
return(b_bin);
}
__global__ void tanh_kernel(float *a, float *b, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n)
{
b[tid] = tanh(a[tid]);
tid += blockDim.x * gridDim.x;
}
}
static ERL_NIF_TERM
activate_tanh(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM b_bin;
int n;
float *a,*b;
float *dev_a, *dev_b;
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
if (!enif_inspect_binary(env, argv[1], &a_bin )) return enif_make_int(env,2);
a = (float *) a_bin.data;
b = (float *) enif_make_new_binary(env, n * sizeof(float), &b_bin);
// Allocate for GPU
CHECK(cudaMalloc((void**)&dev_a, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_b, n * sizeof(float)));
// copy from host a,b to GPU dev_a, dev_b
CHECK(cudaMemcpy(dev_a, a, n * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_b, b, n * sizeof(float), cudaMemcpyHostToDevice));
tanh_kernel <<<128, 128>>>(dev_a, dev_b, n);
// copy to host b from GPU dev_b
CHECK(cudaMemcpy(b, dev_b, n * sizeof(float), cudaMemcpyDeviceToHost));
// free
cudaFree(dev_a);
cudaFree(dev_b);
return(b_bin);
}
__global__ void relu_kernel(float *a, float *b, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n)
{
if(a[tid] >= 0)
b[tid] = a[tid];
else
b[tid] = 0.0;
tid += blockDim.x * gridDim.x;
}
}
static ERL_NIF_TERM
activate_relu(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM b_bin;
int n;
float *a,*b;
float *dev_a, *dev_b;
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
if (!enif_inspect_binary(env, argv[1], &a_bin )) return enif_make_int(env,3);
a = (float *) a_bin.data;
b = (float *) enif_make_new_binary(env, n * sizeof(float), &b_bin);
// Allocate for GPU
CHECK(cudaMalloc((void**)&dev_a, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_b, n * sizeof(float)));
// copy from host a,b to GPU dev_a, dev_b
CHECK(cudaMemcpy(dev_a, a, n * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_b, b, n * sizeof(float), cudaMemcpyHostToDevice));
relu_kernel <<<128, 128>>>(dev_a, dev_b, n);
// copy to host b from GPU dev_b
CHECK(cudaMemcpy(b, dev_b, n * sizeof(float), cudaMemcpyDeviceToHost));
// free
cudaFree(dev_a);
cudaFree(dev_b);
return(b_bin);
}
static ERL_NIF_TERM
activate_softmax(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM b_bin;
int r1, c1, n, i, j, k;
float *a,*b;
float max,sum,delta;
if (!enif_get_int(env, argv[0], &r1)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c1)) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &a_bin )) return enif_make_int(env,3);
n = r1*c1;
a = (float *) a_bin.data;
b = (float *) enif_make_new_binary(env, n * sizeof(float), &b_bin);
//calculate softmax
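// subtract the row maximum before exp for numerical stability; delta keeps the denominator away from zero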
delta = 0.01;
for(i=0;i<r1;i++){
for(j=0;j<c1;j++){
max = -3.402823e38;
for(k=0;k<c1;k++){
if(a[IDX2C(i,k,r1)] > max)
max = a[IDX2C(i,k,r1)];
}
sum = 0.0;
for(k=0;k<c1;k++){
sum = sum + exp(a[IDX2C(i,k,r1)] - max);
}
b[IDX2C(i,j,r1)] = exp(a[IDX2C(i,j,r1)] - max) / (sum+delta);
}
}
return(b_bin);
}
__global__ void differ_sigmoid_kernel(float *a, float *b, float *c, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n)
{
c[tid] = a[tid] * ((1 - SIGMOID(b[tid])) * SIGMOID(b[tid]));
tid += blockDim.x * gridDim.x;
}
}
static ERL_NIF_TERM
differ_sigmoid(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin, b_bin;
ERL_NIF_TERM c_bin;
int n;
float *a,*b,*c;
float *dev_a, *dev_b, *dev_c;
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
if (!enif_inspect_binary(env, argv[1], &a_bin )) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &b_bin)) return enif_make_int(env,3);
a = (float *) a_bin.data;
b = (float *) b_bin.data;
c = (float *) enif_make_new_binary(env, n * sizeof(float), &c_bin);
// Allocate for GPU
CHECK(cudaMalloc((void**)&dev_a, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_b, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_c, n * sizeof(float)));
// copy from host a,b to GPU dev_a, dev_b
CHECK(cudaMemcpy(dev_a, a, n * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_b, b, n * sizeof(float), cudaMemcpyHostToDevice));
differ_sigmoid_kernel <<<128, 128>>>(dev_a, dev_b, dev_c, n);
// copy to host c from GPU dev_c
CHECK(cudaMemcpy(c, dev_c, n * sizeof(float), cudaMemcpyDeviceToHost));
// free
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
return(c_bin);
}
__global__ void differ_tanh_kernel(float *a, float *b, float *c, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n)
{
c[tid] = a[tid] * (1/(cosh(b[tid]) * cosh(b[tid])));
tid += blockDim.x * gridDim.x;
}
}
static ERL_NIF_TERM
differ_tanh(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin, b_bin;
ERL_NIF_TERM c_bin;
int n;
float *a,*b,*c;
float *dev_a, *dev_b, *dev_c;
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
if (!enif_inspect_binary(env, argv[1], &a_bin )) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &b_bin)) return enif_make_int(env,3);
a = (float *) a_bin.data;
b = (float *) b_bin.data;
c = (float *) enif_make_new_binary(env, n * sizeof(float), &c_bin);
// Allocate for GPU
CHECK(cudaMalloc((void**)&dev_a, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_b, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_c, n * sizeof(float)));
// copy from host a,b to GPU dev_a, dev_b
CHECK(cudaMemcpy(dev_a, a, n * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_b, b, n * sizeof(float), cudaMemcpyHostToDevice));
differ_tanh_kernel <<<128, 128>>>(dev_a, dev_b, dev_c, n);
// copy to host c from GPU dev_c
CHECK(cudaMemcpy(c, dev_c, n * sizeof(float), cudaMemcpyDeviceToHost));
// free
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
return(c_bin);
}
__global__ void differ_relu_kernel(float *a, float *b, float *c, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n)
{
if(b[tid] >= 0)
c[tid] = a[tid];
else
c[tid] = 0.0;
tid += blockDim.x * gridDim.x;
}
}
static ERL_NIF_TERM
differ_relu(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin, b_bin;
ERL_NIF_TERM c_bin;
int n;
float *a,*b,*c;
float *dev_a, *dev_b, *dev_c;
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
if (!enif_inspect_binary(env, argv[1], &a_bin )) return enif_make_int(env,3);
if (!enif_inspect_binary(env, argv[2], &b_bin)) return enif_make_int(env,4);
a = (float *) a_bin.data;
b = (float *) b_bin.data;
c = (float *) enif_make_new_binary(env, n * sizeof(float), &c_bin);
// Allocate for GPU
CHECK(cudaMalloc((void**)&dev_a, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_b, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_c, n * sizeof(float)));
// copy from host a,b to GPU dev_a, dev_b
CHECK(cudaMemcpy(dev_a, a, n * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_b, b, n * sizeof(float), cudaMemcpyHostToDevice));
differ_relu_kernel <<<128, 128>>>(dev_a, dev_b, dev_c, n);
// copy to host c from GPU dev_c
CHECK(cudaMemcpy(c, dev_c, n * sizeof(float), cudaMemcpyDeviceToHost));
// free
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
return(c_bin);
}
__global__ void smult_kernel(float d, float *a, float *b, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n)
{
b[tid] = d * a[tid];
tid += blockDim.x * gridDim.x;
}
}
static ERL_NIF_TERM
smult1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM b_bin;
int n;
float *a,*b;
float *dev_a, *dev_b;
double s;
if (!enif_get_double(env, argv[0], &s)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &n)) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &a_bin )) return enif_make_int(env,3);
a = (float *) a_bin.data;
b = (float *) enif_make_new_binary(env, n * sizeof(float), &b_bin);
// Allocate for GPU
CHECK(cudaMalloc((void**)&dev_a, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_b, n * sizeof(float)));
// copy from host a,b to GPU dev_a, dev_b
CHECK(cudaMemcpy(dev_a, a, n * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_b, b, n * sizeof(float), cudaMemcpyHostToDevice));
smult_kernel <<<128, 128>>>((float)s, dev_a, dev_b, n);
// copy to host b from GPU dev_b
CHECK(cudaMemcpy(b, dev_b, n * sizeof(float), cudaMemcpyDeviceToHost));
// free
cudaFree(dev_a);
cudaFree(dev_b);
return(b_bin);
}
static ERL_NIF_TERM
trace1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM result;
int r1, c1, i, j;
float *a;
float trace;
if (!enif_get_int(env, argv[0], &r1)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c1)) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &a_bin )) return enif_make_int(env,3);
a = (float *) a_bin.data;
trace = 0.0;
for(i=0;i<r1;i++){
for(j=0;j<c1;j++){
if(i==j)
trace = trace + a[IDX2C(i,j,r1)];
}
}
result = enif_make_double(env,trace);
return(result);
}
static ERL_NIF_TERM
mean_square(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin,b_bin;
ERL_NIF_TERM result;
int r1, c1, i, j;
float *a, *b;
float d,s;
if (!enif_get_int(env, argv[0], &r1)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c1)) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &a_bin )) return enif_make_int(env,3);
if (!enif_inspect_binary(env, argv[3], &b_bin )) return enif_make_int(env,4);
a = (float *) a_bin.data;
b = (float *) b_bin.data;
s = 0.0;
for(i=0;i<r1;i++){
for (j=0;j<c1;j++){
d = a[IDX2C(i,j,r1)] - b[IDX2C(i,j,r1)];
s = s + d*d;
}
}
s = s / (2.0*(float(r1)));
result = enif_make_double(env,s);
return(result);
}
static ERL_NIF_TERM
cross_entropy(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin,b_bin;
ERL_NIF_TERM result;
int r1, c1, i, j;
float *a, *b;
float d,s,delta;
if (!enif_get_int(env, argv[0], &r1)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c1)) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &a_bin )) return enif_make_int(env,3);
if (!enif_inspect_binary(env, argv[3], &b_bin )) return enif_make_int(env,4);
a = (float *) a_bin.data;
b = (float *) b_bin.data;
delta = 1e-7;
s = 0.0;
for(i=0;i<r1;i++){
for (j=0;j<c1;j++){
d = a[IDX2C(i,j,r1)] + delta;
s = s + b[IDX2C(i,j,r1)] * log(d);
}
}
s = -1.0 * s / (float)r1;
result = enif_make_double(env,s);
return(result);
}
static ERL_NIF_TERM
elt1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM result;
int r1, c1, i, j;
float *a;
if (!enif_get_int(env, argv[0], &r1)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c1)) return enif_make_int(env,2);
if (!enif_get_int(env, argv[2], &i)) return enif_make_int(env,3);
if (!enif_get_int(env, argv[3], &j)) return enif_make_int(env,4);
if (!enif_inspect_binary(env, argv[4], &a_bin )) return enif_make_int(env,5);
a = (float *) a_bin.data;
result = enif_make_double(env,(double)a[IDX2C(i,j,r1)]);
return(result);
}
static ERL_NIF_TERM
set1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM b_bin;
int r1, c1, n, i, j, x, y;
float *a,*b;
double val;
if (!enif_get_int(env, argv[0], &r1)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c1)) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &a_bin )) return enif_make_int(env,3);
if (!enif_get_int(env, argv[3], &x)) return enif_make_int(env,4);
if (!enif_get_int(env, argv[4], &y)) return enif_make_int(env,5);
if (!enif_get_double(env, argv[5], &val)) return enif_make_int(env,6);
n = r1*c1;
a = (float *) a_bin.data;
b = (float *) enif_make_new_binary(env, n * sizeof(float), &b_bin);
for(i=0;i<r1;i++){
for(j=0;j<c1;j++){
if(i==x && j==y)
b[IDX2C(i,j,r1)] = (float)val;
else
b[IDX2C(i,j,r1)] = a[IDX2C(i,j,r1)];
}
}
return(b_bin);
}
static ERL_NIF_TERM
add_diff1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM b_bin;
int r1, c1, n, i, j, x, y;
float *a,*b;
double val;
if (!enif_get_int(env, argv[0], &r1)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c1)) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &a_bin )) return enif_make_int(env,3);
if (!enif_get_int(env, argv[3], &x)) return enif_make_int(env,4);
if (!enif_get_int(env, argv[4], &y)) return enif_make_int(env,5);
if (!enif_get_double(env, argv[5], &val)) return enif_make_int(env,6);
n = r1*c1;
a = (float *) a_bin.data;
b = (float *) enif_make_new_binary(env, n * sizeof(float), &b_bin);
for(i=0;i<r1;i++){
for(j=0;j<c1;j++){
if(i==x && j==y)
b[IDX2C(i,j,r1)] = a[IDX2C(i,j,r1)] + (float)val;
else
b[IDX2C(i,j,r1)] = a[IDX2C(i,j,r1)];
}
}
return(b_bin);
}
static ERL_NIF_TERM
add_diff2(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM b_bin;
int n1, c1, h1, w1, n, i, j, k, l, n2, c2, h2, w2;
float *a,*b;
double val;
if (!enif_get_int(env, argv[0], &n1)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c1)) return enif_make_int(env,2);
if (!enif_get_int(env, argv[2], &h1)) return enif_make_int(env,3);
if (!enif_get_int(env, argv[3], &w1)) return enif_make_int(env,4);
if (!enif_inspect_binary(env, argv[4], &a_bin )) return enif_make_int(env,5);
if (!enif_get_int(env, argv[5], &n2)) return enif_make_int(env,6);
if (!enif_get_int(env, argv[6], &c2)) return enif_make_int(env,7);
if (!enif_get_int(env, argv[7], &h2)) return enif_make_int(env,8);
if (!enif_get_int(env, argv[8], &w2)) return enif_make_int(env,9);
if (!enif_get_double(env, argv[9], &val)) return enif_make_int(env,10);
n = n1*c1*h1*w1;
a = (float *) a_bin.data;
b = (float *) enif_make_new_binary(env, n * sizeof(float), &b_bin);
for(i=0;i<n1;i++){
for(j=0;j<c1;j++){
for(k=0;k<h1;k++){
for(l=0;l<w1;l++){
if(i==n2 && j==c2 && k==h2 && l==w2){
b[IDX4C(i,j,k,l,c1,h1,w1)] = a[IDX4C(i,j,k,l,c1,h1,w1)] + (float)val;
}
else {
b[IDX4C(i,j,k,l,c1,h1,w1)] = a[IDX4C(i,j,k,l,c1,h1,w1)];
}
}
}
}
}
return(b_bin);
}
static ERL_NIF_TERM
average1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM b_bin;
int r1, c1, i, j;
float *a,*b;
float sum;
if (!enif_get_int(env, argv[0], &r1)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c1)) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &a_bin )) return enif_make_int(env,3);
a = (float *) a_bin.data;
b = (float *) enif_make_new_binary(env, c1 * sizeof(float), &b_bin);
for(j=0;j<c1;j++){
sum = 0.0;
for(i=0;i<r1;i++){
sum = sum + a[IDX2C(i,j,r1)];
}
b[j] = sum / (float)r1;
}
return(b_bin);
}
/*
1st arg row-size of matrix
2nd arg col-size of matrix
3rd arg matrix data binary
*/
static ERL_NIF_TERM
sum1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM result;
int r1, c1, i, j;
float *a;
float sum;
if (!enif_get_int(env, argv[0], &r1)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c1)) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &a_bin )) return enif_make_int(env,3);
a = (float *) a_bin.data;
sum = 0.0;
for(i=0;i<r1;i++){
for(j=0;j<c1;j++){
sum = sum + a[IDX2C(i,j,r1)];
}
}
result = enif_make_double(env,sum);
return(result);
}
/*
transfer a 2-dim matrix to a list
*/
static ERL_NIF_TERM
to_list1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM head,list;
int r1, c1, i, j;
float *a;
if (!enif_get_int(env, argv[0], &r1)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c1)) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &a_bin )) return enif_make_int(env,3);
a = (float *) a_bin.data;
list = enif_make_list(env, 0);
for(i=r1-1;i>=0;i--){
for(j=c1-1;j>=0;j--){
head = enif_make_double(env,(double)a[IDX2C(i,j,r1)]);
list = enif_make_list_cell(env,head,list);
}
}
return(list);
}
/*
transfer a 3-dim matrix to a list
*/
static ERL_NIF_TERM
to_list2(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM head,list;
int c, h, w, i, j, k;
float *a;
if (!enif_get_int(env, argv[0], &c)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &h)) return enif_make_int(env,2);
if (!enif_get_int(env, argv[2], &w)) return enif_make_int(env,3);
if (!enif_inspect_binary(env, argv[3], &a_bin )) return enif_make_int(env,4);
a = (float *) a_bin.data;
list = enif_make_list(env, 0);
for(i=c-1;i>=0;i--){
for(j=h-1;j>=0;j--){
for(k=w-1;k>=0;k--){
head = enif_make_double(env,(double)a[IDX3C(i,j,k,h,w)]);
list = enif_make_list_cell(env,head,list);
}
}
}
return(list);
}
/*
transfer a 4-dim matrix to a list
*/
static ERL_NIF_TERM
to_list3(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM head,list;
int n, c, h, w, i, j, k, l;
float *a;
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c)) return enif_make_int(env,2);
if (!enif_get_int(env, argv[2], &h)) return enif_make_int(env,3);
if (!enif_get_int(env, argv[3], &w)) return enif_make_int(env,4);
if (!enif_inspect_binary(env, argv[4], &a_bin )) return enif_make_badarg(env);
a = (float *) a_bin.data;
list = enif_make_list(env, 0);
for(i=n-1;i>=0;i--){
for(j=c-1;j>=0;j--){
for(k=h-1;k>=0;k--){
for(l=w-1;l>=0;l--){
head = enif_make_double(env,(double)a[IDX4C(i,j,k,l,c,h,w)]);
list = enif_make_list_cell(env,head,list);
}
}
}
}
return(list);
}
__global__ void dropout1_kernel(float *a, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n)
{
a[tid] = 1.0;
tid += blockDim.x * gridDim.x;
}
}
/*
1st arg size of mask tensor
2nd arg rate of dropout
return mask tensor
each element of the mask tensor is 1.0, except that a fraction of the elements
equal to the dropout rate is set to 0.0.
in the forward and backward passes, take the Hadamard product with this mask tensor
*/
static ERL_NIF_TERM
dropout1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ERL_NIF_TERM a_bin;
int n,count,i,j;
float *a,*dev_a;
double dropout_rate;
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
if (!enif_get_double(env, argv[1], &dropout_rate)) return enif_make_int(env,2);
a = (float *) enif_make_new_binary(env, n * sizeof(float), &a_bin);
// Allocate for GPU
CHECK(cudaMalloc((void**)&dev_a, n * sizeof(float)));
// copy from host a to GPU dev_a
CHECK(cudaMemcpy(dev_a, a, n * sizeof(float), cudaMemcpyHostToDevice));
dropout1_kernel << <128, 128 >> >(dev_a, n);
// copy to host a from GPU dev_a
CHECK(cudaMemcpy(a, dev_a, n * sizeof(float), cudaMemcpyDeviceToHost));
// dropout
count = (int)(double(n)*dropout_rate);
for(i=0;i<count;i++){
j = rand() % n;
a[j] = 0.0;
}
// free
cudaFree(dev_a);
return(a_bin);
}
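/*
Illustrative sketch, not part of the original library: how the mask returned by
dropout1 might be applied as a Hadamard product on the host. In practice the
element-wise multiply is presumably done on the GPU (e.g. via emult1); the
function name and signature below are hypothetical.
*/
static void apply_dropout_mask_sketch(const float *x, const float *mask, float *y, int n) {
    // y = x (*) mask : elements whose mask value is 0.0 are dropped
    for (int i = 0; i < n; i++) {
        y[i] = x[i] * mask[i];
    }
}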
__global__ void sgd1_kernel(float *a, float *b, float *c, float lr, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n)
{
c[tid] = a[tid] - b[tid]*lr;
tid += blockDim.x * gridDim.x;
}
}
/*
w - g*lr
w is weight matrix.
g is gradient matrix.
zero elements of w come from the dropout mask; note that the kernel below applies the update to every element unconditionally.
return updated weight matrix.
1st arg is size of vectorized matrix
2nd arg is weight matrix or tensor
3rd arg is gradient matrix or tensor
4th arg is learning rate
*/
static ERL_NIF_TERM
sgd1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin, b_bin;
ERL_NIF_TERM c_bin;
int n;
float *a,*b,*c,*dev_a, *dev_b, *dev_c;
float lr;
double learning_rate;
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
if (!enif_inspect_binary(env, argv[1], &a_bin )) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &b_bin)) return enif_make_int(env,3);
if (!enif_get_double(env, argv[3], &learning_rate)) return enif_make_int(env,4);
a = (float *) a_bin.data;
b = (float *) b_bin.data;
c = (float *) enif_make_new_binary(env, n * sizeof(float), &c_bin);
lr = (float) learning_rate;
// Allocate for GPU
CHECK(cudaMalloc((void**)&dev_a, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_b, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_c, n * sizeof(float)));
// copy from host a,b to GPU dev_a, dev_b
CHECK(cudaMemcpy(dev_a, a, n * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_b, b, n * sizeof(float), cudaMemcpyHostToDevice));
sgd1_kernel << <128, 128 >> >(dev_a, dev_b, dev_c, lr, n);
// copy to host c from GPU dev_c
CHECK(cudaMemcpy(c, dev_c, n * sizeof(float), cudaMemcpyDeviceToHost));
// free
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
return(c_bin);
}
/* momentum SGD; the kernel below computes
v1 = 0.9 * v - lr * g
w1 = w + v1
(the Matrex CPU version was: def momentum(v, g, lr) do Matrex.apply(v, g, fn v, g -> 0.5 * v - lr * g end) end)
*/
__global__ void momentum_kernel(float *a, float *b, float *c, float *d, float *e, float lr, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n)
{
d[tid] = ((0.9 * b[tid]) - (lr * c[tid]));
e[tid] = a[tid] + d[tid];
tid += blockDim.x * gridDim.x;
}
}
/*
1st arg row-size of each vectorized matrix
2nd arg weight-matrix (a)
3rd arg v-matrix (b)
4th arg gradient-matrix (c)
5th arg learning rate
return tuple {next_v-matrix, weight-matrix}
*/
static ERL_NIF_TERM
momentum1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin,b_bin,c_bin;
ERL_NIF_TERM d_bin,e_bin,tuple;
int n;
float *a,*b,*c,*d,*e;
float *dev_a, *dev_b, *dev_c ,*dev_d, *dev_e;
float lr;
double learning_rate;
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
if (!enif_inspect_binary(env, argv[1], &a_bin)) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &b_bin )) return enif_make_int(env,3);
if (!enif_inspect_binary(env, argv[3], &c_bin )) return enif_make_int(env,4);
if (!enif_get_double(env, argv[4], &learning_rate)) return enif_make_int(env,5);
a = (float *) a_bin.data;
b = (float *) b_bin.data;
c = (float *) c_bin.data;
d = (float *) enif_make_new_binary(env, n * sizeof(float), &d_bin);
e = (float *) enif_make_new_binary(env, n * sizeof(float), &e_bin);
lr = (float) learning_rate;
// Allocate for GPU
CHECK(cudaMalloc((void**)&dev_a, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_b, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_c, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_d, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_e, n * sizeof(float)));
// copy from host a,b,c,d,e to GPU dev_a..dev_e
CHECK(cudaMemcpy(dev_a, a, n * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_b, b, n * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_c, c, n * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_d, d, n * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_e, e, n * sizeof(float), cudaMemcpyHostToDevice));
momentum_kernel << <128, 128 >> >(dev_a, dev_b, dev_c, dev_d, dev_e, lr, n);
// copy to host d,e from GPU dev_d,dev_e
CHECK(cudaMemcpy(d, dev_d, n * sizeof(float), cudaMemcpyDeviceToHost));
CHECK(cudaMemcpy(e, dev_e, n * sizeof(float), cudaMemcpyDeviceToHost));
// free
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
cudaFree(dev_d);
cudaFree(dev_e);
tuple = enif_make_tuple2(env,d_bin,e_bin);
return(tuple);
}
/* ADAGRAD
h1 = h + grad*grad
lr1 = lr/(sqrt(h1))
w1 = w - lr1 * grad
a[] = w
b[] = h
c[] = grad
d[] = h1
e[] = w1
*/
__global__ void adagrad_kernel(float *a, float *b, float *c, float *d, float *e, float lr, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
float lr1;
while (tid < n)
{
d[tid] = b[tid] + c[tid]*c[tid];
if(d[tid] != 0.0)
lr1 = lr/(sqrt(d[tid]));
else
lr1 = lr;
e[tid] = a[tid] - lr1 * c[tid];
tid += blockDim.x * gridDim.x;
}
}
/*
1st arg row-size of each vectorized matrix
2nd arg weight-matrix (a_bin)
3rd arg h-matrix (b_bin)
4th arg grad-matrix (c_bin)
5th arg learning rate
return tuple {new-h,new-w}
*/
static ERL_NIF_TERM
adagrad1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin,b_bin,c_bin;
ERL_NIF_TERM d_bin,e_bin,tuple;
int n;
float *a,*b,*c,*d,*e;
float *dev_a, *dev_b, *dev_c, *dev_d, *dev_e;
float lr;
double learning_rate;
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
if (!enif_inspect_binary(env, argv[1], &a_bin)) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &b_bin)) return enif_make_int(env,3);
if (!enif_inspect_binary(env, argv[3], &c_bin)) return enif_make_int(env,4);
if (!enif_get_double(env, argv[4], &learning_rate)) return enif_make_int(env,5);
a = (float *) a_bin.data;
b = (float *) b_bin.data;
c = (float *) c_bin.data;
d = (float *) enif_make_new_binary(env, n * sizeof(float), &d_bin);
e = (float *) enif_make_new_binary(env, n * sizeof(float), &e_bin);
lr = (float) learning_rate;
// Allocate for GPU
CHECK(cudaMalloc((void**)&dev_a, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_b, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_c, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_d, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_e, n * sizeof(float)));
// copy from host a,b,c,d,e to GPU dev_a..dev_e
CHECK(cudaMemcpy(dev_a, a, n * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_b, b, n * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_c, c, n * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_d, d, n * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_e, e, n * sizeof(float), cudaMemcpyHostToDevice));
adagrad_kernel << <128, 128 >> >(dev_a, dev_b, dev_c, dev_d, dev_e, lr, n);
// copy to host d,e from GPU dev_d,dev_e
CHECK(cudaMemcpy(d, dev_d, n * sizeof(float), cudaMemcpyDeviceToHost));
CHECK(cudaMemcpy(e, dev_e, n * sizeof(float), cudaMemcpyDeviceToHost));
// free
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
cudaFree(dev_d);
cudaFree(dev_e);
tuple = enif_make_tuple2(env,d_bin,e_bin);
return(tuple);
}
/* RMSprop
h1 = alpha * h + (1 - alpha) * grad*grad
lr1 = lr/(sqrt(h1) + epsilon)
w1 = w - lr1 * grad
a[] = w
b[] = h
c[] = grad
d[] = h1
e[] = w1
*/
__global__ void rms_kernel(float *a, float *b, float *c, float *d, float *e, float lr, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
float lr1,alpha,epsilon;
alpha = 0.99;
epsilon = 10.0e-7;
while (tid < n)
{
d[tid] = alpha * b[tid] + (1-alpha)*c[tid]*c[tid];
lr1 = lr/(sqrt(d[tid])+epsilon);
e[tid] = a[tid] - lr1*c[tid];
tid += blockDim.x * gridDim.x;
}
}
/*
1st arg row-size of each vectorized matrix
2nd arg weight-matrix (a_bin)
3rd arg h-matrix (b_bin)
4th arg grad-matrix (c_bin)
5th arg learning rate
return tuple {new-h,new-w}
*/
static ERL_NIF_TERM
rms1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin,b_bin,c_bin;
ERL_NIF_TERM d_bin,e_bin,tuple;
int n;
float *a,*b,*c,*d,*e;
float *dev_a, *dev_b, *dev_c, *dev_d, *dev_e;
float lr;
double learning_rate;
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
if (!enif_inspect_binary(env, argv[1], &a_bin)) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &b_bin)) return enif_make_int(env,3);
if (!enif_inspect_binary(env, argv[3], &c_bin)) return enif_make_int(env,4);
if (!enif_get_double(env, argv[4], &learning_rate)) return enif_make_int(env,5);
a = (float *) a_bin.data;
b = (float *) b_bin.data;
c = (float *) c_bin.data;
d = (float *) enif_make_new_binary(env, n * sizeof(float), &d_bin);
e = (float *) enif_make_new_binary(env, n * sizeof(float), &e_bin);
lr = (float) learning_rate;
// Allocate for GPU
CHECK(cudaMalloc((void**)&dev_a, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_b, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_c, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_d, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_e, n * sizeof(float)));
// copy from host a,b,c,d,e to GPU dev_a..dev_e
CHECK(cudaMemcpy(dev_a, a, n * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_b, b, n * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_c, c, n * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_d, d, n * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_e, e, n * sizeof(float), cudaMemcpyHostToDevice));
rms_kernel << <128, 128 >> >(dev_a, dev_b, dev_c, dev_d, dev_e, lr, n);
// copy to host d,e from GPU dev_d,dev_e
CHECK(cudaMemcpy(d, dev_d, n * sizeof(float), cudaMemcpyDeviceToHost));
CHECK(cudaMemcpy(e, dev_e, n * sizeof(float), cudaMemcpyDeviceToHost));
// free
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
cudaFree(dev_d);
cudaFree(dev_e);
tuple = enif_make_tuple2(env,d_bin,e_bin);
return(tuple);
}
/* ADAM
beta1 = 0.9
beta2 = 0.999
epsilon = 10.0e-7
lr = learning rate (alpha in the literature, typically 0.001)
m1 = beta1 * m + (1 - beta1) * grad
v1 = beta2 * v + (1 - beta2) * grad^2
m2 = m1/(1 - beta1)
v2 = v1/(1 - beta2)
w1 = w - lr * m2/(sqrt(v2)+epsilon)
a[] is w
b[] is m
c[] is v
d[] is grad
e[] is m1
f[] is v1
g[] is w1
*/
__global__ void adam_kernel(float *a, float *b, float *c, float *d, float *e, float *f, float *g, float lr, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
float beta1,beta2,epsilon,m2,v2;
beta1 = 0.9;
beta2 = 0.999;
epsilon = 10.0e-7;
//alpha = 0.001;
while (tid < n){
e[tid] = beta1 * b[tid] + (1 - beta1) * d[tid];
f[tid] = beta2 * c[tid] + (1 - beta2) * d[tid]*d[tid];
m2 = e[tid]/(1-beta1);
v2 = f[tid]/(1-beta2);
g[tid] = a[tid] - lr * (m2/(sqrt(v2)+epsilon));
tid += blockDim.x * gridDim.x;
}
}
/*
1st arg row-size of each vectorized matrix
2nd arg w-matrix (a_bin)
3rd arg m-matrix (b_bin)
4th arg v-matrix (c_bin)
5th arg grad-matrix (d_bin)
6th arg learning rate
return tuple {m1,v1,w1}
*/
static ERL_NIF_TERM
adam1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin,b_bin,c_bin,d_bin;
ERL_NIF_TERM e_bin,f_bin,g_bin,tuple;
int n;
float *a,*b,*c,*d,*e,*f,*g;
float *dev_a, *dev_b, *dev_c, *dev_d, *dev_e, *dev_f, *dev_g;
float lr;
double learning_rate;
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
if (!enif_inspect_binary(env, argv[1], &a_bin)) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &b_bin)) return enif_make_int(env,3);
if (!enif_inspect_binary(env, argv[3], &c_bin)) return enif_make_int(env,4);
if (!enif_inspect_binary(env, argv[4], &d_bin)) return enif_make_int(env,5);
if (!enif_get_double(env, argv[5], &learning_rate)) return enif_make_int(env,6);
a = (float *) a_bin.data;
b = (float *) b_bin.data;
c = (float *) c_bin.data;
d = (float *) d_bin.data;
e = (float *) enif_make_new_binary(env, n * sizeof(float), &e_bin);
f = (float *) enif_make_new_binary(env, n * sizeof(float), &f_bin);
g = (float *) enif_make_new_binary(env, n * sizeof(float), &g_bin);
lr = (float) learning_rate;
// Allocate for GPU
CHECK(cudaMalloc((void**)&dev_a, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_b, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_c, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_d, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_e, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_f, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_g, n * sizeof(float)));
// copy from host a..g to GPU dev_a..dev_g
CHECK(cudaMemcpy(dev_a, a, n * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_b, b, n * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_c, c, n * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_d, d, n * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_e, e, n * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_f, f, n * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_g, g, n * sizeof(float), cudaMemcpyHostToDevice));
adam_kernel << <128, 128 >> >(dev_a, dev_b, dev_c, dev_d, dev_e, dev_f, dev_g, lr, n);
// copy to host e,f,g from GPU dev_e,dev_f,dev_g
CHECK(cudaMemcpy(e, dev_e, n * sizeof(float), cudaMemcpyDeviceToHost));
CHECK(cudaMemcpy(f, dev_f, n * sizeof(float), cudaMemcpyDeviceToHost));
CHECK(cudaMemcpy(g, dev_g, n * sizeof(float), cudaMemcpyDeviceToHost));
// free
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
cudaFree(dev_d);
cudaFree(dev_e);
cudaFree(dev_f);
cudaFree(dev_g);
tuple = enif_make_tuple3(env,e_bin,f_bin,g_bin);
return(tuple);
}
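/*
Illustrative sketch, not part of the original library: one Adam update computed on
the host for a single element, mirroring adam_kernel above. The function name is
hypothetical; something like this can be used together with is_near1 when
validating the GPU result.
*/
static float adam_step_host_sketch(float w, float m, float v, float grad, float lr) {
    const float beta1 = 0.9, beta2 = 0.999, epsilon = 10.0e-7;
    float m1 = beta1 * m + (1.0 - beta1) * grad;        // first-moment estimate
    float v1 = beta2 * v + (1.0 - beta2) * grad * grad; // second-moment estimate
    float m2 = m1 / (1.0 - beta1);                      // same bias correction as adam_kernel
    float v2 = v1 / (1.0 - beta2);
    return w - lr * (m2 / (sqrt(v2) + epsilon));
}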
/*
1st arg row-size of matrix
2nd arg col-size of matrix
3rd arg predicted matrix
4th arg list of labels; each element is an integer
return accuracy rate
*/
static ERL_NIF_TERM
accuracy1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM head,list,result;
int r1, c1, i, j, n, index,sum;
float *a;
double max,rate;
if (!enif_get_int(env, argv[0], &r1)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c1)) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &a_bin )) return enif_make_int(env,3);
a = (float *) a_bin.data;
// calculate accuracy
sum = 0;
list = argv[3];
for(i=0;i<r1;i++){
max = 0.0;
index = 0;
enif_get_list_cell(env, list, &head, &list);
enif_get_int(env,head,&n);
for(j=0;j<c1;j++){
if(a[IDX2C(i,j,r1)] > max){
max = a[IDX2C(i,j,r1)];
index = j;
}
}
if(index == n)
sum++;
}
rate = (double)sum / (double)r1;
result = enif_make_double(env,rate);
return(result);
}
/*
1st arg row-size of matrix
2nd arg col-size of matrix
3rd arg predicted matrix
4th arg list of labels; each element is an integer
return correct number
*/
static ERL_NIF_TERM
correct1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM head,list,result;
int r1, c1, i, j, n, index,sum;
float *a;
float max;
if (!enif_get_int(env, argv[0], &r1)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c1)) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &a_bin )) return enif_make_int(env,3);
a = (float *) a_bin.data;
// calculate correct number
sum = 0;
list = argv[3];
for(i=0;i<r1;i++){
max = 0.0;
index = 0;
enif_get_list_cell(env, list, &head, &list);
enif_get_int(env,head,&n);
for(j=0;j<c1;j++){
if(a[IDX2C(i,j,r1)] > max){
max = a[IDX2C(i,j,r1)];
index = j;
}
}
if(index == n)
sum++;
}
result = enif_make_double(env,(double)sum);
return(result);
}
static ERL_NIF_TERM
random_select1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin,b_bin;
ERL_NIF_TERM c_bin,d_bin,tuple;
int r1, c1, r2, c2, i, j, n, r;
float *a, *b, *c, *d;
if (!enif_get_int(env, argv[0], &r1)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c1)) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &a_bin )) return enif_make_int(env,3);
if (!enif_get_int(env, argv[3], &r2)) return enif_make_int(env,4);
if (!enif_get_int(env, argv[4], &c2)) return enif_make_int(env,5);
if (!enif_inspect_binary(env, argv[5], &b_bin )) return enif_make_int(env,6);
if (!enif_get_int(env, argv[6], &n)) return enif_make_int(env,7);
a = (float *) a_bin.data;
b = (float *) b_bin.data;
c = (float *) enif_make_new_binary(env, n*c1 * sizeof(float), &c_bin);
d = (float *) enif_make_new_binary(env, n*c2 * sizeof(float), &d_bin);
// random-select
for(i=0;i<n;i++){
r = rand() % r1;
for(j=0;j<c1;j++){
c[IDX2C(i,j,n)] = a[IDX2C(r,j,r1)];
}
for(j=0;j<c2;j++){
d[IDX2C(i,j,n)] = b[IDX2C(r,j,r2)];
}
}
tuple = enif_make_tuple2(env,c_bin,d_bin);
return(tuple);
}
static ERL_NIF_TERM
random_select2(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin,b_bin;
ERL_NIF_TERM c_bin,d_bin,tuple;
int n1,c1,h1,w1,r2,c2, i, j, k, l, n, r;
float *a, *b, *c, *d;
if (!enif_get_int(env, argv[0], &n1)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c1)) return enif_make_int(env,2);
if (!enif_get_int(env, argv[2], &h1)) return enif_make_int(env,3);
if (!enif_get_int(env, argv[3], &w1)) return enif_make_int(env,4);
if (!enif_inspect_binary(env, argv[4], &a_bin )) return enif_make_int(env,5);
if (!enif_get_int(env, argv[5], &r2)) return enif_make_int(env,6);
if (!enif_get_int(env, argv[6], &c2)) return enif_make_int(env,7);
if (!enif_inspect_binary(env, argv[7], &b_bin )) return enif_make_int(env,8);
if (!enif_get_int(env, argv[8], &n)) return enif_make_int(env,9);
a = (float *) a_bin.data;
b = (float *) b_bin.data;
c = (float *) enif_make_new_binary(env, n*c1*h1*w1 * sizeof(float), &c_bin);
d = (float *) enif_make_new_binary(env, n*r2*c2 * sizeof(float), &d_bin);
// random-select
for(i=0;i<n;i++){
r = rand() % n1;
for(j=0;j<c1;j++){
for(k=0;k<h1;k++){
for(l=0;l<w1;l++){
c[IDX4C(i,j,k,l,c1,h1,w1)] = a[IDX4C(r,j,k,l,c1,h1,w1)];
}
}
}
for(j=0;j<c2;j++){
d[IDX2C(i,j,n)] = b[IDX2C(r,j,r2)];
}
}
tuple = enif_make_tuple2(env,c_bin,d_bin);
return(tuple);
}
static ERL_NIF_TERM
is_near1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin,b_bin;
int i, n, sw;
float *a, *b;
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
if (!enif_inspect_binary(env, argv[1], &a_bin )) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &b_bin )) return enif_make_int(env,3);
a = (float *) a_bin.data;
b = (float *) b_bin.data;
// near check
sw = 0;
for(i=0;i<n;i++){
if(fabsf(a[i]) > fabsf(b[i])*1.15 || fabsf(a[i]) < fabsf(b[i])*0.85){
printf("%f %f \r\n", a[i], b[i]);
sw = 1;
}
}
if(sw == 0)
return enif_make_int(env,1); //true
else
return enif_make_int(env,0); //false
}
static ERL_NIF_TERM
is_equal1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin,b_bin;
int i, n;
float *a, *b;
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
if (!enif_inspect_binary(env, argv[1], &a_bin )) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &b_bin )) return enif_make_int(env,3);
a = (float *) a_bin.data;
b = (float *) b_bin.data;
// equal check
for(i=0;i<n;i++){
if(a[i] != b[i]){
return enif_make_int(env,0); //false
}
}
return enif_make_int(env,1); //true
}
static ERL_NIF_TERM
analizer1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
int i, n, id;
float *a;
float max,min,sum;
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
if (!enif_inspect_binary(env, argv[1], &a_bin )) return enif_make_int(env,2);
if (!enif_get_int(env, argv[2], &id)) return enif_make_int(env,3);
a = (float *) a_bin.data;
// NaN / Inf check
for(i=0;i<n;i++){
if(isnan(a[i])){
return enif_make_int(env,9999);
}
if(isinf(a[i])){
return enif_make_int(env,9998);
}
}
// find max, min, average
max = -999999999;
min = 999999999;
sum = 0;
for(i=0;i<n;i++){
if(a[i] > max)
max = a[i];
if(a[i] < min)
min = a[i];
sum = sum+a[i];
}
printf("id max min average\r\n");
printf("%d %f %f %f \r\n", id, max, min, sum/(float)n);
return enif_make_int(env,1);
}
/*
1st arg in_n of tensor
2nd arg in_c of tensor
3rd arg in_h of tensor
4th arg in_w of tensor
5th arg binary of tensor
*/
static ERL_NIF_TERM
standardize1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM b_bin;
int in_n,in_c,in_h,in_w,n1,i,c1,h1,w1,count;
float *a,*b;
float sum,average;
if (!enif_get_int(env, argv[0], &in_n)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &in_c)) return enif_make_int(env,2);
if (!enif_get_int(env, argv[2], &in_h)) return enif_make_int(env,3);
if (!enif_get_int(env, argv[3], &in_w)) return enif_make_int(env,4);
if (!enif_inspect_binary(env, argv[4], &a_bin )) return enif_make_int(env,5);
n1 = in_n * in_c * in_h * in_w;
a = (float *) a_bin.data;
b = (float *) enif_make_new_binary(env, n1 * sizeof(float), &b_bin);
for(i=0;i<in_n;i++){
sum = 0.0;
for(c1=0;c1<in_c;c1++){
for(h1=0;h1<in_h;h1++){
for(w1=0;w1<in_w;w1++){
sum = sum + a[IDX4C(i,c1,h1,w1,in_c,in_h,in_w)];
}
}
}
count = in_c * in_h * in_w;
average = sum / (float)count;
for(c1=0;c1<in_c;c1++){
for(h1=0;h1<in_h;h1++){
for(w1=0;w1<in_w;w1++){
b[IDX4C(i,c1,h1,w1,in_c,in_h,in_w)] = a[IDX4C(i,c1,h1,w1,in_c,in_h,in_w)] - average;
}
}
}
}
return(b_bin);
}
/*
1st arg in_n of 3D tensor
2nd arg in_r of 3D tensor
3rd arg in_c of 3D tensor
4th arg binary of tensor
5th arg nth in_r of 3D tensor
*/
static ERL_NIF_TERM
pickup1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM b_bin;
int in_n,in_row,in_col,nth,n1,i,j;
float *a,*b;
if (!enif_get_int(env, argv[0], &in_n)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &in_row)) return enif_make_int(env,2);
if (!enif_get_int(env, argv[2], &in_col)) return enif_make_int(env,3);
if (!enif_inspect_binary(env, argv[3], &a_bin )) return enif_make_int(env,4);
if (!enif_get_int(env, argv[4], &nth)) return enif_make_int(env,5);
n1 = in_n * in_col;
a = (float *) a_bin.data;
b = (float *) enif_make_new_binary(env, n1 * sizeof(float), &b_bin);
for(i=0;i<in_n;i++){
for(j=0;j<in_col;j++){
b[IDX2C(i,j,in_n)] = a[IDX3C(i,nth,j,in_row,in_col)];
}
}
return(b_bin);
}
// define the array of ErlNifFunc
static ErlNifFunc nif_funcs[] = {
// {erl_function_name, erl_function_arity, c_function}
{"mult1", 6, mult1},
{"new1", 2, new1},
{"new2", 3, new2},
{"new3", 4, new3},
{"new4", 5, new4},
{"rand1", 1, rand1},
{"add1", 3, add1},
{"sub1", 3, sub1},
{"emult1", 3, emult1},
{"transpose1", 3, transpose1},
{"ident1", 1, ident1},
{"activate_sigmoid", 2 ,activate_sigmoid},
{"activate_tanh", 2 , activate_tanh},
{"activate_relu", 2, activate_relu},
{"activate_softmax", 3, activate_softmax},
{"differ_sigmoid", 3, differ_sigmoid},
{"differ_tanh", 3, differ_tanh},
{"differ_relu", 3, differ_relu},
{"smult1", 3, smult1},
{"trace1", 3, trace1},
{"mean_square", 4, mean_square},
{"cross_entropy", 4, cross_entropy},
{"elt1", 5, elt1},
{"set1", 6, set1},
{"add_diff1", 6, add_diff1},
{"add_diff2", 10, add_diff2},
{"average1", 3, average1},
{"sum1", 3, sum1},
{"to_list1", 3, to_list1},
{"to_list2", 4, to_list2},
{"to_list3", 5, to_list3},
{"dropout1", 2 , dropout1},
{"sgd1", 4, sgd1},
{"momentum1", 5, momentum1},
{"adagrad1", 5, adagrad1},
{"rms1", 5, rms1},
{"adam1", 6, adam1},
{"accuracy1", 4, accuracy1},
{"correct1", 4, correct1},
{"pooling1", 7, pooling1},
{"unpooling1", 8, unpooling1},
{"convolute1", 13, convolute1},
{"deconvolute1", 13, deconvolute1},
{"deconvolute2", 13, deconvolute2},
{"gradfilter1", 16, gradfilter1},
{"gradfilter2", 16, gradfilter2},
{"full1", 5, full1},
{"unfull1", 5, unfull1},
{"random_select1", 7, random_select1},
{"random_select2", 9, random_select2},
{"is_near1", 3, is_near1},
{"is_equal1", 3, is_equal1},
{"analizer1", 3, analizer1},
{"standardize1", 5, standardize1},
{"pickup1", 5, pickup1}
};
ERL_NIF_INIT(Elixir.Cumatrix, nif_funcs, NULL, NULL, NULL, NULL)
|
9f428ca5ac7e2cdcd4d503fffc408699f5e3b1bb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <accelerate_cuda.h>
extern "C" __global__ void map(const double* __restrict__ arrIn0_1, const double* __restrict__ arrIn0_0, double* __restrict__ arrOut_0)
{
const int shapeSize = 1;
const int gridSize = blockDim.x * gridDim.x;
int ix;
for (ix = blockDim.x * blockIdx.x + threadIdx.x; ix < shapeSize; ix += gridSize) {
const double x0 = arrIn0_0[ix];
arrOut_0[ix] = x0;
}
}
| 9f428ca5ac7e2cdcd4d503fffc408699f5e3b1bb.cu | #include <accelerate_cuda.h>
extern "C" __global__ void map(const double* __restrict__ arrIn0_1, const double* __restrict__ arrIn0_0, double* __restrict__ arrOut_0)
{
const int shapeSize = 1;
const int gridSize = blockDim.x * gridDim.x;
int ix;
for (ix = blockDim.x * blockIdx.x + threadIdx.x; ix < shapeSize; ix += gridSize) {
const double x0 = arrIn0_0[ix];
arrOut_0[ix] = x0;
}
}
|
400e6ae93b829ed5169a18cab872c7856a0348c3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Homework 2
// Image Blurring
//
// In this homework we are blurring an image. To do this, imagine that we have
// a square array of weight values. For each pixel in the image, imagine that we
// overlay this square array of weights on top of the image such that the center
// of the weight array is aligned with the current pixel. To compute a blurred
// pixel value, we multiply each pair of numbers that line up. In other words, we
// multiply each weight with the pixel underneath it. Finally, we add up all of the
// multiplied numbers and assign that value to our output for the current pixel.
// We repeat this process for all the pixels in the image.
// To help get you started, we have included some useful notes here.
//****************************************************************************
// For a color image that has multiple channels, we suggest separating
// the different color channels so that each color is stored contiguously
// instead of being interleaved. This will simplify your code.
// That is instead of RGBARGBARGBARGBA... we suggest transforming to three
// arrays (as in the previous homework we ignore the alpha channel again):
// 1) RRRRRRRR...
// 2) GGGGGGGG...
// 3) BBBBBBBB...
//
// The original layout is known as an Array of Structures (AoS), whereas the
// format we are converting to is known as a Structure of Arrays (SoA).
// As a warm-up, we will ask you to write the kernel that performs this
// separation. You should then write the "meat" of the assignment,
// which is the kernel that performs the actual blur. We provide code that
// re-combines your blurred results for each color channel.
//****************************************************************************
// You must fill in the gaussian_blur kernel to perform the blurring of the
// inputChannel, using the array of weights, and put the result in the outputChannel.
// Here is an example of computing a blur, using a weighted average, for a single
// pixel in a small image.
//
// Array of weights:
//
// 0.0 0.2 0.0
// 0.2 0.2 0.2
// 0.0 0.2 0.0
//
// Image (note that we align the array of weights to the center of the box):
//
// 1 2 5 2 0 3
// -------
// 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 +
// | |
// 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2
// | |
// 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3
// -------
// 9 6 5 0 3 9
//
// (1) (2) (3)
//
// A good starting place is to map each thread to a pixel as you have before.
// Then every thread can perform steps 2 and 3 in the diagram above
// completely independently of one another.
// Note that the array of weights is square, so its height is the same as its width.
// We refer to the array of weights as a filter, and we refer to its width with the
// variable filterWidth.
//****************************************************************************
// Your homework submission will be evaluated based on correctness and speed.
// We test each pixel against a reference solution. If any pixel differs by
// more than some small threshold value, the system will tell you that your
// solution is incorrect, and it will let you try again.
// Once you have gotten that working correctly, then you can think about using
// shared memory and having the threads cooperate to achieve better performance.
//****************************************************************************
// Also note that we've supplied a helpful debugging function called checkCudaErrors.
// You should wrap your allocation and copying statements like we've done in the
// code we're supplying you. Here is an example of the unsafe way to allocate
// memory on the GPU:
//
// hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols);
//
// Here is an example of the safe way to do the same thing:
//
// checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols));
//
// Writing code the safe way requires slightly more typing, but is very helpful for
// catching mistakes. If you write code the unsafe way and you make a mistake, then
// any subsequent kernels won't compute anything, and it will be hard to figure out
// why. Writing code the safe way will inform you as soon as you make a mistake.
// Finally, remember to free the memory you allocate at the end of the function.
//****************************************************************************
#include "utils.h"
template<typename T>
T min(T a, T b) {
return (a < b) ? a : b;
}
template<typename T>
T max(T a, T b) {
return (a > b) ? a : b;
}
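/*
Illustrative CPU sketch, not part of the assignment code: the clamped weighted
average described in the header comment above, computed for a single output pixel.
The function name is hypothetical; the GPU kernels below implement the same
clamping and accumulation.
*/
static float blur_pixel_reference(const unsigned char* channel, int numRows, int numCols,
                                  const float* filter, int filterWidth, int row, int col)
{
  const int half = filterWidth / 2;
  float result = 0.f;
  for (int fr = -half; fr <= half; ++fr) {
    for (int fc = -half; fc <= half; ++fc) {
      // clamp neighbor coordinates to the image bounds
      int r = row + fr; if (r < 0) r = 0; if (r > numRows - 1) r = numRows - 1;
      int c = col + fc; if (c < 0) c = 0; if (c > numCols - 1) c = numCols - 1;
      result += filter[(fr + half) * filterWidth + (fc + half)] * (float)channel[r * numCols + c];
    }
  }
  return result;
}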
__global__
void gaussian_blur(const unsigned char* const inputChannel,
unsigned char* const outputChannel,
int numRows, int numCols,
const float* const filter, const int filterWidth)
{
// TODO
// NOTE: Be sure to compute any intermediate results in floating point
// before storing the final result as unsigned char.
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
// if ( absolute_image_position_x >= numCols ||
// absolute_image_position_y >= numRows )
// {
// return;
// }
// NOTE: If a thread's absolute 2D position is within the image, but some of
// its neighbors are outside the image, then you will need to be extra careful. Instead
// of trying to read such a neighbor value from GPU memory (which won't work because
// the value is out of bounds), you should explicitly clamp the neighbor values you read
// to be within the bounds of the image. If this is not clear to you, then please refer
// to sequential reference solution for the exact clamping semantics you should follow.
int ix = threadIdx.x + blockIdx.x * blockDim.x;
int iy = threadIdx.y + blockIdx.y * blockDim.y;
extern __shared__ float s_data[];
int l = blockIdx.x * blockDim.x;
int r = (l + blockDim.x < numCols) ? (l + blockDim.x) : numCols;
int u = blockIdx.y * blockDim.y;
int b = (u + blockDim.y < numRows) ? (u + blockDim.y) : numRows;
int padding = filterWidth / 2;
int sizes[2] = {b - u + 2 * padding, r - l + 2 * padding};
int strides[2] = {blockDim.x + 2 * padding, 1};
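// cooperatively load this block's tile of the channel, plus a halo of filterWidth/2
// pixels on every side, into shared memory, clamping reads to the image borders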
for(int i = threadIdx.y; i < sizes[0]; i += blockDim.y) {
for(int j = threadIdx.x; j < sizes[1]; j += blockDim.x) {
int di = min(numRows - 1, max(0, u - padding + i));
int dj = min(numCols - 1, max(0, l - padding + j));
s_data[i * strides[0] + j] = inputChannel[di * numCols + dj];
// __syncthreads();
}
// __syncthreads();
}
__syncthreads();
if(ix >= numCols || iy >= numRows) {
return;
}
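// accumulate the filter window centered on this thread's pixel from the shared tile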
float out = 0;
for(int i = threadIdx.y; i < threadIdx.y + filterWidth; ++i) {
for(int j = threadIdx.x; j < threadIdx.x + filterWidth; ++j) {
int fi = (i - threadIdx.y) * filterWidth + (j - threadIdx.x);
out += s_data[i * strides[0] + j] * filter[fi];
}
}
outputChannel[ix + iy * numCols] = out;
}
//This kernel takes in an image represented as a uchar4 and splits
//it into three images consisting of only one color channel each
__global__
void separateChannels(const uchar4* const inputImageRGBA,
int numRows,
int numCols,
unsigned char* const redChannel,
unsigned char* const greenChannel,
unsigned char* const blueChannel)
{
// TODO
//
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
// if ( absolute_image_position_x >= numCols ||
// absolute_image_position_y >= numRows )
// {
// return;
// }
int ix = threadIdx.x + blockIdx.x * blockDim.x;
int iy = threadIdx.y + blockIdx.y * blockDim.y;
if(ix >= numCols || iy >= numRows) {
return;
}
int idx = ix + iy * numCols;
uchar4 pixel = inputImageRGBA[idx];
redChannel[idx] = pixel.x;
greenChannel[idx] = pixel.y;
blueChannel[idx] = pixel.z;
}
//This kernel takes in three color channels and recombines them
//into one image. The alpha channel is set to 255 to represent
//that this image has no transparency.
__global__
void recombineChannels(const unsigned char* const redChannel,
const unsigned char* const greenChannel,
const unsigned char* const blueChannel,
uchar4* const outputImageRGBA,
int numRows,
int numCols)
{
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
//make sure we don't try and access memory outside the image
//by having any threads mapped there return early
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
unsigned char red = redChannel[thread_1D_pos];
unsigned char green = greenChannel[thread_1D_pos];
unsigned char blue = blueChannel[thread_1D_pos];
//Alpha should be 255 for no transparency
uchar4 outputPixel = make_uchar4(red, green, blue, 255);
outputImageRGBA[thread_1D_pos] = outputPixel;
}
unsigned char *d_red, *d_green, *d_blue;
float *d_filter;
void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage,
const float* const h_filter, const size_t filterWidth)
{
//allocate memory for the three different channels
//original
checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(hipMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(hipMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage));
//TODO:
//Allocate memory for the filter on the GPU
//Use the pointer d_filter that we have already declared for you
//You need to allocate memory for the filter with hipMalloc
//be sure to use checkCudaErrors like the above examples to
//be able to tell if anything goes wrong
//IMPORTANT: Notice that we pass a pointer to a pointer to hipMalloc
size_t nbytes = sizeof(float) * filterWidth * filterWidth;
checkCudaErrors(hipMalloc(&d_filter, nbytes));
//TODO:
//Copy the filter on the host (h_filter) to the memory you just allocated
//on the GPU. hipMemcpy(dst, src, numBytes, hipMemcpyHostToDevice);
//Remember to use checkCudaErrors!
checkCudaErrors(hipMemcpy(d_filter, h_filter, nbytes, hipMemcpyHostToDevice));
}
void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA,
uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols,
unsigned char *d_redBlurred,
unsigned char *d_greenBlurred,
unsigned char *d_blueBlurred,
const int filterWidth)
{
//TODO: Set reasonable block size (i.e., number of threads per block)
int BLOCK_SIZE = 32;
const dim3 blockSize(BLOCK_SIZE, BLOCK_SIZE);
//TODO:
//Compute correct grid size (i.e., number of blocks per kernel launch)
//from the image size and and block size.
const dim3 gridSize((numCols + BLOCK_SIZE - 1) / BLOCK_SIZE,
(numRows + BLOCK_SIZE - 1) / BLOCK_SIZE);
//TODO: Launch a kernel for separating the RGBA image into different color channels
hipLaunchKernelGGL(( separateChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_inputImageRGBA, numRows, numCols, d_red, d_green, d_blue);
// Call hipDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
//TODO: Call your convolution kernel here 3 times, once for each color channel.
int padding = filterWidth / 2;
size_t nbytes_shared = sizeof(float) * (BLOCK_SIZE + 2 * padding) * (BLOCK_SIZE + 2 * padding);
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), nbytes_shared, 0, d_red, d_redBlurred, numRows, numCols, d_filter, filterWidth);
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), nbytes_shared, 0, d_green, d_greenBlurred, numRows, numCols, d_filter, filterWidth);
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), nbytes_shared, 0, d_blue, d_blueBlurred, numRows, numCols, d_filter, filterWidth);
// Again, call hipDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
// Now we recombine your results. We take care of launching this kernel for you.
//
// NOTE: This kernel launch depends on the gridSize and blockSize variables,
// which you must set yourself.
hipLaunchKernelGGL(( recombineChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_redBlurred,
d_greenBlurred,
d_blueBlurred,
d_outputImageRGBA,
numRows,
numCols);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
//Free all the memory that we allocated
//TODO: make sure you free any arrays that you allocated
void cleanup() {
checkCudaErrors(hipFree(d_red));
checkCudaErrors(hipFree(d_green));
checkCudaErrors(hipFree(d_blue));
checkCudaErrors(hipFree(d_filter));
}
| 400e6ae93b829ed5169a18cab872c7856a0348c3.cu | // Homework 2
// Image Blurring
//
// In this homework we are blurring an image. To do this, imagine that we have
// a square array of weight values. For each pixel in the image, imagine that we
// overlay this square array of weights on top of the image such that the center
// of the weight array is aligned with the current pixel. To compute a blurred
// pixel value, we multiply each pair of numbers that line up. In other words, we
// multiply each weight with the pixel underneath it. Finally, we add up all of the
// multiplied numbers and assign that value to our output for the current pixel.
// We repeat this process for all the pixels in the image.
// To help get you started, we have included some useful notes here.
//****************************************************************************
// For a color image that has multiple channels, we suggest separating
// the different color channels so that each color is stored contiguously
// instead of being interleaved. This will simplify your code.
// That is instead of RGBARGBARGBARGBA... we suggest transforming to three
// arrays (as in the previous homework we ignore the alpha channel again):
// 1) RRRRRRRR...
// 2) GGGGGGGG...
// 3) BBBBBBBB...
//
// The original layout is known as an Array of Structures (AoS), whereas the
// format we are converting to is known as a Structure of Arrays (SoA).
// As a warm-up, we will ask you to write the kernel that performs this
// separation. You should then write the "meat" of the assignment,
// which is the kernel that performs the actual blur. We provide code that
// re-combines your blurred results for each color channel.
//****************************************************************************
// You must fill in the gaussian_blur kernel to perform the blurring of the
// inputChannel, using the array of weights, and put the result in the outputChannel.
// Here is an example of computing a blur, using a weighted average, for a single
// pixel in a small image.
//
// Array of weights:
//
// 0.0 0.2 0.0
// 0.2 0.2 0.2
// 0.0 0.2 0.0
//
// Image (note that we align the array of weights to the center of the box):
//
// 1 2 5 2 0 3
// -------
// 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 +
// | |
// 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2
// | |
// 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3
// -------
// 9 6 5 0 3 9
//
// (1) (2) (3)
//
// A good starting place is to map each thread to a pixel as you have before.
// Then every thread can perform steps 2 and 3 in the diagram above
// completely independently of one another.
// Note that the array of weights is square, so its height is the same as its width.
// We refer to the array of weights as a filter, and we refer to its width with the
// variable filterWidth.
//****************************************************************************
// Your homework submission will be evaluated based on correctness and speed.
// We test each pixel against a reference solution. If any pixel differs by
// more than some small threshold value, the system will tell you that your
// solution is incorrect, and it will let you try again.
// Once you have gotten that working correctly, then you can think about using
// shared memory and having the threads cooperate to achieve better performance.
//****************************************************************************
// Also note that we've supplied a helpful debugging function called checkCudaErrors.
// You should wrap your allocation and copying statements like we've done in the
// code we're supplying you. Here is an example of the unsafe way to allocate
// memory on the GPU:
//
// cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols);
//
// Here is an example of the safe way to do the same thing:
//
// checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols));
//
// Writing code the safe way requires slightly more typing, but is very helpful for
// catching mistakes. If you write code the unsafe way and you make a mistake, then
// any subsequent kernels won't compute anything, and it will be hard to figure out
// why. Writing code the safe way will inform you as soon as you make a mistake.
// Finally, remember to free the memory you allocate at the end of the function.
//****************************************************************************
#include "utils.h"
template<typename T>
T min(T a, T b) {
return (a < b) ? a : b;
}
template<typename T>
T max(T a, T b) {
return (a > b) ? a : b;
}
__global__
void gaussian_blur(const unsigned char* const inputChannel,
unsigned char* const outputChannel,
int numRows, int numCols,
const float* const filter, const int filterWidth)
{
// TODO
// NOTE: Be sure to compute any intermediate results in floating point
// before storing the final result as unsigned char.
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
// if ( absolute_image_position_x >= numCols ||
// absolute_image_position_y >= numRows )
// {
// return;
// }
// NOTE: If a thread's absolute 2D position is within the image, but some of
// its neighbors are outside the image, then you will need to be extra careful. Instead
// of trying to read such a neighbor value from GPU memory (which won't work because
// the value is out of bounds), you should explicitly clamp the neighbor values you read
// to be within the bounds of the image. If this is not clear to you, then please refer
// to sequential reference solution for the exact clamping semantics you should follow.
int ix = threadIdx.x + blockIdx.x * blockDim.x;
int iy = threadIdx.y + blockIdx.y * blockDim.y;
extern __shared__ float s_data[];
int l = blockIdx.x * blockDim.x;
int r = (l + blockDim.x < numCols) ? (l + blockDim.x) : numCols;
int u = blockIdx.y * blockDim.y;
int b = (u + blockDim.y < numRows) ? (u + blockDim.y) : numRows;
int padding = filterWidth / 2;
int sizes[2] = {b - u + 2 * padding, r - l + 2 * padding};
int strides[2] = {blockDim.x + 2 * padding, 1};
for(int i = threadIdx.y; i < sizes[0]; i += blockDim.y) {
for(int j = threadIdx.x; j < sizes[1]; j += blockDim.x) {
int di = min(numRows - 1, max(0, u - padding + i));
int dj = min(numCols - 1, max(0, l - padding + j));
s_data[i * strides[0] + j] = inputChannel[di * numCols + dj];
// __syncthreads();
}
// __syncthreads();
}
__syncthreads();
if(ix >= numCols || iy >= numRows) {
return;
}
float out = 0;
for(int i = threadIdx.y; i < threadIdx.y + filterWidth; ++i) {
for(int j = threadIdx.x; j < threadIdx.x + filterWidth; ++j) {
int fi = (i - threadIdx.y) * filterWidth + (j - threadIdx.x);
out += s_data[i * strides[0] + j] * filter[fi];
}
}
outputChannel[ix + iy * numCols] = out;
}
//This kernel takes in an image represented as a uchar4 and splits
//it into three images consisting of only one color channel each
__global__
void separateChannels(const uchar4* const inputImageRGBA,
int numRows,
int numCols,
unsigned char* const redChannel,
unsigned char* const greenChannel,
unsigned char* const blueChannel)
{
// TODO
//
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
// if ( absolute_image_position_x >= numCols ||
// absolute_image_position_y >= numRows )
// {
// return;
// }
int ix = threadIdx.x + blockIdx.x * blockDim.x;
int iy = threadIdx.y + blockIdx.y * blockDim.y;
if(ix >= numCols || iy >= numRows) {
return;
}
int idx = ix + iy * numCols;
uchar4 pixel = inputImageRGBA[idx];
redChannel[idx] = pixel.x;
greenChannel[idx] = pixel.y;
blueChannel[idx] = pixel.z;
}
//This kernel takes in three color channels and recombines them
//into one image. The alpha channel is set to 255 to represent
//that this image has no transparency.
__global__
void recombineChannels(const unsigned char* const redChannel,
const unsigned char* const greenChannel,
const unsigned char* const blueChannel,
uchar4* const outputImageRGBA,
int numRows,
int numCols)
{
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
//make sure we don't try and access memory outside the image
//by having any threads mapped there return early
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
unsigned char red = redChannel[thread_1D_pos];
unsigned char green = greenChannel[thread_1D_pos];
unsigned char blue = blueChannel[thread_1D_pos];
//Alpha should be 255 for no transparency
uchar4 outputPixel = make_uchar4(red, green, blue, 255);
outputImageRGBA[thread_1D_pos] = outputPixel;
}
unsigned char *d_red, *d_green, *d_blue;
float *d_filter;
void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage,
const float* const h_filter, const size_t filterWidth)
{
//allocate memory for the three different channels
//original
checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(cudaMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(cudaMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage));
//TODO:
//Allocate memory for the filter on the GPU
//Use the pointer d_filter that we have already declared for you
//You need to allocate memory for the filter with cudaMalloc
//be sure to use checkCudaErrors like the above examples to
//be able to tell if anything goes wrong
//IMPORTANT: Notice that we pass a pointer to a pointer to cudaMalloc
size_t nbytes = sizeof(float) * filterWidth * filterWidth;
checkCudaErrors(cudaMalloc(&d_filter, nbytes));
//TODO:
//Copy the filter on the host (h_filter) to the memory you just allocated
//on the GPU. cudaMemcpy(dst, src, numBytes, cudaMemcpyHostToDevice);
//Remember to use checkCudaErrors!
checkCudaErrors(cudaMemcpy(d_filter, h_filter, nbytes, cudaMemcpyHostToDevice));
}
void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA,
uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols,
unsigned char *d_redBlurred,
unsigned char *d_greenBlurred,
unsigned char *d_blueBlurred,
const int filterWidth)
{
//TODO: Set reasonable block size (i.e., number of threads per block)
int BLOCK_SIZE = 32;
const dim3 blockSize(BLOCK_SIZE, BLOCK_SIZE);
//TODO:
//Compute correct grid size (i.e., number of blocks per kernel launch)
//from the image size and and block size.
const dim3 gridSize((numCols + BLOCK_SIZE - 1) / BLOCK_SIZE,
(numRows + BLOCK_SIZE - 1) / BLOCK_SIZE);
//TODO: Launch a kernel for separating the RGBA image into different color channels
separateChannels<<<gridSize, blockSize>>>(d_inputImageRGBA, numRows, numCols, d_red, d_green, d_blue);
// Call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
//TODO: Call your convolution kernel here 3 times, once for each color channel.
int padding = filterWidth / 2;
size_t nbytes_shared = sizeof(float) * (BLOCK_SIZE + 2 * padding) * (BLOCK_SIZE + 2 * padding);
gaussian_blur<<<gridSize, blockSize, nbytes_shared>>>(d_red, d_redBlurred, numRows, numCols, d_filter, filterWidth);
gaussian_blur<<<gridSize, blockSize, nbytes_shared>>>(d_green, d_greenBlurred, numRows, numCols, d_filter, filterWidth);
gaussian_blur<<<gridSize, blockSize, nbytes_shared>>>(d_blue, d_blueBlurred, numRows, numCols, d_filter, filterWidth);
// Again, call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
// Now we recombine your results. We take care of launching this kernel for you.
//
// NOTE: This kernel launch depends on the gridSize and blockSize variables,
// which you must set yourself.
recombineChannels<<<gridSize, blockSize>>>(d_redBlurred,
d_greenBlurred,
d_blueBlurred,
d_outputImageRGBA,
numRows,
numCols);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
//Free all the memory that we allocated
//TODO: make sure you free any arrays that you allocated
void cleanup() {
checkCudaErrors(cudaFree(d_red));
checkCudaErrors(cudaFree(d_green));
checkCudaErrors(cudaFree(d_blue));
checkCudaErrors(cudaFree(d_filter));
}
|
a90248282a04d4748c3cb04b8e7c2cd89447e04a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@generated from magmablas/zhemv_mgpu.cu, normal z -> c, Mon Jun 25 18:24:15 2018
@author Mark Gates
*/
#include "magma_internal.h"
#include "commonblas_c.h"
#define NB_X 64
#define NB_Y 4
#define bank_shift 33
#define quarter_NB_X 16
#define half_NB_X 32
/***************************************************************************//**
Lower case, compute block multiply, work = A*x, for any size n:
[ (A11*x1) (A21^H*x2) (A31^H*x3) ] [ A11 A21^H A31^H ] [ x1 ]
work = [ --- (A21*x1 + A22*x2) (A32^H*x3) ] = [ A21 A22 A32^H ] * [ x2 ]
[ --- --- (A31*x1 + A32*x2 + A33*x3) ] [ A31 A32 A33 ] [ x3 ]
Uses a 64x4 thread block.
For diagonal tiles, covers a 64x64 tile using three 32x32 tiles (plus one gets transposed).
For off-diagonal tiles, covers a 64x64 tile using four 64x16 tiles.
In both cases, each thread multiplies 4 elements.
For rows past the bottom of the matrix, the A pointer is adjusted to be the
last valid row of A, which multiple threads will read.
Extra rows are ignored when saving results to work.
Columns past the right edge are explicitly ignored when loading.
x values past the bottom are set to zero, thus, extra columns are zeroed
when multiplying.
Previously:
[ (A11*x1) --- ]
work = [ (A21^H*x2) (A21*x1 + A22*x2) --- ]
[ (A31^H*x3) (A32^H*x3) (A31*x1 + A32*x2 + A33*x3) ]
which doesn't work as well because that has dimension blocks*NB by blocks,
where blocks*NB >= n, and it can be that blocks*NB > lda, so it won't fit in
lda*blocks space. This is why it used to need lwork = lda*(blocks + 1).
*******************************************************************************/
__global__ void
chemv_kernel_L_mgpu(
int n,
magmaFloatComplex const * __restrict__ A, int lda,
magmaFloatComplex const * __restrict__ x, int incx,
magmaFloatComplex * __restrict__ work,
int my_gpu_id,
int ngpu,
int block_offset )
{
#if (__CUDA_ARCH__ >= 200)
// treats sA as 16x64 block
#define sA16(i_, j_) (sA[(i_)][(j_)]) // i.e., sA[ (i_)*(NB_X+3) + (j_) ]
// treats sA as 32x32 block
#define sA32(i_, j_) (sA[0][(i_) + bank_shift*(j_)])
// 64x4 thread block
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int blk = blockIdx.x;
const int blk_ind = NB_X * blk;
const int td = NB_X * ty + tx;
// GPUs are renumbered so that GPU 0 starts with block 0, GPU 1 starts with block 1, etc.
if ( blk < my_gpu_id ) {
return;
}
// 32x8 thread block
const int tx2 = td % half_NB_X;
const int ty2 = td / half_NB_X;
// If this blk has fewer than NB_X rows, partial is the number of valid rows,
// so tx = 0, ..., partial-1 are valid rows, and tx >= partial are invalid.
// Else, partial == 0.
const int partial = (blk == gridDim.x - 1 ? ((n + block_offset) % NB_X) : 0);
magmaFloatComplex psum, psum_t;
magmaFloatComplex total = MAGMA_C_ZERO;
// sA is used as a 32x32 block, sA32(i,j),
// and as a 16x64 block, sA16(i,j), in different parts of the code.
// sA must be at least half_NB_X*bank_shift = 32x33 = 1056;
// quarter_NB_X*(NB_X + 2) = 16*(64 + 2) = 1056
__shared__ magmaFloatComplex sA [quarter_NB_X][NB_X + 2]; // TODO +3 used in chemv (single GPU); why?
__shared__ magmaFloatComplex sx_blk[NB_X]; // for x[ blk ]
__shared__ magmaFloatComplex sx_jj [NB_X]; // for x[ jj ], which cycles over all blocks left of diag
magmaFloatComplex rA[4];
magmaFloatComplex psums_t[4];
// --------------------
// load 64x1 block x(blk_ind + 0:63) into sx_blk
x += (blk_ind + tx)*incx; // x is x(blk_ind + tx)
if ( ty == 0 ) {
// GPUs are renumbered so that GPU 0 has block 0, which is partial of offset.
if ( (partial && tx >= partial) ||
(blk == 0 /*&& my_gpu_id == 0*/ && tx < block_offset) ) {
sx_blk[tx] = MAGMA_C_ZERO;
}
else {
sx_blk[tx] = x[0];
}
}
// --------------------
// move to block row
work += blk*lda; // work is work(0, blk)
A += blk_ind; // A is A(blk_ind, 0)
A += ty2*lda + tx2; // A is A(blk_ind + tx2, ty2)
if ( blk % ngpu == my_gpu_id ) {
// this GPU owns this diagonal block, so
// move to 32x32 diag block
A += (blk/ngpu)*NB_X*lda; // A is A(blk_ind + tx2, blk_ind + ty2)
// load 32x32 diag block A(blk_ind + 0:31, blk_ind + 0:31) into sA,
// as four 32x8 sections one after another:
// columns 0:7, then 8:15, then 16:23, then 24:31
if ( partial ) {
if ( tx2 >= partial ) {
A = A - tx2 + (partial - 1); // A is A(blk_ind + partial-1, blk_ind + ty2), the bottom-most valid row
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_C_ZERO;
}
}
if ( tx2 >= partial ) {
A = A + tx2 - (partial - 1); // A is A(blk_ind + tx2, blk_ind + ty2)
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// symmetrize 32x32 diag block, copying lower to upper triangle,
// as four 32x8 sections in parallel:
// columns 0,4,8,12,16,20,24,28; then 1,5,...,29; then 2,6,...,30, then 3,7,...,31
#pragma unroll
for (int j=ty2*4; j < ty2*4 + 4; j++) {
if ( j < tx2 ) {
sA32(j, tx2) = MAGMA_C_CONJ( sA32(tx2, j) );
}
}
__syncthreads();
// multiply 32x32 diag block * x
// each thread does partial row sA(tx2, ty2*4 : ty2*4 + 3)
psum = MAGMA_C_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2*4 + j) * sx_blk[ty2*4 + j];
}
__syncthreads();
// store partial row sums
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
if ( ty2 == 0 ) {
total = sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to next 32x32 diag block, then repeat steps from first diag block
A += half_NB_X + half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + NB/2 + ty2)
// load 32x32 diag block A[block + 0:31, block + 0:31] into sA
if ( partial ) {
if ( tx2 + half_NB_X >= partial ) {
A = A - (tx2 + half_NB_X) + (partial - 1);
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j + half_NB_X < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_C_ZERO;
}
}
if ( tx2 + half_NB_X >= partial ) {
A = A + (tx2 + half_NB_X) - (partial - 1);
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// symmetrize 32x32 diag block, copying lower to upper triangle
#pragma unroll
for (int j=ty2*4; j < ty2*4 + 4; j++) {
if ( j < tx2 ) {
sA32(j, tx2) = MAGMA_C_CONJ( sA32(tx2, j) );
}
}
__syncthreads();
// multiply 32x32 diag block * x
psum = MAGMA_C_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2*4 + j) * sx_blk[half_NB_X + ty2*4 + j];
}
__syncthreads();
// store partial row sums
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
if ( ty2 == 1 ) {
total = sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to off-diag 32x32 block
A -= half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + ty2)
// load 32x32 block of A into sA,
// as four 32x8 sections one after another:
// columns 0:7, then 8:15, then 16:23, then 24:31
if ( partial ) {
if ( tx2 + half_NB_X >= partial ) {
A = A - (tx2 + half_NB_X) + (partial - 1);
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_C_ZERO;
}
}
if ( tx2 + half_NB_X >= partial ) {
A = A + (tx2 + half_NB_X) - (partial - 1);
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// multiply 32x32 block (below diag)
psum = MAGMA_C_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2 + j*8) * sx_blk[j*8 + ty2];
}
//__syncthreads(); // no sync needed here
// multiply transposed 32x32 block (above diag)
psum_t = MAGMA_C_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum_t += MAGMA_C_CONJ( sA32(ty2*4 + j, tx2) ) * sx_blk[half_NB_X + ty2*4 + j];
}
__syncthreads();
// store partial sums for non-transposed 32x32 block
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
if ( ty2 == 1 ) {
total = total
+ sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// store partial sums for transposed 32x32 block
sA32(ty2, tx2) = psum_t;
__syncthreads();
// sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
if ( ty2 == 0 ) {
total = total
+ sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to leftmost 64x64 block in block row, and
// switch thread offset from (tx2,ty2) 32x8 block to (tx,ty) 64x4 block
A -= half_NB_X; // A is A(blk_ind + tx2, blk_ind + ty2)
A -= (blk/ngpu)*NB_X*lda; // A is A(blk_ind + tx2, ty2)
}
// finish switching thread offset
A -= ty2*lda + tx2; // A is A(blk_ind, 0)
A += 4*ty*lda + tx; // A is A(blk_ind + tx, 4*ty)
if ( partial && tx >= partial ) {
A = A - tx + (partial - 1); // A is A(blk_ind + partial-1, 4*ty), the bottom-most valid row
}
x -= blk_ind*incx; // x is x(tx)
// 16x16 thread block
const int tx4 = td % quarter_NB_X;
const int ty4 = td / quarter_NB_X;
// cycle over blocks jj left of diagonal, in block row blk
for (int jj=my_gpu_id; jj < blk; jj += ngpu) {
// load 64x1 block x(jj_ind + 0:63) into sx_jj
// since this block is left of diagonal, x must have all NB rows
// only the first block column (jj=0, on GPU 0) deals with offset
if ( ty == 0 ) {
if ( jj == 0 && tx < block_offset ) {
sx_jj[tx] = MAGMA_C_ZERO;
}
else {
sx_jj[tx] = x[jj*NB_X*incx];
}
}
__syncthreads();
for (int k=0; k < 4; k++) {
// load 64x16 block of A into rA, 4 elements per thread,
// as four 64x4 sections in parallel:
// columns 0,4,8,12; then 1,5,9,13; then 2,6,10,14; then 3,7,11,15
// since this block is left of diagonal, it has all NB columns,
// and block of x must have all NB rows.
#pragma unroll
for (int j=0; j < 4; j++) {
rA[j] = A[j*lda];
}
// 1) multiply 64x16 block A_{blk,jj} * x_jj
// each thread does partial row rA(tx + 16*k, ty*4 + 16*k : ty*4 + 3 + 16*k)
// 2) multiply transposed 16x64 block A_{blk,jj}^H * x_blk,
// storing each product Aji*xi to sA(j,i)
#pragma unroll
for (int j=0; j < 4; j++) {
total += rA[j] * sx_jj[quarter_NB_X*k + ty*4 + j]; // y_blk = A_{blk,jj} * x_jj
sA16(ty*4 + j, tx) = MAGMA_C_CONJ( rA[j] ) * sx_blk[tx]; // y_jj = A_{blk,jj}^H * x_blk
}
__syncthreads();
// do partial row sums for transposed 16x64 result
// use 16x16 thread grid (tx4, ty4) instead of 64x4 (tx, ty)
// sum sixteen 16x4 sections in parallel:
// columns 0,4,8,...,60; then 1,5,...,61; then 2,6,...,62; then 3,7,...,63
psum_t = MAGMA_C_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum_t += sA16(tx4, ty4*4 + j);
}
__syncthreads();
// store partial row sums of transposed result, y_jj (locally)
psums_t[k] = psum_t;
// move right to next 64x16 block
A += lda * quarter_NB_X; // A is A(blk_ind + tx#, jj*NB_x + (k+1)*NB_X/4 + 4*ty), # tx or partial
}
// already at next 64x64 block
// A is A(blk_ind + tx#, (jj+1)*NB_x + 4*ty), # tx or partial
// store partial row sums of transposed result, y_jj
#pragma unroll
for (int k=0; k < 4; k++) {
sA16(tx4, ty4 + quarter_NB_X*k) = psums_t[k];
}
__syncthreads();
// sum up partial row sums of transposed result, y_jj, and store final total to workspace
// thread (tx4,ty4) where ty4 < 4 sums row tx4 + ty4*16
// since this is the transposed block above the diagonal, it must have all NB rows
if ( ty4 < 4 ) {
int ty4_nb4 = ty4*quarter_NB_X;
psum_t = sA16(tx4, 0 + ty4_nb4) + sA16(tx4, 1 + ty4_nb4)
+ sA16(tx4, 2 + ty4_nb4) + sA16(tx4, 3 + ty4_nb4)
+ sA16(tx4, 4 + ty4_nb4) + sA16(tx4, 5 + ty4_nb4)
+ sA16(tx4, 6 + ty4_nb4) + sA16(tx4, 7 + ty4_nb4)
+ sA16(tx4, 8 + ty4_nb4) + sA16(tx4, 9 + ty4_nb4)
+ sA16(tx4, 10 + ty4_nb4) + sA16(tx4, 11 + ty4_nb4)
+ sA16(tx4, 12 + ty4_nb4) + sA16(tx4, 13 + ty4_nb4)
+ sA16(tx4, 14 + ty4_nb4) + sA16(tx4, 15 + ty4_nb4);
work[jj*NB_X + tx4 + ty4_nb4] = psum_t; // store at work( jj*NB_X + tx4 + ty4*16, blk )
}
__syncthreads();
}
// store row sums
sA16(ty, tx) = total;
__syncthreads();
// sum up final total, y_blk, for row tx
if ( ty == 0 && (partial == 0 || tx < partial) ) {
total = sA16(0, tx)
+ sA16(1, tx)
+ sA16(2, tx)
+ sA16(3, tx);
work[blk*NB_X + tx] = total; // store at work( blk*NB_X + tx, blk )
}
#endif /* (__CUDA_ARCH__ >= 200) */
}
// end chemv_kernel_L_mgpu
/***************************************************************************//**
Lower case, sum up partial results per GPU.
Each block sums one block row; each thread sums one row.
On input (for 3 blocks):
[ (A11*x1) (A21^H*x2) (A31^H*x3) ]
work = [ --- (A21*x1 + A22*x2) (A32^H*x3) ]
[ --- --- (A31*x1 + A32*x2 + A33*x3) ]
On output:
[ (A11*x1) + (A21^H*x2) + (A31^H*x3) ]
y = alpha*[ (A21*x1 + A22*x2) + (A32^H*x3) ]
[ (A31*x1 + A32*x2 + A33*x3) ]
Note beta*y is not included here; see magmablas_chemv_mgpu_sync.
The above workspace is distributed over multiple GPUs as diagrammed for 5 blocks:
[ * x x x x ] blk=0 * data for non-transposed row w_blk = A_{blk,1:blk} * x_{1:blk}
work[gpu=0] = [ * ] blk=1 x data for transposed block w_jj = A_{blk,jj}^H * x_{blk}
[ * x x ] blk=2 blanks are not set
[ * ] blk=3
[ * ] blk=4
[ ] blk=0 (blank)
work[gpu=1] = [ * x x x ] blk=1
[ * ] blk=2
[ * x ] blk=3
[ * ] blk=4
On output, rows across are summed up.
Entries left of the diagonal blocks are not accessed.
Blank rows, where a GPU has no data to contribute, are explicitly set to zero in y.
[ * + x + x + x ]
y[gpu=0] = [ * ]
[ * + x ]
[ * ]
[ 0 ] (explicitly set to 0)
y[gpu=1] = [ * + x + x ]
[ * ]
[ * ]
*******************************************************************************/
__global__ void
chemv_kernel_L_mgpu_sum(
int n,
magmaFloatComplex alpha,
int lda,
magmaFloatComplex * __restrict__ y, int incy,
magmaFloatComplex const * __restrict__ work,
int my_gpu_id,
int ngpu,
int block_offset)
{
int tx = threadIdx.x;
int blk = blockIdx.x;
int blk_ind = blk * NB_X;
int ind = blk_ind + tx;
int blocks = gridDim.x;
// Don't write outside [block_offset, ..., n+block_offset)
if ( ind >= block_offset && ind < n+block_offset ) {
magmaFloatComplex Ax = MAGMA_C_ZERO;
// GPUs are renumbered so that GPU 0 starts with block 0,
// GPU 1 starts with block 1, etc.,
// therefore only blk >= my_gpu_id have non-zero data.
if ( blk >= my_gpu_id ) {
work += ind;
// if this GPU owns block-column blk, all blocks j=[blk, ..., blocks) contain data;
// else only block j=blk contains data.
int last = blocks-1;
if ( blk % ngpu != my_gpu_id ) {
last = blk;
}
for (int j = blk; j <= last; ++j) {
Ax += work[j*lda];
}
}
y[ind * incy] = alpha*Ax; // see magmablas_chemv_sync for beta*y
}
}
// end chemv_kernel_L_mgpu_sum
/***************************************************************************//**
Purpose
-------
magmablas_chemv_mgpu performs the matrix-vector operation:
y := alpha*A*x + beta*y,
where alpha and beta are scalars, x and y are n element vectors and
A is an n by n Hermitian matrix.
Arguments
----------
@param[in]
uplo magma_uplo_t.
On entry, UPLO specifies whether the upper or lower
triangular part of the array A is to be referenced as
follows:
- = MagmaUpper: Only the upper triangular part of A is to be referenced. **Not currently supported.**
- = MagmaLower: Only the lower triangular part of A is to be referenced.
@param[in]
n INTEGER.
On entry, N specifies the order of the matrix A.
N must be at least zero.
@param[in]
alpha COMPLEX.
On entry, ALPHA specifies the scalar alpha.
@param[in]
d_lA Array of pointers, dimension (ngpu), to block-column distributed
matrix A, with block size nb.
d_lA[dev] is a COMPLEX array on GPU dev, of
dimension (LDDA, nlocal), where
\n
{ floor(n/nb/ngpu)*nb + nb if dev < floor(n/nb) % ngpu,
nlocal = { floor(n/nb/ngpu)*nb + n%nb if dev == floor(n/nb) % ngpu,
{ floor(n/nb/ngpu)*nb otherwise.
\n
Before entry with UPLO = MagmaUpper, the leading n by n
upper triangular part of the array A must contain the upper
triangular part of the Hermitian matrix and the strictly
lower triangular part of A is not referenced.
Before entry with UPLO = MagmaLower, the leading n by n
lower triangular part of the array A must contain the lower
triangular part of the Hermitian matrix and the strictly
upper triangular part of A is not referenced.
Note that the imaginary parts of the diagonal elements need
not be set and are assumed to be zero.
@param[in]
offset INTEGER.
Row & column offset to start of matrix A within the distributed d_lA
structure. Note that N is the size of this multiply, excluding the
offset, so the size of the original parent matrix is N+offset.
Also, x and y do not have an offset.
@param[in]
ldda INTEGER.
On entry, LDDA specifies the first dimension of A as declared
in the calling (sub) program. LDDA must be at least
max( 1, n + offset ).
It is recommended that ldda be a multiple of 16; otherwise
performance may degrade because the memory accesses
would not be fully coalesced.
@param[in]
x COMPLEX array **on the CPU** (not the GPU), of dimension at least
( 1 + ( n - 1 )*abs( INCX ) ).
Before entry, the incremented array X must contain the n
element vector x.
@param[in]
incx INTEGER.
On entry, INCX specifies the increment for the elements of
X. INCX must not be zero.
@param[in]
beta COMPLEX.
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then Y need not be set on input.
@param[in,out]
y COMPLEX array **on the CPU** (not the GPU), of dimension at least
( 1 + ( n - 1 )*abs( INCY ) ).
Before entry, the incremented array Y must contain the n
element vector y. On exit, Y is overwritten by the updated
vector y.
@param[in]
incy INTEGER.
On entry, INCY specifies the increment for the elements of
Y. INCY must not be zero.
@param
hwork (workspace) COMPLEX array on the CPU, of dimension (lhwork).
@param[in]
lhwork INTEGER.
The dimension of the array hwork. lhwork >= ngpu*nb.
@param
dwork (workspaces) Array of pointers, dimension (ngpu), to workspace on each GPU.
dwork[dev] is a COMPLEX array on GPU dev, of dimension (ldwork).
@param[in]
ldwork INTEGER.
The dimension of each array dwork[dev].
ldwork >= ldda*( ceil((n + offset % nb) / nb) + 1 ).
@param[in]
ngpu INTEGER.
The number of GPUs to use.
@param[in]
nb INTEGER.
The block size used for distributing d_lA. Must be 64.
@param[in]
queues magma_queue_t array of dimension (ngpu).
queues[dev] is an execution queue on GPU dev.
@ingroup magma_hemv
*******************************************************************************/
extern "C"
magma_int_t
magmablas_chemv_mgpu(
magma_uplo_t uplo,
magma_int_t n,
magmaFloatComplex alpha,
magmaFloatComplex_const_ptr const d_lA[], magma_int_t ldda,
magma_int_t offset,
magmaFloatComplex const *x, magma_int_t incx,
magmaFloatComplex beta, // unused, see magmablas_chemv_mgpu_sync
magmaFloatComplex *y, magma_int_t incy, // unused
magmaFloatComplex *hwork, magma_int_t lhwork,
magmaFloatComplex_ptr dwork[], magma_int_t ldwork,
magma_int_t ngpu,
magma_int_t nb,
magma_queue_t queues[] )
{
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
// --------------------
// no CUDA ARCH 1.x version
fprintf( stderr, "%s not supported on CUDA arch 1.x\n", __func__ );
return MAGMA_ERR_NOT_SUPPORTED;
}
// --------------------
// CUDA ARCH 2.x (Fermi) version
bool upper = (uplo == MagmaUpper);
magma_int_t offset_block_id = offset / NB_X;
magma_int_t offset_gpu_id = offset_block_id % ngpu;
magma_int_t block_offset = offset % NB_X;
magma_int_t blocks = magma_ceildiv( n + block_offset, NB_X );
magma_int_t ldwmin = ldda*(blocks + 1);
magma_int_t lhwmin = n*ngpu;
/*
* Test the input parameters.
*/
magma_int_t info = 0;
if ( (! upper) && (uplo != MagmaLower) ) {
info = -1;
} else if ( n < 0 ) {
info = -2;
} else if ( ldda < max(1,n+offset) ) {
info = -5;
} else if ( offset < 0 ) {
info = -6;
} else if ( incx == 0 ) {
info = -8;
} else if ( incy == 0 ) {
info = -11;
} else if ( lhwork < lhwmin ) {
info = -13;
} else if ( ldwork < ldwmin ) {
info = -15;
} else if ( ngpu < 1 ) {
info = -16;
} else if ( nb != NB_X ) {
info = -17;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return info;
}
/*
* Quick return if possible.
*/
if ( n == 0 )
return info;
magma_device_t orig_dev;
magma_getdevice( &orig_dev );
magma_int_t dev;
for (dev=0; dev < ngpu; dev++) {
magma_setdevice( dev );
// blocks before the offset block
magma_int_t num_blocks_skipped = offset_block_id / ngpu;
if ( dev < offset_gpu_id ) {
num_blocks_skipped += 1;
}
// shift dA to first block >= offset block that is owned by this GPU
magmaFloatComplex const *dA_dev = d_lA[dev] + offset_block_id*NB_X + num_blocks_skipped*NB_X*ldda;
// first column of dwork is to broadcast x to all GPUs.
// remaining blocks number of columns is for partial sums from
// each block, as in single GPU version.
magmaFloatComplex *dx_dev = dwork[dev];
magmaFloatComplex *dwork_dev = dwork[dev] + ldda;
// renumber GPUs starting from the offset block
magma_int_t new_gpu_id = (dev + ngpu - offset_gpu_id) % ngpu;
dim3 grid( blocks, 1 );
// copy x to each GPU
magma_csetvector_async( n, x, incx, dx_dev + block_offset, 1, queues[dev] );
// perform work = A*x, partial row sums
dim3 threads( NB_X, NB_Y );
// perform w = sum( work ), larger partial row sums
dim3 threads_sum( NB_X, 1 );
if ( upper ) {
hipLaunchKernelGGL(( chemv_kernel_U_mgpu), dim3(grid), dim3(threads), 0, queues[dev]->cuda_stream() ,
n, dA_dev, ldda, dx_dev, 1, dwork_dev,
new_gpu_id, ngpu, block_offset );
hipLaunchKernelGGL(( chemv_kernel_U_mgpu_sum), dim3(grid), dim3(threads_sum), 0, queues[dev]->cuda_stream() ,
n, alpha, ldda, dx_dev, 1, dwork_dev,
new_gpu_id, ngpu, block_offset );
}
else {
hipLaunchKernelGGL(( chemv_kernel_L_mgpu), dim3(grid), dim3(threads), 0, queues[dev]->cuda_stream() ,
n, dA_dev, ldda, dx_dev, 1, dwork_dev,
new_gpu_id, ngpu, block_offset );
hipLaunchKernelGGL(( chemv_kernel_L_mgpu_sum), dim3(grid), dim3(threads_sum), 0, queues[dev]->cuda_stream() ,
n, alpha, ldda, dx_dev, 1, dwork_dev,
new_gpu_id, ngpu, block_offset );
}
}
// 2nd loop in case hwork is not pinned, causing this to be sync instead of async.
for (dev=0; dev < ngpu; dev++) {
// copy w to CPU
magma_setdevice( dev );
magmaFloatComplex *dx_dev = dwork[dev];
magma_cgetvector_async( n, dx_dev + block_offset, 1, &hwork[dev*n], 1, queues[dev] );
}
// see magmablas_chemv_mgpu_sync for final row sums
magma_setdevice( orig_dev );
return info;
}
/***************************************************************************//**
Synchronizes and accumulates final chemv result.
For convenience, the parameters are identical to magmablas_chemv_mgpu
(though some are unused here).
@see magmablas_chemv_mgpu
@ingroup magma_hemv
*******************************************************************************/
extern "C" magma_int_t
magmablas_chemv_mgpu_sync(
magma_uplo_t uplo, // unused, see magmablas_chemv_mgpu
magma_int_t n,
magmaFloatComplex alpha, // unused
magmaFloatComplex_const_ptr const d_lA[], magma_int_t ldda,
magma_int_t offset, // unused
magmaFloatComplex const *x, magma_int_t incx, // unused
magmaFloatComplex beta,
magmaFloatComplex *y, magma_int_t incy, // unused
magmaFloatComplex *hwork, magma_int_t lhwork,
magmaFloatComplex_ptr dwork[], magma_int_t ldwork, // unused
magma_int_t ngpu,
magma_int_t nb, // unused
magma_queue_t queues[] )
{
const magmaFloatComplex c_one = MAGMA_C_ONE;
const magma_int_t ione = 1;
magma_device_t dev;
magma_int_t lhwmin = n*ngpu;
/*
* Test the input parameters.
*/
magma_int_t info = 0;
//if ( (! upper) && (uplo != MagmaLower) ) { // unused
// info = -1;
//} else
if ( n < 0 ) {
info = -2;
} else if ( ldda < max(1,n+offset) ) {
info = -5;
} else if ( offset < 0 ) {
info = -6;
} else if ( incx == 0 ) {
info = -8;
} else if ( incy == 0 ) {
info = -11;
} else if ( lhwork < lhwmin ) {
info = -13;
//} else if ( ldwork < ldwmin ) { // unused
// info = -15;
} else if ( ngpu < 1 ) {
info = -16;
} else if ( nb != NB_X ) {
info = -17;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return info;
}
/*
* Quick return if possible.
*/
if ( n == 0 )
return info;
magma_device_t orig_dev;
magma_getdevice( &orig_dev );
// scale y = beta*y
blasf77_cscal( &n, &beta, y, &incy );
// sum reduce, y += sum( hwork )
for (dev=0; dev < ngpu; ++dev) {
magma_setdevice( dev );
magma_queue_sync( queues[dev] );
blasf77_caxpy( &n, &c_one, &hwork[dev*n], &ione, y, &ione );
}
magma_setdevice( orig_dev );
return info;
}
| a90248282a04d4748c3cb04b8e7c2cd89447e04a.cu | /*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@generated from magmablas/zhemv_mgpu.cu, normal z -> c, Mon Jun 25 18:24:15 2018
@author Mark Gates
*/
#include "magma_internal.h"
#include "commonblas_c.h"
#define NB_X 64
#define NB_Y 4
#define bank_shift 33
#define quarter_NB_X 16
#define half_NB_X 32
/***************************************************************************//**
Lower case, compute block multiply, work = A*x, for any size n:
[ (A11*x1) (A21^H*x2) (A31^H*x3) ] [ A11 A21^H A31^H ] [ x1 ]
work = [ --- (A21*x1 + A22*x2) (A32^H*x3) ] = [ A21 A22 A32^H ] * [ x2 ]
[ --- --- (A31*x1 + A32*x2 + A33*x3) ] [ A31 A32 A33 ] [ x3 ]
Uses a 64x4 thread block.
For diagonal tiles, covers a 64x64 tile using three 32x32 tiles (plus one gets transposed).
For off-diagonal tiles, covers a 64x64 tile using four 64x16 tiles.
In both cases, each thread multiplies 4 elements.
For rows past the bottom of the matrix, the A pointer is adjusted to be the
last valid row of A, which multiple threads will read.
Extra rows are ignored when saving results to work.
Columns past the right edge are explicitly ignored when loading.
x values past the bottom are set to zero, thus, extra columns are zeroed
when multiplying.
Previously:
[ (A11*x1) --- ]
work = [ (A21^H*x2) (A21*x1 + A22*x2) --- ]
[ (A31^H*x3) (A32^H*x3) (A31*x1 + A32*x2 + A33*x3) ]
which doesn't work as well because that has dimension blocks*NB by blocks,
where blocks*NB >= n, and it can be that blocks*NB > lda, so it won't fit in
lda*blocks space. This is why it used to need lwork = lda*(blocks + 1).
*******************************************************************************/
__global__ void
chemv_kernel_L_mgpu(
int n,
magmaFloatComplex const * __restrict__ A, int lda,
magmaFloatComplex const * __restrict__ x, int incx,
magmaFloatComplex * __restrict__ work,
int my_gpu_id,
int ngpu,
int block_offset )
{
#if (__CUDA_ARCH__ >= 200)
// treats sA as 16x64 block
#define sA16(i_, j_) (sA[(i_)][(j_)]) // i.e., sA[ (i_)*(NB_X+3) + (j_) ]
// treats sA as 32x32 block
#define sA32(i_, j_) (sA[0][(i_) + bank_shift*(j_)])
// 64x4 thread block
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int blk = blockIdx.x;
const int blk_ind = NB_X * blk;
const int td = NB_X * ty + tx;
// GPUs are renumbered so that GPU 0 starts with block 0, GPU 1 starts with block 1, etc.
if ( blk < my_gpu_id ) {
return;
}
// 32x8 thread block
const int tx2 = td % half_NB_X;
const int ty2 = td / half_NB_X;
// If this blk has fewer than NB_X rows, partial is the number of valid rows,
// so tx = 0, ..., partial-1 are valid rows, and tx >= partial are invalid.
// Else, partial == 0.
const int partial = (blk == gridDim.x - 1 ? ((n + block_offset) % NB_X) : 0);
magmaFloatComplex psum, psum_t;
magmaFloatComplex total = MAGMA_C_ZERO;
// sA is used as a 32x32 block, sA32(i,j),
// and as a 16x64 block, sA16(i,j), in different parts of the code.
// sA must be at least half_NB_X*bank_shift = 32x33 = 1056;
// quarter_NB_X*(NB_X + 2) = 16*(64 + 2) = 1056
__shared__ magmaFloatComplex sA [quarter_NB_X][NB_X + 2]; // TODO +3 used in chemv (single GPU); why?
__shared__ magmaFloatComplex sx_blk[NB_X]; // for x[ blk ]
__shared__ magmaFloatComplex sx_jj [NB_X]; // for x[ jj ], which cycles over all blocks left of diag
magmaFloatComplex rA[4];
magmaFloatComplex psums_t[4];
// --------------------
// load 64x1 block x(blk_ind + 0:63) into sx_blk
x += (blk_ind + tx)*incx; // x is x(blk_ind + tx)
if ( ty == 0 ) {
// GPUs are renumbered so that GPU 0 has block 0, which is partial of offset.
if ( (partial && tx >= partial) ||
(blk == 0 /*&& my_gpu_id == 0*/ && tx < block_offset) ) {
sx_blk[tx] = MAGMA_C_ZERO;
}
else {
sx_blk[tx] = x[0];
}
}
// --------------------
// move to block row
work += blk*lda; // work is work(0, blk)
A += blk_ind; // A is A(blk_ind, 0)
A += ty2*lda + tx2; // A is A(blk_ind + tx2, ty2)
if ( blk % ngpu == my_gpu_id ) {
// this GPU owns this diagonal block, so
// move to 32x32 diag block
A += (blk/ngpu)*NB_X*lda; // A is A(blk_ind + tx2, blk_ind + ty2)
// load 32x32 diag block A(blk_ind + 0:31, blk_ind + 0:31) into sA,
// as four 32x8 sections one after another:
// columns 0:7, then 8:15, then 16:23, then 24:31
if ( partial ) {
if ( tx2 >= partial ) {
A = A - tx2 + (partial - 1); // A is A(blk_ind + partial-1, blk_ind + ty2), the bottom-most valid row
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_C_ZERO;
}
}
if ( tx2 >= partial ) {
A = A + tx2 - (partial - 1); // A is A(blk_ind + tx2, blk_ind + ty2)
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// symmetrize 32x32 diag block, copying lower to upper triangle,
// as four 32x8 sections in parallel:
// columns 0,4,8,12,16,20,24,28; then 1,5,...,29; then 2,6,...,30, then 3,7,...,31
#pragma unroll
for (int j=ty2*4; j < ty2*4 + 4; j++) {
if ( j < tx2 ) {
sA32(j, tx2) = MAGMA_C_CONJ( sA32(tx2, j) );
}
}
__syncthreads();
// multiply 32x32 diag block * x
// each thread does partial row sA(tx2, ty2*4 : ty2*4 + 3)
psum = MAGMA_C_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2*4 + j) * sx_blk[ty2*4 + j];
}
__syncthreads();
// store partial row sums
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
if ( ty2 == 0 ) {
total = sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to next 32x32 diag block, then repeat steps from first diag block
A += half_NB_X + half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + NB/2 + ty2)
// load 32x32 diag block A[block + 0:31, block + 0:31] into sA
if ( partial ) {
if ( tx2 + half_NB_X >= partial ) {
A = A - (tx2 + half_NB_X) + (partial - 1);
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j + half_NB_X < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_C_ZERO;
}
}
if ( tx2 + half_NB_X >= partial ) {
A = A + (tx2 + half_NB_X) - (partial - 1);
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// symmetrize 32x32 diag block, copying lower to upper triangle
#pragma unroll
for (int j=ty2*4; j < ty2*4 + 4; j++) {
if ( j < tx2 ) {
sA32(j, tx2) = MAGMA_C_CONJ( sA32(tx2, j) );
}
}
__syncthreads();
// multiply 32x32 diag block * x
psum = MAGMA_C_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2*4 + j) * sx_blk[half_NB_X + ty2*4 + j];
}
__syncthreads();
// store partial row sums
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
if ( ty2 == 1 ) {
total = sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to off-diag 32x32 block
A -= half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + ty2)
// load 32x32 block of A into sA,
// as four 32x8 sections one after another:
// columns 0:7, then 8:15, then 16:23, then 24:31
if ( partial ) {
if ( tx2 + half_NB_X >= partial ) {
A = A - (tx2 + half_NB_X) + (partial - 1);
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_C_ZERO;
}
}
if ( tx2 + half_NB_X >= partial ) {
A = A + (tx2 + half_NB_X) - (partial - 1);
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// multiply 32x32 block (below diag)
psum = MAGMA_C_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2 + j*8) * sx_blk[j*8 + ty2];
}
//__syncthreads(); // no sync needed here
// multiply transposed 32x32 block (above diag)
psum_t = MAGMA_C_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum_t += MAGMA_C_CONJ( sA32(ty2*4 + j, tx2) ) * sx_blk[half_NB_X + ty2*4 + j];
}
__syncthreads();
// store partial sums for non-transposed 32x32 block
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
if ( ty2 == 1 ) {
total = total
+ sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// store partial sums for transposed 32x32 block
sA32(ty2, tx2) = psum_t;
__syncthreads();
// sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
if ( ty2 == 0 ) {
total = total
+ sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to leftmost 64x64 block in block row, and
// switch thread offset from (tx2,ty2) 32x8 block to (tx,ty) 64x4 block
A -= half_NB_X; // A is A(blk_ind + tx2, blk_ind + ty2)
A -= (blk/ngpu)*NB_X*lda; // A is A(blk_ind + tx2, ty2)
}
// finish switching thread offset
A -= ty2*lda + tx2; // A is A(blk_ind, 0)
A += 4*ty*lda + tx; // A is A(blk_ind + tx, 4*ty)
if ( partial && tx >= partial ) {
A = A - tx + (partial - 1); // A is A(blk_ind + partial-1, 4*ty), the bottom-most valid row
}
x -= blk_ind*incx; // x is x(tx)
// 16x16 thread block
const int tx4 = td % quarter_NB_X;
const int ty4 = td / quarter_NB_X;
// cycle over blocks jj left of diagonal, in block row blk
for (int jj=my_gpu_id; jj < blk; jj += ngpu) {
// load 64x1 block x(jj_ind + 0:63) into sx_jj
// since this block is left of diagonal, x must have all NB rows
// only the first block column (jj=0, on GPU 0) deals with offset
if ( ty == 0 ) {
if ( jj == 0 && tx < block_offset ) {
sx_jj[tx] = MAGMA_C_ZERO;
}
else {
sx_jj[tx] = x[jj*NB_X*incx];
}
}
__syncthreads();
for (int k=0; k < 4; k++) {
// load 64x16 block of A into rA, 4 elements per thread,
// as four 64x4 sections in parallel:
// columns 0,4,8,12; then 1,5,9,13; then 2,6,10,14; then 3,7,11,15
// since this block is left of diagonal, it has all NB columns,
// and block of x must have all NB rows.
#pragma unroll
for (int j=0; j < 4; j++) {
rA[j] = A[j*lda];
}
// 1) multiply 64x16 block A_{blk,jj} * x_jj
// each thread does partial row rA(tx + 16*k, ty*4 + 16*k : ty*4 + 3 + 16*k)
// 2) multiply transposed 16x64 block A_{blk,jj}^H * x_blk,
// storing each product Aji*xi to sA(j,i)
#pragma unroll
for (int j=0; j < 4; j++) {
total += rA[j] * sx_jj[quarter_NB_X*k + ty*4 + j]; // y_blk = A_{blk,jj} * x_jj
sA16(ty*4 + j, tx) = MAGMA_C_CONJ( rA[j] ) * sx_blk[tx]; // y_jj = A_{blk,jj}^H * x_blk
}
__syncthreads();
// do partial row sums for transposed 16x64 result
// use 16x16 thread grid (tx4, ty4) instead of 64x4 (tx, ty)
// sum sixteen 16x4 sections in parallel:
// columns 0,4,8,...,60; then 1,5,...,61; then 2,6,...,62; then 3,7,...,63
psum_t = MAGMA_C_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum_t += sA16(tx4, ty4*4 + j);
}
__syncthreads();
// store partial row sums of transposed result, y_jj (locally)
psums_t[k] = psum_t;
// move right to next 64x16 block
A += lda * quarter_NB_X; // A is A(blk_ind + tx#, jj*NB_x + (k+1)*NB_X/4 + 4*ty), # tx or partial
}
// already at next 64x64 block
// A is A(blk_ind + tx#, (jj+1)*NB_x + 4*ty), # tx or partial
// store partial row sums of transposed result, y_jj
#pragma unroll
for (int k=0; k < 4; k++) {
sA16(tx4, ty4 + quarter_NB_X*k) = psums_t[k];
}
__syncthreads();
// sum up partial row sums of transposed result, y_jj, and store final total to workspace
// thread (tx4,ty4) where ty4 < 4 sums row tx4 + ty4*16
// since this is the transposed block above the diagonal, it must have all NB rows
if ( ty4 < 4 ) {
int ty4_nb4 = ty4*quarter_NB_X;
psum_t = sA16(tx4, 0 + ty4_nb4) + sA16(tx4, 1 + ty4_nb4)
+ sA16(tx4, 2 + ty4_nb4) + sA16(tx4, 3 + ty4_nb4)
+ sA16(tx4, 4 + ty4_nb4) + sA16(tx4, 5 + ty4_nb4)
+ sA16(tx4, 6 + ty4_nb4) + sA16(tx4, 7 + ty4_nb4)
+ sA16(tx4, 8 + ty4_nb4) + sA16(tx4, 9 + ty4_nb4)
+ sA16(tx4, 10 + ty4_nb4) + sA16(tx4, 11 + ty4_nb4)
+ sA16(tx4, 12 + ty4_nb4) + sA16(tx4, 13 + ty4_nb4)
+ sA16(tx4, 14 + ty4_nb4) + sA16(tx4, 15 + ty4_nb4);
work[jj*NB_X + tx4 + ty4_nb4] = psum_t; // store at work( jj*NB_X + tx4 + ty4*16, blk )
}
__syncthreads();
}
// store row sums
sA16(ty, tx) = total;
__syncthreads();
// sum up final total, y_blk, for row tx
if ( ty == 0 && (partial == 0 || tx < partial) ) {
total = sA16(0, tx)
+ sA16(1, tx)
+ sA16(2, tx)
+ sA16(3, tx);
work[blk*NB_X + tx] = total; // store at work( blk*NB_X + tx, blk )
}
#endif /* (__CUDA_ARCH__ >= 200) */
}
// end chemv_kernel_L_mgpu
/***************************************************************************//**
Lower case, sum up partial results per GPU.
Each block sums one block row; each thread sums one row.
On input (for 3 blocks):
[ (A11*x1) (A21^H*x2) (A31^H*x3) ]
work = [ --- (A21*x1 + A22*x2) (A32^H*x3) ]
[ --- --- (A31*x1 + A32*x2 + A33*x3) ]
On output:
[ (A11*x1) + (A21^H*x2) + (A31^H*x3) ]
y = alpha*[ (A21*x1 + A22*x2) + (A32^H*x3) ]
[ (A31*x1 + A32*x2 + A33*x3) ]
Note beta*y is not included here; see magmablas_chemv_mgpu_sync.
The above workspace is distributed over multiple GPUs as diagrammed for 5 blocks:
[ * x x x x ] blk=0 * data for non-transposed row w_blk = A_{blk,1:blk} * x_{1:blk}
work[gpu=0] = [ * ] blk=1 x data for transposed block w_jj = A_{blk,jj}^H * x_{blk}
[ * x x ] blk=2 blanks are not set
[ * ] blk=3
[ * ] blk=4
[ ] blk=0 (blank)
work[gpu=1] = [ * x x x ] blk=1
[ * ] blk=2
[ * x ] blk=3
[ * ] blk=4
On output, rows across are summed up.
Entries left of the diagonal blocks are not accessed.
Blank rows, where a GPU has no data to contribute, are explicitly set to zero in y.
[ * + x + x + x ]
y[gpu=0] = [ * ]
[ * + x ]
[ * ]
[ 0 ] (explicitly set to 0)
y[gpu=1] = [ * + x + x ]
[ * ]
[ * ]
*******************************************************************************/
__global__ void
chemv_kernel_L_mgpu_sum(
int n,
magmaFloatComplex alpha,
int lda,
magmaFloatComplex * __restrict__ y, int incy,
magmaFloatComplex const * __restrict__ work,
int my_gpu_id,
int ngpu,
int block_offset)
{
int tx = threadIdx.x;
int blk = blockIdx.x;
int blk_ind = blk * NB_X;
int ind = blk_ind + tx;
int blocks = gridDim.x;
// Don't write outside [block_offset, ..., n+block_offset)
if ( ind >= block_offset && ind < n+block_offset ) {
magmaFloatComplex Ax = MAGMA_C_ZERO;
// GPUs are renumbered so that GPU 0 starts with block 0,
// GPU 1 starts with block 1, etc.,
// therefore only blk >= my_gpu_id have non-zero data.
if ( blk >= my_gpu_id ) {
work += ind;
// if this GPU owns block-column blk, all blocks j=[blk, ..., blocks) contain data;
// else only block j=blk contains data.
int last = blocks-1;
if ( blk % ngpu != my_gpu_id ) {
last = blk;
}
for (int j = blk; j <= last; ++j) {
Ax += work[j*lda];
}
}
y[ind * incy] = alpha*Ax; // see magmablas_chemv_sync for beta*y
}
}
// end chemv_kernel_L_mgpu_sum
/***************************************************************************//**
Purpose
-------
magmablas_chemv_mgpu performs the matrix-vector operation:
y := alpha*A*x + beta*y,
where alpha and beta are scalars, x and y are n element vectors and
A is an n by n Hermitian matrix.
Arguments
----------
@param[in]
uplo magma_uplo_t.
On entry, UPLO specifies whether the upper or lower
triangular part of the array A is to be referenced as
follows:
- = MagmaUpper: Only the upper triangular part of A is to be referenced. **Not currently supported.**
- = MagmaLower: Only the lower triangular part of A is to be referenced.
@param[in]
n INTEGER.
On entry, N specifies the order of the matrix A.
N must be at least zero.
@param[in]
alpha COMPLEX.
On entry, ALPHA specifies the scalar alpha.
@param[in]
d_lA Array of pointers, dimension (ngpu), to block-column distributed
matrix A, with block size nb.
d_lA[dev] is a COMPLEX array on GPU dev, of
dimension (LDDA, nlocal), where
\n
{ floor(n/nb/ngpu)*nb + nb if dev < floor(n/nb) % ngpu,
nlocal = { floor(n/nb/ngpu)*nb + n%nb if dev == floor(n/nb) % ngpu,
{ floor(n/nb/ngpu)*nb otherwise.
\n
Before entry with UPLO = MagmaUpper, the leading n by n
upper triangular part of the array A must contain the upper
triangular part of the Hermitian matrix and the strictly
lower triangular part of A is not referenced.
Before entry with UPLO = MagmaLower, the leading n by n
lower triangular part of the array A must contain the lower
triangular part of the Hermitian matrix and the strictly
upper triangular part of A is not referenced.
Note that the imaginary parts of the diagonal elements need
not be set and are assumed to be zero.
@param[in]
offset INTEGER.
Row & column offset to start of matrix A within the distributed d_lA
structure. Note that N is the size of this multiply, excluding the
offset, so the size of the original parent matrix is N+offset.
Also, x and y do not have an offset.
@param[in]
ldda INTEGER.
On entry, LDDA specifies the first dimension of A as declared
in the calling (sub) program. LDDA must be at least
max( 1, n + offset ).
It is recommended that ldda be a multiple of 16; otherwise
performance may degrade because the memory accesses
would not be fully coalesced.
@param[in]
x COMPLEX array **on the CPU** (not the GPU), of dimension at least
( 1 + ( n - 1 )*abs( INCX ) ).
Before entry, the incremented array X must contain the n
element vector x.
@param[in]
incx INTEGER.
On entry, INCX specifies the increment for the elements of
X. INCX must not be zero.
@param[in]
beta COMPLEX.
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then Y need not be set on input.
@param[in,out]
y COMPLEX array **on the CPU** (not the GPU), of dimension at least
( 1 + ( n - 1 )*abs( INCY ) ).
Before entry, the incremented array Y must contain the n
element vector y. On exit, Y is overwritten by the updated
vector y.
@param[in]
incy INTEGER.
On entry, INCY specifies the increment for the elements of
Y. INCY must not be zero.
@param
hwork (workspace) COMPLEX array on the CPU, of dimension (lhwork).
@param[in]
lhwork INTEGER.
The dimension of the array hwork. lhwork >= ngpu*nb.
@param
dwork (workspaces) Array of pointers, dimension (ngpu), to workspace on each GPU.
dwork[dev] is a COMPLEX array on GPU dev, of dimension (ldwork).
@param[in]
ldwork INTEGER.
The dimension of each array dwork[dev].
ldwork >= ldda*( ceil((n + offset % nb) / nb) + 1 ).
@param[in]
ngpu INTEGER.
The number of GPUs to use.
@param[in]
nb INTEGER.
The block size used for distributing d_lA. Must be 64.
@param[in]
queues magma_queue_t array of dimension (ngpu).
queues[dev] is an execution queue on GPU dev.
@ingroup magma_hemv
*******************************************************************************/
extern "C"
magma_int_t
magmablas_chemv_mgpu(
magma_uplo_t uplo,
magma_int_t n,
magmaFloatComplex alpha,
magmaFloatComplex_const_ptr const d_lA[], magma_int_t ldda,
magma_int_t offset,
magmaFloatComplex const *x, magma_int_t incx,
magmaFloatComplex beta, // unused, see magmablas_chemv_mgpu_sync
magmaFloatComplex *y, magma_int_t incy, // unused
magmaFloatComplex *hwork, magma_int_t lhwork,
magmaFloatComplex_ptr dwork[], magma_int_t ldwork,
magma_int_t ngpu,
magma_int_t nb,
magma_queue_t queues[] )
{
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
// --------------------
// no CUDA ARCH 1.x version
fprintf( stderr, "%s not supported on CUDA arch 1.x\n", __func__ );
return MAGMA_ERR_NOT_SUPPORTED;
}
// --------------------
// CUDA ARCH 2.x (Fermi) version
bool upper = (uplo == MagmaUpper);
magma_int_t offset_block_id = offset / NB_X;
magma_int_t offset_gpu_id = offset_block_id % ngpu;
magma_int_t block_offset = offset % NB_X;
magma_int_t blocks = magma_ceildiv( n + block_offset, NB_X );
magma_int_t ldwmin = ldda*(blocks + 1);
magma_int_t lhwmin = n*ngpu;
/*
* Test the input parameters.
*/
magma_int_t info = 0;
if ( (! upper) && (uplo != MagmaLower) ) {
info = -1;
} else if ( n < 0 ) {
info = -2;
} else if ( ldda < max(1,n+offset) ) {
info = -5;
} else if ( offset < 0 ) {
info = -6;
} else if ( incx == 0 ) {
info = -8;
} else if ( incy == 0 ) {
info = -11;
} else if ( lhwork < lhwmin ) {
info = -13;
} else if ( ldwork < ldwmin ) {
info = -15;
} else if ( ngpu < 1 ) {
info = -16;
} else if ( nb != NB_X ) {
info = -17;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return info;
}
/*
* Quick return if possible.
*/
if ( n == 0 )
return info;
magma_device_t orig_dev;
magma_getdevice( &orig_dev );
magma_int_t dev;
for (dev=0; dev < ngpu; dev++) {
magma_setdevice( dev );
// blocks before the offset block
magma_int_t num_blocks_skipped = offset_block_id / ngpu;
if ( dev < offset_gpu_id ) {
num_blocks_skipped += 1;
}
// shift dA to first block >= offset block that is owned by this GPU
magmaFloatComplex const *dA_dev = d_lA[dev] + offset_block_id*NB_X + num_blocks_skipped*NB_X*ldda;
// first column of dwork is to broadcast x to all GPUs.
// remaining blocks number of columns is for partial sums from
// each block, as in single GPU version.
magmaFloatComplex *dx_dev = dwork[dev];
magmaFloatComplex *dwork_dev = dwork[dev] + ldda;
// renumber GPUs starting from the offset block
magma_int_t new_gpu_id = (dev + ngpu - offset_gpu_id) % ngpu;
dim3 grid( blocks, 1 );
// copy x to each GPU
magma_csetvector_async( n, x, incx, dx_dev + block_offset, 1, queues[dev] );
// perform work = A*x, partial row sums
dim3 threads( NB_X, NB_Y );
// perform w = sum( work ), larger partial row sums
dim3 threads_sum( NB_X, 1 );
if ( upper ) {
chemv_kernel_U_mgpu<<< grid, threads, 0, queues[dev]->cuda_stream() >>>(
n, dA_dev, ldda, dx_dev, 1, dwork_dev,
new_gpu_id, ngpu, block_offset );
chemv_kernel_U_mgpu_sum<<< grid, threads_sum, 0, queues[dev]->cuda_stream() >>>(
n, alpha, ldda, dx_dev, 1, dwork_dev,
new_gpu_id, ngpu, block_offset );
}
else {
chemv_kernel_L_mgpu<<< grid, threads, 0, queues[dev]->cuda_stream() >>>(
n, dA_dev, ldda, dx_dev, 1, dwork_dev,
new_gpu_id, ngpu, block_offset );
chemv_kernel_L_mgpu_sum<<< grid, threads_sum, 0, queues[dev]->cuda_stream() >>>(
n, alpha, ldda, dx_dev, 1, dwork_dev,
new_gpu_id, ngpu, block_offset );
}
}
// 2nd loop in case hwork is not pinned, causing this to be sync instead of async.
for (dev=0; dev < ngpu; dev++) {
// copy w to CPU
magma_setdevice( dev );
magmaFloatComplex *dx_dev = dwork[dev];
magma_cgetvector_async( n, dx_dev + block_offset, 1, &hwork[dev*n], 1, queues[dev] );
}
// see magmablas_chemv_mgpu_sync for final row sums
magma_setdevice( orig_dev );
return info;
}
/***************************************************************************//**
Synchronizes and accumulates final chemv result.
For convenience, the parameters are identical to magmablas_chemv_mgpu
(though some are unused here).
@see magmablas_chemv_mgpu
@ingroup magma_hemv
*******************************************************************************/
extern "C" magma_int_t
magmablas_chemv_mgpu_sync(
magma_uplo_t uplo, // unused, see magmablas_chemv_mgpu
magma_int_t n,
magmaFloatComplex alpha, // unused
magmaFloatComplex_const_ptr const d_lA[], magma_int_t ldda,
magma_int_t offset, // unused
magmaFloatComplex const *x, magma_int_t incx, // unused
magmaFloatComplex beta,
magmaFloatComplex *y, magma_int_t incy, // unused
magmaFloatComplex *hwork, magma_int_t lhwork,
magmaFloatComplex_ptr dwork[], magma_int_t ldwork, // unused
magma_int_t ngpu,
magma_int_t nb, // unused
magma_queue_t queues[] )
{
const magmaFloatComplex c_one = MAGMA_C_ONE;
const magma_int_t ione = 1;
magma_device_t dev;
magma_int_t lhwmin = n*ngpu;
/*
* Test the input parameters.
*/
magma_int_t info = 0;
//if ( (! upper) && (uplo != MagmaLower) ) { // unused
// info = -1;
//} else
if ( n < 0 ) {
info = -2;
} else if ( ldda < max(1,n+offset) ) {
info = -5;
} else if ( offset < 0 ) {
info = -6;
} else if ( incx == 0 ) {
info = -8;
} else if ( incy == 0 ) {
info = -11;
} else if ( lhwork < lhwmin ) {
info = -13;
//} else if ( ldwork < ldwmin ) { // unused
// info = -15;
} else if ( ngpu < 1 ) {
info = -16;
} else if ( nb != NB_X ) {
info = -17;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return info;
}
/*
* Quick return if possible.
*/
if ( n == 0 )
return info;
magma_device_t orig_dev;
magma_getdevice( &orig_dev );
// scale y = beta*y
blasf77_cscal( &n, &beta, y, &incy );
// sum reduce, y += sum( hwork )
for (dev=0; dev < ngpu; ++dev) {
magma_setdevice( dev );
magma_queue_sync( queues[dev] );
blasf77_caxpy( &n, &c_one, &hwork[dev*n], &ione, y, &ione );
}
magma_setdevice( orig_dev );
return info;
}
|
b83052d77595566071f0e7a712de08d432c5f738.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void bcnn_forward_upsample_cuda_kernel(size_t dst_sz, float *src, int w, int h, int c, int n, int size, float *dst) {
size_t i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
if (i >= dst_sz) {
return;
}
int dst_idx = i;
int dst_w = i % (w * size);
i = i / (w * size);
int dst_h = i % (h * size);
i = i / (h * size);
int dst_c = i % c;
i = i / c;
int b = i % n;
int src_w = dst_w / size;
int src_h = dst_h / size;
int src_c = dst_c;
int src_idx = b * w * h * c + src_c * w * h + src_h * w + src_w;
dst[dst_idx] += src[src_idx];
} | b83052d77595566071f0e7a712de08d432c5f738.cu | #include "includes.h"
__global__ void bcnn_forward_upsample_cuda_kernel(size_t dst_sz, float *src, int w, int h, int c, int n, int size, float *dst) {
size_t i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
if (i >= dst_sz) {
return;
}
int dst_idx = i;
int dst_w = i % (w * size);
i = i / (w * size);
int dst_h = i % (h * size);
i = i / (h * size);
int dst_c = i % c;
i = i / c;
int b = i % n;
int src_w = dst_w / size;
int src_h = dst_h / size;
int src_c = dst_c;
int src_idx = b * w * h * c + src_c * w * h + src_h * w + src_w;
dst[dst_idx] += src[src_idx];
} |
4ec7976f9339acd8ad1f2df86e07ac1cc9a0b796.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <sys/time.h>
#define NSTREAMS 2
__host__ void init(float *out, int sz)
{
for(int i = 0 ; i < sz ; i++)
{
out[i] = sin(float(i));
}
}
__host__ void verif(float *out, int sz)
{
float err = 0.;
for(int i = 0 ; i < sz ; i++)
{
err += abs(out[i] - exp( - abs(sin(float(i))) ));
}
if (err/sz < 1.e-4)
{
printf("TEST PASSED (error %3.f < 1.e-4)\n", err/sz);
}
else
{
printf("TEST FAILED (error %3.f > 1.e-4)\n", err/sz);
}
}
__global__ void MyKernel(float *out, float *in, int sz)
{
for(
int i = blockIdx.x * blockDim.x + threadIdx.x ;
i < sz ;
i += blockDim.x * gridDim.x )
{
out[i] = exp( - abs(in[i]) );
}
}
int main(int argc, char **argv)
{
int size, nblocks, nthreads = 128;
if (argc == 2)
size = atoi(argv[1]);
else
size = 1000;
nblocks = (size + nthreads-1) / nthreads;
printf("size : %d\n", size);
printf("NSTREAMS : %d\n", NSTREAMS);
printf("Taille des tableaux : %d\n", NSTREAMS*size);
printf("nblocks : %d\n", nblocks);
printf("nthreads : %d\n", nthreads);
float *hostPtr, *inputDevPtr, *outputDevPtr;
struct timeval time1, time2;
gettimeofday(&time1, NULL);
hipMalloc((void**)&inputDevPtr, NSTREAMS * size * sizeof(float));
hipMalloc((void**)&outputDevPtr, NSTREAMS * size * sizeof(float));
hipHostMalloc((void**)&hostPtr, NSTREAMS * size * sizeof(float));
init(hostPtr, NSTREAMS * size);
hipStream_t stream[NSTREAMS];
for (int i = 0; i < NSTREAMS; ++i)
hipStreamCreate(&stream[i]);
for (int i = 0; i < NSTREAMS; ++i)
hipMemcpyAsync(inputDevPtr + i * size, hostPtr + i * size, size*sizeof(float), hipMemcpyHostToDevice, stream[i]);
for (int i = 0; i < NSTREAMS; ++i)
hipLaunchKernelGGL(( MyKernel), dim3(nblocks), dim3(nthreads), 0, stream[i], outputDevPtr + i * size, inputDevPtr + i * size, size);
for (int i = 0; i < NSTREAMS; ++i)
hipMemcpyAsync(hostPtr + i * size, outputDevPtr + i * size, size*sizeof(float), hipMemcpyDeviceToHost, stream[i]);
hipDeviceSynchronize();
for (int i = 0; i < NSTREAMS; ++i)
hipStreamDestroy(stream[i]);
verif(hostPtr, NSTREAMS*size);
hipFree(inputDevPtr);
hipFree(outputDevPtr);
hipHostFree(hostPtr);
gettimeofday(&time2, NULL);
printf("KERNEL EXECUTION TIME:-- %d s %d us --\n", time2.tv_sec - time1.tv_sec, time2.tv_usec - time1.tv_usec);
return 0;
}
| 4ec7976f9339acd8ad1f2df86e07ac1cc9a0b796.cu | #include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <sys/time.h>
#define NSTREAMS 2
__host__ void init(float *out, int sz)
{
for(int i = 0 ; i < sz ; i++)
{
out[i] = sin(float(i));
}
}
__host__ void verif(float *out, int sz)
{
float err = 0.;
for(int i = 0 ; i < sz ; i++)
{
err += abs(out[i] - exp( - abs(sin(float(i))) ));
}
if (err/sz < 1.e-4)
{
printf("TEST PASSED (error %3.f < 1.e-4)\n", err/sz);
}
else
{
printf("TEST FAILED (error %3.f > 1.e-4)\n", err/sz);
}
}
__global__ void MyKernel(float *out, float *in, int sz)
{
for(
int i = blockIdx.x * blockDim.x + threadIdx.x ;
i < sz ;
i += blockDim.x * gridDim.x )
{
out[i] = exp( - abs(in[i]) );
}
}
int main(int argc, char **argv)
{
int size, nblocks, nthreads = 128;
if (argc == 2)
size = atoi(argv[1]);
else
size = 1000;
nblocks = (size + nthreads-1) / nthreads;
printf("size : %d\n", size);
printf("NSTREAMS : %d\n", NSTREAMS);
printf("Taille des tableaux : %d\n", NSTREAMS*size);
printf("nblocks : %d\n", nblocks);
printf("nthreads : %d\n", nthreads);
float *hostPtr, *inputDevPtr, *outputDevPtr;
struct timeval time1, time2;
gettimeofday(&time1, NULL);
cudaMalloc((void**)&inputDevPtr, NSTREAMS * size * sizeof(float));
cudaMalloc((void**)&outputDevPtr, NSTREAMS * size * sizeof(float));
cudaMallocHost((void**)&hostPtr, NSTREAMS * size * sizeof(float));
init(hostPtr, NSTREAMS * size);
cudaStream_t stream[NSTREAMS];
for (int i = 0; i < NSTREAMS; ++i)
cudaStreamCreate(&stream[i]);
for (int i = 0; i < NSTREAMS; ++i)
cudaMemcpyAsync(inputDevPtr + i * size, hostPtr + i * size, size*sizeof(float), cudaMemcpyHostToDevice, stream[i]);
for (int i = 0; i < NSTREAMS; ++i)
MyKernel<<<nblocks, nthreads, 0, stream[i]>>> (outputDevPtr + i * size, inputDevPtr + i * size, size);
for (int i = 0; i < NSTREAMS; ++i)
cudaMemcpyAsync(hostPtr + i * size, outputDevPtr + i * size, size*sizeof(float), cudaMemcpyDeviceToHost, stream[i]);
cudaThreadSynchronize();
for (int i = 0; i < NSTREAMS; ++i)
cudaStreamDestroy(stream[i]);
verif(hostPtr, NSTREAMS*size);
cudaFree(inputDevPtr);
cudaFree(outputDevPtr);
cudaFreeHost(hostPtr);
gettimeofday(&time2, NULL);
printf("KERNEL EXECUTION TIME:-- %d s %d us --\n", time2.tv_sec - time1.tv_sec, time2.tv_usec - time1.tv_usec);
return 0;
}
|
2411b04c66f9d65f616ddacd8cd341ca4f18c297.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void kLogisticDeriv(float* a, float* b, float* dest, unsigned int numEls) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = a[i] * b[i] * (1.0 - b[i]);
}
} | 2411b04c66f9d65f616ddacd8cd341ca4f18c297.cu | #include "includes.h"
__global__ void kLogisticDeriv(float* a, float* b, float* dest, unsigned int numEls) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = a[i] * b[i] * (1.0 - b[i]);
}
} |
3cbda6421315d5fc44c6561a9310420d865a63df.hip | // !!! This is a file automatically generated by hipify!!!
// In this assignment you will write a kernel for decimation of an integer
// array by factor of two (which sums two neighbouring integer numbers into
// one). More description is in notes for this practical.
// Your tasks are:
// 1) to calculate decimation by factor of two using GPU and store
// the result to the host.
// 2) to calculate decimation by factor of four using GPU and store
// the result to the host.
// To do that you have to:
// 1) write the host code to manage memory, data initialization and data
// transfers
// 2) write GPU kernel which is appropriate for the tasks
// 3) configure grid for your GPU kernel
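// A tiny worked example of the decimation described above (added for clarity,
// not part of the original assignment text): for the input {1, 2, 3, 4, 5, 6, 7, 8},
// decimation by a factor of two gives {3, 7, 11, 15}, and decimation by a
// factor of four gives {10, 26}.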
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
int Compare_results(int *GPU_output, int *h_input, size_t size, int DIT_factor){
int itemp;
size_t half_size;
int error = 0;
int nErrors = 0;
int cislo = 0;
half_size = size/DIT_factor;
for(size_t f=0; f<half_size; f++){
itemp=0;
for(int t=0; t<DIT_factor; t++){
itemp = itemp + h_input[f*DIT_factor + t];
}
error = (GPU_output[f]-itemp);
if(error!=0 && cislo<20) {
printf("f: %d; error: %d; GPU: %d; CPU: %d\n", f, error, GPU_output[f], itemp);
cislo++;
}
if(error!=0) nErrors++;
}
return(nErrors);
}
void Check_errors(int *GPU_sum2, int *GPU_sum4, int *h_input, size_t size){
int nErrors = 0;
printf("Checking result for sum of two elements...\n");
nErrors = Compare_results(GPU_sum2, h_input, size, 2);
if(nErrors>0) printf("Correct solution and your solution does not match!\n");
else printf("Your solution is correct!\n");
printf("Checking result for sum of four elements...\n");
nErrors = Compare_results(GPU_sum4, h_input, size, 4);
if(nErrors>0) printf("Correct solution and your solution does not match!\n");
else printf("Your solution is correct!\n");
}
//----------------------------------------------------------------------
// TASK: Write your decimation GPU kernel
// When writing your kernel think carefully what each thread needs to read
// and what is it writes out and to where.
// Also consider what other threads might be reading and writing and where.
// put your kernel here
__global__ void decimate(int *d_In, int *d_Out, size_t n_out) {
	size_t index = 2 * (blockIdx.x*blockDim.x + threadIdx.x);
	if (index < 2*n_out) d_Out[index/2] = d_In[index] + d_In[index + 1];
}
//----------------------------------------------------------------------
int main(void) {
// Size of the array
size_t N = 67108864;
// Declaration of arrays used in this practical
int *h_GPU_sum2, *h_GPU_sum4, *h_input;
// memory allocation
h_GPU_sum2 = (int*) malloc(N*sizeof(*h_GPU_sum2));
h_GPU_sum4 = (int*) malloc(N*sizeof(*h_GPU_sum4));
h_input = (int*) malloc(N*sizeof(*h_input));
if(h_GPU_sum2==NULL || h_GPU_sum4==NULL || h_input==NULL) return(1);
// initiate host data
for(size_t f=0; f<N; f++) {
h_input[f] = f;
}
//----------------------------------------------------------------------
// TASK: Write host code
// Do not forget to initialize the device.
// Remember that configuration of your grid depends on how you have
// written your decimation kernel
// finish the host code here
//----------------------------------------------------------------------
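	// A minimal sketch of the missing host code, added as a hedged illustration
	// rather than the official solution. It assumes the decimate kernel above
	// takes the number of output elements as a third argument, and it produces
	// the factor-of-four result by running the factor-of-two kernel twice.
	int *d_input, *d_sum2, *d_sum4;
	int nThreads = 256;
	hipSetDevice(0);
	hipMalloc((void**)&d_input, N*sizeof(int));
	hipMalloc((void**)&d_sum2, (N/2)*sizeof(int));
	hipMalloc((void**)&d_sum4, (N/4)*sizeof(int));
	hipMemcpy(d_input, h_input, N*sizeof(int), hipMemcpyHostToDevice);
	// One thread per output element for the factor-of-two decimation.
	hipLaunchKernelGGL(decimate, dim3((N/2 + nThreads - 1)/nThreads), dim3(nThreads), 0, 0,
	                   d_input, d_sum2, N/2);
	// Decimating the factor-of-two result once more gives the factor-of-four result.
	hipLaunchKernelGGL(decimate, dim3((N/4 + nThreads - 1)/nThreads), dim3(nThreads), 0, 0,
	                   d_sum2, d_sum4, N/4);
	hipMemcpy(h_GPU_sum2, d_sum2, (N/2)*sizeof(int), hipMemcpyDeviceToHost);
	hipMemcpy(h_GPU_sum4, d_sum4, (N/4)*sizeof(int), hipMemcpyDeviceToHost);
	hipFree(d_input);
	hipFree(d_sum2);
	hipFree(d_sum4);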
Check_errors(h_GPU_sum2, h_GPU_sum4, h_input, N);
// free memory on the host and the device
free(h_GPU_sum2);
free(h_GPU_sum4);
free(h_input);
hipDeviceReset();
return (0);
}
| 3cbda6421315d5fc44c6561a9310420d865a63df.cu | // In this assignment you will write a kernel for decimation of an integer
// array by factor of two (which sums two neighbouring integer numbers into
// one). More description is in notes for this practical.
// Your tasks are:
// 1) to calculate decimation by factor of two using GPU and store
// the result to the host.
// 2) to calculate decimation by factor of four using GPU and store
// the result to the host.
// To do that you have to:
// 1) write the host code to manage memory, data initialization and data
// transfers
// 2) write GPU kernel which is appropriate for the tasks
// 3) configure grid for your GPU kernel
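// A tiny worked example of the decimation described above (added for clarity,
// not part of the original assignment text): for the input {1, 2, 3, 4, 5, 6, 7, 8},
// decimation by a factor of two gives {3, 7, 11, 15}, and decimation by a
// factor of four gives {10, 26}.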
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
int Compare_results(int *GPU_output, int *h_input, size_t size, int DIT_factor){
int itemp;
size_t half_size;
int error = 0;
int nErrors = 0;
int cislo = 0;
half_size = size/DIT_factor;
for(size_t f=0; f<half_size; f++){
itemp=0;
for(int t=0; t<DIT_factor; t++){
itemp = itemp + h_input[f*DIT_factor + t];
}
error = (GPU_output[f]-itemp);
if(error!=0 && cislo<20) {
printf("f: %d; error: %d; GPU: %d; CPU: %d\n", f, error, GPU_output[f], itemp);
cislo++;
}
if(error!=0) nErrors++;
}
return(nErrors);
}
void Check_errors(int *GPU_sum2, int *GPU_sum4, int *h_input, size_t size){
int nErrors = 0;
printf("Checking result for sum of two elements...\n");
nErrors = Compare_results(GPU_sum2, h_input, size, 2);
if(nErrors>0) printf("Correct solution and your solution does not match!\n");
else printf("Your solution is correct!\n");
printf("Checking result for sum of four elements...\n");
nErrors = Compare_results(GPU_sum4, h_input, size, 4);
if(nErrors>0) printf("Correct solution and your solution does not match!\n");
else printf("Your solution is correct!\n");
}
//----------------------------------------------------------------------
// TASK: Write your decimation GPU kernel
// When writing your kernel think carefully what each thread needs to read
// and what is it writes out and to where.
// Also consider what other threads might be reading and writing and where.
// put your kernel here
__global__ void decimate(int *d_In, int *d_Out, size_t n_out) {
	size_t index = 2 * (blockIdx.x*blockDim.x + threadIdx.x);
	if (index < 2*n_out) d_Out[index/2] = d_In[index] + d_In[index + 1];
}
//----------------------------------------------------------------------
int main(void) {
// Size of the array
size_t N = 67108864;
// Declaration of arrays used in this practical
int *h_GPU_sum2, *h_GPU_sum4, *h_input;
// memory allocation
h_GPU_sum2 = (int*) malloc(N*sizeof(*h_GPU_sum2));
h_GPU_sum4 = (int*) malloc(N*sizeof(*h_GPU_sum4));
h_input = (int*) malloc(N*sizeof(*h_input));
if(h_GPU_sum2==NULL || h_GPU_sum4==NULL || h_input==NULL) return(1);
// initiate host data
for(size_t f=0; f<N; f++) {
h_input[f] = f;
}
//----------------------------------------------------------------------
// TASK: Write host code
// Do not forget to initialize the device.
// Remember that configuration of your grid depends on how you have
// written your decimation kernel
// finish the host code here
//----------------------------------------------------------------------
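	// A minimal sketch of the missing host code, added as a hedged illustration
	// rather than the official solution. It assumes the decimate kernel above
	// takes the number of output elements as a third argument, and it produces
	// the factor-of-four result by running the factor-of-two kernel twice.
	int *d_input, *d_sum2, *d_sum4;
	int nThreads = 256;
	cudaSetDevice(0);
	cudaMalloc((void**)&d_input, N*sizeof(int));
	cudaMalloc((void**)&d_sum2, (N/2)*sizeof(int));
	cudaMalloc((void**)&d_sum4, (N/4)*sizeof(int));
	cudaMemcpy(d_input, h_input, N*sizeof(int), cudaMemcpyHostToDevice);
	// One thread per output element for the factor-of-two decimation.
	decimate<<<(N/2 + nThreads - 1)/nThreads, nThreads>>>(d_input, d_sum2, N/2);
	// Decimating the factor-of-two result once more gives the factor-of-four result.
	decimate<<<(N/4 + nThreads - 1)/nThreads, nThreads>>>(d_sum2, d_sum4, N/4);
	cudaMemcpy(h_GPU_sum2, d_sum2, (N/2)*sizeof(int), cudaMemcpyDeviceToHost);
	cudaMemcpy(h_GPU_sum4, d_sum4, (N/4)*sizeof(int), cudaMemcpyDeviceToHost);
	cudaFree(d_input);
	cudaFree(d_sum2);
	cudaFree(d_sum4);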
Check_errors(h_GPU_sum2, h_GPU_sum4, h_input, N);
// free memory on the host and the device
free(h_GPU_sum2);
free(h_GPU_sum4);
free(h_input);
cudaDeviceReset();
return (0);
}
|
a1daeb16c19e6000846bc3bb404e0384ac599acc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// This example demonstrates a parallel sum reduction
// using two kernel launches
#include <stdlib.h>
#include <stdio.h>
#include <vector>
#include <numeric>
#include <iostream>
float random_float(void)
{
return static_cast<float>(rand()) / RAND_MAX;
}
// this kernel computes, per-block, the sum
// of a block-sized portion of the input
// using a block-wide reduction
__global__ void block_sum(const float *input,
float *per_block_results,
const size_t n)
{
extern __shared__ float sdata[];
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
// load input into __shared__ memory
float x = 0;
if(i < n)
{
x = input[i];
}
sdata[threadIdx.x] = x;
__syncthreads();
// contiguous range pattern
for(int offset = blockDim.x / 2;
offset > 0;
offset >>= 1)
{
if(threadIdx.x < offset)
{
// add a partial sum upstream to our own
sdata[threadIdx.x] += sdata[threadIdx.x + offset];
}
// wait until all threads in the block have
// updated their partial sums
__syncthreads();
}
// thread 0 writes the final result
if(threadIdx.x == 0)
{
per_block_results[blockIdx.x] = sdata[0];
}
}
int main(void)
{
  // create an array of 256 (1<<8) elements
const int num_elements = 1<<8;
// generate random input on the host
std::vector<float> h_input(num_elements);
for(int i = 0; i < h_input.size(); ++i)
{
h_input[i] = random_float();
}
const float host_result = std::accumulate(h_input.begin(), h_input.end(), 0.0f);
std::cerr << "Host sum: " << host_result << std::endl;
// move input to device memory
float *d_input = 0;
hipMalloc((void**)&d_input, sizeof(float) * num_elements);
hipMemcpy(d_input, &h_input[0], sizeof(float) * num_elements, hipMemcpyHostToDevice);
const size_t block_size = 512;
const size_t num_blocks = (num_elements/block_size) + ((num_elements%block_size) ? 1 : 0);
// allocate space to hold one partial sum per block, plus one additional
// slot to store the total sum
float *d_partial_sums_and_total = 0;
hipMalloc((void**)&d_partial_sums_and_total, sizeof(float) * (num_blocks + 1));
// launch one kernel to compute, per-block, a partial sum
hipLaunchKernelGGL(( block_sum), dim3(num_blocks),dim3(block_size),block_size * sizeof(float), 0, d_input, d_partial_sums_and_total, num_elements);
// launch a single block to compute the sum of the partial sums
hipLaunchKernelGGL(( block_sum), dim3(1),dim3(num_blocks),num_blocks * sizeof(float), 0, d_partial_sums_and_total, d_partial_sums_and_total + num_blocks, num_blocks);
// copy the result back to the host
float device_result = 0;
hipMemcpy(&device_result, d_partial_sums_and_total + num_blocks, sizeof(float), hipMemcpyDeviceToHost);
std::cout << "Device sum: " << device_result << std::endl;
// deallocate device memory
hipFree(d_input);
hipFree(d_partial_sums_and_total);
return 0;
} | a1daeb16c19e6000846bc3bb404e0384ac599acc.cu | // This example demonstrates a parallel sum reduction
// using two kernel launches
#include <stdlib.h>
#include <stdio.h>
#include <vector>
#include <numeric>
#include <iostream>
float random_float(void)
{
return static_cast<float>(rand()) / RAND_MAX;
}
// this kernel computes, per-block, the sum
// of a block-sized portion of the input
// using a block-wide reduction
__global__ void block_sum(const float *input,
float *per_block_results,
const size_t n)
{
extern __shared__ float sdata[];
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
// load input into __shared__ memory
float x = 0;
if(i < n)
{
x = input[i];
}
sdata[threadIdx.x] = x;
__syncthreads();
// contiguous range pattern
for(int offset = blockDim.x / 2;
offset > 0;
offset >>= 1)
{
if(threadIdx.x < offset)
{
// add a partial sum upstream to our own
sdata[threadIdx.x] += sdata[threadIdx.x + offset];
}
// wait until all threads in the block have
// updated their partial sums
__syncthreads();
}
// thread 0 writes the final result
if(threadIdx.x == 0)
{
per_block_results[blockIdx.x] = sdata[0];
}
}
int main(void)
{
  // create an array of 256 (1<<8) elements
const int num_elements = 1<<8;
// generate random input on the host
std::vector<float> h_input(num_elements);
for(int i = 0; i < h_input.size(); ++i)
{
h_input[i] = random_float();
}
const float host_result = std::accumulate(h_input.begin(), h_input.end(), 0.0f);
std::cerr << "Host sum: " << host_result << std::endl;
// move input to device memory
float *d_input = 0;
cudaMalloc((void**)&d_input, sizeof(float) * num_elements);
cudaMemcpy(d_input, &h_input[0], sizeof(float) * num_elements, cudaMemcpyHostToDevice);
const size_t block_size = 512;
const size_t num_blocks = (num_elements/block_size) + ((num_elements%block_size) ? 1 : 0);
// allocate space to hold one partial sum per block, plus one additional
// slot to store the total sum
float *d_partial_sums_and_total = 0;
cudaMalloc((void**)&d_partial_sums_and_total, sizeof(float) * (num_blocks + 1));
// launch one kernel to compute, per-block, a partial sum
block_sum<<<num_blocks,block_size,block_size * sizeof(float)>>>(d_input, d_partial_sums_and_total, num_elements);
// launch a single block to compute the sum of the partial sums
block_sum<<<1,num_blocks,num_blocks * sizeof(float)>>>(d_partial_sums_and_total, d_partial_sums_and_total + num_blocks, num_blocks);
// copy the result back to the host
float device_result = 0;
cudaMemcpy(&device_result, d_partial_sums_and_total + num_blocks, sizeof(float), cudaMemcpyDeviceToHost);
std::cout << "Device sum: " << device_result << std::endl;
// deallocate device memory
cudaFree(d_input);
cudaFree(d_partial_sums_and_total);
return 0;
} |
d7a866643c936c5e3fd28f5d7ab4628e4b2ec75f.hip | // !!! This is a file automatically generated by hipify!!!
#include <memory>
#include <atomic>
#include "utils.h"
int nCudaDevices = 0;
std::atomic_flag *cudaDeviceLocks = NULL;
semaphore *cudaDeviceAccessSemaphore;
extern "C" bool initLibSort(void)
{
hipError_t res;
if(cudaDeviceLocks != NULL) {
fprintf(stderr, "Libsort: attempted to initialize multiple times!/n");
return false;
}
res = hipGetDeviceCount(&nCudaDevices);
if(res != hipSuccess) {
fprintf(stderr, "Failed to get device count (%d): %s\n", nCudaDevices, hipGetErrorString(res));
return false;
}
cudaDeviceLocks = new std::atomic_flag[nCudaDevices];
for(int i = 0; i < nCudaDevices; i++) {
cudaDeviceLocks[i].clear();
}
cudaDeviceAccessSemaphore = new semaphore(nCudaDevices);
return true;
}
bool cudaReservation::releaseDevice(void) {
if(deviceID >= 0) {
cudaDeviceAccessSemaphore->up();
cudaDeviceLocks[deviceID].clear();
}
return true;
}
bool cudaReservation::reserveDevice(void) {
cudaDeviceAccessSemaphore->down();
// After the semaphore, at least one device is guaranteed to be available
// (one of these CAS's will succeed)
for(int i = 0; i < nCudaDevices; i++) {
if(!cudaDeviceLocks[i].test_and_set()) {
auto res = hipSetDevice(i);
if(res != hipSuccess) {
fprintf(stderr, "Failed to set current devide to %d: %s\n", i, hipGetErrorString(res));
return false;
}
deviceID = i;
return true;
}
}
fprintf(stderr, "Failed to find available device (this shouldn't happen)");
return false;
}
// from https://en.wikipedia.org/wiki/Permuted_congruential_generator#Example_code
// using rand() is an order of magnitude slower and doesn't generate all 32bits.
#define rotr32(x, r) (x >> r | x << (-r & 31))
extern "C" void populateInput(uint32_t *arr, size_t nelem) {
static uint64_t state = 0x4d595df4d0f33173;
static uint64_t const multiplier = 6364136223846793005u;
static uint64_t const increment = 1442695040888963407u;
for(size_t i = 0; i < nelem; i++) {
uint64_t x = state;
unsigned count = (unsigned)(x >> 59);
state = x * multiplier + increment;
x ^= x >> 18;
arr[i] = rotr32((uint32_t)(x >> 27), count);
}
}
| d7a866643c936c5e3fd28f5d7ab4628e4b2ec75f.cu | #include <memory>
#include <atomic>
#include "utils.h"
int nCudaDevices = 0;
std::atomic_flag *cudaDeviceLocks = NULL;
semaphore *cudaDeviceAccessSemaphore;
extern "C" bool initLibSort(void)
{
cudaError_t res;
if(cudaDeviceLocks != NULL) {
fprintf(stderr, "Libsort: attempted to initialize multiple times!/n");
return false;
}
res = cudaGetDeviceCount(&nCudaDevices);
if(res != cudaSuccess) {
fprintf(stderr, "Failed to get device count (%d): %s\n", nCudaDevices, cudaGetErrorString(res));
return false;
}
cudaDeviceLocks = new std::atomic_flag[nCudaDevices];
for(int i = 0; i < nCudaDevices; i++) {
cudaDeviceLocks[i].clear();
}
cudaDeviceAccessSemaphore = new semaphore(nCudaDevices);
return true;
}
bool cudaReservation::releaseDevice(void) {
if(deviceID >= 0) {
cudaDeviceAccessSemaphore->up();
cudaDeviceLocks[deviceID].clear();
}
return true;
}
bool cudaReservation::reserveDevice(void) {
cudaDeviceAccessSemaphore->down();
// After the semaphore, at least one device is guaranteed to be available
// (one of these CAS's will succeed)
for(int i = 0; i < nCudaDevices; i++) {
if(!cudaDeviceLocks[i].test_and_set()) {
auto res = cudaSetDevice(i);
if(res != cudaSuccess) {
fprintf(stderr, "Failed to set current devide to %d: %s\n", i, cudaGetErrorString(res));
return false;
}
deviceID = i;
return true;
}
}
fprintf(stderr, "Failed to find available device (this shouldn't happen)");
return false;
}
// from https://en.wikipedia.org/wiki/Permuted_congruential_generator#Example_code
// using rand() is an order of magnitude slower and doesn't generate all 32bits.
#define rotr32(x, r) (x >> r | x << (-r & 31))
extern "C" void populateInput(uint32_t *arr, size_t nelem) {
static uint64_t state = 0x4d595df4d0f33173;
static uint64_t const multiplier = 6364136223846793005u;
static uint64_t const increment = 1442695040888963407u;
for(size_t i = 0; i < nelem; i++) {
uint64_t x = state;
unsigned count = (unsigned)(x >> 59);
state = x * multiplier + increment;
x ^= x >> 18;
arr[i] = rotr32((uint32_t)(x >> 27), count);
}
}
|
85567e33d0c24463b098ebe02a046f613ba4280a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void div_float(int n, float *a, float *b, float *sum)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i<n)
{
sum[i] = a[i] / b[i];
}
} | 85567e33d0c24463b098ebe02a046f613ba4280a.cu | #include "includes.h"
__global__ void div_float(int n, float *a, float *b, float *sum)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i<n)
{
sum[i] = a[i] / b[i];
}
} |
5b78bbc536860f072890918e7a4807774ac1d325.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "GPUSolver.h"
/** The number of azimuthal angles */
__constant__ int num_azim[1];
/** The number of energy groups */
__constant__ int num_groups[1];
/** The number of FSRs */
__constant__ int num_FSRs[1];
/** The number of polar angles */
__constant__ int num_polar[1];
/** Twice the number of polar angles */
__constant__ int two_times_num_polar[1];
/** The number of polar angles times energy groups */
__constant__ int polar_times_groups[1];
/** An array for the sines of the polar angle in the polar Quadrature set */
__constant__ FP_PRECISION sinthetas[MAX_POLAR_ANGLES];
/** An array of the weights for the polar angles from the Quadrature set */
__constant__ FP_PRECISION polar_weights[MAX_POLAR_ANGLES*MAX_AZIM_ANGLES];
/** A pointer to an array with the number of tracks per azimuthal angle */
__constant__ int num_tracks[MAX_AZIM_ANGLES/2];
/** The total number of Tracks */
__constant__ int tot_num_tracks[1];
/** A boolean indicating whether or not to use linear interpolation
* to comptue the exponential in the transport equation */
__constant__ bool interpolate_exponential[1];
/** The maximum index of the exponential linear interpolation table */
__constant__ int exp_table_max_index[1];
/** The spacing for the exponential linear interpolation table */
__constant__ FP_PRECISION exp_table_spacing[1];
/** The inverse spacing for the exponential linear interpolation table */
__constant__ FP_PRECISION inverse_exp_table_spacing[1];
/**
* @brief Fast method to round a single precision floating point value
* to an integer on the GPU.
* @param x float floating point value to round
* @return the rounded down integer value
*/
__device__ int round_to_int(float x) {
return __float2int_rd(x);
}
/**
* @brief Fast method to round a double precision floating point value
* to an integer on the GPU.
* @param x double floating point value to round
* @return the rounded down integer value
*/
__device__ int round_to_int(double x) {
return __double2int_rd(x);
}
/**
* @brief Compute the total fission source from all FSRs on the GPU.
* @param FSR_volumes an array of FSR volumes
* @param FSR_materials an array of FSR Material UIDs
* @param materials an array of dev_materials on the device
* @param scalar_flux the scalar flux in each FSR and energy group
* @param fission_sources array of fission sources in each FSR and energy group
*/
__global__ void computeFissionSourcesOnDevice(FP_PRECISION* FSR_volumes,
int* FSR_materials,
dev_material* materials,
FP_PRECISION* scalar_flux,
FP_PRECISION* fission_sources) {
/* Use a shared memory buffer for each thread's fission source */
extern __shared__ FP_PRECISION shared_fission_source[];
int tid = threadIdx.x + blockIdx.x * blockDim.x;
dev_material* curr_material;
FP_PRECISION* nu_sigma_f;
FP_PRECISION volume;
FP_PRECISION source;
/* Initialize fission source to zero */
shared_fission_source[threadIdx.x] = 0;
/* Iterate over all FSRs */
while (tid < *num_FSRs) {
curr_material = &materials[FSR_materials[tid]];
nu_sigma_f = curr_material->_nu_sigma_f;
volume = FSR_volumes[tid];
/* Iterate over energy groups and update fission source for
* this thread block */
for (int e=0; e < *num_groups; e++) {
source = nu_sigma_f[e] * scalar_flux(tid,e) * volume;
shared_fission_source[threadIdx.x] += source;
}
/* Increment thread id */
tid += blockDim.x * gridDim.x;
}
/* Copy this thread's fission source to global memory */
tid = threadIdx.x + blockIdx.x * blockDim.x;
fission_sources[tid] = shared_fission_source[threadIdx.x];
return;
}
/**
* @brief Normalizes all FSR scalar fluxes and Track boundary angular
* fluxes to the total fission source (times \f$ \nu \f$).
* @param scalar_flux an array of the FSR scalar fluxes
* @param boundary_flux an array of the Track boundary fluxes
* @param norm_factor the normalization factor
*/
__global__ void normalizeFluxesOnDevice(FP_PRECISION* scalar_flux,
FP_PRECISION* boundary_flux,
FP_PRECISION norm_factor) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
/* Normalize scalar fluxes for each FSR */
while(tid < *num_FSRs) {
for (int e=0; e < *num_groups; e++)
scalar_flux(tid,e) *= norm_factor;
tid += blockDim.x * gridDim.x;
}
tid = threadIdx.x + blockIdx.x * blockDim.x;
/* Normalize angular boundary fluxes for each Track */
while(tid < *tot_num_tracks) {
for (int pe2=0; pe2 < 2*(*polar_times_groups); pe2++)
boundary_flux(tid,pe2) *= norm_factor;
tid += blockDim.x * gridDim.x;
}
return;
}
/**
* @brief Computes the total source (fission and scattering) in each FSR
* on the GPU.
* @details This method computes the total source in each region based on
* this iteration's current approximation to the scalar flux. A
* residual for the source with respect to the source compute on
* the previous iteration is computed and returned. The residual
* is determined as follows:
 *        \f$ res = \sqrt{\frac{\displaystyle\sum \displaystyle\sum
 *        \left(\frac{Q^i - Q^{i-1}}{Q^i}\right)^2}{\# FSRs}} \f$
*
* @param FSR_materials an array of FSR Material UIDs
* @param materials an array of dev_material pointers
* @param scalar_flux an array of FSR scalar fluxes
* @param source an array of FSR sources from this iteration
* @param old_source an array of current FSR sources from previous iteration
* @param reduced_source an array of FSR sources / total xs
* @param inverse_k_eff the inverse of keff
* @param source_residuals an array of the FSR source residuals
* @return the residual between this source and the previous source
*/
__global__ void computeFSRSourcesOnDevice(int* FSR_materials,
dev_material* materials,
FP_PRECISION* scalar_flux,
FP_PRECISION* source,
FP_PRECISION* old_source,
FP_PRECISION* reduced_source,
FP_PRECISION inverse_k_eff,
FP_PRECISION* source_residuals) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
/* Reset the residual for the old and new fission sources to zero */
source_residuals[threadIdx.x + blockIdx.x * blockDim.x] = 0.0;
FP_PRECISION fission_source;
FP_PRECISION scatter_source;
dev_material* curr_material;
FP_PRECISION* nu_sigma_f;
FP_PRECISION* sigma_s;
FP_PRECISION* sigma_t;
FP_PRECISION* chi;
/* Iterate over all FSRs */
while (tid < *num_FSRs) {
curr_material = &materials[FSR_materials[tid]];
nu_sigma_f = curr_material->_nu_sigma_f;
sigma_s = curr_material->_sigma_s;
sigma_t = curr_material->_sigma_t;
chi = curr_material->_chi;
/* Initialize the fission source to zero for this FSR */
fission_source = 0;
/* Compute total fission source for current region */
for (int e=0; e < *num_groups; e++)
fission_source += scalar_flux(tid,e) * nu_sigma_f[e];
/* Compute total scattering source for this FSR in group G */
for (int G=0; G < *num_groups; G++) {
scatter_source = 0;
for (int g=0; g < *num_groups; g++)
scatter_source += sigma_s[G*(*num_groups)+g] * scalar_flux(tid,g);
/* Set the total source for this FSR in this group */
source(tid,G) = (inverse_k_eff * fission_source * chi[G] +
scatter_source) * ONE_OVER_FOUR_PI;
reduced_source(tid,G) = __fdividef(source(tid,G), sigma_t[G]);
/* Compute the norm of residuals of the sources for convergence */
if (fabs(source(tid,G)) > 1E-10)
source_residuals[threadIdx.x + blockIdx.x * blockDim.x] +=
pow((source(tid,G) - old_source(tid,G)) / source(tid,G), 2);
/* Update the old source */
old_source(tid,G) = source(tid,G);
}
/* Increment the thread id */
tid += blockDim.x * gridDim.x;
}
return;
}
/**
* @brief Compute the total fission source from all FSRs and energy groups
* on the GPU.
* @param FSR_volumes an array of the FSR volumes
* @param FSR_materials an array of the FSR Material UIDs
* @param materials an array of the dev_material pointers
* @param scalar_flux an array of FSR scalar fluxes
* @param tot_absorption an array of FSR absorption rates
* @param tot_fission an array of FSR fission rates
*/
__global__ void computeFissionAndAbsorption(FP_PRECISION* FSR_volumes,
int* FSR_materials,
dev_material* materials,
FP_PRECISION* scalar_flux,
FP_PRECISION* tot_absorption,
FP_PRECISION* tot_fission) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
dev_material* curr_material;
FP_PRECISION* nu_sigma_f;
FP_PRECISION* sigma_a;
FP_PRECISION volume;
FP_PRECISION absorption = 0.;
FP_PRECISION fission = 0.;
/* Iterate over all FSRs */
while (tid < *num_FSRs) {
curr_material = &materials[FSR_materials[tid]];
nu_sigma_f = curr_material->_nu_sigma_f;
sigma_a = curr_material->_sigma_a;
volume = FSR_volumes[tid];
FP_PRECISION curr_abs = 0.;
FP_PRECISION curr_fission = 0.;
/* Iterate over all energy groups and update fission and absorption
* rates for this thread block */
for (int e=0; e < *num_groups; e++) {
curr_abs += sigma_a[e] * scalar_flux(tid,e);
curr_fission += nu_sigma_f[e] * scalar_flux(tid,e);
}
absorption += curr_abs * volume;
fission += curr_fission * volume;
/* Increment thread id */
tid += blockDim.x * gridDim.x;
}
/* Copy this thread's fission and absorption rates to global memory */
tid = threadIdx.x + blockIdx.x * blockDim.x;
tot_absorption[tid] = absorption;
tot_fission[tid] = fission;
return;
}
/**
* @brief Perform an atomic addition in double precision to an array address
* on the GPU.
* @details This method is straight out of CUDA C Developers Guide (cc 2013).
* @param address the array memory address
* @param val the value to add to the array
* @return the atomically added array value and input value
*/
__device__ double atomicAdd(double* address, double val) {
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val +
__longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
/**
* @brief Computes the exponential term in the transport equation for a
* Track segment on the GPU.
* @details This method computes \f$ 1 - exp(-l\Sigma^T_g/sin(\theta_p)) \f$
* for a segment with total group cross-section and for
* some polar angle.
* @param sigma_t the total group cross-section at this energy
* @param length the length of the line segment projected in the xy-plane
* @param _exp_table the exponential linear interpolation table
* @param p the polar angle index
* @return the evaluated exponential
*/
__device__ FP_PRECISION computeExponential(FP_PRECISION sigma_t,
FP_PRECISION length,
FP_PRECISION* _exp_table,
int p) {
FP_PRECISION exponential;
FP_PRECISION tau = sigma_t * length;
/* Evaluate the exponential using the linear interpolation table */
if (*interpolate_exponential) {
int index;
index = round_to_int(tau * (*inverse_exp_table_spacing));
index *= (*two_times_num_polar);
exponential = (1. - (_exp_table[index+2 * p] * tau +
_exp_table[index + 2 * p +1]));
}
  /* Evaluate the exponential using the intrinsic exp(...) function */
else {
FP_PRECISION sintheta = sinthetas[p];
#ifdef SINGLE
exponential = 1.0 - __expf(- tau / sintheta);
#else
exponential = 1.0 - exp(- tau / sintheta);
#endif
}
return exponential;
}
/**
* @brief Computes the contribution to the FSR scalar flux from a Track segment
* in a single energy group on the GPU.
* @details This method integrates the angular flux for a Track segment across
* energy groups and polar angles, and tallies it into the FSR scalar
* flux, and updates the Track's angular flux.
* @param curr_segment a pointer to the Track segment of interest
* @param azim_index a pointer to the azimuthal angle index for this segment
* @param energy_group the energy group of interest
* @param materials the array of dev_material pointers
* @param track_flux a pointer to the Track's angular flux
* @param reduced_source the array of FSR sources / total xs
* @param polar_weights the array of polar Quadrature weights
* @param _exp_table the exponential interpolation table
* @param scalar_flux the array of FSR scalar fluxes
*/
__device__ void scalarFluxTally(dev_segment* curr_segment,
int azim_index,
int energy_group,
dev_material* materials,
FP_PRECISION* track_flux,
FP_PRECISION* reduced_source,
FP_PRECISION* polar_weights,
FP_PRECISION* _exp_table,
FP_PRECISION* scalar_flux) {
int fsr_id = curr_segment->_region_uid;
FP_PRECISION length = curr_segment->_length;
dev_material* curr_material = &materials[curr_segment->_material_uid];
FP_PRECISION *sigma_t = curr_material->_sigma_t;
  /* The change in angular flux along this Track segment in this FSR */
FP_PRECISION delta_psi;
FP_PRECISION exponential;
/* Zero the FSR scalar flux contribution from this segment and energy group */
FP_PRECISION fsr_flux = 0.0;
/* Compute the exponential interpolation table index */
/* Loop over polar angles */
for (int p=0; p < *num_polar; p++) {
exponential = computeExponential(sigma_t[energy_group],
length, _exp_table, p);
delta_psi = (track_flux[p] - reduced_source(fsr_id,energy_group)) *
exponential;
fsr_flux += delta_psi * polar_weights(azim_index,p);
track_flux[p] -= delta_psi;
}
/* Atomically increment the scalar flux for this FSR */
atomicAdd(&scalar_flux(fsr_id,energy_group), fsr_flux);
}
/**
* @brief Updates the boundary flux for a Track given boundary conditions
* on the GPU.
* @details For reflective boundary conditions, the outgoing boundary flux
* for the Track is given to the reflecting track. For vacuum
* boundary conditions, the outgoing flux tallied as leakage.
 *          boundary conditions, the outgoing flux is tallied as leakage.
* @param curr_track a pointer to the Track of interest
* @param azim_index a pointer to the azimuthal angle index for this segment
* @param track_flux an array of the outgoing Track flux
* @param boundary_flux an array of all angular fluxes
* @param leakage an array of leakages for each CUDA thread
* @param polar_weights an array of polar Quadrature weights
* @param energy_angle_index the energy group index
* @param direction the Track direction (forward - true, reverse - false)
*/
__device__ void transferBoundaryFlux(dev_track* curr_track,
int azim_index,
FP_PRECISION* track_flux,
FP_PRECISION* boundary_flux,
FP_PRECISION* leakage,
FP_PRECISION* polar_weights,
int energy_angle_index,
bool direction) {
int start = energy_angle_index;
bool bc;
int track_out_id;
/* Extract boundary conditions for this Track and the pointer to the
* outgoing reflective Track, and index into the leakage array */
/* For the "forward" direction */
if (direction) {
bc = curr_track->_bc_out;
track_out_id = curr_track->_track_out;
start += curr_track->_refl_out * (*polar_times_groups);
}
/* For the "reverse" direction */
else {
bc = curr_track->_bc_in;
track_out_id = curr_track->_track_in;
start += curr_track->_refl_in * (*polar_times_groups);
}
FP_PRECISION* track_out_flux = &boundary_flux(track_out_id,start);
/* Put Track's flux in the shared memory temporary flux array */
for (int p=0; p < *num_polar; p++) {
track_out_flux[p] = track_flux[p] * bc;
leakage[0] += track_flux[p] * polar_weights(azim_index,p) * (!bc);
}
}
/**
* @brief This method performs one transport sweep of one halfspace of all
* azimuthal angles, tracks, segments, polar angles and energy groups
* on the GPU.
* @details The method integrates the flux along each track and updates the
* boundary fluxes for the corresponding output Track, while updating
* the scalar flux in each FSR.
* @param scalar_flux an array of FSR scalar fluxes
* @param boundary_flux an array of Track boundary fluxes
* @param reduced_source an array of FSR sources / total xs
 * @param leakage an array of angular flux leakages
* @param materials an array of dev_material pointers
* @param tracks an array of Tracks
* @param _exp_table an array for the exponential interpolation table
* @param tid_offset the Track offset for azimuthal angle halfspace
* @param tid_max the upper bound on the Track IDs for this azimuthal
* angle halfspace
*/
__global__ void transportSweepOnDevice(FP_PRECISION* scalar_flux,
FP_PRECISION* boundary_flux,
FP_PRECISION* reduced_source,
FP_PRECISION* leakage,
dev_material* materials,
dev_track* tracks,
FP_PRECISION* _exp_table,
int tid_offset,
int tid_max) {
/* Shared memory buffer for each thread's angular flux */
extern __shared__ FP_PRECISION temp_flux[];
FP_PRECISION* track_flux;
int tid = tid_offset + threadIdx.x + blockIdx.x * blockDim.x;
int track_id = tid / *num_groups;
int track_flux_index = threadIdx.x * (*two_times_num_polar);
int energy_group = tid % (*num_groups);
int energy_angle_index = energy_group * (*num_polar);
dev_track* curr_track;
int azim_index;
int num_segments;
dev_segment* curr_segment;
  /* Iterate over Tracks with azimuthal angles in (0, pi/2) */
while (track_id < tid_max) {
/* Initialize local registers with important data */
curr_track = &tracks[track_id];
azim_index = curr_track->_azim_angle_index;
num_segments = curr_track->_num_segments;
/* Retrieve pointer to thread's shared memory buffer for angular flux */
track_flux = &temp_flux[track_flux_index];
/* Put Track's flux in the shared memory temporary flux array */
for (int p=0; p < *num_polar; p++) {
/* Forward flux along this Track */
track_flux[p] = boundary_flux(track_id,p+energy_angle_index);
/* Reverse flux along this Track */
track_flux[(*num_polar) + p] =
boundary_flux(track_id,p+energy_angle_index+(*polar_times_groups));
}
/* Loop over each Track segment in forward direction */
for (int i=0; i < num_segments; i++) {
curr_segment = &curr_track->_segments[i];
scalarFluxTally(curr_segment, azim_index, energy_group, materials,
track_flux, reduced_source, polar_weights,
_exp_table, scalar_flux);
}
/* Transfer boundary angular flux to outgoing Track */
transferBoundaryFlux(curr_track, azim_index, track_flux, boundary_flux,
&leakage[threadIdx.x + blockIdx.x * blockDim.x],
polar_weights, energy_angle_index, true);
/* Loop over each Track segment in reverse direction */
track_flux = &temp_flux[track_flux_index + (*num_polar)];
for (int i=num_segments-1; i > -1; i--) {
curr_segment = &curr_track->_segments[i];
scalarFluxTally(curr_segment, azim_index, energy_group, materials,
track_flux, reduced_source, polar_weights,
_exp_table, scalar_flux);
}
/* Transfer boundary angular flux to outgoing Track */
transferBoundaryFlux(curr_track, azim_index, track_flux, boundary_flux,
&leakage[threadIdx.x + blockIdx.x * blockDim.x],
polar_weights, energy_angle_index, false);
/* Update the indices for this thread to the next Track, energy group */
tid += blockDim.x * gridDim.x;
track_id = tid / *num_groups;
energy_group = tid % (*num_groups);
energy_angle_index = energy_group * (*num_polar);
}
return;
}
/**
* @brief Add the source term contribution in the transport equation to
* the FSR scalar flux on the GPU.
* @param scalar_flux an array of FSR scalar fluxes
* @param reduced_source an array of FSR sources / total xs
* @param FSR_volumes an array of FSR volumes
* @param FSR_materials an array of FSR material UIDs
* @param materials an array of dev_material pointers
*/
__global__ void addSourceToScalarFluxOnDevice(FP_PRECISION* scalar_flux,
FP_PRECISION* reduced_source,
FP_PRECISION* FSR_volumes,
int* FSR_materials,
dev_material* materials) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
FP_PRECISION volume;
dev_material* curr_material;
FP_PRECISION* sigma_t;
/* Iterate over all FSRs */
while (tid < *num_FSRs) {
curr_material = &materials[FSR_materials[tid]];
volume = FSR_volumes[tid];
sigma_t = curr_material->_sigma_t;
/* Iterate over all energy groups */
for (int i=0; i < *num_groups; i++) {
scalar_flux(tid,i) *= 0.5;
scalar_flux(tid,i) = FOUR_PI * reduced_source(tid,i) +
__fdividef(scalar_flux(tid,i), (sigma_t[i] * volume));
}
/* Increment thread id */
tid += blockDim.x * gridDim.x;
}
return;
}
/**
* @brief Computes the volume-weighted, energy integrated fission rate in
* each FSR and stores them in an array indexed by FSR ID on the GPU.
* @details This is a helper method for the
* GPUSolver::computeFSRFissionRates(...) method.
 * @param fission_rates an array in which to store the FSR fission rates
* @param FSR_materials an array of FSR material UIDs
* @param materials an array of dev_material pointers
* @param scalar_flux an array of FSR scalar fluxes
*/
__global__ void computeFSRFissionRatesOnDevice(double* fission_rates,
int* FSR_materials,
dev_material* materials,
FP_PRECISION* scalar_flux) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
dev_material* curr_material;
FP_PRECISION* sigma_f;
/* Loop over all FSRs and compute the volume-weighted fission rate */
while (tid < *num_FSRs) {
curr_material = &materials[FSR_materials[tid]];
sigma_f = curr_material->_sigma_f;
/* Initialize the fission rate for this FSR to zero */
fission_rates[tid] = 0.0;
for (int i=0; i < *num_groups; i++)
fission_rates[tid] += sigma_f[i] * scalar_flux(tid,i);
/* Increment thread id */
tid += blockDim.x * gridDim.x;
}
return;
}
/**
* @brief Constructor initializes arrays for dev_tracks and dev_materials..
* @details The constructor retrieves the number of energy groups and FSRs
* and azimuthal angles from the Geometry and TrackGenerator if
 *          passed in as parameters by the user. The constructor initializes
* the number of CUDA threads and thread blocks each to a default
* of 64.
* @param geometry an optional pointer to the Geometry
 * @param track_generator an optional pointer to the TrackGenerator
*/
GPUSolver::GPUSolver(Geometry* geometry, TrackGenerator* track_generator) :
Solver(geometry, track_generator) {
/* The default number of thread blocks and threads per thread block */
_B = 64;
_T = 64;
_materials = NULL;
_dev_tracks = NULL;
_tot_absorption = NULL;
_tot_fission = NULL;
_leakage = NULL;
if (track_generator != NULL)
setTrackGenerator(track_generator);
if (geometry != NULL)
setGeometry(geometry);
}
/**
* @brief Solver destructor frees all memory on the device, including arrays
* for the FSR scalar fluxes and sources and Track boundary fluxes.
*/
GPUSolver::~GPUSolver() {
if (_FSR_volumes != NULL) {
hipFree(_FSR_volumes);
_FSR_volumes = NULL;
}
if (_FSR_materials != NULL) {
hipFree(_FSR_materials);
_FSR_materials = NULL;
}
if (_materials != NULL) {
hipFree(_materials);
_materials = NULL;
}
if (_dev_tracks != NULL) {
hipFree(_dev_tracks);
_dev_tracks = NULL;
}
if (_boundary_flux != NULL) {
hipFree(_boundary_flux);
_boundary_flux = NULL;
}
if (_scalar_flux != NULL) {
hipFree(_scalar_flux);
_scalar_flux = NULL;
}
if (_source != NULL) {
hipFree(_source);
_source = NULL;
}
if (_old_source != NULL) {
hipFree(_old_source);
_old_source = NULL;
}
if (_reduced_source != NULL) {
hipFree(_reduced_source);
_reduced_source = NULL;
}
if (_fission_sources != NULL) {
_fission_sources_vec.clear();
_fission_sources = NULL;
}
if (_tot_absorption != NULL) {
_tot_absorption_vec.clear();
_tot_absorption = NULL;
}
if (_tot_fission != NULL) {
_tot_fission_vec.clear();
_tot_fission = NULL;
}
if (_source_residuals != NULL) {
_source_residuals_vec.clear();
_source_residuals = NULL;
}
if (_leakage != NULL) {
_leakage_vec.clear();
_leakage = NULL;
}
if (_exp_table != NULL) {
hipFree(_exp_table);
_exp_table = NULL;
}
}
/**
* @brief Returns the number of thread blocks to execute on the GPU.
* @return the number of thread blocks
*/
int GPUSolver::getNumThreadBlocks() {
return _B;
}
/**
* @brief Returns the number of threads per block to execute on the GPU.
* @return the number of threads per block
*/
int GPUSolver::getNumThreadsPerBlock() {
return _T;
}
/**
* @brief Returns the FSR scalar flux for some energy group.
* @param fsr_id the ID for the FSR of interest
* @param energy_group the energy group of interest
*/
FP_PRECISION GPUSolver::getFSRScalarFlux(int fsr_id, int energy_group) {
/* Error checking */
if (fsr_id >= _num_FSRs)
log_printf(ERROR, "Unable to return a scalar flux for FSR id = %d in energy"
" group %d since the solver only contains FSR with IDs greater "
"than or equal to %d", fsr_id, energy_group, _num_FSRs-1);
if (fsr_id < 0)
log_printf(ERROR, "Unable to return a scalar flux for FSR id = %d in energy"
" group %d since FSRs do not have negative IDs",
fsr_id, energy_group);
if (energy_group-1 >= _num_groups)
log_printf(ERROR, "Unable to return a scalar flux for FSR id = %d in energy"
" group %d since the solver only has %d energy groups",
fsr_id, energy_group, _num_groups);
if (energy_group <= 0)
log_printf(ERROR, "Unable to return a scalar flux for FSR id = %d in energy"
" group %d since energy groups are greater than 1",
fsr_id, energy_group);
/* Copy the scalar flux for this FSR and energy group from the device */
FP_PRECISION fsr_scalar_flux;
int flux_index = fsr_id * _num_groups + (energy_group - 1);
hipMemcpy((void*)&fsr_scalar_flux, (void*)&_scalar_flux[flux_index],
sizeof(FP_PRECISION), hipMemcpyDeviceToHost);
return fsr_scalar_flux;
}
/**
* @brief Return the scalar flux array indexed by FSR IDs and energy groups.
* which contains the corresponding fluxes for each flat source region.
* @return an array of FSR scalar fluxes
*/
FP_PRECISION* GPUSolver::getFSRScalarFluxes() {
if (_scalar_flux == NULL)
log_printf(ERROR, "Unable to returns the GPUSolver's scalar flux "
"array since it has not yet been allocated in memory");
/* Copy the scalar flux for all FSRs from the device to the host */
FP_PRECISION* fsr_scalar_fluxes = new FP_PRECISION[_num_FSRs * _num_groups];
hipMemcpy((void*)fsr_scalar_fluxes, (void*)_scalar_flux,
_num_FSRs * _num_groups * sizeof(FP_PRECISION),
hipMemcpyDeviceToHost);
return fsr_scalar_fluxes;
}
/**
* @brief Returns the FSR source for some energy group.
* @param fsr_id the ID for the FSR of interest
* @param energy_group the energy group of interest
*/
FP_PRECISION GPUSolver::getFSRSource(int fsr_id, int energy_group) {
/* Error checking */
if (fsr_id >= _num_FSRs)
log_printf(ERROR, "Unable to return a source for FSR id = %d in energy"
" group %d since the solver only contains FSR with IDs greater than "
"or equal to %d", fsr_id, energy_group, _num_FSRs-1);
if (fsr_id < 0)
log_printf(ERROR, "Unable to return a source for FSR id = %d in energy"
" group %d since FSRs do not have negative IDs",
fsr_id, energy_group);
if (energy_group-1 >= _num_groups)
log_printf(ERROR, "Unable to return a source for FSR id = %d in energy"
" group %d since the solver only has %d energy groups",
fsr_id, energy_group, _num_groups);
if (energy_group <= 0)
log_printf(ERROR, "Unable to return a source for FSR id = %d in energy"
" group %d since energy groups are greater than 1",
fsr_id, energy_group);
/* Copy the source for this FSR and energy group from the device */
FP_PRECISION fsr_source;
int flux_index = fsr_id * _num_groups + (energy_group - 1);
hipMemcpy((void*)&fsr_source, (void*)&_source[flux_index],
sizeof(FP_PRECISION), hipMemcpyDeviceToHost);
return fsr_source;
}
/**
* @brief Sets the number of thread blocks (>0) for CUDA kernels.
* @param num_blocks the number of thread blocks
*/
void GPUSolver::setNumThreadBlocks(int num_blocks) {
if (num_blocks < 0)
log_printf(ERROR, "Unable to set the number of CUDA thread blocks "
"to %d since it is a negative number", num_blocks);
_B = num_blocks;
}
/**
* @brief Sets the number of threads per block (>0) for CUDA kernels.
* @param num_threads the number of threads per block
*/
void GPUSolver::setNumThreadsPerBlock(int num_threads) {
if (num_threads < 0)
log_printf(ERROR, "Unable to set the number of CUDA threads per block "
"to %d since it is a negative number", num_threads);
_T = num_threads;
}
/**
* @brief Sets the Geometry pointer for the GPUSolver.
* @details The Geometry must already have initialized FSR offset maps
* and segmentized the TrackGenerator's tracks. Each of these
* should be initiated in Python prior to assigning a Geometry
* to the GPUSolver:
*
* @code
* geometry.initializeFlatSourceRegions()
* track_generator.generateTracks()
* @endcode
*
* @param geometry a pointer to a Geometry
*/
void GPUSolver::setGeometry(Geometry* geometry) {
Solver::setGeometry(geometry);
initializeMaterials();
/* Copy the number of energy groups to constant memory on the GPU */
hipMemcpyToSymbol(num_groups, (void*)&_num_groups, sizeof(int), 0,
hipMemcpyHostToDevice);
}
/**
* @brief Sets the TrackGenerator with characteristic tracks for the GPUSolver.
* @details The TrackGenerator must already have generated Tracks and have
* used ray tracing to segmentize them across the Geometry. This
 *          should be initiated in Python prior to assigning the TrackGenerator
* to the GPUSolver:
*
* @code
* track_generator.generateTracks()
* @endcode
*
* @param track_generator a pointer to a TrackGenerator
*/
void GPUSolver::setTrackGenerator(TrackGenerator* track_generator) {
Solver::setTrackGenerator(track_generator);
initializeTracks();
}
/**
* @brief Creates a polar Quadrature object for the GPUSolver on the GPU.
*/
void GPUSolver::initializePolarQuadrature() {
log_printf(INFO, "Initializing polar quadrature on the GPU...");
/* Deletes the old Quadrature if one existed */
if (_quad != NULL)
delete _quad;
_quad = new Quadrature(_quadrature_type, _num_polar);
_polar_times_groups = _num_groups * _num_polar;
/* Copy the number of polar angles to constant memory on the GPU */
hipMemcpyToSymbol(num_polar, (void*)&_num_polar, sizeof(int), 0,
hipMemcpyHostToDevice);
/* Copy twice the number of polar angles to constant memory on the GPU */
hipMemcpyToSymbol(two_times_num_polar, (void*)&_two_times_num_polar,
sizeof(int), 0, hipMemcpyHostToDevice);
/* Copy the number of polar angles times energy groups to constant memory
* on the GPU */
hipMemcpyToSymbol(polar_times_groups, (void*)&_polar_times_groups,
sizeof(int), 0, hipMemcpyHostToDevice);
/* Compute polar times azimuthal angle weights */
if (_polar_weights != NULL)
delete [] _polar_weights;
_polar_weights =
(FP_PRECISION*)malloc(_num_polar * _num_azim * sizeof(FP_PRECISION));
FP_PRECISION* multiples = _quad->getMultiples();
FP_PRECISION* azim_weights = _track_generator->getAzimWeights();
for (int i=0; i < _num_azim; i++) {
for (int j=0; j < _num_polar; j++)
_polar_weights[i*_num_polar+j] = azim_weights[i]*multiples[j]*FOUR_PI;
}
/* Copy the polar weights to constant memory on the GPU */
hipMemcpyToSymbol(polar_weights, (void*)_polar_weights,
_num_polar * _num_azim * sizeof(FP_PRECISION), 0, hipMemcpyHostToDevice);
}
/**
* @brief Initializes the FSR volumes and dev_materials array on the GPU.
* @details This method assigns each FSR a unique, monotonically increasing
* ID, sets the Material for each FSR, and assigns a volume based on
* the cumulative length of all of the segments inside the FSR.
*/
void GPUSolver::initializeFSRs() {
log_printf(INFO, "Initializing FSRs on the GPU...");
/* Delete old FSRs array if it exists */
if (_FSR_volumes != NULL)
hipFree(_FSR_volumes);
if (_FSR_materials != NULL)
hipFree(_FSR_materials);
/* Allocate memory for all FSR volumes and dev_materials on the device */
try{
/* Allocate memory on device for FSR volumes and Material UIDs */
hipMalloc((void**)&_FSR_volumes, _num_FSRs * sizeof(FP_PRECISION));
hipMalloc((void**)&_FSR_materials, _num_FSRs * sizeof(int));
/* Create a temporary FSR array to populate and then copy to device */
FP_PRECISION* temp_FSR_volumes = new FP_PRECISION[_num_FSRs];
/* Get the array indexed by FSR IDs with Material ID values */
int* FSRs_to_materials = _geometry->getFSRtoMaterialMap();
    /* Initialize each FSR's volume to 0 to avoid NaNs */
memset(temp_FSR_volumes, FP_PRECISION(0.), _num_FSRs*sizeof(FP_PRECISION));
Track* track;
int num_segments;
segment* curr_segment;
segment* segments;
FP_PRECISION volume;
FP_PRECISION* azim_weights = _track_generator->getAzimWeights();
/* Set each FSR's volume by accumulating the total length of all Tracks
* inside the FSR. Iterate over azimuthal angle, Track, Track segment*/
for (int i=0; i < _num_azim; i++) {
for (int j=0; j < _num_tracks[i]; j++) {
track = &_track_generator->getTracks()[i][j];
num_segments = track->getNumSegments();
segments = track->getSegments();
/* Iterate over the Track's segments to update FSR volumes */
for (int s = 0; s < num_segments; s++) {
curr_segment = &segments[s];
volume = curr_segment->_length * azim_weights[i];
temp_FSR_volumes[curr_segment->_region_id] += volume;
}
}
}
/* Copy the temporary array of FSRs to the device */
hipMemcpy((void*)_FSR_volumes, (void*)temp_FSR_volumes,
_num_FSRs * sizeof(FP_PRECISION), hipMemcpyHostToDevice);
hipMemcpy((void*)_FSR_materials, (void*)FSRs_to_materials,
_num_FSRs * sizeof(int), hipMemcpyHostToDevice);
/* Copy the number of FSRs into constant memory on the GPU */
hipMemcpyToSymbol(num_FSRs, (void*)&_num_FSRs, sizeof(int), 0,
hipMemcpyHostToDevice);
/* Free the temporary array of FSRs on the host */
    delete [] temp_FSR_volumes;
}
catch(std::exception &e) {
log_printf(ERROR, "Could not allocate memory for the GPUSolver's FSRs "
"on the device. Backtrace:%s", e.what());
}
initializeThrustVectors();
}
/**
* @brief Allocates data on the GPU for all Materials data.
*/
void GPUSolver::initializeMaterials() {
log_printf(INFO, "Initializing materials on the GPU...");
/* Delete old materials array if it exists */
if (_materials != NULL)
hipFree(_materials);
/* Allocate memory for all dev_materials on the device */
try{
std::map<int, Material*> host_materials=_geometry->getMaterials();
std::map<int, Material*>::iterator iter;
/* Iterate through all Materials and clone them as dev_material structs
* on the device */
hipMalloc((void**)&_materials, _num_materials * sizeof(dev_material));
for (iter=host_materials.begin(); iter != host_materials.end(); ++iter)
clone_material_on_gpu(iter->second, &_materials[iter->second->getUid()]);
}
catch(std::exception &e) {
log_printf(ERROR, "Could not allocate memory for the GPUSolver's "
"dev_materials. Backtrace:%s", e.what());
}
}
/**
* @brief Allocates memory on the GPU for all Tracks in the simulation.
*/
void GPUSolver::initializeTracks() {
log_printf(INFO, "Initializing tracks on the GPU...");
/* Delete old Tracks array if it exists */
if (_dev_tracks != NULL)
hipFree(_dev_tracks);
/* Allocate memory for all Tracks and Track offset indices on the device */
try{
/* Allocate array of dev_tracks */
hipMalloc((void**)&_dev_tracks, _tot_num_tracks * sizeof(dev_track));
/* Iterate through all Tracks and clone them as dev_tracks on the device */
int index;
for (int i=0; i < _tot_num_tracks; i++) {
clone_track_on_gpu(_tracks[i], &_dev_tracks[i]);
/* Make Track reflective */
index = computeScalarTrackIndex(_tracks[i]->getTrackInI(),
_tracks[i]->getTrackInJ());
hipMemcpy((void*)&_dev_tracks[i]._track_in,
(void*)&index, sizeof(int), hipMemcpyHostToDevice);
index = computeScalarTrackIndex(_tracks[i]->getTrackOutI(),
_tracks[i]->getTrackOutJ());
hipMemcpy((void*)&_dev_tracks[i]._track_out,
(void*)&index, sizeof(int), hipMemcpyHostToDevice);
}
/* Copy the array of number of Tracks for each azimuthal angle into
* constant memory on GPU */
hipMemcpyToSymbol(num_tracks, (void*)_num_tracks,
_num_azim * sizeof(int), 0, hipMemcpyHostToDevice);
/* Copy the total number of Tracks into constant memory on GPU */
hipMemcpyToSymbol(tot_num_tracks, (void*)&_tot_num_tracks,
sizeof(int), 0, hipMemcpyHostToDevice);
/* Copy the number of azimuthal angles into constant memory on GPU */
hipMemcpyToSymbol(num_azim, (void*)&_num_azim, sizeof(int), 0,
hipMemcpyHostToDevice);
/* Copy the array of number of Tracks for each azimuthal angles into
* constant memory on GPU */
hipMemcpyToSymbol(num_tracks, (void*)_num_tracks,
_num_azim * sizeof(int), 0, hipMemcpyHostToDevice);
/* Copy the total number of Tracks into constant memory on GPU */
hipMemcpyToSymbol(tot_num_tracks, (void*)&_tot_num_tracks,
sizeof(int), 0, hipMemcpyHostToDevice);
}
catch(std::exception &e) {
log_printf(ERROR, "Could not allocate memory for the GPUSolver's "
"dev_tracks on the device. Backtrace:%s", e.what());
}
}
/**
* @brief Allocates memory for Track boundary angular fluxes and leakages
* and FSR scalar fluxes on the GPU.
* @details Deletes memory for old flux arrays if they were allocated for a
* previous simulation.
*/
void GPUSolver::initializeFluxArrays() {
log_printf(INFO, "Initializing flux arrays on the GPU...");
/* Delete old flux arrays if they exist */
if (_boundary_flux != NULL)
hipFree(_boundary_flux);
if (_scalar_flux != NULL)
hipFree(_scalar_flux);
/* Allocate memory for all flux arrays on the device */
try{
hipMalloc((void**)&_boundary_flux,
2*_tot_num_tracks * _polar_times_groups*sizeof(FP_PRECISION));
hipMalloc((void**)&_scalar_flux,
_num_FSRs * _num_groups * sizeof(FP_PRECISION));
}
catch(std::exception &e) {
log_printf(ERROR, "Could not allocate memory for the GPUSolver's fluxes "
"on the device. Backtrace:%s", e.what());
}
}
/**
* @brief Allocates memory for FSR source arrays on the GPU.
* @details Deletes memory for old source arrays if they were allocated for a
* previous simulation.
*/
void GPUSolver::initializeSourceArrays() {
log_printf(INFO, "Initializing source arrays on the GPU...");
/* Delete old sources arrays if they exist */
if (_source != NULL)
hipFree(_source);
if (_old_source != NULL)
hipFree(_old_source);
if (_reduced_source != NULL)
hipFree(_reduced_source);
/* Allocate memory for all source arrays on the device */
try{
hipMalloc((void**)&_source,
_num_FSRs * _num_groups * sizeof(FP_PRECISION));
hipMalloc((void**)&_old_source,
_num_FSRs * _num_groups * sizeof(FP_PRECISION));
hipMalloc((void**)&_reduced_source,
_num_FSRs * _num_groups * sizeof(FP_PRECISION));
}
catch(std::exception &e) {
log_printf(ERROR, "Could not allocate memory for the GPUSolver's FSR "
"sources array on the device. Backtrace:%s", e.what());
}
}
/**
* @brief Initialize Thrust vectors for the fission and absorption rates,
* source residuals, leakage and fission sources.
*/
void GPUSolver::initializeThrustVectors() {
log_printf(INFO, "Initializing Thrust vectors on the GPU...");
/* Delete old vectors if they exist */
if (_fission_sources != NULL) {
_fission_sources = NULL;
_fission_sources_vec.clear();
}
if (_tot_absorption != NULL) {
_tot_absorption = NULL;
_tot_absorption_vec.clear();
}
if (_tot_fission != NULL) {
_tot_fission = NULL;
_tot_fission_vec.clear();
}
if (_source_residuals != NULL) {
_source_residuals = NULL;
_source_residuals_vec.clear();
}
if (_leakage != NULL) {
_leakage = NULL;
_leakage_vec.clear();
}
/* Allocate memory for fission, absorption and source vectors on device */
try{
/* Allocate fission source array on device */
_fission_sources_vec.resize(_B * _T);
_fission_sources = thrust::raw_pointer_cast(&_fission_sources_vec[0]);
/* Allocate total absorption reaction rate array on device */
_tot_absorption_vec.resize(_B * _T);
_tot_absorption = thrust::raw_pointer_cast(&_tot_absorption_vec[0]);
/* Allocate fission reaction rate array on device */
_tot_fission_vec.resize(_B * _T);
_tot_fission = thrust::raw_pointer_cast(&_tot_fission_vec[0]);
/* Allocate source residual array on device */
_source_residuals_vec.resize(_B * _T);
_source_residuals = thrust::raw_pointer_cast(&_source_residuals_vec[0]);
/* Allocate leakage array on device */
_leakage_vec.resize(_B * _T);
_leakage = thrust::raw_pointer_cast(&_leakage_vec[0]);
}
catch(std::exception &e) {
log_printf(ERROR, "Could not allocate memory for the GPUSolver's "
"Thrust vectors. Backtrace:%s", e.what());
}
}
/**
* @brief This method computes the index for the Track j at azimuthal angle i.
* @details This method is necessary since the array of dev_tracks on the device
* is a 1D array which needs a one-to-one mapping from the 2D jagged
* array of Tracks on the host.
* @param i azimuthal angle number
* @param j the jth track at angle i
* @return an index into the device track array
*/
int GPUSolver::computeScalarTrackIndex(int i, int j) {
int index =0;
int p = 0;
/* Iterate over each azimuthal angle and increment index by the number of
* Tracks at each angle */
while (p < i) {
index += _num_tracks[p];
p++;
}
/* Update index for this Track since it is the jth Track at angle i */
index += j;
return index;
}
/**
* @brief Builds a linear interpolation table to compute exponentials for
* each segment of each Track for each polar angle on the GPU.
*/
void GPUSolver::buildExpInterpTable(){
log_printf(INFO, "Building exponential interpolation table on device...");
/* Copy a boolean indicating whether or not to use the linear interpolation
* table or the exp intrinsic function */
hipMemcpyToSymbol(interpolate_exponential,(void*)&_interpolate_exponential,
sizeof(bool), 0, hipMemcpyHostToDevice);
/* Copy the sines of the polar angles which is needed if the user
* requested the use of the exp intrinsic to evaluate exponentials */
hipMemcpyToSymbol(sinthetas, (void*)_quad->getSinThetas(),
_num_polar * sizeof(FP_PRECISION), 0,
hipMemcpyHostToDevice);
/* Set size of interpolation table */
int num_array_values =
10 * sqrt(1. / (8. * _source_convergence_thresh * 1e-2));
_exp_table_spacing = 10. / num_array_values;
_inverse_exp_table_spacing = 1.0 / _exp_table_spacing;
_exp_table_size = _two_times_num_polar * num_array_values;
_exp_table_max_index = _exp_table_size - _two_times_num_polar - 1;
/* Allocate arrays */
FP_PRECISION* exp_table = new FP_PRECISION[_exp_table_size];
FP_PRECISION expon;
FP_PRECISION intercept;
FP_PRECISION slope;
/* Create exponential interpolation table */
for (int i = 0; i < num_array_values; i ++){
for (int p = 0; p < _num_polar; p++){
expon = exp(- (i * _exp_table_spacing) / _quad->getSinTheta(p));
slope = - expon / _quad->getSinTheta(p);
intercept = expon * (1 + (i * _exp_table_spacing)/_quad->getSinTheta(p));
exp_table[_two_times_num_polar * i + 2 * p] = slope;
exp_table[_two_times_num_polar * i + 2 * p + 1] = intercept;
}
}
/* Allocate memory for the interpolation table on the device */
hipMalloc((void**)&_exp_table, _exp_table_size * sizeof(FP_PRECISION));
/* Copy exponential interpolation table to the device */
hipMemcpy((void*)_exp_table, (void*)exp_table,
_exp_table_size * sizeof(FP_PRECISION),
hipMemcpyHostToDevice);
/* Copy table size and spacing to constant memory on the device */
hipMemcpyToSymbol(exp_table_spacing, (void*)&_exp_table_spacing,
sizeof(FP_PRECISION), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(inverse_exp_table_spacing,
(void*)&_inverse_exp_table_spacing,
sizeof(FP_PRECISION), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(exp_table_max_index, (void*)&_exp_table_max_index,
sizeof(int), 0, hipMemcpyHostToDevice);
delete [] exp_table;
return;
}
/**
* @brief Zero each Track's boundary fluxes for each energy group and polar
* angle in the "forward" and "reverse" directions.
*/
void GPUSolver::zeroTrackFluxes() {
int size = 2 * _tot_num_tracks * _num_polar * _num_groups;
size *= sizeof(FP_PRECISION);
hipMemset(_boundary_flux, 0.0, size);
return;
}
/**
* @brief Set the FSR scalar flux for each energy group to some value.
* @param value the value to assign to each FSR scalar flux
*/
void GPUSolver::flattenFSRFluxes(FP_PRECISION value) {
int size = _num_FSRs * _num_groups * sizeof(FP_PRECISION);
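/* Note: hipMemset sets each byte to (unsigned char)value, so non-zero
 * FP_PRECISION values are not written exactly; only value == 0 flattens
 * the fluxes as the name suggests */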
hipMemset(_scalar_flux, value, size);
return;
}
/**
* @brief Set the FSR source for each energy group to some value.
* @param value the value to assign to each FSR source
*/
void GPUSolver::flattenFSRSources(FP_PRECISION value) {
int size = _num_FSRs * _num_groups * sizeof(FP_PRECISION);
hipMemset(_source, value, size);
hipMemset(_old_source, value, size);
return;
}
/**
* @brief Normalizes all FSR scalar fluxes and Track boundary angular
* fluxes to the total fission source (times \f$ \nu \f$).
*/
void GPUSolver::normalizeFluxes() {
int shared_mem = sizeof(FP_PRECISION) * _T;
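/* One shared memory slot per thread for its partial fission source tally
 * in computeFissionSourcesOnDevice */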
hipLaunchKernelGGL(( computeFissionSourcesOnDevice), dim3(_B), dim3(_T), shared_mem, 0, _FSR_volumes,
_FSR_materials,
_materials,
_scalar_flux,
_fission_sources);
FP_PRECISION norm_factor = 1.0 / thrust::reduce(_fission_sources_vec.begin(),
_fission_sources_vec.end());
hipLaunchKernelGGL(( normalizeFluxesOnDevice), dim3(_B), dim3(_T), 0, 0, _scalar_flux, _boundary_flux,norm_factor);
}
/**
* @brief Computes the total source (fission and scattering) in each FSR.
* @details This method computes the total source in each FSR based on
* this iteration's current approximation to the scalar flux. A
 *          residual for the source with respect to the source computed on
 *          the previous iteration is computed and returned. The residual
 *          is determined as follows:
 *          \f$ res = \sqrt{\frac{\displaystyle\sum \displaystyle\sum
 *          \left(\frac{Q^i - Q^{i-1}}{Q^i}\right)^2}{\# FSRs}} \f$
*
* @return the residual between this source and the previous source
*/
FP_PRECISION GPUSolver::computeFSRSources() {
hipLaunchKernelGGL(( computeFSRSourcesOnDevice), dim3(_B), dim3(_T), 0, 0, _FSR_materials, _materials,
_scalar_flux, _source, _old_source,
_reduced_source, 1.0 / _k_eff,
_source_residuals);
FP_PRECISION residual = thrust::reduce(_source_residuals_vec.begin(),
_source_residuals_vec.end());
residual = sqrt(residual / (_num_groups * _num_FSRs));
return residual;
}
/**
* @brief This method performs one transport sweep of all azimuthal angles,
* Tracks, Track segments, polar angles and energy groups.
* @details The method integrates the flux along each Track and updates the
* boundary fluxes for the corresponding output Track, while updating
* the scalar flux in each flat source region.
*/
void GPUSolver::transportSweep() {
int shared_mem = _T * _two_times_num_polar * sizeof(FP_PRECISION);
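/* Each thread buffers 2 * num_polar angular fluxes (the forward and
 * reverse polar fluxes of its current Track) in shared memory */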
int tid_offset, tid_max;
log_printf(DEBUG, "Transport sweep on device with %d blocks and %d threads",
_B, _T);
/* Initialize leakage to zero */
thrust::fill(_leakage_vec.begin(), _leakage_vec.end(), 0.0);
/* Initialize flux in each FSR to zero */
flattenFSRFluxes(0.0);
/* Sweep the first halfspace of azimuthal angle space */
tid_offset = 0;
tid_max = (_tot_num_tracks / 2);
hipLaunchKernelGGL(( transportSweepOnDevice), dim3(_B), dim3(_T), shared_mem, 0, _scalar_flux, _boundary_flux,
_reduced_source, _leakage,
_materials, _dev_tracks,
_exp_table,
tid_offset, tid_max);
/* Sweep the second halfspace of azimuthal angle space */
tid_offset = tid_max * _num_groups;
tid_max = _tot_num_tracks;
hipLaunchKernelGGL(( transportSweepOnDevice), dim3(_B), dim3(_T), shared_mem, 0, _scalar_flux, _boundary_flux,
_reduced_source, _leakage,
_materials, _dev_tracks,
_exp_table,
tid_offset, tid_max);
}
/**
* @brief Add the source term contribution in the transport equation to
* the FSR scalar flux.
*/
void GPUSolver::addSourceToScalarFlux() {
hipLaunchKernelGGL(( addSourceToScalarFluxOnDevice), dim3(_B),dim3(_T), 0, 0, _scalar_flux, _reduced_source,
_FSR_volumes, _FSR_materials,
_materials);
}
/**
* @brief Compute \f$ k_{eff} \f$ from the total fission and absorption rates.
* @details This method computes the current approximation to the
* multiplication factor on this iteration as follows:
 *          \f$ k_{eff} = \frac{\displaystyle\sum \displaystyle\sum \nu
 *          \Sigma_f \Phi V}{\displaystyle\sum
 *          \displaystyle\sum \Sigma_a \Phi V + L} \f$
 *          where \f$ L \f$ is the total leakage through vacuum boundaries.
*/
void GPUSolver::computeKeff() {
FP_PRECISION tot_absorption;
FP_PRECISION tot_fission;
FP_PRECISION tot_leakage;
/* Compute the total fission and absorption rates on the device.
* This kernel stores partial rates in a Thrust vector with as many
 * entries as CUDA threads executed by the kernel */
hipLaunchKernelGGL(( computeFissionAndAbsorption), dim3(_B), dim3(_T), 0, 0, _FSR_volumes, _FSR_materials,
_materials, _scalar_flux,
_tot_absorption, _tot_fission);
hipDeviceSynchronize();
/* Compute the total absorption rate by reducing the partial absorption
* rates compiled in the Thrust vector */
tot_absorption = thrust::reduce(_tot_absorption_vec.begin(),
_tot_absorption_vec.end());
/* Compute the total fission rate by reducing the partial fission
* rates compiled in the Thrust vector */
tot_fission = thrust::reduce(_tot_fission_vec.begin(),_tot_fission_vec.end());
/* Compute the total leakage by reducing the partial leakage
* rates compiled in the Thrust vector */
tot_leakage = 0.5 * thrust::reduce(_leakage_vec.begin(), _leakage_vec.end());
/* Compute the new keff from the fission and absorption rates */
_k_eff = tot_fission / (tot_absorption + tot_leakage);
log_printf(DEBUG, "abs = %f, fiss = %f, leak = %f, keff = %f",
tot_absorption, tot_fission, tot_leakage, _k_eff);
}
/**
* @brief Computes the volume-weighted, energy integrated fission rate in
* each FSR and stores them in an array indexed by FSR ID.
* @details This is a helper method for SWIG to allow users to retrieve
* FSR fission rates as a NumPy array. An example of how this method
* can be called from Python is as follows:
*
* @code
* num_FSRs = geometry.getNumFSRs()
* fission_rates = solver.computeFSRFissionRates(num_FSRs)
* @endcode
*
* @param fission_rates an array to store the fission rates (implicitly passed
* in as a NumPy array from Python)
* @param num_FSRs the number of FSRs passed in from Python
*/
void GPUSolver::computeFSRFissionRates(double* fission_rates, int num_FSRs) {
log_printf(INFO, "Computing FSR fission rates...");
/* Allocate memory for the FSR fission rates on the device */
double* dev_fission_rates;
hipMalloc((void**)&dev_fission_rates, _num_FSRs * sizeof(double));
/* Compute the FSR fission rates on the device */
hipLaunchKernelGGL(( computeFSRFissionRatesOnDevice), dim3(_B),dim3(_T), 0, 0, dev_fission_rates,
_FSR_materials,
_materials,
_scalar_flux);
/* Copy the fission rate array from the device to the host */
hipMemcpy((void*)fission_rates, (void*)dev_fission_rates,
_num_FSRs * sizeof(double), hipMemcpyDeviceToHost);
/* Deallocate the memory assigned to store the fission rates on the device */
hipFree(dev_fission_rates);
return;
}
| 5b78bbc536860f072890918e7a4807774ac1d325.cu | #include "GPUSolver.h"
/** The number of azimuthal angles */
__constant__ int num_azim[1];
/** The number of energy groups */
__constant__ int num_groups[1];
/** The number of FSRs */
__constant__ int num_FSRs[1];
/** The number of polar angles */
__constant__ int num_polar[1];
/** Twice the number of polar angles */
__constant__ int two_times_num_polar[1];
/** The number of polar angles times energy groups */
__constant__ int polar_times_groups[1];
/** An array for the sines of the polar angle in the polar Quadrature set */
__constant__ FP_PRECISION sinthetas[MAX_POLAR_ANGLES];
/** An array of the weights for the polar angles from the Quadrature set */
__constant__ FP_PRECISION polar_weights[MAX_POLAR_ANGLES*MAX_AZIM_ANGLES];
/** A pointer to an array with the number of tracks per azimuthal angle */
__constant__ int num_tracks[MAX_AZIM_ANGLES/2];
/** The total number of Tracks */
__constant__ int tot_num_tracks[1];
/** A boolean indicating whether or not to use linear interpolation
 * to compute the exponential in the transport equation */
__constant__ bool interpolate_exponential[1];
/** The maximum index of the exponential linear interpolation table */
__constant__ int exp_table_max_index[1];
/** The spacing for the exponential linear interpolation table */
__constant__ FP_PRECISION exp_table_spacing[1];
/** The inverse spacing for the exponential linear interpolation table */
__constant__ FP_PRECISION inverse_exp_table_spacing[1];
/**
* @brief Fast method to round a single precision floating point value
* to an integer on the GPU.
* @param x float floating point value to round
* @return the rounded down integer value
*/
__device__ int round_to_int(float x) {
return __float2int_rd(x);
}
/**
* @brief Fast method to round a double precision floating point value
* to an integer on the GPU.
* @param x double floating point value to round
* @return the rounded down integer value
*/
__device__ int round_to_int(double x) {
return __double2int_rd(x);
}
/**
* @brief Compute the total fission source from all FSRs on the GPU.
* @param FSR_volumes an array of FSR volumes
* @param FSR_materials an array of FSR Material UIDs
* @param materials an array of dev_materials on the device
* @param scalar_flux the scalar flux in each FSR and energy group
* @param fission_sources array of fission sources in each FSR and energy group
*/
__global__ void computeFissionSourcesOnDevice(FP_PRECISION* FSR_volumes,
int* FSR_materials,
dev_material* materials,
FP_PRECISION* scalar_flux,
FP_PRECISION* fission_sources) {
/* Use a shared memory buffer for each thread's fission source */
extern __shared__ FP_PRECISION shared_fission_source[];
int tid = threadIdx.x + blockIdx.x * blockDim.x;
dev_material* curr_material;
FP_PRECISION* nu_sigma_f;
FP_PRECISION volume;
FP_PRECISION source;
/* Initialize fission source to zero */
shared_fission_source[threadIdx.x] = 0;
/* Iterate over all FSRs */
while (tid < *num_FSRs) {
curr_material = &materials[FSR_materials[tid]];
nu_sigma_f = curr_material->_nu_sigma_f;
volume = FSR_volumes[tid];
/* Iterate over energy groups and update fission source for
* this thread block */
for (int e=0; e < *num_groups; e++) {
source = nu_sigma_f[e] * scalar_flux(tid,e) * volume;
shared_fission_source[threadIdx.x] += source;
}
/* Increment thread id */
tid += blockDim.x * gridDim.x;
}
/* Copy this thread's fission source to global memory */
tid = threadIdx.x + blockIdx.x * blockDim.x;
fission_sources[tid] = shared_fission_source[threadIdx.x];
return;
}
/**
* @brief Normalizes all FSR scalar fluxes and Track boundary angular
* fluxes to the total fission source (times \f$ \nu \f$).
* @param scalar_flux an array of the FSR scalar fluxes
* @param boundary_flux an array of the Track boundary fluxes
* @param norm_factor the normalization factor
*/
__global__ void normalizeFluxesOnDevice(FP_PRECISION* scalar_flux,
FP_PRECISION* boundary_flux,
FP_PRECISION norm_factor) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
/* Normalize scalar fluxes for each FSR */
while(tid < *num_FSRs) {
for (int e=0; e < *num_groups; e++)
scalar_flux(tid,e) *= norm_factor;
tid += blockDim.x * gridDim.x;
}
tid = threadIdx.x + blockIdx.x * blockDim.x;
/* Normalize angular boundary fluxes for each Track */
while(tid < *tot_num_tracks) {
for (int pe2=0; pe2 < 2*(*polar_times_groups); pe2++)
boundary_flux(tid,pe2) *= norm_factor;
tid += blockDim.x * gridDim.x;
}
return;
}
/**
* @brief Computes the total source (fission and scattering) in each FSR
* on the GPU.
* @details This method computes the total source in each region based on
* this iteration's current approximation to the scalar flux. A
 *        residual for the source with respect to the source computed on
 *        the previous iteration is computed and returned. The residual
 *        is determined as follows:
 *        \f$ res = \sqrt{\frac{\displaystyle\sum \displaystyle\sum
 *        \left(\frac{Q^i - Q^{i-1}}{Q^i}\right)^2}{\# FSRs}} \f$
*
* @param FSR_materials an array of FSR Material UIDs
* @param materials an array of dev_material pointers
* @param scalar_flux an array of FSR scalar fluxes
* @param source an array of FSR sources from this iteration
* @param old_source an array of current FSR sources from previous iteration
* @param reduced_source an array of FSR sources / total xs
* @param inverse_k_eff the inverse of keff
* @param source_residuals an array of the FSR source residuals
* @return the residual between this source and the previous source
*/
__global__ void computeFSRSourcesOnDevice(int* FSR_materials,
dev_material* materials,
FP_PRECISION* scalar_flux,
FP_PRECISION* source,
FP_PRECISION* old_source,
FP_PRECISION* reduced_source,
FP_PRECISION inverse_k_eff,
FP_PRECISION* source_residuals) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
/* Reset the residual for the old and new fission sources to zero */
source_residuals[threadIdx.x + blockIdx.x * blockDim.x] = 0.0;
FP_PRECISION fission_source;
FP_PRECISION scatter_source;
dev_material* curr_material;
FP_PRECISION* nu_sigma_f;
FP_PRECISION* sigma_s;
FP_PRECISION* sigma_t;
FP_PRECISION* chi;
/* Iterate over all FSRs */
while (tid < *num_FSRs) {
curr_material = &materials[FSR_materials[tid]];
nu_sigma_f = curr_material->_nu_sigma_f;
sigma_s = curr_material->_sigma_s;
sigma_t = curr_material->_sigma_t;
chi = curr_material->_chi;
/* Initialize the fission source to zero for this FSR */
fission_source = 0;
/* Compute total fission source for current region */
for (int e=0; e < *num_groups; e++)
fission_source += scalar_flux(tid,e) * nu_sigma_f[e];
/* Compute total scattering source for this FSR in group G */
for (int G=0; G < *num_groups; G++) {
scatter_source = 0;
for (int g=0; g < *num_groups; g++)
scatter_source += sigma_s[G*(*num_groups)+g] * scalar_flux(tid,g);
/* Set the total source for this FSR in this group */
source(tid,G) = (inverse_k_eff * fission_source * chi[G] +
scatter_source) * ONE_OVER_FOUR_PI;
reduced_source(tid,G) = __fdividef(source(tid,G), sigma_t[G]);
/* Compute the norm of residuals of the sources for convergence */
if (fabs(source(tid,G)) > 1E-10)
source_residuals[threadIdx.x + blockIdx.x * blockDim.x] +=
pow((source(tid,G) - old_source(tid,G)) / source(tid,G), 2);
/* Update the old source */
old_source(tid,G) = source(tid,G);
}
/* Increment the thread id */
tid += blockDim.x * gridDim.x;
}
return;
}
/**
* @brief Compute the total fission source from all FSRs and energy groups
* on the GPU.
* @param FSR_volumes an array of the FSR volumes
* @param FSR_materials an array of the FSR Material UIDs
* @param materials an array of the dev_material pointers
* @param scalar_flux an array of FSR scalar fluxes
* @param tot_absorption an array of FSR absorption rates
* @param tot_fission an array of FSR fission rates
*/
__global__ void computeFissionAndAbsorption(FP_PRECISION* FSR_volumes,
int* FSR_materials,
dev_material* materials,
FP_PRECISION* scalar_flux,
FP_PRECISION* tot_absorption,
FP_PRECISION* tot_fission) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
dev_material* curr_material;
FP_PRECISION* nu_sigma_f;
FP_PRECISION* sigma_a;
FP_PRECISION volume;
FP_PRECISION absorption = 0.;
FP_PRECISION fission = 0.;
/* Iterate over all FSRs */
while (tid < *num_FSRs) {
curr_material = &materials[FSR_materials[tid]];
nu_sigma_f = curr_material->_nu_sigma_f;
sigma_a = curr_material->_sigma_a;
volume = FSR_volumes[tid];
FP_PRECISION curr_abs = 0.;
FP_PRECISION curr_fission = 0.;
/* Iterate over all energy groups and update fission and absorption
* rates for this thread block */
for (int e=0; e < *num_groups; e++) {
curr_abs += sigma_a[e] * scalar_flux(tid,e);
curr_fission += nu_sigma_f[e] * scalar_flux(tid,e);
}
absorption += curr_abs * volume;
fission += curr_fission * volume;
/* Increment thread id */
tid += blockDim.x * gridDim.x;
}
/* Copy this thread's fission and absorption rates to global memory */
tid = threadIdx.x + blockIdx.x * blockDim.x;
tot_absorption[tid] = absorption;
tot_fission[tid] = fission;
return;
}
/**
* @brief Perform an atomic addition in double precision to an array address
* on the GPU.
* @details This method is straight out of CUDA C Developers Guide (cc 2013).
* @param address the array memory address
* @param val the value to add to the array
* @return the atomically added array value and input value
*/
__device__ double atomicAdd(double* address, double val) {
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val +
__longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
/**
* @brief Computes the exponential term in the transport equation for a
* Track segment on the GPU.
* @details This method computes \f$ 1 - exp(-l\Sigma^T_g/sin(\theta_p)) \f$
* for a segment with total group cross-section and for
* some polar angle.
* @param sigma_t the total group cross-section at this energy
* @param length the length of the line segment projected in the xy-plane
* @param _exp_table the exponential linear interpolation table
* @param p the polar angle index
* @return the evaluated exponential
*/
__device__ FP_PRECISION computeExponential(FP_PRECISION sigma_t,
FP_PRECISION length,
FP_PRECISION* _exp_table,
int p) {
FP_PRECISION exponential;
FP_PRECISION tau = sigma_t * length;
/* Evaluate the exponential using the linear interpolation table */
if (*interpolate_exponential) {
int index;
index = round_to_int(tau * (*inverse_exp_table_spacing));
index *= (*two_times_num_polar);
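    /* Each tabulated tau occupies 2 * num_polar table entries: the slope
     * for polar angle p at offset 2p and the intercept at offset 2p + 1
     * (see buildExpInterpTable()) */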
exponential = (1. - (_exp_table[index+2 * p] * tau +
_exp_table[index + 2 * p +1]));
}
  /* Evaluate the exponential using the intrinsic exp(...) function */
else {
FP_PRECISION sintheta = sinthetas[p];
#ifdef SINGLE
exponential = 1.0 - __expf(- tau / sintheta);
#else
exponential = 1.0 - exp(- tau / sintheta);
#endif
}
return exponential;
}
/**
* @brief Computes the contribution to the FSR scalar flux from a Track segment
* in a single energy group on the GPU.
 * @details This method integrates the angular flux for a Track segment across
 *          the polar angles for a single energy group, tallies the
 *          contribution into the FSR scalar flux, and updates the Track's
 *          angular flux.
* @param curr_segment a pointer to the Track segment of interest
 * @param azim_index the azimuthal angle index for this segment's Track
* @param energy_group the energy group of interest
* @param materials the array of dev_material pointers
* @param track_flux a pointer to the Track's angular flux
* @param reduced_source the array of FSR sources / total xs
* @param polar_weights the array of polar Quadrature weights
* @param _exp_table the exponential interpolation table
* @param scalar_flux the array of FSR scalar fluxes
*/
__device__ void scalarFluxTally(dev_segment* curr_segment,
int azim_index,
int energy_group,
dev_material* materials,
FP_PRECISION* track_flux,
FP_PRECISION* reduced_source,
FP_PRECISION* polar_weights,
FP_PRECISION* _exp_table,
FP_PRECISION* scalar_flux) {
int fsr_id = curr_segment->_region_uid;
FP_PRECISION length = curr_segment->_length;
dev_material* curr_material = &materials[curr_segment->_material_uid];
FP_PRECISION *sigma_t = curr_material->_sigma_t;
  /* The change in angular flux along this Track segment in this FSR */
FP_PRECISION delta_psi;
FP_PRECISION exponential;
/* Zero the FSR scalar flux contribution from this segment and energy group */
FP_PRECISION fsr_flux = 0.0;
  /* Loop over polar angles */
for (int p=0; p < *num_polar; p++) {
exponential = computeExponential(sigma_t[energy_group],
length, _exp_table, p);
delta_psi = (track_flux[p] - reduced_source(fsr_id,energy_group)) *
exponential;
fsr_flux += delta_psi * polar_weights(azim_index,p);
track_flux[p] -= delta_psi;
}
/* Atomically increment the scalar flux for this FSR */
atomicAdd(&scalar_flux(fsr_id,energy_group), fsr_flux);
}
/**
* @brief Updates the boundary flux for a Track given boundary conditions
* on the GPU.
* @details For reflective boundary conditions, the outgoing boundary flux
* for the Track is given to the reflecting track. For vacuum
* boundary conditions, the outgoing flux tallied as leakage.
* Note: Only one energy group is transferred by this routine.
* @param curr_track a pointer to the Track of interest
 * @param azim_index the azimuthal angle index for this Track
* @param track_flux an array of the outgoing Track flux
* @param boundary_flux an array of all angular fluxes
* @param leakage an array of leakages for each CUDA thread
* @param polar_weights an array of polar Quadrature weights
 * @param energy_angle_index the angular flux offset for this energy group
 *        (energy_group * num_polar)
* @param direction the Track direction (forward - true, reverse - false)
*/
__device__ void transferBoundaryFlux(dev_track* curr_track,
int azim_index,
FP_PRECISION* track_flux,
FP_PRECISION* boundary_flux,
FP_PRECISION* leakage,
FP_PRECISION* polar_weights,
int energy_angle_index,
bool direction) {
int start = energy_angle_index;
bool bc;
int track_out_id;
/* Extract boundary conditions for this Track and the pointer to the
* outgoing reflective Track, and index into the leakage array */
/* For the "forward" direction */
if (direction) {
bc = curr_track->_bc_out;
track_out_id = curr_track->_track_out;
start += curr_track->_refl_out * (*polar_times_groups);
}
/* For the "reverse" direction */
else {
bc = curr_track->_bc_in;
track_out_id = curr_track->_track_in;
start += curr_track->_refl_in * (*polar_times_groups);
}
FP_PRECISION* track_out_flux = &boundary_flux(track_out_id,start);
  /* Transfer the Track's outgoing flux to the reflective Track, or tally
   * it as leakage for a vacuum boundary */
for (int p=0; p < *num_polar; p++) {
track_out_flux[p] = track_flux[p] * bc;
leakage[0] += track_flux[p] * polar_weights(azim_index,p) * (!bc);
}
}
/**
* @brief This method performs one transport sweep of one halfspace of all
* azimuthal angles, tracks, segments, polar angles and energy groups
* on the GPU.
* @details The method integrates the flux along each track and updates the
* boundary fluxes for the corresponding output Track, while updating
* the scalar flux in each FSR.
* @param scalar_flux an array of FSR scalar fluxes
* @param boundary_flux an array of Track boundary fluxes
* @param reduced_source an array of FSR sources / total xs
 * @param leakage an array of angular flux leakages
* @param materials an array of dev_material pointers
* @param tracks an array of Tracks
* @param _exp_table an array for the exponential interpolation table
* @param tid_offset the Track offset for azimuthal angle halfspace
* @param tid_max the upper bound on the Track IDs for this azimuthal
* angle halfspace
*/
__global__ void transportSweepOnDevice(FP_PRECISION* scalar_flux,
FP_PRECISION* boundary_flux,
FP_PRECISION* reduced_source,
FP_PRECISION* leakage,
dev_material* materials,
dev_track* tracks,
FP_PRECISION* _exp_table,
int tid_offset,
int tid_max) {
/* Shared memory buffer for each thread's angular flux */
extern __shared__ FP_PRECISION temp_flux[];
FP_PRECISION* track_flux;
int tid = tid_offset + threadIdx.x + blockIdx.x * blockDim.x;
int track_id = tid / *num_groups;
int track_flux_index = threadIdx.x * (*two_times_num_polar);
int energy_group = tid % (*num_groups);
int energy_angle_index = energy_group * (*num_polar);
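  /* Each thread owns one (Track, energy group) pair, i.e.
   * tid = track_id * num_groups + energy_group */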
dev_track* curr_track;
int azim_index;
int num_segments;
dev_segment* curr_segment;
  /* Iterate over the Tracks in this azimuthal angle halfspace assigned to
   * this thread */
while (track_id < tid_max) {
/* Initialize local registers with important data */
curr_track = &tracks[track_id];
azim_index = curr_track->_azim_angle_index;
num_segments = curr_track->_num_segments;
/* Retrieve pointer to thread's shared memory buffer for angular flux */
track_flux = &temp_flux[track_flux_index];
/* Put Track's flux in the shared memory temporary flux array */
for (int p=0; p < *num_polar; p++) {
/* Forward flux along this Track */
track_flux[p] = boundary_flux(track_id,p+energy_angle_index);
/* Reverse flux along this Track */
track_flux[(*num_polar) + p] =
boundary_flux(track_id,p+energy_angle_index+(*polar_times_groups));
}
/* Loop over each Track segment in forward direction */
for (int i=0; i < num_segments; i++) {
curr_segment = &curr_track->_segments[i];
scalarFluxTally(curr_segment, azim_index, energy_group, materials,
track_flux, reduced_source, polar_weights,
_exp_table, scalar_flux);
}
/* Transfer boundary angular flux to outgoing Track */
transferBoundaryFlux(curr_track, azim_index, track_flux, boundary_flux,
&leakage[threadIdx.x + blockIdx.x * blockDim.x],
polar_weights, energy_angle_index, true);
/* Loop over each Track segment in reverse direction */
track_flux = &temp_flux[track_flux_index + (*num_polar)];
for (int i=num_segments-1; i > -1; i--) {
curr_segment = &curr_track->_segments[i];
scalarFluxTally(curr_segment, azim_index, energy_group, materials,
track_flux, reduced_source, polar_weights,
_exp_table, scalar_flux);
}
/* Transfer boundary angular flux to outgoing Track */
transferBoundaryFlux(curr_track, azim_index, track_flux, boundary_flux,
&leakage[threadIdx.x + blockIdx.x * blockDim.x],
polar_weights, energy_angle_index, false);
/* Update the indices for this thread to the next Track, energy group */
tid += blockDim.x * gridDim.x;
track_id = tid / *num_groups;
energy_group = tid % (*num_groups);
energy_angle_index = energy_group * (*num_polar);
}
return;
}
/**
* @brief Add the source term contribution in the transport equation to
* the FSR scalar flux on the GPU.
* @param scalar_flux an array of FSR scalar fluxes
* @param reduced_source an array of FSR sources / total xs
* @param FSR_volumes an array of FSR volumes
* @param FSR_materials an array of FSR material UIDs
* @param materials an array of dev_material pointers
*/
__global__ void addSourceToScalarFluxOnDevice(FP_PRECISION* scalar_flux,
FP_PRECISION* reduced_source,
FP_PRECISION* FSR_volumes,
int* FSR_materials,
dev_material* materials) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
FP_PRECISION volume;
dev_material* curr_material;
FP_PRECISION* sigma_t;
/* Iterate over all FSRs */
while (tid < *num_FSRs) {
curr_material = &materials[FSR_materials[tid]];
volume = FSR_volumes[tid];
sigma_t = curr_material->_sigma_t;
/* Iterate over all energy groups */
for (int i=0; i < *num_groups; i++) {
scalar_flux(tid,i) *= 0.5;
scalar_flux(tid,i) = FOUR_PI * reduced_source(tid,i) +
__fdividef(scalar_flux(tid,i), (sigma_t[i] * volume));
}
/* Increment thread id */
tid += blockDim.x * gridDim.x;
}
return;
}
/**
* @brief Computes the volume-weighted, energy integrated fission rate in
* each FSR and stores them in an array indexed by FSR ID on the GPU.
* @details This is a helper method for the
* GPUSolver::computeFSRFissionRates(...) method.
 * @param fission_rates an array in which to store the FSR fission rates
* @param FSR_materials an array of FSR material UIDs
* @param materials an array of dev_material pointers
* @param scalar_flux an array of FSR scalar fluxes
*/
__global__ void computeFSRFissionRatesOnDevice(double* fission_rates,
int* FSR_materials,
dev_material* materials,
FP_PRECISION* scalar_flux) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
dev_material* curr_material;
FP_PRECISION* sigma_f;
/* Loop over all FSRs and compute the volume-weighted fission rate */
while (tid < *num_FSRs) {
curr_material = &materials[FSR_materials[tid]];
sigma_f = curr_material->_sigma_f;
/* Initialize the fission rate for this FSR to zero */
fission_rates[tid] = 0.0;
for (int i=0; i < *num_groups; i++)
fission_rates[tid] += sigma_f[i] * scalar_flux(tid,i);
/* Increment thread id */
tid += blockDim.x * gridDim.x;
}
return;
}
/**
 * @brief Constructor initializes arrays for dev_tracks and dev_materials.
* @details The constructor retrieves the number of energy groups and FSRs
* and azimuthal angles from the Geometry and TrackGenerator if
 *          passed in as parameters by the user. The constructor initializes
* the number of CUDA threads and thread blocks each to a default
* of 64.
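 *
 *          A minimal construction sketch (this assumes the geometry and
 *          track_generator objects have already been created, initialized
 *          and segmentized; the thread counts below are illustrative only):
 *
 * @code
 *          GPUSolver solver(geometry, track_generator);
 *          solver.setNumThreadBlocks(64);
 *          solver.setNumThreadsPerBlock(128);
 * @endcode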
* @param geometry an optional pointer to the Geometry
 * @param track_generator an optional pointer to the TrackGenerator
*/
GPUSolver::GPUSolver(Geometry* geometry, TrackGenerator* track_generator) :
Solver(geometry, track_generator) {
/* The default number of thread blocks and threads per thread block */
_B = 64;
_T = 64;
_materials = NULL;
_dev_tracks = NULL;
_tot_absorption = NULL;
_tot_fission = NULL;
_leakage = NULL;
if (track_generator != NULL)
setTrackGenerator(track_generator);
if (geometry != NULL)
setGeometry(geometry);
}
/**
* @brief Solver destructor frees all memory on the device, including arrays
* for the FSR scalar fluxes and sources and Track boundary fluxes.
*/
GPUSolver::~GPUSolver() {
if (_FSR_volumes != NULL) {
cudaFree(_FSR_volumes);
_FSR_volumes = NULL;
}
if (_FSR_materials != NULL) {
cudaFree(_FSR_materials);
_FSR_materials = NULL;
}
if (_materials != NULL) {
cudaFree(_materials);
_materials = NULL;
}
if (_dev_tracks != NULL) {
cudaFree(_dev_tracks);
_dev_tracks = NULL;
}
if (_boundary_flux != NULL) {
cudaFree(_boundary_flux);
_boundary_flux = NULL;
}
if (_scalar_flux != NULL) {
cudaFree(_scalar_flux);
_scalar_flux = NULL;
}
if (_source != NULL) {
cudaFree(_source);
_source = NULL;
}
if (_old_source != NULL) {
cudaFree(_old_source);
_old_source = NULL;
}
if (_reduced_source != NULL) {
cudaFree(_reduced_source);
_reduced_source = NULL;
}
if (_fission_sources != NULL) {
_fission_sources_vec.clear();
_fission_sources = NULL;
}
if (_tot_absorption != NULL) {
_tot_absorption_vec.clear();
_tot_absorption = NULL;
}
if (_tot_fission != NULL) {
_tot_fission_vec.clear();
_tot_fission = NULL;
}
if (_source_residuals != NULL) {
_source_residuals_vec.clear();
_source_residuals = NULL;
}
if (_leakage != NULL) {
_leakage_vec.clear();
_leakage = NULL;
}
if (_exp_table != NULL) {
cudaFree(_exp_table);
_exp_table = NULL;
}
}
/**
* @brief Returns the number of thread blocks to execute on the GPU.
* @return the number of thread blocks
*/
int GPUSolver::getNumThreadBlocks() {
return _B;
}
/**
* @brief Returns the number of threads per block to execute on the GPU.
* @return the number of threads per block
*/
int GPUSolver::getNumThreadsPerBlock() {
return _T;
}
/**
* @brief Returns the FSR scalar flux for some energy group.
* @param fsr_id the ID for the FSR of interest
* @param energy_group the energy group of interest
*/
FP_PRECISION GPUSolver::getFSRScalarFlux(int fsr_id, int energy_group) {
/* Error checking */
if (fsr_id >= _num_FSRs)
log_printf(ERROR, "Unable to return a scalar flux for FSR id = %d in energy"
" group %d since the solver only contains FSR with IDs greater "
"than or equal to %d", fsr_id, energy_group, _num_FSRs-1);
if (fsr_id < 0)
log_printf(ERROR, "Unable to return a scalar flux for FSR id = %d in energy"
" group %d since FSRs do not have negative IDs",
fsr_id, energy_group);
if (energy_group-1 >= _num_groups)
log_printf(ERROR, "Unable to return a scalar flux for FSR id = %d in energy"
" group %d since the solver only has %d energy groups",
fsr_id, energy_group, _num_groups);
if (energy_group <= 0)
log_printf(ERROR, "Unable to return a scalar flux for FSR id = %d in energy"
" group %d since energy groups are greater than 1",
fsr_id, energy_group);
/* Copy the scalar flux for this FSR and energy group from the device */
FP_PRECISION fsr_scalar_flux;
int flux_index = fsr_id * _num_groups + (energy_group - 1);
cudaMemcpy((void*)&fsr_scalar_flux, (void*)&_scalar_flux[flux_index],
sizeof(FP_PRECISION), cudaMemcpyDeviceToHost);
return fsr_scalar_flux;
}
/**
* @brief Return the scalar flux array indexed by FSR IDs and energy groups.
* which contains the corresponding fluxes for each flat source region.
* @return an array of FSR scalar fluxes
*/
FP_PRECISION* GPUSolver::getFSRScalarFluxes() {
if (_scalar_flux == NULL)
log_printf(ERROR, "Unable to returns the GPUSolver's scalar flux "
"array since it has not yet been allocated in memory");
/* Copy the scalar flux for all FSRs from the device to the host */
FP_PRECISION* fsr_scalar_fluxes = new FP_PRECISION[_num_FSRs * _num_groups];
cudaMemcpy((void*)fsr_scalar_fluxes, (void*)_scalar_flux,
_num_FSRs * _num_groups * sizeof(FP_PRECISION),
cudaMemcpyDeviceToHost);
return fsr_scalar_fluxes;
}
/**
* @brief Returns the FSR source for some energy group.
* @param fsr_id the ID for the FSR of interest
* @param energy_group the energy group of interest
*/
FP_PRECISION GPUSolver::getFSRSource(int fsr_id, int energy_group) {
/* Error checking */
if (fsr_id >= _num_FSRs)
log_printf(ERROR, "Unable to return a source for FSR id = %d in energy"
" group %d since the solver only contains FSR with IDs greater than "
"or equal to %d", fsr_id, energy_group, _num_FSRs-1);
if (fsr_id < 0)
log_printf(ERROR, "Unable to return a source for FSR id = %d in energy"
" group %d since FSRs do not have negative IDs",
fsr_id, energy_group);
if (energy_group-1 >= _num_groups)
log_printf(ERROR, "Unable to return a source for FSR id = %d in energy"
" group %d since the solver only has %d energy groups",
fsr_id, energy_group, _num_groups);
if (energy_group <= 0)
log_printf(ERROR, "Unable to return a source for FSR id = %d in energy"
" group %d since energy groups are greater than 1",
fsr_id, energy_group);
/* Copy the source for this FSR and energy group from the device */
FP_PRECISION fsr_source;
int flux_index = fsr_id * _num_groups + (energy_group - 1);
cudaMemcpy((void*)&fsr_source, (void*)&_source[flux_index],
sizeof(FP_PRECISION), cudaMemcpyDeviceToHost);
return fsr_source;
}
/**
* @brief Sets the number of thread blocks (>0) for CUDA kernels.
* @param num_blocks the number of thread blocks
*/
void GPUSolver::setNumThreadBlocks(int num_blocks) {
if (num_blocks < 0)
log_printf(ERROR, "Unable to set the number of CUDA thread blocks "
"to %d since it is a negative number", num_blocks);
_B = num_blocks;
}
/**
* @brief Sets the number of threads per block (>0) for CUDA kernels.
* @param num_threads the number of threads per block
*/
void GPUSolver::setNumThreadsPerBlock(int num_threads) {
if (num_threads < 0)
log_printf(ERROR, "Unable to set the number of CUDA threads per block "
"to %d since it is a negative number", num_threads);
_T = num_threads;
}
/**
* @brief Sets the Geometry pointer for the GPUSolver.
* @details The Geometry must already have initialized FSR offset maps
* and segmentized the TrackGenerator's tracks. Each of these
* should be initiated in Python prior to assigning a Geometry
* to the GPUSolver:
*
* @code
* geometry.initializeFlatSourceRegions()
* track_generator.generateTracks()
* @endcode
*
* @param geometry a pointer to a Geometry
*/
void GPUSolver::setGeometry(Geometry* geometry) {
Solver::setGeometry(geometry);
initializeMaterials();
/* Copy the number of energy groups to constant memory on the GPU */
cudaMemcpyToSymbol(num_groups, (void*)&_num_groups, sizeof(int), 0,
cudaMemcpyHostToDevice);
}
/**
* @brief Sets the TrackGenerator with characteristic tracks for the GPUSolver.
* @details The TrackGenerator must already have generated Tracks and have
* used ray tracing to segmentize them across the Geometry. This
 *          should be initiated in Python prior to assigning the TrackGenerator
* to the GPUSolver:
*
* @code
* track_generator.generateTracks()
* @endcode
*
* @param track_generator a pointer to a TrackGenerator
*/
void GPUSolver::setTrackGenerator(TrackGenerator* track_generator) {
Solver::setTrackGenerator(track_generator);
initializeTracks();
}
/**
* @brief Creates a polar Quadrature object for the GPUSolver on the GPU.
*/
void GPUSolver::initializePolarQuadrature() {
log_printf(INFO, "Initializing polar quadrature on the GPU...");
/* Deletes the old Quadrature if one existed */
if (_quad != NULL)
delete _quad;
_quad = new Quadrature(_quadrature_type, _num_polar);
_polar_times_groups = _num_groups * _num_polar;
/* Copy the number of polar angles to constant memory on the GPU */
cudaMemcpyToSymbol(num_polar, (void*)&_num_polar, sizeof(int), 0,
cudaMemcpyHostToDevice);
/* Copy twice the number of polar angles to constant memory on the GPU */
cudaMemcpyToSymbol(two_times_num_polar, (void*)&_two_times_num_polar,
sizeof(int), 0, cudaMemcpyHostToDevice);
/* Copy the number of polar angles times energy groups to constant memory
* on the GPU */
cudaMemcpyToSymbol(polar_times_groups, (void*)&_polar_times_groups,
sizeof(int), 0, cudaMemcpyHostToDevice);
/* Compute polar times azimuthal angle weights */
if (_polar_weights != NULL)
delete [] _polar_weights;
_polar_weights =
(FP_PRECISION*)malloc(_num_polar * _num_azim * sizeof(FP_PRECISION));
FP_PRECISION* multiples = _quad->getMultiples();
FP_PRECISION* azim_weights = _track_generator->getAzimWeights();
for (int i=0; i < _num_azim; i++) {
for (int j=0; j < _num_polar; j++)
_polar_weights[i*_num_polar+j] = azim_weights[i]*multiples[j]*FOUR_PI;
}
/* Copy the polar weights to constant memory on the GPU */
cudaMemcpyToSymbol(polar_weights, (void*)_polar_weights,
_num_polar * _num_azim * sizeof(FP_PRECISION), 0, cudaMemcpyHostToDevice);
}
/**
* @brief Initializes the FSR volumes and dev_materials array on the GPU.
* @details This method assigns each FSR a unique, monotonically increasing
* ID, sets the Material for each FSR, and assigns a volume based on
* the cumulative length of all of the segments inside the FSR.
*/
void GPUSolver::initializeFSRs() {
log_printf(INFO, "Initializing FSRs on the GPU...");
/* Delete old FSRs array if it exists */
if (_FSR_volumes != NULL)
cudaFree(_FSR_volumes);
if (_FSR_materials != NULL)
cudaFree(_FSR_materials);
/* Allocate memory for all FSR volumes and dev_materials on the device */
try{
/* Allocate memory on device for FSR volumes and Material UIDs */
cudaMalloc((void**)&_FSR_volumes, _num_FSRs * sizeof(FP_PRECISION));
cudaMalloc((void**)&_FSR_materials, _num_FSRs * sizeof(int));
/* Create a temporary FSR array to populate and then copy to device */
FP_PRECISION* temp_FSR_volumes = new FP_PRECISION[_num_FSRs];
/* Get the array indexed by FSR IDs with Material ID values */
int* FSRs_to_materials = _geometry->getFSRtoMaterialMap();
    /* Initialize each FSR's volume to 0 to avoid NaNs */
memset(temp_FSR_volumes, FP_PRECISION(0.), _num_FSRs*sizeof(FP_PRECISION));
Track* track;
int num_segments;
segment* curr_segment;
segment* segments;
FP_PRECISION volume;
FP_PRECISION* azim_weights = _track_generator->getAzimWeights();
/* Set each FSR's volume by accumulating the total length of all Tracks
* inside the FSR. Iterate over azimuthal angle, Track, Track segment*/
for (int i=0; i < _num_azim; i++) {
for (int j=0; j < _num_tracks[i]; j++) {
track = &_track_generator->getTracks()[i][j];
num_segments = track->getNumSegments();
segments = track->getSegments();
/* Iterate over the Track's segments to update FSR volumes */
for (int s = 0; s < num_segments; s++) {
curr_segment = &segments[s];
volume = curr_segment->_length * azim_weights[i];
temp_FSR_volumes[curr_segment->_region_id] += volume;
}
}
}
/* Copy the temporary array of FSRs to the device */
cudaMemcpy((void*)_FSR_volumes, (void*)temp_FSR_volumes,
_num_FSRs * sizeof(FP_PRECISION), cudaMemcpyHostToDevice);
cudaMemcpy((void*)_FSR_materials, (void*)FSRs_to_materials,
_num_FSRs * sizeof(int), cudaMemcpyHostToDevice);
/* Copy the number of FSRs into constant memory on the GPU */
cudaMemcpyToSymbol(num_FSRs, (void*)&_num_FSRs, sizeof(int), 0,
cudaMemcpyHostToDevice);
/* Free the temporary array of FSRs on the host */
free(temp_FSR_volumes);
}
catch(std::exception &e) {
log_printf(ERROR, "Could not allocate memory for the GPUSolver's FSRs "
"on the device. Backtrace:%s", e.what());
}
initializeThrustVectors();
}
/**
* @brief Allocates data on the GPU for all Materials data.
*/
void GPUSolver::initializeMaterials() {
log_printf(INFO, "Initializing materials on the GPU...");
/* Delete old materials array if it exists */
if (_materials != NULL)
cudaFree(_materials);
/* Allocate memory for all dev_materials on the device */
try{
std::map<int, Material*> host_materials=_geometry->getMaterials();
std::map<int, Material*>::iterator iter;
/* Iterate through all Materials and clone them as dev_material structs
* on the device */
cudaMalloc((void**)&_materials, _num_materials * sizeof(dev_material));
for (iter=host_materials.begin(); iter != host_materials.end(); ++iter)
clone_material_on_gpu(iter->second, &_materials[iter->second->getUid()]);
}
catch(std::exception &e) {
log_printf(ERROR, "Could not allocate memory for the GPUSolver's "
"dev_materials. Backtrace:%s", e.what());
}
}
/**
* @brief Allocates memory on the GPU for all Tracks in the simulation.
*/
void GPUSolver::initializeTracks() {
log_printf(INFO, "Initializing tracks on the GPU...");
/* Delete old Tracks array if it exists */
if (_dev_tracks != NULL)
cudaFree(_dev_tracks);
/* Allocate memory for all Tracks and Track offset indices on the device */
try{
/* Allocate array of dev_tracks */
cudaMalloc((void**)&_dev_tracks, _tot_num_tracks * sizeof(dev_track));
/* Iterate through all Tracks and clone them as dev_tracks on the device */
int index;
for (int i=0; i < _tot_num_tracks; i++) {
clone_track_on_gpu(_tracks[i], &_dev_tracks[i]);
/* Make Track reflective */
index = computeScalarTrackIndex(_tracks[i]->getTrackInI(),
_tracks[i]->getTrackInJ());
cudaMemcpy((void*)&_dev_tracks[i]._track_in,
(void*)&index, sizeof(int), cudaMemcpyHostToDevice);
index = computeScalarTrackIndex(_tracks[i]->getTrackOutI(),
_tracks[i]->getTrackOutJ());
cudaMemcpy((void*)&_dev_tracks[i]._track_out,
(void*)&index, sizeof(int), cudaMemcpyHostToDevice);
}
/* Copy the array of number of Tracks for each azimuthal angle into
* constant memory on GPU */
cudaMemcpyToSymbol(num_tracks, (void*)_num_tracks,
_num_azim * sizeof(int), 0, cudaMemcpyHostToDevice);
/* Copy the total number of Tracks into constant memory on GPU */
cudaMemcpyToSymbol(tot_num_tracks, (void*)&_tot_num_tracks,
sizeof(int), 0, cudaMemcpyHostToDevice);
/* Copy the number of azimuthal angles into constant memory on GPU */
cudaMemcpyToSymbol(num_azim, (void*)&_num_azim, sizeof(int), 0,
cudaMemcpyHostToDevice);
}
catch(std::exception &e) {
log_printf(ERROR, "Could not allocate memory for the GPUSolver's "
"dev_tracks on the device. Backtrace:%s", e.what());
}
}
/**
* @brief Allocates memory for Track boundary angular fluxes and leakages
* and FSR scalar fluxes on the GPU.
* @details Deletes memory for old flux arrays if they were allocated for a
* previous simulation.
*/
void GPUSolver::initializeFluxArrays() {
log_printf(INFO, "Initializing flux arrays on the GPU...");
/* Delete old flux arrays if they exist */
if (_boundary_flux != NULL)
cudaFree(_boundary_flux);
if (_scalar_flux != NULL)
cudaFree(_scalar_flux);
/* Allocate memory for all flux arrays on the device */
try{
cudaMalloc((void**)&_boundary_flux,
2*_tot_num_tracks * _polar_times_groups*sizeof(FP_PRECISION));
cudaMalloc((void**)&_scalar_flux,
_num_FSRs * _num_groups * sizeof(FP_PRECISION));
}
catch(std::exception &e) {
log_printf(ERROR, "Could not allocate memory for the GPUSolver's fluxes "
"on the device. Backtrace:%s", e.what());
}
}
/**
* @brief Allocates memory for FSR source arrays on the GPU.
* @details Deletes memory for old source arrays if they were allocated for a
* previous simulation.
*/
void GPUSolver::initializeSourceArrays() {
log_printf(INFO, "Initializing source arrays on the GPU...");
/* Delete old sources arrays if they exist */
if (_source != NULL)
cudaFree(_source);
if (_old_source != NULL)
cudaFree(_old_source);
if (_reduced_source != NULL)
cudaFree(_reduced_source);
/* Allocate memory for all source arrays on the device */
try{
cudaMalloc((void**)&_source,
_num_FSRs * _num_groups * sizeof(FP_PRECISION));
cudaMalloc((void**)&_old_source,
_num_FSRs * _num_groups * sizeof(FP_PRECISION));
cudaMalloc((void**)&_reduced_source,
_num_FSRs * _num_groups * sizeof(FP_PRECISION));
}
catch(std::exception &e) {
log_printf(ERROR, "Could not allocate memory for the GPUSolver's FSR "
"sources array on the device. Backtrace:%s", e.what());
}
}
/**
* @brief Initialize Thrust vectors for the fission and absorption rates,
* source residuals, leakage and fission sources.
*/
void GPUSolver::initializeThrustVectors() {
log_printf(INFO, "Initializing Thrust vectors on the GPU...");
/* Delete old vectors if they exist */
if (_fission_sources != NULL) {
_fission_sources = NULL;
_fission_sources_vec.clear();
}
if (_tot_absorption != NULL) {
_tot_absorption = NULL;
_tot_absorption_vec.clear();
}
if (_tot_fission != NULL) {
_tot_fission = NULL;
_tot_fission_vec.clear();
}
if (_source_residuals != NULL) {
_source_residuals = NULL;
_source_residuals_vec.clear();
}
if (_leakage != NULL) {
_leakage = NULL;
_leakage_vec.clear();
}
/* Allocate memory for fission, absorption and source vectors on device */
try{
/* Allocate fission source array on device */
_fission_sources_vec.resize(_B * _T);
_fission_sources = thrust::raw_pointer_cast(&_fission_sources_vec[0]);
/* Allocate total absorption reaction rate array on device */
_tot_absorption_vec.resize(_B * _T);
_tot_absorption = thrust::raw_pointer_cast(&_tot_absorption_vec[0]);
/* Allocate fission reaction rate array on device */
_tot_fission_vec.resize(_B * _T);
_tot_fission = thrust::raw_pointer_cast(&_tot_fission_vec[0]);
/* Allocate source residual array on device */
_source_residuals_vec.resize(_B * _T);
_source_residuals = thrust::raw_pointer_cast(&_source_residuals_vec[0]);
/* Allocate leakage array on device */
_leakage_vec.resize(_B * _T);
_leakage = thrust::raw_pointer_cast(&_leakage_vec[0]);
}
catch(std::exception &e) {
log_printf(ERROR, "Could not allocate memory for the GPUSolver's "
"Thrust vectors. Backtrace:%s", e.what());
}
}
/**
* @brief This method computes the index for the Track j at azimuthal angle i.
* @details This method is necessary since the array of dev_tracks on the device
* is a 1D array which needs a one-to-one mapping from the 2D jagged
* array of Tracks on the host.
* @param i azimuthal angle number
* @param j the jth track at angle i
* @return an index into the device track array
*/
int GPUSolver::computeScalarTrackIndex(int i, int j) {
int index =0;
int p = 0;
/* Iterate over each azimuthal angle and increment index by the number of
* Tracks at each angle */
while (p < i) {
index += _num_tracks[p];
p++;
}
/* Update index for this Track since it is the jth Track at angle i */
index += j;
return index;
}
/**
* @brief Builds a linear interpolation table to compute exponentials for
* each segment of each Track for each polar angle on the GPU.
*/
void GPUSolver::buildExpInterpTable(){
log_printf(INFO, "Building exponential interpolation table on device...");
/* Copy a boolean indicating whether or not to use the linear interpolation
* table or the exp intrinsic function */
cudaMemcpyToSymbol(interpolate_exponential,(void*)&_interpolate_exponential,
sizeof(bool), 0, cudaMemcpyHostToDevice);
/* Copy the sines of the polar angles which is needed if the user
* requested the use of the exp intrinsic to evaluate exponentials */
cudaMemcpyToSymbol(sinthetas, (void*)_quad->getSinThetas(),
_num_polar * sizeof(FP_PRECISION), 0,
cudaMemcpyHostToDevice);
/* Set size of interpolation table */
int num_array_values =
10 * sqrt(1. / (8. * _source_convergence_thresh * 1e-2));
_exp_table_spacing = 10. / num_array_values;
_inverse_exp_table_spacing = 1.0 / _exp_table_spacing;
_exp_table_size = _two_times_num_polar * num_array_values;
_exp_table_max_index = _exp_table_size - _two_times_num_polar - 1;
/* Allocate arrays */
FP_PRECISION* exp_table = new FP_PRECISION[_exp_table_size];
FP_PRECISION expon;
FP_PRECISION intercept;
FP_PRECISION slope;
/* Create exponential interpolation table */
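  /* For each tabulated tau = i * spacing and polar angle p, the stored
   * (slope, intercept) pair is the tangent line approximation of
   * exp(-tau / sin(theta_p)) at that tau */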
for (int i = 0; i < num_array_values; i ++){
for (int p = 0; p < _num_polar; p++){
expon = exp(- (i * _exp_table_spacing) / _quad->getSinTheta(p));
slope = - expon / _quad->getSinTheta(p);
intercept = expon * (1 + (i * _exp_table_spacing)/_quad->getSinTheta(p));
exp_table[_two_times_num_polar * i + 2 * p] = slope;
exp_table[_two_times_num_polar * i + 2 * p + 1] = intercept;
}
}
/* Allocate memory for the interpolation table on the device */
cudaMalloc((void**)&_exp_table, _exp_table_size * sizeof(FP_PRECISION));
/* Copy exponential interpolation table to the device */
cudaMemcpy((void*)_exp_table, (void*)exp_table,
_exp_table_size * sizeof(FP_PRECISION),
cudaMemcpyHostToDevice);
/* Copy table size and spacing to constant memory on the device */
cudaMemcpyToSymbol(exp_table_spacing, (void*)&_exp_table_spacing,
sizeof(FP_PRECISION), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(inverse_exp_table_spacing,
(void*)&_inverse_exp_table_spacing,
sizeof(FP_PRECISION), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(exp_table_max_index, (void*)&_exp_table_max_index,
sizeof(int), 0, cudaMemcpyHostToDevice);
  delete [] exp_table;
return;
}
/**
* @brief Zero each Track's boundary fluxes for each energy group and polar
* angle in the "forward" and "reverse" directions.
*/
void GPUSolver::zeroTrackFluxes() {
int size = 2 * _tot_num_tracks * _num_polar * _num_groups;
size *= sizeof(FP_PRECISION);
cudaMemset(_boundary_flux, 0.0, size);
return;
}
/**
* @brief Set the FSR scalar flux for each energy group to some value.
* @param value the value to assign to each FSR scalar flux
*/
void GPUSolver::flattenFSRFluxes(FP_PRECISION value) {
int size = _num_FSRs * _num_groups * sizeof(FP_PRECISION);
cudaMemset(_scalar_flux, value, size);
return;
}
/**
* @brief Set the FSR source for each energy group to some value.
* @param value the value to assign to each FSR source
*/
void GPUSolver::flattenFSRSources(FP_PRECISION value) {
int size = _num_FSRs * _num_groups * sizeof(FP_PRECISION);
cudaMemset(_source, value, size);
cudaMemset(_old_source, value, size);
return;
}
/**
* @brief Normalizes all FSR scalar fluxes and Track boundary angular
* fluxes to the total fission source (times \f$ \nu \f$).
*/
void GPUSolver::normalizeFluxes() {
int shared_mem = sizeof(FP_PRECISION) * _T;
computeFissionSourcesOnDevice<<<_B, _T, shared_mem>>>(_FSR_volumes,
_FSR_materials,
_materials,
_scalar_flux,
_fission_sources);
FP_PRECISION norm_factor = 1.0 / thrust::reduce(_fission_sources_vec.begin(),
_fission_sources_vec.end());
normalizeFluxesOnDevice<<<_B, _T>>>(_scalar_flux, _boundary_flux,norm_factor);
}
/**
* @brief Computes the total source (fission and scattering) in each FSR.
* @details This method computes the total source in each FSR based on
* this iteration's current approximation to the scalar flux. A
 *          residual for the source with respect to the source computed on
 *          the previous iteration is computed and returned. The residual
 *          is determined as follows:
 *          \f$ res = \sqrt{\frac{\displaystyle\sum \displaystyle\sum
 *          \left(\frac{Q^i - Q^{i-1}}{Q^i}\right)^2}{\# FSRs}} \f$
*
* @return the residual between this source and the previous source
*/
FP_PRECISION GPUSolver::computeFSRSources() {
computeFSRSourcesOnDevice<<<_B, _T>>>(_FSR_materials, _materials,
_scalar_flux, _source, _old_source,
_reduced_source, 1.0 / _k_eff,
_source_residuals);
FP_PRECISION residual = thrust::reduce(_source_residuals_vec.begin(),
_source_residuals_vec.end());
residual = sqrt(residual / (_num_groups * _num_FSRs));
return residual;
}
/**
* @brief This method performs one transport sweep of all azimuthal angles,
* Tracks, Track segments, polar angles and energy groups.
* @details The method integrates the flux along each Track and updates the
* boundary fluxes for the corresponding output Track, while updating
* the scalar flux in each flat source region.
*/
void GPUSolver::transportSweep() {
int shared_mem = _T * _two_times_num_polar * sizeof(FP_PRECISION);
int tid_offset, tid_max;
log_printf(DEBUG, "Transport sweep on device with %d blocks and %d threads",
_B, _T);
/* Initialize leakage to zero */
thrust::fill(_leakage_vec.begin(), _leakage_vec.end(), 0.0);
/* Initialize flux in each FSR to zero */
flattenFSRFluxes(0.0);
/* Sweep the first halfspace of azimuthal angle space */
tid_offset = 0;
tid_max = (_tot_num_tracks / 2);
transportSweepOnDevice<<<_B, _T, shared_mem>>>(_scalar_flux, _boundary_flux,
_reduced_source, _leakage,
_materials, _dev_tracks,
_exp_table,
tid_offset, tid_max);
/* Sweep the second halfspace of azimuthal angle space */
tid_offset = tid_max * _num_groups;
tid_max = _tot_num_tracks;
transportSweepOnDevice<<<_B, _T, shared_mem>>>(_scalar_flux, _boundary_flux,
_reduced_source, _leakage,
_materials, _dev_tracks,
_exp_table,
tid_offset, tid_max);
}
/**
* @brief Add the source term contribution in the transport equation to
* the FSR scalar flux.
*/
void GPUSolver::addSourceToScalarFlux() {
addSourceToScalarFluxOnDevice<<<_B,_T>>>(_scalar_flux, _reduced_source,
_FSR_volumes, _FSR_materials,
_materials);
}
/**
* @brief Compute \f$ k_{eff} \f$ from the total fission and absorption rates.
* @details This method computes the current approximation to the
* multiplication factor on this iteration as follows:
 *          \f$ k_{eff} = \frac{\displaystyle\sum \displaystyle\sum \nu
 *          \Sigma_f \Phi V}{\displaystyle\sum
 *          \displaystyle\sum \Sigma_a \Phi V + L} \f$
 *          where \f$ L \f$ is the total leakage through vacuum boundaries.
*/
void GPUSolver::computeKeff() {
FP_PRECISION tot_absorption;
FP_PRECISION tot_fission;
FP_PRECISION tot_leakage;
/* Compute the total fission and absorption rates on the device.
* This kernel stores partial rates in a Thrust vector with as many
   * entries as CUDA threads executed by the kernel */
computeFissionAndAbsorption<<<_B, _T>>>(_FSR_volumes, _FSR_materials,
_materials, _scalar_flux,
_tot_absorption, _tot_fission);
cudaDeviceSynchronize();
/* Compute the total absorption rate by reducing the partial absorption
* rates compiled in the Thrust vector */
tot_absorption = thrust::reduce(_tot_absorption_vec.begin(),
_tot_absorption_vec.end());
/* Compute the total fission rate by reducing the partial fission
* rates compiled in the Thrust vector */
tot_fission = thrust::reduce(_tot_fission_vec.begin(),_tot_fission_vec.end());
/* Compute the total leakage by reducing the partial leakage
* rates compiled in the Thrust vector */
tot_leakage = 0.5 * thrust::reduce(_leakage_vec.begin(), _leakage_vec.end());
/* Compute the new keff from the fission and absorption rates */
_k_eff = tot_fission / (tot_absorption + tot_leakage);
log_printf(DEBUG, "abs = %f, fiss = %f, leak = %f, keff = %f",
tot_absorption, tot_fission, tot_leakage, _k_eff);
}
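/* Illustrative arithmetic for the ratio above (made-up values, not from any
 * actual run): with tot_fission = 1.10, tot_absorption = 1.00 and
 * tot_leakage = 0.05, the estimate is k_eff = 1.10 / (1.00 + 0.05),
 * i.e. roughly 1.048. */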
/**
* @brief Computes the volume-weighted, energy integrated fission rate in
* each FSR and stores them in an array indexed by FSR ID.
* @details This is a helper method for SWIG to allow users to retrieve
* FSR fission rates as a NumPy array. An example of how this method
* can be called from Python is as follows:
*
* @code
* num_FSRs = geometry.getNumFSRs()
* fission_rates = solver.computeFSRFissionRates(num_FSRs)
* @endcode
*
* @param fission_rates an array to store the fission rates (implicitly passed
* in as a NumPy array from Python)
* @param num_FSRs the number of FSRs passed in from Python
*/
void GPUSolver::computeFSRFissionRates(double* fission_rates, int num_FSRs) {
log_printf(INFO, "Computing FSR fission rates...");
/* Allocate memory for the FSR fission rates on the device */
double* dev_fission_rates;
cudaMalloc((void**)&dev_fission_rates, _num_FSRs * sizeof(double));
/* Compute the FSR fission rates on the device */
computeFSRFissionRatesOnDevice<<<_B,_T>>>(dev_fission_rates,
_FSR_materials,
_materials,
_scalar_flux);
/* Copy the fission rate array from the device to the host */
cudaMemcpy((void*)fission_rates, (void*)dev_fission_rates,
_num_FSRs * sizeof(double), cudaMemcpyDeviceToHost);
/* Deallocate the memory assigned to store the fission rates on the device */
cudaFree(dev_fission_rates);
return;
}
|
886e9a9eb5ddb38ead4f359c906687a9d8d59367.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Created by ss on 19-1-20.
//
#include "thundergbm/builder/hist_tree_builder.h"
#include "thundergbm/util/cub_wrapper.h"
#include "thundergbm/util/device_lambda.cuh"
#include "thrust/iterator/counting_iterator.h"
#include "thrust/iterator/transform_iterator.h"
#include "thrust/iterator/discard_iterator.h"
#include "thrust/sequence.h"
#include "thrust/binary_search.h"
#include "thundergbm/util/multi_device.h"
void HistTreeBuilder::get_bin_ids() {
DO_ON_MULTI_DEVICES(param.n_device, [&](int device_id){
SparseColumns &columns = shards[device_id].columns;
HistCut &cut = this->cut[device_id];
auto &dense_bin_id = this->dense_bin_id[device_id];
using namespace thrust;
int n_column = columns.n_column;
int nnz = columns.nnz;
auto cut_row_ptr = cut.cut_row_ptr.device_data();
auto cut_points_ptr = cut.cut_points_val.device_data();
auto csc_val_data = columns.csc_val.device_data();
SyncArray<unsigned char> bin_id;
bin_id.resize(columns.nnz);
auto bin_id_data = bin_id.device_data();
int n_block = fminf((nnz / n_column - 1) / 256 + 1, 4 * 56);
{
auto lowerBound = [=]__device__(const float_type *search_begin, const float_type *search_end, float_type val) {
const float_type *left = search_begin;
const float_type *right = search_end - 1;
while (left != right) {
const float_type *mid = left + (right - left) / 2;
if (*mid <= val)
right = mid;
else left = mid + 1;
}
return left;
};
TIMED_SCOPE(timerObj, "binning");
device_loop_2d(n_column, columns.csc_col_ptr.device_data(), [=]__device__(int cid, int i) {
auto search_begin = cut_points_ptr + cut_row_ptr[cid];
auto search_end = cut_points_ptr + cut_row_ptr[cid + 1];
auto val = csc_val_data[i];
bin_id_data[i] = lowerBound(search_begin, search_end, val) - search_begin;
}, n_block);
}
auto max_num_bin = param.max_num_bin;
dense_bin_id.resize(n_instances * n_column);
auto dense_bin_id_data = dense_bin_id.device_data();
auto csc_row_idx_data = columns.csc_row_idx.device_data();
device_loop(n_instances * n_column, [=]__device__(int i) {
dense_bin_id_data[i] = max_num_bin;
});
device_loop_2d(n_column, columns.csc_col_ptr.device_data(), [=]__device__(int fid, int i) {
int row = csc_row_idx_data[i];
unsigned char bid = bin_id_data[i];
dense_bin_id_data[row * n_column + fid] = bid;
}, n_block);
});
}
void HistTreeBuilder::find_split(int level, int device_id) {
std::chrono::high_resolution_clock timer;
const SparseColumns &columns = shards[device_id].columns;
SyncArray<int> &nid = ins2node_id[device_id];
SyncArray<GHPair> &gh_pair = gradients[device_id];
Tree &tree = trees[device_id];
SyncArray<SplitPoint> &sp = this->sp[device_id];
SyncArray<bool> &ignored_set = shards[device_id].ignored_set;
HistCut &cut = this->cut[device_id];
auto &dense_bin_id = this->dense_bin_id[device_id];
auto &last_hist = this->last_hist[device_id];
TIMED_FUNC(timerObj);
int n_nodes_in_level = static_cast<int>(pow(2, level));
int nid_offset = static_cast<int>(pow(2, level) - 1);
int n_column = columns.n_column;
int n_partition = n_column * n_nodes_in_level;
int n_bins = cut.cut_points_val.size();
int n_max_nodes = 2 << param.depth;
int n_max_splits = n_max_nodes * n_bins;
int n_split = n_nodes_in_level * n_bins;
LOG(TRACE) << "start finding split";
//find the best split locally
{
using namespace thrust;
auto t_build_start = timer.now();
//calculate split information for each split
SyncArray<GHPair> hist(n_max_splits);
SyncArray<GHPair> missing_gh(n_partition);
auto cut_fid_data = cut.cut_fid.device_data();
auto i2fid = [=] __device__(int i) { return cut_fid_data[i % n_bins]; };
auto hist_fid = make_transform_iterator(counting_iterator<int>(0), i2fid);
{
{
TIMED_SCOPE(timerObj, "build hist");
{
size_t
smem_size = n_bins * sizeof(GHPair);
LOG(DEBUG) << "shared memory size = " << smem_size / 1024.0 << " KB";
if (n_nodes_in_level == 1) {
//root
auto hist_data = hist.device_data();
auto cut_row_ptr_data = cut.cut_row_ptr.device_data();
auto gh_data = gh_pair.device_data();
auto dense_bin_id_data = dense_bin_id.device_data();
auto max_num_bin = param.max_num_bin;
auto n_instances = this->n_instances;
if (smem_size > 48 * 1024) {
device_loop(n_instances * n_column, [=]__device__(int i) {
int iid = i / n_column;
int fid = i % n_column;
unsigned char bid = dense_bin_id_data[iid * n_column + fid];
if (bid != max_num_bin) {
int feature_offset = cut_row_ptr_data[fid];
const GHPair src = gh_data[iid];
GHPair &dest = hist_data[feature_offset + bid];
if(src.h != 0)
atomicAdd(&dest.h, src.h);
if(src.g != 0)
atomicAdd(&dest.g, src.g);
}
});
} else {
int num_fv = n_instances * n_column;
anonymous_kernel([=]__device__() {
extern __shared__ GHPair local_hist[];
for (int i = threadIdx.x; i < n_bins; i += blockDim.x) {
local_hist[i] = 0;
}
__syncthreads();
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < num_fv; i += blockDim.x * gridDim.x) {
int iid = i / n_column;
int fid = i % n_column;
unsigned char bid = dense_bin_id_data[iid * n_column + fid];
if (bid != max_num_bin) {
int feature_offset = cut_row_ptr_data[fid];
const GHPair src = gh_data[iid];
GHPair &dest = local_hist[feature_offset + bid];
if(src.h != 0)
atomicAdd(&dest.h, src.h);
if(src.g != 0)
atomicAdd(&dest.g, src.g);
}
}
__syncthreads();
for (int i = threadIdx.x; i < n_bins; i += blockDim.x) {
GHPair &dest = hist_data[i];
GHPair src = local_hist[i];
if(src.h != 0)
atomicAdd(&dest.h, src.h);
if(src.g != 0)
atomicAdd(&dest.g, src.g);
}
}, num_fv, smem_size);
}
} else {
//otherwise
auto t_dp_begin = timer.now();
SyncArray<int> node_idx(n_instances);
SyncArray<int> node_ptr(n_nodes_in_level + 1);
{
TIMED_SCOPE(timerObj, "data partitioning");
SyncArray<int> nid4sort(n_instances);
nid4sort.copy_from(ins2node_id[device_id]);
sequence(cuda::par, node_idx.device_data(), node_idx.device_end(), 0);
cub_sort_by_key(nid4sort, node_idx);
auto counting_iter = make_counting_iterator < int > (nid_offset);
node_ptr.host_data()[0] =
lower_bound(cuda::par, nid4sort.device_data(), nid4sort.device_end(), nid_offset) -
nid4sort.device_data();
upper_bound(cuda::par, nid4sort.device_data(), nid4sort.device_end(), counting_iter,
counting_iter + n_nodes_in_level, node_ptr.device_data() + 1);
LOG(DEBUG) << "node ptr = " << node_ptr;
hipDeviceSynchronize();
}
auto t_dp_end = timer.now();
std::chrono::duration<double> dp_used_time = t_dp_end - t_dp_begin;
this->total_dp_time += dp_used_time.count();
auto node_ptr_data = node_ptr.host_data();
auto node_idx_data = node_idx.device_data();
auto cut_row_ptr_data = cut.cut_row_ptr.device_data();
auto gh_data = gh_pair.device_data();
auto dense_bin_id_data = dense_bin_id.device_data();
auto max_num_bin = param.max_num_bin;
for (int i = 0; i < n_nodes_in_level / 2; ++i) {
int nid0_to_compute = i * 2;
int nid0_to_substract = i * 2 + 1;
int n_ins_left = node_ptr_data[nid0_to_compute + 1] - node_ptr_data[nid0_to_compute];
int n_ins_right = node_ptr_data[nid0_to_substract + 1] - node_ptr_data[nid0_to_substract];
if (max(n_ins_left, n_ins_right) == 0) continue;
if (n_ins_left > n_ins_right)
swap(nid0_to_compute, nid0_to_substract);
//compute
{
int nid0 = nid0_to_compute;
auto idx_begin = node_ptr.host_data()[nid0];
auto idx_end = node_ptr.host_data()[nid0 + 1];
auto hist_data = hist.device_data() + nid0 * n_bins;
this->total_hist_num++;
if (smem_size > 48 * 1024) {
device_loop((idx_end - idx_begin) * n_column, [=]__device__(int i) {
int iid = node_idx_data[i / n_column + idx_begin];
int fid = i % n_column;
unsigned char bid = dense_bin_id_data[iid * n_column + fid];
if (bid != max_num_bin) {
int feature_offset = cut_row_ptr_data[fid];
const GHPair src = gh_data[iid];
GHPair &dest = hist_data[feature_offset + bid];
if(src.h != 0)
atomicAdd(&dest.h, src.h);
if(src.g != 0)
atomicAdd(&dest.g, src.g);
}
});
} else {
int num_fv = (idx_end - idx_begin) * n_column;
anonymous_kernel([=] __device__() {
extern __shared__ GHPair local_hist[];
for (int i = threadIdx.x; i < n_bins; i += blockDim.x) {
local_hist[i] = 0;
}
__syncthreads();
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < num_fv; i += blockDim.x * gridDim.x) {
int iid = node_idx_data[i / n_column + idx_begin];
//int fid = i - n_column *( i / n_column);
int fid = i % n_column;
unsigned char bid = dense_bin_id_data[iid * n_column + fid];
if (bid != max_num_bin) {
const GHPair src = gh_data[iid];
int feature_offset = cut_row_ptr_data[fid];
GHPair &dest = local_hist[feature_offset + bid];
if(src.h != 0)
atomicAdd(&dest.h, src.h);
if(src.g != 0)
atomicAdd(&dest.g, src.g);
}
}
__syncthreads();
for (int i = threadIdx.x; i < n_bins; i += blockDim.x) {
GHPair src = local_hist[i];
GHPair &dest = hist_data[i];
if(src.h != 0)
atomicAdd(&dest.h, src.h);
if(src.g != 0)
atomicAdd(&dest.g, src.g);
}
}, num_fv, smem_size);
}
}
//subtract
auto t_copy_start = timer.now();
{
auto hist_data_computed = hist.device_data() + nid0_to_compute * n_bins;
auto hist_data_to_compute = hist.device_data() + nid0_to_substract * n_bins;
auto father_hist_data = last_hist.device_data() + (nid0_to_substract / 2) * n_bins;
device_loop(n_bins, [=]__device__(int i) {
hist_data_to_compute[i] = father_hist_data[i] - hist_data_computed[i];
});
}
auto t_copy_end = timer.now();
std::chrono::duration<double> cp_used_time = t_copy_end - t_copy_start;
this->total_copy_time += cp_used_time.count();
// PERFORMANCE_CHECKPOINT(timerObj);
} // end for each node
}//end # node > 1
last_hist.copy_from(hist);
hipDeviceSynchronize();
}
LOG(DEBUG) << "level: " << level;
LOG(DEBUG) << "hist new = " << hist;
auto t_build_hist_end = timer.now();
std::chrono::duration<double> bh_used_time = t_build_hist_end - t_build_start;
this->build_hist_used_time += bh_used_time.count();
this->build_n_hist++;
LOG(DEBUG) << "-------------->>> build_hist_used_time: " << bh_used_time.count();
LOG(DEBUG) << "-------------->>> build_num_hist: " << this->build_n_hist;
LOG(DEBUG) << "-------------->>> total_build_hist_used_time: " << this->build_hist_used_time - this->total_dp_time;
LOG(DEBUG) << "-------------->>> n_hist::::: " << this->total_hist_num;
LOG(DEBUG) << "-------------->>> dp_time::::: " << this->total_dp_time;
LOG(DEBUG) << "-------------->>> cp_time::::: " << this->total_copy_time;
//LOG(DEBUG) << "cutfid = " << cut.cut_fid;
inclusive_scan_by_key(cuda::par, hist_fid, hist_fid + n_split,
hist.device_data(), hist.device_data());
LOG(DEBUG) << hist;
auto nodes_data = tree.nodes.device_data();
auto missing_gh_data = missing_gh.device_data();
auto cut_row_ptr = cut.cut_row_ptr.device_data();
auto hist_data = hist.device_data();
device_loop(n_partition, [=]__device__(int pid) {
int nid0 = pid / n_column;
int nid = nid0 + nid_offset;
if (!nodes_data[nid].splittable()) return;
int fid = pid % n_column;
if (cut_row_ptr[fid + 1] != cut_row_ptr[fid]) {
GHPair node_gh = hist_data[nid0 * n_bins + cut_row_ptr[fid + 1] - 1];
missing_gh_data[pid] = nodes_data[nid].sum_gh_pair - node_gh;
}
});
LOG(DEBUG) << missing_gh;
}
}
//calculate gain of each split
SyncArray<float_type> gain(n_max_splits);
{
// TIMED_SCOPE(timerObj, "calculate gain");
auto compute_gain = []__device__(GHPair father, GHPair lch, GHPair rch, float_type min_child_weight,
float_type lambda) -> float_type {
if (lch.h >= min_child_weight && rch.h >= min_child_weight)
return (lch.g * lch.g) / (lch.h + lambda) + (rch.g * rch.g) / (rch.h + lambda) -
(father.g * father.g) / (father.h + lambda);
else
return 0;
};
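        // A quick numeric sketch of compute_gain above (illustrative values
        // only): with lambda = 1 and min_child_weight = 0, a candidate split
        // with lch = (g=2, h=3), rch = (g=-1, h=2) and father = (g=1, h=5)
        // scores 4/4 + 1/3 - 1/6, i.e. roughly 1.17.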
const Tree::TreeNode *nodes_data = tree.nodes.device_data();
GHPair *gh_prefix_sum_data = hist.device_data();
float_type *gain_data = gain.device_data();
const auto missing_gh_data = missing_gh.device_data();
auto ignored_set_data = ignored_set.device_data();
//for lambda expression
float_type mcw = param.min_child_weight;
float_type l = param.lambda;
device_loop(n_split, [=]__device__(int i) {
int nid0 = i / n_bins;
int nid = nid0 + nid_offset;
int fid = hist_fid[i % n_bins];
if (nodes_data[nid].is_valid && !ignored_set_data[fid]) {
int pid = nid0 * n_column + hist_fid[i];
GHPair father_gh = nodes_data[nid].sum_gh_pair;
GHPair p_missing_gh = missing_gh_data[pid];
GHPair rch_gh = gh_prefix_sum_data[i];
float_type default_to_left_gain = max(0.f,
compute_gain(father_gh, father_gh - rch_gh, rch_gh, mcw, l));
rch_gh = rch_gh + p_missing_gh;
float_type default_to_right_gain = max(0.f,
compute_gain(father_gh, father_gh - rch_gh, rch_gh, mcw, l));
if (default_to_left_gain > default_to_right_gain)
gain_data[i] = default_to_left_gain;
else
gain_data[i] = -default_to_right_gain;//negative means default split to right
} else gain_data[i] = 0;
});
LOG(DEBUG) << "gain = " << gain;
}
SyncArray<int_float> best_idx_gain(n_nodes_in_level);
{
// TIMED_SCOPE(timerObj, "get best gain");
auto arg_abs_max = []__device__(const int_float &a, const int_float &b) {
if (fabsf(get<1>(a)) == fabsf(get<1>(b)))
return get<0>(a) < get<0>(b) ? a : b;
else
return fabsf(get<1>(a)) > fabsf(get<1>(b)) ? a : b;
};
auto nid_iterator = make_transform_iterator(counting_iterator<int>(0), placeholders::_1 / n_bins);
reduce_by_key(
cuda::par,
nid_iterator, nid_iterator + n_split,
make_zip_iterator(make_tuple(counting_iterator<int>(0), gain.device_data())),
make_discard_iterator(),
best_idx_gain.device_data(),
thrust::equal_to<int>(),
arg_abs_max
);
LOG(DEBUG) << n_split;
LOG(DEBUG) << "best rank & gain = " << best_idx_gain;
}
//get split points
{
const int_float *best_idx_gain_data = best_idx_gain.device_data();
auto hist_data = hist.device_data();
const auto missing_gh_data = missing_gh.device_data();
auto cut_val_data = cut.cut_points_val.device_data();
sp.resize(n_nodes_in_level);
auto sp_data = sp.device_data();
auto nodes_data = tree.nodes.device_data();
int column_offset = columns.column_offset;
auto cut_row_ptr_data = cut.cut_row_ptr.device_data();
device_loop(n_nodes_in_level, [=]__device__(int i) {
int_float bst = best_idx_gain_data[i];
float_type best_split_gain = get<1>(bst);
int split_index = get<0>(bst);
if (!nodes_data[i + nid_offset].is_valid) {
sp_data[i].split_fea_id = -1;
sp_data[i].nid = -1;
return;
}
int fid = hist_fid[split_index];
sp_data[i].split_fea_id = fid + column_offset;
sp_data[i].nid = i + nid_offset;
sp_data[i].gain = fabsf(best_split_gain);
sp_data[i].fval = cut_val_data[split_index % n_bins];
sp_data[i].split_bid = (unsigned char) (split_index % n_bins - cut_row_ptr_data[fid]);
sp_data[i].fea_missing_gh = missing_gh_data[i * n_column + hist_fid[split_index]];
sp_data[i].default_right = best_split_gain < 0;
sp_data[i].rch_sum_gh = hist_data[split_index];
});
}
}
LOG(DEBUG) << "split points (gain/fea_id/nid): " << sp;
}
void HistTreeBuilder::update_ins2node_id() {
TIMED_FUNC(timerObj);
DO_ON_MULTI_DEVICES(param.n_device, [&](int device_id){
SyncArray<bool> has_splittable(1);
auto &columns = shards[device_id].columns;
//set new node id for each instance
{
// TIMED_SCOPE(timerObj, "get new node id");
auto nid_data = ins2node_id[device_id].device_data();
const Tree::TreeNode *nodes_data = trees[device_id].nodes.device_data();
has_splittable.host_data()[0] = false;
bool *h_s_data = has_splittable.device_data();
int column_offset = columns.column_offset;
int n_column = columns.n_column;
auto dense_bin_id_data = dense_bin_id[device_id].device_data();
int max_num_bin = param.max_num_bin;
device_loop(n_instances, [=]__device__(int iid) {
int nid = nid_data[iid];
const Tree::TreeNode &node = nodes_data[nid];
int split_fid = node.split_feature_id;
if (node.splittable() && ((split_fid - column_offset < n_column) && (split_fid >= column_offset))) {
h_s_data[0] = true;
unsigned char split_bid = node.split_bid;
unsigned char bid = dense_bin_id_data[iid * n_column + split_fid - column_offset];
bool to_left = true;
if ((bid == max_num_bin && node.default_right) || (bid <= split_bid))
to_left = false;
if (to_left) {
//goes to left child
nid_data[iid] = node.lch_index;
} else {
//right child
nid_data[iid] = node.rch_index;
}
}
});
}
LOG(DEBUG) << "new tree_id = " << ins2node_id[device_id];
has_split[device_id] = has_splittable.host_data()[0];
});
}
void HistTreeBuilder::init(const DataSet &dataset, const GBMParam ¶m) {
TreeBuilder::init(dataset, param);
//TODO refactor
//init shards
int n_device = param.n_device;
shards = vector<Shard>(n_device);
vector<std::unique_ptr<SparseColumns>> v_columns(param.n_device);
for (int i = 0; i < param.n_device; ++i) {
v_columns[i].reset(&shards[i].columns);
shards[i].ignored_set = SyncArray<bool>(dataset.n_features());
}
SparseColumns columns;
if(dataset.use_cpu)
columns.csr2csc_cpu(dataset, v_columns);
else
columns.csr2csc_gpu(dataset, v_columns);
cut = vector<HistCut>(param.n_device);
dense_bin_id = MSyncArray<unsigned char>(param.n_device);
last_hist = MSyncArray<GHPair>(param.n_device);
DO_ON_MULTI_DEVICES(param.n_device, [&](int device_id){
if(dataset.use_cpu)
cut[device_id].get_cut_points2(shards[device_id].columns, param.max_num_bin, n_instances);
else
cut[device_id].get_cut_points3(shards[device_id].columns, param.max_num_bin, n_instances);
last_hist[device_id].resize((2 << param.depth) * cut[device_id].cut_points_val.size());
});
get_bin_ids();
for (int i = 0; i < param.n_device; ++i) {
v_columns[i].release();
}
// SyncMem::clear_cache();
int gpu_num;
hipError_t err = hipGetDeviceCount(&gpu_num);
std::atexit([](){
SyncMem::clear_cache();
});
}
| 886e9a9eb5ddb38ead4f359c906687a9d8d59367.cu | //
// Created by ss on 19-1-20.
//
#include "thundergbm/builder/hist_tree_builder.h"
#include "thundergbm/util/cub_wrapper.h"
#include "thundergbm/util/device_lambda.cuh"
#include "thrust/iterator/counting_iterator.h"
#include "thrust/iterator/transform_iterator.h"
#include "thrust/iterator/discard_iterator.h"
#include "thrust/sequence.h"
#include "thrust/binary_search.h"
#include "thundergbm/util/multi_device.h"
void HistTreeBuilder::get_bin_ids() {
DO_ON_MULTI_DEVICES(param.n_device, [&](int device_id){
SparseColumns &columns = shards[device_id].columns;
HistCut &cut = this->cut[device_id];
auto &dense_bin_id = this->dense_bin_id[device_id];
using namespace thrust;
int n_column = columns.n_column;
int nnz = columns.nnz;
auto cut_row_ptr = cut.cut_row_ptr.device_data();
auto cut_points_ptr = cut.cut_points_val.device_data();
auto csc_val_data = columns.csc_val.device_data();
SyncArray<unsigned char> bin_id;
bin_id.resize(columns.nnz);
auto bin_id_data = bin_id.device_data();
int n_block = fminf((nnz / n_column - 1) / 256 + 1, 4 * 56);
{
auto lowerBound = [=]__device__(const float_type *search_begin, const float_type *search_end, float_type val) {
const float_type *left = search_begin;
const float_type *right = search_end - 1;
while (left != right) {
const float_type *mid = left + (right - left) / 2;
if (*mid <= val)
right = mid;
else left = mid + 1;
}
return left;
};
TIMED_SCOPE(timerObj, "binning");
device_loop_2d(n_column, columns.csc_col_ptr.device_data(), [=]__device__(int cid, int i) {
auto search_begin = cut_points_ptr + cut_row_ptr[cid];
auto search_end = cut_points_ptr + cut_row_ptr[cid + 1];
auto val = csc_val_data[i];
bin_id_data[i] = lowerBound(search_begin, search_end, val) - search_begin;
}, n_block);
}
auto max_num_bin = param.max_num_bin;
dense_bin_id.resize(n_instances * n_column);
auto dense_bin_id_data = dense_bin_id.device_data();
auto csc_row_idx_data = columns.csc_row_idx.device_data();
device_loop(n_instances * n_column, [=]__device__(int i) {
dense_bin_id_data[i] = max_num_bin;
});
device_loop_2d(n_column, columns.csc_col_ptr.device_data(), [=]__device__(int fid, int i) {
int row = csc_row_idx_data[i];
unsigned char bid = bin_id_data[i];
dense_bin_id_data[row * n_column + fid] = bid;
}, n_block);
});
}
void HistTreeBuilder::find_split(int level, int device_id) {
std::chrono::high_resolution_clock timer;
const SparseColumns &columns = shards[device_id].columns;
SyncArray<int> &nid = ins2node_id[device_id];
SyncArray<GHPair> &gh_pair = gradients[device_id];
Tree &tree = trees[device_id];
SyncArray<SplitPoint> &sp = this->sp[device_id];
SyncArray<bool> &ignored_set = shards[device_id].ignored_set;
HistCut &cut = this->cut[device_id];
auto &dense_bin_id = this->dense_bin_id[device_id];
auto &last_hist = this->last_hist[device_id];
TIMED_FUNC(timerObj);
int n_nodes_in_level = static_cast<int>(pow(2, level));
int nid_offset = static_cast<int>(pow(2, level) - 1);
int n_column = columns.n_column;
int n_partition = n_column * n_nodes_in_level;
int n_bins = cut.cut_points_val.size();
int n_max_nodes = 2 << param.depth;
int n_max_splits = n_max_nodes * n_bins;
int n_split = n_nodes_in_level * n_bins;
LOG(TRACE) << "start finding split";
//find the best split locally
{
using namespace thrust;
auto t_build_start = timer.now();
//calculate split information for each split
SyncArray<GHPair> hist(n_max_splits);
SyncArray<GHPair> missing_gh(n_partition);
auto cut_fid_data = cut.cut_fid.device_data();
auto i2fid = [=] __device__(int i) { return cut_fid_data[i % n_bins]; };
auto hist_fid = make_transform_iterator(counting_iterator<int>(0), i2fid);
{
{
TIMED_SCOPE(timerObj, "build hist");
{
size_t
smem_size = n_bins * sizeof(GHPair);
LOG(DEBUG) << "shared memory size = " << smem_size / 1024.0 << " KB";
if (n_nodes_in_level == 1) {
//root
auto hist_data = hist.device_data();
auto cut_row_ptr_data = cut.cut_row_ptr.device_data();
auto gh_data = gh_pair.device_data();
auto dense_bin_id_data = dense_bin_id.device_data();
auto max_num_bin = param.max_num_bin;
auto n_instances = this->n_instances;
if (smem_size > 48 * 1024) {
device_loop(n_instances * n_column, [=]__device__(int i) {
int iid = i / n_column;
int fid = i % n_column;
unsigned char bid = dense_bin_id_data[iid * n_column + fid];
if (bid != max_num_bin) {
int feature_offset = cut_row_ptr_data[fid];
const GHPair src = gh_data[iid];
GHPair &dest = hist_data[feature_offset + bid];
if(src.h != 0)
atomicAdd(&dest.h, src.h);
if(src.g != 0)
atomicAdd(&dest.g, src.g);
}
});
} else {
int num_fv = n_instances * n_column;
anonymous_kernel([=]__device__() {
extern __shared__ GHPair local_hist[];
for (int i = threadIdx.x; i < n_bins; i += blockDim.x) {
local_hist[i] = 0;
}
__syncthreads();
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < num_fv; i += blockDim.x * gridDim.x) {
int iid = i / n_column;
int fid = i % n_column;
unsigned char bid = dense_bin_id_data[iid * n_column + fid];
if (bid != max_num_bin) {
int feature_offset = cut_row_ptr_data[fid];
const GHPair src = gh_data[iid];
GHPair &dest = local_hist[feature_offset + bid];
if(src.h != 0)
atomicAdd(&dest.h, src.h);
if(src.g != 0)
atomicAdd(&dest.g, src.g);
}
}
__syncthreads();
for (int i = threadIdx.x; i < n_bins; i += blockDim.x) {
GHPair &dest = hist_data[i];
GHPair src = local_hist[i];
if(src.h != 0)
atomicAdd(&dest.h, src.h);
if(src.g != 0)
atomicAdd(&dest.g, src.g);
}
}, num_fv, smem_size);
}
} else {
//otherwise
auto t_dp_begin = timer.now();
SyncArray<int> node_idx(n_instances);
SyncArray<int> node_ptr(n_nodes_in_level + 1);
{
TIMED_SCOPE(timerObj, "data partitioning");
SyncArray<int> nid4sort(n_instances);
nid4sort.copy_from(ins2node_id[device_id]);
sequence(cuda::par, node_idx.device_data(), node_idx.device_end(), 0);
cub_sort_by_key(nid4sort, node_idx);
auto counting_iter = make_counting_iterator < int > (nid_offset);
node_ptr.host_data()[0] =
lower_bound(cuda::par, nid4sort.device_data(), nid4sort.device_end(), nid_offset) -
nid4sort.device_data();
upper_bound(cuda::par, nid4sort.device_data(), nid4sort.device_end(), counting_iter,
counting_iter + n_nodes_in_level, node_ptr.device_data() + 1);
LOG(DEBUG) << "node ptr = " << node_ptr;
cudaDeviceSynchronize();
}
auto t_dp_end = timer.now();
std::chrono::duration<double> dp_used_time = t_dp_end - t_dp_begin;
this->total_dp_time += dp_used_time.count();
auto node_ptr_data = node_ptr.host_data();
auto node_idx_data = node_idx.device_data();
auto cut_row_ptr_data = cut.cut_row_ptr.device_data();
auto gh_data = gh_pair.device_data();
auto dense_bin_id_data = dense_bin_id.device_data();
auto max_num_bin = param.max_num_bin;
for (int i = 0; i < n_nodes_in_level / 2; ++i) {
int nid0_to_compute = i * 2;
int nid0_to_substract = i * 2 + 1;
int n_ins_left = node_ptr_data[nid0_to_compute + 1] - node_ptr_data[nid0_to_compute];
int n_ins_right = node_ptr_data[nid0_to_substract + 1] - node_ptr_data[nid0_to_substract];
if (max(n_ins_left, n_ins_right) == 0) continue;
if (n_ins_left > n_ins_right)
swap(nid0_to_compute, nid0_to_substract);
//compute
{
int nid0 = nid0_to_compute;
auto idx_begin = node_ptr.host_data()[nid0];
auto idx_end = node_ptr.host_data()[nid0 + 1];
auto hist_data = hist.device_data() + nid0 * n_bins;
this->total_hist_num++;
if (smem_size > 48 * 1024) {
device_loop((idx_end - idx_begin) * n_column, [=]__device__(int i) {
int iid = node_idx_data[i / n_column + idx_begin];
int fid = i % n_column;
unsigned char bid = dense_bin_id_data[iid * n_column + fid];
if (bid != max_num_bin) {
int feature_offset = cut_row_ptr_data[fid];
const GHPair src = gh_data[iid];
GHPair &dest = hist_data[feature_offset + bid];
if(src.h != 0)
atomicAdd(&dest.h, src.h);
if(src.g != 0)
atomicAdd(&dest.g, src.g);
}
});
} else {
int num_fv = (idx_end - idx_begin) * n_column;
anonymous_kernel([=] __device__() {
extern __shared__ GHPair local_hist[];
for (int i = threadIdx.x; i < n_bins; i += blockDim.x) {
local_hist[i] = 0;
}
__syncthreads();
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < num_fv; i += blockDim.x * gridDim.x) {
int iid = node_idx_data[i / n_column + idx_begin];
//int fid = i - n_column *( i / n_column);
int fid = i % n_column;
unsigned char bid = dense_bin_id_data[iid * n_column + fid];
if (bid != max_num_bin) {
const GHPair src = gh_data[iid];
int feature_offset = cut_row_ptr_data[fid];
GHPair &dest = local_hist[feature_offset + bid];
if(src.h != 0)
atomicAdd(&dest.h, src.h);
if(src.g != 0)
atomicAdd(&dest.g, src.g);
}
}
__syncthreads();
for (int i = threadIdx.x; i < n_bins; i += blockDim.x) {
GHPair src = local_hist[i];
GHPair &dest = hist_data[i];
if(src.h != 0)
atomicAdd(&dest.h, src.h);
if(src.g != 0)
atomicAdd(&dest.g, src.g);
}
}, num_fv, smem_size);
}
}
//subtract
auto t_copy_start = timer.now();
{
auto hist_data_computed = hist.device_data() + nid0_to_compute * n_bins;
auto hist_data_to_compute = hist.device_data() + nid0_to_substract * n_bins;
auto father_hist_data = last_hist.device_data() + (nid0_to_substract / 2) * n_bins;
device_loop(n_bins, [=]__device__(int i) {
hist_data_to_compute[i] = father_hist_data[i] - hist_data_computed[i];
});
}
auto t_copy_end = timer.now();
std::chrono::duration<double> cp_used_time = t_copy_end - t_copy_start;
this->total_copy_time += cp_used_time.count();
// PERFORMANCE_CHECKPOINT(timerObj);
} // end for each node
}//end # node > 1
last_hist.copy_from(hist);
cudaDeviceSynchronize();
}
LOG(DEBUG) << "level: " << level;
LOG(DEBUG) << "hist new = " << hist;
auto t_build_hist_end = timer.now();
std::chrono::duration<double> bh_used_time = t_build_hist_end - t_build_start;
this->build_hist_used_time += bh_used_time.count();
this->build_n_hist++;
LOG(DEBUG) << "-------------->>> build_hist_used_time: " << bh_used_time.count();
LOG(DEBUG) << "-------------->>> build_num_hist: " << this->build_n_hist;
LOG(DEBUG) << "-------------->>> total_build_hist_used_time: " << this->build_hist_used_time - this->total_dp_time;
LOG(DEBUG) << "-------------->>> n_hist::::: " << this->total_hist_num;
LOG(DEBUG) << "-------------->>> dp_time::::: " << this->total_dp_time;
LOG(DEBUG) << "-------------->>> cp_time::::: " << this->total_copy_time;
//LOG(DEBUG) << "cutfid = " << cut.cut_fid;
inclusive_scan_by_key(cuda::par, hist_fid, hist_fid + n_split,
hist.device_data(), hist.device_data());
LOG(DEBUG) << hist;
auto nodes_data = tree.nodes.device_data();
auto missing_gh_data = missing_gh.device_data();
auto cut_row_ptr = cut.cut_row_ptr.device_data();
auto hist_data = hist.device_data();
device_loop(n_partition, [=]__device__(int pid) {
int nid0 = pid / n_column;
int nid = nid0 + nid_offset;
if (!nodes_data[nid].splittable()) return;
int fid = pid % n_column;
if (cut_row_ptr[fid + 1] != cut_row_ptr[fid]) {
GHPair node_gh = hist_data[nid0 * n_bins + cut_row_ptr[fid + 1] - 1];
missing_gh_data[pid] = nodes_data[nid].sum_gh_pair - node_gh;
}
});
LOG(DEBUG) << missing_gh;
}
}
//calculate gain of each split
SyncArray<float_type> gain(n_max_splits);
{
// TIMED_SCOPE(timerObj, "calculate gain");
auto compute_gain = []__device__(GHPair father, GHPair lch, GHPair rch, float_type min_child_weight,
float_type lambda) -> float_type {
if (lch.h >= min_child_weight && rch.h >= min_child_weight)
return (lch.g * lch.g) / (lch.h + lambda) + (rch.g * rch.g) / (rch.h + lambda) -
(father.g * father.g) / (father.h + lambda);
else
return 0;
};
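        // A quick numeric sketch of compute_gain above (illustrative values
        // only): with lambda = 1 and min_child_weight = 0, a candidate split
        // with lch = (g=2, h=3), rch = (g=-1, h=2) and father = (g=1, h=5)
        // scores 4/4 + 1/3 - 1/6, i.e. roughly 1.17.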
const Tree::TreeNode *nodes_data = tree.nodes.device_data();
GHPair *gh_prefix_sum_data = hist.device_data();
float_type *gain_data = gain.device_data();
const auto missing_gh_data = missing_gh.device_data();
auto ignored_set_data = ignored_set.device_data();
//for lambda expression
float_type mcw = param.min_child_weight;
float_type l = param.lambda;
device_loop(n_split, [=]__device__(int i) {
int nid0 = i / n_bins;
int nid = nid0 + nid_offset;
int fid = hist_fid[i % n_bins];
if (nodes_data[nid].is_valid && !ignored_set_data[fid]) {
int pid = nid0 * n_column + hist_fid[i];
GHPair father_gh = nodes_data[nid].sum_gh_pair;
GHPair p_missing_gh = missing_gh_data[pid];
GHPair rch_gh = gh_prefix_sum_data[i];
float_type default_to_left_gain = max(0.f,
compute_gain(father_gh, father_gh - rch_gh, rch_gh, mcw, l));
rch_gh = rch_gh + p_missing_gh;
float_type default_to_right_gain = max(0.f,
compute_gain(father_gh, father_gh - rch_gh, rch_gh, mcw, l));
if (default_to_left_gain > default_to_right_gain)
gain_data[i] = default_to_left_gain;
else
gain_data[i] = -default_to_right_gain;//negative means default split to right
} else gain_data[i] = 0;
});
LOG(DEBUG) << "gain = " << gain;
}
SyncArray<int_float> best_idx_gain(n_nodes_in_level);
{
// TIMED_SCOPE(timerObj, "get best gain");
auto arg_abs_max = []__device__(const int_float &a, const int_float &b) {
if (fabsf(get<1>(a)) == fabsf(get<1>(b)))
return get<0>(a) < get<0>(b) ? a : b;
else
return fabsf(get<1>(a)) > fabsf(get<1>(b)) ? a : b;
};
auto nid_iterator = make_transform_iterator(counting_iterator<int>(0), placeholders::_1 / n_bins);
reduce_by_key(
cuda::par,
nid_iterator, nid_iterator + n_split,
make_zip_iterator(make_tuple(counting_iterator<int>(0), gain.device_data())),
make_discard_iterator(),
best_idx_gain.device_data(),
thrust::equal_to<int>(),
arg_abs_max
);
LOG(DEBUG) << n_split;
LOG(DEBUG) << "best rank & gain = " << best_idx_gain;
}
//get split points
{
const int_float *best_idx_gain_data = best_idx_gain.device_data();
auto hist_data = hist.device_data();
const auto missing_gh_data = missing_gh.device_data();
auto cut_val_data = cut.cut_points_val.device_data();
sp.resize(n_nodes_in_level);
auto sp_data = sp.device_data();
auto nodes_data = tree.nodes.device_data();
int column_offset = columns.column_offset;
auto cut_row_ptr_data = cut.cut_row_ptr.device_data();
device_loop(n_nodes_in_level, [=]__device__(int i) {
int_float bst = best_idx_gain_data[i];
float_type best_split_gain = get<1>(bst);
int split_index = get<0>(bst);
if (!nodes_data[i + nid_offset].is_valid) {
sp_data[i].split_fea_id = -1;
sp_data[i].nid = -1;
return;
}
int fid = hist_fid[split_index];
sp_data[i].split_fea_id = fid + column_offset;
sp_data[i].nid = i + nid_offset;
sp_data[i].gain = fabsf(best_split_gain);
sp_data[i].fval = cut_val_data[split_index % n_bins];
sp_data[i].split_bid = (unsigned char) (split_index % n_bins - cut_row_ptr_data[fid]);
sp_data[i].fea_missing_gh = missing_gh_data[i * n_column + hist_fid[split_index]];
sp_data[i].default_right = best_split_gain < 0;
sp_data[i].rch_sum_gh = hist_data[split_index];
});
}
}
LOG(DEBUG) << "split points (gain/fea_id/nid): " << sp;
}
void HistTreeBuilder::update_ins2node_id() {
TIMED_FUNC(timerObj);
DO_ON_MULTI_DEVICES(param.n_device, [&](int device_id){
SyncArray<bool> has_splittable(1);
auto &columns = shards[device_id].columns;
//set new node id for each instance
{
// TIMED_SCOPE(timerObj, "get new node id");
auto nid_data = ins2node_id[device_id].device_data();
const Tree::TreeNode *nodes_data = trees[device_id].nodes.device_data();
has_splittable.host_data()[0] = false;
bool *h_s_data = has_splittable.device_data();
int column_offset = columns.column_offset;
int n_column = columns.n_column;
auto dense_bin_id_data = dense_bin_id[device_id].device_data();
int max_num_bin = param.max_num_bin;
device_loop(n_instances, [=]__device__(int iid) {
int nid = nid_data[iid];
const Tree::TreeNode &node = nodes_data[nid];
int split_fid = node.split_feature_id;
if (node.splittable() && ((split_fid - column_offset < n_column) && (split_fid >= column_offset))) {
h_s_data[0] = true;
unsigned char split_bid = node.split_bid;
unsigned char bid = dense_bin_id_data[iid * n_column + split_fid - column_offset];
bool to_left = true;
if ((bid == max_num_bin && node.default_right) || (bid <= split_bid))
to_left = false;
if (to_left) {
//goes to left child
nid_data[iid] = node.lch_index;
} else {
//right child
nid_data[iid] = node.rch_index;
}
}
});
}
LOG(DEBUG) << "new tree_id = " << ins2node_id[device_id];
has_split[device_id] = has_splittable.host_data()[0];
});
}
void HistTreeBuilder::init(const DataSet &dataset, const GBMParam ¶m) {
TreeBuilder::init(dataset, param);
//TODO refactor
//init shards
int n_device = param.n_device;
shards = vector<Shard>(n_device);
vector<std::unique_ptr<SparseColumns>> v_columns(param.n_device);
for (int i = 0; i < param.n_device; ++i) {
v_columns[i].reset(&shards[i].columns);
shards[i].ignored_set = SyncArray<bool>(dataset.n_features());
}
SparseColumns columns;
if(dataset.use_cpu)
columns.csr2csc_cpu(dataset, v_columns);
else
columns.csr2csc_gpu(dataset, v_columns);
cut = vector<HistCut>(param.n_device);
dense_bin_id = MSyncArray<unsigned char>(param.n_device);
last_hist = MSyncArray<GHPair>(param.n_device);
DO_ON_MULTI_DEVICES(param.n_device, [&](int device_id){
if(dataset.use_cpu)
cut[device_id].get_cut_points2(shards[device_id].columns, param.max_num_bin, n_instances);
else
cut[device_id].get_cut_points3(shards[device_id].columns, param.max_num_bin, n_instances);
last_hist[device_id].resize((2 << param.depth) * cut[device_id].cut_points_val.size());
});
get_bin_ids();
for (int i = 0; i < param.n_device; ++i) {
v_columns[i].release();
}
// SyncMem::clear_cache();
int gpu_num;
cudaError_t err = cudaGetDeviceCount(&gpu_num);
std::atexit([](){
SyncMem::clear_cache();
});
}
|
9d9c66d214451ef5b608e264267f9cd4fb7eec46.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math.h>
#include <stdio.h>
__global__ void square_elements(float* in, float* out, int M, int N);
int main()
{
uint M = 5000;
uint N = 5000;
float* inputPtr = (float*)malloc(sizeof(float)*M*N);
float* outputPtr = (float*)malloc(sizeof(float)*M*N);
int m, n;
for (m = 0; m < M; m++){
for (n = 0; n < N; n++){
*(inputPtr + m*N + n) = m+n;
}
}
// for GPU
float* inputGPUPtr;
float* outputGPUPtr;
/* create input and output array on GPU. */
hipMalloc((void**) &inputGPUPtr, sizeof(float)*M*N);
hipMalloc((void**) &outputGPUPtr, sizeof(float)*M*N);
    /* The input array is single precision, so it can be sent directly to the
       card */
hipMemcpy(inputGPUPtr, inputPtr, sizeof(float)*M*N, hipMemcpyHostToDevice);
/* run the kernel function. */
int blockSize = 256;
int nBlocks = (M*N)/blockSize + ((M*N)%blockSize == 0?0:1);
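    /* Illustrative arithmetic (follows from M = N = 5000 above): M*N is
       25,000,000 elements, so with blockSize = 256 the round-up division
       gives nBlocks = 97,656 + 1 = 97,657 blocks. */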
printf("blockSize: %d, nBlocks = %d\n", blockSize, nBlocks);
dim3 dimBlock(blockSize);
dim3 dimGrid(nBlocks);
hipLaunchKernelGGL(( square_elements), dim3(dimGrid), dim3(dimBlock), 0, 0, inputGPUPtr, outputGPUPtr, M, N);
    /* Send results back to CPU memory */
hipMemcpy(outputPtr, outputGPUPtr, sizeof(float)*M*N, hipMemcpyDeviceToHost);
/* clean up. */
hipFree(inputGPUPtr);
hipFree(outputGPUPtr);
free(inputPtr);
free(outputPtr);
/* Scratch pad. */
}
/* Kernel to square elements of the array on the GPU */
__global__ void square_elements(float* in, float* out, int M, int N)
{
int idx = blockIdx.x*blockDim.x+threadIdx.x;
    if (idx < M*N) out[idx] = in[idx] * in[idx];
}
| 9d9c66d214451ef5b608e264267f9cd4fb7eec46.cu | #include "cuda.h"
#include <math.h>
#include <stdio.h>
__global__ void square_elements(float* in, float* out, int M, int N);
int main()
{
uint M = 5000;
uint N = 5000;
float* inputPtr = (float*)malloc(sizeof(float)*M*N);
float* outputPtr = (float*)malloc(sizeof(float)*M*N);
int m, n;
for (m = 0; m < M; m++){
for (n = 0; n < N; n++){
*(inputPtr + m*N + n) = m+n;
}
}
// for GPU
float* inputGPUPtr;
float* outputGPUPtr;
/* create input and output array on GPU. */
cudaMalloc((void**) &inputGPUPtr, sizeof(float)*M*N);
cudaMalloc((void**) &outputGPUPtr, sizeof(float)*M*N);
    /* The input array is single precision, so it can be sent directly to the
       card */
cudaMemcpy(inputGPUPtr, inputPtr, sizeof(float)*M*N, cudaMemcpyHostToDevice);
/* run the kernel function. */
int blockSize = 256;
int nBlocks = (M*N)/blockSize + ((M*N)%blockSize == 0?0:1);
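    /* Illustrative arithmetic (follows from M = N = 5000 above): M*N is
       25,000,000 elements, so with blockSize = 256 the round-up division
       gives nBlocks = 97,656 + 1 = 97,657 blocks. */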
printf("blockSize: %d, nBlocks = %d\n", blockSize, nBlocks);
dim3 dimBlock(blockSize);
dim3 dimGrid(nBlocks);
square_elements<<<dimGrid, dimBlock>>>(inputGPUPtr, outputGPUPtr, M, N);
/* Send results back to cpu memeory */
cudaMemcpy(outputPtr, outputGPUPtr, sizeof(float)*M*N, cudaMemcpyDeviceToHost);
/* clean up. */
cudaFree(inputGPUPtr);
cudaFree(outputGPUPtr);
free(inputPtr);
free(outputPtr);
/* Scratch pad. */
}
/* Kernel to square elements of the array on the GPU */
__global__ void square_elements(float* in, float* out, int M, int N)
{
int idx = blockIdx.x*blockDim.x+threadIdx.x;
    if (idx < M*N) out[idx] = in[idx] * in[idx];
}
|
a3b2faeb439e30bbebad6c17932650287fa934bc.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
// Define this to turn on error checking
#define CUDA_ERROR_CHECK
#define CUDASAFECALL( err ) __cudaSafeCall( err, __FILE__, __LINE__ )
#define CUDACHECKERROR() __cudaCheckError( __FILE__, __LINE__ )
inline void __cudaSafeCall(hipError_t err, const char* file, const int line)
{
#ifdef CUDA_ERROR_CHECK
if (hipSuccess != err)
{
fprintf(stderr, "cudaSafeCall() failed at %s:%i : %s\n",
file, line, hipGetErrorString(err));
fprintf(stdout, "cudaSafeCall() failed at %s:%i : %s\n",
file, line, hipGetErrorString(err));
exit(-1);
}
#endif
return;
}
inline void __cudaCheckError(const char* file, const int line)
{
#ifdef CUDA_ERROR_CHECK
hipError_t err = hipGetLastError();
if (hipSuccess != err)
{
fprintf(stderr, "cudaCheckError() failed at %s:%i : %s\n",
file, line, hipGetErrorString(err));
fprintf(stdout, "cudaCheckError() failed at %s:%i : %s\n",
file, line, hipGetErrorString(err));
exit(-1);
}
#endif
return;
}
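/* A minimal usage sketch for the macros above (d_buf and myKernel are
 * hypothetical names, not part of this file):
 *
 *   float* d_buf;
 *   CUDASAFECALL( hipMalloc((void**)&d_buf, 1024 * sizeof(float)) );
 *   hipLaunchKernelGGL(myKernel, dim3(4), dim3(256), 0, 0, d_buf);
 *   CUDACHECKERROR();
 *   CUDASAFECALL( hipFree(d_buf) );
 */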
| a3b2faeb439e30bbebad6c17932650287fa934bc.cu | #include <stdio.h>
#include <stdlib.h>
// Define this to turn on error checking
#define CUDA_ERROR_CHECK
#define CUDASAFECALL( err ) __cudaSafeCall( err, __FILE__, __LINE__ )
#define CUDACHECKERROR() __cudaCheckError( __FILE__, __LINE__ )
inline void __cudaSafeCall(cudaError err, const char* file, const int line)
{
#ifdef CUDA_ERROR_CHECK
if (cudaSuccess != err)
{
fprintf(stderr, "cudaSafeCall() failed at %s:%i : %s\n",
file, line, cudaGetErrorString(err));
fprintf(stdout, "cudaSafeCall() failed at %s:%i : %s\n",
file, line, cudaGetErrorString(err));
exit(-1);
}
#endif
return;
}
inline void __cudaCheckError(const char* file, const int line)
{
#ifdef CUDA_ERROR_CHECK
cudaError err = cudaGetLastError();
if (cudaSuccess != err)
{
fprintf(stderr, "cudaCheckError() failed at %s:%i : %s\n",
file, line, cudaGetErrorString(err));
fprintf(stdout, "cudaCheckError() failed at %s:%i : %s\n",
file, line, cudaGetErrorString(err));
exit(-1);
}
#endif
return;
}
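/* A minimal usage sketch for the macros above (d_buf and myKernel are
 * hypothetical names, not part of this file):
 *
 *   float* d_buf;
 *   CUDASAFECALL( cudaMalloc((void**)&d_buf, 1024 * sizeof(float)) );
 *   myKernel<<<4, 256>>>(d_buf);
 *   CUDACHECKERROR();
 *   CUDASAFECALL( cudaFree(d_buf) );
 */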
|
737cfb925e9d14de207bdf91f34430b31ecea826.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "FixHelpers.h"
#include "helpers.h"
#include "FixAngleHarmonic.h"
#include "cutils_func.h"
#include "AngleEvaluate.h"
using namespace std;
const string angleHarmonicType = "AngleHarmonic";
FixAngleHarmonic::FixAngleHarmonic(boost::shared_ptr<State> state_, string handle)
: FixPotentialMultiAtom(state_, handle, angleHarmonicType, true)
{
readFromRestart();
}
namespace py = boost::python;
void FixAngleHarmonic::compute(int virialMode) {
int nAtoms = state->atoms.size();
int activeIdx = state->gpd.activeIdx();
GPUData &gpd = state->gpd;
if (forcersGPU.size()) {
if (virialMode) {
hipLaunchKernelGGL(( compute_force_angle<AngleHarmonicType, AngleEvaluatorHarmonic, true>) , dim3(NBLOCK(nAtoms)), dim3(PERBLOCK), sizeof(AngleGPU) * maxForcersPerBlock + sharedMemSizeForParams, 0, nAtoms, gpd.xs(activeIdx), gpd.fs(activeIdx), gpd.idToIdxs.d_data.data(), forcersGPU.data(), forcerIdxs.data(), state->boundsGPU, parameters.data(), parameters.size(), gpd.virials.d_data.data(), usingSharedMemForParams, evaluator);
} else {
hipLaunchKernelGGL(( compute_force_angle<AngleHarmonicType, AngleEvaluatorHarmonic, false>) , dim3(NBLOCK(nAtoms)), dim3(PERBLOCK), sizeof(AngleGPU) * maxForcersPerBlock + sharedMemSizeForParams, 0, nAtoms, gpd.xs(activeIdx), gpd.fs(activeIdx), gpd.idToIdxs.d_data.data(), forcersGPU.data(), forcerIdxs.data(), state->boundsGPU, parameters.data(), parameters.size(), gpd.virials.d_data.data(), usingSharedMemForParams, evaluator);
}
}
}
void FixAngleHarmonic::singlePointEng(float *perParticleEng) {
int nAtoms = state->atoms.size();
int activeIdx = state->gpd.activeIdx();
if (forcersGPU.size()) {
hipLaunchKernelGGL(( compute_energy_angle), dim3(NBLOCK(nAtoms)), dim3(PERBLOCK), sizeof(AngleGPU) * maxForcersPerBlock + sharedMemSizeForParams, 0, nAtoms, state->gpd.xs(activeIdx), perParticleEng, state->gpd.idToIdxs.d_data.data(), forcersGPU.data(), forcerIdxs.data(), state->boundsGPU, parameters.data(), parameters.size(), usingSharedMemForParams, evaluator);
}
}
//void cumulativeSum(int *data, int n);
// okay, so the net result of this function is that two arrays (items, idxs of
// items) are on the gpu and we know how many bonds are in bondiest block
void FixAngleHarmonic::createAngle(Atom *a, Atom *b, Atom *c, double k, double theta0, int type) {
vector<Atom *> atoms = {a, b, c};
validAtoms(atoms);
if (type == -1) {
assert(k!=COEF_DEFAULT and theta0!=COEF_DEFAULT);
}
forcers.push_back(AngleHarmonic(a, b, c, k, theta0, type));
pyListInterface.updateAppendedMember();
}
void FixAngleHarmonic::setAngleTypeCoefs(int type, double k, double theta0) {
//cout << type << " " << k << " " << theta0 << endl;
mdAssert(theta0>=0 and theta0 <= M_PI, "Angle theta must be between zero and pi");
AngleHarmonic dummy(k, theta0);
setForcerType(type, dummy);
}
bool FixAngleHarmonic::readFromRestart() {
auto restData = getRestartNode();
if (restData) {
auto curr_node = restData.first_child();
while (curr_node) {
std::string tag = curr_node.name();
if (tag == "types") {
for (auto type_node = curr_node.first_child(); type_node; type_node = type_node.next_sibling()) {
int type;
double k;
double theta0;
std::string type_ = type_node.attribute("id").value();
type = atoi(type_.c_str());
std::string k_ = type_node.attribute("k").value();
std::string theta0_ = type_node.attribute("theta0").value();
k = atof(k_.c_str());
theta0 = atof(theta0_.c_str());
setAngleTypeCoefs(type, k, theta0);
}
} else if (tag == "members") {
for (auto member_node = curr_node.first_child(); member_node; member_node = member_node.next_sibling()) {
int type;
double k;
double theta0;
int ids[3];
std::string type_ = member_node.attribute("type").value();
std::string atom_a = member_node.attribute("atomID_a").value();
std::string atom_b = member_node.attribute("atomID_b").value();
std::string atom_c = member_node.attribute("atomID_c").value();
std::string k_ = member_node.attribute("k").value();
std::string theta0_ = member_node.attribute("theta0").value();
type = atoi(type_.c_str());
ids[0] = atoi(atom_a.c_str());
ids[1] = atoi(atom_b.c_str());
ids[2] = atoi(atom_c.c_str());
Atom * a = &state->idToAtom(ids[0]);
Atom * b = &state->idToAtom(ids[1]);
Atom * c = &state->idToAtom(ids[2]);
k = atof(k_.c_str());
theta0 = atof(theta0_.c_str());
createAngle(a, b, c, k, theta0, type);
}
}
curr_node = curr_node.next_sibling();
}
}
return true;
}
void export_FixAngleHarmonic() {
boost::python::class_<FixAngleHarmonic,
boost::shared_ptr<FixAngleHarmonic>,
boost::python::bases<Fix, TypedItemHolder> >(
"FixAngleHarmonic",
boost::python::init<boost::shared_ptr<State>, string>(
boost::python::args("state", "handle"))
)
.def("createAngle", &FixAngleHarmonic::createAngle,
(boost::python::arg("k")=COEF_DEFAULT,
boost::python::arg("theta0")=COEF_DEFAULT,
boost::python::arg("type")=-1)
)
.def("setAngleTypeCoefs", &FixAngleHarmonic::setAngleTypeCoefs,
(boost::python::arg("type")=-1,
boost::python::arg("k")=COEF_DEFAULT,
boost::python::arg("theta0")=COEF_DEFAULT
)
)
.def_readonly("angles", &FixAngleHarmonic::pyForcers)
;
}
| 737cfb925e9d14de207bdf91f34430b31ecea826.cu |
#include "FixHelpers.h"
#include "helpers.h"
#include "FixAngleHarmonic.h"
#include "cutils_func.h"
#include "AngleEvaluate.h"
using namespace std;
const string angleHarmonicType = "AngleHarmonic";
FixAngleHarmonic::FixAngleHarmonic(boost::shared_ptr<State> state_, string handle)
: FixPotentialMultiAtom(state_, handle, angleHarmonicType, true)
{
readFromRestart();
}
namespace py = boost::python;
void FixAngleHarmonic::compute(int virialMode) {
int nAtoms = state->atoms.size();
int activeIdx = state->gpd.activeIdx();
GPUData &gpd = state->gpd;
if (forcersGPU.size()) {
if (virialMode) {
compute_force_angle<AngleHarmonicType, AngleEvaluatorHarmonic, true> <<<NBLOCK(nAtoms), PERBLOCK, sizeof(AngleGPU) * maxForcersPerBlock + sharedMemSizeForParams>>>(nAtoms, gpd.xs(activeIdx), gpd.fs(activeIdx), gpd.idToIdxs.d_data.data(), forcersGPU.data(), forcerIdxs.data(), state->boundsGPU, parameters.data(), parameters.size(), gpd.virials.d_data.data(), usingSharedMemForParams, evaluator);
} else {
compute_force_angle<AngleHarmonicType, AngleEvaluatorHarmonic, false> <<<NBLOCK(nAtoms), PERBLOCK, sizeof(AngleGPU) * maxForcersPerBlock + sharedMemSizeForParams>>>(nAtoms, gpd.xs(activeIdx), gpd.fs(activeIdx), gpd.idToIdxs.d_data.data(), forcersGPU.data(), forcerIdxs.data(), state->boundsGPU, parameters.data(), parameters.size(), gpd.virials.d_data.data(), usingSharedMemForParams, evaluator);
}
}
}
void FixAngleHarmonic::singlePointEng(float *perParticleEng) {
int nAtoms = state->atoms.size();
int activeIdx = state->gpd.activeIdx();
if (forcersGPU.size()) {
compute_energy_angle<<<NBLOCK(nAtoms), PERBLOCK, sizeof(AngleGPU) * maxForcersPerBlock + sharedMemSizeForParams>>>(nAtoms, state->gpd.xs(activeIdx), perParticleEng, state->gpd.idToIdxs.d_data.data(), forcersGPU.data(), forcerIdxs.data(), state->boundsGPU, parameters.data(), parameters.size(), usingSharedMemForParams, evaluator);
}
}
//void cumulativeSum(int *data, int n);
// okay, so the net result of this function is that two arrays (items, idxs of
// items) are on the gpu and we know how many bonds are in bondiest block
void FixAngleHarmonic::createAngle(Atom *a, Atom *b, Atom *c, double k, double theta0, int type) {
vector<Atom *> atoms = {a, b, c};
validAtoms(atoms);
if (type == -1) {
assert(k!=COEF_DEFAULT and theta0!=COEF_DEFAULT);
}
forcers.push_back(AngleHarmonic(a, b, c, k, theta0, type));
pyListInterface.updateAppendedMember();
}
void FixAngleHarmonic::setAngleTypeCoefs(int type, double k, double theta0) {
//cout << type << " " << k << " " << theta0 << endl;
mdAssert(theta0>=0 and theta0 <= M_PI, "Angle theta must be between zero and pi");
AngleHarmonic dummy(k, theta0);
setForcerType(type, dummy);
}
bool FixAngleHarmonic::readFromRestart() {
auto restData = getRestartNode();
if (restData) {
auto curr_node = restData.first_child();
while (curr_node) {
std::string tag = curr_node.name();
if (tag == "types") {
for (auto type_node = curr_node.first_child(); type_node; type_node = type_node.next_sibling()) {
int type;
double k;
double theta0;
std::string type_ = type_node.attribute("id").value();
type = atoi(type_.c_str());
std::string k_ = type_node.attribute("k").value();
std::string theta0_ = type_node.attribute("theta0").value();
k = atof(k_.c_str());
theta0 = atof(theta0_.c_str());
setAngleTypeCoefs(type, k, theta0);
}
} else if (tag == "members") {
for (auto member_node = curr_node.first_child(); member_node; member_node = member_node.next_sibling()) {
int type;
double k;
double theta0;
int ids[3];
std::string type_ = member_node.attribute("type").value();
std::string atom_a = member_node.attribute("atomID_a").value();
std::string atom_b = member_node.attribute("atomID_b").value();
std::string atom_c = member_node.attribute("atomID_c").value();
std::string k_ = member_node.attribute("k").value();
std::string theta0_ = member_node.attribute("theta0").value();
type = atoi(type_.c_str());
ids[0] = atoi(atom_a.c_str());
ids[1] = atoi(atom_b.c_str());
ids[2] = atoi(atom_c.c_str());
Atom * a = &state->idToAtom(ids[0]);
Atom * b = &state->idToAtom(ids[1]);
Atom * c = &state->idToAtom(ids[2]);
k = atof(k_.c_str());
theta0 = atof(theta0_.c_str());
createAngle(a, b, c, k, theta0, type);
}
}
curr_node = curr_node.next_sibling();
}
}
return true;
}
void export_FixAngleHarmonic() {
boost::python::class_<FixAngleHarmonic,
boost::shared_ptr<FixAngleHarmonic>,
boost::python::bases<Fix, TypedItemHolder> >(
"FixAngleHarmonic",
boost::python::init<boost::shared_ptr<State>, string>(
boost::python::args("state", "handle"))
)
.def("createAngle", &FixAngleHarmonic::createAngle,
(boost::python::arg("k")=COEF_DEFAULT,
boost::python::arg("theta0")=COEF_DEFAULT,
boost::python::arg("type")=-1)
)
.def("setAngleTypeCoefs", &FixAngleHarmonic::setAngleTypeCoefs,
(boost::python::arg("type")=-1,
boost::python::arg("k")=COEF_DEFAULT,
boost::python::arg("theta0")=COEF_DEFAULT
)
)
.def_readonly("angles", &FixAngleHarmonic::pyForcers)
;
}
|
8bed9f4b64e6e7d6454b2794d7f3b06d18941974.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <fstream>
#include <hip/hip_runtime.h>
#include "cuda_utils.h"
#include "gpu_utils.h"
#include "HoloConst.h"
#include "hostXYZ.h"
#include "Bonded_struct.h"
void test();
//
// Main
//
int main(int argc, char *argv[]) {
int numnode = 1;
int mynode = 0;
std::vector<int> devices;
start_gpu(numnode, mynode, devices);
test();
return 0;
}
//
// Loads (x, y, z) coordinates from file
//
void load_coord(const char *filename, const int n, double *x, double *y, double *z) {
std::ifstream file(filename);
if (file.is_open()) {
int i = 0;
while (file >> x[i] >> y[i] >> z[i]) i++;
if (i > n) {
std::cerr<<"Too many lines in file "<<filename<<std::endl;
exit(1);
}
} else {
std::cerr<<"Error opening file "<<filename<<std::endl;
exit(1);
}
}
//
// Loads (x) coordinates from file
//
void load_coord(const char *filename, const int n, double *x) {
std::ifstream file(filename);
if (file.is_open()) {
int i = 0;
while (file >> x[i]) i++;
if (i > n) {
std::cerr<<"Too many lines in file "<<filename<<std::endl;
exit(1);
}
} else {
std::cerr<<"Error opening file "<<filename<<std::endl;
exit(1);
}
}
//
// Loads vector from file
//
template <typename T>
void load_vec(const int nind, const char *filename, const int n, T *ind) {
std::ifstream file(filename);
if (file.is_open()) {
for (int i=0;i < n;i++) {
for (int k=0;k < nind;k++) {
if (!(file >> ind[i*nind+k])) {
std::cerr<<"Error reading file "<<filename<<std::endl;
exit(1);
}
}
}
} else {
std::cerr<<"Error opening file "<<filename<<std::endl;
exit(1);
}
}
//
// Loads constraints and masses from file
//
void load_constr_mass(const int nconstr, const int nmass, const char *filename, const int n,
double *constr, double *mass) {
std::ifstream file(filename);
if (file.is_open()) {
for (int i=0;i < n;i++) {
for (int k=0;k < nconstr;k++) {
if (!(file >> constr[i*nconstr+k])) {
std::cerr<<"Error reading file "<<filename<<std::endl;
exit(1);
}
}
for (int k=0;k < nmass;k++) {
if (!(file >> mass[i*nmass+k])) {
std::cerr<<"Error reading file "<<filename<<std::endl;
exit(1);
}
}
}
} else {
std::cerr<<"Error opening file "<<filename<<std::endl;
exit(1);
}
}
//
// Checks SETTLE and SHAKE results
//
bool check_result(const int nind, const int n, const int *ind,
const double *x, const double *y, const double *z,
const double *x_ref, const double *y_ref, const double *z_ref,
const double tol, double &max_diff) {
double x1, y1, z1;
double x2, y2, z2;
double diff;
int imol, j, i;
for (imol=0;imol < n;imol++) {
for (j=0;j < nind;j++) {
i = ind[imol*nind+j];
x1 = x[i];
y1 = y[i];
z1 = z[i];
x2 = x_ref[i];
y2 = y_ref[i];
z2 = z_ref[i];
bool ok = true;
if (isnan(x1) || isnan(y1) || isnan(z1) || isnan(x2) || isnan(y2) || isnan(z2)) ok = false;
if (ok) {
diff = max(fabs(x1-x2), max(fabs(y1-y2), fabs(z1-z2)));
max_diff = max(diff, max_diff);
if (diff > tol) ok = false;
}
if (!ok) {
std::cout << "comparison FAILED, imol=" << imol << " diff=" << diff << std::endl;
std::cout << "ind =";
for (j=0;j < nind;j++) {
std::cout << " " << ind[imol*nind+j];
}
std::cout << std::endl;
std::cout << "computed: " << x1 << " "<< y1 << " "<< z1 << std::endl;
std::cout << "reference: " << x2 << " "<< y2 << " "<< z2 << std::endl;
return false;
}
}
}
return true;
}
//
// Check results
//
void check_results(cudaXYZ<double>& xyz_res, hostXYZ<double>& h_xyz_cor,
const int nsolvent, const solvent_t* h_solvent_ind,
const int npair, const int* h_pair_ind,
const int ntrip, const int* h_trip_ind,
const int nquad, const int* h_quad_ind) {
hostXYZ<double> h_xyz_res(xyz_res);
double max_diff;
double tol;
max_diff = 0.0;
tol = 5.0e-13;
if (check_result(3, nsolvent, (int *)h_solvent_ind, h_xyz_res.x(), h_xyz_res.y(), h_xyz_res.z(),
h_xyz_cor.x(), h_xyz_cor.y(), h_xyz_cor.z(), tol, max_diff)) {
std::cout<<"solvent SETTLE OK (tolerance " << tol << " max difference " <<
max_diff << ")" << std::endl;
}
max_diff = 0.0;
tol = 5.0e-14;
if (check_result(2, npair, h_pair_ind, h_xyz_res.x(), h_xyz_res.y(), h_xyz_res.z(),
h_xyz_cor.x(), h_xyz_cor.y(), h_xyz_cor.z(), tol, max_diff)) {
std::cout<<"pair SHAKE OK (tolerance " << tol << " max difference " <<
max_diff << ")" << std::endl;
}
max_diff = 0.0;
tol = 5.0e-10;
if (check_result(3, ntrip, h_trip_ind, h_xyz_res.x(), h_xyz_res.y(), h_xyz_res.z(),
h_xyz_cor.x(), h_xyz_cor.y(), h_xyz_cor.z(), tol, max_diff)) {
std::cout<<"trip SHAKE OK (tolerance " << tol << " max difference " <<
max_diff << ")" << std::endl;
}
max_diff = 0.0;
tol = 5.0e-10;
if (check_result(4, nquad, h_quad_ind, h_xyz_res.x(), h_xyz_res.y(), h_xyz_res.z(),
h_xyz_cor.x(), h_xyz_cor.y(), h_xyz_cor.z(), tol, max_diff)) {
std::cout<<"quad SHAKE OK (tolerance " << tol << " max difference " <<
max_diff << ")" << std::endl;
}
}
//
// Test parametric version
//
void test_parametric(const double mO, const double mH, const double rOHsq, const double rHHsq,
const int npair, const int* h_pair_ind,
const int ntrip, const int* h_trip_ind,
const int nquad, const int* h_quad_ind,
const int nsolvent, const solvent_t* h_solvent_ind,
cudaXYZ<double>& xyz_ref, cudaXYZ<double>& xyz_res, hostXYZ<double>& h_xyz_start) {
//---------------------------------------------------------------------------
// Load constraint distances and masses
double *h_pair_constr = (double *)malloc(npair*sizeof(double));
double *h_pair_mass = (double *)malloc(npair*2*sizeof(double));
load_constr_mass(1, 2, "test_data/pair_constr_mass.txt", npair, h_pair_constr, h_pair_mass);
double *h_trip_constr = (double *)malloc(ntrip*2*sizeof(double));
double *h_trip_mass = (double *)malloc(ntrip*5*sizeof(double));
load_constr_mass(2, 5, "test_data/trip_constr_mass.txt", ntrip, h_trip_constr, h_trip_mass);
double *h_quad_constr = (double *)malloc(nquad*3*sizeof(double));
double *h_quad_mass = (double *)malloc(nquad*7*sizeof(double));
load_constr_mass(3, 7, "test_data/quad_constr_mass.txt", nquad, h_quad_constr, h_quad_mass);
//---------------------------------------------------------------------------
HoloConst holoconst;
// Setup
holoconst.setup_solvent_parameters(mO, mH, rOHsq, rHHsq);
holoconst.setup_ind_mass_constr(npair, (int2 *)h_pair_ind, h_pair_constr, h_pair_mass,
ntrip, (int3 *)h_trip_ind, h_trip_constr, h_trip_mass,
nquad, (int4 *)h_quad_ind, h_quad_constr, h_quad_mass,
nsolvent, h_solvent_ind);
// Apply holonomic constraints, result is in xyz_res
xyz_res.set_data_sync(h_xyz_start);
holoconst.apply(xyz_ref, xyz_res);
cudaCheck(hipDeviceSynchronize());
free(h_pair_constr);
free(h_pair_mass);
free(h_trip_constr);
free(h_trip_mass);
free(h_quad_constr);
free(h_quad_mass);
}
//
// Test indexed version
//
void test_indexed(const double mO, const double mH, const double rOHsq, const double rHHsq,
const int npair, const int ntrip, const int nquad,
const int nsolvent, const solvent_t* h_solvent_ind,
cudaXYZ<double>& xyz_ref, cudaXYZ<double>& xyz_res, hostXYZ<double>& h_xyz_start) {
const int npair_type = 9;
const int ntrip_type = 3;
const int nquad_type = 2;
double *h_pair_constr = new double[npair_type];
double *h_pair_mass = new double[npair_type*2];
load_constr_mass(1, 2, "test_data/pair_types.txt", npair_type, h_pair_constr, h_pair_mass);
bond_t* h_pair_indtype = new bond_t[npair];
load_vec<int>(3, "test_data/pair_indtype.txt", npair, (int *)h_pair_indtype);
double *h_trip_constr = new double[ntrip_type*2];
double *h_trip_mass = new double[ntrip_type*5];
load_constr_mass(2, 5, "test_data/trip_types.txt", ntrip_type, h_trip_constr, h_trip_mass);
angle_t* h_trip_indtype = new angle_t[ntrip];
load_vec<int>(4, "test_data/trip_indtype.txt", ntrip, (int *)h_trip_indtype);
double *h_quad_constr = new double[nquad_type*3];
double *h_quad_mass = new double[nquad_type*7];
load_constr_mass(3, 7, "test_data/quad_types.txt", nquad_type, h_quad_constr, h_quad_mass);
dihe_t* h_quad_indtype = new dihe_t[nquad];
load_vec<int>(5, "test_data/quad_indtype.txt", nquad, (int *)h_quad_indtype);
// Setup
HoloConst holoconst;
holoconst.setup_solvent_parameters(mO, mH, rOHsq, rHHsq);
holoconst.setup_indexed(npair, h_pair_indtype, npair_type, h_pair_constr, h_pair_mass,
ntrip, h_trip_indtype, ntrip_type, h_trip_constr, h_trip_mass,
nquad, h_quad_indtype, nquad_type, h_quad_constr, h_quad_mass,
nsolvent, h_solvent_ind);
// Apply holonomic constraints
xyz_res.set_data_sync(h_xyz_start);
holoconst.apply(xyz_ref, xyz_res);
cudaCheck(hipDeviceSynchronize());
delete [] h_pair_indtype;
delete [] h_trip_indtype;
delete [] h_quad_indtype;
delete [] h_pair_constr;
delete [] h_pair_mass;
delete [] h_trip_constr;
delete [] h_trip_mass;
delete [] h_quad_constr;
delete [] h_quad_mass;
}
/*
//
// Test indexed version with SETTLE for triplets
//
void test_indexed_settle(const double mO, const double mH, const double rOHsq, const double rHHsq,
const int npair, const int ntrip, const int nquad,
const int nsolvent, const int3* h_solvent_ind,
cudaXYZ<double>& xyz0, cudaXYZ<double>& xyz1,
hostXYZ<double>& h_xyz0, hostXYZ<double>& h_xyz1) {
const int npair_type = 9;
const int ntrip_type = 3;
const int nquad_type = 2;
double *h_pair_constr = new double[npair_type];
double *h_pair_mass = new double[npair_type*2];
load_constr_mass(1, 2, "test_data/pair_types.txt", npair_type, h_pair_constr, h_pair_mass);
bond_t* h_pair_indtype = new bond_t[npair];
load_vec<int>(3, "test_data/pair_indtype.txt", npair, (int *)h_pair_indtype);
double *h_trip_constr = new double[ntrip_type*2];
double *h_trip_mass = new double[ntrip_type*5];
load_constr_mass(2, 5, "test_data/trip_types.txt", ntrip_type, h_trip_constr, h_trip_mass);
angle_t* h_trip_indtype = new angle_t[ntrip];
load_vec<int>(4, "test_data/trip_indtype.txt", ntrip, (int *)h_trip_indtype);
// Merge triplets with solvent
angle_t* h_settle_ind = new angle_t[nsolvent + ntrip];
for (int i=0;i < nsolvent;i++) {
h_settle_ind[i].i = h_solvent_ind[i].x;
h_settle_ind[i].j = h_solvent_ind[i].y;
h_settle_ind[i].k = h_solvent_ind[i].z;
h_settle_ind[i].itype = ntrip_type;
}
for (int i=nsolvent;i < nsolvent+ntrip;i++) {
h_settle_ind[i].i = h_trip_indtype[i-nsolvent].i;
h_settle_ind[i].j = h_trip_indtype[i-nsolvent].j;
h_settle_ind[i].k = h_trip_indtype[i-nsolvent].k;
h_settle_ind[i].itype = h_trip_indtype[i-nsolvent].itype;
}
double* h_massP = new double[ntrip_type+1];
double* h_massH = new double[ntrip_type+1];
double* h_rPHsq = new double[ntrip_type+1];
double* h_rHHsq = new double[ntrip_type+1];
for (int i=0;i < ntrip_type;i++) {
h_massP[i] = 1.0/h_trip_mass[i*5];
h_massH[i] = 1.0/h_trip_mass[i*5+1];
h_rPHsq[i] = h_trip_constr[i*2];
int j;
for (j=0;j < ntrip;j++) if (h_trip_indtype[j].itype == i) break;
int jj = h_trip_indtype[j].j;
int kk = h_trip_indtype[j].k;
double xjk = h_xyz0.x()[jj] - h_xyz0.x()[kk];
double yjk = h_xyz0.y()[jj] - h_xyz0.y()[kk];
double zjk = h_xyz0.z()[jj] - h_xyz0.z()[kk];
h_rHHsq[i] = xjk*xjk + yjk*yjk + zjk*zjk;
}
h_massP[ntrip_type] = mO;
h_massH[ntrip_type] = mH;
h_rPHsq[ntrip_type] = rOHsq;
h_rHHsq[ntrip_type] = rHHsq;
double *h_quad_constr = new double[nquad_type*3];
double *h_quad_mass = new double[nquad_type*7];
load_constr_mass(3, 7, "test_data/quad_types.txt", nquad_type, h_quad_constr, h_quad_mass);
dihe_t* h_quad_indtype = new dihe_t[nquad];
load_vec<int>(5, "test_data/quad_indtype.txt", nquad, (int *)h_quad_indtype);
// Setup
HoloConst holoconst;
holoconst.setup_settle_parameters(ntrip_type+1, h_massP, h_massH, h_rPHsq, h_rHHsq);
holoconst.setup_indexed(npair, h_pair_indtype, npair_type, h_pair_constr, h_pair_mass,
nquad, h_quad_indtype, nquad_type, h_quad_constr, h_quad_mass,
nsolvent+ntrip, h_settle_ind);
// Apply holonomic constraints
xyz1.set_data_sync(h_xyz1);
holoconst.apply(xyz0, xyz1);
xyz1.set_data_sync(h_xyz1);
holoconst.apply(xyz0, xyz1);
xyz1.set_data_sync(h_xyz1);
holoconst.apply(xyz0, xyz1);
cudaCheck(hipDeviceSynchronize());
delete [] h_pair_indtype;
delete [] h_trip_indtype;
delete [] h_quad_indtype;
delete [] h_settle_ind;
delete [] h_massP;
delete [] h_massH;
delete [] h_rPHsq;
delete [] h_rHHsq;
delete [] h_pair_constr;
delete [] h_pair_mass;
delete [] h_trip_constr;
delete [] h_trip_mass;
delete [] h_quad_constr;
delete [] h_quad_mass;
}
*/
//
// Test the code using data in test_data/ -directory
//
void test() {
// Settings for the data:
const double mO = 15.9994;
const double mH = 1.008;
const double rOHsq = 0.91623184;
const double rHHsq = 2.29189321;
const int ncoord = 23558;
const int nsolvent = 7023;
const int npair = 458;
const int ntrip = 233;
const int nquad = 99;
cudaXYZ<double> xyz_ref(ncoord);
cudaXYZ<double> xyz_res(ncoord);
// Load coordinates
hostXYZ<double> h_xyz_ref(ncoord, NON_PINNED);
hostXYZ<double> h_xyz_start(ncoord, NON_PINNED);
hostXYZ<double> h_xyz_cor(ncoord, NON_PINNED);
// Reference coordinates
load_coord("test_data/xref.txt", h_xyz_ref.size(), h_xyz_ref.x());
load_coord("test_data/yref.txt", h_xyz_ref.size(), h_xyz_ref.y());
load_coord("test_data/zref.txt", h_xyz_ref.size(), h_xyz_ref.z());
// Starting coordinates
load_coord("test_data/xstart.txt", h_xyz_start.size(), h_xyz_start.x());
load_coord("test_data/ystart.txt", h_xyz_start.size(), h_xyz_start.y());
load_coord("test_data/zstart.txt", h_xyz_start.size(), h_xyz_start.z());
// Correct result coordinates
load_coord("test_data/xcor.txt", h_xyz_cor.size(), h_xyz_cor.x());
load_coord("test_data/ycor.txt", h_xyz_cor.size(), h_xyz_cor.y());
load_coord("test_data/zcor.txt", h_xyz_cor.size(), h_xyz_cor.z());
// Set reference data, this never changes
xyz_ref.set_data_sync(h_xyz_ref);
// Load constraint indices
solvent_t *h_solvent_ind = new solvent_t[nsolvent];
load_vec<int>(3, "test_data/solvent_ind.txt", nsolvent, (int *)h_solvent_ind);
int *h_pair_ind = (int *)malloc(npair*2*sizeof(int));
load_vec<int>(2, "test_data/pair_ind.txt", npair, h_pair_ind);
int *h_trip_ind = (int *)malloc(ntrip*3*sizeof(int));
load_vec<int>(3, "test_data/trip_ind.txt", ntrip, h_trip_ind);
int *h_quad_ind = (int *)malloc(nquad*4*sizeof(int));
load_vec<int>(4, "test_data/quad_ind.txt", nquad, h_quad_ind);
//-------------------------
// Test parametric
//-------------------------
test_parametric(mO, mH, rOHsq, rHHsq, npair, h_pair_ind, ntrip, h_trip_ind, nquad, h_quad_ind,
nsolvent, h_solvent_ind, xyz_ref, xyz_res, h_xyz_start);
check_results(xyz_res, h_xyz_cor, nsolvent, h_solvent_ind, npair, h_pair_ind,
ntrip, h_trip_ind, nquad, h_quad_ind);
//-------------------------
// Test indexed
//-------------------------
test_indexed(mO, mH, rOHsq, rHHsq, npair, ntrip, nquad, nsolvent, h_solvent_ind,
xyz_ref, xyz_res, h_xyz_start);
check_results(xyz_res, h_xyz_cor, nsolvent, h_solvent_ind, npair, h_pair_ind,
ntrip, h_trip_ind, nquad, h_quad_ind);
delete [] h_solvent_ind;
free(h_pair_ind);
free(h_trip_ind);
free(h_quad_ind);
}
| 8bed9f4b64e6e7d6454b2794d7f3b06d18941974.cu | #include <iostream>
#include <fstream>
#include <cuda.h>
#include "cuda_utils.h"
#include "gpu_utils.h"
#include "HoloConst.h"
#include "hostXYZ.h"
#include "Bonded_struct.h"
void test();
//
// Main
//
int main(int argc, char *argv[]) {
int numnode = 1;
int mynode = 0;
std::vector<int> devices;
start_gpu(numnode, mynode, devices);
test();
return 0;
}
//
// Loads (x, y, z) coordinates from file
//
void load_coord(const char *filename, const int n, double *x, double *y, double *z) {
std::ifstream file(filename);
if (file.is_open()) {
int i = 0;
while (file >> x[i] >> y[i] >> z[i]) i++;
if (i > n) {
std::cerr<<"Too many lines in file "<<filename<<std::endl;
exit(1);
}
} else {
std::cerr<<"Error opening file "<<filename<<std::endl;
exit(1);
}
}
//
// Loads (x) coordinates from file
//
void load_coord(const char *filename, const int n, double *x) {
std::ifstream file(filename);
if (file.is_open()) {
int i = 0;
while (file >> x[i]) i++;
if (i > n) {
std::cerr<<"Too many lines in file "<<filename<<std::endl;
exit(1);
}
} else {
std::cerr<<"Error opening file "<<filename<<std::endl;
exit(1);
}
}
//
// Loads vector from file
//
template <typename T>
void load_vec(const int nind, const char *filename, const int n, T *ind) {
std::ifstream file(filename);
if (file.is_open()) {
for (int i=0;i < n;i++) {
for (int k=0;k < nind;k++) {
if (!(file >> ind[i*nind+k])) {
std::cerr<<"Error reading file "<<filename<<std::endl;
exit(1);
}
}
}
} else {
std::cerr<<"Error opening file "<<filename<<std::endl;
exit(1);
}
}
//
// Loads constraints and masses from file
//
void load_constr_mass(const int nconstr, const int nmass, const char *filename, const int n,
double *constr, double *mass) {
std::ifstream file(filename);
if (file.is_open()) {
for (int i=0;i < n;i++) {
for (int k=0;k < nconstr;k++) {
if (!(file >> constr[i*nconstr+k])) {
std::cerr<<"Error reading file "<<filename<<std::endl;
exit(1);
}
}
for (int k=0;k < nmass;k++) {
if (!(file >> mass[i*nmass+k])) {
std::cerr<<"Error reading file "<<filename<<std::endl;
exit(1);
}
}
}
} else {
std::cerr<<"Error opening file "<<filename<<std::endl;
exit(1);
}
}
//
// Checks SETTLE and SHAKE results
//
bool check_result(const int nind, const int n, const int *ind,
const double *x, const double *y, const double *z,
const double *x_ref, const double *y_ref, const double *z_ref,
const double tol, double &max_diff) {
double x1, y1, z1;
double x2, y2, z2;
double diff;
int imol, j, i;
for (imol=0;imol < n;imol++) {
for (j=0;j < nind;j++) {
i = ind[imol*nind+j];
x1 = x[i];
y1 = y[i];
z1 = z[i];
x2 = x_ref[i];
y2 = y_ref[i];
z2 = z_ref[i];
bool ok = true;
if (isnan(x1) || isnan(y1) || isnan(z1) || isnan(x2) || isnan(y2) || isnan(z2)) ok = false;
if (ok) {
diff = max(fabs(x1-x2), max(fabs(y1-y2), fabs(z1-z2)));
max_diff = max(diff, max_diff);
if (diff > tol) ok = false;
}
if (!ok) {
std::cout << "comparison FAILED, imol=" << imol << " diff=" << diff << std::endl;
std::cout << "ind =";
for (j=0;j < nind;j++) {
std::cout << " " << ind[imol*nind+j];
}
std::cout << std::endl;
std::cout << "computed: " << x1 << " "<< y1 << " "<< z1 << std::endl;
std::cout << "reference: " << x2 << " "<< y2 << " "<< z2 << std::endl;
return false;
}
}
}
return true;
}
//
// Check results
//
void check_results(cudaXYZ<double>& xyz_res, hostXYZ<double>& h_xyz_cor,
const int nsolvent, const solvent_t* h_solvent_ind,
const int npair, const int* h_pair_ind,
const int ntrip, const int* h_trip_ind,
const int nquad, const int* h_quad_ind) {
hostXYZ<double> h_xyz_res(xyz_res);
double max_diff;
double tol;
max_diff = 0.0;
tol = 5.0e-13;
if (check_result(3, nsolvent, (int *)h_solvent_ind, h_xyz_res.x(), h_xyz_res.y(), h_xyz_res.z(),
h_xyz_cor.x(), h_xyz_cor.y(), h_xyz_cor.z(), tol, max_diff)) {
std::cout<<"solvent SETTLE OK (tolerance " << tol << " max difference " <<
max_diff << ")" << std::endl;
}
max_diff = 0.0;
tol = 5.0e-14;
if (check_result(2, npair, h_pair_ind, h_xyz_res.x(), h_xyz_res.y(), h_xyz_res.z(),
h_xyz_cor.x(), h_xyz_cor.y(), h_xyz_cor.z(), tol, max_diff)) {
std::cout<<"pair SHAKE OK (tolerance " << tol << " max difference " <<
max_diff << ")" << std::endl;
}
max_diff = 0.0;
tol = 5.0e-10;
if (check_result(3, ntrip, h_trip_ind, h_xyz_res.x(), h_xyz_res.y(), h_xyz_res.z(),
h_xyz_cor.x(), h_xyz_cor.y(), h_xyz_cor.z(), tol, max_diff)) {
std::cout<<"trip SHAKE OK (tolerance " << tol << " max difference " <<
max_diff << ")" << std::endl;
}
max_diff = 0.0;
tol = 5.0e-10;
if (check_result(4, nquad, h_quad_ind, h_xyz_res.x(), h_xyz_res.y(), h_xyz_res.z(),
h_xyz_cor.x(), h_xyz_cor.y(), h_xyz_cor.z(), tol, max_diff)) {
std::cout<<"quad SHAKE OK (tolerance " << tol << " max difference " <<
max_diff << ")" << std::endl;
}
}
//
// Test parametric version
//
void test_parametric(const double mO, const double mH, const double rOHsq, const double rHHsq,
const int npair, const int* h_pair_ind,
const int ntrip, const int* h_trip_ind,
const int nquad, const int* h_quad_ind,
const int nsolvent, const solvent_t* h_solvent_ind,
cudaXYZ<double>& xyz_ref, cudaXYZ<double>& xyz_res, hostXYZ<double>& h_xyz_start) {
//---------------------------------------------------------------------------
// Load constraint distances and masses
double *h_pair_constr = (double *)malloc(npair*sizeof(double));
double *h_pair_mass = (double *)malloc(npair*2*sizeof(double));
load_constr_mass(1, 2, "test_data/pair_constr_mass.txt", npair, h_pair_constr, h_pair_mass);
double *h_trip_constr = (double *)malloc(ntrip*2*sizeof(double));
double *h_trip_mass = (double *)malloc(ntrip*5*sizeof(double));
load_constr_mass(2, 5, "test_data/trip_constr_mass.txt", ntrip, h_trip_constr, h_trip_mass);
double *h_quad_constr = (double *)malloc(nquad*3*sizeof(double));
double *h_quad_mass = (double *)malloc(nquad*7*sizeof(double));
load_constr_mass(3, 7, "test_data/quad_constr_mass.txt", nquad, h_quad_constr, h_quad_mass);
//---------------------------------------------------------------------------
HoloConst holoconst;
// Setup
holoconst.setup_solvent_parameters(mO, mH, rOHsq, rHHsq);
holoconst.setup_ind_mass_constr(npair, (int2 *)h_pair_ind, h_pair_constr, h_pair_mass,
ntrip, (int3 *)h_trip_ind, h_trip_constr, h_trip_mass,
nquad, (int4 *)h_quad_ind, h_quad_constr, h_quad_mass,
nsolvent, h_solvent_ind);
// Apply holonomic constraints, result is in xyz_res
xyz_res.set_data_sync(h_xyz_start);
holoconst.apply(xyz_ref, xyz_res);
cudaCheck(cudaDeviceSynchronize());
free(h_pair_constr);
free(h_pair_mass);
free(h_trip_constr);
free(h_trip_mass);
free(h_quad_constr);
free(h_quad_mass);
}
//
// Test indexed version
//
void test_indexed(const double mO, const double mH, const double rOHsq, const double rHHsq,
const int npair, const int ntrip, const int nquad,
const int nsolvent, const solvent_t* h_solvent_ind,
cudaXYZ<double>& xyz_ref, cudaXYZ<double>& xyz_res, hostXYZ<double>& h_xyz_start) {
const int npair_type = 9;
const int ntrip_type = 3;
const int nquad_type = 2;
double *h_pair_constr = new double[npair_type];
double *h_pair_mass = new double[npair_type*2];
load_constr_mass(1, 2, "test_data/pair_types.txt", npair_type, h_pair_constr, h_pair_mass);
bond_t* h_pair_indtype = new bond_t[npair];
load_vec<int>(3, "test_data/pair_indtype.txt", npair, (int *)h_pair_indtype);
double *h_trip_constr = new double[ntrip_type*2];
double *h_trip_mass = new double[ntrip_type*5];
load_constr_mass(2, 5, "test_data/trip_types.txt", ntrip_type, h_trip_constr, h_trip_mass);
angle_t* h_trip_indtype = new angle_t[ntrip];
load_vec<int>(4, "test_data/trip_indtype.txt", ntrip, (int *)h_trip_indtype);
double *h_quad_constr = new double[nquad_type*3];
double *h_quad_mass = new double[nquad_type*7];
load_constr_mass(3, 7, "test_data/quad_types.txt", nquad_type, h_quad_constr, h_quad_mass);
dihe_t* h_quad_indtype = new dihe_t[nquad];
load_vec<int>(5, "test_data/quad_indtype.txt", nquad, (int *)h_quad_indtype);
// Setup
HoloConst holoconst;
holoconst.setup_solvent_parameters(mO, mH, rOHsq, rHHsq);
holoconst.setup_indexed(npair, h_pair_indtype, npair_type, h_pair_constr, h_pair_mass,
ntrip, h_trip_indtype, ntrip_type, h_trip_constr, h_trip_mass,
nquad, h_quad_indtype, nquad_type, h_quad_constr, h_quad_mass,
nsolvent, h_solvent_ind);
// Apply holonomic constraints
xyz_res.set_data_sync(h_xyz_start);
holoconst.apply(xyz_ref, xyz_res);
cudaCheck(cudaDeviceSynchronize());
delete [] h_pair_indtype;
delete [] h_trip_indtype;
delete [] h_quad_indtype;
delete [] h_pair_constr;
delete [] h_pair_mass;
delete [] h_trip_constr;
delete [] h_trip_mass;
delete [] h_quad_constr;
delete [] h_quad_mass;
}
/*
//
// Test indexed version with SETTLE for triplets
//
void test_indexed_settle(const double mO, const double mH, const double rOHsq, const double rHHsq,
const int npair, const int ntrip, const int nquad,
const int nsolvent, const int3* h_solvent_ind,
cudaXYZ<double>& xyz0, cudaXYZ<double>& xyz1,
hostXYZ<double>& h_xyz0, hostXYZ<double>& h_xyz1) {
const int npair_type = 9;
const int ntrip_type = 3;
const int nquad_type = 2;
double *h_pair_constr = new double[npair_type];
double *h_pair_mass = new double[npair_type*2];
load_constr_mass(1, 2, "test_data/pair_types.txt", npair_type, h_pair_constr, h_pair_mass);
bond_t* h_pair_indtype = new bond_t[npair];
load_vec<int>(3, "test_data/pair_indtype.txt", npair, (int *)h_pair_indtype);
double *h_trip_constr = new double[ntrip_type*2];
double *h_trip_mass = new double[ntrip_type*5];
load_constr_mass(2, 5, "test_data/trip_types.txt", ntrip_type, h_trip_constr, h_trip_mass);
angle_t* h_trip_indtype = new angle_t[ntrip];
load_vec<int>(4, "test_data/trip_indtype.txt", ntrip, (int *)h_trip_indtype);
// Merge triplets with solvent
angle_t* h_settle_ind = new angle_t[nsolvent + ntrip];
for (int i=0;i < nsolvent;i++) {
h_settle_ind[i].i = h_solvent_ind[i].x;
h_settle_ind[i].j = h_solvent_ind[i].y;
h_settle_ind[i].k = h_solvent_ind[i].z;
h_settle_ind[i].itype = ntrip_type;
}
for (int i=nsolvent;i < nsolvent+ntrip;i++) {
h_settle_ind[i].i = h_trip_indtype[i-nsolvent].i;
h_settle_ind[i].j = h_trip_indtype[i-nsolvent].j;
h_settle_ind[i].k = h_trip_indtype[i-nsolvent].k;
h_settle_ind[i].itype = h_trip_indtype[i-nsolvent].itype;
}
double* h_massP = new double[ntrip_type+1];
double* h_massH = new double[ntrip_type+1];
double* h_rPHsq = new double[ntrip_type+1];
double* h_rHHsq = new double[ntrip_type+1];
for (int i=0;i < ntrip_type;i++) {
h_massP[i] = 1.0/h_trip_mass[i*5];
h_massH[i] = 1.0/h_trip_mass[i*5+1];
h_rPHsq[i] = h_trip_constr[i*2];
int j;
for (j=0;j < ntrip;j++) if (h_trip_indtype[j].itype == i) break;
int jj = h_trip_indtype[j].j;
int kk = h_trip_indtype[j].k;
double xjk = h_xyz0.x()[jj] - h_xyz0.x()[kk];
double yjk = h_xyz0.y()[jj] - h_xyz0.y()[kk];
double zjk = h_xyz0.z()[jj] - h_xyz0.z()[kk];
h_rHHsq[i] = xjk*xjk + yjk*yjk + zjk*zjk;
}
h_massP[ntrip_type] = mO;
h_massH[ntrip_type] = mH;
h_rPHsq[ntrip_type] = rOHsq;
h_rHHsq[ntrip_type] = rHHsq;
double *h_quad_constr = new double[nquad_type*3];
double *h_quad_mass = new double[nquad_type*7];
load_constr_mass(3, 7, "test_data/quad_types.txt", nquad_type, h_quad_constr, h_quad_mass);
dihe_t* h_quad_indtype = new dihe_t[nquad];
load_vec<int>(5, "test_data/quad_indtype.txt", nquad, (int *)h_quad_indtype);
// Setup
HoloConst holoconst;
holoconst.setup_settle_parameters(ntrip_type+1, h_massP, h_massH, h_rPHsq, h_rHHsq);
holoconst.setup_indexed(npair, h_pair_indtype, npair_type, h_pair_constr, h_pair_mass,
nquad, h_quad_indtype, nquad_type, h_quad_constr, h_quad_mass,
nsolvent+ntrip, h_settle_ind);
// Apply holonomic constraints
xyz1.set_data_sync(h_xyz1);
holoconst.apply(xyz0, xyz1);
xyz1.set_data_sync(h_xyz1);
holoconst.apply(xyz0, xyz1);
xyz1.set_data_sync(h_xyz1);
holoconst.apply(xyz0, xyz1);
cudaCheck(cudaDeviceSynchronize());
delete [] h_pair_indtype;
delete [] h_trip_indtype;
delete [] h_quad_indtype;
delete [] h_settle_ind;
delete [] h_massP;
delete [] h_massH;
delete [] h_rPHsq;
delete [] h_rHHsq;
delete [] h_pair_constr;
delete [] h_pair_mass;
delete [] h_trip_constr;
delete [] h_trip_mass;
delete [] h_quad_constr;
delete [] h_quad_mass;
}
*/
//
// Test the code using data in test_data/ -directory
//
void test() {
// Settings for the data:
const double mO = 15.9994;
const double mH = 1.008;
const double rOHsq = 0.91623184;
const double rHHsq = 2.29189321;
const int ncoord = 23558;
const int nsolvent = 7023;
const int npair = 458;
const int ntrip = 233;
const int nquad = 99;
cudaXYZ<double> xyz_ref(ncoord);
cudaXYZ<double> xyz_res(ncoord);
// Load coordinates
hostXYZ<double> h_xyz_ref(ncoord, NON_PINNED);
hostXYZ<double> h_xyz_start(ncoord, NON_PINNED);
hostXYZ<double> h_xyz_cor(ncoord, NON_PINNED);
// Reference coordinates
load_coord("test_data/xref.txt", h_xyz_ref.size(), h_xyz_ref.x());
load_coord("test_data/yref.txt", h_xyz_ref.size(), h_xyz_ref.y());
load_coord("test_data/zref.txt", h_xyz_ref.size(), h_xyz_ref.z());
// Starting coordinates
load_coord("test_data/xstart.txt", h_xyz_start.size(), h_xyz_start.x());
load_coord("test_data/ystart.txt", h_xyz_start.size(), h_xyz_start.y());
load_coord("test_data/zstart.txt", h_xyz_start.size(), h_xyz_start.z());
// Correct result coordinates
load_coord("test_data/xcor.txt", h_xyz_cor.size(), h_xyz_cor.x());
load_coord("test_data/ycor.txt", h_xyz_cor.size(), h_xyz_cor.y());
load_coord("test_data/zcor.txt", h_xyz_cor.size(), h_xyz_cor.z());
// Set reference data, this never changes
xyz_ref.set_data_sync(h_xyz_ref);
// Load constraint indices
solvent_t *h_solvent_ind = new solvent_t[nsolvent];
load_vec<int>(3, "test_data/solvent_ind.txt", nsolvent, (int *)h_solvent_ind);
int *h_pair_ind = (int *)malloc(npair*2*sizeof(int));
load_vec<int>(2, "test_data/pair_ind.txt", npair, h_pair_ind);
int *h_trip_ind = (int *)malloc(ntrip*3*sizeof(int));
load_vec<int>(3, "test_data/trip_ind.txt", ntrip, h_trip_ind);
int *h_quad_ind = (int *)malloc(nquad*4*sizeof(int));
load_vec<int>(4, "test_data/quad_ind.txt", nquad, h_quad_ind);
//-------------------------
// Test parametric
//-------------------------
test_parametric(mO, mH, rOHsq, rHHsq, npair, h_pair_ind, ntrip, h_trip_ind, nquad, h_quad_ind,
nsolvent, h_solvent_ind, xyz_ref, xyz_res, h_xyz_start);
check_results(xyz_res, h_xyz_cor, nsolvent, h_solvent_ind, npair, h_pair_ind,
ntrip, h_trip_ind, nquad, h_quad_ind);
//-------------------------
// Test indexed
//-------------------------
test_indexed(mO, mH, rOHsq, rHHsq, npair, ntrip, nquad, nsolvent, h_solvent_ind,
xyz_ref, xyz_res, h_xyz_start);
check_results(xyz_res, h_xyz_cor, nsolvent, h_solvent_ind, npair, h_pair_ind,
ntrip, h_trip_ind, nquad, h_quad_ind);
delete [] h_solvent_ind;
free(h_pair_ind);
free(h_trip_ind);
free(h_quad_ind);
}
|
b4483787a852303c1acecaaa47fcf480357652f1.hip | // !!! This is a file automatically generated by hipify!!!
/*******************************************************
* Copyright (c) 2020, ArrayFire
* All rights reserved.
*
* This file is distributed under 3-clause BSD license.
* The complete license agreement can be obtained at:
* http://arrayfire.com/licenses/BSD-3-Clause
********************************************************/
#include <gtest/gtest.h>
#include <testHelpers.hpp>
#include <af/array.h>
#include <af/device.h>
using af::allocV2;
using af::freeV2;
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
TEST(Memory, AfAllocDeviceCUDA) {
void *ptr;
ASSERT_SUCCESS(af_alloc_device(&ptr, sizeof(float)));
/// Tests to see if the pointer returned can be used by cuda functions
float gold_val = 5;
float *gold = NULL;
ASSERT_EQ(hipSuccess, hipMalloc(&gold, sizeof(float)));
ASSERT_EQ(hipSuccess, hipMemcpy(gold, &gold_val, sizeof(float),
hipMemcpyHostToDevice));
ASSERT_EQ(hipSuccess,
hipMemcpy(ptr, gold, sizeof(float), hipMemcpyDeviceToDevice));
float host;
ASSERT_EQ(hipSuccess,
hipMemcpy(&host, ptr, sizeof(float), hipMemcpyDeviceToHost));
ASSERT_SUCCESS(af_free_device(ptr));
ASSERT_EQ(5, host);
}
#pragma GCC diagnostic pop
TEST(Memory, AfAllocDeviceV2CUDA) {
void *ptr;
ASSERT_SUCCESS(af_alloc_device_v2(&ptr, sizeof(float)));
/// Tests to see if the pointer returned can be used by cuda functions
float gold_val = 5;
float *gold = NULL;
ASSERT_EQ(hipSuccess, hipMalloc(&gold, sizeof(float)));
ASSERT_EQ(hipSuccess, hipMemcpy(gold, &gold_val, sizeof(float),
hipMemcpyHostToDevice));
ASSERT_EQ(hipSuccess,
hipMemcpy(ptr, gold, sizeof(float), hipMemcpyDeviceToDevice));
float host;
ASSERT_EQ(hipSuccess,
hipMemcpy(&host, ptr, sizeof(float), hipMemcpyDeviceToHost));
ASSERT_SUCCESS(af_free_device_v2(ptr));
ASSERT_EQ(5, host);
}
TEST(Memory, SNIPPET_AllocCUDA) {
//! [ex_alloc_v2_cuda]
void *ptr = allocV2(sizeof(float));
float *dptr = static_cast<float *>(ptr);
float host_data = 5.0f;
hipError_t error = hipSuccess;
error = hipMemcpy(dptr, &host_data, sizeof(float), hipMemcpyHostToDevice);
freeV2(ptr);
//! [ex_alloc_v2_cuda]
ASSERT_EQ(hipSuccess, error);
}
| b4483787a852303c1acecaaa47fcf480357652f1.cu | /*******************************************************
* Copyright (c) 2020, ArrayFire
* All rights reserved.
*
* This file is distributed under 3-clause BSD license.
* The complete license agreement can be obtained at:
* http://arrayfire.com/licenses/BSD-3-Clause
********************************************************/
#include <gtest/gtest.h>
#include <testHelpers.hpp>
#include <af/array.h>
#include <af/device.h>
using af::allocV2;
using af::freeV2;
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
TEST(Memory, AfAllocDeviceCUDA) {
void *ptr;
ASSERT_SUCCESS(af_alloc_device(&ptr, sizeof(float)));
/// Tests to see if the pointer returned can be used by cuda functions
float gold_val = 5;
float *gold = NULL;
ASSERT_EQ(cudaSuccess, cudaMalloc(&gold, sizeof(float)));
ASSERT_EQ(cudaSuccess, cudaMemcpy(gold, &gold_val, sizeof(float),
cudaMemcpyHostToDevice));
ASSERT_EQ(cudaSuccess,
cudaMemcpy(ptr, gold, sizeof(float), cudaMemcpyDeviceToDevice));
float host;
ASSERT_EQ(cudaSuccess,
cudaMemcpy(&host, ptr, sizeof(float), cudaMemcpyDeviceToHost));
ASSERT_SUCCESS(af_free_device(ptr));
ASSERT_EQ(5, host);
}
#pragma GCC diagnostic pop
TEST(Memory, AfAllocDeviceV2CUDA) {
void *ptr;
ASSERT_SUCCESS(af_alloc_device_v2(&ptr, sizeof(float)));
/// Tests to see if the pointer returned can be used by cuda functions
float gold_val = 5;
float *gold = NULL;
ASSERT_EQ(cudaSuccess, cudaMalloc(&gold, sizeof(float)));
ASSERT_EQ(cudaSuccess, cudaMemcpy(gold, &gold_val, sizeof(float),
cudaMemcpyHostToDevice));
ASSERT_EQ(cudaSuccess,
cudaMemcpy(ptr, gold, sizeof(float), cudaMemcpyDeviceToDevice));
float host;
ASSERT_EQ(cudaSuccess,
cudaMemcpy(&host, ptr, sizeof(float), cudaMemcpyDeviceToHost));
ASSERT_SUCCESS(af_free_device_v2(ptr));
ASSERT_EQ(5, host);
}
TEST(Memory, SNIPPET_AllocCUDA) {
//! [ex_alloc_v2_cuda]
void *ptr = allocV2(sizeof(float));
float *dptr = static_cast<float *>(ptr);
float host_data = 5.0f;
cudaError_t error = cudaSuccess;
error = cudaMemcpy(dptr, &host_data, sizeof(float), cudaMemcpyHostToDevice);
freeV2(ptr);
//! [ex_alloc_v2_cuda]
ASSERT_EQ(cudaSuccess, error);
}
|
fc7ace23e22443d5f8eb43c392fcce98049d1003.hip | // !!! This is a file automatically generated by hipify!!!
#include <cmath>
#include <cstdlib>
#include <cstdio>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "ImmersedBoundary.cuh"
#define PI 3.14159
//__device__ const double RHO_0 = 1.;
//__device__ const double C_S = 0.57735;
__device__ const double c_l[9 * 2] = //VELOCITY COMPONENTS
{
0.,0. ,
1.,0. , 0.,1. , -1.,0. , 0.,-1. ,
1.,1. , -1.,1. , -1.,-1. , 1.,-1.
};
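// Regularized (discrete) delta function with 1.5-cell support, used by the immersed
// boundary method to couple the Lagrangian boundary points to the Eulerian lattice.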
__device__ double delta(const double & xs, const double & ys, const int & x, const int & y)
{
double deltax(0.), deltay(0.), delta(0.);
double dx = abs(x - xs);
double dy = abs(y - ys);
if (dx <= 1.5)
{
if (dx <= 0.5)
{
deltax = (1. / 3.)*(1. + sqrt(-3. * dx*dx + 1.));
}
else deltax = (1. / 6.)*(5. - 3. * dx - sqrt(-3. * (1. - dx)*(1. - dx) + 1.));
}
if (dy <= 1.5)
{
if (dy <= 0.5)
{
deltay = (1. / 3.)*(1. + sqrt(-3. * dy*dy + 1.));
}
else deltay = (1. / 6.)*(5. - 3. * dy - sqrt(-3. * (1. - dy)*(1. - dy) + 1.));
}
delta = deltax * deltay;
return delta;
}
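// One thread per boundary point: interpolate the fluid velocity at the point from the
// 9 neighbouring lattice nodes and build the boundary force F_s ~ 2*rho*(u_s - u),
// weighted by the delta kernel.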
__global__ void interpolate(const double * rho, const double * u, const int Ns, const double * u_s, double * F_s, const double * s, const int XDIM)
{
int i(0), j(0), k(0), x0(0), y0(0), x(0), y(0);
double xs(0.), ys(0.);
k = blockIdx.x*blockDim.x + threadIdx.x;
{
F_s[2 * k + 0] = 0.;
F_s[2 * k + 1] = 0.;
xs = s[k * 2 + 0];
ys = s[k * 2 + 1];
x0 = nearbyint(xs);
y0 = nearbyint(ys);
for (i = 0; i < 9; i++)
{
x = nearbyint(x0 + c_l[i * 2 + 0]);
y = nearbyint(y0 + c_l[i * 2 + 1]);
j = y*XDIM + x;
//std::cout << delta << std::endl;
F_s[2 * k + 0] += 2.*(1. * 1. * delta(xs, ys, x, y))*rho[j] * (u_s[2 * k + 0] - u[2 * j + 0]);
F_s[2 * k + 1] += 2.*(1. * 1. * delta(xs, ys, x, y))*rho[j] * (u_s[2 * k + 1] - u[2 * j + 1]);
}
}
}
// rho[SIZE]: fluid density u[2*size]: fluid velocity f[9*size]: density function Ns: No. of cilia boundary points u_s[2*Ns]: cilia velocity F_s[2*Ns]: cilia force
// force[2*size]: fluid force s[2*Ns]: cilia position XDIM: x dimension Q: Net flow epsilon[Ns]: boundary point switching
__global__ void spread(const double * rho, double * u, const double * f, const int Ns, const double * u_s, const double * F_s, double * force, const double * s, const int XDIM, double * Q, const int * epsilon)
{
int i(0), j(0), k(0), x(0), y(0);
double xs(0.), ys(0.);
double momentum[2] = { 0,0 };
j = blockIdx.x*blockDim.x + threadIdx.x;
{
force[2 * j + 0] = 0.;
force[2 * j + 1] = 0.;
x = j%XDIM;
y = (j - j%XDIM) / XDIM;
for (k = 0; k < Ns; k++)
{
xs = s[k * 2 + 0];
ys = s[k * 2 + 1];
force[2 * j + 0] += F_s[2 * k + 0] * delta(xs, ys, x, y) * 1.*epsilon[k];
force[2 * j + 1] += F_s[2 * k + 1] * delta(xs, ys, x, y) * 1.*epsilon[k];
}
momentum[0] = 0.;
momentum[1] = 0.;
for (i = 0; i < 9; i++)
{
momentum[0] += c_l[i * 2 + 0] * f[9 * j + i];
momentum[1] += c_l[i * 2 + 1] * f[9 * j + i];
}
u[2 * j + 0] = (momentum[0] + 0.5*force[2 * j + 0]) / rho[j];
u[2 * j + 1] = (momentum[1] + 0.5*force[2 * j + 1]) / rho[j];
if (x == XDIM - 5)
{
// Accumulate the net flow through the plane x = XDIM-5; many threads reach this line,
// so the update must be atomic (assumes double-precision atomicAdd support, sm_60+ on CUDA).
atomicAdd(&Q[0], u[2 * j + 0] / 200.);
}
}
}
| fc7ace23e22443d5f8eb43c392fcce98049d1003.cu | #include <cmath>
#include <cstdlib>
#include <cstdio>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "ImmersedBoundary.cuh"
#define PI 3.14159
//__device__ const double RHO_0 = 1.;
//__device__ const double C_S = 0.57735;
__device__ const double c_l[9 * 2] = //VELOCITY COMPONENTS
{
0.,0. ,
1.,0. , 0.,1. , -1.,0. , 0.,-1. ,
1.,1. , -1.,1. , -1.,-1. , 1.,-1.
};
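// Regularized (discrete) delta function with 1.5-cell support, used by the immersed
// boundary method to couple the Lagrangian boundary points to the Eulerian lattice.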
__device__ double delta(const double & xs, const double & ys, const int & x, const int & y)
{
double deltax(0.), deltay(0.), delta(0.);
double dx = abs(x - xs);
double dy = abs(y - ys);
if (dx <= 1.5)
{
if (dx <= 0.5)
{
deltax = (1. / 3.)*(1. + sqrt(-3. * dx*dx + 1.));
}
else deltax = (1. / 6.)*(5. - 3. * dx - sqrt(-3. * (1. - dx)*(1. - dx) + 1.));
}
if (dy <= 1.5)
{
if (dy <= 0.5)
{
deltay = (1. / 3.)*(1. + sqrt(-3. * dy*dy + 1.));
}
else deltay = (1. / 6.)*(5. - 3. * dy - sqrt(-3. * (1. - dy)*(1. - dy) + 1.));
}
delta = deltax * deltay;
return delta;
}
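// One thread per boundary point: interpolate the fluid velocity at the point from the
// 9 neighbouring lattice nodes and build the boundary force F_s ~ 2*rho*(u_s - u),
// weighted by the delta kernel.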
__global__ void interpolate(const double * rho, const double * u, const int Ns, const double * u_s, double * F_s, const double * s, const int XDIM)
{
int i(0), j(0), k(0), x0(0), y0(0), x(0), y(0);
double xs(0.), ys(0.);
k = blockIdx.x*blockDim.x + threadIdx.x;
{
F_s[2 * k + 0] = 0.;
F_s[2 * k + 1] = 0.;
xs = s[k * 2 + 0];
ys = s[k * 2 + 1];
x0 = nearbyint(xs);
y0 = nearbyint(ys);
for (i = 0; i < 9; i++)
{
x = nearbyint(x0 + c_l[i * 2 + 0]);
y = nearbyint(y0 + c_l[i * 2 + 1]);
j = y*XDIM + x;
//std::cout << delta << std::endl;
F_s[2 * k + 0] += 2.*(1. * 1. * delta(xs, ys, x, y))*rho[j] * (u_s[2 * k + 0] - u[2 * j + 0]);
F_s[2 * k + 1] += 2.*(1. * 1. * delta(xs, ys, x, y))*rho[j] * (u_s[2 * k + 1] - u[2 * j + 1]);
}
}
}
// rho[SIZE]: fluid density u[2*size]: fluid velocity f[9*size]: density function Ns: No. of cilia boundary points u_s[2*Ns]: cilia velocity F_s[2*Ns]: cilia force
// force[2*size]: fluid force s[2*Ns]: cilia position XDIM: x dimension Q: Net flow epsilon[Ns]: boundary point switching
__global__ void spread(const double * rho, double * u, const double * f, const int Ns, const double * u_s, const double * F_s, double * force, const double * s, const int XDIM, double * Q, const int * epsilon)
{
int i(0), j(0), k(0), x(0), y(0);
double xs(0.), ys(0.);
double momentum[2] = { 0,0 };
j = blockIdx.x*blockDim.x + threadIdx.x;
{
force[2 * j + 0] = 0.;
force[2 * j + 1] = 0.;
x = j%XDIM;
y = (j - j%XDIM) / XDIM;
for (k = 0; k < Ns; k++)
{
xs = s[k * 2 + 0];
ys = s[k * 2 + 1];
force[2 * j + 0] += F_s[2 * k + 0] * delta(xs, ys, x, y) * 1.*epsilon[k];
force[2 * j + 1] += F_s[2 * k + 1] * delta(xs, ys, x, y) * 1.*epsilon[k];
}
momentum[0] = 0.;
momentum[1] = 0.;
for (i = 0; i < 9; i++)
{
momentum[0] += c_l[i * 2 + 0] * f[9 * j + i];
momentum[1] += c_l[i * 2 + 1] * f[9 * j + i];
}
u[2 * j + 0] = (momentum[0] + 0.5*force[2 * j + 0]) / rho[j];
u[2 * j + 1] = (momentum[1] + 0.5*force[2 * j + 1]) / rho[j];
if (x == XDIM - 5)
{
// Accumulate the net flow through the plane x = XDIM-5; many threads reach this line,
// so the update must be atomic (assumes double-precision atomicAdd support, sm_60+ on CUDA).
atomicAdd(&Q[0], u[2 * j + 0] / 200.);
}
}
}
|
b50cd96329a8df3e0fabafd26563360c952ffb1c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
// CUDA kernel for forward
template <typename Dtype>
__global__ void PReLUForward(const int n, const int channels, const int dim,
const Dtype* in, Dtype* out, const Dtype* slope_data,
const int div_factor) {
CUDA_KERNEL_LOOP(index, n) {
int c = (index / dim) % channels / div_factor;
out[index] = in[index] > 0 ? in[index] : in[index] * slope_data[c];
}
}
// CUDA kernel for bottom backward
template <typename Dtype>
__global__ void PReLUBackward(const int n, const int channels, const int dim,
const Dtype* in_diff, const Dtype* in_data, Dtype* out_diff,
const Dtype* slope_data, const int div_factor) {
CUDA_KERNEL_LOOP(index, n) {
int c = (index / dim) % channels / div_factor;
out_diff[index] = in_diff[index] * ((in_data[index] > 0)
+ (in_data[index] <= 0) * slope_data[c]);
}
}
// CUDA kernel for element-wise parameter backward
template <typename Dtype>
__global__ void PReLUParamBackward(const int n, const Dtype* in_diff,
const Dtype* in_data, Dtype* out_diff) {
CUDA_KERNEL_LOOP(index, n) {
out_diff[index] = in_diff[index] * in_data[index] * (in_data[index] <= 0);
}
}
template <typename Dtype>
void PReLULayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
const int dim = bottom[0]->height() * bottom[0]->width();
const int channels = bottom[0]->channels();
const Dtype* slope_data = this->blobs_[0]->gpu_data();
const int div_factor = channel_shared_ ? channels : 1;
// For in-place computation
if (top[0] == bottom[0]) {
caffe_copy(count, bottom_data, bottom_memory_.mutable_gpu_data());
}
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( PReLUForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, channels, dim, bottom_data, top_data, slope_data, div_factor);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
void PReLULayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
const int count = bottom[0]->count();
const int dim = bottom[0]->height() * bottom[0]->width();
const int channels = bottom[0]->channels();
// For in-place computation
if (top[0] == bottom[0]) {
bottom_data = bottom_memory_.gpu_data();
}
// Propagate to param
// Since writing bottom diff will affect top diff if top and bottom blobs
// are identical (in-place computation), we first compute param backward to
// keep top_diff unchanged.
if (this->param_propagate_down_[0]) {
Dtype* slope_diff = this->blobs_[0]->mutable_gpu_diff();
// slope_diff is set as 0, then accumulated over batches
caffe_gpu_set<Dtype>(this->blobs_[0]->count(), Dtype(0), slope_diff);
int cdim = channels * dim;
Dtype dsum = 0.;
for (int n = 0; n < bottom[0]->num(); ++n) {
Dtype* temp_buff = multiplier_.mutable_gpu_diff();
// compute element-wise diff
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( PReLUParamBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
cdim, top_diff + top[0]->offset(n),
bottom_data + bottom[0]->offset(n), multiplier_.mutable_gpu_diff());
CUDA_POST_KERNEL_CHECK;
if (channel_shared_) {
Dtype d;
caffe_gpu_dot<Dtype>(channels * dim, multiplier_.gpu_diff(),
multiplier_.gpu_data(), &d);
dsum += d;
} else {
caffe_gpu_gemv<Dtype>(CblasNoTrans, channels, dim, 1.,
multiplier_.gpu_diff(), multiplier_.gpu_data(), 1.,
slope_diff);
}
}
if (channel_shared_) {
caffe_gpu_set(this->blobs_[0]->count(), Dtype(dsum), slope_diff);
}
}
// Propagate to bottom
if (propagate_down[0]) {
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* slope_data = this->blobs_[0]->gpu_data();
int div_factor = channel_shared_ ? channels : 1;
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( PReLUBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, channels, dim, top_diff, bottom_data, bottom_diff, slope_data,
div_factor);
CUDA_POST_KERNEL_CHECK;
}
}
INSTANTIATE_LAYER_GPU_FUNCS(PReLULayer);
} // namespace caffe | b50cd96329a8df3e0fabafd26563360c952ffb1c.cu | #include <algorithm>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
// CUDA kernel for forward
template <typename Dtype>
__global__ void PReLUForward(const int n, const int channels, const int dim,
const Dtype* in, Dtype* out, const Dtype* slope_data,
const int div_factor) {
CUDA_KERNEL_LOOP(index, n) {
int c = (index / dim) % channels / div_factor;
out[index] = in[index] > 0 ? in[index] : in[index] * slope_data[c];
}
}
// CUDA kernel for bottom backward
template <typename Dtype>
__global__ void PReLUBackward(const int n, const int channels, const int dim,
const Dtype* in_diff, const Dtype* in_data, Dtype* out_diff,
const Dtype* slope_data, const int div_factor) {
CUDA_KERNEL_LOOP(index, n) {
int c = (index / dim) % channels / div_factor;
out_diff[index] = in_diff[index] * ((in_data[index] > 0)
+ (in_data[index] <= 0) * slope_data[c]);
}
}
// CUDA kernel for element-wise parameter backward
template <typename Dtype>
__global__ void PReLUParamBackward(const int n, const Dtype* in_diff,
const Dtype* in_data, Dtype* out_diff) {
CUDA_KERNEL_LOOP(index, n) {
out_diff[index] = in_diff[index] * in_data[index] * (in_data[index] <= 0);
}
}
template <typename Dtype>
void PReLULayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
const int dim = bottom[0]->height() * bottom[0]->width();
const int channels = bottom[0]->channels();
const Dtype* slope_data = this->blobs_[0]->gpu_data();
const int div_factor = channel_shared_ ? channels : 1;
// For in-place computation
if (top[0] == bottom[0]) {
caffe_copy(count, bottom_data, bottom_memory_.mutable_gpu_data());
}
// NOLINT_NEXT_LINE(whitespace/operators)
PReLUForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, channels, dim, bottom_data, top_data, slope_data, div_factor);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
void PReLULayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
const int count = bottom[0]->count();
const int dim = bottom[0]->height() * bottom[0]->width();
const int channels = bottom[0]->channels();
// For in-place computation
if (top[0] == bottom[0]) {
bottom_data = bottom_memory_.gpu_data();
}
// Propagate to param
// Since writing bottom diff will affect top diff if top and bottom blobs
// are identical (in-place computation), we first compute param backward to
// keep top_diff unchanged.
if (this->param_propagate_down_[0]) {
Dtype* slope_diff = this->blobs_[0]->mutable_gpu_diff();
// slope_diff is set as 0, then accumulated over batches
caffe_gpu_set<Dtype>(this->blobs_[0]->count(), Dtype(0), slope_diff);
int cdim = channels * dim;
Dtype dsum = 0.;
for (int n = 0; n < bottom[0]->num(); ++n) {
Dtype* temp_buff = multiplier_.mutable_gpu_diff();
// compute element-wise diff
// NOLINT_NEXT_LINE(whitespace/operators)
PReLUParamBackward<Dtype><<<CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS>>>(
cdim, top_diff + top[0]->offset(n),
bottom_data + bottom[0]->offset(n), multiplier_.mutable_gpu_diff());
CUDA_POST_KERNEL_CHECK;
if (channel_shared_) {
Dtype d;
caffe_gpu_dot<Dtype>(channels * dim, multiplier_.gpu_diff(),
multiplier_.gpu_data(), &d);
dsum += d;
} else {
caffe_gpu_gemv<Dtype>(CblasNoTrans, channels, dim, 1.,
multiplier_.gpu_diff(), multiplier_.gpu_data(), 1.,
slope_diff);
}
}
if (channel_shared_) {
caffe_gpu_set(this->blobs_[0]->count(), Dtype(dsum), slope_diff);
}
}
// Propagate to bottom
if (propagate_down[0]) {
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* slope_data = this->blobs_[0]->gpu_data();
int div_factor = channel_shared_ ? channels : 1;
// NOLINT_NEXT_LINE(whitespace/operators)
PReLUBackward<Dtype><<<CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS>>>(
count, channels, dim, top_diff, bottom_data, bottom_diff, slope_data,
div_factor);
CUDA_POST_KERNEL_CHECK;
}
}
INSTANTIATE_LAYER_GPU_FUNCS(PReLULayer);
} // namespace caffe |
682f347759c8ac8464f17073f854d1bb414f088e.hip | // !!! This is a file automatically generated by hipify!!!
#include "database.h"
database* build_database(int size){
database *db = (database*)malloc(sizeof(database));
db->size = size;
db->inputs = (vector**)malloc(size*sizeof(vector*));
db->outputs = (vector**)malloc(size*sizeof(vector*));
return db;
}
database* sample_database(database *db, int size){
database* sample = build_database(size);
int *indices = (int*)malloc(sizeof(int)*size);
for(int element = 0; element < size; ++element){
int index = rand()%db->size;
for(int i = 0; i < element; ++i){
if(index == indices[i]){
index = rand()%db->size;
i = 0;
}
}
indices[element] = index;
sample->inputs[element] = db->inputs[indices[element]];
sample->outputs[element] = db->outputs[indices[element]];
}
free(indices);
return sample;
}
void randomize_database(database h_db, float max_input, float max_output, int input_length, int output_length){
for(int pair = 0; pair < h_db.size; ++pair){
h_db.inputs[pair] = build_vector(input_length);
h_db.outputs[pair] = build_vector(output_length);
randomize_vector(h_db.inputs[pair], max_input);
randomize_vector(h_db.outputs[pair], max_output);
}
}
void read_vector(vector *h_v, int vector_length, FILE *file_pointer){
h_v->length = vector_length;
h_v->elements = (float *) malloc(sizeof(float)*vector_length);
for(int element = 0; element < vector_length; element++){
int tempLength = 40;
int ch = fgetc(file_pointer);
char *temp = (char *)malloc(sizeof(char)*tempLength);
int i;
// Collect one comma/newline-delimited token, stopping at EOF or when the buffer is full
for(i = 0; i < tempLength - 1 && ch != ',' && ch != '\n' && ch != EOF; i++){
temp[i] = (char)ch;
ch = fgetc(file_pointer);
}
temp[i] = '\0';
// Convert the token before releasing its buffer
h_v->elements[element] = atof(temp);
free(temp);
}
}
void write_vector(vector *h_v, FILE *file_pointer){
for(int element = 0; element < h_v->length; element++){
fprintf(file_pointer, "%f,",h_v->elements[element]);
}
fprintf(file_pointer, "\n");
}
void copy_database(database* source, database* target, hipMemcpyKind copy) {
target->size = source->size;
for (int pair = 0; pair < target->size; pair++) {
if (copy == hipMemcpyHostToDevice || copy == hipMemcpyDeviceToDevice) {
target->inputs[pair] = cuda_build_vector(source->inputs[pair]->length);
target->outputs[pair] = cuda_build_vector(source->outputs[pair]->length);
}else {
target->inputs[pair] = build_vector(source->inputs[pair]->length);
target->outputs[pair] = build_vector(source->outputs[pair]->length);
}
copy_vector(source->inputs[pair], target->inputs[pair], copy);
copy_vector(source->outputs[pair], target->outputs[pair], copy);
}
}
void free_database(database *h_db){
for(int element = 0; element < h_db->size; element++){
free_vector(h_db->inputs[element]);
free_vector(h_db->outputs[element]);
}
free(h_db);
}
void cuda_free_database(database *d_db){
for(int element = 0; element < d_db->size; element++){
cuda_free_vector(d_db->inputs[element]);
cuda_free_vector(d_db->outputs[element]);
}
free(d_db);
}
int read_database(database *h_db, char *inputs, char *outputs){
int in = read_database_inputs(h_db, inputs);
int out = read_database_outputs(h_db, outputs);
return in || out;
}
int save_database(database *h_db, char *inputs, char *outputs){
int in = save_database_inputs(h_db, inputs);
int out = save_database_outputs(h_db, outputs);
return in || out;
}
int read_database_inputs(database *h_db, char *file_name){
FILE *inputs = fopen(file_name, "r");
return 1;
}
int read_database_outputs(database *h_db, char *file_name){
FILE *outputs = fopen(file_name, "r");
return 1;
}
int save_database_inputs(database *h_db, char *file_name){
FILE *inputs = fopen(file_name, "w");
return 1;
}
int save_database_outputs(database *h_db, char *file_name){
FILE *outputs = fopen(file_name, "w");
return 1;
}
| 682f347759c8ac8464f17073f854d1bb414f088e.cu | #include "database.h"
database* build_database(int size){
database *db = (database*)malloc(sizeof(database));
db->size = size;
db->inputs = (vector**)malloc(size*sizeof(vector*));
db->outputs = (vector**)malloc(size*sizeof(vector*));
return db;
}
database* sample_database(database *db, int size){
database* sample = build_database(size);
int *indices = (int*)malloc(sizeof(int)*size);
for(int element = 0; element < size; ++element){
int index = rand()%db->size;
for(int i = 0; i < element; ++i){
if(index == indices[i]){
index = rand()%db->size;
i = 0;
}
}
indices[element] = index;
sample->inputs[element] = db->inputs[indices[element]];
sample->outputs[element] = db->outputs[indices[element]];
}
free(indices);
return sample;
}
void randomize_database(database h_db, float max_input, float max_output, int input_length, int output_length){
for(int pair = 0; pair < h_db.size; ++pair){
h_db.inputs[pair] = build_vector(input_length);
h_db.outputs[pair] = build_vector(output_length);
randomize_vector(h_db.inputs[pair], max_input);
randomize_vector(h_db.outputs[pair], max_output);
}
}
void read_vector(vector *h_v, int vector_length, FILE *file_pointer){
h_v->length = vector_length;
h_v->elements = (float *) malloc(sizeof(float)*vector_length);
for(int element = 0; element < vector_length; element++){
int tempLength = 40;
int ch = fgetc(file_pointer);
char *temp = (char *)malloc(sizeof(char)*tempLength);
int i;
// Collect one comma/newline-delimited token, stopping at EOF or when the buffer is full
for(i = 0; i < tempLength - 1 && ch != ',' && ch != '\n' && ch != EOF; i++){
temp[i] = (char)ch;
ch = fgetc(file_pointer);
}
temp[i] = '\0';
// Convert the token before releasing its buffer
h_v->elements[element] = atof(temp);
free(temp);
}
}
void write_vector(vector *h_v, FILE *file_pointer){
for(int element = 0; element < h_v->length; element++){
fprintf(file_pointer, "%f,",h_v->elements[element]);
}
fprintf(file_pointer, "\n");
}
void copy_database(database* source, database* target, cudaMemcpyKind copy) {
target->size = source->size;
for (int pair = 0; pair < target->size; pair++) {
if (copy == cudaMemcpyHostToDevice || copy == cudaMemcpyDeviceToDevice) {
target->inputs[pair] = cuda_build_vector(source->inputs[pair]->length);
target->outputs[pair] = cuda_build_vector(source->outputs[pair]->length);
}else {
target->inputs[pair] = build_vector(source->inputs[pair]->length);
target->outputs[pair] = build_vector(source->outputs[pair]->length);
}
copy_vector(source->inputs[pair], target->inputs[pair], copy);
copy_vector(source->outputs[pair], target->outputs[pair], copy);
}
}
void free_database(database *h_db){
for(int element = 0; element < h_db->size; element++){
free_vector(h_db->inputs[element]);
free_vector(h_db->outputs[element]);
}
free(h_db);
}
void cuda_free_database(database *d_db){
for(int element = 0; element < d_db->size; element++){
cuda_free_vector(d_db->inputs[element]);
cuda_free_vector(d_db->outputs[element]);
}
free(d_db);
}
int read_database(database *h_db, char *inputs, char *outputs){
int in = read_database_inputs(h_db, inputs);
int out = read_database_outputs(h_db, outputs);
return in || out;
}
int save_database(database *h_db, char *inputs, char *outputs){
int in = save_database_inputs(h_db, inputs);
int out = save_database_outputs(h_db, outputs);
return in || out;
}
int read_database_inputs(database *h_db, char *file_name){
FILE *inputs = fopen(file_name, "r");
return 1;
}
int read_database_outputs(database *h_db, char *file_name){
FILE *outputs = fopen(file_name, "r");
return 1;
}
int save_database_inputs(database *h_db, char *file_name){
FILE *inputs = fopen(file_name, "w");
return 1;
}
int save_database_outputs(database *h_db, char *file_name){
FILE *outputs = fopen(file_name, "w");
return 1;
}
|
0bc5eb20d36008bcaca3d10b53506127987c031e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#define N 1000 // threads per block
#define T 10000 // number of blocks (total vector length is N*T)
__global__ void vecAdd(int *A, int *B, int *C) {
int i = blockIdx.x*blockDim.x + threadIdx.x;
C[i] = A[i] * 10 + B[i];
}
int main(int argc, char **argv) {
int size = N * T * sizeof(int);
static int a[N*T], b[N*T], c[N*T]; // static: ~40 MB per array is far too large for the stack
int *devA, *devB, *devC;
for (int i = 0; i < N*T; i++) {
/*devA[i] = 0;
devB[i] = 0;
devC[i] = 0;*/
a[i] = i;
b[i] = 1;
//c[i] = 0;
}
hipMalloc((void**)&devA, size);
hipMalloc((void**)&devB, size);
hipMalloc((void**)&devC, size);
hipMemcpy(devA, a, size, hipMemcpyHostToDevice);
hipMemcpy(devB, b, size, hipMemcpyHostToDevice);
vecAdd<<<T, N>>>(devA, devB, devC);
hipMemcpy(c, devC, size, hipMemcpyDeviceToHost);
hipFree(devA);
hipFree(devB);
hipFree(devC);
for (int i = 0; i < N*T; i++) {
//c[i] = a[i] * 10 + b[i];
printf("c[%d]= %d\n", i, c[i]);
}
return(0);
}
// Helper function for using CUDA to add vectors in parallel.
| 0bc5eb20d36008bcaca3d10b53506127987c031e.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#define N 1000 // threads per block
#define T 10000 // number of blocks (total vector length is N*T)
__global__ void vecAdd(int *A, int *B, int *C) {
int i = blockIdx.x*blockDim.x + threadIdx.x;
C[i] = A[i] * 10 + B[i];
}
int main(int argc, char **argv) {
int size = N * T * sizeof(int);
static int a[N*T], b[N*T], c[N*T]; // static: ~40 MB per array is far too large for the stack
int *devA, *devB, *devC;
for (int i = 0; i < N*T; i++) {
/*devA[i] = 0;
devB[i] = 0;
devC[i] = 0;*/
a[i] = i;
b[i] = 1;
//c[i] = 0;
}
cudaMalloc((void**)&devA, size);
cudaMalloc((void**)&devB, size);
cudaMalloc((void**)&devC, size);
cudaMemcpy(devA, a, size, cudaMemcpyHostToDevice);
cudaMemcpy(devB, b, size, cudaMemcpyHostToDevice);
vecAdd<<<T, N>>>(devA, devB, devC);
cudaMemcpy(c, devC, size, cudaMemcpyDeviceToHost);
cudaFree(devA);
cudaFree(devB);
cudaFree(devC);
for (int i = 0; i < N*T; i++) {
//c[i] = a[i] * 10 + b[i];
printf("c[%d]= %d\n", i, c[i]);
}
return(0);
}
// Helper function for using CUDA to add vectors in parallel.
|
e9e57ad8257242deb69b4c82b4dbc39a65b4566e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void kCeil(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = ceil(mat[i]);
} | e9e57ad8257242deb69b4c82b4dbc39a65b4566e.cu | #include "includes.h"
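// Grid-stride loop: each thread handles elements idx, idx + numThreads, ... so any
// grid/block configuration covers all len elements.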
__global__ void kCeil(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = ceil(mat[i]);
} |
1ddeb35177a60d57f4d4837cf037bf5bc45481d2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2017 Sony Corporation. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <nbla/array.hpp>
#include <nbla/cuda/common.hpp>
#include <nbla/cuda/function/top_n_error.hpp>
#include <nbla/cuda/math.hpp>
#include <nbla/variable.hpp>
namespace nbla {
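// For each (outer, inner) position, count how many of the size1_ class scores are >=
// the score of the labelled class; the sample is flagged as a top-N error when more
// than n_ scores reach that threshold.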
template <typename T, typename T1>
__global__ void kernel_top_n_error_reduction(const int num, const int size1_,
const int size2_, const int n_,
const T *x, const T1 *l, T *y) {
NBLA_CUDA_KERNEL_LOOP(idx, num) {
const int idx2 = idx % size2_;
const int idx0 = idx / size2_;
const int l_in = idx0 * size2_ + idx2;
const T1 label = l[l_in];
const int x_in = idx0 * size2_ * size1_ + idx2;
const T threshold = x[x_in + label * size2_];
T1 count = 0;
for (int i = 0; i < size1_; i++) {
count += x[x_in + i * size2_] >= threshold;
}
y[l_in] = count > n_;
}
}
template <typename T, typename T1>
void TopNErrorCuda<T, T1>::forward_impl(const Variables &inputs,
const Variables &outputs) {
cuda_set_device(std::stoi(this->ctx_.device_id));
const T *p = inputs[0]->get_data_pointer<T>(this->ctx_);
const T1 *l = inputs[1]->get_data_pointer<T1>(this->ctx_);
T *y = outputs[0]->cast_data_and_get_pointer<T>(this->ctx_);
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_top_n_error_reduction,
this->size0_ * this->size2_, this->size1_,
this->size2_, this->n_, p, l, y);
}
template <typename T, typename T1>
void TopNErrorCuda<T, T1>::backward_impl(const Variables &inputs,
const Variables &outputs,
const vector<bool> &propagate_down,
const vector<bool> &accum) {
// not supported
}
// template instantiation
template class TopNErrorCuda<float, int>;
}
| 1ddeb35177a60d57f4d4837cf037bf5bc45481d2.cu | // Copyright (c) 2017 Sony Corporation. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <nbla/array.hpp>
#include <nbla/cuda/common.hpp>
#include <nbla/cuda/function/top_n_error.hpp>
#include <nbla/cuda/math.hpp>
#include <nbla/variable.hpp>
namespace nbla {
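// For each (outer, inner) position, count how many of the size1_ class scores are >=
// the score of the labelled class; the sample is flagged as a top-N error when more
// than n_ scores reach that threshold.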
template <typename T, typename T1>
__global__ void kernel_top_n_error_reduction(const int num, const int size1_,
const int size2_, const int n_,
const T *x, const T1 *l, T *y) {
NBLA_CUDA_KERNEL_LOOP(idx, num) {
const int idx2 = idx % size2_;
const int idx0 = idx / size2_;
const int l_in = idx0 * size2_ + idx2;
const T1 label = l[l_in];
const int x_in = idx0 * size2_ * size1_ + idx2;
const T threshold = x[x_in + label * size2_];
T1 count = 0;
for (int i = 0; i < size1_; i++) {
count += x[x_in + i * size2_] >= threshold;
}
y[l_in] = count > n_;
}
}
template <typename T, typename T1>
void TopNErrorCuda<T, T1>::forward_impl(const Variables &inputs,
const Variables &outputs) {
cuda_set_device(std::stoi(this->ctx_.device_id));
const T *p = inputs[0]->get_data_pointer<T>(this->ctx_);
const T1 *l = inputs[1]->get_data_pointer<T1>(this->ctx_);
T *y = outputs[0]->cast_data_and_get_pointer<T>(this->ctx_);
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_top_n_error_reduction,
this->size0_ * this->size2_, this->size1_,
this->size2_, this->n_, p, l, y);
}
template <typename T, typename T1>
void TopNErrorCuda<T, T1>::backward_impl(const Variables &inputs,
const Variables &outputs,
const vector<bool> &propagate_down,
const vector<bool> &accum) {
// not supported
}
// template instantiation
template class TopNErrorCuda<float, int>;
}
|
5a262011663f14bbd371d59b6e937668e29f2840.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "rocblas.h"
#include "Utilities.cuh"
float *createPermutationMatrix(int *h_PivotArray, int N)
{
int temp;
// --- Create permutation matrix
float *P = (float *)malloc(N * N * sizeof(float));
    // --- Build the identity explicitly: every entry must be written, since
    //     malloc() does not zero the buffer.
    for (int i=0; i<N; i++)
        for (int j=0; j<N; j++)
            P[i * N + j] = (i == j) ? 1.0f : 0.0f;
for (int j=0; j<N; j++)
for (int i=0; i<N-1; i++) {
temp = P[i + j * N];
P[i + j * N] = P[(h_PivotArray[i] - 1) + j * N];
P[(h_PivotArray[i] - 1) + j * N] = temp;
}
return P;
}
/********/
/* MAIN */
/********/
int main() {
const unsigned int N = 3;
const unsigned int Nmatrices = 1;
hipblasHandle_t handle;
cublasSafeCall(hipblasCreate(&handle));
/***********************/
/* SETTING THE PROBLEM */
/***********************/
// --- Matrices to be inverted (only one in this example)
float *h_A = new float[N*N*Nmatrices];
h_A[0] = 4.f;
h_A[1] = 3.f;
h_A[2] = 8.f;
h_A[3] = 9.f;
h_A[4] = 5.f;
h_A[5] = 1.f;
h_A[6] = 2.f;
h_A[7] = 7.f;
h_A[8] = 6.f;
// --- Known term (only one in this example)
float *h_B = new float[N];
h_B[0] = 1.f;
h_B[1] = 0.5f;
h_B[2] = 3.;
// --- Result (only one in this example)
float *h_X = new float[N];
// --- Allocate device space for the input matrices
float *d_A; gpuErrchk(hipMalloc((void**)&d_A, N*N*Nmatrices*sizeof(float)));
float *d_B; gpuErrchk(hipMalloc((void**)&d_B, N* sizeof(float)));
float *d_X; gpuErrchk(hipMalloc((void**)&d_X, N* sizeof(float)));
// --- Move the relevant matrices from host to device
gpuErrchk(hipMemcpy(d_A,h_A,N*N*Nmatrices*sizeof(float),hipMemcpyHostToDevice));
gpuErrchk(hipMemcpy(d_B,h_B,N* sizeof(float),hipMemcpyHostToDevice));
/********************/
/* LU DECOMPOSITION */
/********************/
// --- Creating the array of pointers needed as input/output to the batched getrf
float **h_inout_pointers = (float **)malloc(Nmatrices*sizeof(float *));
for (int i=0; i<Nmatrices; i++) h_inout_pointers[i]=(float *)((char*)d_A+i*((size_t)N*N)*sizeof(float));
float **d_inout_pointers;
gpuErrchk(hipMalloc((void**)&d_inout_pointers, Nmatrices*sizeof(float *)));
gpuErrchk(hipMemcpy(d_inout_pointers,h_inout_pointers,Nmatrices*sizeof(float *),hipMemcpyHostToDevice));
free(h_inout_pointers);
int *d_PivotArray; gpuErrchk(hipMalloc((void**)&d_PivotArray, N*Nmatrices*sizeof(int)));
int *d_InfoArray; gpuErrchk(hipMalloc((void**)&d_InfoArray, Nmatrices*sizeof(int)));
int *h_PivotArray = (int *)malloc(N*Nmatrices*sizeof(int));
int *h_InfoArray = (int *)malloc( Nmatrices*sizeof(int));
cublasSafeCall(hipblasSgetrfBatched(handle, N, d_inout_pointers, N, d_PivotArray, d_InfoArray, Nmatrices));
//cublasSafeCall(hipblasSgetrfBatched(handle, N, d_inout_pointers, N, NULL, d_InfoArray, Nmatrices));
gpuErrchk(hipMemcpy(h_InfoArray,d_InfoArray,Nmatrices*sizeof(int),hipMemcpyDeviceToHost));
for (int i = 0; i < Nmatrices; i++)
if (h_InfoArray[i] != 0) {
fprintf(stderr, "Factorization of matrix %d Failed: Matrix may be singular\n", i);
hipDeviceReset();
exit(EXIT_FAILURE);
}
gpuErrchk(hipMemcpy(h_A,d_A,N*N*sizeof(float),hipMemcpyDeviceToHost));
gpuErrchk(hipMemcpy(h_PivotArray,d_PivotArray,N*Nmatrices*sizeof(int),hipMemcpyDeviceToHost));
// --- The output factored matrix in column-major format
for (int i=0; i<N*N; i++) printf("A[%i]=%f\n", i, h_A[i]);
printf("\n\n");
// --- The pivot array
for (int i=0; i<N; i++) printf("IPIV[%i]=%i\n", i, h_PivotArray[i]);
/*******************************************/
/* APPROACH NR.1: THROUGH THE INVERSE OF A */
/*******************************************/
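    // --- Approach nr. 1: reuse the LU factors and pivots from getrfBatched to
    //     form inv(A) with getriBatched, then compute x = inv(A) * b with a
    //     single gemv.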
// --- Allocate host space for the inverted matrices
float *h_C = new float[N*N*Nmatrices];
// --- Allocate device space for the inverted matrices
float *d_C; gpuErrchk(hipMalloc((void**)&d_C, N*N*Nmatrices*sizeof(float)));
// --- Creating the array of pointers needed as output to the batched getri
float **h_out_pointers = (float **)malloc(Nmatrices*sizeof(float *));
for (int i=0; i<Nmatrices; i++) h_out_pointers[i]=(float *)((char*)d_C+i*((size_t)N*N)*sizeof(float));
float **d_out_pointers;
gpuErrchk(hipMalloc((void**)&d_out_pointers, Nmatrices*sizeof(float *)));
gpuErrchk(hipMemcpy(d_out_pointers,h_out_pointers,Nmatrices*sizeof(float *),hipMemcpyHostToDevice));
free(h_out_pointers);
cublasSafeCall(hipblasSgetriBatched(handle, N, (const float **)d_inout_pointers, N, d_PivotArray, d_out_pointers, N, d_InfoArray, Nmatrices));
gpuErrchk(hipMemcpy(h_InfoArray,d_InfoArray,Nmatrices*sizeof(int),hipMemcpyDeviceToHost));
for (int i = 0; i < Nmatrices; i++)
if (h_InfoArray[i] != 0) {
fprintf(stderr, "Inversion of matrix %d Failed: Matrix may be singular\n", i);
hipDeviceReset();
exit(EXIT_FAILURE);
}
gpuErrchk(hipMemcpy(h_C,d_C,N*N*sizeof(float),hipMemcpyDeviceToHost));
// --- The output inverted matrix in column-major format
printf("\n\n");
for (int i=0; i<N*N; i++) printf("C[%i]=%f\n", i, h_C[i]);
float alpha1 = 1.f;
float beta1 = 0.f;
cublasSafeCall(hipblasSgemv(handle, HIPBLAS_OP_N, N, N, &alpha1, d_C, N, d_B, 1, &beta1, d_X, 1));
gpuErrchk(hipMemcpy(h_X,d_X,N*sizeof(float),hipMemcpyDeviceToHost));
    // --- The solution vector of the linear system
printf("\n\n");
for (int i=0; i<N; i++) printf("X[%i]=%f\n", i, h_X[i]);
/*****************************************************************************/
/* APPROACH NR.2: THROUGH THE INVERSE OF UPPER AND LOWER TRIANGULAR MATRICES */
/*****************************************************************************/
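    // --- Approach nr. 2: getrf factored P * A = L * U, so A * x = b becomes
    //     L * U * x = P * b. Permute b, then do a forward solve with the
    //     unit-lower-triangular L and a backward solve with U (both strsm
    //     calls operate on the packed LU factors left in d_A).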
float *P = createPermutationMatrix(h_PivotArray, N);
float *d_P; gpuErrchk(hipMalloc((void**)&d_P, N * N * sizeof(float)));
printf("\n\n");
// --- The permutation matrix
for (int i=0; i<N; i++)
for (int j=0; j<N; j++)
printf("P[%i, %i]=%f\n", i, j, P[j * N + i]);
gpuErrchk(hipMemcpy(d_P, P, N * N * sizeof(float), hipMemcpyHostToDevice));
// --- Now P*A=L*U
// Linear system A*x=y => P.'*L*U*x=y => L*U*x=P*y
cublasSafeCall(hipblasSgemv(handle, HIPBLAS_OP_N, N, N, &alpha1, d_P, N, d_B, 1, &beta1, d_B, 1));
gpuErrchk(hipMemcpy(h_B,d_B,N*sizeof(float),hipMemcpyDeviceToHost));
// --- The result of P*y
printf("\n\n");
for (int i=0; i<N; i++) printf("(P*y)[%i]=%f\n", i, h_B[i]);
// --- 1st phase - solve Ly = b
const float alpha = 1.f;
    // --- Function solves the triangular linear system with multiple right-hand sides; it overwrites b with the result
// --- Lower triangular part
cublasSafeCall(hipblasStrsm(handle, HIPBLAS_SIDE_LEFT, HIPBLAS_FILL_MODE_LOWER, HIPBLAS_OP_N, HIPBLAS_DIAG_UNIT, N, 1, &alpha, d_A, N, d_B, N));
// --- Upper triangular part
cublasSafeCall(hipblasStrsm(handle, HIPBLAS_SIDE_LEFT, HIPBLAS_FILL_MODE_UPPER, HIPBLAS_OP_N, HIPBLAS_DIAG_NON_UNIT, N, 1, &alpha, d_A, N, d_B, N));
gpuErrchk(hipMemcpy(h_B,d_B,N*sizeof(float),hipMemcpyDeviceToHost));
    // --- The solution vector obtained from the two triangular solves
printf("\n\n");
for (int i=0; i<N; i++) printf("B[%i]=%f\n", i, h_B[i]);
return 0;
}
| 5a262011663f14bbd371d59b6e937668e29f2840.cu | #include <stdio.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "cublas_v2.h"
#include "Utilities.cuh"
float *createPermutationMatrix(int *h_PivotArray, int N)
{
int temp;
// --- Create permutation matrix
float *P = (float *)malloc(N * N * sizeof(float));
    // --- Build the identity explicitly: every entry must be written, since
    //     malloc() does not zero the buffer.
    for (int i=0; i<N; i++)
        for (int j=0; j<N; j++)
            P[i * N + j] = (i == j) ? 1.0f : 0.0f;
for (int j=0; j<N; j++)
for (int i=0; i<N-1; i++) {
temp = P[i + j * N];
P[i + j * N] = P[(h_PivotArray[i] - 1) + j * N];
P[(h_PivotArray[i] - 1) + j * N] = temp;
}
return P;
}
/********/
/* MAIN */
/********/
int main() {
const unsigned int N = 3;
const unsigned int Nmatrices = 1;
cublasHandle_t handle;
cublasSafeCall(cublasCreate(&handle));
/***********************/
/* SETTING THE PROBLEM */
/***********************/
// --- Matrices to be inverted (only one in this example)
float *h_A = new float[N*N*Nmatrices];
h_A[0] = 4.f;
h_A[1] = 3.f;
h_A[2] = 8.f;
h_A[3] = 9.f;
h_A[4] = 5.f;
h_A[5] = 1.f;
h_A[6] = 2.f;
h_A[7] = 7.f;
h_A[8] = 6.f;
// --- Known term (only one in this example)
float *h_B = new float[N];
h_B[0] = 1.f;
h_B[1] = 0.5f;
h_B[2] = 3.;
// --- Result (only one in this example)
float *h_X = new float[N];
// --- Allocate device space for the input matrices
float *d_A; gpuErrchk(cudaMalloc((void**)&d_A, N*N*Nmatrices*sizeof(float)));
float *d_B; gpuErrchk(cudaMalloc((void**)&d_B, N* sizeof(float)));
float *d_X; gpuErrchk(cudaMalloc((void**)&d_X, N* sizeof(float)));
// --- Move the relevant matrices from host to device
gpuErrchk(cudaMemcpy(d_A,h_A,N*N*Nmatrices*sizeof(float),cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(d_B,h_B,N* sizeof(float),cudaMemcpyHostToDevice));
/********************/
/* LU DECOMPOSITION */
/********************/
// --- Creating the array of pointers needed as input/output to the batched getrf
float **h_inout_pointers = (float **)malloc(Nmatrices*sizeof(float *));
for (int i=0; i<Nmatrices; i++) h_inout_pointers[i]=(float *)((char*)d_A+i*((size_t)N*N)*sizeof(float));
float **d_inout_pointers;
gpuErrchk(cudaMalloc((void**)&d_inout_pointers, Nmatrices*sizeof(float *)));
gpuErrchk(cudaMemcpy(d_inout_pointers,h_inout_pointers,Nmatrices*sizeof(float *),cudaMemcpyHostToDevice));
free(h_inout_pointers);
int *d_PivotArray; gpuErrchk(cudaMalloc((void**)&d_PivotArray, N*Nmatrices*sizeof(int)));
int *d_InfoArray; gpuErrchk(cudaMalloc((void**)&d_InfoArray, Nmatrices*sizeof(int)));
int *h_PivotArray = (int *)malloc(N*Nmatrices*sizeof(int));
int *h_InfoArray = (int *)malloc( Nmatrices*sizeof(int));
cublasSafeCall(cublasSgetrfBatched(handle, N, d_inout_pointers, N, d_PivotArray, d_InfoArray, Nmatrices));
//cublasSafeCall(cublasSgetrfBatched(handle, N, d_inout_pointers, N, NULL, d_InfoArray, Nmatrices));
gpuErrchk(cudaMemcpy(h_InfoArray,d_InfoArray,Nmatrices*sizeof(int),cudaMemcpyDeviceToHost));
for (int i = 0; i < Nmatrices; i++)
if (h_InfoArray[i] != 0) {
fprintf(stderr, "Factorization of matrix %d Failed: Matrix may be singular\n", i);
cudaDeviceReset();
exit(EXIT_FAILURE);
}
gpuErrchk(cudaMemcpy(h_A,d_A,N*N*sizeof(float),cudaMemcpyDeviceToHost));
gpuErrchk(cudaMemcpy(h_PivotArray,d_PivotArray,N*Nmatrices*sizeof(int),cudaMemcpyDeviceToHost));
// --- The output factored matrix in column-major format
for (int i=0; i<N*N; i++) printf("A[%i]=%f\n", i, h_A[i]);
printf("\n\n");
// --- The pivot array
for (int i=0; i<N; i++) printf("IPIV[%i]=%i\n", i, h_PivotArray[i]);
/*******************************************/
/* APPROACH NR.1: THROUGH THE INVERSE OF A */
/*******************************************/
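    // --- Approach nr. 1: reuse the LU factors and pivots from getrfBatched to
    //     form inv(A) with getriBatched, then compute x = inv(A) * b with a
    //     single gemv.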
// --- Allocate host space for the inverted matrices
float *h_C = new float[N*N*Nmatrices];
// --- Allocate device space for the inverted matrices
float *d_C; gpuErrchk(cudaMalloc((void**)&d_C, N*N*Nmatrices*sizeof(float)));
// --- Creating the array of pointers needed as output to the batched getri
float **h_out_pointers = (float **)malloc(Nmatrices*sizeof(float *));
for (int i=0; i<Nmatrices; i++) h_out_pointers[i]=(float *)((char*)d_C+i*((size_t)N*N)*sizeof(float));
float **d_out_pointers;
gpuErrchk(cudaMalloc((void**)&d_out_pointers, Nmatrices*sizeof(float *)));
gpuErrchk(cudaMemcpy(d_out_pointers,h_out_pointers,Nmatrices*sizeof(float *),cudaMemcpyHostToDevice));
free(h_out_pointers);
cublasSafeCall(cublasSgetriBatched(handle, N, (const float **)d_inout_pointers, N, d_PivotArray, d_out_pointers, N, d_InfoArray, Nmatrices));
gpuErrchk(cudaMemcpy(h_InfoArray,d_InfoArray,Nmatrices*sizeof(int),cudaMemcpyDeviceToHost));
for (int i = 0; i < Nmatrices; i++)
if (h_InfoArray[i] != 0) {
fprintf(stderr, "Inversion of matrix %d Failed: Matrix may be singular\n", i);
cudaDeviceReset();
exit(EXIT_FAILURE);
}
gpuErrchk(cudaMemcpy(h_C,d_C,N*N*sizeof(float),cudaMemcpyDeviceToHost));
// --- The output inverted matrix in column-major format
printf("\n\n");
for (int i=0; i<N*N; i++) printf("C[%i]=%f\n", i, h_C[i]);
float alpha1 = 1.f;
float beta1 = 0.f;
cublasSafeCall(cublasSgemv(handle, CUBLAS_OP_N, N, N, &alpha1, d_C, N, d_B, 1, &beta1, d_X, 1));
gpuErrchk(cudaMemcpy(h_X,d_X,N*sizeof(float),cudaMemcpyDeviceToHost));
    // --- The solution vector of the linear system
printf("\n\n");
for (int i=0; i<N; i++) printf("X[%i]=%f\n", i, h_X[i]);
/*****************************************************************************/
/* APPROACH NR.2: THROUGH THE INVERSE OF UPPER AND LOWER TRIANGULAR MATRICES */
/*****************************************************************************/
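    // --- Approach nr. 2: getrf factored P * A = L * U, so A * x = b becomes
    //     L * U * x = P * b. Permute b, then do a forward solve with the
    //     unit-lower-triangular L and a backward solve with U (both strsm
    //     calls operate on the packed LU factors left in d_A).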
float *P = createPermutationMatrix(h_PivotArray, N);
float *d_P; gpuErrchk(cudaMalloc((void**)&d_P, N * N * sizeof(float)));
printf("\n\n");
// --- The permutation matrix
for (int i=0; i<N; i++)
for (int j=0; j<N; j++)
printf("P[%i, %i]=%f\n", i, j, P[j * N + i]);
gpuErrchk(cudaMemcpy(d_P, P, N * N * sizeof(float), cudaMemcpyHostToDevice));
// --- Now P*A=L*U
// Linear system A*x=y => P.'*L*U*x=y => L*U*x=P*y
cublasSafeCall(cublasSgemv(handle, CUBLAS_OP_N, N, N, &alpha1, d_P, N, d_B, 1, &beta1, d_B, 1));
gpuErrchk(cudaMemcpy(h_B,d_B,N*sizeof(float),cudaMemcpyDeviceToHost));
// --- The result of P*y
printf("\n\n");
for (int i=0; i<N; i++) printf("(P*y)[%i]=%f\n", i, h_B[i]);
// --- 1st phase - solve Ly = b
const float alpha = 1.f;
    // --- Function solves the triangular linear system with multiple right-hand sides; it overwrites b with the result
// --- Lower triangular part
cublasSafeCall(cublasStrsm(handle, CUBLAS_SIDE_LEFT, CUBLAS_FILL_MODE_LOWER, CUBLAS_OP_N, CUBLAS_DIAG_UNIT, N, 1, &alpha, d_A, N, d_B, N));
// --- Upper triangular part
cublasSafeCall(cublasStrsm(handle, CUBLAS_SIDE_LEFT, CUBLAS_FILL_MODE_UPPER, CUBLAS_OP_N, CUBLAS_DIAG_NON_UNIT, N, 1, &alpha, d_A, N, d_B, N));
gpuErrchk(cudaMemcpy(h_B,d_B,N*sizeof(float),cudaMemcpyDeviceToHost));
    // --- The solution vector obtained from the two triangular solves
printf("\n\n");
for (int i=0; i<N; i++) printf("B[%i]=%f\n", i, h_B[i]);
return 0;
}
|
e4ac3f37acc0b4deabc7c81e28d8c247a6d3df54.hip | // !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using LayoutDst = cutlass::layout::TensorNCHW;
using ThreadBlockShape = cutlass::gemm::GemmShape<128, 128, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationRelu<
float, 1, int32_t, float, float>;
using Convolution = cutlass::conv::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, float,
LayoutDst, float, LayoutDst, int32_t,
cutlass::conv::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle,
2, 4, 16, false,
cutlass::arch::OpMultiplyAdd>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
hipStream_t stream);
#pragma GCC diagnostic pop
#endif
| e4ac3f37acc0b4deabc7c81e28d8c247a6d3df54.cu | #if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using LayoutDst = cutlass::layout::TensorNCHW;
using ThreadBlockShape = cutlass::gemm::GemmShape<128, 128, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationRelu<
float, 1, int32_t, float, float>;
using Convolution = cutlass::conv::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, float,
LayoutDst, float, LayoutDst, int32_t,
cutlass::conv::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle,
2, 4, 16, false,
cutlass::arch::OpMultiplyAdd>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
|
e6c5e979902d4dfd71816837b657a74eae19b2f7.hip | // !!! This is a file automatically generated by hipify!!!
//nvcc -o test histogram_eq_gpu.cu -std=c++11 -lopencv_core -lopencv_highgui -lopencv_imgproc -lopencv_imgcodecs
#include <iostream>
#include <cstdio>
#include <cmath>
#include <omp.h>
#include <chrono>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include "common.h"
#include <hip/hip_runtime.h>
using namespace std;
//Adrian Biller A01018940
//histogram equalizer
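//Pipeline: (1) build a 256-bin intensity histogram of the grayscale image,
//(2) turn it into a normalized cumulative distribution (the equalization
//lookup table), and (3) remap every pixel through that table.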
__global__ void create_image_histogram(char *input, int* histogram, int nx, int ny){
unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int iy = threadIdx.y + blockIdx.y * blockDim.y;
unsigned int idx = iy * nx + ix;
    // Only threads inside the image contribute; atomicAdd avoids the data
    // race when many pixels map to the same intensity bin.
    if(ix < nx && iy < ny){
        atomicAdd(&histogram[(unsigned char)input[idx]], 1);
    }
}
__global__ void normalize_histogram(char *input, int *histogram, int *normalized_histogram, int nx, int ny){
    unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x;
    unsigned int iy = threadIdx.y + blockIdx.y * blockDim.y;
    unsigned int idx = iy * nx + ix;
    // One thread per intensity bin: accumulate the histogram up to this bin
    // (the cumulative distribution) and rescale it to the 0-255 range.
    if(idx < 256){
        int accumulated = 0;
        for(int x = 0; x <= (int)idx; x++){
            accumulated += histogram[x];
        }
        normalized_histogram[idx] = accumulated * 255 / (nx*ny);
    }
}
__global__ void contrast_image(char *input, char *output, int* normalized_histogram, int nx, int ny){
    unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x;
    unsigned int iy = threadIdx.y + blockIdx.y * blockDim.y;
    unsigned int idx = iy * nx + ix;
    // Remap each pixel through the equalized intensity lookup table.
    if(ix < nx && iy < ny){
        output[idx] = normalized_histogram[(unsigned char)input[idx]];
    }
}
int main(int argc, char *argv[])
{
string imagePath;
// checking image path
if(argc < 2)
imagePath = "Images/dog2.jpeg";
else
imagePath = argv[1];
// read color image
cv::Mat input = cv::imread(imagePath, CV_LOAD_IMAGE_COLOR);
if (input.empty())
{
cout << "Image Not Found!" << std::endl;
cin.get();
return -1;
}
//converting image to grayscale
cv::Mat grayscale_input;
cvtColor(input, grayscale_input, cv::COLOR_BGR2GRAY);
//creating output image
cv::Mat output(grayscale_input.rows, grayscale_input.cols, grayscale_input.type());
//changing the contrast of the output image
// image_histogram_equalizer(grayscale_input, output);
//declaring histogram arrays
int histogram[256] = {0};
int normalized_histogram[256] = {0};
//CUDA Kernel
int dev = 0;
hipDeviceProp_t deviceProp;
SAFE_CALL(hipGetDeviceProperties(&deviceProp, dev), "Error device prop");
printf("Using Device %d: %s\n", dev, deviceProp.name);
SAFE_CALL(hipSetDevice(dev), "Error setting device");
int nx = grayscale_input.cols;
int ny = grayscale_input.rows;
int nxy = nx * ny;
int nBytes = nxy * sizeof(char);
char *d_input, *d_output;
int *d_histogram, *d_normalized_histogram;
int rBytes = 256 * sizeof(int);
SAFE_CALL(hipMalloc((void **)&d_input, nBytes), "Error allocating input image");
SAFE_CALL(hipMalloc((void **)&d_output, nBytes), "Error allocating output image");
SAFE_CALL(hipMalloc((void **)&d_histogram, rBytes), "Error allocating histogram");
SAFE_CALL(hipMalloc((void **)&d_normalized_histogram, rBytes), "Error allocating normalized histogram");
SAFE_CALL(hipMemcpy(d_input, grayscale_input.ptr(), nBytes, hipMemcpyHostToDevice), "Error copying input image");
SAFE_CALL(hipMemcpy(d_histogram, &histogram, rBytes, hipMemcpyHostToDevice), "Error copying input image");
SAFE_CALL(hipMemcpy(d_normalized_histogram, &normalized_histogram, rBytes, hipMemcpyHostToDevice), "Error copying input image");
int dimx = 32;
int dimy = 32;
dim3 block(dimx, dimy);
dim3 grid((nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y);
auto start_cpu = chrono::high_resolution_clock::now();
hipLaunchKernelGGL(( create_image_histogram), dim3(grid), dim3(block), 0, 0, d_input, d_histogram, nx, ny);
SAFE_CALL(hipDeviceSynchronize(), "Error executing kernel1");
hipLaunchKernelGGL(( normalize_histogram), dim3(grid), dim3(block), 0, 0, d_input, d_histogram, d_normalized_histogram, nx, ny);
SAFE_CALL(hipDeviceSynchronize(), "Error executing kernel2");
hipLaunchKernelGGL(( contrast_image), dim3(grid), dim3(block), 0, 0, d_input, d_output, d_normalized_histogram, nx, ny);
SAFE_CALL(hipDeviceSynchronize(), "Error executing kernel3");
auto end_cpu = chrono::high_resolution_clock::now();
chrono::duration<float, std::milli> duration_ms = end_cpu - start_cpu;
printf("elapsed %f ms\n", duration_ms.count());
// SAFE_CALL kernel error
SAFE_CALL(hipGetLastError(), "Error with last error");
SAFE_CALL(hipMemcpy(output.ptr(), d_output, nBytes, hipMemcpyDeviceToHost), "Error copying image");
//Allow the windows to resize
    cv::namedWindow("Input", cv::WINDOW_NORMAL);
    cv::namedWindow("Output", cv::WINDOW_NORMAL);
//showing initial image vs contrast change
cv::imshow("Input", grayscale_input);
cv::imshow("Output", output);
//Wait for key press
cv::waitKey();
return 0;
}
| e6c5e979902d4dfd71816837b657a74eae19b2f7.cu | //nvcc -o test histogram_eq_gpu.cu -std=c++11 -lopencv_core -lopencv_highgui -lopencv_imgproc -lopencv_imgcodecs
#include <iostream>
#include <cstdio>
#include <cmath>
#include <omp.h>
#include <chrono>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include "common.h"
#include <cuda_runtime.h>
using namespace std;
//Adrian Biller A01018940
//histogram equalizer
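//Pipeline: (1) build a 256-bin intensity histogram of the grayscale image,
//(2) turn it into a normalized cumulative distribution (the equalization
//lookup table), and (3) remap every pixel through that table.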
__global__ void create_image_histogram(char *input, int* histogram, int nx, int ny){
unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int iy = threadIdx.y + blockIdx.y * blockDim.y;
unsigned int idx = iy * nx + ix;
    // Only threads inside the image contribute; atomicAdd avoids the data
    // race when many pixels map to the same intensity bin.
    if(ix < nx && iy < ny){
        atomicAdd(&histogram[(unsigned char)input[idx]], 1);
    }
}
__global__ void normalize_histogram(char *input, int *histogram, int *normalized_histogram, int nx, int ny){
    unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x;
    unsigned int iy = threadIdx.y + blockIdx.y * blockDim.y;
    unsigned int idx = iy * nx + ix;
    // One thread per intensity bin: accumulate the histogram up to this bin
    // (the cumulative distribution) and rescale it to the 0-255 range.
    if(idx < 256){
        int accumulated = 0;
        for(int x = 0; x <= (int)idx; x++){
            accumulated += histogram[x];
        }
        normalized_histogram[idx] = accumulated * 255 / (nx*ny);
    }
}
__global__ void contrast_image(char *input, char *output, int* normalized_histogram, int nx, int ny){
    unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x;
    unsigned int iy = threadIdx.y + blockIdx.y * blockDim.y;
    unsigned int idx = iy * nx + ix;
    // Remap each pixel through the equalized intensity lookup table.
    if(ix < nx && iy < ny){
        output[idx] = normalized_histogram[(unsigned char)input[idx]];
    }
}
int main(int argc, char *argv[])
{
string imagePath;
// checking image path
if(argc < 2)
imagePath = "Images/dog2.jpeg";
else
imagePath = argv[1];
// read color image
cv::Mat input = cv::imread(imagePath, CV_LOAD_IMAGE_COLOR);
if (input.empty())
{
cout << "Image Not Found!" << std::endl;
cin.get();
return -1;
}
//converting image to grayscale
cv::Mat grayscale_input;
cvtColor(input, grayscale_input, cv::COLOR_BGR2GRAY);
//creating output image
cv::Mat output(grayscale_input.rows, grayscale_input.cols, grayscale_input.type());
//changing the contrast of the output image
// image_histogram_equalizer(grayscale_input, output);
//declaring histogram arrays
int histogram[256] = {0};
int normalized_histogram[256] = {0};
//CUDA Kernel
int dev = 0;
cudaDeviceProp deviceProp;
SAFE_CALL(cudaGetDeviceProperties(&deviceProp, dev), "Error device prop");
printf("Using Device %d: %s\n", dev, deviceProp.name);
SAFE_CALL(cudaSetDevice(dev), "Error setting device");
int nx = grayscale_input.cols;
int ny = grayscale_input.rows;
int nxy = nx * ny;
int nBytes = nxy * sizeof(char);
char *d_input, *d_output;
int *d_histogram, *d_normalized_histogram;
int rBytes = 256 * sizeof(int);
SAFE_CALL(cudaMalloc((void **)&d_input, nBytes), "Error allocating input image");
SAFE_CALL(cudaMalloc((void **)&d_output, nBytes), "Error allocating output image");
SAFE_CALL(cudaMalloc((void **)&d_histogram, rBytes), "Error allocating histogram");
SAFE_CALL(cudaMalloc((void **)&d_normalized_histogram, rBytes), "Error allocating normalized histogram");
SAFE_CALL(cudaMemcpy(d_input, grayscale_input.ptr(), nBytes, cudaMemcpyHostToDevice), "Error copying input image");
SAFE_CALL(cudaMemcpy(d_histogram, &histogram, rBytes, cudaMemcpyHostToDevice), "Error copying input image");
SAFE_CALL(cudaMemcpy(d_normalized_histogram, &normalized_histogram, rBytes, cudaMemcpyHostToDevice), "Error copying input image");
int dimx = 32;
int dimy = 32;
dim3 block(dimx, dimy);
dim3 grid((nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y);
auto start_cpu = chrono::high_resolution_clock::now();
create_image_histogram<<<grid, block>>>(d_input, d_histogram, nx, ny);
SAFE_CALL(cudaDeviceSynchronize(), "Error executing kernel1");
normalize_histogram<<<grid, block>>>(d_input, d_histogram, d_normalized_histogram, nx, ny);
SAFE_CALL(cudaDeviceSynchronize(), "Error executing kernel2");
contrast_image<<<grid, block>>>(d_input, d_output, d_normalized_histogram, nx, ny);
SAFE_CALL(cudaDeviceSynchronize(), "Error executing kernel3");
auto end_cpu = chrono::high_resolution_clock::now();
chrono::duration<float, std::milli> duration_ms = end_cpu - start_cpu;
printf("elapsed %f ms\n", duration_ms.count());
// SAFE_CALL kernel error
SAFE_CALL(cudaGetLastError(), "Error with last error");
SAFE_CALL(cudaMemcpy(output.ptr(), d_output, nBytes, cudaMemcpyDeviceToHost), "Error copying image");
//Allow the windows to resize
    cv::namedWindow("Input", cv::WINDOW_NORMAL);
    cv::namedWindow("Output", cv::WINDOW_NORMAL);
//showing initial image vs contrast change
cv::imshow("Input", grayscale_input);
cv::imshow("Output", output);
//Wait for key press
cv::waitKey();
return 0;
}
|
d9dd6be31da45a5039e7950d33ec1554be32d763.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#define N 100
#include <math.h>
__global__ void vector_add(float *out, float *a, float *b, int n) {
// get global thread id
int id = blockIdx.x * blockDim.x * blockDim.y * blockDim.z + threadIdx.z * blockDim.y * blockDim.x + threadIdx.y * blockDim.x + threadIdx.x;
    // make sure we don't index past the end of the arrays
    if( id < n ){
out[id] = a[id] + b[id];
}
}
int main(){
float *a, *b, *out;
float *d_a,*d_b,*d_c;
// Allocate memory
a = (float*)malloc(sizeof(float) * N);
b = (float*)malloc(sizeof(float) * N);
out = (float*)malloc(sizeof(float) * N);
//Allocate device memory for a,b,c
hipMalloc((void**)&d_a,sizeof(float)*N );
hipMalloc((void**)&d_b,sizeof(float)*N );
hipMalloc((void**)&d_c,sizeof(float)*N );
// Initialize array
for(int i = 0; i < N; i++){
a[i] = sin(i)*sin(i)+cos(i); b[i] = cos(i)*cos(i)+sin(i);
}
// transfer data from host to device
hipMemcpy(d_a, a, sizeof(float) * N, hipMemcpyHostToDevice);
hipMemcpy(d_b, b, sizeof(float) * N, hipMemcpyHostToDevice);
// hipMemcpy(d_c, out, sizeof(float) * N, hipMemcpyHostToDevice);
    // define a 3D block. Note this is a 1D grid of 3D blocks; use the cheat sheet
    // to see how to compute the global id
dim3 threads(2,2,13);
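    // 2 blocks x (2*2*13) = 104 threads in total, enough to cover all N = 100
    // elements; the bounds check in the kernel discards the extra threads.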
// Main function
hipLaunchKernelGGL(( vector_add), dim3(2),dim3(threads), 0, 0, d_c,d_a, d_b, N);
//copy result back to host
hipMemcpy(out, d_c,sizeof(float) * N , hipMemcpyDeviceToHost);
// print results
int i;
for (i=0;i <N;i++) {
printf("%lf,",out[i]); }
// synchronize execution
//hipDeviceSynchronize();
//clean up after executing kernel
hipFree(d_a);hipFree(d_b);hipFree(d_c);
free(a);free(b);free(out);
}
| d9dd6be31da45a5039e7950d33ec1554be32d763.cu | #include<stdio.h>
#define N 100
#include <math.h>
__global__ void vector_add(float *out, float *a, float *b, int n) {
// get global thread id
int id = blockIdx.x * blockDim.x * blockDim.y * blockDim.z + threadIdx.z * blockDim.y * blockDim.x + threadIdx.y * blockDim.x + threadIdx.x;
    // make sure we don't index past the end of the arrays
    if( id < n ){
out[id] = a[id] + b[id];
}
}
int main(){
float *a, *b, *out;
float *d_a,*d_b,*d_c;
// Allocate memory
a = (float*)malloc(sizeof(float) * N);
b = (float*)malloc(sizeof(float) * N);
out = (float*)malloc(sizeof(float) * N);
//Allocate device memory for a,b,c
cudaMalloc((void**)&d_a,sizeof(float)*N );
cudaMalloc((void**)&d_b,sizeof(float)*N );
cudaMalloc((void**)&d_c,sizeof(float)*N );
// Initialize array
for(int i = 0; i < N; i++){
a[i] = sin(i)*sin(i)+cos(i); b[i] = cos(i)*cos(i)+sin(i);
}
// transfer data from host to device
cudaMemcpy(d_a, a, sizeof(float) * N, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, sizeof(float) * N, cudaMemcpyHostToDevice);
// cudaMemcpy(d_c, out, sizeof(float) * N, cudaMemcpyHostToDevice);
    // define a 3D block. Note this is a 1D grid of 3D blocks; use the cheat sheet
    // to see how to compute the global id
dim3 threads(2,2,13);
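    // 2 blocks x (2*2*13) = 104 threads in total, enough to cover all N = 100
    // elements; the bounds check in the kernel discards the extra threads.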
// Main function
vector_add<<<2,threads>>>(d_c,d_a, d_b, N);
//copy result back to host
cudaMemcpy(out, d_c,sizeof(float) * N , cudaMemcpyDeviceToHost);
// print results
int i;
for (i=0;i <N;i++) {
printf("%lf,",out[i]); }
// synchronize execution
//cudaDeviceSynchronize();
//clean up after executing kernel
cudaFree(d_a);cudaFree(d_b);cudaFree(d_c);
free(a);free(b);free(out);
}
|
188a9627c41415e2da2fd3bed11ecb484123905b.hip | // !!! This is a file automatically generated by hipify!!!
/*
* CUDA blur
* Kevin Yuh, 2014
* Revised by Nailen Matschke, 2016
* Revised by Loko Kung, 2018
*/
#include "blur.cuh"
#include <cstdio>
#include <hip/hip_runtime.h>
#include "cuda_header.cuh"
CUDA_CALLABLE
void cuda_blur_kernel_convolution(uint thread_index, const float* gpu_raw_data,
const float* gpu_blur_v, float* gpu_out_data,
const unsigned int n_frames,
const unsigned int blur_v_size) {
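    // Causal FIR convolution: out[n] = sum over i of raw[n - i] * blur_v[i]
    // for 0 <= i <= min(n, blur_v_size - 1), i.e. each output frame mixes the
    // current and preceding input frames weighted by the impulse response.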
// TODO: Implement the necessary convolution function that should be
// completed for each thread_index. Use the CPU implementation in
// blur.cpp as a reference.
gpu_out_data[thread_index] = 0.0;
for(int i=0;i<blur_v_size;i++){
if(i>thread_index)
break;
gpu_out_data[thread_index] += gpu_raw_data[thread_index-i]*gpu_blur_v[i];
}
}
__global__
void cuda_blur_kernel(const float *gpu_raw_data, const float *gpu_blur_v,
float *gpu_out_data, int n_frames, int blur_v_size) {
// TODO: Compute the current thread index.
uint thread_index;
thread_index = blockDim.x*blockIdx.x+threadIdx.x;
// TODO: Update the while loop to handle all indices for this thread.
// Remember to advance the index as necessary.
while (thread_index<n_frames) {
// Do computation for this thread index
cuda_blur_kernel_convolution(thread_index, gpu_raw_data,
gpu_blur_v, gpu_out_data,
n_frames, blur_v_size);
// TODO: Update the thread index
        thread_index += blockDim.x * gridDim.x;
}
}
float cuda_call_blur_kernel(const unsigned int blocks,
const unsigned int threads_per_block,
const float *raw_data,
const float *blur_v,
float *out_data,
const unsigned int n_frames,
const unsigned int blur_v_size) {
// Use the CUDA machinery for recording time
hipEvent_t start_gpu, stop_gpu;
float time_milli = -1;
hipEventCreate(&start_gpu);
hipEventCreate(&stop_gpu);
hipEventRecord(start_gpu);
// TODO: Allocate GPU memory for the raw input data (either audio file
    //       data or randomly generated data). The data is of type float and
// has n_frames elements. Then copy the data in raw_data into the
// GPU memory you allocated.
float* gpu_raw_data;
hipMalloc((void **) &gpu_raw_data, n_frames*sizeof(float));
hipMemcpy(gpu_raw_data,raw_data,n_frames*sizeof(float),hipMemcpyHostToDevice);
// TODO: Allocate GPU memory for the impulse signal (for now global GPU
    //       memory is fine). The data is of type float and has blur_v_size
// elements. Then copy the data in blur_v into the GPU memory you
// allocated.
float* gpu_blur_v;
hipMalloc((void **) &gpu_blur_v,blur_v_size*sizeof(float));
hipMemcpy(gpu_blur_v,blur_v,blur_v_size*sizeof(float),hipMemcpyHostToDevice);
// TODO: Allocate GPU memory to store the output audio signal after the
// convolution. The data is of type float and has n_frames elements.
// Initialize the data as necessary.
float* gpu_out_data;
hipMalloc((void **) &gpu_out_data,n_frames*sizeof(float));
hipMemset(gpu_out_data,0.0,n_frames*sizeof(float));
// TODO: Appropriately call the kernel function.
hipLaunchKernelGGL(( cuda_blur_kernel), dim3(blocks),dim3(threads_per_block), 0, 0, gpu_raw_data, gpu_blur_v,gpu_out_data,n_frames,blur_v_size);
// Check for errors on kernel call
hipError_t err = hipGetLastError();
if (hipSuccess != err)
fprintf(stderr, "Error %s\n", hipGetErrorString(err));
else
fprintf(stderr, "No kernel error detected\n");
// TODO: Now that kernel calls have finished, copy the output signal
// back from the GPU to host memory. (We store this channel's result
// in out_data on the host.)
hipMemcpy(out_data,gpu_out_data,n_frames*sizeof(float),hipMemcpyDeviceToHost);
// TODO: Now that we have finished our computations on the GPU, free the
// GPU resources.
hipFree(gpu_out_data);
hipFree(gpu_blur_v);
hipFree(gpu_raw_data);
// Stop the recording timer and return the computation time
hipEventRecord(stop_gpu);
hipEventSynchronize(stop_gpu);
hipEventElapsedTime(&time_milli, start_gpu, stop_gpu);
return time_milli;
}
| 188a9627c41415e2da2fd3bed11ecb484123905b.cu | /*
* CUDA blur
* Kevin Yuh, 2014
* Revised by Nailen Matschke, 2016
* Revised by Loko Kung, 2018
*/
#include "blur.cuh"
#include <cstdio>
#include <cuda_runtime.h>
#include "cuda_header.cuh"
CUDA_CALLABLE
void cuda_blur_kernel_convolution(uint thread_index, const float* gpu_raw_data,
const float* gpu_blur_v, float* gpu_out_data,
const unsigned int n_frames,
const unsigned int blur_v_size) {
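    // Causal FIR convolution: out[n] = sum over i of raw[n - i] * blur_v[i]
    // for 0 <= i <= min(n, blur_v_size - 1), i.e. each output frame mixes the
    // current and preceding input frames weighted by the impulse response.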
// TODO: Implement the necessary convolution function that should be
// completed for each thread_index. Use the CPU implementation in
// blur.cpp as a reference.
gpu_out_data[thread_index] = 0.0;
for(int i=0;i<blur_v_size;i++){
if(i>thread_index)
break;
gpu_out_data[thread_index] += gpu_raw_data[thread_index-i]*gpu_blur_v[i];
}
}
__global__
void cuda_blur_kernel(const float *gpu_raw_data, const float *gpu_blur_v,
float *gpu_out_data, int n_frames, int blur_v_size) {
// TODO: Compute the current thread index.
uint thread_index;
thread_index = blockDim.x*blockIdx.x+threadIdx.x;
// TODO: Update the while loop to handle all indices for this thread.
// Remember to advance the index as necessary.
while (thread_index<n_frames) {
// Do computation for this thread index
cuda_blur_kernel_convolution(thread_index, gpu_raw_data,
gpu_blur_v, gpu_out_data,
n_frames, blur_v_size);
// TODO: Update the thread index
        thread_index += blockDim.x * gridDim.x;
}
}
float cuda_call_blur_kernel(const unsigned int blocks,
const unsigned int threads_per_block,
const float *raw_data,
const float *blur_v,
float *out_data,
const unsigned int n_frames,
const unsigned int blur_v_size) {
// Use the CUDA machinery for recording time
cudaEvent_t start_gpu, stop_gpu;
float time_milli = -1;
cudaEventCreate(&start_gpu);
cudaEventCreate(&stop_gpu);
cudaEventRecord(start_gpu);
// TODO: Allocate GPU memory for the raw input data (either audio file
    //       data or randomly generated data). The data is of type float and
// has n_frames elements. Then copy the data in raw_data into the
// GPU memory you allocated.
float* gpu_raw_data;
cudaMalloc((void **) &gpu_raw_data, n_frames*sizeof(float));
cudaMemcpy(gpu_raw_data,raw_data,n_frames*sizeof(float),cudaMemcpyHostToDevice);
// TODO: Allocate GPU memory for the impulse signal (for now global GPU
    //       memory is fine). The data is of type float and has blur_v_size
// elements. Then copy the data in blur_v into the GPU memory you
// allocated.
float* gpu_blur_v;
cudaMalloc((void **) &gpu_blur_v,blur_v_size*sizeof(float));
cudaMemcpy(gpu_blur_v,blur_v,blur_v_size*sizeof(float),cudaMemcpyHostToDevice);
// TODO: Allocate GPU memory to store the output audio signal after the
// convolution. The data is of type float and has n_frames elements.
// Initialize the data as necessary.
float* gpu_out_data;
cudaMalloc((void **) &gpu_out_data,n_frames*sizeof(float));
cudaMemset(gpu_out_data,0.0,n_frames*sizeof(float));
// TODO: Appropriately call the kernel function.
cuda_blur_kernel<<<blocks,threads_per_block>>>(gpu_raw_data, gpu_blur_v,gpu_out_data,n_frames,blur_v_size);
// Check for errors on kernel call
cudaError err = cudaGetLastError();
if (cudaSuccess != err)
fprintf(stderr, "Error %s\n", cudaGetErrorString(err));
else
fprintf(stderr, "No kernel error detected\n");
// TODO: Now that kernel calls have finished, copy the output signal
// back from the GPU to host memory. (We store this channel's result
// in out_data on the host.)
cudaMemcpy(out_data,gpu_out_data,n_frames*sizeof(float),cudaMemcpyDeviceToHost);
// TODO: Now that we have finished our computations on the GPU, free the
// GPU resources.
cudaFree(gpu_out_data);
cudaFree(gpu_blur_v);
cudaFree(gpu_raw_data);
// Stop the recording timer and return the computation time
cudaEventRecord(stop_gpu);
cudaEventSynchronize(stop_gpu);
cudaEventElapsedTime(&time_milli, start_gpu, stop_gpu);
return time_milli;
}
|
0aaed97201c4c2aca2efb556444c168d68a2586a.hip | // !!! This is a file automatically generated by hipify!!!
// Includes, system
// #include <stdio.h>
// #include <stdlib.h>
// Includes, cuda
// #include <hip/hip_runtime.h>
// #include <rocblas.h>
// Includes, cuda helper functions
// #include <helper_cuda.h>
// For the functors
#include "detail/ctc_helper.h"
#include "ctc.h"
const int warp_size = 32;
template<int NT, typename T, typename Rop>
struct CTAReduce;
template<int NT, typename T, typename Rop>
struct CTAReduce {
enum { Size = NT, Capacity = NT };
struct Storage { T shared[Capacity]; };
__device__ static T reduce(int tid, T x, Storage& storage, int count, Rop g) {
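        // Two-stage block reduction: fold the shared-memory array in half on
        // each pass until one warp's worth of partials remains, then finish
        // the last 32 elements with register-to-register warp shuffles.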
T* s = storage.shared;
s[tid] = x;
__syncthreads();
// Fold the data in half with each pass.
#pragma unroll
for(int offset = NT / 2; offset >= warp_size; offset /= 2) {
if(tid + offset < count && tid < offset) {
// Read from the right half and store to the left half.
x = g(x, s[offset + tid]);
s[tid] = x;
}
__syncthreads();
}
T shuff;
for (int offset = warp_size / 2; offset > 0; offset /= 2) {
shuff = __shfl_down_sync(0xffffffff, x, offset);
if (tid + offset < count && tid < offset)
x = g(x, shuff);
}
return x;
}
};
template <int NT, typename Iop, typename Rop, typename T>
__global__ void reduce_rows(Iop f, Rop g, const T* input, T* output,
int num_rows, int num_cols) {
typedef CTAReduce<NT, T, Rop> R;
__shared__ typename R::Storage storage;
int tid = threadIdx.x;
int idx = tid;
int col = blockIdx.x;
T curr;
// Each block works on a column
if (idx < num_rows)
curr = f(input[idx + col*num_rows]);
idx += NT;
while (idx < num_rows) {
curr = g(curr, f(input[idx + col*num_rows]));
idx += NT;
}
// Sum thread-totals over the CTA.
curr = R::reduce(tid, curr, storage, num_rows, g);
// Store result in out
if (tid == 0)
output[col] = curr;
}
template <int NT, typename Iop, typename Rop, typename T>
__global__ void reduce_cols(Iop f, Rop g, const T* input, T* output,
int num_rows, int num_cols) {
__shared__ T s[NT];
int warps_per_block = NT / warp_size;
int row = blockDim.x * blockIdx.x + threadIdx.x;
int col = threadIdx.y;
T curr;
if (row < num_rows && col < num_cols) {
curr = f(input[row + col*num_rows]);
col += blockDim.y;
while (col < num_cols) {
curr = g(curr, f(input[row + col*num_rows]));
col += blockDim.y;
}
}
s[threadIdx.x * warps_per_block + threadIdx.y] = curr;
__syncthreads();
// Reduce
if (threadIdx.y == 0 && row < num_rows) {
#pragma unroll
for (int i = 1; i < warps_per_block && i < num_cols; ++i)
curr = g(curr, s[i + threadIdx.x * warps_per_block]);
output[row] = curr;
}
}
struct ReduceHelper {
template<typename T, typename Iof, typename Rof>
static void impl(Iof f, Rof g, const T* input, T* output, int num_rows, int num_cols, bool axis, hipStream_t stream) {
int grid_size;
if (axis) {
grid_size = num_cols;
hipLaunchKernelGGL(( reduce_rows<128>), dim3(grid_size), dim3(128), 0, stream,
f, g, input, output, num_rows, num_cols);
} else {
dim3 tpb(warp_size, 128 / warp_size);
grid_size = (num_cols + warp_size - 1)/warp_size;
hipLaunchKernelGGL(( reduce_cols<128>), dim3(grid_size), dim3(tpb), 0, stream,
f, g, input, output, num_rows, num_cols);
}
}
};
template<typename T, typename Iof, typename Rof>
ctcStatus_t reduce(Iof f, Rof g, const T* input, T* output, int rows, int cols, bool axis, hipStream_t stream) {
ReduceHelper::impl(f, g, input, output, rows, cols, axis, stream);
hipStreamSynchronize(stream);
hipError_t err = hipGetLastError();
if (err != hipSuccess)
return CTC_STATUS_EXECUTION_FAILED;
return CTC_STATUS_SUCCESS;
}
ctcStatus_t reduce_negate(const float *input, float *output, int rows, int cols, bool axis, hipStream_t stream) {
return reduce(ctc_helper::negate<float>(), ctc_helper::add<float>(), input, output, rows, cols, axis, stream);
}
ctcStatus_t reduce_exp(const float *input, float *output, int rows, int cols, bool axis, hipStream_t stream) {
return reduce(ctc_helper::exponential<float>(), ctc_helper::add<float>(), input, output, rows, cols, axis, stream);
}
ctcStatus_t reduce_max(const float *input, float *output, int rows, int cols, bool axis, hipStream_t stream) {
return reduce(ctc_helper::identity<float>(), ctc_helper::maximum<float>(),input, output, rows, cols, axis, stream);
}
| 0aaed97201c4c2aca2efb556444c168d68a2586a.cu | // Includes, system
// #include <stdio.h>
// #include <stdlib.h>
// Includes, cuda
// #include <cuda_runtime.h>
// #include <cublas_v2.h>
// Includes, cuda helper functions
// #include <helper_cuda.h>
// For the functors
#include "detail/ctc_helper.h"
#include "ctc.h"
const int warp_size = 32;
template<int NT, typename T, typename Rop>
struct CTAReduce;
template<int NT, typename T, typename Rop>
struct CTAReduce {
enum { Size = NT, Capacity = NT };
struct Storage { T shared[Capacity]; };
__device__ static T reduce(int tid, T x, Storage& storage, int count, Rop g) {
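        // Two-stage block reduction: fold the shared-memory array in half on
        // each pass until one warp's worth of partials remains, then finish
        // the last 32 elements with register-to-register warp shuffles.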
T* s = storage.shared;
s[tid] = x;
__syncthreads();
// Fold the data in half with each pass.
#pragma unroll
for(int offset = NT / 2; offset >= warp_size; offset /= 2) {
if(tid + offset < count && tid < offset) {
// Read from the right half and store to the left half.
x = g(x, s[offset + tid]);
s[tid] = x;
}
__syncthreads();
}
T shuff;
for (int offset = warp_size / 2; offset > 0; offset /= 2) {
shuff = __shfl_down_sync(0xffffffff, x, offset);
if (tid + offset < count && tid < offset)
x = g(x, shuff);
}
return x;
}
};
template <int NT, typename Iop, typename Rop, typename T>
__global__ void reduce_rows(Iop f, Rop g, const T* input, T* output,
int num_rows, int num_cols) {
typedef CTAReduce<NT, T, Rop> R;
__shared__ typename R::Storage storage;
int tid = threadIdx.x;
int idx = tid;
int col = blockIdx.x;
T curr;
// Each block works on a column
if (idx < num_rows)
curr = f(input[idx + col*num_rows]);
idx += NT;
while (idx < num_rows) {
curr = g(curr, f(input[idx + col*num_rows]));
idx += NT;
}
// Sum thread-totals over the CTA.
curr = R::reduce(tid, curr, storage, num_rows, g);
// Store result in out
if (tid == 0)
output[col] = curr;
}
template <int NT, typename Iop, typename Rop, typename T>
__global__ void reduce_cols(Iop f, Rop g, const T* input, T* output,
int num_rows, int num_cols) {
__shared__ T s[NT];
int warps_per_block = NT / warp_size;
int row = blockDim.x * blockIdx.x + threadIdx.x;
int col = threadIdx.y;
T curr;
if (row < num_rows && col < num_cols) {
curr = f(input[row + col*num_rows]);
col += blockDim.y;
while (col < num_cols) {
curr = g(curr, f(input[row + col*num_rows]));
col += blockDim.y;
}
}
s[threadIdx.x * warps_per_block + threadIdx.y] = curr;
__syncthreads();
// Reduce
if (threadIdx.y == 0 && row < num_rows) {
#pragma unroll
for (int i = 1; i < warps_per_block && i < num_cols; ++i)
curr = g(curr, s[i + threadIdx.x * warps_per_block]);
output[row] = curr;
}
}
struct ReduceHelper {
template<typename T, typename Iof, typename Rof>
static void impl(Iof f, Rof g, const T* input, T* output, int num_rows, int num_cols, bool axis, cudaStream_t stream) {
int grid_size;
if (axis) {
grid_size = num_cols;
reduce_rows<128><<<grid_size, 128, 0, stream>>>
(f, g, input, output, num_rows, num_cols);
} else {
dim3 tpb(warp_size, 128 / warp_size);
grid_size = (num_cols + warp_size - 1)/warp_size;
reduce_cols<128><<<grid_size, tpb, 0, stream>>>
(f, g, input, output, num_rows, num_cols);
}
}
};
template<typename T, typename Iof, typename Rof>
ctcStatus_t reduce(Iof f, Rof g, const T* input, T* output, int rows, int cols, bool axis, cudaStream_t stream) {
ReduceHelper::impl(f, g, input, output, rows, cols, axis, stream);
cudaStreamSynchronize(stream);
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
return CTC_STATUS_EXECUTION_FAILED;
return CTC_STATUS_SUCCESS;
}
ctcStatus_t reduce_negate(const float *input, float *output, int rows, int cols, bool axis, cudaStream_t stream) {
return reduce(ctc_helper::negate<float>(), ctc_helper::add<float>(), input, output, rows, cols, axis, stream);
}
ctcStatus_t reduce_exp(const float *input, float *output, int rows, int cols, bool axis, cudaStream_t stream) {
return reduce(ctc_helper::exponential<float>(), ctc_helper::add<float>(), input, output, rows, cols, axis, stream);
}
ctcStatus_t reduce_max(const float *input, float *output, int rows, int cols, bool axis, cudaStream_t stream) {
return reduce(ctc_helper::identity<float>(), ctc_helper::maximum<float>(),input, output, rows, cols, axis, stream);
}
|
91366ec9985249243447bbc753162cd98db3643b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void subsample(float *input, float *output, float *weight, float *bias, int input_n, int input_h, int input_w, int kH, int kW, int dH, int dW)
{
// iterators
int xx, yy;
// output size
int output_w = (input_w - kW) / dW + 1;
int output_h = (input_h - kH) / dH + 1;
// compute offsets based on thread/block ID
int o = blockIdx.x;
int i = o;
int k = blockIdx.x % input_n;
int xx_start = threadIdx.x;
int xx_end = output_w;
int xx_step = blockDim.x;
int yy_start = blockDim.y*blockIdx.y + threadIdx.y;
int yy_end = output_h;
int yy_step = blockDim.y*gridDim.y;
// select input/output plane
output = output + o*output_w*output_h;
input = input + i*input_w*input_h;
// Get the good mask for (k,i) (k out, i in)
float the_weight = weight[k];
// Initialize to the bias
float the_bias = bias[k];
// For all output pixels...
for(yy = yy_start; yy < yy_end; yy+=yy_step) {
for(xx = xx_start; xx < xx_end; xx+=xx_step) {
// Compute the mean of the input image...
float *ptr_input = input + yy*dH*input_w + xx*dW;
float *ptr_output = output + yy*output_w + xx;
float sum = 0;
int kx, ky;
for(ky = 0; ky < kH; ky++) {
for(kx = 0; kx < kW; kx++)
sum += ptr_input[kx];
ptr_input += input_w; // next input line
}
// Update output
*ptr_output = the_weight*sum + the_bias;
}
}
} | 91366ec9985249243447bbc753162cd98db3643b.cu | #include "includes.h"
__global__ void subsample(float *input, float *output, float *weight, float *bias, int input_n, int input_h, int input_w, int kH, int kW, int dH, int dW)
{
// iterators
int xx, yy;
// output size
int output_w = (input_w - kW) / dW + 1;
int output_h = (input_h - kH) / dH + 1;
// compute offsets based on thread/block ID
int o = blockIdx.x;
int i = o;
int k = blockIdx.x % input_n;
int xx_start = threadIdx.x;
int xx_end = output_w;
int xx_step = blockDim.x;
int yy_start = blockDim.y*blockIdx.y + threadIdx.y;
int yy_end = output_h;
int yy_step = blockDim.y*gridDim.y;
// select input/output plane
output = output + o*output_w*output_h;
input = input + i*input_w*input_h;
// Get the good mask for (k,i) (k out, i in)
float the_weight = weight[k];
// Initialize to the bias
float the_bias = bias[k];
// For all output pixels...
for(yy = yy_start; yy < yy_end; yy+=yy_step) {
for(xx = xx_start; xx < xx_end; xx+=xx_step) {
// Compute the mean of the input image...
float *ptr_input = input + yy*dH*input_w + xx*dW;
float *ptr_output = output + yy*output_w + xx;
float sum = 0;
int kx, ky;
for(ky = 0; ky < kH; ky++) {
for(kx = 0; kx < kW; kx++)
sum += ptr_input[kx];
ptr_input += input_w; // next input line
}
// Update output
*ptr_output = the_weight*sum + the_bias;
}
}
} |
f9fe1ccedd4148598e3f5dede27403bae59dd8d1.hip | // !!! This is a file automatically generated by hipify!!!
/////////////////////////////////////////////////////////////////////////
// Parallel Computing Assignment 3
// Chris Jimenez
// 5/1/14
// This CUDA program finds the max integer in an array of random integers.
// This program DOES NOT use shared memory and DOES take thread
// divergence into consideration. The modification can be seen
// in the kernel function with the use of the WARP_SIZE defined var.
//
/////////////////////////////////////////////////////////////////////////
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
//define numebr of integers...
#define NUM_OF_INTEGERS 8192
//define max integer
#define MAX 100000
#define WARP_SIZE 32
///////////////////////////////////
/*The following is dependent on whatever GPU this program is running on:
if running on the NYU GPUs, the max threads per block is 512.
Running on an NVIDIA GeForce GT 650M (on personal machine), the max threads
per block is 1024
*/
#define THREADS_PER_BLOCK 512
#define NUM_BLOCKS NUM_OF_INTEGERS/THREADS_PER_BLOCK
/****** Function declarations */
void fill_array();
__global__ void get_max(int *array);
/********************************/
/////////////////////////////////////////////////////////
/*******************************************************/
/* Function fills the givne array a with random integers */
void fill_array(int *a){
int i;
time_t t;
/* Intializes random number generator */
srand((unsigned) time(&t));
for(i = 0; i < NUM_OF_INTEGERS; i++){
        a[i] = random() % MAX;
}
}
/*******************************************************/
/* Kernel Function finds the max integer in given array by
using reduction technique. Ultimately, the largest
will be located at the 0th position of the array */
__global__ void get_max(int *array){
int temp;
int index = threadIdx.x + (blockDim.x * blockIdx.x);
int nTotalThreads = NUM_OF_INTEGERS; // Total number of active threads
while(nTotalThreads > WARP_SIZE)
{
int halfPoint = nTotalThreads / 2; // divide by two
// only the first half of the threads will be active.
if (index < halfPoint){
temp = array[ index + halfPoint ];
if (temp > array[ index ]) {
array[index] = temp;
}
}
__syncthreads();
nTotalThreads = nTotalThreads / 2; // divide by two.
}
// at this point...nTotalThreads == 32
// that means that array[0:31] has the top
// 32 values...
}
/*******************************************************/
//Main function.....
int main(int argc, char *argv[]){
int *h_array; //array of random integers....
int *d_array; //device copy...
int max = 0;
printf("Initializing data...\n");
//allocating space for the array on host
h_array = (int *) malloc(NUM_OF_INTEGERS * sizeof(int));
//fill in random array
fill_array(h_array);
    //allocate space for the array on device
hipMalloc( (void **)&d_array, sizeof(int) * NUM_OF_INTEGERS );
//Copy array from host to device...
hipMemcpy(d_array, h_array, sizeof(int) * NUM_OF_INTEGERS, hipMemcpyHostToDevice);
    //call the kernel
hipLaunchKernelGGL(( get_max), dim3(NUM_BLOCKS),dim3(THREADS_PER_BLOCK), 0, 0, d_array);
//Copy array from device to host...
hipMemcpy(h_array, d_array, sizeof(int) * NUM_OF_INTEGERS, hipMemcpyDeviceToHost);
//given the top 32 largest numbers, search through to get max...
for(int i = 0; i < WARP_SIZE; i++){
if( max < h_array[i]){
max = h_array[i];
}
}
//print max value...
    printf("The max integer in the array is: %d\n", max);
printf("Cleaning up...\n");
free(h_array);
hipFree(d_array);
return 0;
} | f9fe1ccedd4148598e3f5dede27403bae59dd8d1.cu | /////////////////////////////////////////////////////////////////////////
// Parallel Computing Assignment 3
// Chris Jimenez
// 5/1/14
// This CUDA program finds the max integer in an array of random integers.
// This program DOES NOT use shared meemory and DOES take thread
// divergaence in to consideration. The modification can be seen
// in the kernel function with the use of the WARP_SIZE defined var.
//
/////////////////////////////////////////////////////////////////////////
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
//define numebr of integers...
#define NUM_OF_INTEGERS 8192
//define max integer
#define MAX 100000
#define WARP_SIZE 32
///////////////////////////////////
/*The following is dependent on whatever GPU this program is running on:
if running on the NYU GPUs, the max threads per block is 512.
Running on an NVIDIA GeForce GT 650M (on personal machine), the max threads
per block is 1024
*/
#define THREADS_PER_BLOCK 512
#define NUM_BLOCKS NUM_OF_INTEGERS/THREADS_PER_BLOCK
/****** Function declarations */
void fill_array();
__global__ void get_max(int *array);
/********************************/
/////////////////////////////////////////////////////////
/*******************************************************/
/* Function fills the givne array a with random integers */
void fill_array(int *a){
int i;
time_t t;
/* Intializes random number generator */
srand((unsigned) time(&t));
for(i = 0; i < NUM_OF_INTEGERS; i++){
        a[i] = random() % MAX;
}
}
/*******************************************************/
/* Kernel Function finds the max integer in given array by
using reduction technique. Ultimately, the largest
will be located at the 0th position of the array */
__global__ void get_max(int *array){
int temp;
int index = threadIdx.x + (blockDim.x * blockIdx.x);
int nTotalThreads = NUM_OF_INTEGERS; // Total number of active threads
while(nTotalThreads > WARP_SIZE)
{
int halfPoint = nTotalThreads / 2; // divide by two
// only the first half of the threads will be active.
if (index < halfPoint){
temp = array[ index + halfPoint ];
if (temp > array[ index ]) {
array[index] = temp;
}
}
__syncthreads();
nTotalThreads = nTotalThreads / 2; // divide by two.
}
// at this point...nTotalThreads == 32
// that means that array[0:31] has the top
// 32 values...
}
/*******************************************************/
//Main function.....
int main(int argc, char *argv[]){
int *h_array; //array of random integers....
int *d_array; //device copy...
int max = 0;
printf("Initializing data...\n");
//allocating space for the array on host
h_array = (int *) malloc(NUM_OF_INTEGERS * sizeof(int));
//fill in random array
fill_array(h_array);
    //allocate space for the array on device
cudaMalloc( (void **)&d_array, sizeof(int) * NUM_OF_INTEGERS );
//Copy array from host to device...
cudaMemcpy(d_array, h_array, sizeof(int) * NUM_OF_INTEGERS, cudaMemcpyHostToDevice);
    //call the kernel
get_max<<<NUM_BLOCKS,THREADS_PER_BLOCK>>>(d_array);
//Copy array from device to host...
cudaMemcpy(h_array, d_array, sizeof(int) * NUM_OF_INTEGERS, cudaMemcpyDeviceToHost);
//given the top 32 largest numbers, search through to get max...
for(int i = 0; i < WARP_SIZE; i++){
if( max < h_array[i]){
max = h_array[i];
}
}
//print max value...
printf("The max integer in the array is: %d\n", h_array[0]);
printf("Cleaning up...\n");
free(h_array);
cudaFree(d_array);
return 0;
} |
70e312e6854b04d791e03930e864beef4f2869dc.hip | // !!! This is a file automatically generated by hipify!!!
#include <string>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <iostream>
#include <iomanip>
#include "loadSaveImage.h"
#include <thrust/extrema.h>
#define checkCudaErrors(val) check( (val), #val, __FILE__, __LINE__)
template<typename T>
void check(T err, const char* const func, const char* const file, const int line) {
if (err != hipSuccess) {
std::cerr << "CUDA error at: " << file << ":" << line << std::endl;
std::cerr << hipGetErrorString(err) << " " << func << std::endl;
exit(1);
}
}
static float *d_x__;
static float *d_y__;
static float *d_logY__;
static unsigned int *d_cdf__;
static const int numBins = 1024;
size_t numRows__;
size_t numCols__;
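// Converts the separate R, G, B channel buffers to CIE xyY: chromaticity (x, y)
// plus log10 luminance (log_Y), one thread per pixel. The log-luminance channel
// is what the histogram/CDF used later for tone mapping is built from.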
__global__ void rgb_to_xyY(
float* d_r,
float* d_g,
float* d_b,
float* d_x,
float* d_y,
float* d_log_Y,
float delta,
int num_pixels_y,
int num_pixels_x )
{
int ny = num_pixels_y;
int nx = num_pixels_x;
int2 image_index_2d = make_int2( ( blockIdx.x * blockDim.x ) + threadIdx.x, ( blockIdx.y * blockDim.y ) + threadIdx.y );
int image_index_1d = ( nx * image_index_2d.y ) + image_index_2d.x;
if ( image_index_2d.x < nx && image_index_2d.y < ny )
{
float r = d_r[ image_index_1d ];
float g = d_g[ image_index_1d ];
float b = d_b[ image_index_1d ];
float X = ( r * 0.4124f ) + ( g * 0.3576f ) + ( b * 0.1805f );
float Y = ( r * 0.2126f ) + ( g * 0.7152f ) + ( b * 0.0722f );
float Z = ( r * 0.0193f ) + ( g * 0.1192f ) + ( b * 0.9505f );
float L = X + Y + Z;
float x = X / L;
float y = Y / L;
float log_Y = log10f( delta + Y );
d_x[ image_index_1d ] = x;
d_y[ image_index_1d ] = y;
d_log_Y[ image_index_1d ] = log_Y;
}
}
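// Scales the integer CDF to [0,1] by dividing each bin by the final (largest) entry,
// one thread per bin.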
__global__ void normalize_cdf(
unsigned int* d_input_cdf,
float* d_output_cdf,
int n
)
{
const float normalization_constant = 1.f / d_input_cdf[n - 1];
int global_index_1d = ( blockIdx.x * blockDim.x ) + threadIdx.x;
if ( global_index_1d < n )
{
unsigned int input_value = d_input_cdf[ global_index_1d ];
float output_value = input_value * normalization_constant;
d_output_cdf[ global_index_1d ] = output_value;
}
}
/* Copied from Mike's IPython notebook *
Modified double constants -> float *
Perform tone mapping based upon new *
luminance scaling */
__global__ void tonemap(
float* d_x,
float* d_y,
float* d_log_Y,
float* d_cdf_norm,
float* d_r_new,
float* d_g_new,
float* d_b_new,
float min_log_Y,
float max_log_Y,
float log_Y_range,
int num_bins,
int num_pixels_y,
int num_pixels_x )
{
int ny = num_pixels_y;
int nx = num_pixels_x;
int2 image_index_2d = make_int2( ( blockIdx.x * blockDim.x ) + threadIdx.x, ( blockIdx.y * blockDim.y ) + threadIdx.y );
int image_index_1d = ( nx * image_index_2d.y ) + image_index_2d.x;
if ( image_index_2d.x < nx && image_index_2d.y < ny )
{
float x = d_x[ image_index_1d ];
float y = d_y[ image_index_1d ];
float log_Y = d_log_Y[ image_index_1d ];
int bin_index = min( num_bins - 1, int( (num_bins * ( log_Y - min_log_Y ) ) / log_Y_range ) );
float Y_new = d_cdf_norm[ bin_index ];
float X_new = x * ( Y_new / y );
float Z_new = ( 1 - x - y ) * ( Y_new / y );
float r_new = ( X_new * 3.2406f ) + ( Y_new * -1.5372f ) + ( Z_new * -0.4986f );
float g_new = ( X_new * -0.9689f ) + ( Y_new * 1.8758f ) + ( Z_new * 0.0415f );
float b_new = ( X_new * 0.0557f ) + ( Y_new * -0.2040f ) + ( Z_new * 1.0570f );
d_r_new[ image_index_1d ] = r_new;
d_g_new[ image_index_1d ] = g_new;
d_b_new[ image_index_1d ] = b_new;
}
}
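// Host-side setup: loads the HDR image, splits the interleaved pixel data into
// per-channel buffers, copies them to the device, runs rgb_to_xyY, and hands back
// the device log-luminance buffer plus a zeroed histogram/CDF buffer of numBins
// entries for the caller to fill.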
void preProcess(float** d_luminance, unsigned int** d_cdf,
size_t *numRows, size_t *numCols,
unsigned int *numberOfBins,
const std::string &filename) {
checkCudaErrors(hipFree(0));
float *imgPtr;
loadImageHDR(filename, &imgPtr, &numRows__, &numCols__);
*numRows = numRows__;
*numCols = numCols__;
size_t numPixels = numRows__ * numCols__;
float *red = new float[numPixels];
float *green = new float[numPixels];
float *blue = new float[numPixels];
for (size_t i = 0; i < numPixels; ++i) {
blue[i] = imgPtr[3 * i + 0];
green[i] = imgPtr[3 * i + 1];
red[i] = imgPtr[3 * i + 2];
}
delete[] imgPtr;
float *d_red, *d_green, *d_blue;
size_t channelSize = sizeof(float) * numPixels;
checkCudaErrors(hipMalloc(&d_red, channelSize));
checkCudaErrors(hipMalloc(&d_green, channelSize));
checkCudaErrors(hipMalloc(&d_blue, channelSize));
checkCudaErrors(hipMalloc(&d_x__, channelSize));
checkCudaErrors(hipMalloc(&d_y__, channelSize));
checkCudaErrors(hipMalloc(&d_logY__, channelSize));
checkCudaErrors(hipMemcpy(d_red, red, channelSize, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_green, green, channelSize, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_blue, blue, channelSize, hipMemcpyHostToDevice));
const dim3 blockSize(32, 16, 1);
const dim3 gridSize( (numCols__ + blockSize.x - 1) / blockSize.x,
(numRows__ + blockSize.y - 1) / blockSize.y, 1);
hipLaunchKernelGGL(( rgb_to_xyY), dim3(gridSize), dim3(blockSize), 0, 0, d_red, d_green, d_blue,
d_x__, d_y__, d_logY__,
.0001f, numRows__, numCols__);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
*d_luminance = d_logY__;
*numberOfBins = numBins;
checkCudaErrors(hipMalloc(&d_cdf__, sizeof(unsigned int) * numBins));
checkCudaErrors(hipMemset(d_cdf__, 0, sizeof(unsigned int) * numBins));
*d_cdf = d_cdf__;
checkCudaErrors(hipFree(d_red));
checkCudaErrors(hipFree(d_green));
checkCudaErrors(hipFree(d_blue));
delete[] red;
delete[] green;
delete[] blue;
}
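// Host-side finish: normalizes the previously computed CDF, runs the tonemap kernel
// to produce new R, G, B channels, copies them back to the host, re-interleaves
// them, and writes the result with saveImageHDR.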
void postProcess(const std::string& output_file,
size_t numRows, size_t numCols,
float min_log_Y, float max_log_Y) {
const int numPixels = numRows__ * numCols__;
const int numThreads = 192;
float *d_cdf_normalized;
checkCudaErrors(hipMalloc(&d_cdf_normalized, sizeof(float) * numBins));
hipLaunchKernelGGL(( normalize_cdf), dim3((numBins + numThreads - 1) / numThreads),
dim3(numThreads), 0, 0, d_cdf__,
d_cdf_normalized,
numBins);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
float *h_red, *h_green, *h_blue;
float *d_red, *d_green, *d_blue;
h_red = new float[numPixels];
h_green = new float[numPixels];
h_blue = new float[numPixels];
checkCudaErrors(hipMalloc(&d_red, sizeof(float) * numPixels));
checkCudaErrors(hipMalloc(&d_green, sizeof(float) * numPixels));
checkCudaErrors(hipMalloc(&d_blue, sizeof(float) * numPixels));
float log_Y_range = max_log_Y - min_log_Y;
const dim3 blockSize(32, 16, 1);
const dim3 gridSize( (numCols + blockSize.x - 1) / blockSize.x,
(numRows + blockSize.y - 1) / blockSize.y );
hipLaunchKernelGGL(( tonemap), dim3(gridSize), dim3(blockSize), 0, 0, d_x__, d_y__, d_logY__,
d_cdf_normalized,
d_red, d_green, d_blue,
min_log_Y, max_log_Y,
log_Y_range, numBins,
numRows, numCols);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
checkCudaErrors(hipMemcpy(h_red, d_red, sizeof(float) * numPixels, hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(h_green, d_green, sizeof(float) * numPixels, hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(h_blue, d_blue, sizeof(float) * numPixels, hipMemcpyDeviceToHost));
float *imageHDR = new float[numPixels * 3];
for (int i = 0; i < numPixels; ++i) {
imageHDR[3 * i + 0] = h_blue[i];
imageHDR[3 * i + 1] = h_green[i];
imageHDR[3 * i + 2] = h_red[i];
}
saveImageHDR(imageHDR, numRows, numCols, output_file);
delete[] imageHDR;
delete[] h_red;
delete[] h_green;
delete[] h_blue;
checkCudaErrors(hipFree(d_cdf_normalized));
}
void cleanupGlobalMemory(void)
{
checkCudaErrors(hipFree(d_x__));
checkCudaErrors(hipFree(d_y__));
checkCudaErrors(hipFree(d_logY__));
checkCudaErrors(hipFree(d_cdf__));
} | 70e312e6854b04d791e03930e864beef4f2869dc.cu | #include <string>
#include <cuda.h>
#include <cuda_runtime.h>
#include <iostream>
#include <iomanip>
#include "loadSaveImage.h"
#include <thrust/extrema.h>
#define checkCudaErrors(val) check( (val), #val, __FILE__, __LINE__)
template<typename T>
void check(T err, const char* const func, const char* const file, const int line) {
if (err != cudaSuccess) {
std::cerr << "CUDA error at: " << file << ":" << line << std::endl;
std::cerr << cudaGetErrorString(err) << " " << func << std::endl;
exit(1);
}
}
static float *d_x__;
static float *d_y__;
static float *d_logY__;
static unsigned int *d_cdf__;
static const int numBins = 1024;
size_t numRows__;
size_t numCols__;
__global__ void rgb_to_xyY(
float* d_r,
float* d_g,
float* d_b,
float* d_x,
float* d_y,
float* d_log_Y,
float delta,
int num_pixels_y,
int num_pixels_x )
{
int ny = num_pixels_y;
int nx = num_pixels_x;
int2 image_index_2d = make_int2( ( blockIdx.x * blockDim.x ) + threadIdx.x, ( blockIdx.y * blockDim.y ) + threadIdx.y );
int image_index_1d = ( nx * image_index_2d.y ) + image_index_2d.x;
if ( image_index_2d.x < nx && image_index_2d.y < ny )
{
float r = d_r[ image_index_1d ];
float g = d_g[ image_index_1d ];
float b = d_b[ image_index_1d ];
float X = ( r * 0.4124f ) + ( g * 0.3576f ) + ( b * 0.1805f );
float Y = ( r * 0.2126f ) + ( g * 0.7152f ) + ( b * 0.0722f );
float Z = ( r * 0.0193f ) + ( g * 0.1192f ) + ( b * 0.9505f );
float L = X + Y + Z;
float x = X / L;
float y = Y / L;
float log_Y = log10f( delta + Y );
d_x[ image_index_1d ] = x;
d_y[ image_index_1d ] = y;
d_log_Y[ image_index_1d ] = log_Y;
}
}
__global__ void normalize_cdf(
unsigned int* d_input_cdf,
float* d_output_cdf,
int n
)
{
const float normalization_constant = 1.f / d_input_cdf[n - 1];
int global_index_1d = ( blockIdx.x * blockDim.x ) + threadIdx.x;
if ( global_index_1d < n )
{
unsigned int input_value = d_input_cdf[ global_index_1d ];
float output_value = input_value * normalization_constant;
d_output_cdf[ global_index_1d ] = output_value;
}
}
/* Copied from Mike's IPython notebook *
Modified double constants -> float *
Perform tone mapping based upon new *
luminance scaling */
__global__ void tonemap(
float* d_x,
float* d_y,
float* d_log_Y,
float* d_cdf_norm,
float* d_r_new,
float* d_g_new,
float* d_b_new,
float min_log_Y,
float max_log_Y,
float log_Y_range,
int num_bins,
int num_pixels_y,
int num_pixels_x )
{
int ny = num_pixels_y;
int nx = num_pixels_x;
int2 image_index_2d = make_int2( ( blockIdx.x * blockDim.x ) + threadIdx.x, ( blockIdx.y * blockDim.y ) + threadIdx.y );
int image_index_1d = ( nx * image_index_2d.y ) + image_index_2d.x;
if ( image_index_2d.x < nx && image_index_2d.y < ny )
{
float x = d_x[ image_index_1d ];
float y = d_y[ image_index_1d ];
float log_Y = d_log_Y[ image_index_1d ];
int bin_index = min( num_bins - 1, int( (num_bins * ( log_Y - min_log_Y ) ) / log_Y_range ) );
float Y_new = d_cdf_norm[ bin_index ];
float X_new = x * ( Y_new / y );
float Z_new = ( 1 - x - y ) * ( Y_new / y );
float r_new = ( X_new * 3.2406f ) + ( Y_new * -1.5372f ) + ( Z_new * -0.4986f );
float g_new = ( X_new * -0.9689f ) + ( Y_new * 1.8758f ) + ( Z_new * 0.0415f );
float b_new = ( X_new * 0.0557f ) + ( Y_new * -0.2040f ) + ( Z_new * 1.0570f );
d_r_new[ image_index_1d ] = r_new;
d_g_new[ image_index_1d ] = g_new;
d_b_new[ image_index_1d ] = b_new;
}
}
void preProcess(float** d_luminance, unsigned int** d_cdf,
size_t *numRows, size_t *numCols,
unsigned int *numberOfBins,
const std::string &filename) {
checkCudaErrors(cudaFree(0));
float *imgPtr;
loadImageHDR(filename, &imgPtr, &numRows__, &numCols__);
*numRows = numRows__;
*numCols = numCols__;
size_t numPixels = numRows__ * numCols__;
float *red = new float[numPixels];
float *green = new float[numPixels];
float *blue = new float[numPixels];
for (size_t i = 0; i < numPixels; ++i) {
blue[i] = imgPtr[3 * i + 0];
green[i] = imgPtr[3 * i + 1];
red[i] = imgPtr[3 * i + 2];
}
delete[] imgPtr;
float *d_red, *d_green, *d_blue;
size_t channelSize = sizeof(float) * numPixels;
checkCudaErrors(cudaMalloc(&d_red, channelSize));
checkCudaErrors(cudaMalloc(&d_green, channelSize));
checkCudaErrors(cudaMalloc(&d_blue, channelSize));
checkCudaErrors(cudaMalloc(&d_x__, channelSize));
checkCudaErrors(cudaMalloc(&d_y__, channelSize));
checkCudaErrors(cudaMalloc(&d_logY__, channelSize));
checkCudaErrors(cudaMemcpy(d_red, red, channelSize, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_green, green, channelSize, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_blue, blue, channelSize, cudaMemcpyHostToDevice));
const dim3 blockSize(32, 16, 1);
const dim3 gridSize( (numCols__ + blockSize.x - 1) / blockSize.x,
(numRows__ + blockSize.y - 1) / blockSize.y, 1);
rgb_to_xyY<<<gridSize, blockSize>>>(d_red, d_green, d_blue,
d_x__, d_y__, d_logY__,
.0001f, numRows__, numCols__);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
*d_luminance = d_logY__;
*numberOfBins = numBins;
checkCudaErrors(cudaMalloc(&d_cdf__, sizeof(unsigned int) * numBins));
checkCudaErrors(cudaMemset(d_cdf__, 0, sizeof(unsigned int) * numBins));
*d_cdf = d_cdf__;
checkCudaErrors(cudaFree(d_red));
checkCudaErrors(cudaFree(d_green));
checkCudaErrors(cudaFree(d_blue));
delete[] red;
delete[] green;
delete[] blue;
}
void postProcess(const std::string& output_file,
size_t numRows, size_t numCols,
float min_log_Y, float max_log_Y) {
const int numPixels = numRows__ * numCols__;
const int numThreads = 192;
float *d_cdf_normalized;
checkCudaErrors(cudaMalloc(&d_cdf_normalized, sizeof(float) * numBins));
normalize_cdf<<< (numBins + numThreads - 1) / numThreads,
numThreads>>>(d_cdf__,
d_cdf_normalized,
numBins);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
float *h_red, *h_green, *h_blue;
float *d_red, *d_green, *d_blue;
h_red = new float[numPixels];
h_green = new float[numPixels];
h_blue = new float[numPixels];
checkCudaErrors(cudaMalloc(&d_red, sizeof(float) * numPixels));
checkCudaErrors(cudaMalloc(&d_green, sizeof(float) * numPixels));
checkCudaErrors(cudaMalloc(&d_blue, sizeof(float) * numPixels));
float log_Y_range = max_log_Y - min_log_Y;
const dim3 blockSize(32, 16, 1);
const dim3 gridSize( (numCols + blockSize.x - 1) / blockSize.x,
(numRows + blockSize.y - 1) / blockSize.y );
tonemap<<<gridSize, blockSize>>>(d_x__, d_y__, d_logY__,
d_cdf_normalized,
d_red, d_green, d_blue,
min_log_Y, max_log_Y,
log_Y_range, numBins,
numRows, numCols);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaMemcpy(h_red, d_red, sizeof(float) * numPixels, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(h_green, d_green, sizeof(float) * numPixels, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(h_blue, d_blue, sizeof(float) * numPixels, cudaMemcpyDeviceToHost));
float *imageHDR = new float[numPixels * 3];
for (int i = 0; i < numPixels; ++i) {
imageHDR[3 * i + 0] = h_blue[i];
imageHDR[3 * i + 1] = h_green[i];
imageHDR[3 * i + 2] = h_red[i];
}
saveImageHDR(imageHDR, numRows, numCols, output_file);
delete[] imageHDR;
delete[] h_red;
delete[] h_green;
delete[] h_blue;
checkCudaErrors(cudaFree(d_cdf_normalized));
}
void cleanupGlobalMemory(void)
{
checkCudaErrors(cudaFree(d_x__));
checkCudaErrors(cudaFree(d_y__));
checkCudaErrors(cudaFree(d_logY__));
checkCudaErrors(cudaFree(d_cdf__));
} |
58b606aadcfac80469a4d22d9efe09c7a736cdc9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
-- (C) Copyright 2013 King Abdullah University of Science and Technology
Authors:
Ahmad Abdelfattah ([email protected])
David Keyes ([email protected])
Hatem Ltaief ([email protected])
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the King Abdullah University of Science and
Technology nor the names of its contributors may be used to endorse
or promote products derived from this software without specific prior
written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
**/
#include "syhemv_offset_core.cuh"
#if(SM >= 30)
#define chemv_upper_bs (32)
#define chemv_upper_ty (2)
#define chemv_upper_by (2)
#define chemv_lower_bs (32)
#define chemv_lower_ty (8)
#define chemv_lower_by (2)
#else
#define chemv_upper_bs (64)
#define chemv_upper_ty (8)
#define chemv_upper_by (2)
#define chemv_lower_bs (32)
#define chemv_lower_ty (4)
#define chemv_lower_by (2)
#endif
/*************************************************************************************/
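/*
 * Driver for the offset (sub-matrix) HEMV: the matrix is processed in chemv_bs-wide
 * block stripes. The leading `offset` rows/columns are skipped by advancing dA/dX/dY
 * and shrinking m; the residual offset within a stripe is passed to the kernels as
 * offset_. The ngpus/gpu_gid globals referenced below (presumably provided by the
 * included core header) appear to assign stripes to GPUs in round-robin fashion.
 */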
int kblas_chemv_offset_driver( char uplo, int m,
cuFloatComplex alpha, cuFloatComplex *dA, int lda,
cuFloatComplex *dX, int incx,
cuFloatComplex beta, cuFloatComplex *dY, int incy,
int offset,
hipStream_t stream = 0)
{
// handle the case when incx and/or incy is -ve
if(incx < 0) dX -= (m-1) * incx;
if(incy < 0) dY -= (m-1) * incy;
if(uplo == 'U' || uplo == 'u')
{
/** configuration params **/
const int chemv_bs = chemv_upper_bs;
const int thread_x = chemv_bs;
const int thread_y = chemv_upper_ty;
const int elements_per_thread = (chemv_bs/(2*thread_y)) ;
/** end configuration params **/
/** offset necessary calculation **/
int offset_ = offset % chemv_bs;
int total_blocks_skipped = offset / chemv_bs;
int my_skipped_blocks = total_blocks_skipped/ngpus;
if(gpu_gid < (total_blocks_skipped%ngpus)) my_skipped_blocks += 1;
int ref_gpu = total_blocks_skipped%ngpus;
int new_gpu_gid = (gpu_gid - ref_gpu + ngpus) % ngpus;
// Advance pointers accordingly
dA += my_skipped_blocks * chemv_bs * lda;
dA += total_blocks_skipped * chemv_bs;
dX += total_blocks_skipped * chemv_bs * incx;
dY += total_blocks_skipped * chemv_bs * incy;
m -= total_blocks_skipped * chemv_bs;
/** end offset necessary calculation **/
int mod = m % chemv_bs;
int nstripes = m / chemv_bs + (mod != 0);
int blocks = nstripes/ngpus;
if(new_gpu_gid < (nstripes%ngpus) ) blocks += 1;
dim3 dimBlock(thread_x, thread_y);
dim3 dimGrid(blocks,1);
dim3 dimGrid_(blocks, chemv_upper_by);
if(blocks == 0) return 0;
if(mod == 0)
{
hipLaunchKernelGGL(( syhemvu_special_d_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread>), dim3(dimGrid), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, nstripes, offset_);
hipLaunchKernelGGL(( syhemvu_special_nd_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, nstripes, offset_);
}
else
{
hipLaunchKernelGGL(( syhemvu_generic_d_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread>), dim3(dimGrid), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, nstripes, offset_);
const int irregular_part = mod % elements_per_thread;
/**
* The upper case kernel for irregular dimensions has an extra template parameter.
* This parameter must be among the values listed in the switch-case statement below.
* The possible values are in the range 0 - (elements_per_thread-1)
* Make sure these values are updated whenever you change the configuration parameters.
**/
switch(irregular_part)
{
case 0:hipLaunchKernelGGL(( syhemvu_generic_nd_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread, 0>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, nstripes, offset_); break;
case 1:hipLaunchKernelGGL(( syhemvu_generic_nd_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread, 1>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, nstripes, offset_); break;
case 2:hipLaunchKernelGGL(( syhemvu_generic_nd_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread, 2>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, nstripes, offset_); break;
case 3:hipLaunchKernelGGL(( syhemvu_generic_nd_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread, 3>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, nstripes, offset_); break;
case 4:hipLaunchKernelGGL(( syhemvu_generic_nd_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread, 4>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, nstripes, offset_); break;
case 5:hipLaunchKernelGGL(( syhemvu_generic_nd_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread, 5>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, nstripes, offset_); break;
case 6:hipLaunchKernelGGL(( syhemvu_generic_nd_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread, 6>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, nstripes, offset_); break;
case 7:hipLaunchKernelGGL(( syhemvu_generic_nd_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread, 7>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, nstripes, offset_); break;
case 8:hipLaunchKernelGGL(( syhemvu_generic_nd_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread, 8>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, nstripes, offset_); break;
// return error otherwise:
default: printf("CHEMV-UPPER ERROR: improper template parameter. Please read the inline documentation for this function. \n"); return -1;
}
}
}
else if(uplo == 'L' || uplo == 'l')
{
/** configuration params **/
const int chemv_bs = chemv_lower_bs;
const int thread_x = chemv_bs;
const int thread_y = chemv_lower_ty;
const int elements_per_thread = (chemv_bs/(2*thread_y)) ;
/** end configuration params **/
/** offset necessary calculation **/
int offset_ = offset % chemv_bs;
int total_blocks_skipped = offset / chemv_bs;
int my_skipped_blocks = total_blocks_skipped/ngpus;
if(gpu_gid < (total_blocks_skipped%ngpus)) my_skipped_blocks += 1;
int ref_gpu = total_blocks_skipped%ngpus;
int new_gpu_gid = (gpu_gid - ref_gpu + ngpus) % ngpus;
// Advance pointers accordingly
dA += my_skipped_blocks * chemv_bs * lda;
dA += total_blocks_skipped * chemv_bs;
dX += total_blocks_skipped * chemv_bs * incx;
dY += total_blocks_skipped * chemv_bs * incy;
m -= total_blocks_skipped * chemv_bs;
/** end offset necessary calculation **/
int mod = m % chemv_bs;
int nstripes = m / chemv_bs + (mod != 0);
int blocks = nstripes/ngpus;
if(new_gpu_gid < (nstripes%ngpus) ) blocks += 1;
dim3 dimBlock(thread_x, thread_y);
dim3 dimGrid(blocks,1);
dim3 dimGrid_(blocks, chemv_lower_by);
if(blocks == 0) return 0;
if(mod == 0)
{
hipLaunchKernelGGL(( syhemvl_special_d_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread>), dim3(dimGrid), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, nstripes, offset_);
hipLaunchKernelGGL(( syhemvl_special_nd_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, nstripes, offset_);
}
else
{
hipLaunchKernelGGL(( syhemvl_generic_d_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread>), dim3(dimGrid), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, nstripes, offset_);
hipLaunchKernelGGL(( syhemvl_generic_nd_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, nstripes, offset_);
}
}
else{printf("Upper/Lower mode %c is not supported \n", uplo); return -1;}
return 0;
}
/*************************************************************************************/
extern "C"
int kblas_chemv_offset( char uplo, int m,
cuFloatComplex alpha, cuFloatComplex *dA, int lda,
cuFloatComplex *dX, int incx,
cuFloatComplex beta, cuFloatComplex *dY, int incy,
int offset)
{
return kblas_chemv_offset_driver(uplo, m, alpha, dA, lda, dX, incx, beta, dY, incy, offset);
}
/*************************************************************************************/
extern "C"
int kblas_chemv_offset_async( char uplo, int m,
cuFloatComplex alpha, cuFloatComplex *dA, int lda,
cuFloatComplex *dX, int incx,
cuFloatComplex beta, cuFloatComplex *dY, int incy,
int offset,
hipStream_t stream)
{
return kblas_chemv_offset_driver(uplo, m, alpha, dA, lda, dX, incx, beta, dY, incy, offset, stream);
}
/*************************************************************************************/
| 58b606aadcfac80469a4d22d9efe09c7a736cdc9.cu | /**
-- (C) Copyright 2013 King Abdullah University of Science and Technology
Authors:
Ahmad Abdelfattah ([email protected])
David Keyes ([email protected])
Hatem Ltaief ([email protected])
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the King Abdullah University of Science and
Technology nor the names of its contributors may be used to endorse
or promote products derived from this software without specific prior
written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
**/
#include "syhemv_offset_core.cuh"
#if(SM >= 30)
#define chemv_upper_bs (32)
#define chemv_upper_ty (2)
#define chemv_upper_by (2)
#define chemv_lower_bs (32)
#define chemv_lower_ty (8)
#define chemv_lower_by (2)
#else
#define chemv_upper_bs (64)
#define chemv_upper_ty (8)
#define chemv_upper_by (2)
#define chemv_lower_bs (32)
#define chemv_lower_ty (4)
#define chemv_lower_by (2)
#endif
/*************************************************************************************/
int kblas_chemv_offset_driver( char uplo, int m,
cuFloatComplex alpha, cuFloatComplex *dA, int lda,
cuFloatComplex *dX, int incx,
cuFloatComplex beta, cuFloatComplex *dY, int incy,
int offset,
cudaStream_t stream = 0)
{
// handle the case when incx and/or incy is -ve
if(incx < 0) dX -= (m-1) * incx;
if(incy < 0) dY -= (m-1) * incy;
if(uplo == 'U' || uplo == 'u')
{
/** configuration params **/
const int chemv_bs = chemv_upper_bs;
const int thread_x = chemv_bs;
const int thread_y = chemv_upper_ty;
const int elements_per_thread = (chemv_bs/(2*thread_y)) ;
/** end configuration params **/
/** offset necessary calculation **/
int offset_ = offset % chemv_bs;
int total_blocks_skipped = offset / chemv_bs;
int my_skipped_blocks = total_blocks_skipped/ngpus;
if(gpu_gid < (total_blocks_skipped%ngpus)) my_skipped_blocks += 1;
int ref_gpu = total_blocks_skipped%ngpus;
int new_gpu_gid = (gpu_gid - ref_gpu + ngpus) % ngpus;
// Advance pointers accordingly
dA += my_skipped_blocks * chemv_bs * lda;
dA += total_blocks_skipped * chemv_bs;
dX += total_blocks_skipped * chemv_bs * incx;
dY += total_blocks_skipped * chemv_bs * incy;
m -= total_blocks_skipped * chemv_bs;
/** end offset necessary calculation **/
int mod = m % chemv_bs;
int nstripes = m / chemv_bs + (mod != 0);
int blocks = nstripes/ngpus;
if(new_gpu_gid < (nstripes%ngpus) ) blocks += 1;
dim3 dimBlock(thread_x, thread_y);
dim3 dimGrid(blocks,1);
dim3 dimGrid_(blocks, chemv_upper_by);
if(blocks == 0) return 0;
if(mod == 0)
{
syhemvu_special_d_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread><<<dimGrid, dimBlock, 0, stream>>> ( m, alpha, dA, lda, dX, incx, beta, dY, incy, nstripes, offset_);
syhemvu_special_nd_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread><<<dimGrid_, dimBlock, 0, stream>>> ( m, alpha, dA, lda, dX, incx, beta, dY, incy, nstripes, offset_);
}
else
{
syhemvu_generic_d_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread><<<dimGrid, dimBlock, 0, stream>>> ( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, nstripes, offset_);
const int irregular_part = mod % elements_per_thread;
/**
* The upper case kernel for irregular dimensions has an extra template parameter.
* This parameter must be among the values listed in the switch-case statement below.
* The possible values are in the range 0 - (elements_per_thread-1)
* Make sure these values are updated whenever you change the configuration parameters.
**/
switch(irregular_part)
{
case 0: syhemvu_generic_nd_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread, 0><<<dimGrid_, dimBlock, 0, stream>>>( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, nstripes, offset_); break;
case 1: syhemvu_generic_nd_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread, 1><<<dimGrid_, dimBlock, 0, stream>>>( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, nstripes, offset_); break;
case 2: syhemvu_generic_nd_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread, 2><<<dimGrid_, dimBlock, 0, stream>>>( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, nstripes, offset_); break;
case 3: syhemvu_generic_nd_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread, 3><<<dimGrid_, dimBlock, 0, stream>>>( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, nstripes, offset_); break;
case 4: syhemvu_generic_nd_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread, 4><<<dimGrid_, dimBlock, 0, stream>>>( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, nstripes, offset_); break;
case 5: syhemvu_generic_nd_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread, 5><<<dimGrid_, dimBlock, 0, stream>>>( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, nstripes, offset_); break;
case 6: syhemvu_generic_nd_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread, 6><<<dimGrid_, dimBlock, 0, stream>>>( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, nstripes, offset_); break;
case 7: syhemvu_generic_nd_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread, 7><<<dimGrid_, dimBlock, 0, stream>>>( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, nstripes, offset_); break;
case 8: syhemvu_generic_nd_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread, 8><<<dimGrid_, dimBlock, 0, stream>>>( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, nstripes, offset_); break;
// return error otherwise:
default: printf("CHEMV-UPPER ERROR: improper template parameter. Please read the inline documentation for this function. \n"); return -1;
}
}
}
else if(uplo == 'L' || uplo == 'l')
{
/** configuration params **/
const int chemv_bs = chemv_lower_bs;
const int thread_x = chemv_bs;
const int thread_y = chemv_lower_ty;
const int elements_per_thread = (chemv_bs/(2*thread_y)) ;
/** end configuration params **/
/** offset necessary calculation **/
int offset_ = offset % chemv_bs;
int total_blocks_skipped = offset / chemv_bs;
int my_skipped_blocks = total_blocks_skipped/ngpus;
if(gpu_gid < (total_blocks_skipped%ngpus)) my_skipped_blocks += 1;
int ref_gpu = total_blocks_skipped%ngpus;
int new_gpu_gid = (gpu_gid - ref_gpu + ngpus) % ngpus;
// Advance pointers accordingly
dA += my_skipped_blocks * chemv_bs * lda;
dA += total_blocks_skipped * chemv_bs;
dX += total_blocks_skipped * chemv_bs * incx;
dY += total_blocks_skipped * chemv_bs * incy;
m -= total_blocks_skipped * chemv_bs;
/** end offset necessary calculation **/
int mod = m % chemv_bs;
int nstripes = m / chemv_bs + (mod != 0);
int blocks = nstripes/ngpus;
if(new_gpu_gid < (nstripes%ngpus) ) blocks += 1;
dim3 dimBlock(thread_x, thread_y);
dim3 dimGrid(blocks,1);
dim3 dimGrid_(blocks, chemv_lower_by);
if(blocks == 0) return 0;
if(mod == 0)
{
syhemvl_special_d_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread><<<dimGrid, dimBlock, 0, stream>>> ( m, alpha, dA, lda, dX, incx, beta, dY, incy, nstripes, offset_);
syhemvl_special_nd_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread><<<dimGrid_, dimBlock, 0, stream>>> ( m, alpha, dA, lda, dX, incx, beta, dY, incy, nstripes, offset_);
}
else
{
syhemvl_generic_d_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread><<<dimGrid, dimBlock, 0, stream>>> ( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, nstripes, offset_);
syhemvl_generic_nd_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread><<<dimGrid_, dimBlock, 0, stream>>> ( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, nstripes, offset_);
}
}
else{printf("Upper/Lower mode %c is not supported \n", uplo); return -1;}
return 0;
}
/*************************************************************************************/
extern "C"
int kblas_chemv_offset( char uplo, int m,
cuFloatComplex alpha, cuFloatComplex *dA, int lda,
cuFloatComplex *dX, int incx,
cuFloatComplex beta, cuFloatComplex *dY, int incy,
int offset)
{
return kblas_chemv_offset_driver(uplo, m, alpha, dA, lda, dX, incx, beta, dY, incy, offset);
}
/*************************************************************************************/
extern "C"
int kblas_chemv_offset_async( char uplo, int m,
cuFloatComplex alpha, cuFloatComplex *dA, int lda,
cuFloatComplex *dX, int incx,
cuFloatComplex beta, cuFloatComplex *dY, int incy,
int offset,
cudaStream_t stream)
{
return kblas_chemv_offset_driver(uplo, m, alpha, dA, lda, dX, incx, beta, dY, incy, offset, stream);
}
/*************************************************************************************/
|
f4f3b733a24a2aa35719af422871bbacd7182d5c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Hello World in CUDA
*
* CS3210
*
* This program starts from the "hello world" string and should print "HELLO WORLD"
*
*/
#include <stdio.h>
#include <string.h> // for strlen()
#define N 32
// #define DISCRETE
__global__ void hello(char *a, int len)
{
//int tid = threadIdx.x;
int block_index_in_grid = blockIdx.x * (gridDim.y * gridDim.z) + blockIdx.y * (gridDim.z) + blockIdx.z;
int thread_index_in_block = threadIdx.x * (blockDim.y * blockDim.z) + threadIdx.y * (blockDim.z) + threadIdx.z;
int tid = block_index_in_grid * (blockDim.x * blockDim.y * blockDim.z) + thread_index_in_block;
if (tid >= len)
return;
a[tid] += 'A' - 'a';
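// Adding ('A' - 'a') (i.e. -32) uppercases ASCII lowercase letters; the '@'
// separator (0x40) becomes a space (0x20), which is why the expected output
// reads "HELLO WORLD".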
}
int main()
{
// original string
char a[N] = "hello@world";
// length
int len = strlen(a);
// pointer to the string on device
char* ad;
// pointer to the final string on host
char* ah;
// CUDA returned error code
hipError_t rc;
//allocate space for the string on device (GPU) memory
hipMalloc((void**)&ad, N);
hipMemcpy(ad, a, N, hipMemcpyHostToDevice);
// launch the kernel
dim3 gridDim(2, 2, 2);
dim3 blockDim(2, 4);
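// 2x2x2 = 8 blocks of 2x4x1 = 8 threads each, i.e. 64 threads in total;
// only the first len = strlen("hello@world") = 11 of them modify a character.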
hipLaunchKernelGGL(( hello), dim3(gridDim), dim3(blockDim), 0, 0, ad, len);
hipDeviceSynchronize();
// for discrete GPUs, get the data from device memory to host memory
hipMemcpy(a, ad, N, hipMemcpyDeviceToHost);
ah = a;
// was there any error?
rc = hipGetLastError();
if (rc != hipSuccess)
printf("Last CUDA error %s\n", hipGetErrorString(rc));
// print final string
printf("%s!\n", ah);
// free memory
hipFree(ad);
return 0;
}
| f4f3b733a24a2aa35719af422871bbacd7182d5c.cu | /*
* Hello World in CUDA
*
* CS3210
*
* This program starts from the "hello world" string and should print "HELLO WORLD"
*
*/
#include <stdio.h>
#include <string.h> // for strlen()
#define N 32
// #define DISCRETE
__global__ void hello(char *a, int len)
{
//int tid = threadIdx.x;
int block_index_in_grid = blockIdx.x * (gridDim.y * gridDim.z) + blockIdx.y * (gridDim.z) + blockIdx.z;
int thread_index_in_block = threadIdx.x * (blockDim.y * blockDim.z) + threadIdx.y * (blockDim.z) + threadIdx.z;
int tid = block_index_in_grid * (blockDim.x * blockDim.y * blockDim.z) + thread_index_in_block;
if (tid >= len)
return;
a[tid] += 'A' - 'a';
}
int main()
{
// original string
char a[N] = "hello@world";
// length
int len = strlen(a);
// pointer to the string on device
char* ad;
// pointer to the final string on host
char* ah;
// CUDA returned error code
cudaError_t rc;
//allocate space for the string on device (GPU) memory
cudaMalloc((void**)&ad, N);
cudaMemcpy(ad, a, N, cudaMemcpyHostToDevice);
// launch the kernel
dim3 gridDim(2, 2, 2);
dim3 blockDim(2, 4);
hello<<<gridDim, blockDim>>>(ad, len);
cudaDeviceSynchronize();
// for discrete GPUs, get the data from device memory to host memory
cudaMemcpy(a, ad, N, cudaMemcpyDeviceToHost);
ah = a;
// was there any error?
rc = cudaGetLastError();
if (rc != cudaSuccess)
printf("Last CUDA error %s\n", cudaGetErrorString(rc));
// print final string
printf("%s!\n", ah);
// free memory
cudaFree(ad);
return 0;
}
|
3d2bd06c9262bbf160ad479a26e1c97580229754.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/********
* A fast algorithm to calculate the exact radiological path through a pxiel or voxel space
* Filip Jacobs, etc.
*/
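/*
 * Geometry sketch (summary of the parameterization used below): the ray from
 * detector element X1 to source X2 is written as p(alpha) = X1 + alpha*(X2 - X1),
 * so its intersection with the plane x = boundary_voxel_x + i*volumn_x lies at
 *   alpha = (boundary_voxel_x + i*volumn_x - X1.x) / (X2.x - X1.x),
 * and analogously for y and z. alpha_min / alpha_max are the parameter values where
 * the ray enters and leaves the reconstruction volume. The numbered comments,
 * e.g. (9), presumably refer to equation numbers in the Jacobs et al. paper cited above.
 */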
__global__ void forward_ray_driven_3d_kernel_correction(float *d_f , float *d_proj_correction, float *d_proj_data, float sin_theta, float cos_theta, int command)
{
// d_f: 3D object array; d_f[i,j,k] = d_f [k*M*N+j*M+i];
// d_proj_data: 2D projection acquired at the angle of t_theta
// d_proj_correction: 2D projection correction, (output of this function. i.e. c(i) in the paper)
int Detector_x_idx = threadIdx.x + blockDim.x * blockIdx.x;
int Detector_z_idx = blockIdx.y;
int proj_pixel_index = Detector_z_idx * R + Detector_x_idx;
// Source position (X2): coordinate in (x,y,z) system .
float vertex_x2_x,vertex_x2_y,vertex_x2_z;
if (CT_style==0) //CBCT
{
vertex_x2_x = Source_x * cos_theta - Source_y * sin_theta;
vertex_x2_y = Source_x * sin_theta + Source_y * cos_theta;
vertex_x2_z = Source_z;
}
else if (CT_style==1) //FBCT
{
vertex_x2_x = Source_x * cos_theta - Source_y * sin_theta;
vertex_x2_y = Source_x * sin_theta + Source_y * cos_theta;
vertex_x2_z = Detector_Zmin + Detector_z_idx * Detector_pixel_x;
}
else if (CT_style==2) //parallel beam
{
vertex_x2_x = Source_x * cos_theta - (Detector_Ymin + Detector_x_idx * Detector_pixel_x) * sin_theta;
vertex_x2_y = Source_x * sin_theta + (Detector_Ymin + Detector_x_idx * Detector_pixel_x) * cos_theta;
vertex_x2_z = Detector_Zmin + Detector_z_idx * Detector_pixel_x;
}
// Detector element center positions (X1): Coordinate in (x,y,z) system ---
float vertex_x1_x = DOD * cos_theta - (Detector_Ymin + Detector_x_idx * Detector_pixel_x) * sin_theta;
float vertex_x1_y = DOD * sin_theta + (Detector_Ymin + Detector_x_idx * Detector_pixel_x) * cos_theta;
float vertex_x1_z = Detector_Zmin + Detector_z_idx * Detector_pixel_x;
// Notice: in this system, vertex_x1_x < 0 < vertex_x2_x
float inv_x_diff = 1.0f / (vertex_x2_x - vertex_x1_x);
float inv_y_diff = 1.0f / (vertex_x2_y - vertex_x1_y);
float inv_z_diff = 1.0f / (vertex_x2_z - vertex_x1_z);
/*****************************************/
float alpha_x_min= 0.0f, alpha_y_min= 0.0f, alpha_z_min= 0.0f;
float alpha_x_max= 0.0f, alpha_y_max= 0.0f, alpha_z_max= 0.0f;
float alpha_min= 0.0f, alpha_max= 0.0f;
int i_min=0, j_min=0, k_min=0;
int i_max=0, j_max=0, k_max=0;
int i=0, j=0, k=0;
int voxel_i=0, voxel_j=0, voxel_k=0;
float alpha_x=0.0f, alpha_y=0.0f, alpha_z=0.0f;
float one_ray_sum = 0.0f;
float one_ray_length = 0.0f;
float alpha_c= 0.0f;
float d_x1_x2= 0.0f;
int N_total_sec=0;
int next_alpha_index;
/**** Step 1 :find out alpha_min, alpha_max ********/
alpha_min = (boundary_voxel_x + volumn_x*0 - vertex_x1_x )* inv_x_diff; //(9)
alpha_max = (boundary_voxel_x + volumn_x*M - vertex_x1_x )* inv_x_diff;
// Notice: it is not yet determined here which of the two is the parametric value of the first intersection point of the ray with an x-plane
// It depends on whether source or detector lies on the left side of the reconstruction region at this time
alpha_x_min = fmin(alpha_min, alpha_max); //(5)
alpha_x_max = fmax(alpha_min, alpha_max ); //(6)
alpha_min = (boundary_voxel_y + volumn_y*0 - vertex_x1_y )* inv_y_diff;
alpha_max = (boundary_voxel_y + volumn_y*N - vertex_x1_y )* inv_y_diff;
alpha_y_min = fmin(alpha_min, alpha_max); //(7)
alpha_y_max = fmax(alpha_min, alpha_max ); //(8)
alpha_min = (boundary_voxel_z + volumn_z*0 - vertex_x1_z )* inv_z_diff;
alpha_max = (boundary_voxel_z + volumn_z*ZETA - vertex_x1_z )* inv_z_diff;
alpha_z_min = fmin(alpha_min, alpha_max);
alpha_z_max = fmax(alpha_min, alpha_max );
// alpha_min / alpha_max reused
alpha_min = fmax(fmax(alpha_x_min, alpha_y_min), fmax(alpha_y_min, alpha_z_min)); //(3)
// i.e. alpha_min = fmax(alpha_x_min,alpha_y_min,alpha_z_min)
// it indicates the point where the path interacts with the near boundary of reconstruction region
alpha_max = fmin(fmin(alpha_x_max, alpha_y_max), fmin(alpha_y_max, alpha_z_max)); //(4)
// i.e. alpha_max = fmin(alpha_x_max,alpha_y_max,alpha_z_max)
// it indicates the point where the path last interacts with the far boundary of reconstruction region
/********Step 2,3: Find i_max, i_min***************/
if (alpha_max <= alpha_min) // It means the ray does not intersect the volume
one_ray_length = 0.0f ;
else
{
// X direction
if (vertex_x1_x < vertex_x2_x)
{
if (alpha_min == alpha_x_min)
i_min = 1; //(11)
else //if (alpha_min != alpha_x_min)
i_min = floor(( alpha_min*(vertex_x2_x - vertex_x1_x) + vertex_x1_x - boundary_voxel_x)*inv_volumn_x) + 1 ;
//(12)
/* Note: i_min is the index of the 1st x plane where the path interacts inside the reconstruction region
* It is not the index of alpha_x_min
*/
if (alpha_max == alpha_x_max)
i_max = M; //(13)
else //if (alpha_max != alpha_x_max)
i_max = floor(( alpha_max*(vertex_x2_x - vertex_x1_x) + vertex_x1_x - boundary_voxel_x)*inv_volumn_x) ;
//(14)
// Note: i_max is the index of the last x plane where the path interacts with the reconstruction region (inside or boundary)
}
else //if (vertex_x1_x >= vertex_x2_x)
{
if (alpha_min == alpha_x_min)
i_max = M-1; //(15)
else //if (alpha_min != alpha_x_min)
i_max = floor(( alpha_min*(vertex_x2_x - vertex_x1_x) + vertex_x1_x - boundary_voxel_x)*inv_volumn_x) ;
//(16)
if (alpha_max == alpha_x_max)
i_min = 0; //(17)
else //if (alpha_max != alpha_x_max)
i_min = floor(( alpha_max*(vertex_x2_x - vertex_x1_x) + vertex_x1_x - boundary_voxel_x)*inv_volumn_x) + 1 ;
//(18)
}
// Note: overall, i_min is the leftmost x-plane, i_max the rightmost x-plane,
// and the initial point (the first intersection on the boundary) is NOT included.
//Y direction
if (vertex_x1_y < vertex_x2_y)
{
if (alpha_min == alpha_y_min)
j_min = 1;
else //if (alpha_min != alpha_y_min)
j_min = floor(( alpha_min*(vertex_x2_y - vertex_x1_y) + vertex_x1_y - boundary_voxel_y)*inv_volumn_y) + 1 ;
if (alpha_max == alpha_y_max)
j_max = N;
else //if (alpha_max != alpha_y_max)
j_max = floor(( alpha_max*(vertex_x2_y - vertex_x1_y) + vertex_x1_y - boundary_voxel_y)*inv_volumn_y) ;
}
else //if (vertex_x1_y >= vertex_x2_y)
{
if (alpha_min == alpha_y_min)
j_max = N-1;
else //if (alpha_min != alpha_y_min)
j_max = floor(( alpha_min*(vertex_x2_y - vertex_x1_y) + vertex_x1_y - boundary_voxel_y )*inv_volumn_y) ;
if (alpha_max == alpha_y_max)
j_min = 0;
else //if (alpha_max != alpha_y_max)
j_min = floor(( alpha_max*(vertex_x2_y - vertex_x1_y) + vertex_x1_y - boundary_voxel_y )*inv_volumn_y) + 1 ;
}
// Note: overall, j_min is the bottommost y-plane, j_max the topmost y-plane,
// and the initial point (the first intersection on the boundary) is NOT included.
//Z direction
if (vertex_x1_z < vertex_x2_z)
{
if (alpha_min == alpha_z_min)
k_min = 1;
else //if (alpha_min != alpha_z_min)
k_min = floor(( alpha_min*(vertex_x2_z - vertex_x1_z) + vertex_x1_z - boundary_voxel_z )*inv_volumn_z) + 1 ;
if (alpha_max == alpha_z_max)
k_max = ZETA;
else //if (alpha_max != alpha_z_max)
k_max = floor(( alpha_max*(vertex_x2_z - vertex_x1_z) + vertex_x1_z - boundary_voxel_z )*inv_volumn_z) ;
}
else //if (vertex_x1_z >= vertex_x2_z)
{
if (alpha_min == alpha_z_min)
k_max = ZETA-1;
else //if (alpha_min != alpha_z_min)
k_max = floor(( alpha_min*(vertex_x2_z - vertex_x1_z) + vertex_x1_z - boundary_voxel_z )*inv_volumn_z) ;
if (alpha_max == alpha_z_max)
k_min = 0;
else //if (alpha_max != alpha_z_max)
k_min = floor(( alpha_max*(vertex_x2_z - vertex_x1_z) + vertex_x1_z -boundary_voxel_z )*inv_volumn_z) + 1 ;
}
/************ initialization (i,j,k) (alpha_x_1,alpha_y_1,alpha_z_1)**************************/
// Note: (i,j,k) is the current x,y,z plane index (@ the initial point at the boundary)
// Note: (alpha_x,alpha_y,alpha_z) is the next x,y,z plane to go.
N_total_sec = i_max - i_min + 1 + j_max - j_min +1 + k_max-k_min +1;
// i.e. N_p (25)
if (fabs(vertex_x1_x-vertex_x2_x)<volumn_x*1e-6 )
{
alpha_x = MAX_infi;
i = i_min-1;
}
else if (vertex_x1_x < vertex_x2_x)
{
alpha_x = (volumn_x * i_min + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
i = i_min - 1;
}
else if (vertex_x1_x > vertex_x2_x)
{
alpha_x = (volumn_x * i_max + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
i = i_max + 1;
}
// Note: alpha_x_1 is the intersection where the path hit the 1st x plane inside the recon region
if (fabs(vertex_x1_y-vertex_x2_y)<volumn_y*1e-6 )
{
alpha_y = MAX_infi;
j = j_min-1;
}
else if (vertex_x1_y < vertex_x2_y)
{
alpha_y = (volumn_y * j_min + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
j = j_min - 1;
}
else if (vertex_x1_y > vertex_x2_y)
{
alpha_y = (volumn_y * j_max + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
j = j_max + 1;
}
// Note: alpha_y_1 is the intersection where the path hit the 1st y plane inside the recon region
if (fabs(vertex_x1_z-vertex_x2_z)<volumn_z*1e-6 )
{
alpha_z = MAX_infi;
k = k_min-1;
}
else if (vertex_x1_z < vertex_x2_z)
{
alpha_z = (volumn_z * k_min + boundary_voxel_z - vertex_x1_z )* inv_z_diff;
k = k_min - 1;
}
else if (vertex_x1_z > vertex_x2_z)
{
alpha_z = (volumn_z * k_max + boundary_voxel_z - vertex_x1_z )* inv_z_diff;
k = k_max + 1;
}
/************ initialization (voxel_i,voxel_j,voxel_k) **************************/
// Note: (voxel_i,voxel_j,voxel_k) is the current x,y,z voxel index (@ the initial point at the boundary)
if (vertex_x1_x < vertex_x2_x)
voxel_i = i_min-1;
else
voxel_i = i_max;
if (vertex_x1_y < vertex_x2_y)
voxel_j = j_min-1;
else
voxel_j = j_max;
if (vertex_x1_z < vertex_x2_z)
voxel_k = k_min-1;
else
voxel_k = k_max;
/***************** Updating alpha_x, alpha_y, alpha_z, ************************/
// Note: (alpha_x, alpha_y, alpha_z) the intersection where the path hit the next (i.e. 1st here ) x/y/z plane inside the recon
d_x1_x2 = sqrt((vertex_x2_x-vertex_x1_x)*(vertex_x2_x-vertex_x1_x) + (vertex_x2_y-vertex_x1_y)*(vertex_x2_y - vertex_x1_y) + (vertex_x2_z-vertex_x1_z)*(vertex_x2_z-vertex_x1_z) );
alpha_c = alpha_min; // intersection where the path hit the 1st plane at the boundary of recon region
// Note: (i,j,k) is the (x,y,z) plane index of the current intersection (with a certain plane).
// If i, j, or k would not fall on an integer plane index, the index of the preceding plane (along the ray) is used.
while (alpha_max - alpha_c > 1e-16)
{
if ((voxel_i > M-1)||(voxel_i <0) || (voxel_j > N-1)||(voxel_j <0) || (voxel_k > ZETA-1)||(voxel_k <0))
{
alpha_c = alpha_max +1; // to terminate the loop
}
else
{
if ( (alpha_x < alpha_y) && (alpha_x < alpha_z))
// alpha_x is the nearest, so update alpha_x
{
one_ray_length += d_x1_x2 * (alpha_x - alpha_c); //(30)
one_ray_sum += d_x1_x2 * (alpha_x - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i];
//(31)
alpha_c = alpha_x; //(33) Update the current location
N_total_sec = N_total_sec - 1;
if (vertex_x1_x < vertex_x2_x)
{
i++;
voxel_i++;
next_alpha_index = i+1;
}
if (vertex_x1_x > vertex_x2_x)
{
i--; //(29)
voxel_i--;
next_alpha_index = i-1;
}
alpha_x = (volumn_x * next_alpha_index + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
}
else if ( (alpha_y < alpha_x) && (alpha_y < alpha_z) )
// alpha_y is the nearest, so update alpha_y
{
one_ray_length += d_x1_x2 * (alpha_y - alpha_c);
one_ray_sum += d_x1_x2 * (alpha_y - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i];
alpha_c = alpha_y;
N_total_sec = N_total_sec -1;
if (vertex_x1_y < vertex_x2_y)
{
j++;
voxel_j++;
next_alpha_index = j+1;
}
else if (vertex_x1_y > vertex_x2_y)
{
j--;
voxel_j--;
next_alpha_index = j-1;
}
alpha_y = (volumn_y * next_alpha_index + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
}
else if ( (alpha_z < alpha_x) && (alpha_z < alpha_y) )
// alpha_z is the nearest, so update alpha_z
{
one_ray_length += d_x1_x2 * (alpha_z - alpha_c);
one_ray_sum += d_x1_x2 * (alpha_z - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i];
alpha_c = alpha_z;
N_total_sec = N_total_sec -1;
if (vertex_x1_z < vertex_x2_z)
{
k++;
voxel_k++;
next_alpha_index = k+1;
}
else if (vertex_x1_z > vertex_x2_z)
{
k--;
voxel_k--;
next_alpha_index = k-1;
}
alpha_z = (volumn_z * next_alpha_index + boundary_voxel_z - vertex_x1_z )* inv_z_diff;
}
else if ( (alpha_x == alpha_y) && (alpha_x < alpha_z) )
//x = y < z
{
one_ray_length += d_x1_x2 * (alpha_x - alpha_c); //(30)
one_ray_sum += d_x1_x2 * (alpha_x - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i]; //(31)
alpha_c = alpha_x; //(33) Update the current location
N_total_sec = N_total_sec - 2;
if (vertex_x1_x < vertex_x2_x)
{
i = i + 1;
voxel_i = voxel_i +1;
next_alpha_index = i+1;
}
if (vertex_x1_x > vertex_x2_x)
{
i = i - 1; //(29)
voxel_i = voxel_i-1;
next_alpha_index = i-1;
}
alpha_x = (volumn_x * next_alpha_index + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
if (vertex_x1_y < vertex_x2_y)
{
j = j + 1;
voxel_j = voxel_j+1;
next_alpha_index = j+1;
}
else if (vertex_x1_y > vertex_x2_y)
{
j = j - 1;
voxel_j = voxel_j-1;
next_alpha_index = j-1;
}
alpha_y = (volumn_y * next_alpha_index + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
}
else if ( (alpha_x == alpha_z) && (alpha_x < alpha_y))// && (sphere_range<=1.0f) )
// x = z < y;
{
one_ray_length += d_x1_x2 * (alpha_x - alpha_c); //(30)
one_ray_sum += d_x1_x2 * (alpha_x - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i]; //(31)
alpha_c = alpha_x; //(33) Update the current location
N_total_sec = N_total_sec - 2;
if (vertex_x1_x < vertex_x2_x)
{
i = i + 1;
voxel_i = voxel_i +1;
next_alpha_index = i+1;
}
if (vertex_x1_x > vertex_x2_x)
{
i = i - 1; //(29)
voxel_i = voxel_i-1;
next_alpha_index = i-1;
}
alpha_x = (volumn_x * next_alpha_index + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
if (vertex_x1_z < vertex_x2_z)
{
k = k + 1;
voxel_k = voxel_k+1;
next_alpha_index = k+1;
}
else if (vertex_x1_z > vertex_x2_z)
{
k = k - 1;
voxel_k = voxel_k-1;
next_alpha_index = k-1;
}
alpha_z = (volumn_z * next_alpha_index + boundary_voxel_z - vertex_x1_z )* inv_z_diff;
}
else if ( (alpha_y == alpha_z) && (alpha_y < alpha_x))// && (sphere_range<=1.0f) )
// y = z < x
{
one_ray_length += d_x1_x2 * (alpha_y - alpha_c);
one_ray_sum += d_x1_x2 * (alpha_y - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i];
alpha_c = alpha_y;
N_total_sec = N_total_sec -2;
if (vertex_x1_y < vertex_x2_y)
{
j = j + 1;
voxel_j = voxel_j+1;
next_alpha_index = j+1;
}
else if (vertex_x1_y > vertex_x2_y)
{
j = j - 1;
voxel_j = voxel_j-1;
next_alpha_index = j-1;
}
alpha_y = (volumn_y * next_alpha_index + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
if (vertex_x1_z < vertex_x2_z)
{
k = k + 1;
voxel_k = voxel_k+1;
next_alpha_index = k+1;
}
else if (vertex_x1_z > vertex_x2_z)
{
k = k - 1;
voxel_k = voxel_k-1;
next_alpha_index = k-1;
}
alpha_z = (volumn_z * next_alpha_index + boundary_voxel_z - vertex_x1_z )* inv_z_diff;
}
else if ( (alpha_x == alpha_z) && (alpha_x == alpha_y))// && (sphere_range<=1.0f) )
// x=y=z
{
one_ray_length += d_x1_x2 * (alpha_x - alpha_c); //(30)
one_ray_sum += d_x1_x2 * (alpha_x - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i]; //(31)
alpha_c = alpha_x; //(33) Update the current location
N_total_sec = N_total_sec - 3;
if (vertex_x1_x < vertex_x2_x)
{
i = i + 1;
voxel_i = voxel_i +1;
next_alpha_index = i+1;
}
if (vertex_x1_x > vertex_x2_x)
{
i = i - 1; //(29)
voxel_i = voxel_i-1;
next_alpha_index = i-1;
}
alpha_x = (volumn_x * next_alpha_index + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
if (vertex_x1_y < vertex_x2_y)
{
j = j + 1;
voxel_j = voxel_j+1;
next_alpha_index = j+1;
}
else if (vertex_x1_y > vertex_x2_y)
{
j = j - 1;
voxel_j = voxel_j-1;
next_alpha_index = j-1;
}
alpha_y = (volumn_y * next_alpha_index + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
if (vertex_x1_z < vertex_x2_z)
{
k = k + 1;
voxel_k = voxel_k+1;
next_alpha_index = k+1;
}
else if (vertex_x1_z > vertex_x2_z)
{
k = k - 1;
voxel_k = voxel_k-1;
next_alpha_index = k-1;
}
alpha_z = (volumn_z * next_alpha_index + boundary_voxel_z - vertex_x1_z )* inv_z_diff;
}
}
}// end tracing the ray
}//end if the ray interacts with the volume
if (one_ray_length < volumn_z*1e-6)
d_proj_correction[proj_pixel_index] = 0.0;
else
{
if (command == 0)
d_proj_correction[proj_pixel_index] = one_ray_sum; // forward operator
else if (command == 1)
d_proj_correction[proj_pixel_index] = (d_proj_data[proj_pixel_index] - one_ray_sum)/one_ray_length; // projection correction (for SART) // projection correction (for SART)
}
// __syncthreads();
}
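// Back-projection counterpart of the forward kernel above: it reproduces the same
// per-pixel detector-to-source ray setup; the d_f_weightedLenSum / d_f_LenSum
// arguments suggest it accumulates, for each traversed voxel, the intersection
// length and the length-weighted projection correction (as used in SART updates).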
__global__ void backprj_ray_driven_3d_kernel(float *d_f_weightedLenSum , float *d_f_LenSum , float *d_proj_correction, float sin_theta, float cos_theta)
{
int Detector_x_idx = threadIdx.x + blockDim.x * blockIdx.x;
int Detector_z_idx = blockIdx.y;
int proj_pixel_index = Detector_z_idx * R + Detector_x_idx;
// Source position (X2): coordinate in (x,y,z) system .
float vertex_x2_x,vertex_x2_y,vertex_x2_z;
if (CT_style==0) //CBCT
{
vertex_x2_x = Source_x * cos_theta - Source_y * sin_theta;
vertex_x2_y = Source_x * sin_theta + Source_y * cos_theta;
vertex_x2_z = Source_z;
}
else if (CT_style==1) //FBCT
{
vertex_x2_x = Source_x * cos_theta - Source_y * sin_theta;
vertex_x2_y = Source_x * sin_theta + Source_y * cos_theta;
vertex_x2_z = Detector_Zmin + Detector_z_idx * Detector_pixel_x;
}
else if (CT_style==2) //parallel beam
{
vertex_x2_x = Source_x * cos_theta - (Detector_Ymin + Detector_x_idx * Detector_pixel_x) * sin_theta;
vertex_x2_y = Source_x * sin_theta + (Detector_Ymin + Detector_x_idx * Detector_pixel_x) * cos_theta;
vertex_x2_z = Detector_Zmin + Detector_z_idx * Detector_pixel_x;
}
// Detector element center positions (X1): Coordinate in (x,y,z) system ---
float vertex_x1_x = DOD * cos_theta - (Detector_Ymin + Detector_x_idx * Detector_pixel_x) * sin_theta;
float vertex_x1_y = DOD * sin_theta + (Detector_Ymin + Detector_x_idx * Detector_pixel_x) * cos_theta;
float vertex_x1_z = Detector_Zmin + Detector_z_idx * Detector_pixel_x;
// Notice: in this system, vertex_x1_x < 0 < vertex_x2_x
float inv_x_diff = 1.0f / (vertex_x2_x - vertex_x1_x);
float inv_y_diff = 1.0f / (vertex_x2_y - vertex_x1_y);
float inv_z_diff = 1.0f / (vertex_x2_z - vertex_x1_z);
float proj_val = d_proj_correction[ proj_pixel_index];
/*****************************************/
float alpha_x_min= 0.0f, alpha_y_min= 0.0f, alpha_z_min= 0.0f;
float alpha_x_max= 0.0f, alpha_y_max= 0.0f, alpha_z_max= 0.0f;
float alpha_min= 0.0f, alpha_max= 0.0f;
int i_min=0, j_min=0, k_min=0;
int i_max=0, j_max=0, k_max=0;
int i=0, j=0, k=0;
int voxel_i=0, voxel_j=0, voxel_k=0;
float alpha_x=0.0f, alpha_y=0.0f, alpha_z=0.0f;
float alpha_c= 0.0f;
float d_x1_x2= 0.0f;
int N_total_sec=0;
int next_alpha_index;
/**** Step 1 :find out alpha_min, alpha_max ********/
alpha_min = (boundary_voxel_x + volumn_x*0 - vertex_x1_x )* inv_x_diff; //(9)
alpha_max = (boundary_voxel_x + volumn_x*M - vertex_x1_x )* inv_x_diff;
// Notice: it is not yet determined here which of the two is the parametric value of the first intersection point of the ray with an x-plane
// It depends on whether source or detector lies on the left side of the reconstruction region at this time
alpha_x_min = fmin(alpha_min, alpha_max); //(5)
alpha_x_max = fmax(alpha_min, alpha_max ); //(6)
alpha_min = (boundary_voxel_y + volumn_y*0 - vertex_x1_y )* inv_y_diff;
alpha_max = (boundary_voxel_y + volumn_y*N - vertex_x1_y )* inv_y_diff;
alpha_y_min = fmin(alpha_min, alpha_max); //(7)
alpha_y_max = fmax(alpha_min, alpha_max ); //(8)
alpha_min = (boundary_voxel_z + volumn_z*0 - vertex_x1_z )* inv_z_diff;
alpha_max = (boundary_voxel_z + volumn_z*ZETA - vertex_x1_z )* inv_z_diff;
alpha_z_min = fmin(alpha_min, alpha_max);
alpha_z_max = fmax(alpha_min, alpha_max );
// alpha_min / alpha_max reused
alpha_min = fmax(fmax(alpha_x_min, alpha_y_min), fmax(alpha_y_min, alpha_z_min)); //(3)
// i.e. alpha_min = fmax(alpha_x_min,alpha_y_min,alpha_z_min)
// it indicates the point where the path interacts with the near boundary of reconstruction region
alpha_max = fmin(fmin(alpha_x_max, alpha_y_max), fmin(alpha_y_max, alpha_z_max)); //(4)
// i.e. alpha_max = fmin(alpha_x_max,alpha_y_max,alpha_z_max)
// it indicates the point where the path last interacts with the far boundary of reconstruction region
/********Step 2,3: Find i_max, i_min***************/
if (alpha_max <= alpha_min) // It means no interaction of the ray and the volume
{
}
else
{
// X direction
if (vertex_x1_x < vertex_x2_x)
{
if (alpha_min == alpha_x_min)
i_min = 1; //(11)
else //if (alpha_min != alpha_x_min)
i_min = floor(( alpha_min*(vertex_x2_x - vertex_x1_x) + vertex_x1_x - boundary_voxel_x)*inv_volumn_x) + 1 ;
//(12)
/* Note: i_min is the index of the 1st x plane where the path interacts inside the reconstruction region
* It is not the index of alpha_x_min
*/
if (alpha_max == alpha_x_max)
i_max = M; //(13)
else //if (alpha_max != alpha_x_max)
i_max = floor(( alpha_max*(vertex_x2_x - vertex_x1_x) + vertex_x1_x - boundary_voxel_x)*inv_volumn_x) ;
//(14)
// Note: i_max is the index of the last x plane where the path interacts with the reconstruction region (inside or boundary)
}
else //if (vertex_x1_x > vertex_x2_x)
{
if (alpha_min == alpha_x_min)
i_max = M-1; //(15)
else //if (alpha_min != alpha_x_min)
i_max = floor(( alpha_min*(vertex_x2_x - vertex_x1_x) + vertex_x1_x - boundary_voxel_x)*inv_volumn_x) ;
//(16)
if (alpha_max == alpha_x_max)
i_min = 0; //(17)
else //if (alpha_max != alpha_x_max)
i_min = floor(( alpha_max*(vertex_x2_x - vertex_x1_x) + vertex_x1_x - boundary_voxel_x)*inv_volumn_x) + 1 ;
//(18)
}
// Note: overall, i_min is the most left x-plane, i_max the most right x-plane,
// and the initial point (the first interacted position on the boundary) NOT included.
//Y direction
if (vertex_x1_y < vertex_x2_y)
{
if (alpha_min == alpha_y_min)
j_min = 1;
else //f (alpha_min != alpha_y_min)
j_min = floor(( alpha_min*(vertex_x2_y - vertex_x1_y) + vertex_x1_y - boundary_voxel_y)*inv_volumn_y) + 1 ;
if (alpha_max == alpha_y_max)
j_max = N;
else //if (alpha_max != alpha_y_max)
j_max = floor(( alpha_max*(vertex_x2_y - vertex_x1_y) + vertex_x1_y - boundary_voxel_y)*inv_volumn_y) ;
}
else //if (vertex_x1_y > vertex_x2_y)
{
if (alpha_min == alpha_y_min)
j_max = N-1;
else //if (alpha_min != alpha_y_min)
j_max = floor(( alpha_min*(vertex_x2_y - vertex_x1_y) + vertex_x1_y - boundary_voxel_y )*inv_volumn_y) ;
if (alpha_max == alpha_y_max)
j_min = 0;
else //if (alpha_max != alpha_y_max)
j_min = floor(( alpha_max*(vertex_x2_y - vertex_x1_y) + vertex_x1_y - boundary_voxel_y )*inv_volumn_y) + 1 ;
}
// Note: overall, j_min is the most bottom y-plane, j_max the most top y-plane,
// and the initial point (the first interacted position on the boundary) NOT included.
//Z direction
if (vertex_x1_z < vertex_x2_z)
{
if (alpha_min == alpha_z_min)
k_min = 1;
else //if (alpha_min != alpha_z_min)
k_min = floor(( alpha_min*(vertex_x2_z - vertex_x1_z) + vertex_x1_z - boundary_voxel_z )*inv_volumn_z) + 1 ;
if (alpha_max == alpha_z_max)
k_max = ZETA;
else //if (alpha_max != alpha_z_max)
k_max = floor(( alpha_max*(vertex_x2_z - vertex_x1_z) + vertex_x1_z - boundary_voxel_z )*inv_volumn_z) ;
}
else //if (vertex_x1_z > vertex_x2_z)
{
if (alpha_min == alpha_z_min)
k_max = ZETA-1;
else //if (alpha_min != alpha_z_min)
k_max = floor(( alpha_min*(vertex_x2_z - vertex_x1_z) + vertex_x1_z - boundary_voxel_z )*inv_volumn_z) ;
if (alpha_max == alpha_z_max)
k_min = 0;
else //if (alpha_max != alpha_z_max)
k_min = floor(( alpha_max*(vertex_x2_z - vertex_x1_z) + vertex_x1_z -boundary_voxel_z )*inv_volumn_z) + 1 ;
}
/************ initialization (i,j,k), (alpha_x, alpha_y, alpha_z) **************************/
// Note: (i,j,k) is the current x,y,z plane index (@ the initial point at the boundary)
// Note: (alpha_x, alpha_y, alpha_z) is the next x,y,z plane to go.
N_total_sec = i_max - i_min + 1 + j_max - j_min +1 + k_max-k_min +1;
// i.e. N_p (25)
if (fabs(vertex_x1_x-vertex_x2_x)<volumn_x*1e-6 )
{
alpha_x = MAX_infi;
i = i_min-1;
}
else if (vertex_x1_x < vertex_x2_x)
{
alpha_x = (volumn_x * i_min + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
i = i_min - 1;
}
else if (vertex_x1_x > vertex_x2_x)
{
alpha_x = (volumn_x * i_max + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
i = i_max + 1;
}
// Note: alpha_x_1 is the intersection where the path hit the 1st x plane inside the recon region
if (fabs(vertex_x1_y-vertex_x2_y)<volumn_y*1e-6 )
{
alpha_y = MAX_infi;
j = j_min-1;
}
else if (vertex_x1_y < vertex_x2_y)
{
alpha_y = (volumn_y * j_min + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
j = j_min - 1;
}
else if (vertex_x1_y > vertex_x2_y)
{
alpha_y = (volumn_y * j_max + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
j = j_max + 1;
}
// Note: alpha_y_1 is the intersection where the path hit the 1st y plane inside the recon region
if (fabs(vertex_x1_z-vertex_x2_z)<volumn_z*1e-6 )
{
alpha_z = MAX_infi;
k = k_min-1;
}
else if (vertex_x1_z < vertex_x2_z)
{
alpha_z = (volumn_z * k_min + boundary_voxel_z - vertex_x1_z )* inv_z_diff;
k = k_min - 1;
}
else if (vertex_x1_z > vertex_x2_z)
{
alpha_z = (volumn_z * k_max + boundary_voxel_z - vertex_x1_z )* inv_z_diff;
k = k_max + 1;
}
/************ initialization (voxel_i,voxel_j,voxel_k) **************************/
// Note: (voxel_i,voxel_j,voxel_k) is the current x,y,z voxel index (@ the initial point at the boundary)
if (vertex_x1_x < vertex_x2_x)
voxel_i = i_min-1;
else
voxel_i = i_max;
if (vertex_x1_y < vertex_x2_y)
voxel_j = j_min-1;
else
voxel_j = j_max;
if (vertex_x1_z < vertex_x2_z)
voxel_k = k_min-1;
else
voxel_k = k_max;
/***************** Updating alpha_x, alpha_y, alpha_z, ************************/
// Note: (alpha_x, alpha_y, alpha_z) the intersection where the path hit the next (i.e. 1st here ) x/y/z plane inside the recon
d_x1_x2 = sqrt((vertex_x2_x-vertex_x1_x)*(vertex_x2_x-vertex_x1_x) + (vertex_x2_y-vertex_x1_y)*(vertex_x2_y - vertex_x1_y) + (vertex_x2_z-vertex_x1_z)*(vertex_x2_z-vertex_x1_z) );
alpha_c = alpha_min; // intersection where the path hit the 1st plane at the boundary of recon region
// Note : (i,j,k) is the (x,y,z) plane index of the current intersection (with a certain plane)
// If i, j or k would not be an integer, its predecessor (along the ray) is used
while (alpha_max - alpha_c > 1e-8)
{
if ((voxel_i > M-1)||(voxel_i <0) || (voxel_j > N-1)||(voxel_j <0) || (voxel_k > ZETA-1)||(voxel_k <0))
{
alpha_c = alpha_max +1; // to terminate the loop
}
else
{
if ( (alpha_x < alpha_y) && (alpha_x < alpha_z))
// alpha_x is the nearest, so update alpha_x
{
// d_f_weightedLenSum[voxel_k*M*N + voxel_j*M + voxel_i]+=d_x1_x2 * (alpha_x - alpha_c) * proj_val;
// d_f_LenSum[voxel_k*M*N + voxel_j*M + voxel_i]+=d_x1_x2 * (alpha_x - alpha_c);
atomicAdd(d_f_weightedLenSum + voxel_k*M*N + voxel_j*M + voxel_i, d_x1_x2 * (alpha_x - alpha_c) * proj_val);
atomicAdd(d_f_LenSum + voxel_k*M*N + voxel_j*M + voxel_i, d_x1_x2 * (alpha_x - alpha_c));
//(31)
alpha_c = alpha_x; //(33) Update the current location
N_total_sec = N_total_sec - 1;
if (vertex_x1_x < vertex_x2_x)
{
i++;
voxel_i++;
next_alpha_index = i+1;
}
if (vertex_x1_x > vertex_x2_x)
{
i--; //(29)
voxel_i--;
next_alpha_index = i-1;
}
alpha_x = (volumn_x * next_alpha_index + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
}
else if ( (alpha_y < alpha_x) && (alpha_y < alpha_z) )
// alpha_y is the nearest, so update alpha_y
{
// d_f_weightedLenSum[voxel_k*M*N + voxel_j*M + voxel_i]+=d_x1_x2 * (alpha_y - alpha_c)* proj_val;
// d_f_LenSum[voxel_k*M*N + voxel_j*M + voxel_i]+=d_x1_x2 * (alpha_y - alpha_c);
atomicAdd(d_f_weightedLenSum + voxel_k*M*N + voxel_j*M + voxel_i, d_x1_x2 * (alpha_y - alpha_c) * proj_val);
atomicAdd(d_f_LenSum + voxel_k*M*N + voxel_j*M + voxel_i, d_x1_x2 * (alpha_y - alpha_c));
alpha_c = alpha_y;
N_total_sec = N_total_sec -1;
if (vertex_x1_y < vertex_x2_y)
{
j++;
voxel_j++;
next_alpha_index = j+1;
}
else if (vertex_x1_y > vertex_x2_y)
{
j--;
voxel_j--;
next_alpha_index = j-1;
}
alpha_y = (volumn_y * next_alpha_index + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
}
else if ( (alpha_z < alpha_x) && (alpha_z < alpha_y) )
// alpha_z is the nearest, so update alpha_z
{
// d_f_weightedLenSum[voxel_k*M*N + voxel_j*M + voxel_i]+=d_x1_x2 * (alpha_z - alpha_c)* proj_val;
// d_f_LenSum[voxel_k*M*N + voxel_j*M + voxel_i]+=d_x1_x2 * (alpha_z - alpha_c);
atomicAdd(d_f_weightedLenSum + voxel_k*M*N + voxel_j*M + voxel_i, d_x1_x2 * (alpha_z - alpha_c) * proj_val);
atomicAdd(d_f_LenSum + voxel_k*M*N + voxel_j*M + voxel_i, d_x1_x2 * (alpha_z - alpha_c));
alpha_c = alpha_z;
N_total_sec = N_total_sec -1;
if (vertex_x1_z < vertex_x2_z)
{
k++;
voxel_k++;
next_alpha_index = k+1;
}
else if (vertex_x1_z > vertex_x2_z)
{
k--;
voxel_k--;
next_alpha_index = k-1;
}
alpha_z = (volumn_z * next_alpha_index + boundary_voxel_z - vertex_x1_z )* inv_z_diff;
}
else if ( (alpha_x == alpha_y) && (alpha_x < alpha_z) )
//x = y < z
{
// d_f_weightedLenSum[voxel_k*M*N + voxel_j*M + voxel_i]+=d_x1_x2 * (alpha_x - alpha_c)* proj_val;
// d_f_LenSum[voxel_k*M*N + voxel_j*M + voxel_i]+=d_x1_x2 * (alpha_x - alpha_c);
atomicAdd(d_f_weightedLenSum + voxel_k*M*N + voxel_j*M + voxel_i, d_x1_x2 * (alpha_x - alpha_c) * proj_val);
atomicAdd(d_f_LenSum + voxel_k*M*N + voxel_j*M + voxel_i, d_x1_x2 * (alpha_x - alpha_c));
alpha_c = alpha_x; //(33) Update the current location
N_total_sec = N_total_sec - 2;
if (vertex_x1_x < vertex_x2_x)
{
i = i + 1;
voxel_i = voxel_i +1;
next_alpha_index = i+1;
}
if (vertex_x1_x > vertex_x2_x)
{
i = i - 1; //(29)
voxel_i = voxel_i-1;
next_alpha_index = i-1;
}
alpha_x = (volumn_x * next_alpha_index + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
if (vertex_x1_y < vertex_x2_y)
{
j = j + 1;
voxel_j = voxel_j+1;
next_alpha_index = j+1;
}
else if (vertex_x1_y > vertex_x2_y)
{
j = j - 1;
voxel_j = voxel_j-1;
next_alpha_index = j-1;
}
alpha_y = (volumn_y * next_alpha_index + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
}
else if ( (alpha_x == alpha_z) && (alpha_x < alpha_y))// && (sphere_range<=1.0f) )
// x = z < y;
{
// d_f_weightedLenSum[voxel_k*M*N + voxel_j*M + voxel_i]+=d_x1_x2 * (alpha_x - alpha_c)* proj_val;
// d_f_LenSum[voxel_k*M*N + voxel_j*M + voxel_i]+=d_x1_x2 * (alpha_x - alpha_c);
atomicAdd(d_f_weightedLenSum + voxel_k*M*N + voxel_j*M + voxel_i, d_x1_x2 * (alpha_x - alpha_c) * proj_val);
atomicAdd(d_f_LenSum + voxel_k*M*N + voxel_j*M + voxel_i, d_x1_x2 * (alpha_x - alpha_c));
alpha_c = alpha_x; //(33) Update the current location
N_total_sec = N_total_sec - 2;
if (vertex_x1_x < vertex_x2_x)
{
i = i + 1;
voxel_i = voxel_i +1;
next_alpha_index = i+1;
}
if (vertex_x1_x > vertex_x2_x)
{
i = i - 1; //(29)
voxel_i = voxel_i-1;
next_alpha_index = i-1;
}
alpha_x = (volumn_x * next_alpha_index + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
if (vertex_x1_z < vertex_x2_z)
{
k = k + 1;
voxel_k = voxel_k+1;
next_alpha_index = k+1;
}
else if (vertex_x1_z > vertex_x2_z)
{
k = k - 1;
voxel_k = voxel_k-1;
next_alpha_index = k-1;
}
alpha_z = (volumn_z * next_alpha_index + boundary_voxel_z - vertex_x1_z )* inv_z_diff;
}
else if ( (alpha_y == alpha_z) && (alpha_y < alpha_x))// && (sphere_range<=1.0f) )
// y = z < x
{
// d_f_weightedLenSum[voxel_k*M*N + voxel_j*M + voxel_i]+=d_x1_x2 * (alpha_y - alpha_c)* proj_val;
// d_f_LenSum[voxel_k*M*N + voxel_j*M + voxel_i]+=d_x1_x2 * (alpha_y - alpha_c);
atomicAdd(d_f_weightedLenSum + voxel_k*M*N + voxel_j*M + voxel_i, d_x1_x2 * (alpha_y - alpha_c) * proj_val);
atomicAdd(d_f_LenSum + voxel_k*M*N + voxel_j*M + voxel_i, d_x1_x2 * (alpha_y - alpha_c));
alpha_c = alpha_y;
N_total_sec = N_total_sec -2;
if (vertex_x1_y < vertex_x2_y)
{
j = j + 1;
voxel_j = voxel_j+1;
next_alpha_index = j+1;
}
else if (vertex_x1_y > vertex_x2_y)
{
j = j - 1;
voxel_j = voxel_j-1;
next_alpha_index = j-1;
}
alpha_y = (volumn_y * next_alpha_index + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
if (vertex_x1_z < vertex_x2_z)
{
k = k + 1;
voxel_k = voxel_k+1;
next_alpha_index = k+1;
}
else if (vertex_x1_z > vertex_x2_z)
{
k = k - 1;
voxel_k = voxel_k-1;
next_alpha_index = k-1;
}
alpha_z = (volumn_z * next_alpha_index + boundary_voxel_z - vertex_x1_z )* inv_z_diff;
}
else if ( (alpha_x == alpha_z) && (alpha_x == alpha_y))// && (sphere_range<=1.0f) )
// x=y=z
{
// d_f_weightedLenSum[voxel_k*M*N + voxel_j*M + voxel_i]+=d_x1_x2 * (alpha_x - alpha_c) * proj_val;
// d_f_LenSum[voxel_k*M*N + voxel_j*M + voxel_i]+=d_x1_x2 * (alpha_x - alpha_c);
atomicAdd(d_f_weightedLenSum + voxel_k*M*N + voxel_j*M + voxel_i, d_x1_x2 * (alpha_x - alpha_c) * proj_val);
atomicAdd(d_f_LenSum + voxel_k*M*N + voxel_j*M + voxel_i, d_x1_x2 * (alpha_x - alpha_c));
alpha_c = alpha_x; //(33) Update the current location
N_total_sec = N_total_sec - 3;
if (vertex_x1_x < vertex_x2_x)
{
i = i + 1;
voxel_i = voxel_i +1;
next_alpha_index = i+1;
}
if (vertex_x1_x > vertex_x2_x)
{
i = i - 1; //(29)
voxel_i = voxel_i-1;
next_alpha_index = i-1;
}
alpha_x = (volumn_x * next_alpha_index + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
if (vertex_x1_y < vertex_x2_y)
{
j = j + 1;
voxel_j = voxel_j+1;
next_alpha_index = j+1;
}
else if (vertex_x1_y > vertex_x2_y)
{
j = j - 1;
voxel_j = voxel_j-1;
next_alpha_index = j-1;
}
alpha_y = (volumn_y * next_alpha_index + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
if (vertex_x1_z < vertex_x2_z)
{
k = k + 1;
voxel_k = voxel_k+1;
next_alpha_index = k+1;
}
else if (vertex_x1_z > vertex_x2_z)
{
k = k - 1;
voxel_k = voxel_k-1;
next_alpha_index = k-1;
}
alpha_z = (volumn_z * next_alpha_index + boundary_voxel_z - vertex_x1_z )* inv_z_diff;
}
}
}// end tracing the ray
}//end if the ray interacts with the volume
}
__global__ void update(float *d_f, float *d_f_weightedLenSum , float *d_f_LenSum, float beta)
{
int Idx_image_x = threadIdx.x;
int Idx_image_y = blockIdx.x;
int Idx_image_z = blockIdx.y;
int image_voxel_index = Idx_image_z*M*N + Idx_image_y*M + Idx_image_x;
if (d_f_LenSum[image_voxel_index] > volumn_x*1e-6)
d_f[image_voxel_index] += beta * d_f_weightedLenSum[image_voxel_index] / d_f_LenSum[image_voxel_index];
}
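// --- Hedged usage sketch (not part of the original file) ---------------------
// One SART view update as it might be assembled from the three kernels above.
// Assumptions (not confirmed by this file): the helper name, BLOCK_X, num_det_rows
// and the buffer names are illustrative; M, N, ZETA and R are compile-time constants
// visible to host code; R is divisible by BLOCK_X; the two accumulators have been
// zeroed by the caller (e.g. with cudaMemset/hipMemset) before this call.
static void sart_view_update_sketch(float *d_f, float *d_proj_data, float *d_proj_correction,
                                    float *d_f_weightedLenSum, float *d_f_LenSum,
                                    float sin_theta, float cos_theta,
                                    int num_det_rows, float beta)
{
    const int BLOCK_X = 128;                      // threads along one detector row
    dim3 proj_grid(R / BLOCK_X, num_det_rows);    // one thread per detector element
    // Forward project the current image and build the SART correction term (command = 1).
    forward_ray_driven_3d_kernel_correction<<<proj_grid, BLOCK_X>>>(
        d_f, d_proj_correction, d_proj_data, sin_theta, cos_theta, 1);
    // Smear the corrections back into the length-weighted accumulators.
    backprj_ray_driven_3d_kernel<<<proj_grid, BLOCK_X>>>(
        d_f_weightedLenSum, d_f_LenSum, d_proj_correction, sin_theta, cos_theta);
    // Relaxed voxel update: threadIdx.x -> x (M), blockIdx.x -> y (N), blockIdx.y -> z (ZETA).
    update<<<dim3(N, ZETA), M>>>(d_f, d_f_weightedLenSum, d_f_LenSum, beta);
}
// -----------------------------------------------------------------------------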
__global__ void reduce_norm_2_kernel_l1(float *g_idata, float *g_odata, unsigned int n)
{
//load shared_mem
extern __shared__ float sdata[];
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.y* blockDim.x*gridDim.x + blockIdx.x*blockDim.x + threadIdx.x;
sdata[tid] = (i < n) ? (g_idata[i]*g_idata[i]) : 0;
__syncthreads();
// do reduction in shared mem
for(unsigned int s=blockDim.x/2; s>0; s>>=1)
{
if (tid < s)
{
sdata[tid] += sdata[tid + s];
}
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.y*gridDim.x + blockIdx.x] = sdata[0];
}
__global__ void reduce_norm_tv_kernel_l1(float *g_idata, float *g_odata, unsigned int n)
{
//load shared_mem
extern __shared__ float sdata[];
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.y* blockDim.x*gridDim.x + blockIdx.x*blockDim.x + threadIdx.x;
sdata[tid] = (i < n) ? (g_idata[i]) : 0;
__syncthreads();
// do reduction in shared mem
for(unsigned int s=blockDim.x/2; s>0; s>>=1)
{
if (tid < s)
{
sdata[tid] += sdata[tid + s];
}
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.y*gridDim.x + blockIdx.x] = sdata[0];
}
__global__ void reduce_norm_2_kernel_l2(float *g_idata, float *g_odata, unsigned int n)
{
//load shared mem
extern __shared__ float sdata[];
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
sdata[tid] = (i < n) ? fabs(g_idata[i]) : 0;
__syncthreads();
// do reduction in shared mem
for(unsigned int s=blockDim.x/2; s>0; s>>=1)
{
if (tid < s)
{
sdata[tid] += sdata[tid + s];
}
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
__global__ void reduce_norm_2_kernel_end(float *g_idata, float *g_odata, unsigned int n)
{
extern __shared__ float sdata[];
unsigned int tid = threadIdx.x;
sdata[tid] = (tid < n) ? fabs(g_idata[tid]) : 0;
__syncthreads();
// do reduction in shared mem
for(unsigned int s=blockDim.x/2; s>0; s>>=1)
{
if (tid < s)
{
sdata[tid] += sdata[tid + s];
}
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[0] = sqrt(sdata[0]);
}
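// --- Hedged usage sketch (not part of the original file) ---------------------
// How the three reduction kernels above might be chained to obtain the L2 norm of
// an M*N*ZETA volume: stage 1 squares and partially sums, stage 2 sums the partials,
// and the end stage adds the remainder and takes the square root.
// Assumptions: the helper and buffer names are illustrative; the block size must be
// a power of two (required by the s >>= 1 loops); the fixed grid width of 512 keeps
// the final stage within one block for volumes up to 512^3 voxels.
static void l2_norm_of_volume_sketch(float *d_x, float *d_partial1, float *d_partial2, float *d_norm)
{
    const unsigned int n = M * N * ZETA;                 // total voxel count
    const unsigned int T = 512;                          // power-of-two block size
    dim3 grid1(512, (n + 512 * T - 1) / (512 * T));      // 2D grid, as indexed by ..._l1
    reduce_norm_2_kernel_l1<<<grid1, T, T * sizeof(float)>>>(d_x, d_partial1, n);
    unsigned int n1 = grid1.x * grid1.y;                 // one partial sum per block
    unsigned int blocks2 = (n1 + T - 1) / T;
    reduce_norm_2_kernel_l2<<<blocks2, T, T * sizeof(float)>>>(d_partial1, d_partial2, n1);
    // A single block adds the remaining partials and takes the square root;
    // the scalar result ||x||_2 is left in d_norm[0] for the caller to copy back.
    reduce_norm_2_kernel_end<<<1, T, T * sizeof(float)>>>(d_partial2, d_norm, blocks2);
}
// -----------------------------------------------------------------------------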
// Gradient of the smoothed isotropic TV functional, evaluated per interior voxel;
// epi is the small smoothing constant added under the square roots.
// Thread mapping: threadIdx.x+1 -> x, blockIdx.x+1 -> y, blockIdx.y+1 -> z (interior only).
__global__ void tv_gradient_matrix_3d_kernel(float *df, float *d_volumn, float epi)
{
int t_id, bx_id, by_id;
t_id = threadIdx.x+1;
bx_id = blockIdx.x+1;
by_id = blockIdx.y+1;
float stl, s_sub_1_tl, s_t_sub_1_l, st_l_sub_1;
float s_add_1_tl, s_add_1_t_sub_1_l, s_add_1_t_l_sub_1;
float s_t_add_1_l, s_sub_1_t_add_1_l, s_t_add_1_l_sub_1;
float st_l_add_1, s_sub_1_t_l_add_1, s_t_sub_1_l_add_1;
stl = d_volumn[by_id*N*M + bx_id*M + t_id];
s_sub_1_tl = d_volumn[(by_id-1)*N*M + bx_id*M + t_id];
s_t_sub_1_l = d_volumn[by_id*N*M + (bx_id-1)*M + t_id];
st_l_sub_1 = d_volumn[by_id*N*M + bx_id*M + t_id-1];
s_add_1_tl = d_volumn[(by_id+1)*N*M + bx_id*M + t_id];
s_add_1_t_sub_1_l = d_volumn[(by_id+1)*N*M + (bx_id-1)*M + t_id];
s_add_1_t_l_sub_1 = d_volumn[(by_id+1)*N*M + bx_id*M + t_id-1];
s_t_add_1_l = d_volumn[by_id*N*M + (bx_id+1)*M + t_id];
s_sub_1_t_add_1_l = d_volumn[(by_id-1)*N*M + (bx_id+1)*M + t_id];
s_t_add_1_l_sub_1 = d_volumn[by_id*N*M + (bx_id+1)*M + t_id-1];
st_l_add_1 =d_volumn[by_id*N*M + bx_id*M + t_id + 1];
s_sub_1_t_l_add_1 = d_volumn[(by_id-1)*N*M + bx_id*M + t_id + 1];
s_t_sub_1_l_add_1 = d_volumn[by_id*N*M + (bx_id-1)*M + t_id + 1];
df[by_id*N*M + bx_id*M + t_id] = ((stl - s_sub_1_tl) + (stl - s_t_sub_1_l) + (stl - st_l_sub_1) ) /sqrt(epi + (stl - s_sub_1_tl)* (stl - s_sub_1_tl) + (stl - s_t_sub_1_l)* (stl - s_t_sub_1_l) + (stl - st_l_sub_1)* (stl - st_l_sub_1) )
- (s_add_1_tl - stl)/sqrt(epi + (s_add_1_tl - stl)*(s_add_1_tl - stl) + (s_add_1_tl - s_add_1_t_sub_1_l)*(s_add_1_tl - s_add_1_t_sub_1_l) + (s_add_1_tl - s_add_1_t_l_sub_1)*(s_add_1_tl - s_add_1_t_l_sub_1))
- (s_t_add_1_l - stl)/sqrt(epi + (s_t_add_1_l - s_sub_1_t_add_1_l)*(s_t_add_1_l - s_sub_1_t_add_1_l) + (s_t_add_1_l - stl)*(s_t_add_1_l - stl) + (s_t_add_1_l - s_t_add_1_l_sub_1)* (s_t_add_1_l - s_t_add_1_l_sub_1))
- (st_l_add_1 - stl)/sqrt(epi + (st_l_add_1 - s_sub_1_t_l_add_1)*(st_l_add_1 - s_sub_1_t_l_add_1) + (st_l_add_1 - s_t_sub_1_l_add_1)*(st_l_add_1 - s_t_sub_1_l_add_1) + (st_l_add_1 - stl)* (st_l_add_1 - stl));
}
__global__ void tv_matrix_3d_kernel(float *df, float *d_volumn)
{
int t_id, bx_id, by_id;
t_id = threadIdx.x+1;
bx_id = blockIdx.x+1;
by_id = blockIdx.y+1;
float stl, s_sub_1_tl, s_t_sub_1_l, st_l_sub_1;
stl = d_volumn[by_id*N*M + bx_id*M + t_id];
s_sub_1_tl = d_volumn[(by_id-1)*N*M + bx_id*M + t_id];
s_t_sub_1_l = d_volumn[by_id*N*M + (bx_id-1)*M + t_id];
st_l_sub_1 = d_volumn[by_id*N*M + bx_id*M + t_id-1];
df[by_id*N*M + bx_id*M + t_id] = sqrt( (stl - s_sub_1_tl)*(stl - s_sub_1_tl) + (stl - s_t_sub_1_l)*(stl - s_t_sub_1_l) + (stl - st_l_sub_1)*(stl - st_l_sub_1)) ;
}
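// Usage note (not part of the original file): tv_matrix_3d_kernel above writes the
// per-voxel isotropic TV magnitude sqrt(dx^2 + dy^2 + dz^2) into df for interior
// voxels; summing df with the reduce_norm_tv_kernel_l1 chain (which accumulates
// without squaring) then yields the total-variation value, e.g. for the
// backtracking line search sketched below.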
__global__ void backtracking_update_kernel(float *d_volumn_f_update,float *d_volumn_f, float *d_tv_gradient_matrix ,float alpha_temp)
{
unsigned int i = blockIdx.y* blockDim.x*gridDim.x + blockIdx.x*blockDim.x + threadIdx.x;
d_volumn_f_update[i] = d_volumn_f[i] - alpha_temp*d_tv_gradient_matrix[i];
}
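// --- Hedged usage sketch (not part of the original file) ---------------------
// One trial step of TV gradient descent with backtracking, assembled from the
// kernels above. Assumptions: the helper and buffer names, alpha_temp and epi are
// illustrative; d_tv_grad is assumed zeroed beforehand so boundary voxels stay
// unchanged; only interior voxels (1..M-2 / N-2 / ZETA-2) receive a gradient,
// matching the +/-1 neighbour accesses of tv_gradient_matrix_3d_kernel.
static void tv_descent_trial_step_sketch(float *d_f, float *d_f_candidate,
                                         float *d_tv_grad, float alpha_temp, float epi)
{
    // TV gradient on the interior: threadIdx.x -> x, blockIdx.x -> y, blockIdx.y -> z.
    tv_gradient_matrix_3d_kernel<<<dim3(N - 2, ZETA - 2), M - 2>>>(d_tv_grad, d_f, epi);
    // Candidate image f_candidate = f - alpha_temp * grad(TV(f)) over all M*N*ZETA voxels.
    backtracking_update_kernel<<<dim3(N, ZETA), M>>>(d_f_candidate, d_f, d_tv_grad, alpha_temp);
    // The caller would then re-evaluate TV(f_candidate) (see the note after
    // tv_matrix_3d_kernel) and either accept the step or shrink alpha_temp.
}
// -----------------------------------------------------------------------------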
///************ GHF new Code **************///
| 3d2bd06c9262bbf160ad479a26e1c97580229754.cu | /********
 * A fast algorithm to calculate the exact radiological path through a pixel or voxel space
 * Filip Jacobs et al.
*/
__global__ void forward_ray_driven_3d_kernel_correction(float *d_f , float *d_proj_correction, float *d_proj_data, float sin_theta, float cos_theta, int command)
{
// d_f: 3D object array; d_f[i,j,k] = d_f [k*M*N+j*M+i];
// d_proj_data: 2D projection acquired at the angle of t_theta
// d_proj_correction: 2D projection correction, (output of this function. i.e. c(i) in the paper)
int Detector_x_idx = threadIdx.x + blockDim.x * blockIdx.x;
int Detector_z_idx = blockIdx.y;
int proj_pixel_index = Detector_z_idx * R + Detector_x_idx;
// Source position (X2): coordinate in (x,y,z) system .
float vertex_x2_x,vertex_x2_y,vertex_x2_z;
if (CT_style==0) //CBCT
{
vertex_x2_x = Source_x * cos_theta - Source_y * sin_theta;
vertex_x2_y = Source_x * sin_theta + Source_y * cos_theta;
vertex_x2_z = Source_z;
}
else if (CT_style==1) //FBCT
{
vertex_x2_x = Source_x * cos_theta - Source_y * sin_theta;
vertex_x2_y = Source_x * sin_theta + Source_y * cos_theta;
vertex_x2_z = Detector_Zmin + Detector_z_idx * Detector_pixel_x;
}
else if (CT_style==2) //parallel beam
{
vertex_x2_x = Source_x * cos_theta - (Detector_Ymin + Detector_x_idx * Detector_pixel_x) * sin_theta;
vertex_x2_y = Source_x * sin_theta + (Detector_Ymin + Detector_x_idx * Detector_pixel_x) * cos_theta;
vertex_x2_z = Detector_Zmin + Detector_z_idx * Detector_pixel_x;
}
// Detector element center positions (X1): Coordinate in (x,y,z) system ---
float vertex_x1_x = DOD * cos_theta - (Detector_Ymin + Detector_x_idx * Detector_pixel_x) * sin_theta;
float vertex_x1_y = DOD * sin_theta + (Detector_Ymin + Detector_x_idx * Detector_pixel_x) * cos_theta;
float vertex_x1_z = Detector_Zmin + Detector_z_idx * Detector_pixel_x;
// Notice: in this system, vertex_x1_x < 0 < vertex_x2_x
float inv_x_diff = 1.0f / (vertex_x2_x - vertex_x1_x);
float inv_y_diff = 1.0f / (vertex_x2_y - vertex_x1_y);
float inv_z_diff = 1.0f / (vertex_x2_z - vertex_x1_z);
/*****************************************/
float alpha_x_min= 0.0f, alpha_y_min= 0.0f, alpha_z_min= 0.0f;
float alpha_x_max= 0.0f, alpha_y_max= 0.0f, alpha_z_max= 0.0f;
float alpha_min= 0.0f, alpha_max= 0.0f;
int i_min=0, j_min=0, k_min=0;
int i_max=0, j_max=0, k_max=0;
int i=0, j=0, k=0;
int voxel_i=0, voxel_j=0, voxel_k=0;
float alpha_x=0.0f, alpha_y=0.0f, alpha_z=0.0f;
float one_ray_sum = 0.0f;
float one_ray_length = 0.0f;
float alpha_c= 0.0f;
float d_x1_x2= 0.0f;
int N_total_sec=0;
int next_alpha_index;
/**** Step 1 :find out alpha_min, alpha_max ********/
alpha_min = (boundary_voxel_x + volumn_x*0 - vertex_x1_x )* inv_x_diff; //(9)
alpha_max = (boundary_voxel_x + volumn_x*M - vertex_x1_x )* inv_x_diff;
// Notice: at this point it is still uncertain which of the two is the parametric value of the first intersection point of the ray with the x-plane
// It depends on whether source or detector lies on the left side of the reconstruction region at this time
alpha_x_min = fmin(alpha_min, alpha_max); //(5)
alpha_x_max = fmax(alpha_min, alpha_max ); //(6)
alpha_min = (boundary_voxel_y + volumn_y*0 - vertex_x1_y )* inv_y_diff;
alpha_max = (boundary_voxel_y + volumn_y*N - vertex_x1_y )* inv_y_diff;
alpha_y_min = fmin(alpha_min, alpha_max); //(7)
alpha_y_max = fmax(alpha_min, alpha_max ); //(8)
alpha_min = (boundary_voxel_z + volumn_z*0 - vertex_x1_z )* inv_z_diff;
alpha_max = (boundary_voxel_z + volumn_z*ZETA - vertex_x1_z )* inv_z_diff;
alpha_z_min = fmin(alpha_min, alpha_max);
alpha_z_max = fmax(alpha_min, alpha_max );
// alpha_min / alpha_max reused
alpha_min = fmax(fmax(alpha_x_min, alpha_y_min), fmax(alpha_y_min, alpha_z_min)); //(3)
// i.e. alpha_min = fmax(alpha_x_min,alpha_y_min,alpha_z_min)
// it indicates the point where the path interacts with the near boundary of reconstruction region
alpha_max = fmin(fmin(alpha_x_max, alpha_y_max), fmin(alpha_y_max, alpha_z_max)); //(4)
// i.e. alpha_max = fmin(alpha_x_max,alpha_y_max,alpha_z_max)
// it indicates the point where the path last interacts with the far boundary of reconstruction region
/********Step 2,3: Find i_max, i_min***************/
if (alpha_max <= alpha_min) // It means no interaction of the ray and the volume
one_ray_length = 0.0f ;
else
{
// X direction
if (vertex_x1_x < vertex_x2_x)
{
if (alpha_min == alpha_x_min)
i_min = 1; //(11)
else //if (alpha_min != alpha_x_min)
i_min = floor(( alpha_min*(vertex_x2_x - vertex_x1_x) + vertex_x1_x - boundary_voxel_x)*inv_volumn_x) + 1 ;
//(12)
/* Note: i_min is the index of the 1st x plane where the path interacts inside the reconstruction region
* It is not the index of alpha_x_min
*/
if (alpha_max == alpha_x_max)
i_max = M; //(13)
else //if (alpha_max != alpha_x_max)
i_max = floor(( alpha_max*(vertex_x2_x - vertex_x1_x) + vertex_x1_x - boundary_voxel_x)*inv_volumn_x) ;
//(14)
// Note: i_max is the index of the last x plane where the path interacts with the reconstruction region (inside or boundary)
}
else //if (vertex_x1_x >= vertex_x2_x)
{
if (alpha_min == alpha_x_min)
i_max = M-1; //(15)
else //if (alpha_min != alpha_x_min)
i_max = floor(( alpha_min*(vertex_x2_x - vertex_x1_x) + vertex_x1_x - boundary_voxel_x)*inv_volumn_x) ;
//(16)
if (alpha_max == alpha_x_max)
i_min = 0; //(17)
else //if (alpha_max != alpha_x_max)
i_min = floor(( alpha_max*(vertex_x2_x - vertex_x1_x) + vertex_x1_x - boundary_voxel_x)*inv_volumn_x) + 1 ;
//(18)
}
// Note: overall, i_min is the most left x-plane, i_max the most right x-plane,
// and the initial point (the first interacted position on the boundary) NOT included.
//Y direction
if (vertex_x1_y < vertex_x2_y)
{
if (alpha_min == alpha_y_min)
j_min = 1;
else //f (alpha_min != alpha_y_min)
j_min = floor(( alpha_min*(vertex_x2_y - vertex_x1_y) + vertex_x1_y - boundary_voxel_y)*inv_volumn_y) + 1 ;
if (alpha_max == alpha_y_max)
j_max = N;
else //if (alpha_max != alpha_y_max)
j_max = floor(( alpha_max*(vertex_x2_y - vertex_x1_y) + vertex_x1_y - boundary_voxel_y)*inv_volumn_y) ;
}
else //if (vertex_x1_y >= vertex_x2_y)
{
if (alpha_min == alpha_y_min)
j_max = N-1;
else //if (alpha_min != alpha_y_min)
j_max = floor(( alpha_min*(vertex_x2_y - vertex_x1_y) + vertex_x1_y - boundary_voxel_y )*inv_volumn_y) ;
if (alpha_max == alpha_y_max)
j_min = 0;
else //if (alpha_max != alpha_y_max)
j_min = floor(( alpha_max*(vertex_x2_y - vertex_x1_y) + vertex_x1_y - boundary_voxel_y )*inv_volumn_y) + 1 ;
}
// Note: overall, j_min is the most bottom y-plane, j_max the most top y-plane,
// and the initial point (the first interacted position on the boundary) NOT included.
//Z direction
if (vertex_x1_z < vertex_x2_z)
{
if (alpha_min == alpha_z_min)
k_min = 1;
else //if (alpha_min != alpha_z_min)
k_min = floor(( alpha_min*(vertex_x2_z - vertex_x1_z) + vertex_x1_z - boundary_voxel_z )*inv_volumn_z) + 1 ;
if (alpha_max == alpha_z_max)
k_max = ZETA;
else //if (alpha_max != alpha_z_max)
k_max = floor(( alpha_max*(vertex_x2_z - vertex_x1_z) + vertex_x1_z - boundary_voxel_z )*inv_volumn_z) ;
}
else //if (vertex_x1_z >= vertex_x2_z)
{
if (alpha_min == alpha_z_min)
k_max = ZETA-1;
else //if (alpha_min != alpha_z_min)
k_max = floor(( alpha_min*(vertex_x2_z - vertex_x1_z) + vertex_x1_z - boundary_voxel_z )*inv_volumn_z) ;
if (alpha_max == alpha_z_max)
k_min = 0;
else //if (alpha_max != alpha_z_max)
k_min = floor(( alpha_max*(vertex_x2_z - vertex_x1_z) + vertex_x1_z -boundary_voxel_z )*inv_volumn_z) + 1 ;
}
/************ initialization (i,j,k) (alpha_x_1,alpha_y_1,alpha_z_1)**************************/
// Note: (i,j,k) is the current x,y,z plane index (@ the initial point at the boundary)
// Note: (alpha_x,alpha_y,alpha_z) is the next x,y,z plane to go.
N_total_sec = i_max - i_min + 1 + j_max - j_min +1 + k_max-k_min +1;
// i.e. N_p (25)
if (fabs(vertex_x1_x-vertex_x2_x)<volumn_x*1e-6 )
{
alpha_x = MAX_infi;
i = i_min-1;
}
else if (vertex_x1_x < vertex_x2_x)
{
alpha_x = (volumn_x * i_min + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
i = i_min - 1;
}
else if (vertex_x1_x > vertex_x2_x)
{
alpha_x = (volumn_x * i_max + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
i = i_max + 1;
}
// Note: alpha_x_1 is the intersection where the path hit the 1st x plane inside the recon region
if (fabs(vertex_x1_y-vertex_x2_y)<volumn_y*1e-6 )
{
alpha_y = MAX_infi;
j = j_min-1;
}
else if (vertex_x1_y < vertex_x2_y)
{
alpha_y = (volumn_y * j_min + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
j = j_min - 1;
}
else if (vertex_x1_y > vertex_x2_y)
{
alpha_y = (volumn_y * j_max + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
j = j_max + 1;
}
// Note: alpha_y_1 is the intersection where the path hit the 1st y plane inside the recon region
if (fabs(vertex_x1_z-vertex_x2_z)<volumn_z*1e-6 )
{
alpha_z = MAX_infi;
k = k_min-1;
}
else if (vertex_x1_z < vertex_x2_z)
{
alpha_z = (volumn_z * k_min + boundary_voxel_z - vertex_x1_z )* inv_z_diff;
k = k_min - 1;
}
else if (vertex_x1_z > vertex_x2_z)
{
alpha_z = (volumn_z * k_max + boundary_voxel_z - vertex_x1_z )* inv_z_diff;
k = k_max + 1;
}
/************ initialization (voxel_i,voxel_j,voxel_k) **************************/
// Note: (voxel_i,voxel_j,voxel_k) is the current x,y,z voxel index (@ the initial point at the boundary)
if (vertex_x1_x < vertex_x2_x)
voxel_i = i_min-1;
else
voxel_i = i_max;
if (vertex_x1_y < vertex_x2_y)
voxel_j = j_min-1;
else
voxel_j = j_max;
if (vertex_x1_z < vertex_x2_z)
voxel_k = k_min-1;
else
voxel_k = k_max;
/***************** Updating alpha_x, alpha_y, alpha_z, ************************/
// Note: (alpha_x, alpha_y, alpha_z) the intersection where the path hit the next (i.e. 1st here ) x/y/z plane inside the recon
d_x1_x2 = sqrt((vertex_x2_x-vertex_x1_x)*(vertex_x2_x-vertex_x1_x) + (vertex_x2_y-vertex_x1_y)*(vertex_x2_y - vertex_x1_y) + (vertex_x2_z-vertex_x1_z)*(vertex_x2_z-vertex_x1_z) );
alpha_c = alpha_min; // intersection where the path hit the 1st plane at the boundary of recon region
// Note : (i,j,k) is the (x,y,z) plane index of the current intersection (with a certain plane)
// If i, j or k would not be an integer, its predecessor (along the ray) is used
while (alpha_max - alpha_c > 1e-16)
{
if ((voxel_i > M-1)||(voxel_i <0) || (voxel_j > N-1)||(voxel_j <0) || (voxel_k > ZETA-1)||(voxel_k <0))
{
alpha_c = alpha_max +1; // to terminate the loop
}
else
{
if ( (alpha_x < alpha_y) && (alpha_x < alpha_z))
// alpha_x is the nearest, so update alpha_x
{
one_ray_length += d_x1_x2 * (alpha_x - alpha_c); //(30)
one_ray_sum += d_x1_x2 * (alpha_x - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i];
//(31)
alpha_c = alpha_x; //(33) Update the current location
N_total_sec = N_total_sec - 1;
if (vertex_x1_x < vertex_x2_x)
{
i++;
voxel_i++;
next_alpha_index = i+1;
}
if (vertex_x1_x > vertex_x2_x)
{
i--; //(29)
voxel_i--;
next_alpha_index = i-1;
}
alpha_x = (volumn_x * next_alpha_index + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
}
else if ( (alpha_y < alpha_x) && (alpha_y < alpha_z) )
// alpha_y is the nearest, so update alpha_y
{
one_ray_length += d_x1_x2 * (alpha_y - alpha_c);
one_ray_sum += d_x1_x2 * (alpha_y - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i];
alpha_c = alpha_y;
N_total_sec = N_total_sec -1;
if (vertex_x1_y < vertex_x2_y)
{
j++;
voxel_j++;
next_alpha_index = j+1;
}
else if (vertex_x1_y > vertex_x2_y)
{
j--;
voxel_j--;
next_alpha_index = j-1;
}
alpha_y = (volumn_y * next_alpha_index + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
}
else if ( (alpha_z < alpha_x) && (alpha_z < alpha_y) )
// alpha_z is the nearest, so update alpha_z
{
one_ray_length += d_x1_x2 * (alpha_z - alpha_c);
one_ray_sum += d_x1_x2 * (alpha_z - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i];
alpha_c = alpha_z;
N_total_sec = N_total_sec -1;
if (vertex_x1_z < vertex_x2_z)
{
k++;
voxel_k++;
next_alpha_index = k+1;
}
else if (vertex_x1_z > vertex_x2_z)
{
k--;
voxel_k--;
next_alpha_index = k-1;
}
alpha_z = (volumn_z * next_alpha_index + boundary_voxel_z - vertex_x1_z )* inv_z_diff;
}
else if ( (alpha_x == alpha_y) && (alpha_x < alpha_z) )
//x = y < z
{
one_ray_length += d_x1_x2 * (alpha_x - alpha_c); //(30)
one_ray_sum += d_x1_x2 * (alpha_x - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i]; //(31)
alpha_c = alpha_x; //(33) Update the current location
N_total_sec = N_total_sec - 2;
if (vertex_x1_x < vertex_x2_x)
{
i = i + 1;
voxel_i = voxel_i +1;
next_alpha_index = i+1;
}
if (vertex_x1_x > vertex_x2_x)
{
i = i - 1; //(29)
voxel_i = voxel_i-1;
next_alpha_index = i-1;
}
alpha_x = (volumn_x * next_alpha_index + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
if (vertex_x1_y < vertex_x2_y)
{
j = j + 1;
voxel_j = voxel_j+1;
next_alpha_index = j+1;
}
else if (vertex_x1_y > vertex_x2_y)
{
j = j - 1;
voxel_j = voxel_j-1;
next_alpha_index = j-1;
}
alpha_y = (volumn_y * next_alpha_index + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
}
else if ( (alpha_x == alpha_z) && (alpha_x < alpha_y))// && (sphere_range<=1.0f) )
// x = z < y;
{
one_ray_length += d_x1_x2 * (alpha_x - alpha_c); //(30)
one_ray_sum += d_x1_x2 * (alpha_x - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i]; //(31)
alpha_c = alpha_x; //(33) Update the current location
N_total_sec = N_total_sec - 2;
if (vertex_x1_x < vertex_x2_x)
{
i = i + 1;
voxel_i = voxel_i +1;
next_alpha_index = i+1;
}
if (vertex_x1_x > vertex_x2_x)
{
i = i - 1; //(29)
voxel_i = voxel_i-1;
next_alpha_index = i-1;
}
alpha_x = (volumn_x * next_alpha_index + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
if (vertex_x1_z < vertex_x2_z)
{
k = k + 1;
voxel_k = voxel_k+1;
next_alpha_index = k+1;
}
else if (vertex_x1_z > vertex_x2_z)
{
k = k - 1;
voxel_k = voxel_k-1;
next_alpha_index = k-1;
}
alpha_z = (volumn_z * next_alpha_index + boundary_voxel_z - vertex_x1_z )* inv_z_diff;
}
else if ( (alpha_y == alpha_z) && (alpha_y < alpha_x))// && (sphere_range<=1.0f) )
// y = z < x
{
one_ray_length += d_x1_x2 * (alpha_y - alpha_c);
one_ray_sum += d_x1_x2 * (alpha_y - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i];
alpha_c = alpha_y;
N_total_sec = N_total_sec -2;
if (vertex_x1_y < vertex_x2_y)
{
j = j + 1;
voxel_j = voxel_j+1;
next_alpha_index = j+1;
}
else if (vertex_x1_y > vertex_x2_y)
{
j = j - 1;
voxel_j = voxel_j-1;
next_alpha_index = j-1;
}
alpha_y = (volumn_y * next_alpha_index + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
if (vertex_x1_z < vertex_x2_z)
{
k = k + 1;
voxel_k = voxel_k+1;
next_alpha_index = k+1;
}
else if (vertex_x1_z > vertex_x2_z)
{
k = k - 1;
voxel_k = voxel_k-1;
next_alpha_index = k-1;
}
alpha_z = (volumn_z * next_alpha_index + boundary_voxel_z - vertex_x1_z )* inv_z_diff;
}
else if ( (alpha_x == alpha_z) && (alpha_x == alpha_y))// && (sphere_range<=1.0f) )
// x=y=z
{
one_ray_length += d_x1_x2 * (alpha_x - alpha_c); //(30)
one_ray_sum += d_x1_x2 * (alpha_x - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i]; //(31)
alpha_c = alpha_x; //(33) Update the current location
N_total_sec = N_total_sec - 3;
if (vertex_x1_x < vertex_x2_x)
{
i = i + 1;
voxel_i = voxel_i +1;
next_alpha_index = i+1;
}
if (vertex_x1_x > vertex_x2_x)
{
i = i - 1; //(29)
voxel_i = voxel_i-1;
next_alpha_index = i-1;
}
alpha_x = (volumn_x * next_alpha_index + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
if (vertex_x1_y < vertex_x2_y)
{
j = j + 1;
voxel_j = voxel_j+1;
next_alpha_index = j+1;
}
else if (vertex_x1_y > vertex_x2_y)
{
j = j - 1;
voxel_j = voxel_j-1;
next_alpha_index = j-1;
}
alpha_y = (volumn_y * next_alpha_index + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
if (vertex_x1_z < vertex_x2_z)
{
k = k + 1;
voxel_k = voxel_k+1;
next_alpha_index = k+1;
}
else if (vertex_x1_z > vertex_x2_z)
{
k = k - 1;
voxel_k = voxel_k-1;
next_alpha_index = k-1;
}
alpha_z = (volumn_z * next_alpha_index + boundary_voxel_z - vertex_x1_z )* inv_z_diff;
}
}
}// end tracing the ray
}//end if the ray interacts with the volume
if (one_ray_length < volumn_z*1e-6)
d_proj_correction[proj_pixel_index] = 0.0;
else
{
if (command == 0)
d_proj_correction[proj_pixel_index] = one_ray_sum; // forward operator
else if (command == 1)
d_proj_correction[proj_pixel_index] = (d_proj_data[proj_pixel_index] - one_ray_sum)/one_ray_length; // projection correction (for SART)
}
// __syncthreads();
}
__global__ void backprj_ray_driven_3d_kernel(float *d_f_weightedLenSum , float *d_f_LenSum , float *d_proj_correction, float sin_theta, float cos_theta)
{
int Detector_x_idx = threadIdx.x + blockDim.x * blockIdx.x;
int Detector_z_idx = blockIdx.y;
int proj_pixel_index = Detector_z_idx * R + Detector_x_idx;
// Source position (X2): coordinate in (x,y,z) system .
float vertex_x2_x,vertex_x2_y,vertex_x2_z;
if (CT_style==0) //CBCT
{
vertex_x2_x = Source_x * cos_theta - Source_y * sin_theta;
vertex_x2_y = Source_x * sin_theta + Source_y * cos_theta;
vertex_x2_z = Source_z;
}
else if (CT_style==1) //FBCT
{
vertex_x2_x = Source_x * cos_theta - Source_y * sin_theta;
vertex_x2_y = Source_x * sin_theta + Source_y * cos_theta;
vertex_x2_z = Detector_Zmin + Detector_z_idx * Detector_pixel_x;
}
else if (CT_style==2) //parallel beam
{
vertex_x2_x = Source_x * cos_theta - (Detector_Ymin + Detector_x_idx * Detector_pixel_x) * sin_theta;
vertex_x2_y = Source_x * sin_theta + (Detector_Ymin + Detector_x_idx * Detector_pixel_x) * cos_theta;
vertex_x2_z = Detector_Zmin + Detector_z_idx * Detector_pixel_x;
}
// Detector element center positions (X1): Coordinate in (x,y,z) system ---
float vertex_x1_x = DOD * cos_theta - (Detector_Ymin + Detector_x_idx * Detector_pixel_x) * sin_theta;
float vertex_x1_y = DOD * sin_theta + (Detector_Ymin + Detector_x_idx * Detector_pixel_x) * cos_theta;
float vertex_x1_z = Detector_Zmin + Detector_z_idx * Detector_pixel_x;
// Notice: in this system, vertex_x1_x < 0 < vertex_x2_x
float inv_x_diff = 1.0f / (vertex_x2_x - vertex_x1_x);
float inv_y_diff = 1.0f / (vertex_x2_y - vertex_x1_y);
float inv_z_diff = 1.0f / (vertex_x2_z - vertex_x1_z);
float proj_val = d_proj_correction[ proj_pixel_index];
/*****************************************/
float alpha_x_min= 0.0f, alpha_y_min= 0.0f, alpha_z_min= 0.0f;
float alpha_x_max= 0.0f, alpha_y_max= 0.0f, alpha_z_max= 0.0f;
float alpha_min= 0.0f, alpha_max= 0.0f;
int i_min=0, j_min=0, k_min=0;
int i_max=0, j_max=0, k_max=0;
int i=0, j=0, k=0;
int voxel_i=0, voxel_j=0, voxel_k=0;
float alpha_x=0.0f, alpha_y=0.0f, alpha_z=0.0f;
float alpha_c= 0.0f;
float d_x1_x2= 0.0f;
int N_total_sec=0;
int next_alpha_index;
/**** Step 1 :find out alpha_min, alpha_max ********/
alpha_min = (boundary_voxel_x + volumn_x*0 - vertex_x1_x )* inv_x_diff; //(9)
alpha_max = (boundary_voxel_x + volumn_x*M - vertex_x1_x )* inv_x_diff;
// Notice: at this point it is still uncertain which of the two is the parametric value of the first intersection point of the ray with the x-plane
// It depends on whether source or detector lies on the left side of the reconstruction region at this time
alpha_x_min = fmin(alpha_min, alpha_max); //(5)
alpha_x_max = fmax(alpha_min, alpha_max ); //(6)
alpha_min = (boundary_voxel_y + volumn_y*0 - vertex_x1_y )* inv_y_diff;
alpha_max = (boundary_voxel_y + volumn_y*N - vertex_x1_y )* inv_y_diff;
alpha_y_min = fmin(alpha_min, alpha_max); //(7)
alpha_y_max = fmax(alpha_min, alpha_max ); //(8)
alpha_min = (boundary_voxel_z + volumn_z*0 - vertex_x1_z )* inv_z_diff;
alpha_max = (boundary_voxel_z + volumn_z*ZETA - vertex_x1_z )* inv_z_diff;
alpha_z_min = fmin(alpha_min, alpha_max);
alpha_z_max = fmax(alpha_min, alpha_max );
// alpha_min / alpha_max reused
alpha_min = fmax(fmax(alpha_x_min, alpha_y_min), fmax(alpha_y_min, alpha_z_min)); //(3)
// i.e. alpha_min = fmax(alpha_x_min,alpha_y_min,alpha_z_min)
// it indicates the point where the path interacts with the near boundary of reconstruction region
alpha_max = fmin(fmin(alpha_x_max, alpha_y_max), fmin(alpha_y_max, alpha_z_max)); //(4)
// i.e. alpha_max = fmin(alpha_x_max,alpha_y_max,alpha_z_max)
// it indicates the point where the path last interacts with the far boundary of reconstruction region
/********Step 2,3: Find i_max, i_min***************/
if (alpha_max <= alpha_min) // It means no interaction of the ray and the volume
{
}
else
{
// X direction
if (vertex_x1_x < vertex_x2_x)
{
if (alpha_min == alpha_x_min)
i_min = 1; //(11)
else //if (alpha_min != alpha_x_min)
i_min = floor(( alpha_min*(vertex_x2_x - vertex_x1_x) + vertex_x1_x - boundary_voxel_x)*inv_volumn_x) + 1 ;
//(12)
/* Note: i_min is the index of the 1st x plane where the path interacts inside the reconstruction region
* It is not the index of alpha_x_min
*/
if (alpha_max == alpha_x_max)
i_max = M; //(13)
else //if (alpha_max != alpha_x_max)
i_max = floor(( alpha_max*(vertex_x2_x - vertex_x1_x) + vertex_x1_x - boundary_voxel_x)*inv_volumn_x) ;
//(14)
// Note: i_max is the index of the last x plane where the path interacts with the reconstruction region (inside or boundary)
}
else //if (vertex_x1_x > vertex_x2_x)
{
if (alpha_min == alpha_x_min)
i_max = M-1; //(15)
else //if (alpha_min != alpha_x_min)
i_max = floor(( alpha_min*(vertex_x2_x - vertex_x1_x) + vertex_x1_x - boundary_voxel_x)*inv_volumn_x) ;
//(16)
if (alpha_max == alpha_x_max)
i_min = 0; //(17)
else //if (alpha_max != alpha_x_max)
i_min = floor(( alpha_max*(vertex_x2_x - vertex_x1_x) + vertex_x1_x - boundary_voxel_x)*inv_volumn_x) + 1 ;
//(18)
}
// Note: overall, i_min is the most left x-plane, i_max the most right x-plane,
// and the initial point (the first interacted position on the boundary) NOT included.
//Y direction
if (vertex_x1_y < vertex_x2_y)
{
if (alpha_min == alpha_y_min)
j_min = 1;
else //f (alpha_min != alpha_y_min)
j_min = floor(( alpha_min*(vertex_x2_y - vertex_x1_y) + vertex_x1_y - boundary_voxel_y)*inv_volumn_y) + 1 ;
if (alpha_max == alpha_y_max)
j_max = N;
else //if (alpha_max != alpha_y_max)
j_max = floor(( alpha_max*(vertex_x2_y - vertex_x1_y) + vertex_x1_y - boundary_voxel_y)*inv_volumn_y) ;
}
else //if (vertex_x1_y > vertex_x2_y)
{
if (alpha_min == alpha_y_min)
j_max = N-1;
else //if (alpha_min != alpha_y_min)
j_max = floor(( alpha_min*(vertex_x2_y - vertex_x1_y) + vertex_x1_y - boundary_voxel_y )*inv_volumn_y) ;
if (alpha_max == alpha_y_max)
j_min = 0;
else //if (alpha_max != alpha_y_max)
j_min = floor(( alpha_max*(vertex_x2_y - vertex_x1_y) + vertex_x1_y - boundary_voxel_y )*inv_volumn_y) + 1 ;
}
// Note: overall, j_min is the most bottom y-plane, j_max the most top y-plane,
// and the initial point (the first interacted position on the boundary) NOT included.
//Z direction
if (vertex_x1_z < vertex_x2_z)
{
if (alpha_min == alpha_z_min)
k_min = 1;
else //if (alpha_min != alpha_z_min)
k_min = floor(( alpha_min*(vertex_x2_z - vertex_x1_z) + vertex_x1_z - boundary_voxel_z )*inv_volumn_z) + 1 ;
if (alpha_max == alpha_z_max)
k_max = ZETA;
else //if (alpha_max != alpha_z_max)
k_max = floor(( alpha_max*(vertex_x2_z - vertex_x1_z) + vertex_x1_z - boundary_voxel_z )*inv_volumn_z) ;
}
else //if (vertex_x1_z > vertex_x2_z)
{
if (alpha_min == alpha_z_min)
k_max = ZETA-1;
else //if (alpha_min != alpha_z_min)
k_max = floor(( alpha_min*(vertex_x2_z - vertex_x1_z) + vertex_x1_z - boundary_voxel_z )*inv_volumn_z) ;
if (alpha_max == alpha_z_max)
k_min = 0;
else //if (alpha_max != alpha_z_max)
k_min = floor(( alpha_max*(vertex_x2_z - vertex_x1_z) + vertex_x1_z -boundary_voxel_z )*inv_volumn_z) + 1 ;
}
/************ initialization (i,j,k), (alpha_x, alpha_y, alpha_z) **************************/
// Note: (i,j,k) is the current x,y,z plane index (@ the initial point at the boundary)
// Note: (alpha_x, alpha_y, alpha_z) is the next x,y,z plane to go.
N_total_sec = i_max - i_min + 1 + j_max - j_min +1 + k_max-k_min +1;
// i.e. N_p (25)
if (fabs(vertex_x1_x-vertex_x2_x)<volumn_x*1e-6 )
{
alpha_x = MAX_infi;
i = i_min-1;
}
else if (vertex_x1_x < vertex_x2_x)
{
alpha_x = (volumn_x * i_min + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
i = i_min - 1;
}
else if (vertex_x1_x > vertex_x2_x)
{
alpha_x = (volumn_x * i_max + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
i = i_max + 1;
}
// Note: alpha_x_1 is the intersection where the path hit the 1st x plane inside the recon region
if (fabs(vertex_x1_y-vertex_x2_y)<volumn_y*1e-6 )
{
alpha_y = MAX_infi;
j = j_min-1;
}
else if (vertex_x1_y < vertex_x2_y)
{
alpha_y = (volumn_y * j_min + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
j = j_min - 1;
}
else if (vertex_x1_y > vertex_x2_y)
{
alpha_y = (volumn_y * j_max + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
j = j_max + 1;
}
// Note: alpha_y_1 is the intersection where the path hit the 1st y plane inside the recon region
if (fabs(vertex_x1_z-vertex_x2_z)<volumn_z*1e-6 )
{
alpha_z = MAX_infi;
k = k_min-1;
}
else if (vertex_x1_z < vertex_x2_z)
{
alpha_z = (volumn_z * k_min + boundary_voxel_z - vertex_x1_z )* inv_z_diff;
k = k_min - 1;
}
else if (vertex_x1_z > vertex_x2_z)
{
alpha_z = (volumn_z * k_max + boundary_voxel_z - vertex_x1_z )* inv_z_diff;
k = k_max + 1;
}
/************ initialization (voxel_i,voxel_j,voxel_k) **************************/
// Note: (voxel_i,voxel_j,voxel_k) is the current x,y,z voxel index (@ the initial point at the boundary)
if (vertex_x1_x < vertex_x2_x)
voxel_i = i_min-1;
else
voxel_i = i_max;
if (vertex_x1_y < vertex_x2_y)
voxel_j = j_min-1;
else
voxel_j = j_max;
if (vertex_x1_z < vertex_x2_z)
voxel_k = k_min-1;
else
voxel_k = k_max;
/***************** Updating alpha_x, alpha_y, alpha_z, ************************/
// Note: (alpha_x, alpha_y, alpha_z) the intersection where the path hit the next (i.e. 1st here ) x/y/z plane inside the recon
d_x1_x2 = sqrt((vertex_x2_x-vertex_x1_x)*(vertex_x2_x-vertex_x1_x) + (vertex_x2_y-vertex_x1_y)*(vertex_x2_y - vertex_x1_y) + (vertex_x2_z-vertex_x1_z)*(vertex_x2_z-vertex_x1_z) );
alpha_c = alpha_min; // intersection where the path hit the 1st plane at the boundary of recon region
// Note : (i,j,k) is the (x,y,z) plane index of the current intersection (with a certain plane)
// If i, j or k would not be an integer, its predecessor (along the ray) is used
while (alpha_max - alpha_c > 1e-8)
{
if ((voxel_i > M-1)||(voxel_i <0) || (voxel_j > N-1)||(voxel_j <0) || (voxel_k > ZETA-1)||(voxel_k <0))
{
alpha_c = alpha_max +1; // to terminate the loop
}
else
{
if ( (alpha_x < alpha_y) && (alpha_x < alpha_z))
// alpha_x is the nearest, so update alpha_x
{
// d_f_weightedLenSum[voxel_k*M*N + voxel_j*M + voxel_i]+=d_x1_x2 * (alpha_x - alpha_c) * proj_val;
// d_f_LenSum[voxel_k*M*N + voxel_j*M + voxel_i]+=d_x1_x2 * (alpha_x - alpha_c);
atomicAdd(d_f_weightedLenSum + voxel_k*M*N + voxel_j*M + voxel_i, d_x1_x2 * (alpha_x - alpha_c) * proj_val);
atomicAdd(d_f_LenSum + voxel_k*M*N + voxel_j*M + voxel_i, d_x1_x2 * (alpha_x - alpha_c));
//(31)
alpha_c = alpha_x; //(33) Update the current location
N_total_sec = N_total_sec - 1;
if (vertex_x1_x < vertex_x2_x)
{
i++;
voxel_i++;
next_alpha_index = i+1;
}
if (vertex_x1_x > vertex_x2_x)
{
i--; //(29)
voxel_i--;
next_alpha_index = i-1;
}
alpha_x = (volumn_x * next_alpha_index + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
}
else if ( (alpha_y < alpha_x) && (alpha_y < alpha_z) )
// alpha_y is the nearest, so update alpha_y
{
// d_f_weightedLenSum[voxel_k*M*N + voxel_j*M + voxel_i]+=d_x1_x2 * (alpha_y - alpha_c)* proj_val;
// d_f_LenSum[voxel_k*M*N + voxel_j*M + voxel_i]+=d_x1_x2 * (alpha_y - alpha_c);
atomicAdd(d_f_weightedLenSum + voxel_k*M*N + voxel_j*M + voxel_i, d_x1_x2 * (alpha_y - alpha_c) * proj_val);
atomicAdd(d_f_LenSum + voxel_k*M*N + voxel_j*M + voxel_i, d_x1_x2 * (alpha_y - alpha_c));
alpha_c = alpha_y;
N_total_sec = N_total_sec -1;
if (vertex_x1_y < vertex_x2_y)
{
j++;
voxel_j++;
next_alpha_index = j+1;
}
else if (vertex_x1_y > vertex_x2_y)
{
j--;
voxel_j--;
next_alpha_index = j-1;
}
alpha_y = (volumn_y * next_alpha_index + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
}
else if ( (alpha_z < alpha_x) && (alpha_z < alpha_y) )
// alpha_z is the nearest, so update alpha_z
{
// d_f_weightedLenSum[voxel_k*M*N + voxel_j*M + voxel_i]+=d_x1_x2 * (alpha_z - alpha_c)* proj_val;
// d_f_LenSum[voxel_k*M*N + voxel_j*M + voxel_i]+=d_x1_x2 * (alpha_z - alpha_c);
atomicAdd(d_f_weightedLenSum + voxel_k*M*N + voxel_j*M + voxel_i, d_x1_x2 * (alpha_z - alpha_c) * proj_val);
atomicAdd(d_f_LenSum + voxel_k*M*N + voxel_j*M + voxel_i, d_x1_x2 * (alpha_z - alpha_c));
alpha_c = alpha_z;
N_total_sec = N_total_sec -1;
if (vertex_x1_z < vertex_x2_z)
{
k++;
voxel_k++;
next_alpha_index = k+1;
}
else if (vertex_x1_z > vertex_x2_z)
{
k--;
voxel_k--;
next_alpha_index = k-1;
}
alpha_z = (volumn_z * next_alpha_index + boundary_voxel_z - vertex_x1_z )* inv_z_diff;
}
else if ( (alpha_x == alpha_y) && (alpha_x < alpha_z) )
//x = y < z
{
// d_f_weightedLenSum[voxel_k*M*N + voxel_j*M + voxel_i]+=d_x1_x2 * (alpha_x - alpha_c)* proj_val;
// d_f_LenSum[voxel_k*M*N + voxel_j*M + voxel_i]+=d_x1_x2 * (alpha_x - alpha_c);
atomicAdd(d_f_weightedLenSum + voxel_k*M*N + voxel_j*M + voxel_i, d_x1_x2 * (alpha_x - alpha_c) * proj_val);
atomicAdd(d_f_LenSum + voxel_k*M*N + voxel_j*M + voxel_i, d_x1_x2 * (alpha_x - alpha_c));
alpha_c = alpha_x; //(33) Update the current location
N_total_sec = N_total_sec - 2;
if (vertex_x1_x < vertex_x2_x)
{
i = i + 1;
voxel_i = voxel_i +1;
next_alpha_index = i+1;
}
if (vertex_x1_x > vertex_x2_x)
{
i = i - 1; //(29)
voxel_i = voxel_i-1;
next_alpha_index = i-1;
}
alpha_x = (volumn_x * next_alpha_index + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
if (vertex_x1_y < vertex_x2_y)
{
j = j + 1;
voxel_j = voxel_j+1;
next_alpha_index = j+1;
}
else if (vertex_x1_y > vertex_x2_y)
{
j = j - 1;
voxel_j = voxel_j-1;
next_alpha_index = j-1;
}
alpha_y = (volumn_y * next_alpha_index + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
}
else if ( (alpha_x == alpha_z) && (alpha_x < alpha_y))// && (sphere_range<=1.0f) )
// x = z < y;
{
// d_f_weightedLenSum[voxel_k*M*N + voxel_j*M + voxel_i]+=d_x1_x2 * (alpha_x - alpha_c)* proj_val;
// d_f_LenSum[voxel_k*M*N + voxel_j*M + voxel_i]+=d_x1_x2 * (alpha_x - alpha_c);
atomicAdd(d_f_weightedLenSum + voxel_k*M*N + voxel_j*M + voxel_i, d_x1_x2 * (alpha_x - alpha_c) * proj_val);
atomicAdd(d_f_LenSum + voxel_k*M*N + voxel_j*M + voxel_i, d_x1_x2 * (alpha_x - alpha_c));
alpha_c = alpha_x; //(33) Update the current location
N_total_sec = N_total_sec - 2;
if (vertex_x1_x < vertex_x2_x)
{
i = i + 1;
voxel_i = voxel_i +1;
next_alpha_index = i+1;
}
if (vertex_x1_x > vertex_x2_x)
{
i = i - 1; //(29)
voxel_i = voxel_i-1;
next_alpha_index = i-1;
}
alpha_x = (volumn_x * next_alpha_index + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
if (vertex_x1_z < vertex_x2_z)
{
k = k + 1;
voxel_k = voxel_k+1;
next_alpha_index = k+1;
}
else if (vertex_x1_z > vertex_x2_z)
{
k = k - 1;
voxel_k = voxel_k-1;
next_alpha_index = k-1;
}
alpha_z = (volumn_z * next_alpha_index + boundary_voxel_z - vertex_x1_z )* inv_z_diff;
}
else if ( (alpha_y == alpha_z) && (alpha_y < alpha_x))// && (sphere_range<=1.0f) )
// y = z < x
{
// d_f_weightedLenSum[voxel_k*M*N + voxel_j*M + voxel_i]+=d_x1_x2 * (alpha_y - alpha_c)* proj_val;
// d_f_LenSum[voxel_k*M*N + voxel_j*M + voxel_i]+=d_x1_x2 * (alpha_y - alpha_c);
atomicAdd(d_f_weightedLenSum + voxel_k*M*N + voxel_j*M + voxel_i, d_x1_x2 * (alpha_y - alpha_c) * proj_val);
atomicAdd(d_f_LenSum + voxel_k*M*N + voxel_j*M + voxel_i, d_x1_x2 * (alpha_y - alpha_c));
alpha_c = alpha_y;
N_total_sec = N_total_sec -2;
if (vertex_x1_y < vertex_x2_y)
{
j = j + 1;
voxel_j = voxel_j+1;
next_alpha_index = j+1;
}
else if (vertex_x1_y > vertex_x2_y)
{
j = j - 1;
voxel_j = voxel_j-1;
next_alpha_index = j-1;
}
alpha_y = (volumn_y * next_alpha_index + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
if (vertex_x1_z < vertex_x2_z)
{
k = k + 1;
voxel_k = voxel_k+1;
next_alpha_index = k+1;
}
else if (vertex_x1_z > vertex_x2_z)
{
k = k - 1;
voxel_k = voxel_k-1;
next_alpha_index = k-1;
}
alpha_z = (volumn_z * next_alpha_index + boundary_voxel_z - vertex_x1_z )* inv_z_diff;
}
else if ( (alpha_x == alpha_z) && (alpha_x == alpha_y))// && (sphere_range<=1.0f) )
// x=y=z
{
// d_f_weightedLenSum[voxel_k*M*N + voxel_j*M + voxel_i]+=d_x1_x2 * (alpha_x - alpha_c) * proj_val;
// d_f_LenSum[voxel_k*M*N + voxel_j*M + voxel_i]+=d_x1_x2 * (alpha_x - alpha_c);
atomicAdd(d_f_weightedLenSum + voxel_k*M*N + voxel_j*M + voxel_i, d_x1_x2 * (alpha_x - alpha_c) * proj_val);
atomicAdd(d_f_LenSum + voxel_k*M*N + voxel_j*M + voxel_i, d_x1_x2 * (alpha_x - alpha_c));
alpha_c = alpha_x; //(33) Update the current location
N_total_sec = N_total_sec - 3;
if (vertex_x1_x < vertex_x2_x)
{
i = i + 1;
voxel_i = voxel_i +1;
next_alpha_index = i+1;
}
if (vertex_x1_x > vertex_x2_x)
{
i = i - 1; //(29)
voxel_i = voxel_i-1;
next_alpha_index = i-1;
}
alpha_x = (volumn_x * next_alpha_index + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
if (vertex_x1_y < vertex_x2_y)
{
j = j + 1;
voxel_j = voxel_j+1;
next_alpha_index = j+1;
}
else if (vertex_x1_y > vertex_x2_y)
{
j = j - 1;
voxel_j = voxel_j-1;
next_alpha_index = j-1;
}
alpha_y = (volumn_y * next_alpha_index + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
if (vertex_x1_z < vertex_x2_z)
{
k = k + 1;
voxel_k = voxel_k+1;
next_alpha_index = k+1;
}
else if (vertex_x1_z > vertex_x2_z)
{
k = k - 1;
voxel_k = voxel_k-1;
next_alpha_index = k-1;
}
alpha_z = (volumn_z * next_alpha_index + boundary_voxel_z - vertex_x1_z )* inv_z_diff;
}
}
}// end tracing the ray
}//end if the ray interacts with the volume
}
__global__ void update(float *d_f, float *d_f_weightedLenSum , float *d_f_LenSum, float beta)
{
int Idx_image_x = threadIdx.x;
int Idx_image_y = blockIdx.x;
int Idx_image_z = blockIdx.y;
int image_voxel_index = Idx_image_z*M*N + Idx_image_y*M + Idx_image_x;
if (d_f_LenSum[image_voxel_index] > volumn_x*1e-6)
d_f[image_voxel_index] += beta * d_f_weightedLenSum[image_voxel_index] / d_f_LenSum[image_voxel_index];
}
__global__ void reduce_norm_2_kernel_l1(float *g_idata, float *g_odata, unsigned int n)
{
//load shared_mem
extern __shared__ float sdata[];
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.y* blockDim.x*gridDim.x + blockIdx.x*blockDim.x + threadIdx.x;
sdata[tid] = (i < n) ? (g_idata[i]*g_idata[i]) : 0;
__syncthreads();
// do reduction in shared mem
for(unsigned int s=blockDim.x/2; s>0; s>>=1)
{
if (tid < s)
{
sdata[tid] += sdata[tid + s];
}
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.y*gridDim.x + blockIdx.x] = sdata[0];
}
__global__ void reduce_norm_tv_kernel_l1(float *g_idata, float *g_odata, unsigned int n)
{
//load shared_mem
extern __shared__ float sdata[];
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.y* blockDim.x*gridDim.x + blockIdx.x*blockDim.x + threadIdx.x;
sdata[tid] = (i < n) ? (g_idata[i]) : 0;
__syncthreads();
// do reduction in shared mem
for(unsigned int s=blockDim.x/2; s>0; s>>=1)
{
if (tid < s)
{
sdata[tid] += sdata[tid + s];
}
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.y*gridDim.x + blockIdx.x] = sdata[0];
}
__global__ void reduce_norm_2_kernel_l2(float *g_idata, float *g_odata, unsigned int n)
{
//load shared mem
extern __shared__ float sdata[];
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
sdata[tid] = (i < n) ? fabs(g_idata[i]) : 0;
__syncthreads();
// do reduction in shared mem
for(unsigned int s=blockDim.x/2; s>0; s>>=1)
{
if (tid < s)
{
sdata[tid] += sdata[tid + s];
}
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
__global__ void reduce_norm_2_kernel_end(float *g_idata, float *g_odata, unsigned int n)
{
extern __shared__ float sdata[];
unsigned int tid = threadIdx.x;
sdata[tid] = (tid < n) ? fabs(g_idata[tid]) : 0;
__syncthreads();
// do reduction in shared mem
for(unsigned int s=blockDim.x/2; s>0; s>>=1)
{
if (tid < s)
{
sdata[tid] += sdata[tid + s];
}
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[0] = sqrt(sdata[0]);
}
__global__ void tv_gradient_matrix_3d_kernel(float *df, float *d_volumn, float epi)
{
int t_id, bx_id, by_id;
t_id = threadIdx.x+1;
bx_id = blockIdx.x+1;
by_id = blockIdx.y+1;
float stl, s_sub_1_tl, s_t_sub_1_l, st_l_sub_1;
float s_add_1_tl, s_add_1_t_sub_1_l, s_add_1_t_l_sub_1;
float s_t_add_1_l, s_sub_1_t_add_1_l, s_t_add_1_l_sub_1;
float st_l_add_1, s_sub_1_t_l_add_1, s_t_sub_1_l_add_1;
stl = d_volumn[by_id*N*M + bx_id*M + t_id];
s_sub_1_tl = d_volumn[(by_id-1)*N*M + bx_id*M + t_id];
s_t_sub_1_l = d_volumn[by_id*N*M + (bx_id-1)*M + t_id];
st_l_sub_1 = d_volumn[by_id*N*M + bx_id*M + t_id-1];
s_add_1_tl = d_volumn[(by_id+1)*N*M + bx_id*M + t_id];
s_add_1_t_sub_1_l = d_volumn[(by_id+1)*N*M + (bx_id-1)*M + t_id];
s_add_1_t_l_sub_1 = d_volumn[(by_id+1)*N*M + bx_id*M + t_id-1];
s_t_add_1_l = d_volumn[by_id*N*M + (bx_id+1)*M + t_id];
s_sub_1_t_add_1_l = d_volumn[(by_id-1)*N*M + (bx_id+1)*M + t_id];
s_t_add_1_l_sub_1 = d_volumn[by_id*N*M + (bx_id+1)*M + t_id-1];
st_l_add_1 =d_volumn[by_id*N*M + bx_id*M + t_id + 1];
s_sub_1_t_l_add_1 = d_volumn[(by_id-1)*N*M + bx_id*M + t_id + 1];
s_t_sub_1_l_add_1 = d_volumn[by_id*N*M + (bx_id-1)*M + t_id + 1];
df[by_id*N*M + bx_id*M + t_id] = ((stl - s_sub_1_tl) + (stl - s_t_sub_1_l) + (stl - st_l_sub_1) ) /sqrt(epi + (stl - s_sub_1_tl)* (stl - s_sub_1_tl) + (stl - s_t_sub_1_l)* (stl - s_t_sub_1_l) + (stl - st_l_sub_1)* (stl - st_l_sub_1) )
- (s_add_1_tl - stl)/sqrt(epi + (s_add_1_tl - stl)*(s_add_1_tl - stl) + (s_add_1_tl - s_add_1_t_sub_1_l)*(s_add_1_tl - s_add_1_t_sub_1_l) + (s_add_1_tl - s_add_1_t_l_sub_1)*(s_add_1_tl - s_add_1_t_l_sub_1))
- (s_t_add_1_l - stl)/sqrt(epi + (s_t_add_1_l - s_sub_1_t_add_1_l)*(s_t_add_1_l - s_sub_1_t_add_1_l) + (s_t_add_1_l - stl)*(s_t_add_1_l - stl) + (s_t_add_1_l - s_t_add_1_l_sub_1)* (s_t_add_1_l - s_t_add_1_l_sub_1))
- (st_l_add_1 - stl)/sqrt(epi + (st_l_add_1 - s_sub_1_t_l_add_1)*(st_l_add_1 - s_sub_1_t_l_add_1) + (st_l_add_1 - s_t_sub_1_l_add_1)*(st_l_add_1 - s_t_sub_1_l_add_1) + (st_l_add_1 - stl)* (st_l_add_1 - stl));
}
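// Note: the expression above appears to be the per-voxel derivative of the
// eps-smoothed isotropic total variation
//     TV_eps(f) = sum_{s,t,l} sqrt( eps + (f[s,t,l]-f[s-1,t,l])^2
//                                       + (f[s,t,l]-f[s,t-1,l])^2
//                                       + (f[s,t,l]-f[s,t,l-1])^2 ),
// i.e. d TV_eps / d f[s,t,l] collects the term centred at (s,t,l) plus the three
// neighbouring terms at (s+1,t,l), (s,t+1,l) and (s,t,l+1) in which f[s,t,l] also
// appears; the kernel argument `epi` plays the role of eps, and the (s,t,l) labels
// are illustrative index names, not identifiers from this file.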
// Per-voxel total-variation magnitude: sqrt of the sum of squared backward differences along the three axes.
__global__ void tv_matrix_3d_kernel(float *df, float *d_volumn)
{
int t_id, bx_id, by_id;
t_id = threadIdx.x+1;
bx_id = blockIdx.x+1;
by_id = blockIdx.y+1;
float stl, s_sub_1_tl, s_t_sub_1_l, st_l_sub_1;
stl = d_volumn[by_id*N*M + bx_id*M + t_id];
s_sub_1_tl = d_volumn[(by_id-1)*N*M + bx_id*M + t_id];
s_t_sub_1_l = d_volumn[by_id*N*M + (bx_id-1)*M + t_id];
st_l_sub_1 = d_volumn[by_id*N*M + bx_id*M + t_id-1];
df[by_id*N*M + bx_id*M + t_id] = sqrt( (stl - s_sub_1_tl)*(stl - s_sub_1_tl) + (stl - s_t_sub_1_l)*(stl - s_t_sub_1_l) + (stl - st_l_sub_1)*(stl - st_l_sub_1)) ;
}
// Candidate volume for the TV backtracking line search: f_update = f - alpha_temp * grad(TV).
__global__ void backtracking_update_kernel(float *d_volumn_f_update,float *d_volumn_f, float *d_tv_gradient_matrix ,float alpha_temp)
{
unsigned int i = blockIdx.y* blockDim.x*gridDim.x + blockIdx.x*blockDim.x + threadIdx.x;
d_volumn_f_update[i] = d_volumn_f[i] - alpha_temp*d_tv_gradient_matrix[i];
}
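// Illustrative usage sketch: one way the reduction kernels above can be chained from
// the host to obtain the L2 norm of a buffer of `len` floats. The helper below is a
// hedged sketch, not part of the original pipeline: the block size, grid shape and
// buffer names are assumptions, and the second pass assumes the number of blocks from
// pass one does not exceed one block of threads.
static void l2_norm_reduce_sketch(float *d_data, float *d_partial, float *d_norm, unsigned int len)
{
    const unsigned int threads = 128;                      // power of two, required by the in-block loop
    const unsigned int blocks = (len + threads - 1) / threads;
    // pass 1: per-block partial sums of squares into d_partial[0..blocks-1]
    reduce_norm_2_kernel_l1<<<blocks, threads, threads * sizeof(float)>>>(d_data, d_partial, len);
    // pass 2: sum the per-block partial results (assumes blocks <= threads)
    reduce_norm_2_kernel_l2<<<1, threads, threads * sizeof(float)>>>(d_partial, d_partial, blocks);
    // final pass: square root of the accumulated sum, written to d_norm[0]
    reduce_norm_2_kernel_end<<<1, threads, threads * sizeof(float)>>>(d_partial, d_norm, 1);
}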
///************ GHF new Code **************///
|
e75c751dc2489f081ee682c6119a839e53f6c24c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void golGpu(int height, int width, unsigned char* pBuffer1, unsigned char* pBuffer2){
int x = blockIdx.x * 2 + threadIdx.x;
int y = blockIdx.y * 2 + threadIdx.y;
int indx = x * height + y;
pBuffer2[indx] = pBuffer1[indx];
int num = 0;
if (x-1 >= 0 && x-1 < height && y >= 0 && y < width)
num += pBuffer1[(x-1) * height + y];
if (x+1 >= 0 && x+1 < height && y >= 0 && y < width)
num += pBuffer1[(x+1) * height + y];
if (x >= 0 && x < height && y-1 >= 0 && y-1 < width)
num += pBuffer1[x * height + (y-1)];
if (x >= 0 && x < height && y+1 >= 0 && y+1 < width)
num += pBuffer1[x * height + (y+1)];
if (x-1 >= 0 && x-1 < height && y-1 >= 0 && y-1 < width)
num += pBuffer1[(x-1) * height + (y-1)];
if (x-1 >= 0 && x-1 < height && y+1 >= 0 && y+1 < width)
num += pBuffer1[(x-1) * height + (y+1)];
if (x+1 >= 0 && x+1 < height && y-1 >= 0 && y-1 < width)
num += pBuffer1[(x+1) * height + (y-1)];
if (x+1 >= 0 && x+1 < height && y+1 >= 0 && y+1 < width)
num += pBuffer1[(x+1) * height + (y+1)];
if(num < 2)
pBuffer2[indx] = 0x0;
if(num > 3)
pBuffer2[indx] = 0x0;
if(num == 3 && !pBuffer1[indx])
pBuffer2[indx] = 0x1;
//return num;
} | e75c751dc2489f081ee682c6119a839e53f6c24c.cu | #include "includes.h"
__global__ void golGpu(int height, int width, unsigned char* pBuffer1, unsigned char* pBuffer2){
int x = blockIdx.x * 2 + threadIdx.x;
int y = blockIdx.y * 2 + threadIdx.y;
int indx = x * height + y;
pBuffer2[indx] = pBuffer1[indx];
int num = 0;
if (x-1 >= 0 && x-1 < height && y >= 0 && y < width)
num += pBuffer1[(x-1) * height + y];
if (x+1 >= 0 && x+1 < height && y >= 0 && y < width)
num += pBuffer1[(x+1) * height + y];
if (x >= 0 && x < height && y-1 >= 0 && y-1 < width)
num += pBuffer1[x * height + (y-1)];
if (x >= 0 && x < height && y+1 >= 0 && y+1 < width)
num += pBuffer1[x * height + (y+1)];
if (x-1 >= 0 && x-1 < height && y-1 >= 0 && y-1 < width)
num += pBuffer1[(x-1) * height + (y-1)];
if (x-1 >= 0 && x-1 < height && y+1 >= 0 && y+1 < width)
num += pBuffer1[(x-1) * height + (y+1)];
if (x+1 >= 0 && x+1 < height && y-1 >= 0 && y-1 < width)
num += pBuffer1[(x+1) * height + (y-1)];
if (x+1 >= 0 && x+1 < height && y+1 >= 0 && y+1 < width)
num += pBuffer1[(x+1) * height + (y+1)];
if(num < 2)
pBuffer2[indx] = 0x0;
if(num > 3)
pBuffer2[indx] = 0x0;
if(num == 3 && !pBuffer1[indx])
pBuffer2[indx] = 0x1;
//return num;
} |
60cdd091d9f4d549cf5283e4031594c84ba54845.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cudacpp/Runtime.h>
#include <cstdio>
__device__ void moveToShared(const float * mat, float subMat[16][16], const int matrixFullSize, const int rowStart, const int colStart)
{
subMat[threadIdx.x][threadIdx.y] = mat[matrixFullSize * (rowStart + threadIdx.x) + colStart + threadIdx.y];
}
__device__ void addFromShared(float * mat, const float subMat[16][16], const int rowStart, const int colStart)
{
mat[256 * (rowStart + threadIdx.x) + colStart + threadIdx.y] += subMat[threadIdx.x][threadIdx.y];
}
__device__ void matMul16x16(const float a[16][16],
const float b[16][16],
float c[16][16])
{
for (int i = 0; i < 16; ++i)
{
c[threadIdx.x][threadIdx.y] += a[threadIdx.x][i] * b[i][threadIdx.y];
}
}
__global__ void matMulZeroAndEmit(float * const D, const int key, int * const keySpace)
{
D[blockIdx.x * blockDim.x + threadIdx.x] = 0.0f;
// D[gridDim.x * threadIdx.x + blockIdx.x] = 0.0f;
*keySpace = key;
}
__device__ void matMul256x256(const float * const A,
const float * const B,
float * const C,
const int matrixFullSize,
const int aRowStart,
const int aColStart,
const int bRowStart,
const int bColStart)
{
__shared__ float subA[16][16];
__shared__ float subB[16][16];
__shared__ float subC[16][16];
subC[threadIdx.y][threadIdx.x] = 0;
for (int i = 0; i < 16; ++i)
{
moveToShared(A, subA, matrixFullSize, aRowStart + i * 16, aColStart);
for (int j = 0; j < 16; ++j)
{
moveToShared(B, subB, matrixFullSize, bRowStart, bColStart + j * 16);
__syncthreads();
matMul16x16(subA, subB, subC);
addFromShared(C, subC, i * 16, j * 16);
}
}
}
__global__ void matMulRowXCol(const float * const A,
const float * const B,
float * const C,
const int matrixFullSize,
const int aRowStart,
const int bColStart)
{
matMul256x256(A,
B,
C + 256 * 256 * (blockIdx.x * blockDim.x + blockIdx.y),
matrixFullSize,
aRowStart,
256 * blockIdx.x,
256 * blockIdx.y,
bColStart);
}
__global__ void matMulCombine( float * const C,
const float * const D,
const int matrixFullSize,
const int cRowStart,
const int cColStart)
{
__shared__ float sum[16][16];
const int numSums = matrixFullSize / 256;
sum[threadIdx.x][threadIdx.y] = 0;
const int rowSize = 256;
const int colSize = 256;
const int majorRowOffset = blockIdx.x * 16;
const int majorColOffset = blockIdx.y * 16;
const int minorRowOffset = threadIdx.x;
const int minorColOffset = threadIdx.y;
const int rowOffset = (majorRowOffset + minorRowOffset) * rowSize;
const int colOffset = majorColOffset + minorColOffset;
for (int i = 0; i < numSums; ++i)
{
const int matrixOffset = rowSize * colSize * i;
sum[threadIdx.x][threadIdx.y] += D[matrixOffset + rowOffset + colOffset];
}
C[cRowStart * matrixFullSize + rowOffset + colOffset + cColStart] = sum[threadIdx.x][threadIdx.y];
}
__host__ void matMulMapperExecute(const float * const A,
const float * const B,
float * const C,
float * const D,
const int matrixFullSize,
const int key,
int * const keySpace,
hipStream_t & stream)
{
dim3 gs(1, 1, 1);
dim3 bs(16, 16, 1);
hipLaunchKernelGGL(( matMulZeroAndEmit), dim3(matrixFullSize), dim3(256), 0, stream, D, key, keySpace);
gs.x = gs.y = matrixFullSize / 256;
for (int i = 0; i < matrixFullSize / 256; ++i)
{
for (int j = 0; j < matrixFullSize / 256; ++j)
{
hipLaunchKernelGGL(( matMulRowXCol), dim3(gs), dim3(bs), 0, stream, A, B, D, matrixFullSize, i * 256, j * 256);
hipLaunchKernelGGL(( matMulCombine), dim3(bs), dim3(bs), 0, stream, C, D, matrixFullSize, i * 256, j * 256);
}
}
}
| 60cdd091d9f4d549cf5283e4031594c84ba54845.cu | #include <cudacpp/Runtime.h>
#include <cstdio>
__device__ void moveToShared(const float * mat, float subMat[16][16], const int matrixFullSize, const int rowStart, const int colStart)
{
subMat[threadIdx.x][threadIdx.y] = mat[matrixFullSize * (rowStart + threadIdx.x) + colStart + threadIdx.y];
}
__device__ void addFromShared(float * mat, const float subMat[16][16], const int rowStart, const int colStart)
{
mat[256 * (rowStart + threadIdx.x) + colStart + threadIdx.y] += subMat[threadIdx.x][threadIdx.y];
}
__device__ void matMul16x16(const float a[16][16],
const float b[16][16],
float c[16][16])
{
for (int i = 0; i < 16; ++i)
{
c[threadIdx.x][threadIdx.y] += a[threadIdx.x][i] * b[i][threadIdx.y];
}
}
__global__ void matMulZeroAndEmit(float * const D, const int key, int * const keySpace)
{
D[blockIdx.x * blockDim.x + threadIdx.x] = 0.0f;
// D[gridDim.x * threadIdx.x + blockIdx.x] = 0.0f;
*keySpace = key;
}
__device__ void matMul256x256(const float * const A,
const float * const B,
float * const C,
const int matrixFullSize,
const int aRowStart,
const int aColStart,
const int bRowStart,
const int bColStart)
{
__shared__ float subA[16][16];
__shared__ float subB[16][16];
__shared__ float subC[16][16];
subC[threadIdx.y][threadIdx.x] = 0;
for (int i = 0; i < 16; ++i)
{
moveToShared(A, subA, matrixFullSize, aRowStart + i * 16, aColStart);
for (int j = 0; j < 16; ++j)
{
moveToShared(B, subB, matrixFullSize, bRowStart, bColStart + j * 16);
__syncthreads();
matMul16x16(subA, subB, subC);
addFromShared(C, subC, i * 16, j * 16);
}
}
}
__global__ void matMulRowXCol(const float * const A,
const float * const B,
float * const C,
const int matrixFullSize,
const int aRowStart,
const int bColStart)
{
matMul256x256(A,
B,
C + 256 * 256 * (blockIdx.x * blockDim.x + blockIdx.y),
matrixFullSize,
aRowStart,
256 * blockIdx.x,
256 * blockIdx.y,
bColStart);
}
__global__ void matMulCombine( float * const C,
const float * const D,
const int matrixFullSize,
const int cRowStart,
const int cColStart)
{
__shared__ float sum[16][16];
const int numSums = matrixFullSize / 256;
sum[threadIdx.x][threadIdx.y] = 0;
const int rowSize = 256;
const int colSize = 256;
const int majorRowOffset = blockIdx.x * 16;
const int majorColOffset = blockIdx.y * 16;
const int minorRowOffset = threadIdx.x;
const int minorColOffset = threadIdx.y;
const int rowOffset = (majorRowOffset + minorRowOffset) * rowSize;
const int colOffset = majorColOffset + minorColOffset;
for (int i = 0; i < numSums; ++i)
{
const int matrixOffset = rowSize * colSize * i;
sum[threadIdx.x][threadIdx.y] += D[matrixOffset + rowOffset + colOffset];
}
C[cRowStart * matrixFullSize + rowOffset + colOffset + cColStart] = sum[threadIdx.x][threadIdx.y];
}
__host__ void matMulMapperExecute(const float * const A,
const float * const B,
float * const C,
float * const D,
const int matrixFullSize,
const int key,
int * const keySpace,
cudaStream_t & stream)
{
dim3 gs(1, 1, 1);
dim3 bs(16, 16, 1);
matMulZeroAndEmit<<<matrixFullSize, 256, 0, stream>>>(D, key, keySpace);
gs.x = gs.y = matrixFullSize / 256;
for (int i = 0; i < matrixFullSize / 256; ++i)
{
for (int j = 0; j < matrixFullSize / 256; ++j)
{
matMulRowXCol<<<gs, bs, 0, stream>>>(A, B, D, matrixFullSize, i * 256, j * 256);
matMulCombine<<<bs, bs, 0, stream>>>(C, D, matrixFullSize, i * 256, j * 256);
}
}
}
|
4c3714dc46a7fa28353197efdc8d2e291c171a23.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Implementation of a Dense (Tensorflow) or Fully Connected (PyTorch) network layer
*
* @author: Yvo Elling
* @date: 10-03-23
*/
#include <iostream>
#include <cstdlib>
#include <cstdint>
#include <array>
#include <chrono>
#include <stdio.h>
#include "hw_data.h"
typedef uint8_t CoreIdx;
#define VECTOR_LENGTH 1'000'000
#define NROF_TEST_RUNS 100
using std::chrono::high_resolution_clock;
using std::chrono::duration_cast;
using std::chrono::duration;
using std::chrono::milliseconds;
__global__ void computeDenseLayerCUDA(float* weights, float* input, float* bias, float* output) {
CoreIdx idx = threadIdx.x;
float nodeOutputSum = 0.0f;
for (int i = 0; i < VECTOR_LENGTH; ++i) {
nodeOutputSum += input[idx] * weights[i] + bias[i];
}
output[idx] = nodeOutputSum;
}
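// Note: a textbook fully connected layer computes, for each output neuron j,
//     out[j] = bias[j] + sum_i in[i] * W[j][i].
// An illustrative per-thread form (row-major W of shape [OUT][IN]; these names are
// placeholders, not identifiers from this benchmark) would be:
//     float acc = bias[j];
//     for (int i = 0; i < IN; ++i) acc += in[i] * W[j * IN + i];
//     out[j] = acc;
// The benchmark kernel above instead accumulates input[idx] * weights[i] + bias[i]
// over i, which exercises comparable arithmetic and memory traffic but is a
// simplified stand-in rather than the textbook formulation.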
void computeDenseLayerCPU(float* weights, float* input, float* bias, float* output) {
float nodeOutputSum = 0.0f;
auto t1 = high_resolution_clock::now();
for (int idx = 0; idx < VECTOR_LENGTH; ++idx) {
for (int i = 0; i < VECTOR_LENGTH; ++i) {
nodeOutputSum += input[idx] * weights[i] + bias[i];
}
output[idx] = nodeOutputSum;
}
auto t2 = high_resolution_clock::now();
int total_execution_time = duration_cast<milliseconds>(t2 - t1).count();
std::cout << "Total execution time on CPU is: " << total_execution_time << " ms" << std::endl;
}
int main (int argc, char** argv) {
std::cout << "Starting CUDA Application" << std::endl;
std::cout << "Launching CUDA Program for Dense Layer" << std::endl;
auto h_weights = (float *)calloc(VECTOR_LENGTH, sizeof(float));
auto h_input = (float *)calloc(VECTOR_LENGTH, sizeof(float));
auto h_bias = (float *)calloc(VECTOR_LENGTH, sizeof(float));
auto h_output = (float *)calloc(VECTOR_LENGTH, sizeof(float));
float *d_weights, *d_input, *d_bias, *d_output;
hipMalloc((void**)&d_weights, VECTOR_LENGTH * sizeof(float));
hipMalloc((void**)&d_input, VECTOR_LENGTH * sizeof(float));
hipMalloc((void**)&d_bias, VECTOR_LENGTH * sizeof(float));
hipMalloc((void**)&d_output, VECTOR_LENGTH * sizeof(float));
hipMemcpy(d_weights, h_weights, VECTOR_LENGTH*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_input, h_input, VECTOR_LENGTH*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_bias, h_bias, VECTOR_LENGTH*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_output, h_output, VECTOR_LENGTH*sizeof(float), hipMemcpyHostToDevice);
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
std::array<float, NROF_TEST_RUNS> execution_times;
for (int i = 0; i < NROF_TEST_RUNS; ++i) {
hipEventRecord(start);
hipLaunchKernelGGL(( computeDenseLayerCUDA), dim3(QUADRO_P2000_SM*3), dim3(QUADRO_P200_THREADS_PER_SM*3), 0, 0, d_weights, d_input, d_bias, d_output);
hipEventRecord(stop);
hipDeviceSynchronize();
hipMemcpy(h_output, d_output, VECTOR_LENGTH*sizeof(float), hipMemcpyDeviceToHost);
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
execution_times[i] = milliseconds;
}
float execution_time_sum = 0;
for (int i = 0; i < NROF_TEST_RUNS; ++i) {
execution_time_sum += execution_times[i];
}
float avg_execution_time = execution_time_sum / execution_times.size();
std::cout << "Total average kernel execution time is: " << avg_execution_time << "ms" << std::endl;
computeDenseLayerCPU(h_weights, h_input, h_bias, h_output);
hipFree(d_weights);
hipFree(d_input);
hipFree(d_bias);
hipFree(d_output);
free(h_weights);
free(h_input);
free(h_bias);
free(h_output);
} | 4c3714dc46a7fa28353197efdc8d2e291c171a23.cu | /**
* Implementation of a Dense (Tensorflow) or Fully Connected (PyTorch) network layer
*
* @author: Yvo Elling
* @date: 10-03-23
*/
#include <iostream>
#include <cstdlib>
#include <cstdint>
#include <array>
#include <chrono>
#include <stdio.h>
#include "hw_data.h"
typedef uint8_t CoreIdx;
#define VECTOR_LENGTH 1'000'000
#define NROF_TEST_RUNS 100
using std::chrono::high_resolution_clock;
using std::chrono::duration_cast;
using std::chrono::duration;
using std::chrono::milliseconds;
__global__ void computeDenseLayerCUDA(float* weights, float* input, float* bias, float* output) {
CoreIdx idx = threadIdx.x;
float nodeOutputSum = 0.0f;
for (int i = 0; i < VECTOR_LENGTH; ++i) {
nodeOutputSum += input[idx] * weights[i] + bias[i];
}
output[idx] = nodeOutputSum;
}
void computeDenseLayerCPU(float* weights, float* input, float* bias, float* output) {
float nodeOutputSum = 0.0f;
auto t1 = high_resolution_clock::now();
for (int idx = 0; idx < VECTOR_LENGTH; ++idx) {
for (int i = 0; i < VECTOR_LENGTH; ++i) {
nodeOutputSum += input[idx] * weights[i] + bias[i];
}
output[idx] = nodeOutputSum;
}
auto t2 = high_resolution_clock::now();
int total_execution_time = duration_cast<milliseconds>(t2 - t1).count();
std::cout << "Total execution time on CPU is: " << total_execution_time << " ms" << std::endl;
}
int main (int argc, char** argv) {
std::cout << "Starting CUDA Application" << std::endl;
std::cout << "Launching CUDA Program for Dense Layer" << std::endl;
auto h_weights = (float *)calloc(VECTOR_LENGTH, sizeof(float));
auto h_input = (float *)calloc(VECTOR_LENGTH, sizeof(float));
auto h_bias = (float *)calloc(VECTOR_LENGTH, sizeof(float));
auto h_output = (float *)calloc(VECTOR_LENGTH, sizeof(float));
float *d_weights, *d_input, *d_bias, *d_output;
cudaMalloc((void**)&d_weights, VECTOR_LENGTH * sizeof(float));
cudaMalloc((void**)&d_input, VECTOR_LENGTH * sizeof(float));
cudaMalloc((void**)&d_bias, VECTOR_LENGTH * sizeof(float));
cudaMalloc((void**)&d_output, VECTOR_LENGTH * sizeof(float));
cudaMemcpy(d_weights, h_weights, VECTOR_LENGTH*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_input, h_input, VECTOR_LENGTH*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_bias, h_bias, VECTOR_LENGTH*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_output, h_output, VECTOR_LENGTH*sizeof(float), cudaMemcpyHostToDevice);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
std::array<float, NROF_TEST_RUNS> execution_times;
for (int i = 0; i < NROF_TEST_RUNS; ++i) {
cudaEventRecord(start);
computeDenseLayerCUDA<<<QUADRO_P2000_SM*3, QUADRO_P200_THREADS_PER_SM*3>>>(d_weights, d_input, d_bias, d_output);
cudaEventRecord(stop);
cudaDeviceSynchronize();
cudaMemcpy(h_output, d_output, VECTOR_LENGTH*sizeof(float), cudaMemcpyDeviceToHost);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
execution_times[i] = milliseconds;
}
float execution_time_sum = 0;
for (int i = 0; i < NROF_TEST_RUNS; ++i) {
execution_time_sum += execution_times[i];
}
float avg_execution_time = execution_time_sum / execution_times.size();
std::cout << "Total average kernel execution time is: " << avg_execution_time << "ms" << std::endl;
computeDenseLayerCPU(h_weights, h_input, h_bias, h_output);
cudaFree(d_weights);
cudaFree(d_input);
cudaFree(d_bias);
cudaFree(d_output);
free(h_weights);
free(h_input);
free(h_bias);
free(h_output);
} |
ca773018808071a2c954568cafcf0f6f15007bb7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* 1.73:cc: entry function 'generate' used 34 registers, 0 bytes smem, 0 bytes lmem, 0 bytes cmem
* ... multiprocessor occupancy 79.7% : 1632 threads over 51 warps in 3 blocksbenchmarking mandelbrot
*
* collecting 100 samples, 1 iterations each, in estimated 162.7669 s
* mean: 16.46181 ms, lb 15.81836 ms, ub 17.53077 ms, ci 0.950
* std dev: 4.200961 ms, lb 2.716338 ms, ub 6.127911 ms, ci 0.950
* found 6 outliers among 100 samples (6.0%)
* 5 (5.0%) high severe
* variance introduced by outliers: 96.774%
* variance is severely inflated by outliers
*
*
* After changing the type of booleans to Word32 to avoid a single cvt.u8.u32 in
* the inner loop:
*
* benchmarking mandelbrot
* collecting 100 samples, 1 iterations each, in estimated 161.6304 s
* mean: 12.82615 ms, lb 12.51806 ms, ub 13.14113 ms, ci 0.950
* std dev: 1.593406 ms, lb 1.439425 ms, ub 1.792564 ms, ci 0.950
* variance introduced by outliers: 85.251%
* variance is severely inflated by outliers
*/
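/*
 * Illustrative sketch of the Word8 -> Word32 change described above (names here are
 * placeholders; the generated kernel below uses v18/v21 for the same predicates):
 *
 *     Word32 escaped = (x*x + y*y > 4.0f);   // previously Word8, which forced a cvt.u8.u32
 *     x = escaped ? x : x_next;
 *
 * Keeping the escape-test predicate in a full 32-bit word removes the byte-to-word
 * conversion from the inner loop.
 */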
#include <accelerate_cuda.h>
typedef DIM2 DimOut;
extern "C" __global__ void generate(const DIM0 shIn0, const float* __restrict__ arrIn0_a3, const float* __restrict__ arrIn0_a2, const float* __restrict__ arrIn0_a1, const float* __restrict__ arrIn0_a0, const DIM2 shOut, Word32* __restrict__ arrOut_a0)
{
const int shapeSize = size(shOut);
const int gridSize = blockDim.x * gridDim.x;
int ix;
for (ix = blockDim.x * blockIdx.x + threadIdx.x; ix < shapeSize; ix += gridSize) {
const DimOut sh = fromIndex(shOut, ix);
const Int64 v0 = (Int64) 255;
const int v1 = toIndex(shIn0, shape());
const float v2 = arrIn0_a3[v1];
const float v3 = arrIn0_a2[v1];
const float v4 = arrIn0_a1[v1];
const float v5 = arrIn0_a0[v1];
const Int64 v6 = sh.a1;
const Int64 v7 = sh.a0;
const float v8 = v2 + (float) v7 * (v4 - v2) / 800.0f;
const float v9 = v3 + (float) v6 * (v5 - v3) / 600.0f;
const int v13 = (Int64) 255;
float v10 = v8;
float v11 = v9;
Int64 v12 = (Int64) 0;
for (int v19 = 0; v19 < v13; ++v19) {
const float v14 = v10 * v10 - v11 * v11;
const float v15 = v10 * v11 + v11 * v10;
const float v16 = v8 + v14;
const float v17 = v9 + v15;
const Word32 v18 = v16 * v16 + v17 * v17 > 4.0f; // Word32 rather than Word8: avoids a cvt.u8.u32 in the inner loop (see header note)
v10 = v18 ? v10 : v16;
v11 = v18 ? v11 : v17;
v12 = v18 ? v12 : (Int64) 1 + v12;
}
const Int64 v20 = (Int64) v12;
const Word32 v21 = v0 == v20; // Word32 rather than Word8 (see header note)
const Int64 v22 = v0 - v20;
const Word8 v23 = (Word8) 0;
const Word8 v24 = (Word8) ((Int64) 7 * v22);
const Word8 v25 = (Word8) ((Int64) 5 * v22);
const Word8 v26 = (Word8) ((Int64) 3 * v22);
arrOut_a0[ix] = v21 ? (Word32) 4278190080 : (Word32) 4294967295 - ((Word32) v23 + (Word32) 256 * (Word32) v24 + (Word32) 65536 * (Word32) v25 + (Word32) 16777216 * (Word32) v26);
}
}
| ca773018808071a2c954568cafcf0f6f15007bb7.cu | /*
* 1.73:cc: entry function 'generate' used 34 registers, 0 bytes smem, 0 bytes lmem, 0 bytes cmem
* ... multiprocessor occupancy 79.7% : 1632 threads over 51 warps in 3 blocksbenchmarking mandelbrot
*
* collecting 100 samples, 1 iterations each, in estimated 162.7669 s
* mean: 16.46181 ms, lb 15.81836 ms, ub 17.53077 ms, ci 0.950
* std dev: 4.200961 ms, lb 2.716338 ms, ub 6.127911 ms, ci 0.950
* found 6 outliers among 100 samples (6.0%)
* 5 (5.0%) high severe
* variance introduced by outliers: 96.774%
* variance is severely inflated by outliers
*
*
* After changing the type of booleans to Word32 to avoid a single cvt.u8.u32 in
* the inner loop:
*
* benchmarking mandelbrot
* collecting 100 samples, 1 iterations each, in estimated 161.6304 s
* mean: 12.82615 ms, lb 12.51806 ms, ub 13.14113 ms, ci 0.950
* std dev: 1.593406 ms, lb 1.439425 ms, ub 1.792564 ms, ci 0.950
* variance introduced by outliers: 85.251%
* variance is severely inflated by outliers
*/
#include <accelerate_cuda.h>
typedef DIM2 DimOut;
extern "C" __global__ void generate(const DIM0 shIn0, const float* __restrict__ arrIn0_a3, const float* __restrict__ arrIn0_a2, const float* __restrict__ arrIn0_a1, const float* __restrict__ arrIn0_a0, const DIM2 shOut, Word32* __restrict__ arrOut_a0)
{
const int shapeSize = size(shOut);
const int gridSize = blockDim.x * gridDim.x;
int ix;
for (ix = blockDim.x * blockIdx.x + threadIdx.x; ix < shapeSize; ix += gridSize) {
const DimOut sh = fromIndex(shOut, ix);
const Int64 v0 = (Int64) 255;
const int v1 = toIndex(shIn0, shape());
const float v2 = arrIn0_a3[v1];
const float v3 = arrIn0_a2[v1];
const float v4 = arrIn0_a1[v1];
const float v5 = arrIn0_a0[v1];
const Int64 v6 = sh.a1;
const Int64 v7 = sh.a0;
const float v8 = v2 + (float) v7 * (v4 - v2) / 800.0f;
const float v9 = v3 + (float) v6 * (v5 - v3) / 600.0f;
const int v13 = (Int64) 255;
float v10 = v8;
float v11 = v9;
Int64 v12 = (Int64) 0;
for (int v19 = 0; v19 < v13; ++v19) {
const float v14 = v10 * v10 - v11 * v11;
const float v15 = v10 * v11 + v11 * v10;
const float v16 = v8 + v14;
const float v17 = v9 + v15;
const Word32 v18 = v16 * v16 + v17 * v17 > 4.0f; // Word32 rather than Word8: avoids a cvt.u8.u32 in the inner loop (see header note)
v10 = v18 ? v10 : v16;
v11 = v18 ? v11 : v17;
v12 = v18 ? v12 : (Int64) 1 + v12;
}
const Int64 v20 = (Int64) v12;
const Word32 v21 = v0 == v20; // Word32 rather than Word8 (see header note)
const Int64 v22 = v0 - v20;
const Word8 v23 = (Word8) 0;
const Word8 v24 = (Word8) ((Int64) 7 * v22);
const Word8 v25 = (Word8) ((Int64) 5 * v22);
const Word8 v26 = (Word8) ((Int64) 3 * v22);
arrOut_a0[ix] = v21 ? (Word32) 4278190080 : (Word32) 4294967295 - ((Word32) v23 + (Word32) 256 * (Word32) v24 + (Word32) 65536 * (Word32) v25 + (Word32) 16777216 * (Word32) v26);
}
}
|
659d854a62b71fa186320db9e89eed6a779639ae.hip | // !!! This is a file automatically generated by hipify!!!
/// System includes
#include <stdio.h>
#include <assert.h>
// CUDA runtime
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <stddef.h>
#include <openssl/rand.h>
#include "portable_endian.h"
#include <time.h>
#define INPUT_LEN (32)
#define N ((INPUT_LEN+8+1)+3) >> 2
#define M (INPUT_LEN >> 2)
#define OUTPUT_LEN 32
#define THREADS_PER_BLOCK (512)
#define MAX_HASH_NUM (1<<30)
#define MAX_GPU_NUM (1024)
#define HASH_NUM (1<<27)
#define DELIMITER (0x06)
#define ROUND (43)
#define ROL32(a,b) (((a)<<(b))|((a)>>(32-(b))))
#define ROL_ADD(a,b) a += b; a = ROL32(a, 8); b = ROL32(b, 24) + a;
#define EaglesongPermutation() \
{ \
for(int i = 0, j=0; i < ROUND ; ++i, j+=16) { \
tmp = s0 ^ s4 ^ s12 ^ s15; s0 = tmp^s5 ^ s6 ^ s7; s1 = tmp^s1 ^ s8 ^ s13; \
tmp = s1 ^ s2 ^ s6 ^ s14; s2 = tmp^s7 ^ s8 ^ s9; s3 = tmp^s3 ^ s10 ^ s15; \
tmp = s0 ^ s3 ^ s4 ^ s8; s4 = tmp^s9 ^ s10 ^ s11; s5 = tmp^s1 ^ s5 ^ s12; \
tmp = s2 ^ s5 ^ s6 ^ s10; s6 = tmp^s11 ^ s12 ^ s13; s7 = tmp^s3 ^ s7 ^ s14; \
tmp = s4 ^ s7 ^ s8 ^ s12; s8 = tmp^s13 ^ s14 ^ s15; s9 = tmp^s0 ^ s5 ^ s9; \
tmp = s6 ^ s9 ^ s10 ^ s14; s10 = tmp^s0 ^ s1 ^ s15; s11 = tmp^s2 ^ s7 ^ s11; \
tmp = s0 ^ s8 ^ s11 ^ s12; s12 = tmp^s1 ^ s2 ^ s3; s13 = tmp^s4 ^ s9 ^ s13; \
tmp = s3 ^ s5 ^ s13 ^ s14; s14 = tmp^s2 ^ s4 ^ s10; s15 = tmp^s0 ^ s1 ^ s6 ^ s7 ^ s8 ^ s9 ^ s15; \
s0 ^= ROL32(s0, 2) ^ ROL32(s0, 4) ^ gpu_injection_constants[(j ^ 0)]; \
s1 ^= ROL32(s1, 13) ^ ROL32(s1, 22) ^ gpu_injection_constants[(j ^ 1)]; \
ROL_ADD(s0, s1); \
s2 ^= ROL32(s2, 4) ^ ROL32(s2, 19) ^ gpu_injection_constants[(j ^ 2)]; \
s3 ^= ROL32(s3, 3) ^ ROL32(s3, 14) ^ gpu_injection_constants[(j ^ 3)]; \
ROL_ADD(s2, s3); \
s4 ^= ROL32(s4, 27) ^ ROL32(s4, 31) ^ gpu_injection_constants[(j ^ 4)]; \
s5 ^= ROL32(s5, 3) ^ ROL32(s5, 8) ^ gpu_injection_constants[(j ^ 5)]; \
ROL_ADD(s4, s5); \
s6 ^= ROL32(s6, 17) ^ ROL32(s6, 26) ^ gpu_injection_constants[(j ^ 6)]; \
s7 ^= ROL32(s7, 3) ^ ROL32(s7, 12) ^ gpu_injection_constants[(j ^ 7)]; \
ROL_ADD(s6, s7); \
s8 ^= ROL32(s8, 18) ^ ROL32(s8, 22) ^ gpu_injection_constants[(j ^ 8)]; \
s9 ^= ROL32(s9, 12) ^ ROL32(s9, 18) ^ gpu_injection_constants[(j ^ 9)]; \
ROL_ADD(s8, s9); \
s10 ^= ROL32(s10, 4) ^ ROL32(s10, 7) ^ gpu_injection_constants[(j ^ 10)]; \
s11 ^= ROL32(s11, 4) ^ ROL32(s11, 31) ^ gpu_injection_constants[(j ^ 11)]; \
ROL_ADD(s10, s11); \
s12 ^= ROL32(s12, 12) ^ ROL32(s12, 27) ^ gpu_injection_constants[(j ^ 12)]; \
s13 ^= ROL32(s13, 7) ^ ROL32(s13, 17) ^ gpu_injection_constants[(j ^ 13)]; \
ROL_ADD(s12, s13); \
s14 ^= ROL32(s14, 7) ^ ROL32(s14, 8) ^ gpu_injection_constants[(j ^ 14)]; \
s15 ^= ROL32(s15, 1) ^ ROL32(s15, 13) ^ gpu_injection_constants[(j ^ 15)]; \
ROL_ADD(s14, s15); \
} \
}
__constant__ uint32_t gpu_injection_constants[688] = //16*43 = 2752B
{
0x6e9e40ae, 0x71927c02, 0x9a13d3b1, 0xdaec32ad, 0x3d8951cf, 0xe1c9fe9a, 0xb806b54c, 0xacbbf417,
0xd3622b3b, 0xa082762a, 0x9edcf1c0, 0xa9bada77, 0x7f91e46c, 0xcb0f6e4f, 0x265d9241, 0xb7bdeab0,
0x6260c9e6, 0xff50dd2a, 0x9036aa71, 0xce161879, 0xd1307cdf, 0x89e456df, 0xf83133e2, 0x65f55c3d,
0x94871b01, 0xb5d204cd, 0x583a3264, 0x5e165957, 0x4cbda964, 0x675fca47, 0xf4a3033e, 0x2a417322,
0x3b61432f, 0x7f5532f2, 0xb609973b, 0x1a795239, 0x31b477c9, 0xd2949d28, 0x78969712, 0x0eb87b6e,
0x7e11d22d, 0xccee88bd, 0xeed07eb8, 0xe5563a81, 0xe7cb6bcf, 0x25de953e, 0x4d05653a, 0x0b831557,
0x94b9cd77, 0x13f01579, 0x794b4a4a, 0x67e7c7dc, 0xc456d8d4, 0x59689c9b, 0x668456d7, 0x22d2a2e1,
0x38b3a828, 0x0315ac3c, 0x438d681e, 0xab7109c5, 0x97ee19a8, 0xde062b2e, 0x2c76c47b, 0x0084456f,
0x908f0fd3, 0xa646551f, 0x3e826725, 0xd521788e, 0x9f01c2b0, 0x93180cdc, 0x92ea1df8, 0x431a9aae,
0x7c2ea356, 0xda33ad03, 0x46926893, 0x66bde7d7, 0xb501cc75, 0x1f6e8a41, 0x685250f4, 0x3bb1f318,
0xaf238c04, 0x974ed2ec, 0x5b159e49, 0xd526f8bf, 0x12085626, 0x3e2432a9, 0x6bd20c48, 0x1f1d59da,
0x18ab1068, 0x80f83cf8, 0x2c8c11c0, 0x7d548035, 0x0ff675c3, 0xfed160bf, 0x74bbbb24, 0xd98e006b,
0xdeaa47eb, 0x05f2179e, 0x437b0b71, 0xa7c95f8f, 0x00a99d3b, 0x3fc3c444, 0x72686f8e, 0x00fd01a9,
0xdedc0787, 0xc6af7626, 0x7012fe76, 0xf2a5f7ce, 0x9a7b2eda, 0x5e57fcf2, 0x4da0d4ad, 0x5c63b155,
0x34117375, 0xd4134c11, 0x2ea77435, 0x5278b6de, 0xab522c4c, 0xbc8fc702, 0xc94a09e4, 0xebb93a9e,
0x91ecb65e, 0x4c52ecc6, 0x8703bb52, 0xcb2d60aa, 0x30a0538a, 0x1514f10b, 0x157f6329, 0x3429dc3d,
0x5db73eb2, 0xa7a1a969, 0x7286bd24, 0x0df6881e, 0x3785ba5f, 0xcd04623a, 0x02758170, 0xd827f556,
0x99d95191, 0x84457eb1, 0x58a7fb22, 0xd2967c5f, 0x4f0c33f6, 0x4a02099a, 0xe0904821, 0x94124036,
0x496a031b, 0x780b69c4, 0xcf1a4927, 0x87a119b8, 0xcdfaf4f8, 0x4cf9cd0f, 0x27c96a84, 0x6d11117e,
0x7f8cf847, 0x74ceede5, 0xc88905e6, 0x60215841, 0x7172875a, 0x736e993a, 0x010aa53c, 0x43d53c2b,
0xf0d91a93, 0x0d983b56, 0xf816663c, 0xe5d13363, 0x0a61737c, 0x09d51150, 0x83a5ac2f, 0x3e884905,
0x7b01aeb5, 0x600a6ea7, 0xb7678f7b, 0x72b38977, 0x068018f2, 0xce6ae45b, 0x29188aa8, 0xe5a0b1e9,
0xc04c2b86, 0x8bd14d75, 0x648781f3, 0xdbae1e0a, 0xddcdd8ae, 0xab4d81a3, 0x446baaba, 0x1cc0c19d,
0x17be4f90, 0x82c0e65d, 0x676f9c95, 0x5c708db2, 0x6fd4c867, 0xa5106ef0, 0x19dde49d, 0x78182f95,
0xd089cd81, 0xa32e98fe, 0xbe306c82, 0x6cd83d8c, 0x037f1bde, 0x0b15722d, 0xeddc1e22, 0x93c76559,
0x8a2f571b, 0x92cc81b4, 0x021b7477, 0x67523904, 0xc95dbccc, 0xac17ee9d, 0x944e46bc, 0x0781867e,
0xc854dd9d, 0x26e2c30c, 0x858c0416, 0x6d397708, 0xebe29c58, 0xc80ced86, 0xd496b4ab, 0xbe45e6f5,
0x10d24706, 0xacf8187a, 0x96f523cb, 0x2227e143, 0x78c36564, 0x4643adc2, 0x4729d97a, 0xcff93e0d,
0x25484bbd, 0x91c6798e, 0x95f773f4, 0x44204675, 0x2eda57ba, 0x06d313ef, 0xeeaa4466, 0x2dfa7530,
0xa8af0c9b, 0x39f1535e, 0x0cc2b7bd, 0x38a76c0e, 0x4f41071d, 0xcdaf2475, 0x49a6eff8, 0x01621748,
0x36ebacab, 0xbd6d9a29, 0x44d1cd65, 0x40815dfd, 0x55fa5a1a, 0x87cce9e9, 0xae559b45, 0xd76b4c26,
0x637d60ad, 0xde29f5f9, 0x97491cbb, 0xfb350040, 0xffe7f997, 0x201c9dcd, 0xe61320e9, 0xa90987a3,
0xe24afa83, 0x61c1e6fc, 0xcc87ff62, 0xf1c9d8fa, 0x4fd04546, 0x90ecc76e, 0x46e456b9, 0x305dceb8,
0xf627e68c, 0x2d286815, 0xc705bbfd, 0x101b6df3, 0x892dae62, 0xd5b7fb44, 0xea1d5c94, 0x5332e3cb,
0xf856f88a, 0xb341b0e9, 0x28408d9d, 0x5421bc17, 0xeb9af9bc, 0x602371c5, 0x67985a91, 0xd774907f,
0x7c4d697d, 0x9370b0b8, 0x6ff5cebb, 0x7d465744, 0x674ceac0, 0xea9102fc, 0x0de94784, 0xc793de69,
0xfe599bb1, 0xc6ad952f, 0x6d6ca9c3, 0x928c3f91, 0xf9022f05, 0x24a164dc, 0xe5e98cd3, 0x7649efdb,
0x6df3bcdb, 0x5d1e9ff1, 0x17f5d010, 0xe2686ea1, 0x6eac77fe, 0x7bb5c585, 0x88d90cbb, 0x18689163,
0x67c9efa5, 0xc0b76d9b, 0x960efbab, 0xbd872807, 0x70f4c474, 0x56c29d20, 0xd1541d15, 0x88137033,
0xe3f02b3e, 0xb6d9b28d, 0x53a077ba, 0xeedcd29e, 0xa50a6c1d, 0x12c2801e, 0x52ba335b, 0x35984614,
0xe2599aa8, 0xaf94ed1d, 0xd90d4767, 0x202c7d07, 0x77bec4f4, 0xfa71bc80, 0xfc5c8b76, 0x8d0fbbfc,
0xda366dc6, 0x8b32a0c7, 0x1b36f7fc, 0x6642dcbc, 0x6fe7e724, 0x8b5fa782, 0xc4227404, 0x3a7d1da7,
0x517ed658, 0x8a18df6d, 0x3e5c9b23, 0x1fbd51ef, 0x1470601d, 0x3400389c, 0x676b065d, 0x8864ad80,
0xea6f1a9c, 0x2db484e1, 0x608785f0, 0x8dd384af, 0x69d26699, 0x409c4e16, 0x77f9986a, 0x7f491266,
0x883ea6cf, 0xeaa06072, 0xfa2e5db5, 0x352594b4, 0x9156bb89, 0xa2fbbbfb, 0xac3989c7, 0x6e2422b1,
0x581f3560, 0x1009a9b5, 0x7e5ad9cd, 0xa9fc0a6e, 0x43e5998e, 0x7f8778f9, 0xf038f8e1, 0x5415c2e8,
0x6499b731, 0xb82389ae, 0x05d4d819, 0x0f06440e, 0xf1735aa0, 0x986430ee, 0x47ec952c, 0xbf149cc5,
0xb3cb2cb6, 0x3f41e8c2, 0x271ac51b, 0x48ac5ded, 0xf76a0469, 0x717bba4d, 0x4f5c90d6, 0x3b74f756,
0x1824110a, 0xa4fd43e3, 0x1eb0507c, 0xa9375c08, 0x157c59a7, 0x0cad8f51, 0xd66031a0, 0xabb5343f,
0xe533fa43, 0x1996e2bb, 0xd7953a71, 0xd2529b94, 0x58f0fa07, 0x4c9b1877, 0x057e990d, 0x8bfe19c4,
0xa8e2c0c9, 0x99fcaada, 0x69d2aaca, 0xdc1c4642, 0xf4d22307, 0x7fe27e8c, 0x1366aa07, 0x1594e637,
0xce1066bf, 0xdb922552, 0x9930b52a, 0xaeaa9a3e, 0x31ff7eb4, 0x5e1f945a, 0x150ac49c, 0x0ccdac2d,
0xd8a8a217, 0xb82ea6e5, 0xd6a74659, 0x67b7e3e6, 0x836eef4a, 0xb6f90074, 0x7fa3ea4b, 0xcb038123,
0xbf069f55, 0x1fa83fc4, 0xd6ebdb23, 0x16f0a137, 0x19a7110d, 0x5ff3b55f, 0xfb633868, 0xb466f845,
0xbce0c198, 0x88404296, 0xddbdd88b, 0x7fc52546, 0x63a553f8, 0xa728405a, 0x378a2bce, 0x6862e570,
0xefb77e7d, 0xc611625e, 0x32515c15, 0x6984b765, 0xe8405976, 0x9ba386fd, 0xd4eed4d9, 0xf8fe0309,
0x0ce54601, 0xbaf879c2, 0xd8524057, 0x1d8c1d7a, 0x72c0a3a9, 0x5a1ffbde, 0x82f33a45, 0x5143f446,
0x29c7e182, 0xe536c32f, 0x5a6f245b, 0x44272adb, 0xcb701d9c, 0xf76137ec, 0x0841f145, 0xe7042ecc,
0xf1277dd7, 0x745cf92c, 0xa8fe65fe, 0xd3e2d7cf, 0x54c513ef, 0x6079bc2d, 0xb66336b0, 0x101e383b,
0xbcd75753, 0x25be238a, 0x56a6f0be, 0xeeffcc17, 0x5ea31f3d, 0x0ae772f5, 0xf76de3de, 0x1bbecdad,
0xc9107d43, 0xf7e38dce, 0x618358cd, 0x5c833f04, 0xf6975906, 0xde4177e5, 0x67d314dc, 0xb4760f3e,
0x56ce5888, 0x0e8345a8, 0xbff6b1bf, 0x78dfb112, 0xf1709c1e, 0x7bb8ed8b, 0x902402b9, 0xdaa64ae0,
0x46b71d89, 0x7eee035f, 0xbe376509, 0x99648f3a, 0x0863ea1f, 0x49ad8887, 0x79bdecc5, 0x3c10b568,
0x5f2e4bae, 0x04ef20ab, 0x72f8ce7b, 0x521e1ebe, 0x14525535, 0x2e8af95b, 0x9094ccfd, 0xbcf36713,
0xc73953ef, 0xd4b91474, 0x6554ec2d, 0xe3885c96, 0x03dc73b7, 0x931688a9, 0xcbbef182, 0x2b77cfc9,
0x632a32bd, 0xd2115dcc, 0x1ae5533d, 0x32684e13, 0x4cc5a004, 0x13321bde, 0x62cbd38d, 0x78383a3b,
0xd00686f1, 0x9f601ee7, 0x7eaf23de, 0x3110c492, 0x9c351209, 0x7eb89d52, 0x6d566eac, 0xc2efd226,
0x32e9fac5, 0x52227274, 0x09f84725, 0xb8d0b605, 0x72291f02, 0x71b5c34b, 0x3dbfcbb8, 0x04a02263,
0x55ba597f, 0xd4e4037d, 0xc813e1be, 0xffddeefa, 0xc3c058f3, 0x87010f2e, 0x1dfcf55f, 0xc694eeeb,
0xa9c01a74, 0x98c2fc6b, 0xe57e1428, 0xdd265a71, 0x836b956d, 0x7e46ab1a, 0x5835d541, 0x50b32505,
0xe640913c, 0xbb486079, 0xfe496263, 0x113c5b69, 0x93cd6620, 0x5efe823b, 0x2d657b40, 0xb46dfc6c,
0x57710c69, 0xfe9fadeb, 0xb5f8728a, 0xe3224170, 0xca28b751, 0xfdabae56, 0x5ab12c3c, 0xa697c457,
0xd28fa2b7, 0x056579f2, 0x9fd9d810, 0xe3557478, 0xd88d89ab, 0xa72a9422, 0x6d47abd0, 0x405bcbd9,
0x6f83ebaf, 0x13caec76, 0xfceb9ee2, 0x2e922df7, 0xce9856df, 0xc05e9322, 0x2772c854, 0xb67f2a32,
0x6d1af28d, 0x3a78cf77, 0xdff411e4, 0x61c74ca9, 0xed8b842e, 0x72880845, 0x6e857085, 0xc6404932,
0xee37f6bc, 0x27116f48, 0x5e9ec45a, 0x8ea2a51f, 0xa5573db7, 0xa746d036, 0x486b4768, 0x5b438f3b,
0x18c54a5c, 0x64fcf08e, 0xe993cdc1, 0x35c1ead3, 0x9de07de7, 0x321b841c, 0x87423c5e, 0x071aa0f6,
0x962eb75b, 0xbb06bdd2, 0xdcdb5363, 0x389752f2, 0x83d9cc88, 0xd014adc6, 0xc71121bb, 0x2372f938,
0xcaff2650, 0x62be8951, 0x56dccaff, 0xac4084c0, 0x09712e95, 0x1d3c288f, 0x1b085744, 0xe1d3cfef,
0x5c9a812e, 0x6611fd59, 0x85e46044, 0x1981d885, 0x5a4c903f, 0x43f30d4b, 0x7d1d601b, 0xdd3c3391,
0x030ec65e, 0xc12878cd, 0x72e795fe, 0xd0c76abd, 0x1ec085db, 0x7cbb61fa, 0x93e8dd1e, 0x8582eb06,
0x73563144, 0x049d4e7e, 0x5fd5aefe, 0x7b842a00, 0x75ced665, 0xbb32d458, 0x4e83bba7, 0x8f15151f,
0x7795a125, 0xf0842455, 0x499af99d, 0x565cc7fa, 0xa3b1278d, 0x3f27ce74, 0x96ca058e, 0x8a497443,
0xa6fb8cae, 0xc115aa21, 0x17504923, 0xe4932402, 0xaea886c2, 0x8eb79af5, 0xebd5ea6b, 0xc7980d3b,
0x71369315, 0x796e6a66, 0x3a7ec708, 0xb05175c8, 0xe02b74e7, 0xeb377ad3, 0x6c8c1f54, 0xb980c374,
0x59aee281, 0x449cb799, 0xe01f5605, 0xed0e085e, 0xc9a1a3b4, 0xaac481b1, 0xc935c39c, 0xb7d8ce7f
};
#define squeeze(s, k) {\
((uint32_t *)output)[k] = (s); \
}
struct GPU_DEVICE
{
uint32_t state[N];
uint32_t nonce_id;
uint8_t *target;
uint32_t *g_state;
uint8_t *g_target;
uint32_t *g_nonce_id;
};
GPU_DEVICE *gpu_divices[MAX_GPU_NUM] = {NULL};
uint32_t gpu_divices_cnt = 0;
__global__ void eaglesong(uint32_t *state, uint8_t* target, uint32_t *nonce_id)
{
uint32_t global_id = blockDim.x * blockIdx.x + threadIdx.x;
uint32_t id = global_id % THREADS_PER_BLOCK;
uint32_t tmp;
uint32_t s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13, s14, s15;
uint8_t output[OUTPUT_LEN];
__shared__ uint32_t shared_state[N];
__shared__ uint8_t shared_target[OUTPUT_LEN];
if (id < N) shared_state[id] = state[id];
if (id < OUTPUT_LEN) shared_target[id] = target[id];
__syncthreads();
s0 = shared_state[0] ^ (global_id+1);
s1 = shared_state[1]; s2 = shared_state[2]; s3 = shared_state[3];
s4 = shared_state[4]; s5 = shared_state[5]; s6 = shared_state[6]; s7 = shared_state[7];
s8 = s9 = s10 = s11 = s12 = s13 = s14 = s15 = 0;
EaglesongPermutation();
s0 ^= shared_state[8]; s1 ^= shared_state[9]; s2 ^= shared_state[10];
EaglesongPermutation();
squeeze(s0, 0); squeeze(s1, 1); squeeze(s2, 2); squeeze(s3, 3);
squeeze(s4, 4); squeeze(s5, 5); squeeze(s6, 6); squeeze(s7, 7);
for(int k=0; k<32; ++k) {
if(output[k] < shared_target[k]) {
atomicExch(nonce_id, global_id+1);
} else if(output[k] > shared_target[k]) {
break;
}
}
}
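// Note: the byte-wise loop above is a lexicographic (big-endian) comparison of the
// 32-byte digest against the 32-byte target; it is roughly equivalent to
//     if (memcmp(output, shared_target, 32) < 0) *nonce_id = global_id + 1;
// except that the winning thread id is recorded with an atomicExch so that concurrent
// threads do not race on the write.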
int gpu_hash(uint32_t gpuid)
{
if (HASH_NUM > MAX_HASH_NUM) {
printf("HASH_NUM out of bound!!!\n");
return 0;
}
if (gpu_divices[gpuid]->g_state == NULL)
{
if (hipMalloc((void **)&gpu_divices[gpuid]->g_state, sizeof(gpu_divices[gpuid]->state)) != hipSuccess) {
printf("E01: cuda alloc memory error for state\n");
return 0;
}
}
if (gpu_divices[gpuid]->g_nonce_id == NULL)
{
if (hipMalloc((void **)&gpu_divices[gpuid]->g_nonce_id, sizeof(gpu_divices[gpuid]->nonce_id)) != hipSuccess) {
printf("E02: cuda alloc memory error for nonce\n");
return 0;
}
}
if (gpu_divices[gpuid]->g_target == NULL)
{
if (hipMalloc((void **)&gpu_divices[gpuid]->g_target, OUTPUT_LEN) != hipSuccess) {
printf("E03: cuda alloc memory error for target\n");
return 0;
}
}
if (hipMemcpy(gpu_divices[gpuid]->g_state, gpu_divices[gpuid]->state, sizeof(gpu_divices[gpuid]->state), hipMemcpyHostToDevice) != hipSuccess)
{
printf("E04: copy memory error for state\n");
return 0;
}
if (hipMemcpy(gpu_divices[gpuid]->g_target, gpu_divices[gpuid]->target, OUTPUT_LEN, hipMemcpyHostToDevice) != hipSuccess)
{
printf("E05: copy memory error for target\n");
return 0;
}
if (hipMemcpy(gpu_divices[gpuid]->g_nonce_id, &(gpu_divices[gpuid]->nonce_id), sizeof(gpu_divices[gpuid]->nonce_id), hipMemcpyHostToDevice) != hipSuccess)
{
printf("E06: copy memory error for nonce\n");
return 0;
}
eaglesong << <HASH_NUM / THREADS_PER_BLOCK, THREADS_PER_BLOCK >> >(gpu_divices[gpuid]->g_state, gpu_divices[gpuid]->g_target, gpu_divices[gpuid]->g_nonce_id);
hipDeviceSynchronize();
if (hipMemcpy(&(gpu_divices[gpuid]->nonce_id), gpu_divices[gpuid]->g_nonce_id, sizeof(gpu_divices[gpuid]->nonce_id), hipMemcpyDeviceToHost) != hipSuccess)
{
printf("E07: copy memory error for g_nonce_id\n");
return 0;
}
return HASH_NUM;
}
GPU_DEVICE* New_GPU_DEVICE()
{
GPU_DEVICE* p = NULL;
p = (GPU_DEVICE*)malloc(sizeof(GPU_DEVICE));
if (p != NULL)
{
p->target = NULL;
p->g_nonce_id = NULL;
p->g_state = NULL;
p->g_target = NULL;
} else {
printf("E08: alloc memory error!\n");
}
return p;
}
void RESET_GPU_DEVICE(uint32_t gpuid)
{
memset(gpu_divices[gpuid]->state, 0, sizeof(gpu_divices[gpuid]->state));
gpu_divices[gpuid]->nonce_id = 0;
hipFree(gpu_divices[gpuid]->g_nonce_id);
hipFree(gpu_divices[gpuid]->g_state);
hipFree(gpu_divices[gpuid]->g_target);
gpu_divices[gpuid]->target = NULL;
gpu_divices[gpuid]->g_nonce_id = NULL;
gpu_divices[gpuid]->g_state = NULL;
gpu_divices[gpuid]->g_target = NULL;
}
void GPU_Count()
{
int num;
hipDeviceProp_t prop;
hipGetDeviceCount(&num);
printf("deviceCount := %d\n", num);
gpu_divices_cnt = 0;
for (int i = 0; i<num; i++)
{
hipGetDeviceProperties(&prop, i);
printf("name:%s\n", prop.name);
printf("totalGlobalMem:%lu GB\n", prop.totalGlobalMem / 1024 / 1024 / 1024);
printf("multiProcessorCount:%d\n", prop.multiProcessorCount);
printf("maxThreadsPerBlock:%d\n", prop.maxThreadsPerBlock);
printf("sharedMemPerBlock:%lu KB\n", prop.sharedMemPerBlock/1024);
printf("major:%d,minor:%d\n", prop.major, prop.minor);
gpu_divices_cnt++;
}
if (gpu_divices_cnt > MAX_GPU_NUM)gpu_divices_cnt = MAX_GPU_NUM;
}
uint32_t EaglesongHash(uint8_t *input, uint8_t *target, uint64_t *nonce, uint32_t gpuid) {
while(!gpu_divices[gpuid]) {
gpu_divices[gpuid] = New_GPU_DEVICE();
}
uint32_t ret;
RAND_bytes((uint8_t*) &(gpu_divices[gpuid]->state[0]), 4);
RAND_bytes((uint8_t*) &(gpu_divices[gpuid]->state[1]), 4);
// absorbing
for(int j = 0, k=0; j <= M; ++j) {
uint32_t sum = 0;
for(int v=0; v < 4; ++v) {
if(k < INPUT_LEN) {
sum = (sum << 8) ^ input[k];
} else if(k == INPUT_LEN) {
sum = (sum << 8) ^ DELIMITER;
}
++k;
}
gpu_divices[gpuid]->state[j+2] = sum;
}
gpu_divices[gpuid]->target = target;
gpu_divices[gpuid]->nonce_id = 0;
ret = gpu_hash(gpuid);
if(gpu_divices[gpuid]->nonce_id) {
*nonce = le32toh(htobe32(gpu_divices[gpuid]->state[1]));
*nonce = (*nonce << 32) ^ le32toh(htobe32(((gpu_divices[gpuid]->state[0])^(gpu_divices[gpuid]->nonce_id))));
}
return ret;
}
int main() {
uint8_t input[32];
uint8_t target[32];
uint64_t nonce = 0;
for(int i=0; i<32; ++i) {
input[i] = i + 75;
}
for(int i=0; i<32; ++i) {
target[i]= 0xff;
}
target[0] = 0;
target[1] = 0;
target[2] = 0;
clock_t t1, t2;
t1 = clock();
int sum = EaglesongHash(input, target, &nonce, 0);
t2 = clock();
// for (int i = 0; i < 32; ++i) printf("%02x", output[i]); printf("\n");
printf("nonce %lu, time: %f %f ms\n", nonce, (double)(t2 - t1) / CLOCKS_PER_SEC, (double)(sum) / ((double)(t2 - t1) / (CLOCKS_PER_SEC)));
printf("%lu", nonce);
} | 659d854a62b71fa186320db9e89eed6a779639ae.cu | /// System includes
#include <stdio.h>
#include <assert.h>
// CUDA runtime
#include <cuda_runtime.h>
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <stddef.h>
#include <openssl/rand.h>
#include "portable_endian.h"
#include <time.h>
#define INPUT_LEN (32)
#define N ((INPUT_LEN+8+1)+3) >> 2
#define M (INPUT_LEN >> 2)
#define OUTPUT_LEN 32
#define THREADS_PER_BLOCK (512)
#define MAX_HASH_NUM (1<<30)
#define MAX_GPU_NUM (1024)
#define HASH_NUM (1<<27)
#define DELIMITER (0x06)
#define ROUND (43)
#define ROL32(a,b) (((a)<<(b))|((a)>>(32-(b))))
#define ROL_ADD(a,b) a += b; a = ROL32(a, 8); b = ROL32(b, 24) + a;
#define EaglesongPermutation() \
{ \
for(int i = 0, j=0; i < ROUND ; ++i, j+=16) { \
tmp = s0 ^ s4 ^ s12 ^ s15; s0 = tmp^s5 ^ s6 ^ s7; s1 = tmp^s1 ^ s8 ^ s13; \
tmp = s1 ^ s2 ^ s6 ^ s14; s2 = tmp^s7 ^ s8 ^ s9; s3 = tmp^s3 ^ s10 ^ s15; \
tmp = s0 ^ s3 ^ s4 ^ s8; s4 = tmp^s9 ^ s10 ^ s11; s5 = tmp^s1 ^ s5 ^ s12; \
tmp = s2 ^ s5 ^ s6 ^ s10; s6 = tmp^s11 ^ s12 ^ s13; s7 = tmp^s3 ^ s7 ^ s14; \
tmp = s4 ^ s7 ^ s8 ^ s12; s8 = tmp^s13 ^ s14 ^ s15; s9 = tmp^s0 ^ s5 ^ s9; \
tmp = s6 ^ s9 ^ s10 ^ s14; s10 = tmp^s0 ^ s1 ^ s15; s11 = tmp^s2 ^ s7 ^ s11; \
tmp = s0 ^ s8 ^ s11 ^ s12; s12 = tmp^s1 ^ s2 ^ s3; s13 = tmp^s4 ^ s9 ^ s13; \
tmp = s3 ^ s5 ^ s13 ^ s14; s14 = tmp^s2 ^ s4 ^ s10; s15 = tmp^s0 ^ s1 ^ s6 ^ s7 ^ s8 ^ s9 ^ s15; \
s0 ^= ROL32(s0, 2) ^ ROL32(s0, 4) ^ gpu_injection_constants[(j ^ 0)]; \
s1 ^= ROL32(s1, 13) ^ ROL32(s1, 22) ^ gpu_injection_constants[(j ^ 1)]; \
ROL_ADD(s0, s1); \
s2 ^= ROL32(s2, 4) ^ ROL32(s2, 19) ^ gpu_injection_constants[(j ^ 2)]; \
s3 ^= ROL32(s3, 3) ^ ROL32(s3, 14) ^ gpu_injection_constants[(j ^ 3)]; \
ROL_ADD(s2, s3); \
s4 ^= ROL32(s4, 27) ^ ROL32(s4, 31) ^ gpu_injection_constants[(j ^ 4)]; \
s5 ^= ROL32(s5, 3) ^ ROL32(s5, 8) ^ gpu_injection_constants[(j ^ 5)]; \
ROL_ADD(s4, s5); \
s6 ^= ROL32(s6, 17) ^ ROL32(s6, 26) ^ gpu_injection_constants[(j ^ 6)]; \
s7 ^= ROL32(s7, 3) ^ ROL32(s7, 12) ^ gpu_injection_constants[(j ^ 7)]; \
ROL_ADD(s6, s7); \
s8 ^= ROL32(s8, 18) ^ ROL32(s8, 22) ^ gpu_injection_constants[(j ^ 8)]; \
s9 ^= ROL32(s9, 12) ^ ROL32(s9, 18) ^ gpu_injection_constants[(j ^ 9)]; \
ROL_ADD(s8, s9); \
s10 ^= ROL32(s10, 4) ^ ROL32(s10, 7) ^ gpu_injection_constants[(j ^ 10)]; \
s11 ^= ROL32(s11, 4) ^ ROL32(s11, 31) ^ gpu_injection_constants[(j ^ 11)]; \
ROL_ADD(s10, s11); \
s12 ^= ROL32(s12, 12) ^ ROL32(s12, 27) ^ gpu_injection_constants[(j ^ 12)]; \
s13 ^= ROL32(s13, 7) ^ ROL32(s13, 17) ^ gpu_injection_constants[(j ^ 13)]; \
ROL_ADD(s12, s13); \
s14 ^= ROL32(s14, 7) ^ ROL32(s14, 8) ^ gpu_injection_constants[(j ^ 14)]; \
s15 ^= ROL32(s15, 1) ^ ROL32(s15, 13) ^ gpu_injection_constants[(j ^ 15)]; \
ROL_ADD(s14, s15); \
} \
}
__constant__ uint32_t gpu_injection_constants[688] = //16*43 = 2752B; the problem is that this exceeds one memory page, which means it will be slow because pages get switched during execution
{
0x6e9e40ae, 0x71927c02, 0x9a13d3b1, 0xdaec32ad, 0x3d8951cf, 0xe1c9fe9a, 0xb806b54c, 0xacbbf417,
0xd3622b3b, 0xa082762a, 0x9edcf1c0, 0xa9bada77, 0x7f91e46c, 0xcb0f6e4f, 0x265d9241, 0xb7bdeab0,
0x6260c9e6, 0xff50dd2a, 0x9036aa71, 0xce161879, 0xd1307cdf, 0x89e456df, 0xf83133e2, 0x65f55c3d,
0x94871b01, 0xb5d204cd, 0x583a3264, 0x5e165957, 0x4cbda964, 0x675fca47, 0xf4a3033e, 0x2a417322,
0x3b61432f, 0x7f5532f2, 0xb609973b, 0x1a795239, 0x31b477c9, 0xd2949d28, 0x78969712, 0x0eb87b6e,
0x7e11d22d, 0xccee88bd, 0xeed07eb8, 0xe5563a81, 0xe7cb6bcf, 0x25de953e, 0x4d05653a, 0x0b831557,
0x94b9cd77, 0x13f01579, 0x794b4a4a, 0x67e7c7dc, 0xc456d8d4, 0x59689c9b, 0x668456d7, 0x22d2a2e1,
0x38b3a828, 0x0315ac3c, 0x438d681e, 0xab7109c5, 0x97ee19a8, 0xde062b2e, 0x2c76c47b, 0x0084456f,
0x908f0fd3, 0xa646551f, 0x3e826725, 0xd521788e, 0x9f01c2b0, 0x93180cdc, 0x92ea1df8, 0x431a9aae,
0x7c2ea356, 0xda33ad03, 0x46926893, 0x66bde7d7, 0xb501cc75, 0x1f6e8a41, 0x685250f4, 0x3bb1f318,
0xaf238c04, 0x974ed2ec, 0x5b159e49, 0xd526f8bf, 0x12085626, 0x3e2432a9, 0x6bd20c48, 0x1f1d59da,
0x18ab1068, 0x80f83cf8, 0x2c8c11c0, 0x7d548035, 0x0ff675c3, 0xfed160bf, 0x74bbbb24, 0xd98e006b,
0xdeaa47eb, 0x05f2179e, 0x437b0b71, 0xa7c95f8f, 0x00a99d3b, 0x3fc3c444, 0x72686f8e, 0x00fd01a9,
0xdedc0787, 0xc6af7626, 0x7012fe76, 0xf2a5f7ce, 0x9a7b2eda, 0x5e57fcf2, 0x4da0d4ad, 0x5c63b155,
0x34117375, 0xd4134c11, 0x2ea77435, 0x5278b6de, 0xab522c4c, 0xbc8fc702, 0xc94a09e4, 0xebb93a9e,
0x91ecb65e, 0x4c52ecc6, 0x8703bb52, 0xcb2d60aa, 0x30a0538a, 0x1514f10b, 0x157f6329, 0x3429dc3d,
0x5db73eb2, 0xa7a1a969, 0x7286bd24, 0x0df6881e, 0x3785ba5f, 0xcd04623a, 0x02758170, 0xd827f556,
0x99d95191, 0x84457eb1, 0x58a7fb22, 0xd2967c5f, 0x4f0c33f6, 0x4a02099a, 0xe0904821, 0x94124036,
0x496a031b, 0x780b69c4, 0xcf1a4927, 0x87a119b8, 0xcdfaf4f8, 0x4cf9cd0f, 0x27c96a84, 0x6d11117e,
0x7f8cf847, 0x74ceede5, 0xc88905e6, 0x60215841, 0x7172875a, 0x736e993a, 0x010aa53c, 0x43d53c2b,
0xf0d91a93, 0x0d983b56, 0xf816663c, 0xe5d13363, 0x0a61737c, 0x09d51150, 0x83a5ac2f, 0x3e884905,
0x7b01aeb5, 0x600a6ea7, 0xb7678f7b, 0x72b38977, 0x068018f2, 0xce6ae45b, 0x29188aa8, 0xe5a0b1e9,
0xc04c2b86, 0x8bd14d75, 0x648781f3, 0xdbae1e0a, 0xddcdd8ae, 0xab4d81a3, 0x446baaba, 0x1cc0c19d,
0x17be4f90, 0x82c0e65d, 0x676f9c95, 0x5c708db2, 0x6fd4c867, 0xa5106ef0, 0x19dde49d, 0x78182f95,
0xd089cd81, 0xa32e98fe, 0xbe306c82, 0x6cd83d8c, 0x037f1bde, 0x0b15722d, 0xeddc1e22, 0x93c76559,
0x8a2f571b, 0x92cc81b4, 0x021b7477, 0x67523904, 0xc95dbccc, 0xac17ee9d, 0x944e46bc, 0x0781867e,
0xc854dd9d, 0x26e2c30c, 0x858c0416, 0x6d397708, 0xebe29c58, 0xc80ced86, 0xd496b4ab, 0xbe45e6f5,
0x10d24706, 0xacf8187a, 0x96f523cb, 0x2227e143, 0x78c36564, 0x4643adc2, 0x4729d97a, 0xcff93e0d,
0x25484bbd, 0x91c6798e, 0x95f773f4, 0x44204675, 0x2eda57ba, 0x06d313ef, 0xeeaa4466, 0x2dfa7530,
0xa8af0c9b, 0x39f1535e, 0x0cc2b7bd, 0x38a76c0e, 0x4f41071d, 0xcdaf2475, 0x49a6eff8, 0x01621748,
0x36ebacab, 0xbd6d9a29, 0x44d1cd65, 0x40815dfd, 0x55fa5a1a, 0x87cce9e9, 0xae559b45, 0xd76b4c26,
0x637d60ad, 0xde29f5f9, 0x97491cbb, 0xfb350040, 0xffe7f997, 0x201c9dcd, 0xe61320e9, 0xa90987a3,
0xe24afa83, 0x61c1e6fc, 0xcc87ff62, 0xf1c9d8fa, 0x4fd04546, 0x90ecc76e, 0x46e456b9, 0x305dceb8,
0xf627e68c, 0x2d286815, 0xc705bbfd, 0x101b6df3, 0x892dae62, 0xd5b7fb44, 0xea1d5c94, 0x5332e3cb,
0xf856f88a, 0xb341b0e9, 0x28408d9d, 0x5421bc17, 0xeb9af9bc, 0x602371c5, 0x67985a91, 0xd774907f,
0x7c4d697d, 0x9370b0b8, 0x6ff5cebb, 0x7d465744, 0x674ceac0, 0xea9102fc, 0x0de94784, 0xc793de69,
0xfe599bb1, 0xc6ad952f, 0x6d6ca9c3, 0x928c3f91, 0xf9022f05, 0x24a164dc, 0xe5e98cd3, 0x7649efdb,
0x6df3bcdb, 0x5d1e9ff1, 0x17f5d010, 0xe2686ea1, 0x6eac77fe, 0x7bb5c585, 0x88d90cbb, 0x18689163,
0x67c9efa5, 0xc0b76d9b, 0x960efbab, 0xbd872807, 0x70f4c474, 0x56c29d20, 0xd1541d15, 0x88137033,
0xe3f02b3e, 0xb6d9b28d, 0x53a077ba, 0xeedcd29e, 0xa50a6c1d, 0x12c2801e, 0x52ba335b, 0x35984614,
0xe2599aa8, 0xaf94ed1d, 0xd90d4767, 0x202c7d07, 0x77bec4f4, 0xfa71bc80, 0xfc5c8b76, 0x8d0fbbfc,
0xda366dc6, 0x8b32a0c7, 0x1b36f7fc, 0x6642dcbc, 0x6fe7e724, 0x8b5fa782, 0xc4227404, 0x3a7d1da7,
0x517ed658, 0x8a18df6d, 0x3e5c9b23, 0x1fbd51ef, 0x1470601d, 0x3400389c, 0x676b065d, 0x8864ad80,
0xea6f1a9c, 0x2db484e1, 0x608785f0, 0x8dd384af, 0x69d26699, 0x409c4e16, 0x77f9986a, 0x7f491266,
0x883ea6cf, 0xeaa06072, 0xfa2e5db5, 0x352594b4, 0x9156bb89, 0xa2fbbbfb, 0xac3989c7, 0x6e2422b1,
0x581f3560, 0x1009a9b5, 0x7e5ad9cd, 0xa9fc0a6e, 0x43e5998e, 0x7f8778f9, 0xf038f8e1, 0x5415c2e8,
0x6499b731, 0xb82389ae, 0x05d4d819, 0x0f06440e, 0xf1735aa0, 0x986430ee, 0x47ec952c, 0xbf149cc5,
0xb3cb2cb6, 0x3f41e8c2, 0x271ac51b, 0x48ac5ded, 0xf76a0469, 0x717bba4d, 0x4f5c90d6, 0x3b74f756,
0x1824110a, 0xa4fd43e3, 0x1eb0507c, 0xa9375c08, 0x157c59a7, 0x0cad8f51, 0xd66031a0, 0xabb5343f,
0xe533fa43, 0x1996e2bb, 0xd7953a71, 0xd2529b94, 0x58f0fa07, 0x4c9b1877, 0x057e990d, 0x8bfe19c4,
0xa8e2c0c9, 0x99fcaada, 0x69d2aaca, 0xdc1c4642, 0xf4d22307, 0x7fe27e8c, 0x1366aa07, 0x1594e637,
0xce1066bf, 0xdb922552, 0x9930b52a, 0xaeaa9a3e, 0x31ff7eb4, 0x5e1f945a, 0x150ac49c, 0x0ccdac2d,
0xd8a8a217, 0xb82ea6e5, 0xd6a74659, 0x67b7e3e6, 0x836eef4a, 0xb6f90074, 0x7fa3ea4b, 0xcb038123,
0xbf069f55, 0x1fa83fc4, 0xd6ebdb23, 0x16f0a137, 0x19a7110d, 0x5ff3b55f, 0xfb633868, 0xb466f845,
0xbce0c198, 0x88404296, 0xddbdd88b, 0x7fc52546, 0x63a553f8, 0xa728405a, 0x378a2bce, 0x6862e570,
0xefb77e7d, 0xc611625e, 0x32515c15, 0x6984b765, 0xe8405976, 0x9ba386fd, 0xd4eed4d9, 0xf8fe0309,
0x0ce54601, 0xbaf879c2, 0xd8524057, 0x1d8c1d7a, 0x72c0a3a9, 0x5a1ffbde, 0x82f33a45, 0x5143f446,
0x29c7e182, 0xe536c32f, 0x5a6f245b, 0x44272adb, 0xcb701d9c, 0xf76137ec, 0x0841f145, 0xe7042ecc,
0xf1277dd7, 0x745cf92c, 0xa8fe65fe, 0xd3e2d7cf, 0x54c513ef, 0x6079bc2d, 0xb66336b0, 0x101e383b,
0xbcd75753, 0x25be238a, 0x56a6f0be, 0xeeffcc17, 0x5ea31f3d, 0x0ae772f5, 0xf76de3de, 0x1bbecdad,
0xc9107d43, 0xf7e38dce, 0x618358cd, 0x5c833f04, 0xf6975906, 0xde4177e5, 0x67d314dc, 0xb4760f3e,
0x56ce5888, 0x0e8345a8, 0xbff6b1bf, 0x78dfb112, 0xf1709c1e, 0x7bb8ed8b, 0x902402b9, 0xdaa64ae0,
0x46b71d89, 0x7eee035f, 0xbe376509, 0x99648f3a, 0x0863ea1f, 0x49ad8887, 0x79bdecc5, 0x3c10b568,
0x5f2e4bae, 0x04ef20ab, 0x72f8ce7b, 0x521e1ebe, 0x14525535, 0x2e8af95b, 0x9094ccfd, 0xbcf36713,
0xc73953ef, 0xd4b91474, 0x6554ec2d, 0xe3885c96, 0x03dc73b7, 0x931688a9, 0xcbbef182, 0x2b77cfc9,
0x632a32bd, 0xd2115dcc, 0x1ae5533d, 0x32684e13, 0x4cc5a004, 0x13321bde, 0x62cbd38d, 0x78383a3b,
0xd00686f1, 0x9f601ee7, 0x7eaf23de, 0x3110c492, 0x9c351209, 0x7eb89d52, 0x6d566eac, 0xc2efd226,
0x32e9fac5, 0x52227274, 0x09f84725, 0xb8d0b605, 0x72291f02, 0x71b5c34b, 0x3dbfcbb8, 0x04a02263,
0x55ba597f, 0xd4e4037d, 0xc813e1be, 0xffddeefa, 0xc3c058f3, 0x87010f2e, 0x1dfcf55f, 0xc694eeeb,
0xa9c01a74, 0x98c2fc6b, 0xe57e1428, 0xdd265a71, 0x836b956d, 0x7e46ab1a, 0x5835d541, 0x50b32505,
0xe640913c, 0xbb486079, 0xfe496263, 0x113c5b69, 0x93cd6620, 0x5efe823b, 0x2d657b40, 0xb46dfc6c,
0x57710c69, 0xfe9fadeb, 0xb5f8728a, 0xe3224170, 0xca28b751, 0xfdabae56, 0x5ab12c3c, 0xa697c457,
0xd28fa2b7, 0x056579f2, 0x9fd9d810, 0xe3557478, 0xd88d89ab, 0xa72a9422, 0x6d47abd0, 0x405bcbd9,
0x6f83ebaf, 0x13caec76, 0xfceb9ee2, 0x2e922df7, 0xce9856df, 0xc05e9322, 0x2772c854, 0xb67f2a32,
0x6d1af28d, 0x3a78cf77, 0xdff411e4, 0x61c74ca9, 0xed8b842e, 0x72880845, 0x6e857085, 0xc6404932,
0xee37f6bc, 0x27116f48, 0x5e9ec45a, 0x8ea2a51f, 0xa5573db7, 0xa746d036, 0x486b4768, 0x5b438f3b,
0x18c54a5c, 0x64fcf08e, 0xe993cdc1, 0x35c1ead3, 0x9de07de7, 0x321b841c, 0x87423c5e, 0x071aa0f6,
0x962eb75b, 0xbb06bdd2, 0xdcdb5363, 0x389752f2, 0x83d9cc88, 0xd014adc6, 0xc71121bb, 0x2372f938,
0xcaff2650, 0x62be8951, 0x56dccaff, 0xac4084c0, 0x09712e95, 0x1d3c288f, 0x1b085744, 0xe1d3cfef,
0x5c9a812e, 0x6611fd59, 0x85e46044, 0x1981d885, 0x5a4c903f, 0x43f30d4b, 0x7d1d601b, 0xdd3c3391,
0x030ec65e, 0xc12878cd, 0x72e795fe, 0xd0c76abd, 0x1ec085db, 0x7cbb61fa, 0x93e8dd1e, 0x8582eb06,
0x73563144, 0x049d4e7e, 0x5fd5aefe, 0x7b842a00, 0x75ced665, 0xbb32d458, 0x4e83bba7, 0x8f15151f,
0x7795a125, 0xf0842455, 0x499af99d, 0x565cc7fa, 0xa3b1278d, 0x3f27ce74, 0x96ca058e, 0x8a497443,
0xa6fb8cae, 0xc115aa21, 0x17504923, 0xe4932402, 0xaea886c2, 0x8eb79af5, 0xebd5ea6b, 0xc7980d3b,
0x71369315, 0x796e6a66, 0x3a7ec708, 0xb05175c8, 0xe02b74e7, 0xeb377ad3, 0x6c8c1f54, 0xb980c374,
0x59aee281, 0x449cb799, 0xe01f5605, 0xed0e085e, 0xc9a1a3b4, 0xaac481b1, 0xc935c39c, 0xb7d8ce7f
};
#define squeeze(s, k) {\
((uint32_t *)output)[k] = (s); \
}
struct GPU_DEVICE
{
uint32_t state[N];
uint32_t nonce_id;
uint8_t *target;
uint32_t *g_state;
uint8_t *g_target;
uint32_t *g_nonce_id;
};
GPU_DEVICE *gpu_divices[MAX_GPU_NUM] = {NULL};
uint32_t gpu_divices_cnt = 0;
__global__ void eaglesong(uint32_t *state, uint8_t* target, uint32_t *nonce_id)
{
uint32_t global_id = blockDim.x * blockIdx.x + threadIdx.x;
uint32_t id = global_id % THREADS_PER_BLOCK;
uint32_t tmp;
uint32_t s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13, s14, s15;
uint8_t output[OUTPUT_LEN];
__shared__ uint32_t shared_state[N];
__shared__ uint8_t shared_target[OUTPUT_LEN];
if (id < N) shared_state[id] = state[id];
if (id < OUTPUT_LEN) shared_target[id] = target[id];
__syncthreads();
s0 = shared_state[0] ^ (global_id+1);
s1 = shared_state[1]; s2 = shared_state[2]; s3 = shared_state[3];
s4 = shared_state[4]; s5 = shared_state[5]; s6 = shared_state[6]; s7 = shared_state[7];
s8 = s9 = s10 = s11 = s12 = s13 = s14 = s15 = 0;
EaglesongPermutation();
s0 ^= shared_state[8]; s1 ^= shared_state[9]; s2 ^= shared_state[10];
EaglesongPermutation();
squeeze(s0, 0); squeeze(s1, 1); squeeze(s2, 2); squeeze(s3, 3);
squeeze(s4, 4); squeeze(s5, 5); squeeze(s6, 6); squeeze(s7, 7);
for(int k=0; k<32; ++k) {
if(output[k] < shared_target[k]) {
atomicExch(nonce_id, global_id+1);
} else if(output[k] > shared_target[k]) {
break;
}
}
}
int gpu_hash(uint32_t gpuid)
{
if (HASH_NUM > MAX_HASH_NUM) {
printf("HASH_NUM out of bound!!!\n");
return 0;
}
if (gpu_divices[gpuid]->g_state == NULL)
{
if (cudaMalloc((void **)&gpu_divices[gpuid]->g_state, sizeof(gpu_divices[gpuid]->state)) != cudaSuccess) {
printf("E01: cuda alloc memory error for state\n");
return 0;
}
}
if (gpu_divices[gpuid]->g_nonce_id == NULL)
{
if (cudaMalloc((void **)&gpu_divices[gpuid]->g_nonce_id, sizeof(gpu_divices[gpuid]->nonce_id)) != cudaSuccess) {
printf("E02: cuda alloc memory error for nonce\n");
return 0;
}
}
if (gpu_divices[gpuid]->g_target == NULL)
{
if (cudaMalloc((void **)&gpu_divices[gpuid]->g_target, OUTPUT_LEN) != cudaSuccess) {
printf("E03: cuda alloc memory error for target\n");
return 0;
}
}
if (cudaMemcpy(gpu_divices[gpuid]->g_state, gpu_divices[gpuid]->state, sizeof(gpu_divices[gpuid]->state), cudaMemcpyHostToDevice) != cudaSuccess)
{
printf("E04: copy memory error for state\n");
return 0;
}
if (cudaMemcpy(gpu_divices[gpuid]->g_target, gpu_divices[gpuid]->target, OUTPUT_LEN, cudaMemcpyHostToDevice) != cudaSuccess)
{
printf("E05: copy memory error for target\n");
return 0;
}
if (cudaMemcpy(gpu_divices[gpuid]->g_nonce_id, &(gpu_divices[gpuid]->nonce_id), sizeof(gpu_divices[gpuid]->nonce_id), cudaMemcpyHostToDevice) != cudaSuccess)
{
printf("E06: copy memory error for nonce\n");
return 0;
}
eaglesong << <HASH_NUM / THREADS_PER_BLOCK, THREADS_PER_BLOCK >> >(gpu_divices[gpuid]->g_state, gpu_divices[gpuid]->g_target, gpu_divices[gpuid]->g_nonce_id);
cudaDeviceSynchronize();
if (cudaMemcpy(&(gpu_divices[gpuid]->nonce_id), gpu_divices[gpuid]->g_nonce_id, sizeof(gpu_divices[gpuid]->nonce_id), cudaMemcpyDeviceToHost) != cudaSuccess)
{
printf("E07: copy memory error for g_nonce_id\n");
return 0;
}
return HASH_NUM;
}
GPU_DEVICE* New_GPU_DEVICE()
{
GPU_DEVICE* p = NULL;
p = (GPU_DEVICE*)malloc(sizeof(GPU_DEVICE));
if (p != NULL)
{
p->target = NULL;
p->g_nonce_id = NULL;
p->g_state = NULL;
p->g_target = NULL;
} else {
printf("E08: alloc memory error!\n");
}
return p;
}
void RESET_GPU_DEVICE(uint32_t gpuid)
{
memset(gpu_divices[gpuid]->state, 0, sizeof(gpu_divices[gpuid]->state));
gpu_divices[gpuid]->nonce_id = 0;
cudaFree(gpu_divices[gpuid]->g_nonce_id);
cudaFree(gpu_divices[gpuid]->g_state);
cudaFree(gpu_divices[gpuid]->g_target);
gpu_divices[gpuid]->target = NULL;
gpu_divices[gpuid]->g_nonce_id = NULL;
gpu_divices[gpuid]->g_state = NULL;
gpu_divices[gpuid]->g_target = NULL;
}
void GPU_Count()
{
int num;
cudaDeviceProp prop;
cudaGetDeviceCount(&num);
printf("deviceCount := %d\n", num);
gpu_divices_cnt = 0;
for (int i = 0; i<num; i++)
{
cudaGetDeviceProperties(&prop, i);
printf("name:%s\n", prop.name);
printf("totalGlobalMem:%lu GB\n", prop.totalGlobalMem / 1024 / 1024 / 1024);
printf("multiProcessorCount:%d\n", prop.multiProcessorCount);
printf("maxThreadsPerBlock:%d\n", prop.maxThreadsPerBlock);
printf("sharedMemPerBlock:%lu KB\n", prop.sharedMemPerBlock/1024);
printf("major:%d,minor:%d\n", prop.major, prop.minor);
gpu_divices_cnt++;
}
if (gpu_divices_cnt > MAX_GPU_NUM)gpu_divices_cnt = MAX_GPU_NUM;
}
uint32_t EaglesongHash(uint8_t *input, uint8_t *target, uint64_t *nonce, uint32_t gpuid) {
while(!gpu_divices[gpuid]) {
gpu_divices[gpuid] = New_GPU_DEVICE();
}
uint32_t ret;
RAND_bytes((uint8_t*) &(gpu_divices[gpuid]->state[0]), 4);
RAND_bytes((uint8_t*) &(gpu_divices[gpuid]->state[1]), 4);
// absorbing
for(int j = 0, k=0; j <= M; ++j) {
uint32_t sum = 0;
for(int v=0; v < 4; ++v) {
if(k < INPUT_LEN) {
sum = (sum << 8) ^ input[k];
} else if(k == INPUT_LEN) {
sum = (sum << 8) ^ DELIMITER;
}
++k;
}
gpu_divices[gpuid]->state[j+2] = sum;
}
gpu_divices[gpuid]->target = target;
gpu_divices[gpuid]->nonce_id = 0;
ret = gpu_hash(gpuid);
if(gpu_divices[gpuid]->nonce_id) {
*nonce = le32toh(htobe32(gpu_divices[gpuid]->state[1]));
*nonce = (*nonce << 32) ^ le32toh(htobe32(((gpu_divices[gpuid]->state[0])^(gpu_divices[gpuid]->nonce_id))));
}
return ret;
}
int main() {
uint8_t input[32];
uint8_t target[32];
uint64_t nonce = 0;
for(int i=0; i<32; ++i) {
input[i] = i + 75;
}
for(int i=0; i<32; ++i) {
target[i]= 0xff;
}
target[0] = 0;
target[1] = 0;
target[2] = 0;
clock_t t1, t2;
t1 = clock();
int sum = EaglesongHash(input, target, &nonce, 0);
t2 = clock();
// for (int i = 0; i < 32; ++i) printf("%02x", output[i]); printf("\n");
printf("nonce %lu, time: %f %f ms\n", nonce, (double)(t2 - t1) / CLOCKS_PER_SEC, (double)(sum) / ((double)(t2 - t1) / (CLOCKS_PER_SEC)));
printf("%lu", nonce);
} |
ed2a5c33c55e49b480e96c419cca9ef071d616ad.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "gray_scale_flip.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
RGB_8 *img = NULL;
hipMalloc(&img, XSIZE*YSIZE);
int height = YSIZE;
int width = XSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
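// Note (illustrative, not in the original benchmark): the two while-loops above simply round
// XSIZE and YSIZE up to the next multiple of the block dimensions; the closed form would be
// iXSIZE = ((XSIZE + BLOCKX - 1) / BLOCKX) * BLOCKX; and likewise for iYSIZE with BLOCKY.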
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(gray_scale_flip, dim3(gridBlock), dim3(threadBlock), 0, 0, img, height, width);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(gray_scale_flip, dim3(gridBlock), dim3(threadBlock), 0, 0, img, height, width);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(gray_scale_flip, dim3(gridBlock), dim3(threadBlock), 0, 0, img, height, width);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | ed2a5c33c55e49b480e96c419cca9ef071d616ad.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "gray_scale_flip.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
RGB_8 *img = NULL;
cudaMalloc(&img, XSIZE*YSIZE);
int height = YSIZE;
int width = XSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
gray_scale_flip<<<gridBlock,threadBlock>>>(img,height,width);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
gray_scale_flip<<<gridBlock,threadBlock>>>(img,height,width);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
gray_scale_flip<<<gridBlock,threadBlock>>>(img,height,width);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
e9f9ddf613b0bde9efb59e9d6689df0328263213.hip | // !!! This is a file automatically generated by hipify!!!
/***************************************************************************************************
* Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted
* provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief CUTLASS Library handle.
*/
#include <iostream>
#include <stdexcept>
#include <cstdint>
#include "cutlass/library/handle.h"
#include "cutlass/library/singleton.h"
#include "cutlass/library/util.h"
namespace cutlass {
namespace library {
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Constructor
Handle::Handle(
hipStream_t stream,
size_t workspace_size
):
provider_(Provider::kCUTLASS),
stream_(stream),
workspace_(nullptr),
workspace_size_(0),
scalar_pointer_mode_(ScalarPointerMode::kHost),
last_operation_(nullptr) {
int device_idx = -1;
hipError_t error = hipGetDevice(&device_idx);
if (error != hipSuccess) {
throw std::runtime_error("hipGetDevice() failed");
}
error = hipGetDeviceProperties(&device_, device_idx);
if (error != hipSuccess) {
throw std::runtime_error("hipGetDeviceProperties() failed");
}
set_workspace_size(workspace_size);
Singleton::get();
}
/// Destructor
Handle::~Handle() {
if (workspace_) {
hipFree(workspace_);
workspace_ = nullptr;
workspace_size_ = 0;
}
}
/// Move constructor
Handle::Handle(Handle && handle) {
device_ = handle.device_;
workspace_size_ = handle.workspace_size_;
workspace_ = handle.workspace_;
stream_ = handle.stream_;
scalar_pointer_mode_ = handle.scalar_pointer_mode_;
handle.workspace_ = nullptr;
handle.workspace_size_ = 0;
}
/// Move assignment operator
Handle & Handle::operator=(Handle && handle) {
provider_ = handle.provider_;
device_ = handle.device_;
workspace_size_ = handle.workspace_size_;
workspace_ = handle.workspace_;
stream_ = handle.stream_;
scalar_pointer_mode_ = handle.scalar_pointer_mode_;
handle.workspace_ = nullptr;
handle.workspace_size_ = 0;
return *this;
}
int Handle::compute_capability() const {
return device_.major * 10 + device_.minor;
}
/// Sets the current CUDA stream
void Handle::set_stream(hipStream_t stream) {
stream_ = stream;
}
/// Gets the current CUDA stream
hipStream_t Handle::get_stream() const {
return stream_;
}
/// Gets the current provider
Provider Handle::get_provider() const {
return provider_;
}
/// Sets the provider of operations
void Handle::set_provider(Provider provider) {
provider_ = provider;
}
/// Gets the device workspace size
size_t Handle::get_workspace_size() const {
return workspace_size_;
}
/// Gets a pointer to the device workspace allocation in Global Memory
void *Handle::get_workspace() const {
return workspace_;
}
/// Sets the size of device workspace, invalidating previous calls to get_device_workspace()
void Handle::set_workspace_size(size_t bytes) {
if (bytes != workspace_size_) {
if (workspace_) {
hipFree(workspace_);
}
workspace_ = nullptr;
workspace_size_ = bytes;
if (workspace_size_) {
hipError_t error = hipMalloc((void **)&workspace_, workspace_size_);
if (error != hipSuccess) {
throw std::runtime_error("Failed to allocate workspace");
}
}
}
if (workspace_) {
hipError_t error = hipMemset(workspace_, 0, workspace_size_);
if (error != hipSuccess) {
throw std::runtime_error("Failed to clear workspace");
}
}
}
/// Gets the scalar pointer mode
ScalarPointerMode Handle::get_scalar_pointer_mode() const {
return scalar_pointer_mode_;
}
/// Sets the scalar pointer mode
void Handle::set_scalar_pointer_mode(ScalarPointerMode mode) {
scalar_pointer_mode_ = mode;
}
/// Gets the last operation
Operation const *Handle::get_last_operation() const {
return last_operation_;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Returns the maximum required alignment for each operator
static int maximum_alignment_requirement(GemmDescription const &desc) {
return ::max(
::max(desc.A.alignment, desc.B.alignment), desc.C.alignment);
}
/// Returns the largest alignment (in units of elements) the problem satisfies, starting from a
/// given upper limit.
static int gemm_problem_alignment(
int M,
int N,
int K,
NumericTypeID element_A,
void const *ptr_A,
int lda,
int64_t batch_stride_A,
NumericTypeID element_B,
void const *ptr_B,
int ldb,
int64_t batch_stride_B,
NumericTypeID element_C,
void const * ptr_C,
int ldc,
int64_t batch_stride_C,
void const * ptr_D,
int ldd,
int64_t batch_stride_D,
int max_alignment_in_bytes = 16
) {
void const *pointers[] = {
ptr_A, ptr_B, ptr_C, ptr_D
};
int64_t extents[] = {
M, N, K, lda, ldb, ldc, ldd, batch_stride_A, batch_stride_B, batch_stride_C, batch_stride_D
};
NumericTypeID elements[] = {
element_A, element_B, element_C
};
for (; max_alignment_in_bytes > 0; max_alignment_in_bytes /= 2) {
bool satisfied = true;
// Can pointers satisfy this?
for (void const *ptr : pointers) {
std::uintptr_t int_ptr = reinterpret_cast<std::uintptr_t>(ptr);
if (int_ptr % max_alignment_in_bytes) {
satisfied = false;
break;
}
}
if (!satisfied) {
continue;
}
// Compute the maximum alignment based on element data types
int max_element_alignment = 0;
for (NumericTypeID type_id : elements) {
int element_alignment = max_alignment_in_bytes * 8 / library::sizeof_bits(type_id);
max_element_alignment = ::max(max_element_alignment, element_alignment);
}
// Can the problem size and leading dimensions satisfy this?
for (int64_t extent : extents) {
if (extent % max_element_alignment) {
satisfied = false;
break;
}
}
if (!satisfied) {
continue;
}
// Yes
return max_element_alignment;
}
// No alignment satisfies this problem
return 0;
}
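// Worked example (illustrative, not part of the library): with 16-byte-aligned pointers,
// half-precision operands for A, B and C (16-bit elements), and M, N, K and every leading
// dimension divisible by 8, the first pass (max_alignment_in_bytes == 16) succeeds and the
// function returns 16 * 8 / 16 = 8 elements. If, say, lda were divisible by 4 but not by 8,
// the 16-byte pass fails, the loop retries with 8 bytes, and the result drops to 4 elements.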
/// Find the best kernel in descending order of preference.
static Operation const * find_gemm_operation(
GemmOperationFunctionalMap::const_iterator operators_it,
GemmPreferenceKey const preference_key) {
auto cc_it = operators_it->second.upper_bound(preference_key);
if (cc_it == operators_it->second.begin()) {
return nullptr;
}
Operation const *operation = nullptr;
// Search in descending order of compute capability
do {
--cc_it;
// Search tile sizes in order, for now.
for (auto const * op : cc_it->second) {
GemmDescription const &desc = static_cast<GemmDescription const &>(op->description());
int min_cc = desc.tile_description.minimum_compute_capability;
int max_cc = desc.tile_description.maximum_compute_capability;
int op_alignment = maximum_alignment_requirement(desc);
if ((min_cc <= preference_key.compute_capability) &&
(preference_key.compute_capability <= max_cc) &&
(op_alignment <= preference_key.alignment)) {
operation = op;
break;
}
}
} while (!operation && cc_it != operators_it->second.begin());
return operation;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Executes a GEMM computation: D <= alpha * A*B + beta * C
Status Handle::gemm(
int M, /// GEMM M dimension
int N, /// GEMM N dimension
int K, /// GEMM K dimension
NumericTypeID element_compute, /// Data type of internal accumulation
NumericTypeID element_scalar, /// Data type of alpha/beta scalars
void const *alpha, /// Pointer to alpha scalar
NumericTypeID element_A, /// Data type of A matrix elements
LayoutTypeID layout_A, /// Layout of A matrix
ComplexTransform transform_A, /// Complex transformation applied to A matrix - ignored for real-valued matrices
void const * ptr_A, /// Pointer to A matrix in Global Memory
int lda, /// Leading dimension of A matrix
NumericTypeID element_B, /// Data type of B matrix elements
LayoutTypeID layout_B, /// Layout of B matrix
ComplexTransform transform_B, /// Complex transformation applied to B matrix - ignored for real-valued matrices
void const * ptr_B, /// Pointer to B matrix in Global Memory
int ldb, /// Leading dimension of B matrix
void const * beta, /// Pointer to beta scalar
NumericTypeID element_C, /// Data type of C and D matrices
void const * ptr_C, /// Pointer to C matrix
int ldc, /// Leading dimension of C matrix
void * ptr_D, /// Pointer to D matrix
int ldd /// Leading dimension of D matrix
) {
//
// Find the operation
//
GemmFunctionalKey key(
provider_,
GemmKind::kGemm,
element_compute,
element_scalar,
element_A,
layout_A,
transform_A,
element_B,
layout_B,
transform_B,
element_C
);
auto operators_it = Singleton::get().operation_table.gemm_operations.find(key);
if (operators_it == Singleton::get().operation_table.gemm_operations.end()) {
return cutlass::Status::kErrorNotSupported;
}
if (operators_it->second.empty()) {
return cutlass::Status::kErrorNotSupported;
}
//
// Compute the largest alignment restriction the kernel can satisfy.
//
// Maximum alignment expectation among all kernels (in units of bytes)
int const kMaximumAlignmentSize = 16;
int alignment = gemm_problem_alignment(
M, N, K,
element_A, ptr_A, lda, 0,
element_B, ptr_B, ldb, 0,
element_C, ptr_C, ldc, 0,
ptr_D, ldd, 0, kMaximumAlignmentSize
);
//
// Find the best kernel in descending order of preference.
//
GemmPreferenceKey preference_key(compute_capability(), alignment);
Operation const *operation = find_gemm_operation(operators_it, preference_key);
if (!operation) {
return cutlass::Status::kErrorNotSupported;
}
last_operation_ = operation;
//
// Configure operation
//
GemmConfiguration configuration{
{M, N, K},
lda,
ldb,
ldc,
ldd,
1
};
// Query host work space size
uint64_t host_workspace_size_needed = operation->get_host_workspace_size(&configuration);
if (uint64_t(kHostWorkspaceSize) < host_workspace_size_needed) {
return cutlass::Status::kErrorNotSupported;
}
char host_workspace[kHostWorkspaceSize];
// Query device workspace size
uint64_t device_workspace_size_needed = operation->get_device_workspace_size(&configuration);
if (uint64_t(workspace_size_) < device_workspace_size_needed) {
return cutlass::Status::kErrorNotSupported;
}
// Initialize host and device workspaces
Status status = operation->initialize(
&configuration,
host_workspace,
workspace_,
stream_);
if (status != cutlass::Status::kSuccess) {
return status;
}
// Run the operator
GemmArguments arguments{
ptr_A,
ptr_B,
ptr_C,
ptr_D,
alpha,
beta,
scalar_pointer_mode_
};
return operation->run(&arguments, host_workspace, workspace_, stream_);
}
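// Illustrative usage sketch (hypothetical helper, not part of the library): how a caller
// might drive the single-precision, column-major GEMM path above with host-side scalars.
// d_A, d_B, d_C and d_D are assumed to be device allocations sized for the problem.
inline Status example_sgemm(
Handle &handle,
int M, int N, int K,
float alpha, float const *d_A, int lda,
float const *d_B, int ldb,
float beta, float const *d_C, int ldc,
float *d_D, int ldd) {
return handle.gemm(
M, N, K,
NumericTypeID::kF32, // accumulate in f32
NumericTypeID::kF32, // alpha/beta are f32
&alpha,
NumericTypeID::kF32, LayoutTypeID::kColumnMajor, ComplexTransform::kNone, d_A, lda,
NumericTypeID::kF32, LayoutTypeID::kColumnMajor, ComplexTransform::kNone, d_B, ldb,
&beta,
NumericTypeID::kF32, d_C, ldc,
d_D, ldd);
}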
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Executes a GEMM computation: D <= alpha * A*B + beta * C.
//
// Supports batched-strided, batched array or split-K serial or split-K parallel.
//
Status Handle::gemm_universal(
GemmUniversalMode mode, /// indicates the mode in which the kUniversal GEMM is launched
int M, /// GEMM M dimension
int N, /// GEMM N dimension
int K, /// GEMM K dimension
NumericTypeID element_compute, /// Data type of internal accumulation
NumericTypeID element_scalar, /// Data type of alpha/beta scalars
void const *alpha, /// Pointer to alpha scalar
NumericTypeID element_A, /// Data type of A matrix elements
LayoutTypeID layout_A, /// Layout of A matrix
ComplexTransform transform_A, /// Complex transformation applied to A matrix - ignored for real-valued matrices
void const * ptr_A, /// Pointer to A matrix in Global Memory
int lda, /// Leading dimension of A matrix
NumericTypeID element_B, /// Data type of B matrix elements
LayoutTypeID layout_B, /// Layout of B matrix
ComplexTransform transform_B, /// Complex transformation applied to B matrix - ignored for real-valued matrices
void const * ptr_B, /// Pointer to B matrix in Global Memory
int ldb, /// Leading dimension of B matrix
void const * beta, /// Pointer to beta scalar
NumericTypeID element_C, /// Data type of C and D matrices
void const * ptr_C, /// Pointer to C matrix
int ldc, /// Leading dimension of C matrix
void * ptr_D, /// Pointer to D matrix
int ldd, /// Leading dimension of D matrix
int batch_count, /// Batch count or number of split-K slices
int64_t batch_stride_A, /// Batch stride of A operand
int64_t batch_stride_B, /// Batch stride of B operand
int64_t batch_stride_C, /// Batch stride of C operand
int64_t batch_stride_D /// Batch stride of D operand
) {
//
// Find the operation
//
GemmFunctionalKey key(
provider_,
GemmKind::kUniversal,
element_compute,
element_scalar,
element_A,
layout_A,
transform_A,
element_B,
layout_B,
transform_B,
element_C
);
auto operators_it = Singleton::get().operation_table.gemm_operations.find(key);
if (operators_it == Singleton::get().operation_table.gemm_operations.end()) {
return cutlass::Status::kErrorNotSupported;
}
if (operators_it->second.empty()) {
return cutlass::Status::kErrorNotSupported;
}
//
// Compute the largest alignment restriction the kernel can satisfy.
//
// Maximum alignment expectation among all kernels (in units of bytes)
int const kMaximumAlignmentSize = 16;
void const *ptr_A_check = ptr_A;
void const *ptr_B_check = ptr_B;
void const *ptr_C_check = ptr_C;
void * ptr_D_check = ptr_D;
// Ignore alignment of pointers to pointers. We can't check this from the host,
// as each batch index has its own pointer in device memory.
if (mode == GemmUniversalMode::kArray) {
ptr_A_check = nullptr;
ptr_B_check = nullptr;
ptr_C_check = nullptr;
ptr_D_check = nullptr;
}
int alignment = gemm_problem_alignment(
M, N, K,
element_A, ptr_A_check, lda, 0,
element_B, ptr_B_check, ldb, 0,
element_C, ptr_C_check, ldc, 0,
ptr_D_check, ldd, 0, kMaximumAlignmentSize
);
//
// Find the best kernel in descending order of preference.
//
GemmPreferenceKey preference_key(compute_capability(), alignment);
Operation const *operation = find_gemm_operation(operators_it, preference_key);
if (!operation) {
return cutlass::Status::kErrorNotSupported;
}
last_operation_ = operation;
//
// Configure operation
//
GemmUniversalConfiguration configuration{
mode,
{M, N, K},
batch_count,
lda,
ldb,
ldc,
ldd
};
// Query host work space size
uint64_t host_workspace_size_needed = operation->get_host_workspace_size(&configuration);
if (uint64_t(kHostWorkspaceSize) < host_workspace_size_needed) {
return cutlass::Status::kErrorNotSupported;
}
char host_workspace[kHostWorkspaceSize];
// Query device workspace size
uint64_t device_workspace_size_needed = operation->get_device_workspace_size(&configuration);
if (uint64_t(workspace_size_) < device_workspace_size_needed) {
return cutlass::Status::kErrorNotSupported;
}
// Initialize host and device workspaces
Status status = operation->initialize(
&configuration,
host_workspace,
workspace_,
stream_);
if (status != cutlass::Status::kSuccess) {
return status;
}
// Run the operator
GemmUniversalArguments arguments{
ptr_A,
ptr_B,
ptr_C,
ptr_D,
alpha,
beta,
scalar_pointer_mode_,
batch_stride_A,
batch_stride_B,
batch_stride_C,
batch_stride_D
};
return operation->run(&arguments, host_workspace, workspace_, stream_);
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Planar complex GEMM
Status Handle::gemm_planar_complex(
int M, /// GEMM M dimension
int N, /// GEMM N dimension
int K, /// GEMM K dimension
NumericTypeID element_compute, /// Data type of internal accumulation
NumericTypeID element_scalar, /// Data type of alpha/beta scalars
void const *alpha, /// Pointer to alpha scalar
NumericTypeID element_A, /// Data type of A matrix elements
LayoutTypeID layout_A, /// Layout of A matrix
ComplexTransform transform_A, /// Complex transformation applied to A matrix
void const * ptr_A_real, /// Pointer to real part of A matrix
void const * ptr_A_imag, /// Pointer to imaginary part of A matrix
int lda_real, /// Leading dimension of real part of A matrix
int lda_imag, /// Leading dimension of imaginary part of A matrix
NumericTypeID element_B, /// Data type of B matrix elements
LayoutTypeID layout_B, /// Layout of B matrix
ComplexTransform transform_B, /// Complex transformation applied to B matrix
void const * ptr_B_real, /// Pointer to real part of B matrix
void const * ptr_B_imag, /// Pointer to imaginary part of B matrix
int ldb_real, /// Leading dimension of real part of B matrix
int ldb_imag, /// Leading dimension of imaginary part of B matrix
void const * beta, /// Pointer to beta scalar
NumericTypeID element_C, /// Data type of C and D matrix
void const * ptr_C_real, /// Pointer to real part of C matrix
void const * ptr_C_imag, /// Pointer to imaginary part of C matrix
int ldc_real, /// Leading dimension of real part of C matrix
int ldc_imag, /// Leading dimension of imaginary part of C matrix
void * ptr_D_real, /// Pointer to real part of D matrix
void * ptr_D_imag, /// Pointer to imaginary part of D matrix
int ldd_real, /// Leading dimension of real part of D matrix
int ldd_imag, /// Leading dimension of imaginary part of D matrix
int batch_count, /// Number of batched GEMMs to execute
int64_t batch_stride_A_real,
int64_t batch_stride_A_imag,
int64_t batch_stride_B_real,
int64_t batch_stride_B_imag,
int64_t batch_stride_C_real,
int64_t batch_stride_C_imag,
int64_t batch_stride_D_real,
int64_t batch_stride_D_imag
) {
//
// Find the operation
//
GemmFunctionalKey key(
provider_,
GemmKind::kPlanarComplex,
element_compute,
element_scalar,
element_A,
layout_A,
transform_A,
element_B,
layout_B,
transform_B,
element_C
);
auto operators_it = Singleton::get().operation_table.gemm_operations.find(key);
if (operators_it == Singleton::get().operation_table.gemm_operations.end()) {
return cutlass::Status::kErrorNotSupported;
}
if (operators_it->second.empty()) {
return cutlass::Status::kErrorNotSupported;
}
//
// Compute the largest alignment restriction the kernel can satisfy.
//
// Maximum alignment expectation among all kernels (in units of bytes)
int const kMaximumAlignmentSize = 16;
int alignment = ::max(
gemm_problem_alignment(
M, N, K,
element_A, ptr_A_real, lda_real, batch_stride_A_real,
element_B, ptr_B_real, ldb_real, batch_stride_B_real,
element_C, ptr_C_real, ldc_real, batch_stride_C_real,
ptr_D_real, ldd_real, batch_stride_D_real, kMaximumAlignmentSize
),
gemm_problem_alignment(
M, N, K,
element_A, ptr_A_imag, lda_imag, batch_stride_A_imag,
element_B, ptr_B_imag, ldb_imag, batch_stride_B_imag,
element_C, ptr_C_imag, ldc_imag, batch_stride_C_imag,
ptr_D_imag, ldd_imag, batch_stride_D_imag, kMaximumAlignmentSize
)
);
//
// Find the best kernel in descending order of preference.
//
GemmPreferenceKey preference_key(compute_capability(), alignment);
Operation const *operation = find_gemm_operation(operators_it, preference_key);
if (!operation) {
return cutlass::Status::kErrorNotSupported;
}
last_operation_ = operation;
//
// Configure operation
//
GemmPlanarComplexConfiguration configuration{
GemmUniversalMode::kBatched,
{M, N, K},
batch_count,
lda_real,
lda_imag,
ldb_real,
ldb_imag,
ldc_real,
ldc_imag,
ldd_real,
ldd_imag
};
// Query host work space size
uint64_t host_workspace_size_needed = operation->get_host_workspace_size(&configuration);
if (uint64_t(kHostWorkspaceSize) < host_workspace_size_needed) {
return cutlass::Status::kErrorNotSupported;
}
char host_workspace[kHostWorkspaceSize];
// Query device workspace size
uint64_t device_workspace_size_needed = operation->get_device_workspace_size(&configuration);
if (uint64_t(workspace_size_) < device_workspace_size_needed) {
return cutlass::Status::kErrorNotSupported;
}
// Initialize host and device workspaces
Status status = operation->initialize(
&configuration,
host_workspace,
workspace_,
stream_);
if (status != cutlass::Status::kSuccess) {
return status;
}
// Run the operator
GemmPlanarComplexArguments arguments{
ptr_A_real,
ptr_A_imag,
ptr_B_real,
ptr_B_imag,
ptr_C_real,
ptr_C_imag,
ptr_D_real,
ptr_D_imag,
alpha,
beta,
scalar_pointer_mode_,
batch_stride_A_real,
batch_stride_A_imag,
batch_stride_B_real,
batch_stride_B_imag,
batch_stride_C_real,
batch_stride_C_imag,
batch_stride_D_real,
batch_stride_D_imag
};
return operation->run(&arguments, host_workspace, workspace_, stream_);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Planar complex batched GEMM loading pointers from arrays in global memory
Status Handle::gemm_planar_complex_array(
int expected_M, /// Expected GEMM M dimension (used for sizing CUDA grid)
int expected_N, /// Expected GEMM N dimension (used for sizing CUDA grid)
int expected_K, /// Expected GEMM K dimension
int batch_count, /// Number of independent GEMM computations to execute
int const *M, /// Array containing the GEMM M dimension for each batch index
int const *N, /// Array containing the GEMM N dimension for each batch index
int const *K, /// Array containing the GEMM K dimension for each batch index
NumericTypeID element_compute, /// Data type of internal accumulation
NumericTypeID element_scalar, /// Data type of alpha/beta scalars
void const *alpha, /// Pointer to alpha scalar
NumericTypeID element_A, /// Data type of A matrix elements
LayoutTypeID layout_A, /// Layout of A matrix
ComplexTransform transform_A, /// Complex transformation applied to A matrix
void const * const * ptr_A_real, /// Pointer to array containing pointers to real part of A matrices
void const * const * ptr_A_imag, /// Pointer to array containing pointers to imaginary part of A matrices
int lda_real, /// Leading dimension of real part of A matrix
int lda_imag, /// Leading dimension of imaginary part of A matrix
NumericTypeID element_B, /// Data type of B matrix elements
LayoutTypeID layout_B, /// Layout of B matrix
ComplexTransform transform_B, /// Complex transformation applied to B matrix
void const * const * ptr_B_real, /// Pointer to array containing pointers to real part of B matrices
void const * const * ptr_B_imag, /// Pointer to array containing pointers to imaginary part of B matrices
int ldb_real, /// Leading dimension of real part of B matrix
int ldb_imag, /// Leading dimension of imaginary part of B matrix
void const * beta, /// Pointer to beta scalar
NumericTypeID element_C, /// Data type of C and D matrix
void const * const * ptr_C_real, /// Pointer to array containing pointers to real part of C matrices
void const * const * ptr_C_imag, /// Pointer to array containing pointers to imaginary part of C matrices
int ldc_real, /// Leading dimension of real part of C matrix
int ldc_imag, /// Leading dimension of imaginary part of C matrix
void * const * ptr_D_real, /// Pointer to array containing pointers to real part of D matrices
void * const * ptr_D_imag, /// Pointer to array containing pointers to imaginary part of D matrices
int ldd_real, /// Leading dimension of real part of D matrix
int ldd_imag /// Leading dimension of imaginary part of D matrix
) {
//
// Find the operation
//
GemmFunctionalKey key(
provider_,
GemmKind::kPlanarComplexArray,
element_compute,
element_scalar,
element_A,
layout_A,
transform_A,
element_B,
layout_B,
transform_B,
element_C
);
auto operators_it = Singleton::get().operation_table.gemm_operations.find(key);
if (operators_it == Singleton::get().operation_table.gemm_operations.end()) {
return cutlass::Status::kErrorNotSupported;
}
if (operators_it->second.empty()) {
return cutlass::Status::kErrorNotSupported;
}
//
// Compute the largest alignment restriction the kernel can satisfy.
//
// Maximum alignment expectation among all kernels (in units of bytes)
int const kMaximumAlignmentSize = 16;
int alignment = ::max(
gemm_problem_alignment(
expected_M, expected_N, expected_K,
element_A, nullptr, lda_real, 0,
element_B, nullptr, ldb_real, 0,
element_C, nullptr, ldc_real, 0,
nullptr, ldd_real, 0, kMaximumAlignmentSize
),
gemm_problem_alignment(
expected_M, expected_N, expected_K,
element_A, nullptr, lda_imag, 0,
element_B, nullptr, ldb_imag, 0,
element_C, nullptr, ldc_imag, 0,
nullptr, ldd_imag, 0, kMaximumAlignmentSize
)
);
//
// Find the best kernel in descending order of preference.
//
GemmPreferenceKey preference_key(compute_capability(), alignment);
Operation const *operation = find_gemm_operation(operators_it, preference_key);
if (!operation) {
return cutlass::Status::kErrorNotSupported;
}
last_operation_ = operation;
//
// Configure operation
//
GemmPlanarComplexArrayConfiguration configuration{
{expected_M, expected_N, expected_K},
batch_count,
lda_real,
lda_imag,
ldb_real,
ldb_imag,
ldc_real,
ldc_imag,
ldd_real,
ldd_imag
};
// Query host work space size
uint64_t host_workspace_size_needed = operation->get_host_workspace_size(&configuration);
if (uint64_t(kHostWorkspaceSize) < host_workspace_size_needed) {
return cutlass::Status::kErrorNotSupported;
}
char host_workspace[kHostWorkspaceSize];
// Query device workspace size
uint64_t device_workspace_size_needed = operation->get_device_workspace_size(&configuration);
if (uint64_t(workspace_size_) < device_workspace_size_needed) {
return cutlass::Status::kErrorNotSupported;
}
// Initialize host and device workspaces
Status status = operation->initialize(
&configuration,
host_workspace,
workspace_,
stream_);
if (status != cutlass::Status::kSuccess) {
return status;
}
// Run the operator
GemmPlanarComplexArrayArguments arguments{
M, N, K,
ptr_A_real,
ptr_A_imag,
ptr_B_real,
ptr_B_imag,
ptr_C_real,
ptr_C_imag,
ptr_D_real,
ptr_D_imag,
alpha,
beta,
scalar_pointer_mode_
};
return operation->run(&arguments, host_workspace, workspace_, stream_);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Finds conv operation instances with Conv::ElementC = Reduction::ElementWorkspace
Operation const* find_conv_operation_for_parallel_reduction(Operation const *operation) {
ConvDescription const &conv_desc =
static_cast<ConvDescription const &>(operation->description());
// if the current conv operation's accumulator and output data types match, return the operation
if(conv_desc.tile_description.math_instruction.element_accumulator == conv_desc.C.element) {
return operation;
}
// find conv operation to match conv output and reduction workspace data type
ConvFunctionalKey key(
library::Provider::kCUTLASS,
conv_desc.conv_kind,
conv_desc.A.element,
conv_desc.A.layout,
conv_desc.B.element,
conv_desc.B.layout,
conv_desc.tile_description.math_instruction.element_accumulator,
conv_desc.C.layout,
conv_desc.tile_description.math_instruction.element_accumulator,
conv_desc.element_epilogue);
// conv operation table for conv2d or conv3d
auto conv_operations = (conv_desc.kind == OperationKind::kConv2d) ?
Singleton::get().operation_table.conv2d_operations :
Singleton::get().operation_table.conv3d_operations;
// find ConvFunctionalKey in convolution operation table
auto operators_it = conv_operations.find(key);
if (operators_it == conv_operations.end()) {
return nullptr;
}
if (operators_it->second.empty()) {
return nullptr;
}
// conv operation for same compute capability and iterator algorithm
ConvPreferenceKey preference_key(
conv_desc.tile_description.minimum_compute_capability,
conv_desc.iterator_algorithm);
auto it = operators_it->second.find(preference_key);
if(it == operators_it->second.end()) {
return nullptr;
}
// return the matching conv operation (same tile sizes and instruction)
for (auto op : it->second) {
if (op->description().tile_description == operation->description().tile_description) {
return op;
}
}
return nullptr;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace library
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| e9f9ddf613b0bde9efb59e9d6689df0328263213.cu | /***************************************************************************************************
* Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted
* provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief CUTLASS Library handle.
*/
#include <iostream>
#include <stdexcept>
#include <cstdint>
#include "cutlass/library/handle.h"
#include "cutlass/library/singleton.h"
#include "cutlass/library/util.h"
namespace cutlass {
namespace library {
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Constructor
Handle::Handle(
cudaStream_t stream,
size_t workspace_size
):
provider_(Provider::kCUTLASS),
stream_(stream),
workspace_(nullptr),
workspace_size_(0),
scalar_pointer_mode_(ScalarPointerMode::kHost),
last_operation_(nullptr) {
int device_idx = -1;
cudaError_t error = cudaGetDevice(&device_idx);
if (error != cudaSuccess) {
throw std::runtime_error("cudaGetDevice() failed");
}
error = cudaGetDeviceProperties(&device_, device_idx);
if (error != cudaSuccess) {
throw std::runtime_error("cudaGetDeviceProperties() failed");
}
set_workspace_size(workspace_size);
Singleton::get();
}
/// Destructor
Handle::~Handle() {
if (workspace_) {
cudaFree(workspace_);
workspace_ = nullptr;
workspace_size_ = 0;
}
}
/// Move constructor
Handle::Handle(Handle && handle) {
device_ = handle.device_;
workspace_size_ = handle.workspace_size_;
workspace_ = handle.workspace_;
stream_ = handle.stream_;
scalar_pointer_mode_ = handle.scalar_pointer_mode_;
handle.workspace_ = nullptr;
handle.workspace_size_ = 0;
}
/// Move assignment operator
Handle & Handle::operator=(Handle && handle) {
provider_ = handle.provider_;
device_ = handle.device_;
workspace_size_ = handle.workspace_size_;
workspace_ = handle.workspace_;
stream_ = handle.stream_;
scalar_pointer_mode_ = handle.scalar_pointer_mode_;
handle.workspace_ = nullptr;
handle.workspace_size_ = 0;
return *this;
}
int Handle::compute_capability() const {
return device_.major * 10 + device_.minor;
}
/// Sets the current CUDA stream
void Handle::set_stream(cudaStream_t stream) {
stream_ = stream;
}
/// Gets the current CUDA stream
cudaStream_t Handle::get_stream() const {
return stream_;
}
/// Gets the current provider
Provider Handle::get_provider() const {
return provider_;
}
/// Sets the provider of operations
void Handle::set_provider(Provider provider) {
provider_ = provider;
}
/// Gets the device workspace size
size_t Handle::get_workspace_size() const {
return workspace_size_;
}
/// Gets a pointer to the device workspace allocation in Global Memory
void *Handle::get_workspace() const {
return workspace_;
}
/// Sets the size of device workspace, invalidating previous calls to get_device_workspace()
void Handle::set_workspace_size(size_t bytes) {
if (bytes != workspace_size_) {
if (workspace_) {
cudaFree(workspace_);
}
workspace_ = nullptr;
workspace_size_ = bytes;
if (workspace_size_) {
cudaError_t error = cudaMalloc((void **)&workspace_, workspace_size_);
if (error != cudaSuccess) {
throw std::runtime_error("Failed to allocate workspace");
}
}
}
if (workspace_) {
cudaError_t error = cudaMemset(workspace_, 0, workspace_size_);
if (error != cudaSuccess) {
throw std::runtime_error("Failed to clear workspace");
}
}
}
/// Gets the scalar pointer mode
ScalarPointerMode Handle::get_scalar_pointer_mode() const {
return scalar_pointer_mode_;
}
/// Sets the scalar pointer mode
void Handle::set_scalar_pointer_mode(ScalarPointerMode mode) {
scalar_pointer_mode_ = mode;
}
/// Gets the last operation
Operation const *Handle::get_last_operation() const {
return last_operation_;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Returns the maximum required alignment for each operator
static int maximum_alignment_requirement(GemmDescription const &desc) {
return std::max(
std::max(desc.A.alignment, desc.B.alignment), desc.C.alignment);
}
/// Returns the largest alignment (in units of elements) the problem satisfies, starting from a
/// given upper limit.
static int gemm_problem_alignment(
int M,
int N,
int K,
NumericTypeID element_A,
void const *ptr_A,
int lda,
int64_t batch_stride_A,
NumericTypeID element_B,
void const *ptr_B,
int ldb,
int64_t batch_stride_B,
NumericTypeID element_C,
void const * ptr_C,
int ldc,
int64_t batch_stride_C,
void const * ptr_D,
int ldd,
int64_t batch_stride_D,
int max_alignment_in_bytes = 16
) {
void const *pointers[] = {
ptr_A, ptr_B, ptr_C, ptr_D
};
int64_t extents[] = {
M, N, K, lda, ldb, ldc, ldd, batch_stride_A, batch_stride_B, batch_stride_C, batch_stride_D
};
NumericTypeID elements[] = {
element_A, element_B, element_C
};
for (; max_alignment_in_bytes > 0; max_alignment_in_bytes /= 2) {
bool satisfied = true;
// Can pointers satisfy this?
for (void const *ptr : pointers) {
std::uintptr_t int_ptr = reinterpret_cast<std::uintptr_t>(ptr);
if (int_ptr % max_alignment_in_bytes) {
satisfied = false;
break;
}
}
if (!satisfied) {
continue;
}
// Compute the maximum alignment based on element data types
int max_element_alignment = 0;
for (NumericTypeID type_id : elements) {
int element_alignment = max_alignment_in_bytes * 8 / library::sizeof_bits(type_id);
max_element_alignment = std::max(max_element_alignment, element_alignment);
}
// Can the problem size and leading dimensions satisfy this?
for (int64_t extent : extents) {
if (extent % max_element_alignment) {
satisfied = false;
break;
}
}
if (!satisfied) {
continue;
}
// Yes
return max_element_alignment;
}
// No alignment satisfies this problem
return 0;
}
/// Find the best kernel in descending order of preference.
static Operation const * find_gemm_operation(
GemmOperationFunctionalMap::const_iterator operators_it,
GemmPreferenceKey const preference_key) {
auto cc_it = operators_it->second.upper_bound(preference_key);
if (cc_it == operators_it->second.begin()) {
return nullptr;
}
Operation const *operation = nullptr;
// Search in descending order of compute capability
do {
--cc_it;
// Search tile sizes in order, for now.
for (auto const * op : cc_it->second) {
GemmDescription const &desc = static_cast<GemmDescription const &>(op->description());
int min_cc = desc.tile_description.minimum_compute_capability;
int max_cc = desc.tile_description.maximum_compute_capability;
int op_alignment = maximum_alignment_requirement(desc);
if ((min_cc <= preference_key.compute_capability) &&
(preference_key.compute_capability <= max_cc) &&
(op_alignment <= preference_key.alignment)) {
operation = op;
break;
}
}
} while (!operation && cc_it != operators_it->second.begin());
return operation;
}
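// Reading note (editorial, not from the original source): GemmPreferenceKey orders entries by
// (compute capability, alignment), so upper_bound() lands just past the last key that does not
// exceed the query. The backward walk above therefore visits candidate groups from the highest
// eligible compute capability downward and returns the first kernel whose min/max capability
// range brackets the device and whose alignment requirement fits the problem.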
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Executes a GEMM computation: D <= alpha * A*B + beta * C
Status Handle::gemm(
int M, /// GEMM M dimension
int N, /// GEMM N dimension
int K, /// GEMM K dimension
NumericTypeID element_compute, /// Data type of internal accumulation
NumericTypeID element_scalar, /// Data type of alpha/beta scalars
void const *alpha, /// Pointer to alpha scalar
NumericTypeID element_A, /// Data type of A matrix elements
LayoutTypeID layout_A, /// Layout of A matrix
ComplexTransform transform_A, /// Complex transformation applied to A matrix - ignored for real-valued matrices
void const * ptr_A, /// Pointer to A matrix in Global Memory
int lda, /// Leading dimension of A matrix
NumericTypeID element_B, /// Data type of B matrix elements
LayoutTypeID layout_B, /// Layout of B matrix
ComplexTransform transform_B, /// Complex transformation applied to B matrix - ignored for real-valued matrices
void const * ptr_B, /// Pointer to B matrix in Global Memory
int ldb, /// Leading dimension of B matrix
void const * beta, /// Pointer to beta scalar
NumericTypeID element_C, /// Data type of C and D matrices
void const * ptr_C, /// Pointer to C matrix
int ldc, /// Leading dimension of C matrix
void * ptr_D, /// Pointer to D matrix
int ldd /// Leading dimension of D matrix
) {
//
// Find the operation
//
GemmFunctionalKey key(
provider_,
GemmKind::kGemm,
element_compute,
element_scalar,
element_A,
layout_A,
transform_A,
element_B,
layout_B,
transform_B,
element_C
);
auto operators_it = Singleton::get().operation_table.gemm_operations.find(key);
if (operators_it == Singleton::get().operation_table.gemm_operations.end()) {
return cutlass::Status::kErrorNotSupported;
}
if (operators_it->second.empty()) {
return cutlass::Status::kErrorNotSupported;
}
//
// Compute the largest alignment restriction the kernel can satisfy.
//
// Maximum alignment expectation among all kernels (in units of bytes)
int const kMaximumAlignmentSize = 16;
int alignment = gemm_problem_alignment(
M, N, K,
element_A, ptr_A, lda, 0,
element_B, ptr_B, ldb, 0,
element_C, ptr_C, ldc, 0,
ptr_D, ldd, 0, kMaximumAlignmentSize
);
//
// Find the best kernel in descending order of preference.
//
GemmPreferenceKey preference_key(compute_capability(), alignment);
Operation const *operation = find_gemm_operation(operators_it, preference_key);
if (!operation) {
return cutlass::Status::kErrorNotSupported;
}
last_operation_ = operation;
//
// Configure operation
//
GemmConfiguration configuration{
{M, N, K},
lda,
ldb,
ldc,
ldd,
1
};
// Query host work space size
uint64_t host_workspace_size_needed = operation->get_host_workspace_size(&configuration);
if (uint64_t(kHostWorkspaceSize) < host_workspace_size_needed) {
return cutlass::Status::kErrorNotSupported;
}
char host_workspace[kHostWorkspaceSize];
// Query device workspace size
uint64_t device_workspace_size_needed = operation->get_device_workspace_size(&configuration);
if (uint64_t(workspace_size_) < device_workspace_size_needed) {
return cutlass::Status::kErrorNotSupported;
}
// Initialize host and device workspaces
Status status = operation->initialize(
&configuration,
host_workspace,
workspace_,
stream_);
if (status != cutlass::Status::kSuccess) {
return status;
}
// Run the operator
GemmArguments arguments{
ptr_A,
ptr_B,
ptr_C,
ptr_D,
alpha,
beta,
scalar_pointer_mode_
};
return operation->run(&arguments, host_workspace, workspace_, stream_);
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Executes a GEMM computation: D <= alpha * A*B + beta * C.
//
// Supports batched-strided, batched array or split-K serial or split-K parallel.
//
Status Handle::gemm_universal(
GemmUniversalMode mode, /// indicates the mode in which the kUniversal GEMM is launched
int M, /// GEMM M dimension
int N, /// GEMM N dimension
int K, /// GEMM K dimension
NumericTypeID element_compute, /// Data type of internal accumulation
NumericTypeID element_scalar, /// Data type of alpha/beta scalars
void const *alpha, /// Pointer to alpha scalar
NumericTypeID element_A, /// Data type of A matrix elements
LayoutTypeID layout_A, /// Layout of A matrix
ComplexTransform transform_A, /// Complex transformation applied to A matrix - ignored for real-valued matrices
void const * ptr_A, /// Pointer to A matrix in Global Memory
int lda, /// Leading dimension of A matrix
NumericTypeID element_B, /// Data type of B matrix elements
LayoutTypeID layout_B, /// Layout of B matrix
ComplexTransform transform_B, /// Complex transformation applied to B matrix - ignored for real-valued matrices
void const * ptr_B, /// Pointer to B matrix in Global Memory
int ldb, /// Leading dimension of B matrix
void const * beta, /// Pointer to beta scalar
NumericTypeID element_C, /// Data type of C and D matrices
void const * ptr_C, /// Pointer to C matrix
int ldc, /// Leading dimension of C matrix
void * ptr_D, /// Pointer to D matrix
int ldd, /// Leading dimension of D matrix
int batch_count, /// Batch count or number of split-K slices
int64_t batch_stride_A, /// Batch stride of A operand
int64_t batch_stride_B, /// Batch stride of B operand
int64_t batch_stride_C, /// Batch stride of C operand
int64_t batch_stride_D /// Batch stride of D operand
) {
//
// Find the operation
//
GemmFunctionalKey key(
provider_,
GemmKind::kUniversal,
element_compute,
element_scalar,
element_A,
layout_A,
transform_A,
element_B,
layout_B,
transform_B,
element_C
);
auto operators_it = Singleton::get().operation_table.gemm_operations.find(key);
if (operators_it == Singleton::get().operation_table.gemm_operations.end()) {
return cutlass::Status::kErrorNotSupported;
}
if (operators_it->second.empty()) {
return cutlass::Status::kErrorNotSupported;
}
//
// Compute the largest alignment restriction the kernel can satisfy.
//
// Maximum alignment expectation among all kernels (in units of bytes)
int const kMaximumAlignmentSize = 16;
void const *ptr_A_check = ptr_A;
void const *ptr_B_check = ptr_B;
void const *ptr_C_check = ptr_C;
void * ptr_D_check = ptr_D;
// Ignore alignment of pointers to pointers. We can't check this from the host,
// as each batch index has its own pointer in device memory.
if (mode == GemmUniversalMode::kArray) {
ptr_A_check = nullptr;
ptr_B_check = nullptr;
ptr_C_check = nullptr;
ptr_D_check = nullptr;
}
int alignment = gemm_problem_alignment(
M, N, K,
element_A, ptr_A_check, lda, 0,
element_B, ptr_B_check, ldb, 0,
element_C, ptr_C_check, ldc, 0,
ptr_D_check, ldd, 0, kMaximumAlignmentSize
);
//
// Find the best kernel in descending order of preference.
//
GemmPreferenceKey preference_key(compute_capability(), alignment);
Operation const *operation = find_gemm_operation(operators_it, preference_key);
if (!operation) {
return cutlass::Status::kErrorNotSupported;
}
last_operation_ = operation;
//
// Configure operation
//
GemmUniversalConfiguration configuration{
mode,
{M, N, K},
batch_count,
lda,
ldb,
ldc,
ldd
};
// Query host work space size
uint64_t host_workspace_size_needed = operation->get_host_workspace_size(&configuration);
if (uint64_t(kHostWorkspaceSize) < host_workspace_size_needed) {
return cutlass::Status::kErrorNotSupported;
}
char host_workspace[kHostWorkspaceSize];
// Query device workspace size
uint64_t device_workspace_size_needed = operation->get_device_workspace_size(&configuration);
if (uint64_t(workspace_size_) < device_workspace_size_needed) {
return cutlass::Status::kErrorNotSupported;
}
// Initialize host and device workspaces
Status status = operation->initialize(
&configuration,
host_workspace,
workspace_,
stream_);
if (status != cutlass::Status::kSuccess) {
return status;
}
// Run the operator
GemmUniversalArguments arguments{
ptr_A,
ptr_B,
ptr_C,
ptr_D,
alpha,
beta,
scalar_pointer_mode_,
batch_stride_A,
batch_stride_B,
batch_stride_C,
batch_stride_D
};
return operation->run(&arguments, host_workspace, workspace_, stream_);
}
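// Illustrative usage sketch (hypothetical helper, not part of the library): a batched,
// strided f32 GEMM through the universal path above, with a fixed stride between consecutive
// batches of each operand. Device pointers and strides are assumed valid for the problem.
inline Status example_batched_sgemm(
Handle &handle,
int M, int N, int K, int batch_count,
float alpha, float const *d_A, int lda, int64_t stride_A,
float const *d_B, int ldb, int64_t stride_B,
float beta, float const *d_C, int ldc, int64_t stride_C,
float *d_D, int ldd, int64_t stride_D) {
return handle.gemm_universal(
GemmUniversalMode::kBatched,
M, N, K,
NumericTypeID::kF32, NumericTypeID::kF32, &alpha,
NumericTypeID::kF32, LayoutTypeID::kColumnMajor, ComplexTransform::kNone, d_A, lda,
NumericTypeID::kF32, LayoutTypeID::kColumnMajor, ComplexTransform::kNone, d_B, ldb,
&beta,
NumericTypeID::kF32, d_C, ldc,
d_D, ldd,
batch_count,
stride_A, stride_B, stride_C, stride_D);
}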
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Planar complex GEMM
Status Handle::gemm_planar_complex(
int M, /// GEMM M dimension
int N, /// GEMM N dimension
int K, /// GEMM K dimension
NumericTypeID element_compute, /// Data type of internal accumulation
NumericTypeID element_scalar, /// Data type of alpha/beta scalars
void const *alpha, /// Pointer to alpha scalar
NumericTypeID element_A, /// Data type of A matrix elements
LayoutTypeID layout_A, /// Layout of A matrix
ComplexTransform transform_A, /// Complex transformation applied to A matrix
void const * ptr_A_real, /// Pointer to real part of A matrix
void const * ptr_A_imag, /// Pointer to imaginary part of A matrix
int lda_real, /// Leading dimension of real part of A matrix
int lda_imag, /// Leading dimension of imaginary part of A matrix
NumericTypeID element_B, /// Data type of B matrix elements
LayoutTypeID layout_B, /// Layout of B matrix
ComplexTransform transform_B, /// Complex transformation applied to B matrix
void const * ptr_B_real, /// Pointer to real part of B matrix
void const * ptr_B_imag, /// Pointer to imaginary part of B matrix
int ldb_real, /// Leading dimension of real part of B matrix
int ldb_imag, /// Leading dimension of imaginary part of B matrix
void const * beta, /// Pointer to beta scalar
NumericTypeID element_C, /// Data type of C and D matrix
void const * ptr_C_real, /// Pointer to real part of C matrix
void const * ptr_C_imag, /// Pointer to imaginary part of C matrix
int ldc_real, /// Leading dimension of real part of C matrix
int ldc_imag, /// Leading dimension of imaginary part of C matrix
void * ptr_D_real, /// Pointer to real part of D matrix
void * ptr_D_imag, /// Pointer to imaginary part of D matrix
int ldd_real, /// Leading dimension of real part of D matrix
int ldd_imag, /// Leading dimension of imaginary part of D matrix
int batch_count, /// Number of batched GEMMs to execute
int64_t batch_stride_A_real,
int64_t batch_stride_A_imag,
int64_t batch_stride_B_real,
int64_t batch_stride_B_imag,
int64_t batch_stride_C_real,
int64_t batch_stride_C_imag,
int64_t batch_stride_D_real,
int64_t batch_stride_D_imag
) {
//
// Find the operation
//
GemmFunctionalKey key(
provider_,
GemmKind::kPlanarComplex,
element_compute,
element_scalar,
element_A,
layout_A,
transform_A,
element_B,
layout_B,
transform_B,
element_C
);
auto operators_it = Singleton::get().operation_table.gemm_operations.find(key);
if (operators_it == Singleton::get().operation_table.gemm_operations.end()) {
return cutlass::Status::kErrorNotSupported;
}
if (operators_it->second.empty()) {
return cutlass::Status::kErrorNotSupported;
}
//
// Compute the largest alignment restriction the kernel can satisfy.
//
// Maximum alignment expectation among all kernels (in units of bytes)
int const kMaximumAlignmentSize = 16;
int alignment = std::max(
gemm_problem_alignment(
M, N, K,
element_A, ptr_A_real, lda_real, batch_stride_A_real,
element_B, ptr_B_real, ldb_real, batch_stride_B_real,
element_C, ptr_C_real, ldc_real, batch_stride_C_real,
ptr_D_real, ldd_real, batch_stride_D_real, kMaximumAlignmentSize
),
gemm_problem_alignment(
M, N, K,
element_A, ptr_A_imag, lda_imag, batch_stride_A_imag,
element_B, ptr_B_imag, ldb_imag, batch_stride_B_imag,
element_C, ptr_C_imag, ldc_imag, batch_stride_C_imag,
ptr_D_imag, ldd_imag, batch_stride_D_imag, kMaximumAlignmentSize
)
);
//
// Find the best kernel in descending order of preference.
//
GemmPreferenceKey preference_key(compute_capability(), alignment);
Operation const *operation = find_gemm_operation(operators_it, preference_key);
if (!operation) {
return cutlass::Status::kErrorNotSupported;
}
last_operation_ = operation;
//
// Configure operation
//
GemmPlanarComplexConfiguration configuration{
GemmUniversalMode::kBatched,
{M, N, K},
batch_count,
lda_real,
lda_imag,
ldb_real,
ldb_imag,
ldc_real,
ldc_imag,
ldd_real,
ldd_imag
};
// Query host work space size
uint64_t host_workspace_size_needed = operation->get_host_workspace_size(&configuration);
if (uint64_t(kHostWorkspaceSize) < host_workspace_size_needed) {
return cutlass::Status::kErrorNotSupported;
}
char host_workspace[kHostWorkspaceSize];
// Query device workspace size
uint64_t device_workspace_size_needed = operation->get_device_workspace_size(&configuration);
if (uint64_t(workspace_size_) < device_workspace_size_needed) {
return cutlass::Status::kErrorNotSupported;
}
// Initialize host and device workspaces
Status status = operation->initialize(
&configuration,
host_workspace,
workspace_,
stream_);
if (status != cutlass::Status::kSuccess) {
return status;
}
// Run the operator
GemmPlanarComplexArguments arguments{
ptr_A_real,
ptr_A_imag,
ptr_B_real,
ptr_B_imag,
ptr_C_real,
ptr_C_imag,
ptr_D_real,
ptr_D_imag,
alpha,
beta,
scalar_pointer_mode_,
batch_stride_A_real,
batch_stride_A_imag,
batch_stride_B_real,
batch_stride_B_imag,
batch_stride_C_real,
batch_stride_C_imag,
batch_stride_D_real,
batch_stride_D_imag
};
return operation->run(&arguments, host_workspace, workspace_, stream_);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Planar complex batched GEMM loading pointers from arrays in global memory
Status Handle::gemm_planar_complex_array(
int expected_M, /// Expected GEMM M dimension (used for sizing CUDA grid)
int expected_N, /// Expected GEMM N dimension (used for sizing CUDA grid)
int expected_K, /// Expected GEMM K dimension
int batch_count, /// Number of independent GEMM computations to execute
int const *M, /// Array containing the GEMM M dimension for each batch index
int const *N, /// Array containing the GEMM N dimension for each batch index
int const *K, /// Array containing the GEMM K dimension for each batch index
NumericTypeID element_compute, /// Data type of internal accumulation
NumericTypeID element_scalar, /// Data type of alpha/beta scalars
void const *alpha, /// Pointer to alpha scalar
NumericTypeID element_A, /// Data type of A matrix elements
LayoutTypeID layout_A, /// Layout of A matrix
ComplexTransform transform_A, /// Complex transformation applied to A matrix
void const * const * ptr_A_real, /// Pointer to array containing pointers to real part of A matrices
void const * const * ptr_A_imag, /// Pointer to array containing pointers to imaginary part of A matrices
int lda_real, /// Leading dimension of real part of A matrix
int lda_imag, /// Leading dimension of imaginary part of A matrix
NumericTypeID element_B, /// Data type of B matrix elements
LayoutTypeID layout_B, /// Layout of B matrix
ComplexTransform transform_B, /// Complex transformation applied to B matrix
void const * const * ptr_B_real, /// Pointer to array containing pointers to real part of B matrices
void const * const * ptr_B_imag, /// Pointer to array containing pointers to imaginary part of B matrices
int ldb_real, /// Leading dimension of real part of B matrix
int ldb_imag, /// Leading dimension of imaginary part of B matrix
void const * beta, /// Pointer to beta scalar
NumericTypeID element_C, /// Data type of C and D matrix
void const * const * ptr_C_real, /// Pointer to array containing pointers to real part of C matrices
void const * const * ptr_C_imag, /// Pointer to array containing pointers to imaginary part of C matrices
int ldc_real, /// Leading dimension of real part of C matrix
int ldc_imag, /// Leading dimension of imaginary part of C matrix
void * const * ptr_D_real, /// Pointer to array containing pointers to real part of D matrices
void * const * ptr_D_imag, /// Pointer to array containing pointers to imaginary part of D matrices
int ldd_real, /// Leading dimension of real part of D matrix
int ldd_imag /// Leading dimension of imaginary part of D matrix
) {
//
// Find the operation
//
GemmFunctionalKey key(
provider_,
GemmKind::kPlanarComplexArray,
element_compute,
element_scalar,
element_A,
layout_A,
transform_A,
element_B,
layout_B,
transform_B,
element_C
);
auto operators_it = Singleton::get().operation_table.gemm_operations.find(key);
if (operators_it == Singleton::get().operation_table.gemm_operations.end()) {
return cutlass::Status::kErrorNotSupported;
}
if (operators_it->second.empty()) {
return cutlass::Status::kErrorNotSupported;
}
//
// Compute the largest alignment restriction the kernel can satisfy.
//
// Maximum alignment expectation among all kernels (in units of bytes)
int const kMaximumAlignmentSize = 16;
int alignment = std::max(
gemm_problem_alignment(
expected_M, expected_N, expected_K,
element_A, nullptr, lda_real, 0,
element_B, nullptr, ldb_real, 0,
element_C, nullptr, ldc_real, 0,
nullptr, ldd_real, 0, kMaximumAlignmentSize
),
gemm_problem_alignment(
expected_M, expected_N, expected_K,
element_A, nullptr, lda_imag, 0,
element_B, nullptr, ldb_imag, 0,
element_C, nullptr, ldc_imag, 0,
nullptr, ldd_imag, 0, kMaximumAlignmentSize
)
);
//
// Find the best kernel in descending order of preference.
//
GemmPreferenceKey preference_key(compute_capability(), alignment);
Operation const *operation = find_gemm_operation(operators_it, preference_key);
if (!operation) {
return cutlass::Status::kErrorNotSupported;
}
last_operation_ = operation;
//
// Configure operation
//
GemmPlanarComplexArrayConfiguration configuration{
{expected_M, expected_N, expected_K},
batch_count,
lda_real,
lda_imag,
ldb_real,
ldb_imag,
ldc_real,
ldc_imag,
ldd_real,
ldd_imag
};
// Query host workspace size
uint64_t host_workspace_size_needed = operation->get_host_workspace_size(&configuration);
if (uint64_t(kHostWorkspaceSize) < host_workspace_size_needed) {
return cutlass::Status::kErrorNotSupported;
}
char host_workspace[kHostWorkspaceSize];
// Query device workspace size
uint64_t device_workspace_size_needed = operation->get_device_workspace_size(&configuration);
if (uint64_t(workspace_size_) < device_workspace_size_needed) {
return cutlass::Status::kErrorNotSupported;
}
// Initialize host and device workspaces
Status status = operation->initialize(
&configuration,
host_workspace,
workspace_,
stream_);
if (status != cutlass::Status::kSuccess) {
return status;
}
// Run the operator
GemmPlanarComplexArrayArguments arguments{
M, N, K,
ptr_A_real,
ptr_A_imag,
ptr_B_real,
ptr_B_imag,
ptr_C_real,
ptr_C_imag,
ptr_D_real,
ptr_D_imag,
alpha,
beta,
scalar_pointer_mode_
};
return operation->run(&arguments, host_workspace, workspace_, stream_);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Finds conv operation instances with Conv::ElementC = Reduction::ElementWorkspace
Operation const* find_conv_operation_for_parallel_reduction(Operation const *operation) {
ConvDescription const &conv_desc =
static_cast<ConvDescription const &>(operation->description());
// if the current conv operation's accumulator and output data types match, return the operation
if(conv_desc.tile_description.math_instruction.element_accumulator == conv_desc.C.element) {
return operation;
}
// find conv operation to match conv output and reduction workspace data type
ConvFunctionalKey key(
library::Provider::kCUTLASS,
conv_desc.conv_kind,
conv_desc.A.element,
conv_desc.A.layout,
conv_desc.B.element,
conv_desc.B.layout,
conv_desc.tile_description.math_instruction.element_accumulator,
conv_desc.C.layout,
conv_desc.tile_description.math_instruction.element_accumulator,
conv_desc.element_epilogue);
// conv operation table for conv2d or conv3d
auto conv_operations = (conv_desc.kind == OperationKind::kConv2d) ?
Singleton::get().operation_table.conv2d_operations :
Singleton::get().operation_table.conv3d_operations;
// find ConvFunctionalKey in convolution operation table
auto operators_it = conv_operations.find(key);
if (operators_it == conv_operations.end()) {
return nullptr;
}
if (operators_it->second.empty()) {
return nullptr;
}
// conv operation for same compute capability and iterator algorithm
ConvPreferenceKey preference_key(
conv_desc.tile_description.minimum_compute_capability,
conv_desc.iterator_algorithm);
auto it = operators_it->second.find(preference_key);
if(it == operators_it->second.end()) {
return nullptr;
}
// return the matching conv operation (same tile sizes and instruction)
for (auto op : it->second) {
if (op->description().tile_description == operation->description().tile_description) {
return op;
}
}
return nullptr;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace library
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
|
84ddb8108f1f4257da652daab28b9e0ca81e4d0d.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2020, the YACCLAB contributors, as
// shown by the AUTHORS file. All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
#include <opencv2/cudafeatures2d.hpp>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "labeling_algorithms.h"
#include "register.h"
// Optimized Label Equivalence (OLE), enhanced by the use of texture memory as hinted in Asad2019
#define BLOCK_ROWS 16
#define BLOCK_COLS 16
using namespace cv;
namespace {
// Init phase.
// Labels start at value 1, to differentiate them from the background, which has value 0.
__global__ void Init(const cuda::PtrStepSzb img, cuda::PtrStepSzi labels) {
unsigned global_row = blockIdx.y * BLOCK_ROWS + threadIdx.y;
unsigned global_col = blockIdx.x * BLOCK_COLS + threadIdx.x;
unsigned img_index = global_row * img.step + global_col;
unsigned labels_index = global_row * (labels.step / labels.elem_size) + global_col;
if (global_row < img.rows && global_col < img.cols) {
labels[labels_index] = img[img_index] ? (labels_index + 1) : 0;
}
}
__device__ unsigned int MinLabel(unsigned l1, unsigned l2) {
if (l1 && l2)
return min(l1, l2);
else
return l1;
}
__device__ unsigned int FindMinLabel(hipTextureObject_t texObject, unsigned row, unsigned col, unsigned label) {
unsigned int min = label;
min = MinLabel(min, tex2D<unsigned int>(texObject, col - 1, row - 1));
min = MinLabel(min, tex2D<unsigned int>(texObject, col + 0, row - 1));
min = MinLabel(min, tex2D<unsigned int>(texObject, col + 1, row - 1));
min = MinLabel(min, tex2D<unsigned int>(texObject, col - 1, row + 0));
min = MinLabel(min, tex2D<unsigned int>(texObject, col + 1, row + 0));
min = MinLabel(min, tex2D<unsigned int>(texObject, col - 1, row + 1));
min = MinLabel(min, tex2D<unsigned int>(texObject, col + 0, row + 1));
min = MinLabel(min, tex2D<unsigned int>(texObject, col + 1, row + 1));
return min;
}
// Scan phase.
// Each foreground pixel computes the minimum label among itself and its 8-neighbours; if it is smaller than the current label, it is recorded at the position indexed by that label (merging equivalences).
__global__ void Scan(cuda::PtrStepSzi labels, hipTextureObject_t texObject, char *changes) {
unsigned row = blockIdx.y * BLOCK_ROWS + threadIdx.y;
unsigned col = blockIdx.x * BLOCK_COLS + threadIdx.x;
// unsigned labels_index = row * (labels.step / labels.elem_size) + col;
unsigned label = tex2D<unsigned int>(texObject, col, row);
if (label) {
unsigned min_label = FindMinLabel(texObject, row, col, label);
if (min_label < label) {
labels[label - 1] = min(static_cast<unsigned int>(labels[label - 1]), min_label);
*changes = 1;
}
}
}
// Analysis phase.
// Each foreground pixel follows its chain of labels up to the root and adopts the root's label (label flattening).
__global__ void Analyze(cuda::PtrStepSzi labels) {
unsigned row = blockIdx.y * BLOCK_ROWS + threadIdx.y;
unsigned col = blockIdx.x * BLOCK_COLS + threadIdx.x;
unsigned labels_index = row * (labels.step / labels.elem_size) + col;
if (row < labels.rows && col < labels.cols) {
unsigned label = labels[labels_index];
if (label) {
unsigned index = labels_index;
while (label - 1 != index) {
index = label - 1;
label = labels[index];
}
labels[labels_index] = label;
}
}
}
__device__ unsigned int FindMinLabelNotTex(cuda::PtrStepSzi labels, unsigned row, unsigned col, unsigned label, unsigned labels_index) {
unsigned int min = label;
if (row > 0) {
min = MinLabel(min, labels.data[labels_index - (labels.step / labels.elem_size)]);
if (col > 0)
min = MinLabel(min, labels.data[labels_index - (labels.step / labels.elem_size) - 1]);
if (col < labels.cols - 1)
min = MinLabel(min, labels.data[labels_index - (labels.step / labels.elem_size) + 1]);
}
if (row < labels.rows - 1) {
min = MinLabel(min, labels.data[labels_index + (labels.step / labels.elem_size)]);
if (col > 0)
min = MinLabel(min, labels.data[labels_index + (labels.step / labels.elem_size) - 1]);
if (col < labels.cols - 1)
min = MinLabel(min, labels.data[labels_index + (labels.step / labels.elem_size) + 1]);
}
if (col > 0)
min = MinLabel(min, labels.data[labels_index - 1]);
if (col < labels.cols - 1)
min = MinLabel(min, labels.data[labels_index + 1]);
return min;
}
// Scan phase.
// Same as Scan, but neighbour labels are read directly from global memory instead of through the texture object.
__global__ void ScanNotTex(cuda::PtrStepSzi labels, hipTextureObject_t texObject, char* changes) {
unsigned row = blockIdx.y * BLOCK_ROWS + threadIdx.y;
unsigned col = blockIdx.x * BLOCK_COLS + threadIdx.x;
unsigned labels_index = row * (labels.step / labels.elem_size) + col;
if (row < labels.rows && col < labels.cols) {
unsigned label = labels[labels_index];
if (label) {
unsigned min_label = FindMinLabelNotTex(labels, row, col, label, labels_index);
if (min_label < label) {
labels[label - 1] = min(static_cast<unsigned int>(labels[label - 1]), min_label);
*changes = 1;
}
}
}
}
}
class OLE_TEX : public GpuLabeling2D<Connectivity2D::CONN_8> {
private:
dim3 grid_size_;
dim3 block_size_;
char changes;
char *d_changes;
public:
OLE_TEX() {}
void PerformLabeling() {
d_img_labels_.create(d_img_.size(), CV_32SC1);
hipMalloc(&d_changes, sizeof(char));
// Workaround for 1D images, necessary for sm >= 70
//void (*scan_kernel) (cuda::PtrStepSzi, hipTextureObject_t, char*) = (d_img_.rows == 1 || d_img_.cols == 1) ? ScanNotTex : Scan;
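// (For 1-row or 1-column images the code below launches the global-memory
// ScanNotTex kernel instead of the texture-based Scan.)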
// Create Texture Object
hipChannelFormatDesc chFormatDesc = hipCreateChannelDesc<unsigned int>();
hipResourceDesc resDesc = {};
resDesc.resType = hipResourceTypePitch2D;
resDesc.res.pitch2D.devPtr = d_img_labels_.data;
resDesc.res.pitch2D.desc = chFormatDesc;
resDesc.res.pitch2D.width = d_img_.cols;
resDesc.res.pitch2D.height = d_img_.rows;
resDesc.res.pitch2D.pitchInBytes = d_img_labels_.step;
hipTextureDesc texDesc = {
{hipAddressModeBorder, hipAddressModeBorder}, // addressMode (fetches with out-of-range coordinates return 0)
hipFilterModePoint, // filterMode (do not interpolate and take the nearest value)
hipReadModeElementType, // readMode (do not convert to floating point, only for 8-bit and 16-bit integer formats)
// other values are defaulted to 0
};
hipTextureObject_t texObject;
hipCreateTextureObject(&texObject, &resDesc, &texDesc, nullptr);
grid_size_ = dim3((d_img_.cols + BLOCK_COLS - 1) / BLOCK_COLS, (d_img_.rows + BLOCK_ROWS - 1) / BLOCK_ROWS, 1);
block_size_ = dim3(BLOCK_COLS, BLOCK_ROWS, 1);
Init << <grid_size_, block_size_ >> >(d_img_, d_img_labels_);
if (d_img_.rows == 1 || d_img_.cols == 1) {
while (true) {
changes = 0;
hipMemcpy(d_changes, &changes, sizeof(char), hipMemcpyHostToDevice);
ScanNotTex << <grid_size_, block_size_ >> > (d_img_labels_, texObject, d_changes);
hipMemcpy(&changes, d_changes, sizeof(char), hipMemcpyDeviceToHost);
if (!changes)
break;
Analyze << <grid_size_, block_size_ >> > (d_img_labels_);
}
}
else {
while (true) {
changes = 0;
hipMemcpy(d_changes, &changes, sizeof(char), hipMemcpyHostToDevice);
Scan << <grid_size_, block_size_ >> > (d_img_labels_, texObject, d_changes);
hipMemcpy(&changes, d_changes, sizeof(char), hipMemcpyDeviceToHost);
if (!changes)
break;
Analyze << <grid_size_, block_size_ >> > (d_img_labels_);
}
}
hipDestroyTextureObject(texObject);
hipFree(d_changes);
hipDeviceSynchronize();
}
private:
double Alloc() {
perf_.start();
d_img_labels_.create(d_img_.size(), CV_32SC1);
hipMalloc(&d_changes, sizeof(char));
perf_.stop();
return perf_.last();
}
double Dealloc() {
perf_.start();
hipFree(d_changes);
perf_.stop();
return perf_.last();
}
double MemoryTransferHostToDevice() {
perf_.start();
d_img_.upload(img_);
perf_.stop();
return perf_.last();
}
void MemoryTransferDeviceToHost() {
d_img_labels_.download(img_labels_);
}
void AllScans() {
// Create Texture Object
hipChannelFormatDesc chFormatDesc = hipCreateChannelDesc<unsigned int>();
hipResourceDesc resDesc = {};
resDesc.resType = hipResourceTypePitch2D;
resDesc.res.pitch2D.devPtr = d_img_labels_.data;
resDesc.res.pitch2D.desc = chFormatDesc;
resDesc.res.pitch2D.width = d_img_.cols;
resDesc.res.pitch2D.height = d_img_.rows;
resDesc.res.pitch2D.pitchInBytes = d_img_labels_.step;
hipTextureDesc texDesc = {
{hipAddressModeBorder, hipAddressModeBorder}, // addressMode (fetches with out-of-range coordinates return 0)
hipFilterModePoint, // filterMode (do not interpolate and take the nearest value)
hipReadModeElementType, // readMode (do not convert to floating point, only for 8-bit and 16-bit integer formats)
// other values are defaulted to 0
};
hipTextureObject_t texObject;
hipCreateTextureObject(&texObject, &resDesc, &texDesc, nullptr);
grid_size_ = dim3((d_img_.cols + BLOCK_COLS - 1) / BLOCK_COLS, (d_img_.rows + BLOCK_ROWS - 1) / BLOCK_ROWS, 1);
block_size_ = dim3(BLOCK_COLS, BLOCK_ROWS, 1);
Init << <grid_size_, block_size_ >> >(d_img_, d_img_labels_);
if (d_img_.rows == 1 || d_img_.cols == 1) {
while (true) {
changes = 0;
hipMemcpy(d_changes, &changes, sizeof(char), hipMemcpyHostToDevice);
ScanNotTex << <grid_size_, block_size_ >> > (d_img_labels_, texObject, d_changes);
hipMemcpy(&changes, d_changes, sizeof(char), hipMemcpyDeviceToHost);
if (!changes)
break;
Analyze << <grid_size_, block_size_ >> > (d_img_labels_);
}
}
else {
while (true) {
changes = 0;
hipMemcpy(d_changes, &changes, sizeof(char), hipMemcpyHostToDevice);
Scan << <grid_size_, block_size_ >> > (d_img_labels_, texObject, d_changes);
hipMemcpy(&changes, d_changes, sizeof(char), hipMemcpyDeviceToHost);
if (!changes)
break;
Analyze << <grid_size_, block_size_ >> > (d_img_labels_);
}
}
hipDestroyTextureObject(texObject);
hipDeviceSynchronize();
}
public:
void PerformLabelingWithSteps()
{
double alloc_timing = Alloc();
perf_.start();
AllScans();
perf_.stop();
perf_.store(Step(StepType::ALL_SCANS), perf_.last());
double dealloc_timing = Dealloc();
perf_.store(Step(StepType::ALLOC_DEALLOC), alloc_timing + dealloc_timing);
}
};
REGISTER_LABELING(OLE_TEX);
| 84ddb8108f1f4257da652daab28b9e0ca81e4d0d.cu | // Copyright (c) 2020, the YACCLAB contributors, as
// shown by the AUTHORS file. All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
#include <opencv2/cudafeatures2d.hpp>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "labeling_algorithms.h"
#include "register.h"
// Optimized Label Equivalence (OLE), enhanced by the use of texture memory as hinted in Asad2019
#define BLOCK_ROWS 16
#define BLOCK_COLS 16
using namespace cv;
namespace {
// Init phase.
// Labels start at value 1, to differentiate them from the background, which has value 0.
__global__ void Init(const cuda::PtrStepSzb img, cuda::PtrStepSzi labels) {
unsigned global_row = blockIdx.y * BLOCK_ROWS + threadIdx.y;
unsigned global_col = blockIdx.x * BLOCK_COLS + threadIdx.x;
unsigned img_index = global_row * img.step + global_col;
unsigned labels_index = global_row * (labels.step / labels.elem_size) + global_col;
if (global_row < img.rows && global_col < img.cols) {
labels[labels_index] = img[img_index] ? (labels_index + 1) : 0;
}
}
__device__ unsigned int MinLabel(unsigned l1, unsigned l2) {
if (l1 && l2)
return min(l1, l2);
else
return l1;
}
__device__ unsigned int FindMinLabel(cudaTextureObject_t texObject, unsigned row, unsigned col, unsigned label) {
unsigned int min = label;
min = MinLabel(min, tex2D<unsigned int>(texObject, col - 1, row - 1));
min = MinLabel(min, tex2D<unsigned int>(texObject, col + 0, row - 1));
min = MinLabel(min, tex2D<unsigned int>(texObject, col + 1, row - 1));
min = MinLabel(min, tex2D<unsigned int>(texObject, col - 1, row + 0));
min = MinLabel(min, tex2D<unsigned int>(texObject, col + 1, row + 0));
min = MinLabel(min, tex2D<unsigned int>(texObject, col - 1, row + 1));
min = MinLabel(min, tex2D<unsigned int>(texObject, col + 0, row + 1));
min = MinLabel(min, tex2D<unsigned int>(texObject, col + 1, row + 1));
return min;
}
// Scan phase.
// Each foreground pixel computes the minimum label among itself and its 8-neighbours; if it is smaller than the current label, it is recorded at the position indexed by that label (merging equivalences).
__global__ void Scan(cuda::PtrStepSzi labels, cudaTextureObject_t texObject, char *changes) {
unsigned row = blockIdx.y * BLOCK_ROWS + threadIdx.y;
unsigned col = blockIdx.x * BLOCK_COLS + threadIdx.x;
// unsigned labels_index = row * (labels.step / labels.elem_size) + col;
unsigned label = tex2D<unsigned int>(texObject, col, row);
if (label) {
unsigned min_label = FindMinLabel(texObject, row, col, label);
if (min_label < label) {
labels[label - 1] = min(static_cast<unsigned int>(labels[label - 1]), min_label);
*changes = 1;
}
}
}
// Analysis phase.
// Each foreground pixel follows its chain of labels up to the root and adopts the root's label (label flattening).
__global__ void Analyze(cuda::PtrStepSzi labels) {
unsigned row = blockIdx.y * BLOCK_ROWS + threadIdx.y;
unsigned col = blockIdx.x * BLOCK_COLS + threadIdx.x;
unsigned labels_index = row * (labels.step / labels.elem_size) + col;
if (row < labels.rows && col < labels.cols) {
unsigned label = labels[labels_index];
if (label) {
unsigned index = labels_index;
while (label - 1 != index) {
index = label - 1;
label = labels[index];
}
labels[labels_index] = label;
}
}
}
__device__ unsigned int FindMinLabelNotTex(cuda::PtrStepSzi labels, unsigned row, unsigned col, unsigned label, unsigned labels_index) {
unsigned int min = label;
if (row > 0) {
min = MinLabel(min, labels.data[labels_index - (labels.step / labels.elem_size)]);
if (col > 0)
min = MinLabel(min, labels.data[labels_index - (labels.step / labels.elem_size) - 1]);
if (col < labels.cols - 1)
min = MinLabel(min, labels.data[labels_index - (labels.step / labels.elem_size) + 1]);
}
if (row < labels.rows - 1) {
min = MinLabel(min, labels.data[labels_index + (labels.step / labels.elem_size)]);
if (col > 0)
min = MinLabel(min, labels.data[labels_index + (labels.step / labels.elem_size) - 1]);
if (col < labels.cols - 1)
min = MinLabel(min, labels.data[labels_index + (labels.step / labels.elem_size) + 1]);
}
if (col > 0)
min = MinLabel(min, labels.data[labels_index - 1]);
if (col < labels.cols - 1)
min = MinLabel(min, labels.data[labels_index + 1]);
return min;
}
// Scan phase.
// Same as Scan, but neighbour labels are read directly from global memory instead of through the texture object.
__global__ void ScanNotTex(cuda::PtrStepSzi labels, cudaTextureObject_t texObject, char* changes) {
unsigned row = blockIdx.y * BLOCK_ROWS + threadIdx.y;
unsigned col = blockIdx.x * BLOCK_COLS + threadIdx.x;
unsigned labels_index = row * (labels.step / labels.elem_size) + col;
if (row < labels.rows && col < labels.cols) {
unsigned label = labels[labels_index];
if (label) {
unsigned min_label = FindMinLabelNotTex(labels, row, col, label, labels_index);
if (min_label < label) {
labels[label - 1] = min(static_cast<unsigned int>(labels[label - 1]), min_label);
*changes = 1;
}
}
}
}
}
class OLE_TEX : public GpuLabeling2D<Connectivity2D::CONN_8> {
private:
dim3 grid_size_;
dim3 block_size_;
char changes;
char *d_changes;
public:
OLE_TEX() {}
void PerformLabeling() {
d_img_labels_.create(d_img_.size(), CV_32SC1);
cudaMalloc(&d_changes, sizeof(char));
// Workaround for 1D images, necessary for sm >= 70
//void (*scan_kernel) (cuda::PtrStepSzi, cudaTextureObject_t, char*) = (d_img_.rows == 1 || d_img_.cols == 1) ? ScanNotTex : Scan;
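// (For 1-row or 1-column images the code below launches the global-memory
// ScanNotTex kernel instead of the texture-based Scan.)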
// Create Texture Object
cudaChannelFormatDesc chFormatDesc = cudaCreateChannelDesc<unsigned int>();
cudaResourceDesc resDesc = {};
resDesc.resType = cudaResourceTypePitch2D;
resDesc.res.pitch2D.devPtr = d_img_labels_.data;
resDesc.res.pitch2D.desc = chFormatDesc;
resDesc.res.pitch2D.width = d_img_.cols;
resDesc.res.pitch2D.height = d_img_.rows;
resDesc.res.pitch2D.pitchInBytes = d_img_labels_.step;
cudaTextureDesc texDesc = {
{cudaAddressModeBorder, cudaAddressModeBorder}, // addressMode (fetches with out-of-range coordinates return 0)
cudaFilterModePoint, // filterMode (do not interpolate and take the nearest value)
cudaReadModeElementType, // readMode (do not convert to floating point, only for 8-bit and 16-bit integer formats)
// other values are defaulted to 0
};
cudaTextureObject_t texObject;
cudaCreateTextureObject(&texObject, &resDesc, &texDesc, nullptr);
grid_size_ = dim3((d_img_.cols + BLOCK_COLS - 1) / BLOCK_COLS, (d_img_.rows + BLOCK_ROWS - 1) / BLOCK_ROWS, 1);
block_size_ = dim3(BLOCK_COLS, BLOCK_ROWS, 1);
Init << <grid_size_, block_size_ >> >(d_img_, d_img_labels_);
if (d_img_.rows == 1 || d_img_.cols == 1) {
while (true) {
changes = 0;
cudaMemcpy(d_changes, &changes, sizeof(char), cudaMemcpyHostToDevice);
ScanNotTex << <grid_size_, block_size_ >> > (d_img_labels_, texObject, d_changes);
cudaMemcpy(&changes, d_changes, sizeof(char), cudaMemcpyDeviceToHost);
if (!changes)
break;
Analyze << <grid_size_, block_size_ >> > (d_img_labels_);
}
}
else {
while (true) {
changes = 0;
cudaMemcpy(d_changes, &changes, sizeof(char), cudaMemcpyHostToDevice);
Scan << <grid_size_, block_size_ >> > (d_img_labels_, texObject, d_changes);
cudaMemcpy(&changes, d_changes, sizeof(char), cudaMemcpyDeviceToHost);
if (!changes)
break;
Analyze << <grid_size_, block_size_ >> > (d_img_labels_);
}
}
cudaDestroyTextureObject(texObject);
cudaFree(d_changes);
cudaDeviceSynchronize();
}
private:
double Alloc() {
perf_.start();
d_img_labels_.create(d_img_.size(), CV_32SC1);
cudaMalloc(&d_changes, sizeof(char));
perf_.stop();
return perf_.last();
}
double Dealloc() {
perf_.start();
cudaFree(d_changes);
perf_.stop();
return perf_.last();
}
double MemoryTransferHostToDevice() {
perf_.start();
d_img_.upload(img_);
perf_.stop();
return perf_.last();
}
void MemoryTransferDeviceToHost() {
d_img_labels_.download(img_labels_);
}
void AllScans() {
// Create Texture Object
cudaChannelFormatDesc chFormatDesc = cudaCreateChannelDesc<unsigned int>();
cudaResourceDesc resDesc = {};
resDesc.resType = cudaResourceTypePitch2D;
resDesc.res.pitch2D.devPtr = d_img_labels_.data;
resDesc.res.pitch2D.desc = chFormatDesc;
resDesc.res.pitch2D.width = d_img_.cols;
resDesc.res.pitch2D.height = d_img_.rows;
resDesc.res.pitch2D.pitchInBytes = d_img_labels_.step;
cudaTextureDesc texDesc = {
{cudaAddressModeBorder, cudaAddressModeBorder}, // addressMode (fetches with out-of-range coordinates return 0)
cudaFilterModePoint, // filterMode (do not interpolate and take the nearest value)
cudaReadModeElementType, // readMode (do not convert to floating point, only for 8-bit and 16-bit integer formats)
// other values are defaulted to 0
};
cudaTextureObject_t texObject;
cudaCreateTextureObject(&texObject, &resDesc, &texDesc, nullptr);
grid_size_ = dim3((d_img_.cols + BLOCK_COLS - 1) / BLOCK_COLS, (d_img_.rows + BLOCK_ROWS - 1) / BLOCK_ROWS, 1);
block_size_ = dim3(BLOCK_COLS, BLOCK_ROWS, 1);
Init << <grid_size_, block_size_ >> >(d_img_, d_img_labels_);
if (d_img_.rows == 1 || d_img_.cols == 1) {
while (true) {
changes = 0;
cudaMemcpy(d_changes, &changes, sizeof(char), cudaMemcpyHostToDevice);
ScanNotTex << <grid_size_, block_size_ >> > (d_img_labels_, texObject, d_changes);
cudaMemcpy(&changes, d_changes, sizeof(char), cudaMemcpyDeviceToHost);
if (!changes)
break;
Analyze << <grid_size_, block_size_ >> > (d_img_labels_);
}
}
else {
while (true) {
changes = 0;
cudaMemcpy(d_changes, &changes, sizeof(char), cudaMemcpyHostToDevice);
Scan << <grid_size_, block_size_ >> > (d_img_labels_, texObject, d_changes);
cudaMemcpy(&changes, d_changes, sizeof(char), cudaMemcpyDeviceToHost);
if (!changes)
break;
Analyze << <grid_size_, block_size_ >> > (d_img_labels_);
}
}
cudaDestroyTextureObject(texObject);
cudaDeviceSynchronize();
}
public:
void PerformLabelingWithSteps()
{
double alloc_timing = Alloc();
perf_.start();
AllScans();
perf_.stop();
perf_.store(Step(StepType::ALL_SCANS), perf_.last());
double dealloc_timing = Dealloc();
perf_.store(Step(StepType::ALLOC_DEALLOC), alloc_timing + dealloc_timing);
}
};
REGISTER_LABELING(OLE_TEX);
|
a4a9e48fcb8dad5683d652a92b89b0a35f38ecb8.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <cstdlib>
#include <sys/time.h>
#include <iomanip>
#include <algorithm>
#include <vector>
#include <hip/hip_runtime.h>
#include <complex>
#include "skyblas.cuh"
#include "cu_complex.h"
#if !defined PARM || !defined PARN
#error "PARM or PARN is not specified! Specify M and N to measure"
#endif
using namespace std;
#define XSTR(s) STR(s)
#define STR(s) #s
#ifdef FC
typedef complex<float> htype;
typedef cuFloatComplex dtype;
dtype makeDtype(htype v) { return make_cuFloatComplex(v.real(), v.imag()); }
#define RAND_HTYPE(gen) htype(gen, gen)
#define MAKE_DTYPE(v1, v2) make_cuFloatComplex(v1, v2)
string mode = "float complex";
int flopsPerCell = 8;
#elif DC
typedef complex<double> htype;
typedef hipDoubleComplex dtype;
dtype makeDtype(htype v) { return make_cuDoubleComplex(v.real(), v.imag()); }
#define RAND_HTYPE(gen) htype(gen, gen)
#define MAKE_DTYPE(v1, v2) make_cuDoubleComplex(v1, v2)
string mode = "double complex";
int flopsPerCell = 8;
#elif FR
typedef float htype;
typedef float dtype;
dtype makeDtype(htype v) { return v; }
#define RAND_HTYPE(gen) htype(gen)
#define MAKE_DTYPE(v1, v2) float(v1)
string mode = "float real";
int flopsPerCell = 2;
#elif DR
typedef double htype;
typedef double dtype;
dtype makeDtype(htype v) { return v; }
#define RAND_HTYPE(gen) htype(gen)
#define MAKE_DTYPE(v1, v2) double(v1)
string mode = "double real";
int flopsPerCell = 2;
#endif
double dtime() {
double tseconds = 0;
struct timeval t;
gettimeofday(&t, NULL);
tseconds = (double)t.tv_sec + (double)t.tv_usec * 1.0e-6;
return tseconds;
}
#define GPU_ERROR(ans) \
{ gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char* file, int line,
bool abort = true) {
if (code != hipSuccess) {
cerr << "GPUassert: \"" << hipGetErrorString(code) << "\" in " << file
<< ": " << line << "\n";
if (abort) exit(code);
}
}
__global__ void initKernel(dtype* A, size_t N) {
size_t tidx = blockDim.x * blockIdx.x + threadIdx.x;
for (size_t idx = tidx; idx < N; idx += blockDim.x * gridDim.x) {
A[idx] = MAKE_DTYPE(idx % 3 - 1, 0);
}
}
dtype* A;
dtype* B;
dtype* C;
dtype* d_temp_storage;
size_t temp_storage_bytes;
void initMatmul(Skyblas::MEMORY_ORDER AOrder, Skyblas::MEMORY_ORDER BOrder,
int M, int N, int K, int lda, int ldb, int ldc,
size_t blockCount) {
GPU_ERROR(hipMalloc(&A, sizeof(dtype) * lda * K));
GPU_ERROR(hipMalloc(&B, sizeof(dtype) * ldb * K));
GPU_ERROR(hipMalloc(&C, sizeof(dtype) * ldc * N));
hipLaunchKernelGGL(( initKernel), dim3(52), dim3(256), 0, 0, A, lda * K);
hipLaunchKernelGGL(( initKernel), dim3(52), dim3(256), 0, 0, B, ldb * K);
hipLaunchKernelGGL(( initKernel), dim3(52), dim3(256), 0, 0, C, ldc * N);
temp_storage_bytes = 0;
d_temp_storage = NULL;
Skyblas::dgemm<dtype, PARM, PARN>(
temp_storage_bytes, d_temp_storage, blockCount, AOrder, BOrder, M, N, K,
makeDtype(1.0), A, lda, B, ldb, makeDtype(1.0), C, ldc);
GPU_ERROR(hipMalloc(&d_temp_storage, sizeof(dtype) * temp_storage_bytes));
}
void deInitMatmul() {
GPU_ERROR(hipFree(A));
GPU_ERROR(hipFree(B));
GPU_ERROR(hipFree(C));
GPU_ERROR(hipFree(d_temp_storage));
}
double measureMatmul(Skyblas::MEMORY_ORDER AOrder, Skyblas::MEMORY_ORDER BOrder,
size_t M, size_t N, size_t K, int lda, int ldb, int ldc,
size_t blockCount, int iters, bool self) {
GPU_ERROR(hipDeviceSynchronize());
htype halpha = 1.0;
htype hbeta = 2.0;
dtype dalpha = makeDtype(halpha);
dtype dbeta = makeDtype(hbeta);
double t1 = dtime();
for (int iter = 0; iter < iters; iter++) {
if (self)
Skyblas::dgemm<dtype, PARM, PARN>(temp_storage_bytes, d_temp_storage,
blockCount, AOrder, BOrder, M, N, K,
dalpha, A, lda, A, lda, dbeta, C, ldc);
else
Skyblas::dgemm<dtype, PARM, PARN>(temp_storage_bytes, d_temp_storage,
blockCount, AOrder, BOrder, M, N, K,
dalpha, A, lda, B, ldb, dbeta, C, ldc);
}
GPU_ERROR(hipDeviceSynchronize());
double t2 = dtime();
return (t2 - t1) / iters;
}
int main(int argc, char** argv) {
size_t N = PARN;
size_t M = PARM;
bool self = true;
if (M == 0 || N == 0) {
std::cout << " M N K blockcount time perf\n";
return 0;
}
size_t maxK = 2 * ((size_t)1 << 30) / ((M + N) * 8);
size_t K = 0.2 * ((size_t)1 << 30) / ((M + N) * 8);
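// maxK sizes A and B to roughly 2 GiB combined, assuming 8-byte elements;
// K starts near 0.2 GiB and is doubled below until one measured run takes at least 0.1 s.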
initMatmul(Skyblas::COLUMN, Skyblas::ROW, M, N, maxK, M, N, N, 8 * 13);
double resultTime = 0;
while (resultTime < 0.1 && K * 2 <= maxK) {
K *= 2;
resultTime = measureMatmul(Skyblas::COLUMN, Skyblas::ROW, M, N, K, M, N, M,
26, 1, false);
}
int iters = max(1, (int)(0.05 / resultTime));
size_t lda = M;
double bestTime = 0;
int bestBlockCount = 0;
for (int blockCount = 1 * 13; blockCount <= 8 * 13; blockCount += 13) {
int sampleSize = 3;
vector<double> times(sampleSize);
for (int t = 0; t < sampleSize; t++) {
times[t] = measureMatmul(Skyblas::COLUMN, Skyblas::ROW, M, N, K, lda, N,
M, blockCount, iters, self);
}
sort(times.begin(), times.end());
if (times[sampleSize / 2] < bestTime || bestBlockCount == 0) {
bestTime = times[sampleSize / 2];
bestBlockCount = blockCount;
}
}
cout << setw(3) << M << " " << setw(3) << N << " " << setw(9)
<< K << " " << setw(10) << bestBlockCount << " " << setprecision(3)
<< setw(8) << bestTime << " " << setw(5)
<< M * N * K * flopsPerCell / bestTime * 1e-9 << "\n";
cout.flush();
deInitMatmul();
}
| a4a9e48fcb8dad5683d652a92b89b0a35f38ecb8.cu | #include <iostream>
#include <cstdlib>
#include <sys/time.h>
#include <iomanip>
#include <algorithm>
#include <vector>
#include <cuda_runtime.h>
#include <complex>
#include "skyblas.cuh"
#include "cu_complex.h"
#if !defined PARM || !defined PARN
#error "PARM or PARN is not specified! Specify M and N to measure"
#endif
using namespace std;
#define XSTR(s) STR(s)
#define STR(s) #s
#ifdef FC
typedef complex<float> htype;
typedef cuFloatComplex dtype;
dtype makeDtype(htype v) { return make_cuFloatComplex(v.real(), v.imag()); }
#define RAND_HTYPE(gen) htype(gen, gen)
#define MAKE_DTYPE(v1, v2) make_cuFloatComplex(v1, v2)
string mode = "float complex";
int flopsPerCell = 8;
#elif DC
typedef complex<double> htype;
typedef cuDoubleComplex dtype;
dtype makeDtype(htype v) { return make_cuDoubleComplex(v.real(), v.imag()); }
#define RAND_HTYPE(gen) htype(gen, gen)
#define MAKE_DTYPE(v1, v2) make_cuDoubleComplex(v1, v2)
string mode = "double complex";
int flopsPerCell = 8;
#elif FR
typedef float htype;
typedef float dtype;
dtype makeDtype(htype v) { return v; }
#define RAND_HTYPE(gen) htype(gen)
#define MAKE_DTYPE(v1, v2) float(v1)
string mode = "float real";
int flopsPerCell = 2;
#elif DR
typedef double htype;
typedef double dtype;
dtype makeDtype(htype v) { return v; }
#define RAND_HTYPE(gen) htype(gen)
#define MAKE_DTYPE(v1, v2) double(v1)
string mode = "double real";
int flopsPerCell = 2;
#endif
double dtime() {
double tseconds = 0;
struct timeval t;
gettimeofday(&t, NULL);
tseconds = (double)t.tv_sec + (double)t.tv_usec * 1.0e-6;
return tseconds;
}
#define GPU_ERROR(ans) \
{ gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char* file, int line,
bool abort = true) {
if (code != cudaSuccess) {
cerr << "GPUassert: \"" << cudaGetErrorString(code) << "\" in " << file
<< ": " << line << "\n";
if (abort) exit(code);
}
}
__global__ void initKernel(dtype* A, size_t N) {
size_t tidx = blockDim.x * blockIdx.x + threadIdx.x;
for (size_t idx = tidx; idx < N; idx += blockDim.x * gridDim.x) {
A[idx] = MAKE_DTYPE(idx % 3 - 1, 0);
}
}
dtype* A;
dtype* B;
dtype* C;
dtype* d_temp_storage;
size_t temp_storage_bytes;
void initMatmul(Skyblas::MEMORY_ORDER AOrder, Skyblas::MEMORY_ORDER BOrder,
int M, int N, int K, int lda, int ldb, int ldc,
size_t blockCount) {
GPU_ERROR(cudaMalloc(&A, sizeof(dtype) * lda * K));
GPU_ERROR(cudaMalloc(&B, sizeof(dtype) * ldb * K));
GPU_ERROR(cudaMalloc(&C, sizeof(dtype) * ldc * N));
initKernel<<<52, 256>>>(A, lda * K);
initKernel<<<52, 256>>>(B, ldb * K);
initKernel<<<52, 256>>>(C, ldc * N);
temp_storage_bytes = 0;
d_temp_storage = NULL;
Skyblas::dgemm<dtype, PARM, PARN>(
temp_storage_bytes, d_temp_storage, blockCount, AOrder, BOrder, M, N, K,
makeDtype(1.0), A, lda, B, ldb, makeDtype(1.0), C, ldc);
GPU_ERROR(cudaMalloc(&d_temp_storage, sizeof(dtype) * temp_storage_bytes));
}
void deInitMatmul() {
GPU_ERROR(cudaFree(A));
GPU_ERROR(cudaFree(B));
GPU_ERROR(cudaFree(C));
GPU_ERROR(cudaFree(d_temp_storage));
}
double measureMatmul(Skyblas::MEMORY_ORDER AOrder, Skyblas::MEMORY_ORDER BOrder,
size_t M, size_t N, size_t K, int lda, int ldb, int ldc,
size_t blockCount, int iters, bool self) {
GPU_ERROR(cudaDeviceSynchronize());
htype halpha = 1.0;
htype hbeta = 2.0;
dtype dalpha = makeDtype(halpha);
dtype dbeta = makeDtype(hbeta);
double t1 = dtime();
for (int iter = 0; iter < iters; iter++) {
if (self)
Skyblas::dgemm<dtype, PARM, PARN>(temp_storage_bytes, d_temp_storage,
blockCount, AOrder, BOrder, M, N, K,
dalpha, A, lda, A, lda, dbeta, C, ldc);
else
Skyblas::dgemm<dtype, PARM, PARN>(temp_storage_bytes, d_temp_storage,
blockCount, AOrder, BOrder, M, N, K,
dalpha, A, lda, B, ldb, dbeta, C, ldc);
}
GPU_ERROR(cudaDeviceSynchronize());
double t2 = dtime();
return (t2 - t1) / iters;
}
int main(int argc, char** argv) {
size_t N = PARN;
size_t M = PARM;
bool self = true;
if (M == 0 || N == 0) {
std::cout << " M N K blockcount time perf\n";
return 0;
}
size_t maxK = 2 * ((size_t)1 << 30) / ((M + N) * 8);
size_t K = 0.2 * ((size_t)1 << 30) / ((M + N) * 8);
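// maxK sizes A and B to roughly 2 GiB combined, assuming 8-byte elements;
// K starts near 0.2 GiB and is doubled below until one measured run takes at least 0.1 s.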
initMatmul(Skyblas::COLUMN, Skyblas::ROW, M, N, maxK, M, N, N, 8 * 13);
double resultTime = 0;
while (resultTime < 0.1 && K * 2 <= maxK) {
K *= 2;
resultTime = measureMatmul(Skyblas::COLUMN, Skyblas::ROW, M, N, K, M, N, M,
26, 1, false);
}
int iters = max(1, (int)(0.05 / resultTime));
size_t lda = M;
double bestTime = 0;
int bestBlockCount = 0;
for (int blockCount = 1 * 13; blockCount <= 8 * 13; blockCount += 13) {
int sampleSize = 3;
vector<double> times(sampleSize);
for (int t = 0; t < sampleSize; t++) {
times[t] = measureMatmul(Skyblas::COLUMN, Skyblas::ROW, M, N, K, lda, N,
M, blockCount, iters, self);
}
sort(times.begin(), times.end());
if (times[sampleSize / 2] < bestTime || bestBlockCount == 0) {
bestTime = times[sampleSize / 2];
bestBlockCount = blockCount;
}
}
cout << setw(3) << M << " " << setw(3) << N << " " << setw(9)
<< K << " " << setw(10) << bestBlockCount << " " << setprecision(3)
<< setw(8) << bestTime << " " << setw(5)
<< M * N * K * flopsPerCell / bestTime * 1e-9 << "\n";
cout.flush();
deInitMatmul();
}
|
782b5bd7297b25f18703f3290e566468f7204489.hip | // !!! This is a file automatically generated by hipify!!!
#include "THHUNN.h"
#include "THHHalf.h"
#include "THHHalfAutoNumerics.cuh"
template <typename T>
struct tanhupdateOutput_functor
{
__device__ void operator()(T *output, const T *input) const
{
*output = tanh(*input);
}
};
template <typename T>
struct tanhupdateGradInput_functor
{
__device__ void operator()(T *gradInput, const T *output, const T *gradOutput) const
{
*gradInput = *gradOutput * (1 - *output * *output);
}
};
#include "generic/Tanh.cu"
#include "THHGenerateFloatTypes.h"
| 782b5bd7297b25f18703f3290e566468f7204489.cu | #include "THCUNN.h"
#include "THCHalf.h"
#include "THCHalfAutoNumerics.cuh"
template <typename T>
struct tanhupdateOutput_functor
{
__device__ void operator()(T *output, const T *input) const
{
*output = tanh(*input);
}
};
template <typename T>
struct tanhupdateGradInput_functor
{
__device__ void operator()(T *gradInput, const T *output, const T *gradOutput) const
{
*gradInput = *gradOutput * (1 - *output * *output);
}
};
#include "generic/Tanh.cu"
#include "THCGenerateFloatTypes.h"
|
c94382976b61ad85db53de9434bfae4b0fe8f2c2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#define ASSERT_NO_CUDA_ERROR() { \
hipDeviceSynchronize(); \
hipError_t err = hipGetLastError(); \
if (err != hipSuccess) { \
printf("Cuda error (%s/%d) in file '%s' in line %i\n", \
hipGetErrorString(err), err, __FILE__, __LINE__); \
exit(1); \
} \
} while(0);
#define TRACE() { \
trace[(lid*x*y*8)+((loop)*8)+0] = A[0][0]; \
trace[(lid*x*y*8)+((loop)*8)+1] = A[0][1]; \
trace[(lid*x*y*8)+((loop)*8)+2] = A[0][2]; \
trace[(lid*x*y*8)+((loop)*8)+3] = A[0][3]; \
trace[(lid*x*y*8)+((loop)*8)+4] = A[1][0]; \
trace[(lid*x*y*8)+((loop)*8)+5] = A[1][1]; \
trace[(lid*x*y*8)+((loop)*8)+6] = A[1][2]; \
trace[(lid*x*y*8)+((loop)*8)+7] = A[1][3]; \
loop++; \
} while(0);
//set x and y through xyvals
__global__ void k2(int *xyvals, int *trace, int*final) {
__shared__ int A[2][4];
int buf, x, y, i, j;
int lid = threadIdx.x;
//initialize A
if (lid == 0) {
A[0][0] = 0; A[0][1] = 1; A[0][2] = 2; A[0][3] = 3;
A[1][0] = -1; A[1][1] = -1; A[1][2] = -1; A[1][3] = -1;
}
__syncthreads();
x = (lid == 0 ? xyvals[0] : xyvals[1]);
y = (lid == 0 ? xyvals[1] : xyvals[0]);
buf = i = 0;
int loop = 0;
while (i < x) {
j = 0;
while (j < y) {
__syncthreads();
TRACE();
A[1-buf][lid] = A[buf][(lid+1)%4];
buf = 1 - buf;
j++;
}
i++;
}
__syncthreads();
if (lid == 0) {
final[0] = A[0][0]; final[1] = A[0][1];
final[2] = A[0][2]; final[3] = A[0][3];
final[4] = A[1][0]; final[5] = A[1][1];
final[6] = A[1][2]; final[7] = A[1][3];
}
}
#define GROUPSIZE 4
int main(int argc, char **argv) {
// thread0 runs outer xyvals[0] times
// inner xyvals[1] times
// other threads do opposite
int xyvals[2];
if (argc == 3) {
xyvals[0] = atoi(argv[1]);
xyvals[1] = atoi(argv[2]);
} else {
xyvals[0] = 4;
xyvals[1] = 1;
}
int *d_xyvals;
size_t d_xyvals_size = sizeof(int)*2;
hipMalloc((void **)&d_xyvals, d_xyvals_size);
hipMemcpy(d_xyvals, xyvals, d_xyvals_size, hipMemcpyHostToDevice);
// trace shared array A[] after each __syncthreads, for each thread
// number of trace items :=
// 8 values in A[]
// __syncthreads() hit (xyvals[0]*xyvals[1]) times
// by GROUPSIZE threads
int ntrace = 8 * (xyvals[0]*xyvals[1]) * GROUPSIZE;
int *trace = new int[ntrace];
for (int i=0; i<ntrace; i++) {
trace[i] = 99;
}
int *d_trace;
size_t d_trace_size = sizeof(int)*ntrace;
hipMalloc((void **)&d_trace, d_trace_size);
hipMemcpy(d_trace, trace, d_trace_size, hipMemcpyHostToDevice);
// also record the final state of A
int final[8];
for (int i=0; i<8; i++) {
final[i] = 99;
}
int *d_final;
size_t d_final_size = sizeof(int)*8;
hipMalloc((void **)&d_final, d_final_size);
hipMemcpy(d_final, final, d_final_size, hipMemcpyHostToDevice);
// run kernel
printf("Set x and y through xyvals[%d,%d]...", xyvals[0], xyvals[1]);
ASSERT_NO_CUDA_ERROR();
hipLaunchKernelGGL(( k2), dim3(/*gridDim=*/1), dim3(GROUPSIZE), 0, 0, d_xyvals, d_trace, d_final);
ASSERT_NO_CUDA_ERROR();
printf("[done]\n");
// print out trace
hipMemcpy(trace, d_trace, d_trace_size, hipMemcpyDeviceToHost);
int stride = 8 * (xyvals[0]*xyvals[1]);
for (int lid=0; lid<GROUPSIZE; lid++) {
printf("lid = %d\n", lid);
for (int xy=0; xy<(xyvals[0]*xyvals[1]); xy++) {
printf("(%d) A = {{%d,%d,%d,%d}, {%d,%d,%d,%d}}\n",
xy,
trace[(lid*stride)+(xy*8)+0], trace[(lid*stride)+(xy*8)+1],
trace[(lid*stride)+(xy*8)+2], trace[(lid*stride)+(xy*8)+3],
trace[(lid*stride)+(xy*8)+4], trace[(lid*stride)+(xy*8)+5],
trace[(lid*stride)+(xy*8)+6], trace[(lid*stride)+(xy*8)+7]
);
}
printf("---\n");
}
// print out final state
hipMemcpy(final, d_final, d_final_size, hipMemcpyDeviceToHost);
printf("final state\n");
printf(" A = {{%d,%d,%d,%d}, {%d,%d,%d,%d}}\n",
final[0],final[1],final[2],final[3],
final[4],final[5],final[6],final[7]);
hipFree(d_xyvals);
hipFree(d_trace);
hipFree(d_final);
delete[] trace;
return 0;
}
| c94382976b61ad85db53de9434bfae4b0fe8f2c2.cu | #include <cstdio>
#define ASSERT_NO_CUDA_ERROR() { \
cudaThreadSynchronize(); \
cudaError_t err = cudaGetLastError(); \
if (err != cudaSuccess) { \
printf("Cuda error (%s/%d) in file '%s' in line %i\n", \
cudaGetErrorString(err), err, __FILE__, __LINE__); \
exit(1); \
} \
} while(0);
#define TRACE() { \
trace[(lid*x*y*8)+((loop)*8)+0] = A[0][0]; \
trace[(lid*x*y*8)+((loop)*8)+1] = A[0][1]; \
trace[(lid*x*y*8)+((loop)*8)+2] = A[0][2]; \
trace[(lid*x*y*8)+((loop)*8)+3] = A[0][3]; \
trace[(lid*x*y*8)+((loop)*8)+4] = A[1][0]; \
trace[(lid*x*y*8)+((loop)*8)+5] = A[1][1]; \
trace[(lid*x*y*8)+((loop)*8)+6] = A[1][2]; \
trace[(lid*x*y*8)+((loop)*8)+7] = A[1][3]; \
loop++; \
} while(0);
//set x and y through xyvals
__global__ void k2(int *xyvals, int *trace, int*final) {
__shared__ int A[2][4];
int buf, x, y, i, j;
int lid = threadIdx.x;
//initialize A
if (lid == 0) {
A[0][0] = 0; A[0][1] = 1; A[0][2] = 2; A[0][3] = 3;
A[1][0] = -1; A[1][1] = -1; A[1][2] = -1; A[1][3] = -1;
}
__syncthreads();
x = (lid == 0 ? xyvals[0] : xyvals[1]);
y = (lid == 0 ? xyvals[1] : xyvals[0]);
buf = i = 0;
int loop = 0;
while (i < x) {
j = 0;
while (j < y) {
__syncthreads();
TRACE();
A[1-buf][lid] = A[buf][(lid+1)%4];
buf = 1 - buf;
j++;
}
i++;
}
__syncthreads();
if (lid == 0) {
final[0] = A[0][0]; final[1] = A[0][1];
final[2] = A[0][2]; final[3] = A[0][3];
final[4] = A[1][0]; final[5] = A[1][1];
final[6] = A[1][2]; final[7] = A[1][3];
}
}
#define GROUPSIZE 4
int main(int argc, char **argv) {
// thread0 runs outer xyvals[0] times
// inner xyvals[1] times
// other threads do opposite
int xyvals[2];
if (argc == 3) {
xyvals[0] = atoi(argv[1]);
xyvals[1] = atoi(argv[2]);
} else {
xyvals[0] = 4;
xyvals[1] = 1;
}
int *d_xyvals;
size_t d_xyvals_size = sizeof(int)*2;
cudaMalloc((void **)&d_xyvals, d_xyvals_size);
cudaMemcpy(d_xyvals, xyvals, d_xyvals_size, cudaMemcpyHostToDevice);
// trace shared array A[] after each __syncthreads, for each thread
// number of trace items :=
// 8 values in A[]
// __syncthreads() hit (xyvals[0]*xyvals[1]) times
// by GROUPSIZE threads
int ntrace = 8 * (xyvals[0]*xyvals[1]) * GROUPSIZE;
int *trace = new int[ntrace];
for (int i=0; i<ntrace; i++) {
trace[i] = 99;
}
int *d_trace;
size_t d_trace_size = sizeof(int)*ntrace;
cudaMalloc((void **)&d_trace, d_trace_size);
cudaMemcpy(d_trace, trace, d_trace_size, cudaMemcpyHostToDevice);
// also record the final state of A
int final[8];
for (int i=0; i<8; i++) {
final[i] = 99;
}
int *d_final;
size_t d_final_size = sizeof(int)*8;
cudaMalloc((void **)&d_final, d_final_size);
cudaMemcpy(d_final, final, d_final_size, cudaMemcpyHostToDevice);
// run kernel
printf("Set x and y through xyvals[%d,%d]...", xyvals[0], xyvals[1]);
ASSERT_NO_CUDA_ERROR();
k2<<</*gridDim=*/1, GROUPSIZE>>>(d_xyvals, d_trace, d_final);
ASSERT_NO_CUDA_ERROR();
printf("[done]\n");
// print out trace
cudaMemcpy(trace, d_trace, d_trace_size, cudaMemcpyDeviceToHost);
int stride = 8 * (xyvals[0]*xyvals[1]);
for (int lid=0; lid<GROUPSIZE; lid++) {
printf("lid = %d\n", lid);
for (int xy=0; xy<(xyvals[0]*xyvals[1]); xy++) {
printf("(%d) A = {{%d,%d,%d,%d}, {%d,%d,%d,%d}}\n",
xy,
trace[(lid*stride)+(xy*8)+0], trace[(lid*stride)+(xy*8)+1],
trace[(lid*stride)+(xy*8)+2], trace[(lid*stride)+(xy*8)+3],
trace[(lid*stride)+(xy*8)+4], trace[(lid*stride)+(xy*8)+5],
trace[(lid*stride)+(xy*8)+6], trace[(lid*stride)+(xy*8)+7]
);
}
printf("---\n");
}
// print out final state
cudaMemcpy(final, d_final, d_final_size, cudaMemcpyDeviceToHost);
printf("final state\n");
printf(" A = {{%d,%d,%d,%d}, {%d,%d,%d,%d}}\n",
final[0],final[1],final[2],final[3],
final[4],final[5],final[6],final[7]);
cudaFree(d_xyvals);
cudaFree(d_trace);
cudaFree(d_final);
delete[] trace;
return 0;
}
|
4770332554039dd3f3d4b72b789a8863e7b52c23.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C" {
//Device code
__device__ __inline__ float dot(const float2 a, const float2 b)
{
return (a.x * b.x) + (a.y * b.y);
}
__device__ float2 calculatePosition(int x, int y, float width, float height)
{
float2 fragSize = make_float2(2 / width, 2 / height);
return make_float2(fragSize.x * x + fragSize.y / 2 - 1, fragSize.y * y + fragSize.y / 2 - 1);
}
__global__ void baryKernel(const float2 *v0, \
const float2 *v1, \
const float2 *v2, \
const int dCount, \
const float *da, \
const float *db, \
const float *dc, \
float *dOut, \
int *dOut_valid, \
const int width, \
const int height)
{
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < width && y < height)
{
float2 pos = calculatePosition(x, y, width, height);
float2 t0 = make_float2(v2->x, v2->y);
float2 t1 = make_float2(v0->x, v0->y);
float2 t2 = make_float2(v1->x, v1->y);
float2 v0 = make_float2(t1.x - t0.x, t1.y - t0.y);
float2 v1 = make_float2(t2.x - t0.x, t2.y - t0.y);
float2 v2 = make_float2(pos.x - t0.x, pos.y - t0.y);
float d00 = dot(v0, v0);
float d01 = dot(v0, v1);
float d11 = dot(v1, v1);
float d20 = dot(v2, v0);
float d21 = dot(v2, v1);
float denom = d00 * d11 - d01 * d01;
float baryX = (d11 * d20 - d01 * d21) / denom;
float baryY = (d00 * d21 - d01 * d20) / denom;
float baryZ = 1 - baryX - baryY;
if (baryX > 0 && baryY > 0 && baryZ > 0)
{
for (int i = 0; i < dCount; i++)
{
dOut[y * width + x + i * (width * height)] = da[i] * baryX + db[i] * baryY + dc[i] * baryZ;
}
dOut_valid[y * width + x] = 1;
}
else
{
dOut[y * width + x] = 0;
dOut_valid[y * width + x] = 0;
}
}
}
}
| 4770332554039dd3f3d4b72b789a8863e7b52c23.cu | extern "C" {
//Device code
__device__ __inline__ float dot(const float2 a, const float2 b)
{
return (a.x * b.x) + (a.y * b.y);
}
__device__ float2 calculatePosition(int x, int y, float width, float height)
{
float2 fragSize = make_float2(2 / width, 2 / height);
return make_float2(fragSize.x * x + fragSize.y / 2 - 1, fragSize.y * y + fragSize.y / 2 - 1);
}
__global__ void baryKernel(const float2 *v0, \
const float2 *v1, \
const float2 *v2, \
const int dCount, \
const float *da, \
const float *db, \
const float *dc, \
float *dOut, \
int *dOut_valid, \
const int width, \
const int height)
{
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < width && y < height)
{
float2 pos = calculatePosition(x, y, width, height);
float2 t0 = make_float2(v2->x, v2->y);
float2 t1 = make_float2(v0->x, v0->y);
float2 t2 = make_float2(v1->x, v1->y);
float2 v0 = make_float2(t1.x - t0.x, t1.y - t0.y);
float2 v1 = make_float2(t2.x - t0.x, t2.y - t0.y);
float2 v2 = make_float2(pos.x - t0.x, pos.y - t0.y);
float d00 = dot(v0, v0);
float d01 = dot(v0, v1);
float d11 = dot(v1, v1);
float d20 = dot(v2, v0);
float d21 = dot(v2, v1);
float denom = d00 * d11 - d01 * d01;
float baryX = (d11 * d20 - d01 * d21) / denom;
float baryY = (d00 * d21 - d01 * d20) / denom;
float baryZ = 1 - baryX - baryY;
if (baryX > 0 && baryY > 0 && baryZ > 0)
{
for (int i = 0; i < dCount; i++)
{
dOut[y * width + x + i * (width * height)] = da[i] * baryX + db[i] * baryY + dc[i] * baryZ;
}
dOut_valid[y * width + x] = 1;
}
else
{
dOut[y * width + x] = 0;
dOut_valid[y * width + x] = 0;
}
}
}
}
|
3d2e4c8546115e873c4f8d281459f2a73f983c1c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <gtest/gtest.h>
#include <algorithm>
#include <common/scatter.cuh>
#include <cuda_utils.cuh>
#include <random/rng.cuh>
#include <random>
#include "test_utils.h"
namespace MLCommon {
template <typename DataT, typename IdxT>
__global__ void naiveScatterKernel(DataT *out, const DataT *in, const IdxT *idx,
IdxT len) {
IdxT tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid < len) {
out[tid] = in[idx[tid]];
}
}
template <typename DataT, typename IdxT>
void naiveScatter(DataT *out, const DataT *in, const IdxT *idx, IdxT len,
hipStream_t stream) {
int nblks = ceildiv<int>(len, 128);
hipLaunchKernelGGL(( naiveScatterKernel<DataT, IdxT>), dim3(nblks), dim3(128), 0, stream, out, in, idx, len);
}
struct ScatterInputs {
int len;
unsigned long long int seed;
};
template <typename DataT>
class ScatterTest : public ::testing::TestWithParam<ScatterInputs> {
protected:
void SetUp() override {
params = ::testing::TestWithParam<ScatterInputs>::GetParam();
raft::random::Rng r(params.seed);
CUDA_CHECK(hipStreamCreate(&stream));
int len = params.len;
allocate(in, len);
allocate(ref_out, len);
allocate(out, len);
allocate(idx, len);
r.uniform(in, len, DataT(-1.0), DataT(1.0), stream);
{
std::vector<int> h_idx(len, 0);
for (int i = 0; i < len; ++i) {
h_idx[i] = i;
}
std::random_device rd;
std::mt19937 g(rd());
std::shuffle(h_idx.begin(), h_idx.end(), g);
updateDevice(idx, &(h_idx[0]), len, stream);
CUDA_CHECK(hipStreamSynchronize(stream));
}
naiveScatter(ref_out, in, idx, len, stream);
scatter(out, in, idx, len, stream);
CUDA_CHECK(hipStreamSynchronize(stream));
}
void TearDown() override {
CUDA_CHECK(hipFree(in));
CUDA_CHECK(hipFree(ref_out));
CUDA_CHECK(hipFree(out));
CUDA_CHECK(hipFree(idx));
CUDA_CHECK(hipStreamDestroy(stream));
}
protected:
hipStream_t stream;
ScatterInputs params;
DataT *in, *ref_out, *out;
int *idx;
};
const std::vector<ScatterInputs> inputs = {
{128, 1234ULL}, {129, 1234ULL}, {130, 1234ULL}};
typedef ScatterTest<float> ScatterTestF;
TEST_P(ScatterTestF, Result) {
ASSERT_TRUE(devArrMatch(out, ref_out, params.len, Compare<float>()));
}
INSTANTIATE_TEST_CASE_P(ScatterTests, ScatterTestF,
::testing::ValuesIn(inputs));
typedef ScatterTest<double> ScatterTestD;
TEST_P(ScatterTestD, Result) {
ASSERT_TRUE(devArrMatch(out, ref_out, params.len, Compare<double>()));
}
INSTANTIATE_TEST_CASE_P(ScatterTests, ScatterTestD,
::testing::ValuesIn(inputs));
} // end namespace MLCommon
| 3d2e4c8546115e873c4f8d281459f2a73f983c1c.cu | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <gtest/gtest.h>
#include <algorithm>
#include <common/scatter.cuh>
#include <cuda_utils.cuh>
#include <random/rng.cuh>
#include <random>
#include "test_utils.h"
namespace MLCommon {
template <typename DataT, typename IdxT>
__global__ void naiveScatterKernel(DataT *out, const DataT *in, const IdxT *idx,
IdxT len) {
IdxT tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid < len) {
out[tid] = in[idx[tid]];
}
}
template <typename DataT, typename IdxT>
void naiveScatter(DataT *out, const DataT *in, const IdxT *idx, IdxT len,
cudaStream_t stream) {
int nblks = ceildiv<int>(len, 128);
naiveScatterKernel<DataT, IdxT><<<nblks, 128, 0, stream>>>(out, in, idx, len);
}
struct ScatterInputs {
int len;
unsigned long long int seed;
};
template <typename DataT>
class ScatterTest : public ::testing::TestWithParam<ScatterInputs> {
protected:
void SetUp() override {
params = ::testing::TestWithParam<ScatterInputs>::GetParam();
raft::random::Rng r(params.seed);
CUDA_CHECK(cudaStreamCreate(&stream));
int len = params.len;
allocate(in, len);
allocate(ref_out, len);
allocate(out, len);
allocate(idx, len);
r.uniform(in, len, DataT(-1.0), DataT(1.0), stream);
{
std::vector<int> h_idx(len, 0);
for (int i = 0; i < len; ++i) {
h_idx[i] = i;
}
std::random_device rd;
std::mt19937 g(rd());
std::shuffle(h_idx.begin(), h_idx.end(), g);
updateDevice(idx, &(h_idx[0]), len, stream);
CUDA_CHECK(cudaStreamSynchronize(stream));
}
naiveScatter(ref_out, in, idx, len, stream);
scatter(out, in, idx, len, stream);
CUDA_CHECK(cudaStreamSynchronize(stream));
}
void TearDown() override {
CUDA_CHECK(cudaFree(in));
CUDA_CHECK(cudaFree(ref_out));
CUDA_CHECK(cudaFree(out));
CUDA_CHECK(cudaFree(idx));
CUDA_CHECK(cudaStreamDestroy(stream));
}
protected:
cudaStream_t stream;
ScatterInputs params;
DataT *in, *ref_out, *out;
int *idx;
};
const std::vector<ScatterInputs> inputs = {
{128, 1234ULL}, {129, 1234ULL}, {130, 1234ULL}};
typedef ScatterTest<float> ScatterTestF;
TEST_P(ScatterTestF, Result) {
ASSERT_TRUE(devArrMatch(out, ref_out, params.len, Compare<float>()));
}
INSTANTIATE_TEST_CASE_P(ScatterTests, ScatterTestF,
::testing::ValuesIn(inputs));
typedef ScatterTest<double> ScatterTestD;
TEST_P(ScatterTestD, Result) {
ASSERT_TRUE(devArrMatch(out, ref_out, params.len, Compare<double>()));
}
INSTANTIATE_TEST_CASE_P(ScatterTests, ScatterTestD,
::testing::ValuesIn(inputs));
} // end namespace MLCommon
|
f4355940053158f77ef9db255e2b977b45915a70.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "common.h"
void checkCUDAErrorFn(const char *msg, const char *file, int line) {
hipError_t err = hipGetLastError();
if (hipSuccess == err) {
return;
}
fprintf(stderr, "CUDA error");
if (file) {
fprintf(stderr, " (%s:%d)", file, line);
}
fprintf(stderr, ": line %d: %s: %s\n", line, msg, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
namespace StreamCompaction {
namespace Common {
/**
* Maps an array to an array of 0s and 1s for stream compaction. Elements
* which map to 0 will be removed, and elements which map to 1 will be kept.
*/
__global__ void kernMapToBoolean(int n, int *bools, const int *idata) {
// TODO
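// One possible implementation, sketched as a comment; it follows the description
// above and assumes the launch configuration covers at least n threads:
//   int idx = (blockDim.x * blockIdx.x) + threadIdx.x;
//   if (idx < n) {
//       bools[idx] = (idata[idx] != 0) ? 1 : 0;
//   }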
}
/**
* Performs scatter on an array. That is, for each element in idata,
* if bools[idx] == 1, it copies idata[idx] to odata[indices[idx]].
*/
__global__ void kernScatter(int n, int *odata,
const int *idata, const int *bools, const int *indices) {
// TODO
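// One possible implementation, sketched as a comment; it follows the description
// above and assumes the launch configuration covers at least n threads:
//   int idx = (blockDim.x * blockIdx.x) + threadIdx.x;
//   if (idx < n && bools[idx] == 1) {
//       odata[indices[idx]] = idata[idx];
//   }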
}
}
}
| f4355940053158f77ef9db255e2b977b45915a70.cu | #include "common.h"
void checkCUDAErrorFn(const char *msg, const char *file, int line) {
cudaError_t err = cudaGetLastError();
if (cudaSuccess == err) {
return;
}
fprintf(stderr, "CUDA error");
if (file) {
fprintf(stderr, " (%s:%d)", file, line);
}
fprintf(stderr, ": line %d: %s: %s\n", line, msg, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
namespace StreamCompaction {
namespace Common {
/**
* Maps an array to an array of 0s and 1s for stream compaction. Elements
* which map to 0 will be removed, and elements which map to 1 will be kept.
*/
__global__ void kernMapToBoolean(int n, int *bools, const int *idata) {
// TODO
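// One possible implementation, sketched as a comment; it follows the description
// above and assumes the launch configuration covers at least n threads:
//   int idx = (blockDim.x * blockIdx.x) + threadIdx.x;
//   if (idx < n) {
//       bools[idx] = (idata[idx] != 0) ? 1 : 0;
//   }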
}
/**
* Performs scatter on an array. That is, for each element in idata,
* if bools[idx] == 1, it copies idata[idx] to odata[indices[idx]].
*/
__global__ void kernScatter(int n, int *odata,
const int *idata, const int *bools, const int *indices) {
// TODO
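// One possible implementation, sketched as a comment; it follows the description
// above and assumes the launch configuration covers at least n threads:
//   int idx = (blockDim.x * blockIdx.x) + threadIdx.x;
//   if (idx < n && bools[idx] == 1) {
//       odata[indices[idx]] = idata[idx];
//   }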
}
}
}
|
b416498fcc0c16e5af614dbb2b7ad157a92f6dde.hip | // !!! This is a file automatically generated by hipify!!!
/*
* benchmark.cpp
*
* Created on: Sep 4, 2018
* Author: hb4ch
*/
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
#include <sys/time.h>
#include "benchmark.hpp"
#include "misc.hpp"
#include "aes-cuda.hpp"
#include "faster-cuda-aes.hpp"
size_t GetTimeMS() {
struct timeval tv;
gettimeofday(&tv, NULL);
return (int64_t) tv.tv_sec * 1000 + tv.tv_usec / 1000;
}
void aes_benchmark::start() {
std::printf("[Benchmark started.]\n");
std::printf("File size = %lld \n", this->file_size);
unsigned char * d_fb;
unsigned char * d_result;
unsigned char * h_result;
checkCudaErrors(hipMalloc((void ** )&d_fb, file_size));
// Device file_buffer malloc'd
checkCudaErrors(hipMalloc((void ** )&d_result, file_size));
// Device result buffer malloc'd
checkCudaErrors(hipHostMalloc((void ** )&h_result, file_size));
// Host result buffer malloc'd (pinned)
checkCudaErrors(
hipMemcpyToSymbol(g_expanded_key, &expanded_key[0], 176, size_t(0),
hipMemcpyHostToDevice));
// copy expanded key to device.
unsigned char * temp_d_result = d_result;
unsigned char * temp_d_fb = d_fb;
unsigned char * temp_file_buffer = file_buffer;
unsigned char * temp_h_result = h_result;
size_t total_batches = file_size / batch_size;
std::printf("total_batches = %d\n", total_batches);
std::printf("batch_size = %d\n", batch_size);
std::printf("Cuda block num = %d\n", batch_size / 16);
size_t begin_ts;
begin_ts = GetTimeMS();
std::printf("HYPER_Q = %d\n", this->hyper_q);
dim3 nblock(((file_size / 16) + 32*64 - 1) / (32*64), 128);
if (hyper_q > 1) {
int stream_num = hyper_q;
int batches_per_stream = total_batches / stream_num;
hipStream_t streams[stream_num];
for (int i = 0; i < stream_num; i++)
hipStreamCreate(&streams[i]);
// Async create stream
for (int i = 0; i < batches_per_stream; i++) {
for (int j = 0; j < stream_num; j++) {
checkCudaErrors(
hipMemcpyAsync(temp_d_fb, temp_file_buffer, batch_size,
hipMemcpyHostToDevice, streams[j]));
hipLaunchKernelGGL(( cuda_main_kernel), dim3(batch_size / 16 / 1024), dim3(1024), 0, 0,
temp_d_fb, batch_size / 16, temp_d_result);
checkCudaErrors(
hipMemcpyAsync(temp_h_result, temp_d_result,
batch_size, hipMemcpyDeviceToHost,
streams[j]));
temp_d_result += batch_size;
temp_d_fb += batch_size;
temp_h_result += batch_size;
temp_file_buffer += batch_size;
}
checkCudaErrors(hipDeviceSynchronize());
}
for (int i = 0; i < stream_num; i++)
hipStreamDestroy(streams[i]);
} else {
for (int i = 0; i < total_batches; i++) {
checkCudaErrors(
hipMemcpy(temp_d_fb, temp_file_buffer, batch_size,
hipMemcpyHostToDevice));
// Copy one batch of data to device
hipLaunchKernelGGL(( cuda_main_kernel), dim3(batch_size / 16 / 1024), dim3(1024), 0, 0,
//cuda_main_kernel, nblock , 64, 0, 0,
temp_d_fb, batch_size / 16, temp_d_result);
/*ctr_encrypt_nofrag_perword<<<nblock, 64>>>
((uint8_t *)temp_d_fb, (uint8_t *)g_expanded_key, total_size / 16);*/
// transfer<<<batch_size / 4 , 1>>>(temp_d_fb, temp_d_result);
// Running kernel to cipher a 16 byte cipher block
// Each CUDA block process a 16 byte cipher block
checkCudaErrors(hipDeviceSynchronize());
// Barrier
checkCudaErrors(
hipMemcpy(temp_h_result, temp_d_result, batch_size,
hipMemcpyDeviceToHost));
// Fetch the result back
temp_d_result += batch_size;
temp_d_fb += batch_size;
temp_h_result += batch_size;
temp_file_buffer += batch_size;
}
}
// Now the kernel is expected to process the given file_buffer into result_buffer
// One batch is infused into device, processed and then fetched back.
// The loop stops until the file is done ciphering.
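// Note (sketch, not from the original source): in the hyper_q path above the
// kernel is still launched with a stream argument of 0 (the default stream),
// which serializes with the async copies issued on streams[j]. For actual
// copy/compute overlap the launch would also target the per-batch stream,
// roughly:
//
// hipLaunchKernelGGL(cuda_main_kernel, dim3(batch_size / 16 / 1024),
// dim3(1024), 0, streams[j],
// temp_d_fb, batch_size / 16, temp_d_result);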
std::printf("Loop done!\n");
size_t end_ts = GetTimeMS();
double thruput = (total_size / (1024 * 1024 * 1024 + 0.0))
/ ((end_ts - begin_ts) / 1000.0);
double latency = (end_ts - begin_ts) / (total_batches / 1000.0);
printf("total time: %lf s\n", (end_ts - begin_ts) / 1000.0);
printf("Thruput: %lf GB/s, latency: %lf us/batch \n", thruput, latency);
printf("[End benchmark]\n");
checkCudaErrors(hipHostFree(h_result));
checkCudaErrors(hipFree(d_fb));
checkCudaErrors(hipFree(d_result));
// Leave file_buffer alone
// It is freed in superclass d'tor
}
| b416498fcc0c16e5af614dbb2b7ad157a92f6dde.cu | /*
* benchmark.cpp
*
* Created on: Sep 4, 2018
* Author: hb4ch
*/
#include <cuda.h>
#include <cuda_runtime.h>
#include <helper_cuda.h>
#include <sys/time.h>
#include "benchmark.hpp"
#include "misc.hpp"
#include "aes-cuda.hpp"
#include "faster-cuda-aes.hpp"
size_t GetTimeMS() {
struct timeval tv;
gettimeofday(&tv, NULL);
return (int64_t) tv.tv_sec * 1000 + tv.tv_usec / 1000;
}
void aes_benchmark::start() {
std::printf("[Benchmark started.]\n");
std::printf("File size = %lld \n", this->file_size);
unsigned char * d_fb;
unsigned char * d_result;
unsigned char * h_result;
checkCudaErrors(cudaMalloc((void ** )&d_fb, file_size));
// Device file_buffer malloc'd
checkCudaErrors(cudaMalloc((void ** )&d_result, file_size));
// Device result buffer malloc'd
checkCudaErrors(cudaMallocHost((void ** )&h_result, file_size));
// Host result buffer malloc'd (pinned)
checkCudaErrors(
cudaMemcpyToSymbol(g_expanded_key, &expanded_key[0], 176, size_t(0),
cudaMemcpyHostToDevice));
// copy expanded key to device.
unsigned char * temp_d_result = d_result;
unsigned char * temp_d_fb = d_fb;
unsigned char * temp_file_buffer = file_buffer;
unsigned char * temp_h_result = h_result;
size_t total_batches = file_size / batch_size;
std::printf("total_batches = %d\n", total_batches);
std::printf("batch_size = %d\n", batch_size);
std::printf("Cuda block num = %d\n", batch_size / 16);
size_t begin_ts;
begin_ts = GetTimeMS();
std::printf("HYPER_Q = %d\n", this->hyper_q);
dim3 nblock(((file_size / 16) + 32*64 - 1) / (32*64), 128);
if (hyper_q > 1) {
int stream_num = hyper_q;
int batches_per_stream = total_batches / stream_num;
cudaStream_t streams[stream_num];
for (int i = 0; i < stream_num; i++)
cudaStreamCreate(&streams[i]);
// Async create stream
for (int i = 0; i < batches_per_stream; i++) {
for (int j = 0; j < stream_num; j++) {
checkCudaErrors(
cudaMemcpyAsync(temp_d_fb, temp_file_buffer, batch_size,
cudaMemcpyHostToDevice, streams[j]));
cuda_main_kernel<<<batch_size / 16 / 1024, 1024>>>
(temp_d_fb, batch_size / 16, temp_d_result);
checkCudaErrors(
cudaMemcpyAsync(temp_h_result, temp_d_result,
batch_size, cudaMemcpyDeviceToHost,
streams[j]));
temp_d_result += batch_size;
temp_d_fb += batch_size;
temp_h_result += batch_size;
temp_file_buffer += batch_size;
}
checkCudaErrors(cudaDeviceSynchronize());
}
for (int i = 0; i < stream_num; i++)
cudaStreamDestroy(streams[i]);
} else {
for (int i = 0; i < total_batches; i++) {
checkCudaErrors(
cudaMemcpy(temp_d_fb, temp_file_buffer, batch_size,
cudaMemcpyHostToDevice));
// Copy one batch of data to device
cuda_main_kernel<<<batch_size / 16 / 1024, 1024>>>
//cuda_main_kernel<<<nblock , 64>>>
(temp_d_fb, batch_size / 16, temp_d_result);
/*ctr_encrypt_nofrag_perword<<<nblock, 64>>>
((uint8_t *)temp_d_fb, (uint8_t *)g_expanded_key, total_size / 16);*/
// transfer<<<batch_size / 4 , 1>>>(temp_d_fb, temp_d_result);
// Running kernel to cipher a 16 byte cipher block
// Each CUDA block process a 16 byte cipher block
checkCudaErrors(cudaThreadSynchronize());
// Barrier
checkCudaErrors(
cudaMemcpy(temp_h_result, temp_d_result, batch_size,
cudaMemcpyDeviceToHost));
// Fetch the result back
temp_d_result += batch_size;
temp_d_fb += batch_size;
temp_h_result += batch_size;
temp_file_buffer += batch_size;
}
}
// Now the kernel is expected to process the given file_buffer into result_buffer
// One batch is infused into device, processed and then fetched back.
// The loop stops until the file is done ciphering.
std::printf("Loop done!\n");
size_t end_ts = GetTimeMS();
double thruput = (total_size / (1024 * 1024 * 1024 + 0.0))
/ ((end_ts - begin_ts) / 1000.0);
double latency = (end_ts - begin_ts) / (total_batches / 1000.0);
printf("total time: %lf s\n", (end_ts - begin_ts) / 1000.0);
printf("Thruput: %lf GB/s, latency: %lf us/batch \n", thruput, latency);
printf("[End benchmark]\n");
checkCudaErrors(cudaFreeHost(h_result));
checkCudaErrors(cudaFree(d_fb));
checkCudaErrors(cudaFree(d_result));
// Leave file_buffer alone
// It is freed in superclass d'tor
}
|
e39ebe52e647623b99fb8da80ea3341d5936d736.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define COALESCED_NUM 16
#define blockDimX 128
#define blockDimY 1
#define gridDimX (gridDim.x)
#define gridDimY (gridDim.y)
#define idx (blockIdx.x*blockDimX+threadIdx.x)
#define idy (blockIdx.y*blockDimY+threadIdx.y)
#define bidy (blockIdx.y)
#define bidx (blockIdx.x)
#define tidx (threadIdx.x)
#define tidy (threadIdx.y)
#define merger_y 1
#define coalesced_idy (bidy/(COALESCED_NUM/(merger_y*blockDimY))*COALESCED_NUM)
#define A(y,x) A[(y)*WIDTH_A+(x)]
#define B(y,x) B[(y)*WIDTH_B+(x)]
#define C(y,x) C[(y)*WIDTH_C+(x)]
#define WIDTH_C 2048
#define WIDTH_B 2048
#define WIDTH_A 2048
__global__ void matmul(float * A, float * B, float * C, int width, int height)
{
__shared__ float shared_0[16];
int i;
float sum;
sum=0;
for (i=0; i<width; i=(i+16))
{
int it_1;
if ((tidx<16))
{
shared_0[(tidx+0)]=A(idy, (i+tidx));
}
__syncthreads();
#pragma unroll
for (it_1=0; it_1<16; it_1=(it_1+1))
{
float a;
float b;
a=shared_0[it_1];
b=B((it_1+i), idx);
sum+=(a*b);
}
__syncthreads();
}
{
C(idy, idx)=sum;
}
}
| e39ebe52e647623b99fb8da80ea3341d5936d736.cu | #define COALESCED_NUM 16
#define blockDimX 128
#define blockDimY 1
#define gridDimX (gridDim.x)
#define gridDimY (gridDim.y)
#define idx (blockIdx.x*blockDimX+threadIdx.x)
#define idy (blockIdx.y*blockDimY+threadIdx.y)
#define bidy (blockIdx.y)
#define bidx (blockIdx.x)
#define tidx (threadIdx.x)
#define tidy (threadIdx.y)
#define merger_y 1
#define coalesced_idy (bidy/(COALESCED_NUM/(merger_y*blockDimY))*COALESCED_NUM)
#define A(y,x) A[(y)*WIDTH_A+(x)]
#define B(y,x) B[(y)*WIDTH_B+(x)]
#define C(y,x) C[(y)*WIDTH_C+(x)]
#define WIDTH_C 2048
#define WIDTH_B 2048
#define WIDTH_A 2048
__global__ void matmul(float * A, float * B, float * C, int width, int height)
{
__shared__ float shared_0[16];
int i;
float sum;
sum=0;
for (i=0; i<width; i=(i+16))
{
int it_1;
if ((tidx<16))
{
shared_0[(tidx+0)]=A(idy, (i+tidx));
}
__syncthreads();
#pragma unroll
for (it_1=0; it_1<16; it_1=(it_1+1))
{
float a;
float b;
a=shared_0[it_1];
b=B((it_1+i), idx);
sum+=(a*b);
}
__syncthreads();
}
{
C(idy, idx)=sum;
}
}
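// Host-side launch sketch for the 2048 x 2048 sizes hard-coded above (an
// illustration, not from the original file; d_A, d_B and d_C are assumed to
// be device buffers of 2048*2048 floats):
//
// dim3 block(blockDimX, blockDimY); // 128 x 1 threads
// dim3 grid(2048 / blockDimX, 2048 / blockDimY); // 16 x 2048 blocks
// matmul<<<grid, block>>>(d_A, d_B, d_C, 2048, 2048); // one thread per element of C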
|
a51fd497fbe168f9f22d59bfdff30ab99cce445a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef _TIMER_
#include "hip/hip_runtime_api.h"
#endif
#include "stdio.h"
#define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) )
#define max(a,b) FORMA_MAX(a,b)
#define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) )
#define min(a,b) FORMA_MIN(a,b)
#define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 )
#ifndef FORMA_MAX_BLOCKDIM_0
#define FORMA_MAX_BLOCKDIM_0 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_1
#define FORMA_MAX_BLOCKDIM_1 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_2
#define FORMA_MAX_BLOCKDIM_2 1024
#endif
template<typename T>
__global__ void __kernel_init__(T* input, T value)
{
int loc = (int)(blockIdx.x)*(int)(blockDim.x)+(int)(threadIdx.x);
input[loc] = value;
}
template<typename T>
void initialize_array(T* d_input, int size, T value)
{
dim3 init_grid(FORMA_CEIL(size,FORMA_MAX_BLOCKDIM_0));
dim3 init_block(FORMA_MAX_BLOCKDIM_0);
hipLaunchKernelGGL(( __kernel_init__), dim3(init_grid),dim3(init_block), 0, 0, d_input,value);
}
void Check_CUDA_Error(const char* message);
/*Texture references */
/*Shared Memory Variable */
extern __shared__ char __FORMA_SHARED_MEM__[];
/* Device code Begin */
/* X, Y, Z */
__global__ void __kernel___forma_kernel__0__(double * __restrict__ input, int L, int M, int N, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, int FORMA_BLOCKDIM_Z, double * __restrict__ __var_1__){
int __FORMA_SHARED_MEM_OFFSET__ = 0;
double* __tilevar_2__ = (double*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(double)*(FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X);
double* __tilevar_3__ = (double*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(double)*(FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X);
int __iter_0__ = (int)(blockIdx.x)*((int)(FORMA_BLOCKDIM_X)-4);
int __iter_1__ = (int)(blockIdx.y)*((int)(FORMA_BLOCKDIM_Y)-4);
double t2=0.0f, t3=0.0f;
double b2=0.0f, b3=0.0f;
// Initialize the values
int __iter_4__ = FORMA_MAX(__iter_1__,0) + (int)(threadIdx.y) ;
int __iter_5__ = FORMA_MAX(__iter_0__,0) + (int)(threadIdx.x) ;
if(__iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(M-1)) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(N-1))) {
__tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = input[__iter_5__+N*(__iter_4__+M*(0))];
t2 = input[__iter_5__+N*(__iter_4__+M*(1))];
}
// Rest of the computation
for (int __iter_2__ = 1; __iter_2__ < L-1; __iter_2__++) {
if(__iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(M-1)) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(N-1))){
b2 = __tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)];
__tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = t2;
t2 = input[__iter_5__+N*(__iter_4__+M*(__iter_2__+1))];
}
__syncthreads ();
if(__iter_4__ >= FORMA_MAX((__iter_1__+1),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(M-2)) & __iter_5__ >= FORMA_MAX((__iter_0__+1),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(N-2))) {
double __temp_a3__ = (__tilevar_2__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
double __temp_a7__ = (__tilevar_2__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
double __temp_a8__ = (0.161f * __temp_a3__ + 0.162f * __temp_a7__);
double __temp_a12__ = (__tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
double __temp_a13__ = (__temp_a8__ + 0.163f * __temp_a12__);
double __temp_a17__ = (__tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
double __temp_a18__ = (__temp_a13__ + 0.164f * __temp_a17__);
double __temp_a23__ = (__temp_a18__ + 0.165f * t2);
double __temp_a28__ = (__temp_a23__ + 0.166f * b2);
double __temp_a32__ = (__tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
double __temp_a33__ = (__temp_a28__ - 1.670f * __temp_a32__);
b3 = __tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)];
__tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = t3;
t3 = __temp_a33__;
}
__syncthreads ();
if(__iter_4__ >= FORMA_MAX((__iter_1__+2),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(M-2)) & __iter_5__ >= FORMA_MAX((__iter_0__+2),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(N-2))) {
double __temp_a50__ = (__tilevar_3__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
double __temp_a54__ = (__tilevar_3__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
double __temp_a55__ = (0.161f * __temp_a50__ + 0.162f * __temp_a54__);
double __temp_a59__ = (__tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
double __temp_a60__ = (__temp_a55__ + 0.163f * __temp_a59__);
double __temp_a64__ = (__tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
double __temp_a65__ = (__temp_a60__ + 0.164f * __temp_a64__);
double __temp_a70__ = (__temp_a65__ + 0.165f * t3);
double __temp_a75__ = (__temp_a70__ + 0.166f * b3);
double __temp_a79__ = (__tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
double __temp_a80__ = (__temp_a75__ - 1.670f * __temp_a79__);
__var_1__[__iter_5__+N*(__iter_4__+M*FORMA_MAX(__iter_2__-1,0))] = __temp_a80__;
}
}
}
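/* Reading off the coefficients above, each stencil application computes the
7-point update
out(x,y,z) = 0.161*in(x+1,y,z) + 0.162*in(x-1,y,z)
+ 0.163*in(x,y+1,z) + 0.164*in(x,y-1,z)
+ 0.165*in(x,y,z+1) + 0.166*in(x,y,z-1)
- 1.670*in(x,y,z);
the kernel streams through z, keeping one x-y tile per stage in shared memory
(__tilevar_2__, __tilevar_3__) and the z+1/z-1 neighbours in registers (t*, b*). */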
int __blockSizeToSMemSize___kernel___forma_kernel__0__(dim3 blockDim){
int FORMA_BLOCKDIM_Y = (int)(blockDim.y);
int FORMA_BLOCKDIM_X = (int)(blockDim.x);
int SMemSize = 0;
SMemSize += sizeof(double)*(2*FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X);
return SMemSize;
}
/*Device code End */
/* Host Code Begin */
extern "C" void j3d7pt(double * h_input, int L, int M, int N, double * __var_0__){
/* Host allocation Begin */
double * input;
hipMalloc(&input,sizeof(double)*(L*M*N));
Check_CUDA_Error("Allocation Error!! : input\n");
hipPointerAttribute_t ptrAttrib_h_input;
hipMemcpyKind memcpy_kind_h_input = hipMemcpyHostToDevice;
if (hipPointerGetAttributes(&ptrAttrib_h_input, h_input) == hipSuccess)
if (ptrAttrib_h_input.memoryType == hipMemoryTypeDevice)
memcpy_kind_h_input = hipMemcpyDeviceToDevice;
hipGetLastError();
if( memcpy_kind_h_input != hipMemcpyDeviceToDevice ){
hipMemcpy(input,h_input,sizeof(double)*(L*M*N), memcpy_kind_h_input);
}
double * __var_1__;
hipMalloc(&__var_1__,sizeof(double)*(L*M*N));
Check_CUDA_Error("Allocation Error!! : __var_1__\n");
double * __var_2__;
hipMalloc(&__var_2__,sizeof(double)*(L*M*N));
Check_CUDA_Error("Allocation Error!! : __var_2__\n");
/*Host Allocation End */
/* Kernel Launch Begin */
int __FORMA_MAX_SHARED_MEM__;
hipDeviceGetAttribute(&__FORMA_MAX_SHARED_MEM__,hipDeviceAttributeMaxSharedMemoryPerBlock,0);
#ifdef _TIMER_
hipEvent_t _forma_timer_start_,_forma_timer_stop_;
hipEventCreate(&_forma_timer_start_);
hipEventCreate(&_forma_timer_stop_);
hipEventRecord(_forma_timer_start_,0);
#endif
int __size_0___kernel___forma_kernel__0__ = N;
int __size_1___kernel___forma_kernel__0__ = M;
int __block_0___kernel___forma_kernel__0__ = 32;
int __block_1___kernel___forma_kernel__0__ = 16;
int __block_2___kernel___forma_kernel__0__ = 1;
dim3 __blockConfig___kernel___forma_kernel__0__(__block_0___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__,__block_2___kernel___forma_kernel__0__);
int __SMemSize___kernel___forma_kernel__0__ = 0;
__SMemSize___kernel___forma_kernel__0__ = __blockSizeToSMemSize___kernel___forma_kernel__0__(__blockConfig___kernel___forma_kernel__0__);
int __grid_0___kernel___forma_kernel__0__ = FORMA_CEIL(__size_0___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.x-4);
int __grid_1___kernel___forma_kernel__0__ = FORMA_CEIL(__size_1___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.y-4);
int __grid_2___kernel___forma_kernel__0__ = 1;
dim3 __gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__,__grid_2___kernel___forma_kernel__0__);
dim3 unrollConfig (__blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z);
hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(unrollConfig), __SMemSize___kernel___forma_kernel__0__, 0, input, L, M, N, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z, __var_2__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n");
hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(unrollConfig), __SMemSize___kernel___forma_kernel__0__, 0, __var_2__, L, M, N, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z, __var_1__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n");
hipPointerAttribute_t ptrAttrib___var_0__;
hipMemcpyKind memcpy_kind___var_0__ = hipMemcpyDeviceToHost;
if (hipPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == hipSuccess)
if (ptrAttrib___var_0__.memoryType == hipMemoryTypeDevice)
memcpy_kind___var_0__ = hipMemcpyDeviceToDevice;
hipGetLastError();
hipMemcpy(__var_0__,__var_1__, sizeof(double)*(L*M*N), memcpy_kind___var_0__);
#ifdef _TIMER_
hipEventRecord(_forma_timer_stop_,0);
hipEventSynchronize(_forma_timer_stop_);
float elapsedTime;
hipEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_);
printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime);
hipEventDestroy(_forma_timer_start_);
hipEventDestroy(_forma_timer_stop_);
#endif
/*Kernel Launch End */
/* Host Free Begin */
hipFree(input);
hipFree(__var_1__);
hipFree(__var_2__);
}
/*Host Free End*/
| a51fd497fbe168f9f22d59bfdff30ab99cce445a.cu | #include "cuda.h"
#ifdef _TIMER_
#include "cuda_profiler_api.h"
#endif
#include "stdio.h"
#define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) )
#define max(a,b) FORMA_MAX(a,b)
#define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) )
#define min(a,b) FORMA_MIN(a,b)
#define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 )
#ifndef FORMA_MAX_BLOCKDIM_0
#define FORMA_MAX_BLOCKDIM_0 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_1
#define FORMA_MAX_BLOCKDIM_1 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_2
#define FORMA_MAX_BLOCKDIM_2 1024
#endif
template<typename T>
__global__ void __kernel_init__(T* input, T value)
{
int loc = (int)(blockIdx.x)*(int)(blockDim.x)+(int)(threadIdx.x);
input[loc] = value;
}
template<typename T>
void initialize_array(T* d_input, int size, T value)
{
dim3 init_grid(FORMA_CEIL(size,FORMA_MAX_BLOCKDIM_0));
dim3 init_block(FORMA_MAX_BLOCKDIM_0);
__kernel_init__<<<init_grid,init_block>>>(d_input,value);
}
void Check_CUDA_Error(const char* message);
/*Texture references */
/*Shared Memory Variable */
extern __shared__ char __FORMA_SHARED_MEM__[];
/* Device code Begin */
/* X, Y, Z */
__global__ void __kernel___forma_kernel__0__(double * __restrict__ input, int L, int M, int N, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, int FORMA_BLOCKDIM_Z, double * __restrict__ __var_1__){
int __FORMA_SHARED_MEM_OFFSET__ = 0;
double* __tilevar_2__ = (double*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(double)*(FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X);
double* __tilevar_3__ = (double*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(double)*(FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X);
int __iter_0__ = (int)(blockIdx.x)*((int)(FORMA_BLOCKDIM_X)-4);
int __iter_1__ = (int)(blockIdx.y)*((int)(FORMA_BLOCKDIM_Y)-4);
double t2=0.0f, t3=0.0f;
double b2=0.0f, b3=0.0f;
// Initialize the values
int __iter_4__ = FORMA_MAX(__iter_1__,0) + (int)(threadIdx.y) ;
int __iter_5__ = FORMA_MAX(__iter_0__,0) + (int)(threadIdx.x) ;
if(__iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(M-1)) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(N-1))) {
__tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = input[__iter_5__+N*(__iter_4__+M*(0))];
t2 = input[__iter_5__+N*(__iter_4__+M*(1))];
}
// Rest of the computation
for (int __iter_2__ = 1; __iter_2__ < L-1; __iter_2__++) {
if(__iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(M-1)) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(N-1))){
b2 = __tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)];
__tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = t2;
t2 = input[__iter_5__+N*(__iter_4__+M*(__iter_2__+1))];
}
__syncthreads ();
if(__iter_4__ >= FORMA_MAX((__iter_1__+1),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(M-2)) & __iter_5__ >= FORMA_MAX((__iter_0__+1),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(N-2))) {
double __temp_a3__ = (__tilevar_2__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
double __temp_a7__ = (__tilevar_2__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
double __temp_a8__ = (0.161f * __temp_a3__ + 0.162f * __temp_a7__);
double __temp_a12__ = (__tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
double __temp_a13__ = (__temp_a8__ + 0.163f * __temp_a12__);
double __temp_a17__ = (__tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
double __temp_a18__ = (__temp_a13__ + 0.164f * __temp_a17__);
double __temp_a23__ = (__temp_a18__ + 0.165f * t2);
double __temp_a28__ = (__temp_a23__ + 0.166f * b2);
double __temp_a32__ = (__tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
double __temp_a33__ = (__temp_a28__ - 1.670f * __temp_a32__);
b3 = __tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)];
__tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = t3;
t3 = __temp_a33__;
}
__syncthreads ();
if(__iter_4__ >= FORMA_MAX((__iter_1__+2),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(M-2)) & __iter_5__ >= FORMA_MAX((__iter_0__+2),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(N-2))) {
double __temp_a50__ = (__tilevar_3__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
double __temp_a54__ = (__tilevar_3__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
double __temp_a55__ = (0.161f * __temp_a50__ + 0.162f * __temp_a54__);
double __temp_a59__ = (__tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
double __temp_a60__ = (__temp_a55__ + 0.163f * __temp_a59__);
double __temp_a64__ = (__tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
double __temp_a65__ = (__temp_a60__ + 0.164f * __temp_a64__);
double __temp_a70__ = (__temp_a65__ + 0.165f * t3);
double __temp_a75__ = (__temp_a70__ + 0.166f * b3);
double __temp_a79__ = (__tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
double __temp_a80__ = (__temp_a75__ - 1.670f * __temp_a79__);
__var_1__[__iter_5__+N*(__iter_4__+M*FORMA_MAX(__iter_2__-1,0))] = __temp_a80__;
}
}
}
int __blockSizeToSMemSize___kernel___forma_kernel__0__(dim3 blockDim){
int FORMA_BLOCKDIM_Y = (int)(blockDim.y);
int FORMA_BLOCKDIM_X = (int)(blockDim.x);
int SMemSize = 0;
SMemSize += sizeof(double)*(2*FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X);
return SMemSize;
}
/*Device code End */
/* Host Code Begin */
extern "C" void j3d7pt(double * h_input, int L, int M, int N, double * __var_0__){
/* Host allocation Begin */
double * input;
cudaMalloc(&input,sizeof(double)*(L*M*N));
Check_CUDA_Error("Allocation Error!! : input\n");
cudaPointerAttributes ptrAttrib_h_input;
cudaMemcpyKind memcpy_kind_h_input = cudaMemcpyHostToDevice;
if (cudaPointerGetAttributes(&ptrAttrib_h_input, h_input) == cudaSuccess)
if (ptrAttrib_h_input.memoryType == cudaMemoryTypeDevice)
memcpy_kind_h_input = cudaMemcpyDeviceToDevice;
cudaGetLastError();
if( memcpy_kind_h_input != cudaMemcpyDeviceToDevice ){
cudaMemcpy(input,h_input,sizeof(double)*(L*M*N), memcpy_kind_h_input);
}
double * __var_1__;
cudaMalloc(&__var_1__,sizeof(double)*(L*M*N));
Check_CUDA_Error("Allocation Error!! : __var_1__\n");
double * __var_2__;
cudaMalloc(&__var_2__,sizeof(double)*(L*M*N));
Check_CUDA_Error("Allocation Error!! : __var_2__\n");
/*Host Allocation End */
/* Kernel Launch Begin */
int __FORMA_MAX_SHARED_MEM__;
cudaDeviceGetAttribute(&__FORMA_MAX_SHARED_MEM__,cudaDevAttrMaxSharedMemoryPerBlock,0);
#ifdef _TIMER_
cudaEvent_t _forma_timer_start_,_forma_timer_stop_;
cudaEventCreate(&_forma_timer_start_);
cudaEventCreate(&_forma_timer_stop_);
cudaEventRecord(_forma_timer_start_,0);
#endif
int __size_0___kernel___forma_kernel__0__ = N;
int __size_1___kernel___forma_kernel__0__ = M;
int __block_0___kernel___forma_kernel__0__ = 32;
int __block_1___kernel___forma_kernel__0__ = 16;
int __block_2___kernel___forma_kernel__0__ = 1;
dim3 __blockConfig___kernel___forma_kernel__0__(__block_0___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__,__block_2___kernel___forma_kernel__0__);
int __SMemSize___kernel___forma_kernel__0__ = 0;
__SMemSize___kernel___forma_kernel__0__ = __blockSizeToSMemSize___kernel___forma_kernel__0__(__blockConfig___kernel___forma_kernel__0__);
int __grid_0___kernel___forma_kernel__0__ = FORMA_CEIL(__size_0___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.x-4);
int __grid_1___kernel___forma_kernel__0__ = FORMA_CEIL(__size_1___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.y-4);
int __grid_2___kernel___forma_kernel__0__ = 1;
dim3 __gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__,__grid_2___kernel___forma_kernel__0__);
dim3 unrollConfig (__blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z);
__kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, unrollConfig, __SMemSize___kernel___forma_kernel__0__>>> (input, L, M, N, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z, __var_2__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n");
__kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, unrollConfig, __SMemSize___kernel___forma_kernel__0__>>> (__var_2__, L, M, N, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z, __var_1__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n");
cudaPointerAttributes ptrAttrib___var_0__;
cudaMemcpyKind memcpy_kind___var_0__ = cudaMemcpyDeviceToHost;
if (cudaPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == cudaSuccess)
if (ptrAttrib___var_0__.memoryType == cudaMemoryTypeDevice)
memcpy_kind___var_0__ = cudaMemcpyDeviceToDevice;
cudaGetLastError();
cudaMemcpy(__var_0__,__var_1__, sizeof(double)*(L*M*N), memcpy_kind___var_0__);
#ifdef _TIMER_
cudaEventRecord(_forma_timer_stop_,0);
cudaEventSynchronize(_forma_timer_stop_);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_);
printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime);
cudaEventDestroy(_forma_timer_start_);
cudaEventDestroy(_forma_timer_stop_);
#endif
/*Kernel Launch End */
/* Host Free Begin */
cudaFree(input);
cudaFree(__var_1__);
cudaFree(__var_2__);
}
/*Host Free End*/
|
fce919dc65ca86869453ae706d57cd50d4a89108.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
kernel diff is used to test Nvidia's racecheck profiling tool; without the __syncthreads() after the shared-memory loads it contains a shared-memory race for racecheck to flag.
Code taken from Introduction to High Performance Scientific Computing by DL Chopp
*/
#include <cmath>
#include <stdio.h>
#include <stdlib.h>
#include "../error_check_cuda.h"
#ifndef M_PI
#define M_PI 3.1415926535897932384626433832795
#endif
// set the threads per block as a constant multiple of the warp size (32 in virtually every case)
const int threadsPerBlock = 256; // 8 full warps per block
// declare the kernel function
__global__ void diff(double* u, int* N, double* dx, double* du);
/**
* @brief Demonstrate a simple example for implementing a parallel finite difference operator using GPU shared memory
*
* @param argc Should be 2.
* @param argv[1] Length of the vector of data.
* @return int.
*/
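// Example: with N = 1024 on the command line, the program differentiates
// u[i] = sin(i*dx) on a periodic grid and prints du[i], which should
// approximate cos(i*dx).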
int main(int argc, char* argv[]) {
// read in the number of points
int N = atoi(argv[1]);
// determine how many blocks are needed for the whole vector
const int blocksPerGrid = N / threadsPerBlock + (N % threadsPerBlock > 0 ? 1 : 0);
// allocate host memory
double* u = (double*)malloc(N * sizeof(double));
double* du = (double*)malloc(N * sizeof(double));
double dx = 2 * M_PI / N; // finite difference length
double* dev_u;
double* dev_du;
double* dev_dx;
int* dev_N;
// allocate device memory
CheckError(hipMalloc((void**)&dev_u, N * sizeof(double)));
CheckError(hipMalloc((void**)&dev_du, N * sizeof(double)));
CheckError(hipMalloc((void**)&dev_N, sizeof(int)));
CheckError(hipMalloc((void**)&dev_dx, sizeof(double)));
// initialize data on the host
for (int i = 0; i < N; i++) {
u[i] = sin(i * dx);
}
// copy data to device
CheckError(hipMemcpy(dev_u, u, N * sizeof(double), hipMemcpyHostToDevice));
CheckError(hipMemcpy(dev_dx, &dx, sizeof(double), hipMemcpyHostToDevice));
CheckError(hipMemcpy(dev_N, &N, sizeof(int), hipMemcpyHostToDevice));
// execute the finite difference kernel
hipLaunchKernelGGL(( diff), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dev_u, dev_N, dev_dx, dev_du);
// copy the result back to the host
CheckError(hipMemcpy(du, dev_du, N * sizeof(double), hipMemcpyDeviceToHost));
// clean up allocated device memory
CheckError(hipFree(dev_du));
CheckError(hipFree(dev_N));
CheckError(hipFree(dev_dx));
CheckError(hipFree(dev_u));
for (int i = 0; i < N; i++) {
printf("%f\n", du[i]);
}
// clean up allocated host memory
free(u);
free(du);
return 0;
}
__global__ void diff(double* u, int* N, double* dx, double* du) {
// shared memory is implicitly declared static
__shared__ double local_u[threadsPerBlock + 2]; // need 2 more spaces for computing the finite difference of boundary points
__shared__ double local_du[threadsPerBlock];
// set up global and shared memory indices
int g_i = (threadIdx.x + blockIdx.x * blockDim.x) % *N;
int l_i = threadIdx.x + 1; // add 1 to index because local_u[0] should be one less (mod N) than the first index of the local data
int g_im = (g_i + *N - 1) % *N; // we add N and subtract 1 as opposed to just subtract 1 because data is periodic and we dont want a negative index
int g_ip = (g_i + 1) % *N;
// Transfer global memory to shared memory
local_u[l_i] = u[g_i];
if (threadIdx.x == 0) { // if this thread corresponds to the first element in our local data
local_u[0] = u[g_im]; // make sure to set the 0th index of local_u to one minus the first index + N (mod N)
}
if (threadIdx.x == threadsPerBlock - 1) { // if this thread corresponds to the last element in our local data
local_u[l_i + 1] = u[g_ip]; // set the last element of local_u
}
__syncthreads();
// compute the central finite difference for this thread and store in shared memory
local_du[threadIdx.x] = (local_u[threadIdx.x + 2] - local_u[threadIdx.x]) / (*dx) / 2;
// transfer the results from shared memory to global memory
du[g_i] = local_du[threadIdx.x];
} | fce919dc65ca86869453ae706d57cd50d4a89108.cu | /*
kernel diff is used to test Nvidia's racecheck profiling tool; without the __syncthreads() after the shared-memory loads it contains a shared-memory race for racecheck to flag.
Code taken from Introduction to High Performance Scientific Computing by DL Chopp
*/
#include <cmath>
#include <stdio.h>
#include <stdlib.h>
#include "../error_check_cuda.h"
#ifndef M_PI
#define M_PI 3.1415926535897932384626433832795
#endif
// set the threads per block as a constant multiple of the warp size (32 in virtually every case)
const int threadsPerBlock = 256; // 8 full warps per block
// declare the kernel function
__global__ void diff(double* u, int* N, double* dx, double* du);
/**
* @brief Demonstrate a simple example for implementing a parallel finite difference operator using GPU shared memory
*
* @param argc Should be 2.
* @param argv[1] Length of the vector of data.
* @return int.
*/
int main(int argc, char* argv[]) {
// read in the number of points
int N = atoi(argv[1]);
// determine how many blocks are needed for the whole vector
const int blocksPerGrid = N / threadsPerBlock + (N % threadsPerBlock > 0 ? 1 : 0);
// allocate host memory
double* u = (double*)malloc(N * sizeof(double));
double* du = (double*)malloc(N * sizeof(double));
double dx = 2 * M_PI / N; // finite difference length
double* dev_u;
double* dev_du;
double* dev_dx;
int* dev_N;
// allocate device memory
CheckError(cudaMalloc((void**)&dev_u, N * sizeof(double)));
CheckError(cudaMalloc((void**)&dev_du, N * sizeof(double)));
CheckError(cudaMalloc((void**)&dev_N, sizeof(int)));
CheckError(cudaMalloc((void**)&dev_dx, sizeof(double)));
// initialize data on the host
for (int i = 0; i < N; i++) {
u[i] = sin(i * dx);
}
// copy data to device
CheckError(cudaMemcpy(dev_u, u, N * sizeof(double), cudaMemcpyHostToDevice));
CheckError(cudaMemcpy(dev_dx, &dx, sizeof(double), cudaMemcpyHostToDevice));
CheckError(cudaMemcpy(dev_N, &N, sizeof(int), cudaMemcpyHostToDevice));
// execute the finite difference kernel
diff<<<blocksPerGrid, threadsPerBlock>>>(dev_u, dev_N, dev_dx, dev_du);
// copy the result back to the host
CheckError(cudaMemcpy(du, dev_du, N * sizeof(double), cudaMemcpyDeviceToHost));
// clean up allocated device memory
CheckError(cudaFree(dev_du));
CheckError(cudaFree(dev_N));
CheckError(cudaFree(dev_dx));
CheckError(cudaFree(dev_u));
for (int i = 0; i < N; i++) {
printf("%f\n", du[i]);
}
// clean up allocated host memory
free(u);
free(du);
return 0;
}
__global__ void diff(double* u, int* N, double* dx, double* du) {
// shared memory is implicitly declared static
__shared__ double local_u[threadsPerBlock + 2]; // need 2 more spaces for computing the finite difference of boundary points
__shared__ double local_du[threadsPerBlock];
// set up global and shared memory indices
int g_i = (threadIdx.x + blockIdx.x * blockDim.x) % *N;
int l_i = threadIdx.x + 1; // add 1 to index because local_u[0] should be one less (mod N) than the first index of the local data
int g_im = (g_i + *N - 1) % *N; // we add N and subtract 1 as opposed to just subtract 1 because data is periodic and we dont want a negative index
int g_ip = (g_i + 1) % *N;
// Transfer global memory to shared memory
local_u[l_i] = u[g_i];
if (threadIdx.x == 0) { // if this thread corresponds to the first element in our local data
local_u[0] = u[g_im]; // make sure to set the 0th index of local_u to one minus the first index + N (mod N)
}
if (threadIdx.x == threadsPerBlock - 1) { // if this thread corresponds to the last element in our local data
local_u[l_i + 1] = u[g_ip]; // set the last element of local_u
}
__syncthreads();
// compute the central finite difference for this thread and store in shared memory
local_du[threadIdx.x] = (local_u[threadIdx.x + 2] - local_u[threadIdx.x]) / (*dx) / 2;
// transfer the results from shared memory to global memory
du[g_i] = local_du[threadIdx.x];
} |
878478fb88faec5266201ae25084462b8dd866c5.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <hip/hip_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/for_each.h>
#include <thrust/extrema.h>
#include <thrust/remove.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#include <thrust/unique.h>
#include <rmm/rmm.h>
#include <rmm/thrust_rmm_allocator.h>
#include <utilities/error_utils.hpp>
#include "nvstrings/NVStrings.h"
#include "nvstrings/NVText.h"
#include "../custring_view.cuh"
#include "../custring.cuh"
#include "../util.h"
// This base class walks a string looking for specified delimiter character(s).
// It will automatically ignore adjacent delimiters (different than split).
// The next_token method returns character start position (spos) and end
// position (epos) between delimiter runs identifying each token.
// An iterator is used to retrieve each utf8 character to be checked.
// The spaces parameter identifies a run of delimiters (or not delimiters).
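// For example, with the default whitespace delimiter the string
// "  hello  world " yields (spos,epos) = (2,7) and then (9,14), i.e. the
// tokens "hello" and "world"; leading, trailing and repeated delimiters never
// produce empty tokens.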
struct base_tokenator
{
custring_view* d_delimiter{nullptr};
__device__ bool is_delimiter(Char ch)
{
if( !d_delimiter )
return (ch <= ' '); // all ascii whitespace
return d_delimiter->find(ch)>=0;
}
__device__ bool next_token( custring_view* dstr, bool& spaces, custring_view::iterator& itr, int& spos, int& epos )
{
if( spos >= dstr->chars_count() )
return false;
for( ; itr != dstr->end(); ++itr )
{
Char ch = *itr;
if( spaces == is_delimiter(ch) )
{
if( spaces )
spos = itr.position()+1;
else
epos = itr.position()+1;
continue;
}
spaces = !spaces;
if( spaces )
{
epos = itr.position();
break;
}
}
return spos < epos;
}
};
//
struct tokenize_fn : base_tokenator
{
custring_view_array d_strings;
size_t* d_counts;
size_t* d_offsets;
thrust::pair<const char*,size_t>* d_tokens;
tokenize_fn( custring_view_array d_strings, custring_view* d_delimiter, size_t* d_counts, size_t* d_offsets=nullptr, thrust::pair<const char*,size_t>* d_tokens=nullptr )
: base_tokenator{d_delimiter}, d_strings(d_strings), d_counts(d_counts), d_offsets(d_offsets), d_tokens(d_tokens) {}
__device__ void operator()(unsigned int idx)
{
custring_view* dstr = d_strings[idx];
if( !dstr )
return;
thrust::pair<const char*,size_t>* dstr_tokens = nullptr;
if( d_tokens )
{
if( d_counts[idx]==0 )
return;
dstr_tokens = d_tokens + d_offsets[idx];
}
bool spaces = true;
int nchars = dstr->chars_count();
int spos = 0, epos = nchars, tidx = 0;
auto itr = dstr->begin();
while( next_token(dstr,spaces,itr,spos,epos) )
{
if( dstr_tokens )
{
int spos_bo = dstr->byte_offset_for(spos); // convert char pos
int epos_bo = dstr->byte_offset_for(epos); // to byte offset
dstr_tokens[tidx].first = dstr->data() + spos_bo;
dstr_tokens[tidx].second = (epos_bo-spos_bo);
}
spos = epos + 1;
epos = nchars;
++itr;
++tidx;
}
d_counts[idx] = tidx;
}
};
NVStrings* NVText::tokenize(NVStrings& strs, const char* delimiter)
{
auto execpol = rmm::exec_policy(0);
custring_view* d_delimiter = custring_from_host(delimiter);
unsigned int count = strs.size();
rmm::device_vector<custring_view*> strings(count,nullptr);
custring_view** d_strings = strings.data().get();
strs.create_custring_index(d_strings);
// count how many tokens in each string
rmm::device_vector<size_t> counts(count,0);
size_t* d_counts = counts.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
tokenize_fn(d_strings,d_delimiter,d_counts));
// compute the total number of tokens
size_t tokens_count = thrust::reduce(execpol->on(0), counts.begin(), counts.end());
// create token-index offsets
rmm::device_vector<size_t> offsets(count,0);
size_t* d_offsets = offsets.data().get();
thrust::exclusive_scan( execpol->on(0), counts.begin(), counts.end(), offsets.begin() );
// build a list of pointers to each token
rmm::device_vector< thrust::pair<const char*,size_t> > tokens(tokens_count);
thrust::pair<const char*,size_t>* d_tokens = tokens.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
tokenize_fn(d_strings, d_delimiter, d_counts, d_offsets, d_tokens));
//
RMM_FREE(d_delimiter,0);
// build strings object from tokens elements
return NVStrings::create_from_index((std::pair<const char*,size_t>*)d_tokens,tokens_count);
}
// same but with multiple delimiters
NVStrings* NVText::tokenize(NVStrings& strs, NVStrings& delims)
{
unsigned int delims_count = delims.size();
if( delims_count==0 )
return NVText::tokenize(strs);
auto execpol = rmm::exec_policy(0);
rmm::device_vector<custring_view*> delimiters(delims_count,nullptr);
custring_view** d_delimiters = delimiters.data().get();
delims.create_custring_index(d_delimiters);
unsigned int count = strs.size();
rmm::device_vector<custring_view*> strings(count,nullptr);
custring_view** d_strings = strings.data().get();
strs.create_custring_index(d_strings);
// count how many tokens in each string
rmm::device_vector<size_t> counts(count,0);
size_t* d_counts = counts.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_delimiters, delims_count, d_counts] __device__(unsigned int idx){
custring_view* d_string = d_strings[idx];
if( !d_string )
return;
int tokens = 1;
const char* sptr = d_string->data();
const char* eptr = sptr + d_string->size();
while( sptr < eptr )
{
int incr = 1;
for( int didx=0; didx < delims_count; ++didx )
{
custring_view* d_delim = d_delimiters[didx];
if( !d_delim || d_delim->empty() )
continue;
if( d_delim->compare(sptr,d_delim->size()) !=0 )
continue;
++tokens;
incr = d_delim->size();
break;
}
sptr += incr;
}
d_counts[idx] = tokens;
});
// compute the total number of tokens
size_t tokens_count = thrust::reduce(execpol->on(0), counts.begin(), counts.end());
// create token-index offsets
rmm::device_vector<size_t> offsets(count,0);
thrust::exclusive_scan( execpol->on(0), counts.begin(), counts.end(), offsets.begin() );
size_t* d_offsets = offsets.data().get();
// build a list of pointers to each token
rmm::device_vector< thrust::pair<const char*,size_t> > tokens(tokens_count);
thrust::pair<const char*,size_t>* d_tokens = tokens.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_delimiters, delims_count, d_counts, d_offsets, d_tokens] __device__(unsigned int idx) {
custring_view* d_string = d_strings[idx];
if( !d_string )
return;
size_t token_count = d_counts[idx];
if( token_count==0 )
return;
auto dstr_tokens = d_tokens + d_offsets[idx];
const char* data = d_string->data();
const char* sptr = data;
auto size = d_string->size();
const char* eptr = sptr + size;
int spos = 0, tidx = 0;
while( sptr < eptr )
{
int incr = 1;
for( int didx=0; didx < delims_count; ++didx )
{
custring_view* d_delim = d_delimiters[didx];
if( !d_delim || d_delim->empty() )
continue;
if( d_delim->compare(sptr,d_delim->size()) !=0 )
continue;
// found delimiter
dstr_tokens[tidx].first = data + spos;
dstr_tokens[tidx].second = ((sptr - data) - spos);
++tidx;
incr = d_delim->size();
spos = (sptr - data) + incr;
break;
}
sptr += incr;
}
if( (tidx < token_count) && (spos < size) )
{
dstr_tokens[tidx].first = data + spos;
dstr_tokens[tidx].second = size - spos;
}
});
// remove any empty strings -- occurs if two delimiters are next to each other
auto end = thrust::remove_if(execpol->on(0), d_tokens, d_tokens + tokens_count,
[] __device__ ( thrust::pair<const char*,size_t> w ) { return w.second==0; } );
unsigned int nsize = (unsigned int)(end - d_tokens); // new token count
//
// build strings object from tokens elements
return NVStrings::create_from_index((std::pair<const char*,size_t>*)d_tokens,nsize);
}
//
NVStrings* NVText::unique_tokens(NVStrings& strs, const char* delimiter)
{
auto execpol = rmm::exec_policy(0);
custring_view* d_delimiter = custring_from_host(delimiter);
unsigned int count = strs.size();
rmm::device_vector<custring_view*> strings(count,nullptr);
custring_view** d_strings = strings.data().get();
strs.create_custring_index(d_strings);
// count how many tokens in each string
rmm::device_vector<size_t> counts(count,0);
size_t* d_counts = counts.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
tokenize_fn(d_strings,d_delimiter,d_counts));
// compute the total number of tokens
size_t tokens_count = thrust::reduce(execpol->on(0), counts.begin(), counts.end());
// create token-index offsets
rmm::device_vector<size_t> offsets(count,0);
size_t* d_offsets = offsets.data().get();
thrust::exclusive_scan( execpol->on(0), counts.begin(), counts.end(), offsets.begin() );
// build a list of pointers to each token
rmm::device_vector< thrust::pair<const char*,size_t> > tokens(tokens_count);
thrust::pair<const char*,size_t>* d_tokens = tokens.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
tokenize_fn(d_strings, d_delimiter, d_counts, d_offsets, d_tokens));
//
RMM_FREE(d_delimiter,0);
thrust::sort( execpol->on(0), d_tokens, d_tokens + tokens_count,
[] __device__ ( thrust::pair<const char*,size_t>& lhs, thrust::pair<const char*,size_t>& rhs) {
return custr::compare(lhs.first,(unsigned)lhs.second,rhs.first,(unsigned)rhs.second)<0;
});
thrust::pair<const char*,size_t>* newend = thrust::unique(execpol->on(0), d_tokens, d_tokens + tokens_count,
[] __device__ ( thrust::pair<const char*,size_t> lhs, thrust::pair<const char*,size_t> rhs ) {
return custr::compare(lhs.first,(unsigned int)lhs.second,rhs.first,(unsigned int)rhs.second)==0;
});
unsigned int newsize = (unsigned int)(newend - d_tokens); // new size
return NVStrings::create_from_index((std::pair<const char*,size_t>*)d_tokens,newsize);
}
// Your basic token counter
struct nvtext_token_counter : base_tokenator
{
custring_view_array d_strings;
unsigned int* d_counts;
//
nvtext_token_counter( custring_view_array d_strings, custring_view* d_delimiter, unsigned int* d_counts )
: base_tokenator{d_delimiter}, d_strings(d_strings), d_counts(d_counts) {}
//
__device__ void operator()(unsigned int idx)
{
custring_view* dstr = d_strings[idx];
unsigned int token_count = 0;
if( dstr )
{
bool spaces = true;
int nchars = dstr->chars_count();
int spos = 0, epos = nchars;
auto itr = dstr->begin();
while( next_token(dstr,spaces,itr,spos,epos) )
{
++token_count;
spos = epos + 1; // setup
epos = nchars; // for next
++itr; // token
}
}
d_counts[idx] = token_count;
}
};
// return a count of the number of tokens for each string when applying the specified delimiter
unsigned int NVText::token_count( NVStrings& strs, const char* delimiter, unsigned int* results, bool bdevmem )
{
auto execpol = rmm::exec_policy(0);
custring_view* d_delimiter = custring_from_host(delimiter);
unsigned int count = strs.size();
unsigned int* d_counts = results;
if( !bdevmem )
d_counts = device_alloc<unsigned int>(count,0);
// count how many tokens per string
rmm::device_vector<custring_view*> strings(count,nullptr);
custring_view** d_strings = strings.data().get();
strs.create_custring_index(d_strings);
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
nvtext_token_counter{d_strings,d_delimiter,d_counts});
//
if( !bdevmem )
{
CUDA_TRY( hipMemcpyAsync(results,d_counts,count*sizeof(unsigned int),hipMemcpyDeviceToHost));
RMM_FREE(d_counts,0);
}
RMM_FREE(d_delimiter,0);
return 0;
}
//
struct tokens_counts_fn : base_tokenator
{
custring_view_array d_strings;
custring_view_array d_tokens;
unsigned int token_count;
int* d_token_indexes;
unsigned int* d_results;
tokens_counts_fn( custring_view_array d_strings, custring_view_array d_tokens, unsigned int token_count,
int* d_token_indexes, custring_view* d_delimiter, unsigned int* d_results )
: base_tokenator{d_delimiter}, d_strings(d_strings), d_tokens(d_tokens), token_count(token_count),
d_token_indexes(d_token_indexes), d_results(d_results) {}
__device__ int match_tokens( custring_view* dstr, int spos_bo, int epos_bo )
{
int length = epos_bo - spos_bo;
for( int tidx=0; tidx < token_count; ++tidx )
{
custring_view* d_token = d_tokens[tidx];
if( d_token &&
(length==d_token->size()) &&
(d_token->compare(dstr->data()+spos_bo,length)==0) )
{
return tidx;
}
}
return -1;
}
__device__ int match_sorted_tokens( custring_view* dstr, int spos_bo, int epos_bo )
{
int left = 0, right = token_count -1;
int length = epos_bo - spos_bo;
while( left <= right )
{
int tidx = (left + right)/2;
custring_view* d_token = d_tokens[tidx];
int cmp = (d_token ? d_token->compare(dstr->data()+spos_bo,length) : -1);
if( cmp < 0 )
left = tidx + 1;
else if( cmp > 0 )
right = tidx - 1;
else
return d_token_indexes[tidx];
}
return -1;
}
__device__ void operator()(unsigned int idx)
{
custring_view* dstr = d_strings[idx];
unsigned int* dresult = d_results + (idx*token_count);
// initialize to zero
for( int tidx=0; tidx < token_count; ++tidx )
dresult[tidx] = 0;
if( !dstr )
return;
bool spaces = true;
int nchars = dstr->chars_count();
int spos = 0, epos = nchars;
auto itr = dstr->begin();
while( next_token(dstr,spaces,itr,spos,epos) )
{
int spos_bo = dstr->byte_offset_for(spos); // convert char pos
int epos_bo = dstr->byte_offset_for(epos); // to byte offset
// check against all the tokens
int tidx = match_sorted_tokens(dstr,spos_bo,epos_bo);
if( tidx >= 0 )
++dresult[tidx];
spos = epos + 1;
epos = nchars;
++itr;
}
}
};
unsigned int NVText::tokens_counts( NVStrings& strs, NVStrings& tkns, const char* delimiter, unsigned int* results, bool todevice )
{
unsigned int count = strs.size();
unsigned int tcount = tkns.size();
if( results==0 || count==0 || tcount==0 )
return 0;
//
auto execpol = rmm::exec_policy(0);
unsigned int* d_results = results;
if( !todevice )
d_results = device_alloc<unsigned int>(tcount*count,0);
custring_view* d_delimiter = custring_from_host(delimiter);
// get the strings
rmm::device_vector<custring_view*> strings(count,nullptr);
rmm::device_vector<custring_view*> tokens(tcount,nullptr);
custring_view** d_strings = strings.data().get();
custring_view** d_tokens = tokens.data().get();
strs.create_custring_index(d_strings);
tkns.create_custring_index(d_tokens);
// sort the tokens
rmm::device_vector<int> token_indexes(tcount);
thrust::sequence(execpol->on(0), token_indexes.begin(), token_indexes.end());
int* d_token_indexes = token_indexes.data().get();
thrust::sort_by_key(execpol->on(0), d_tokens, d_tokens+tcount, d_token_indexes,
[] __device__( custring_view*& lhs, custring_view*& rhs ) {
if( lhs==0 || rhs==0 )
return (rhs!=0); // null < non-null
return lhs->compare(*rhs)<0;
});
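// The binary search in match_sorted_tokens runs over this sorted copy of the
// tokens, while d_token_indexes maps each sorted slot back to the caller's
// original token order, so every count lands in the right column of the
// row-major results array (tcount counts per string).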
// count the tokens
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
tokens_counts_fn(d_strings, d_tokens, tcount, d_token_indexes, d_delimiter, d_results) );
// done
if( !todevice )
{ // copy result back to host
CUDA_TRY( hipMemcpyAsync(results,d_results,sizeof(unsigned int)*count*tcount,hipMemcpyDeviceToHost));
RMM_FREE(d_results,0);
}
return 0;
}
struct replace_tokens_fn : base_tokenator
{
custring_view_array d_strings;
custring_view_array d_tokens;
unsigned int token_count;
custring_view_array d_repls;
unsigned int repl_count;
custring_view* d_delimiter;
size_t* d_offsets;
bool bcompute_size_only;
char* d_buffer;
thrust::pair<const char*,size_t>* d_indexes;
replace_tokens_fn( custring_view_array d_strings, custring_view_array d_tokens, unsigned int token_count,
custring_view_array d_repls, unsigned int repl_count, custring_view* d_delimiter,
size_t* d_offsets, bool bcompute_size_only=true, char* d_buffer=nullptr,
thrust::pair<const char*,size_t>* d_indexes=nullptr )
: base_tokenator{d_delimiter}, d_strings(d_strings), d_tokens(d_tokens), token_count(token_count),
d_repls(d_repls), repl_count(repl_count), d_offsets(d_offsets), bcompute_size_only(bcompute_size_only),
d_buffer(d_buffer), d_indexes(d_indexes) {}
//
__device__ void operator()(unsigned int idx)
{
if( !bcompute_size_only )
{
d_indexes[idx].first = nullptr; // initialize to
d_indexes[idx].second = 0; // null string
}
custring_view* dstr = d_strings[idx];
if( !dstr )
return;
char* sptr = dstr->data(); // input buffer
char* buffer = nullptr; // output buffer
if( !bcompute_size_only )
buffer = d_buffer + d_offsets[idx];
char* optr = buffer; // running output pointer
int nbytes = dstr->size(), nchars = dstr->chars_count();
int lpos = 0, spos = 0, epos = nchars;
bool spaces = true;
auto itr = dstr->begin();
while( next_token(dstr,spaces,itr,spos,epos) )
{
int spos_bo = dstr->byte_offset_for(spos); // convert char pos
int epos_bo = dstr->byte_offset_for(epos); // to byte offset
// check against all the tokens
for( int tidx=0; tidx < token_count; ++tidx )
{
custring_view* d_token = d_tokens[tidx];
int length = epos_bo - spos_bo;
if( d_token &&
(length==d_token->size()) &&
(d_token->compare(dstr->data()+spos_bo,length)==0) )
{
custring_view* d_repl = (repl_count==1 ? d_repls[0] : d_repls[tidx]);
nbytes += (d_repl ? d_repl->size():0) - length;
if( !bcompute_size_only )
{
copy_and_incr(optr,sptr+lpos,spos_bo-lpos);
if( d_repl )
copy_and_incr(optr,d_repl->data(),d_repl->size());
lpos = epos_bo;
}
itr = custring_view::iterator(*dstr,epos);
break;
}
}
spos = epos + 1;
epos = nchars;
itr++;
}
// set result
if( bcompute_size_only )
d_offsets[idx] = nbytes;
else
{
memcpy( optr, sptr+lpos, dstr->size()-lpos );
d_indexes[idx].first = buffer;
d_indexes[idx].second = nbytes;
}
}
};
NVStrings* NVText::replace_tokens(NVStrings& strs, NVStrings& tgts, NVStrings& repls, const char* delimiter)
{
if( strs.size()==0 || tgts.size()==0 )
return strs.copy();
if( (repls.size() > 1) && (repls.size()!=tgts.size()) )
throw std::runtime_error("replace-tokens tokens and replacements must have the same number of strings");
auto execpol = rmm::exec_policy(0);
// go get the strings for all the parameters
unsigned int count = strs.size();
rmm::device_vector<custring_view*> strings(count,nullptr);
custring_view** d_strings = strings.data().get();
strs.create_custring_index(d_strings);
unsigned int token_count = tgts.size();
rmm::device_vector<custring_view*> tokens(token_count,nullptr);
custring_view** d_tokens = tokens.data().get();
tgts.create_custring_index(d_tokens);
unsigned int repl_count = repls.size();
rmm::device_vector<custring_view*> repl_strings(repl_count,nullptr);
custring_view** d_repls = repl_strings.data().get();
repls.create_custring_index(d_repls);
custring_view* d_delimiter = custring_from_host(delimiter);
rmm::device_vector<size_t> offsets(count,0);
size_t* d_offsets = offsets.data().get();
rmm::device_vector< thrust::pair<const char*,size_t> > indexes(count);
thrust::pair<const char*,size_t>* d_indexes = indexes.data().get();
char* d_buffer = nullptr;
// calculate size of the output, allocate and then do the operation
enum scan_and_operate { scan, operate };
auto op = scan;
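// Two-pass pattern: the first pass only records each output string's size in
// d_offsets; the total is then reduced to size one output buffer, an exclusive
// scan turns the per-string sizes into byte offsets, and the second pass copies
// the edited strings into the buffer and fills d_indexes with (pointer,length).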
while(true)
{
// 1st pass just calculates; 2nd pass will do the replace
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
replace_tokens_fn{d_strings, d_tokens, token_count, d_repls, repl_count, d_delimiter, d_offsets, (op==scan), d_buffer, d_indexes} );
if( op==operate )
break; // done after 2nd pass
op = operate;
// allocate memory for the output
size_t buffer_size = thrust::reduce(execpol->on(0), d_offsets, d_offsets+count);
if( buffer_size==0 )
return nullptr;
d_buffer = device_alloc<char>(buffer_size,0);
// convert lengths to offsets
thrust::exclusive_scan( execpol->on(0), offsets.begin(), offsets.end(), offsets.begin() );
}
return NVStrings::create_from_index((std::pair<const char*,size_t>*)d_indexes,count);
}
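// A minimal sketch of the two-pass "compute sizes, then build" pattern used by
// replace_tokens() above (and by normalize_spaces() below): pass 1 fills the
// per-string byte counts, the counts are reduced to size one output buffer and
// turned into offsets with an exclusive scan, and pass 2 re-runs the same
// functor to write each result at its offset. Names here are illustrative only;
// the helper assumes the thrust/rmm facilities already used in this file.
static void two_pass_pattern_sketch()
{
    unsigned int count = 4;
    rmm::device_vector<size_t> sizes(count,8);   // stands in for pass 1: each string needs 8 bytes
    size_t total = thrust::reduce(sizes.begin(), sizes.end());
    char* d_buffer = device_alloc<char>(total,0);             // one buffer for all results
    rmm::device_vector<size_t> offsets(count,0);
    thrust::exclusive_scan(sizes.begin(), sizes.end(), offsets.begin()); // lengths -> offsets
    // pass 2 would re-run the same functor with d_buffer and the offsets filled in
    RMM_FREE(d_buffer,0);
}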
// Kernel operator for normalizing whitespace
struct normalize_spaces_fn : base_tokenator
{
custring_view_array d_strings;
size_t* d_offsets;
bool bcompute_size_only{true};
char* d_buffer;
thrust::pair<const char*,size_t>* d_indexes;
normalize_spaces_fn( custring_view_array d_strings, size_t* d_offsets,
bool bcompute_size_only=true, char* d_buffer=nullptr,
thrust::pair<const char*,size_t>* d_indexes=nullptr)
: d_strings(d_strings), d_offsets(d_offsets), bcompute_size_only(bcompute_size_only),
d_buffer(d_buffer), d_indexes(d_indexes) {}
//
__device__ void operator()(unsigned int idx)
{
if( !bcompute_size_only )
{
d_indexes[idx].first = nullptr; // initialize to
d_indexes[idx].second = 0; // null string
}
custring_view* dstr = d_strings[idx];
if( !dstr )
return;
char* sptr = dstr->data(); // input buffer
char* buffer = nullptr; // output buffer
if( !bcompute_size_only )
buffer = d_buffer + d_offsets[idx];
char* optr = buffer; // running output pointer
int nbytes = 0, spos = 0, epos = dstr->chars_count();
bool spaces = true;
auto itr = dstr->begin();
while( next_token(dstr,spaces,itr,spos,epos) )
{
int spos_bo = dstr->byte_offset_for(spos); // convert char pos
int epos_bo = dstr->byte_offset_for(epos); // to byte offset
nbytes += epos_bo - spos_bo + 1; // include space per token
if( !bcompute_size_only )
{
if( optr != buffer )
copy_and_incr(optr,(char*)" ",1); // add just one space
copy_and_incr(optr,sptr+spos_bo,epos_bo-spos_bo); // copy token
}
spos = epos + 1;
epos = dstr->chars_count();
itr++; // skip the first whitespace
}
// set result (remove extra space for last token)
if( bcompute_size_only )
d_offsets[idx] = (nbytes ? nbytes-1:0);
else
{
d_indexes[idx].first = buffer;
d_indexes[idx].second = (nbytes ? nbytes-1:0);
}
}
};
// Replaces a run of whitespace with a single space character.
// Also trims whitespace from the beginning and end of each string.
NVStrings* NVText::normalize_spaces(NVStrings& strs)
{
if( strs.size()==0 )
return strs.copy();
auto execpol = rmm::exec_policy(0);
// go get the strings for all the parameters
unsigned int count = strs.size();
rmm::device_vector<custring_view*> strings(count,nullptr);
custring_view** d_strings = strings.data().get();
strs.create_custring_index(d_strings);
// create working variables/memory
rmm::device_vector<size_t> offsets(count,0);
size_t* d_offsets = offsets.data().get();
rmm::device_vector< thrust::pair<const char*,size_t> > indexes(count);
thrust::pair<const char*,size_t>* d_indexes = indexes.data().get();
char* d_buffer = nullptr;
// calculate size of the output, allocate and then do the operation
enum scan_and_operate { scan, operate };
auto op = scan;
while(true)
{
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
normalize_spaces_fn{d_strings, d_offsets, (op==scan), d_buffer, d_indexes} );
if( op==operate )
break; // done after 2nd pass
op = operate;
// allocate memory for the output
size_t buffer_size = thrust::reduce(execpol->on(0), d_offsets, d_offsets+count);
if( buffer_size==0 )
return nullptr;
d_buffer = device_alloc<char>(buffer_size,0);
// convert lengths to offsets
thrust::exclusive_scan( execpol->on(0), offsets.begin(), offsets.end(), offsets.begin() );
}
return NVStrings::create_from_index((std::pair<const char*,size_t>*)d_indexes,count);
}
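// A minimal host-side usage sketch for normalize_spaces(): runs of whitespace
// collapse to a single space and leading/trailing whitespace is dropped, so
// "  the\t quick  brown  fox " becomes "the quick brown fox". The
// create_from_array/destroy calls are the usual NVStrings entry points and are
// assumptions here; adjust them if the local API differs.
static void normalize_spaces_example()
{
    const char* data[] = { "  the\t quick  brown  fox " };
    NVStrings* strs = NVStrings::create_from_array(data, 1); // assumed factory
    NVStrings* result = NVText::normalize_spaces(*strs);     // "the quick brown fox"
    NVStrings::destroy(result);                              // assumed cleanup
    NVStrings::destroy(strs);
}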
| 878478fb88faec5266201ae25084462b8dd866c5.cu | /*
* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuda_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/for_each.h>
#include <thrust/extrema.h>
#include <thrust/remove.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#include <thrust/unique.h>
#include <rmm/rmm.h>
#include <rmm/thrust_rmm_allocator.h>
#include <utilities/error_utils.hpp>
#include "nvstrings/NVStrings.h"
#include "nvstrings/NVText.h"
#include "../custring_view.cuh"
#include "../custring.cuh"
#include "../util.h"
// This base class walks a string looking for specified delimiter character(s).
// It will automatically ignore adjacent delimiters (different than split).
// The next_token method returns character start position (spos) and end
// position (epos) between delimiter runs identifying each token.
// An iterator is used to retrieve each utf8 character to be checked.
// The spaces parameter identifies a run of delimiters (or not delimiters).
struct base_tokenator
{
custring_view* d_delimiter{nullptr};
__device__ bool is_delimiter(Char ch)
{
if( !d_delimiter )
return (ch <= ' '); // all ascii whitespace
return d_delimiter->find(ch)>=0;
}
__device__ bool next_token( custring_view* dstr, bool& spaces, custring_view::iterator& itr, int& spos, int& epos )
{
if( spos >= dstr->chars_count() )
return false;
for( ; itr != dstr->end(); ++itr )
{
Char ch = *itr;
if( spaces == is_delimiter(ch) )
{
if( spaces )
spos = itr.position()+1;
else
epos = itr.position()+1;
continue;
}
spaces = !spaces;
if( spaces )
{
epos = itr.position();
break;
}
}
return spos < epos;
}
};
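// Worked example for the tokenator above: for "  aa   bb" with the default
// whitespace rule, next_token() yields (spos,epos) = (2,4) for "aa" and (7,9)
// for "bb"; the adjacent delimiters in between produce no empty tokens. The
// host-only helper below mirrors that behaviour on a plain char buffer purely
// for reference -- it is an illustration, not the device code path.
static size_t count_tokens_host_sketch(const char* s)
{
    size_t tokens = 0;
    bool in_token = false;
    for( ; *s; ++s )
    {
        bool delim = (*s <= ' ');           // same default rule as is_delimiter()
        if( !delim && !in_token )
            ++tokens;                       // a new token starts here
        in_token = !delim;
    }
    return tokens; // count_tokens_host_sketch("  aa   bb") == 2
}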
//
struct tokenize_fn : base_tokenator
{
custring_view_array d_strings;
size_t* d_counts;
size_t* d_offsets;
thrust::pair<const char*,size_t>* d_tokens;
tokenize_fn( custring_view_array d_strings, custring_view* d_delimiter, size_t* d_counts, size_t* d_offsets=nullptr, thrust::pair<const char*,size_t>* d_tokens=nullptr )
: base_tokenator{d_delimiter}, d_strings(d_strings), d_counts(d_counts), d_offsets(d_offsets), d_tokens(d_tokens) {}
__device__ void operator()(unsigned int idx)
{
custring_view* dstr = d_strings[idx];
if( !dstr )
return;
thrust::pair<const char*,size_t>* dstr_tokens = nullptr;
if( d_tokens )
{
if( d_counts[idx]==0 )
return;
dstr_tokens = d_tokens + d_offsets[idx];
}
bool spaces = true;
int nchars = dstr->chars_count();
int spos = 0, epos = nchars, tidx = 0;
auto itr = dstr->begin();
while( next_token(dstr,spaces,itr,spos,epos) )
{
if( dstr_tokens )
{
int spos_bo = dstr->byte_offset_for(spos); // convert char pos
int epos_bo = dstr->byte_offset_for(epos); // to byte offset
dstr_tokens[tidx].first = dstr->data() + spos_bo;
dstr_tokens[tidx].second = (epos_bo-spos_bo);
}
spos = epos + 1;
epos = nchars;
++itr;
++tidx;
}
d_counts[idx] = tidx;
}
};
NVStrings* NVText::tokenize(NVStrings& strs, const char* delimiter)
{
auto execpol = rmm::exec_policy(0);
custring_view* d_delimiter = custring_from_host(delimiter);
unsigned int count = strs.size();
rmm::device_vector<custring_view*> strings(count,nullptr);
custring_view** d_strings = strings.data().get();
strs.create_custring_index(d_strings);
// count how many tokens in each string
rmm::device_vector<size_t> counts(count,0);
size_t* d_counts = counts.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
tokenize_fn(d_strings,d_delimiter,d_counts));
// compute the total number of tokens
size_t tokens_count = thrust::reduce(execpol->on(0), counts.begin(), counts.end());
// create token-index offsets
rmm::device_vector<size_t> offsets(count,0);
size_t* d_offsets = offsets.data().get();
thrust::exclusive_scan( execpol->on(0), counts.begin(), counts.end(), offsets.begin() );
// build a list of pointers to each token
rmm::device_vector< thrust::pair<const char*,size_t> > tokens(tokens_count);
thrust::pair<const char*,size_t>* d_tokens = tokens.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
tokenize_fn(d_strings, d_delimiter, d_counts, d_offsets, d_tokens));
//
RMM_FREE(d_delimiter,0);
// build strings object from tokens elements
return NVStrings::create_from_index((std::pair<const char*,size_t>*)d_tokens,tokens_count);
}
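// A minimal usage sketch for tokenize(): every whitespace-delimited token of
// every string is flattened into one new NVStrings instance, so
// ["the quick fox","jumped"] becomes ["the","quick","fox","jumped"]. The
// delimiter argument is left at its default (whitespace), as the overload below
// also does; the create_from_array/destroy calls are assumed NVStrings entry points.
static void tokenize_example()
{
    const char* data[] = { "the quick fox", "jumped" };
    NVStrings* strs = NVStrings::create_from_array(data, 2); // assumed factory
    NVStrings* tokens = NVText::tokenize(*strs);             // 4 strings
    NVStrings::destroy(tokens);
    NVStrings::destroy(strs);
}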
// same but with multiple delimiters
NVStrings* NVText::tokenize(NVStrings& strs, NVStrings& delims)
{
unsigned int delims_count = delims.size();
if( delims_count==0 )
return NVText::tokenize(strs);
auto execpol = rmm::exec_policy(0);
rmm::device_vector<custring_view*> delimiters(delims_count,nullptr);
custring_view** d_delimiters = delimiters.data().get();
delims.create_custring_index(d_delimiters);
unsigned int count = strs.size();
rmm::device_vector<custring_view*> strings(count,nullptr);
custring_view** d_strings = strings.data().get();
strs.create_custring_index(d_strings);
// count how many tokens in each string
rmm::device_vector<size_t> counts(count,0);
size_t* d_counts = counts.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_delimiters, delims_count, d_counts] __device__(unsigned int idx){
custring_view* d_string = d_strings[idx];
if( !d_string )
return;
int tokens = 1;
const char* sptr = d_string->data();
const char* eptr = sptr + d_string->size();
while( sptr < eptr )
{
int incr = 1;
for( int didx=0; didx < delims_count; ++didx )
{
custring_view* d_delim = d_delimiters[didx];
if( !d_delim || d_delim->empty() )
continue;
if( d_delim->compare(sptr,d_delim->size()) !=0 )
continue;
++tokens;
incr = d_delim->size();
break;
}
sptr += incr;
}
d_counts[idx] = tokens;
});
// compute the total number of tokens
size_t tokens_count = thrust::reduce(execpol->on(0), counts.begin(), counts.end());
// create token-index offsets
rmm::device_vector<size_t> offsets(count,0);
thrust::exclusive_scan( execpol->on(0), counts.begin(), counts.end(), offsets.begin() );
size_t* d_offsets = offsets.data().get();
// build a list of pointers to each token
rmm::device_vector< thrust::pair<const char*,size_t> > tokens(tokens_count);
thrust::pair<const char*,size_t>* d_tokens = tokens.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_delimiters, delims_count, d_counts, d_offsets, d_tokens] __device__(unsigned int idx) {
custring_view* d_string = d_strings[idx];
if( !d_string )
return;
size_t token_count = d_counts[idx];
if( token_count==0 )
return;
auto dstr_tokens = d_tokens + d_offsets[idx];
const char* data = d_string->data();
const char* sptr = data;
auto size = d_string->size();
const char* eptr = sptr + size;
int spos = 0, tidx = 0;
while( sptr < eptr )
{
int incr = 1;
for( int didx=0; didx < delims_count; ++didx )
{
custring_view* d_delim = d_delimiters[didx];
if( !d_delim || d_delim->empty() )
continue;
if( d_delim->compare(sptr,d_delim->size()) !=0 )
continue;
// found delimiter
dstr_tokens[tidx].first = data + spos;
dstr_tokens[tidx].second = ((sptr - data) - spos);
++tidx;
incr = d_delim->size();
spos = (sptr - data) + incr;
break;
}
sptr += incr;
}
if( (tidx < token_count) && (spos < size) )
{
dstr_tokens[tidx].first = data + spos;
dstr_tokens[tidx].second = size - spos;
}
});
// remove any empty strings -- occurs if two delimiters are next to each other
auto end = thrust::remove_if(execpol->on(0), d_tokens, d_tokens + tokens_count,
[] __device__ ( thrust::pair<const char*,size_t> w ) { return w.second==0; } );
unsigned int nsize = (unsigned int)(end - d_tokens); // new token count
//
// build strings object from tokens elements
return NVStrings::create_from_index((std::pair<const char*,size_t>*)d_tokens,nsize);
}
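// A minimal sketch for the multi-delimiter overload above: with delimiters
// {",", ";"} the string "a,b;;c" yields "a","b","c" -- the empty token between
// the two ';' characters is dropped by the remove_if step. The
// create_from_array/destroy calls are assumed NVStrings entry points.
static void tokenize_multi_delim_example()
{
    const char* data[]   = { "a,b;;c" };
    const char* delims[] = { ",", ";" };
    NVStrings* strs = NVStrings::create_from_array(data, 1);   // assumed factory
    NVStrings* seps = NVStrings::create_from_array(delims, 2); // assumed factory
    NVStrings* tokens = NVText::tokenize(*strs, *seps);        // "a","b","c"
    NVStrings::destroy(tokens);
    NVStrings::destroy(seps);
    NVStrings::destroy(strs);
}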
//
NVStrings* NVText::unique_tokens(NVStrings& strs, const char* delimiter)
{
auto execpol = rmm::exec_policy(0);
custring_view* d_delimiter = custring_from_host(delimiter);
unsigned int count = strs.size();
rmm::device_vector<custring_view*> strings(count,nullptr);
custring_view** d_strings = strings.data().get();
strs.create_custring_index(d_strings);
// count how many tokens in each string
rmm::device_vector<size_t> counts(count,0);
size_t* d_counts = counts.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
tokenize_fn(d_strings,d_delimiter,d_counts));
// compute the total number of tokens
size_t tokens_count = thrust::reduce(execpol->on(0), counts.begin(), counts.end());
// create token-index offsets
rmm::device_vector<size_t> offsets(count,0);
size_t* d_offsets = offsets.data().get();
thrust::exclusive_scan( execpol->on(0), counts.begin(), counts.end(), offsets.begin() );
// build a list of pointers to each token
rmm::device_vector< thrust::pair<const char*,size_t> > tokens(tokens_count);
thrust::pair<const char*,size_t>* d_tokens = tokens.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
tokenize_fn(d_strings, d_delimiter, d_counts, d_offsets, d_tokens));
//
RMM_FREE(d_delimiter,0);
thrust::sort( execpol->on(0), d_tokens, d_tokens + tokens_count,
[] __device__ ( thrust::pair<const char*,size_t>& lhs, thrust::pair<const char*,size_t>& rhs) {
return custr::compare(lhs.first,(unsigned)lhs.second,rhs.first,(unsigned)rhs.second)<0;
});
thrust::pair<const char*,size_t>* newend = thrust::unique(execpol->on(0), d_tokens, d_tokens + tokens_count,
[] __device__ ( thrust::pair<const char*,size_t> lhs, thrust::pair<const char*,size_t> rhs ) {
return custr::compare(lhs.first,(unsigned int)lhs.second,rhs.first,(unsigned int)rhs.second)==0;
});
unsigned int newsize = (unsigned int)(newend - d_tokens); // new size
return NVStrings::create_from_index((std::pair<const char*,size_t>*)d_tokens,newsize);
}
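// A minimal usage sketch for unique_tokens(): tokenize, then sort and unique on
// the device, so ["a b a","b c"] yields the three strings "a","b","c". The
// create_from_array/destroy calls are assumed NVStrings entry points.
static void unique_tokens_example()
{
    const char* data[] = { "a b a", "b c" };
    NVStrings* strs = NVStrings::create_from_array(data, 2); // assumed factory
    NVStrings* uniq = NVText::unique_tokens(*strs, " ");     // "a","b","c"
    NVStrings::destroy(uniq);
    NVStrings::destroy(strs);
}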
// Your basic token counter
struct nvtext_token_counter : base_tokenator
{
custring_view_array d_strings;
unsigned int* d_counts;
//
nvtext_token_counter( custring_view_array d_strings, custring_view* d_delimiter, unsigned int* d_counts )
: base_tokenator{d_delimiter}, d_strings(d_strings), d_counts(d_counts) {}
//
__device__ void operator()(unsigned int idx)
{
custring_view* dstr = d_strings[idx];
unsigned int token_count = 0;
if( dstr )
{
bool spaces = true;
int nchars = dstr->chars_count();
int spos = 0, epos = nchars;
auto itr = dstr->begin();
while( next_token(dstr,spaces,itr,spos,epos) )
{
++token_count;
spos = epos + 1; // setup
epos = nchars; // for next
++itr; // token
}
}
d_counts[idx] = token_count;
}
};
// return a count of the number of tokens for each string when applying the specified delimiter
unsigned int NVText::token_count( NVStrings& strs, const char* delimiter, unsigned int* results, bool bdevmem )
{
auto execpol = rmm::exec_policy(0);
custring_view* d_delimiter = custring_from_host(delimiter);
unsigned int count = strs.size();
unsigned int* d_counts = results;
if( !bdevmem )
d_counts = device_alloc<unsigned int>(count,0);
// count how many strings per string
rmm::device_vector<custring_view*> strings(count,nullptr);
custring_view** d_strings = strings.data().get();
strs.create_custring_index(d_strings);
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
nvtext_token_counter{d_strings,d_delimiter,d_counts});
//
if( !bdevmem )
{
CUDA_TRY( cudaMemcpyAsync(results,d_counts,count*sizeof(unsigned int),cudaMemcpyDeviceToHost))
RMM_FREE(d_counts,0);
}
RMM_FREE(d_delimiter,0);
return 0;
}
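// A minimal usage sketch for token_count(): with bdevmem=false the per-string
// token counts land in ordinary host memory. A single space is passed
// explicitly as the delimiter here; adjacent delimiters still yield no empty
// tokens. The create_from_array/destroy calls are assumed NVStrings entry points.
static void token_count_example()
{
    const char* data[] = { "a b  c", "", "hello" };
    NVStrings* strs = NVStrings::create_from_array(data, 3); // assumed factory
    unsigned int counts[3] = {0,0,0};
    NVText::token_count(*strs, " ", counts, false);          // counts -> {3,0,1}
    NVStrings::destroy(strs);
}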
//
struct tokens_counts_fn : base_tokenator
{
custring_view_array d_strings;
custring_view_array d_tokens;
unsigned int token_count;
int* d_token_indexes;
unsigned int* d_results;
tokens_counts_fn( custring_view_array d_strings, custring_view_array d_tokens, unsigned int token_count,
int* d_token_indexes, custring_view* d_delimiter, unsigned int* d_results )
: base_tokenator{d_delimiter}, d_strings(d_strings), d_tokens(d_tokens), token_count(token_count),
d_token_indexes(d_token_indexes), d_results(d_results) {}
__device__ int match_tokens( custring_view* dstr, int spos_bo, int epos_bo )
{
int length = epos_bo - spos_bo;
for( int tidx=0; tidx < token_count; ++tidx )
{
custring_view* d_token = d_tokens[tidx];
if( d_token &&
(length==d_token->size()) &&
(d_token->compare(dstr->data()+spos_bo,length)==0) )
{
return tidx;
}
}
return -1;
}
__device__ int match_sorted_tokens( custring_view* dstr, int spos_bo, int epos_bo )
{
int left = 0, right = token_count -1;
int length = epos_bo - spos_bo;
while( left <= right )
{
int tidx = (left + right)/2;
custring_view* d_token = d_tokens[tidx];
int cmp = (d_token ? d_token->compare(dstr->data()+spos_bo,length) : -1);
if( cmp < 0 )
left = tidx + 1;
else if( cmp > 0 )
right = tidx - 1;
else
return d_token_indexes[tidx];
}
return -1;
}
__device__ void operator()(unsigned int idx)
{
custring_view* dstr = d_strings[idx];
unsigned int* dresult = d_results + (idx*token_count);
// initialize to zero
for( int tidx=0; tidx < token_count; ++tidx )
dresult[tidx] = 0;
if( !dstr )
return;
bool spaces = true;
int nchars = dstr->chars_count();
int spos = 0, epos = nchars;
auto itr = dstr->begin();
while( next_token(dstr,spaces,itr,spos,epos) )
{
int spos_bo = dstr->byte_offset_for(spos); // convert char pos
int epos_bo = dstr->byte_offset_for(epos); // to byte offset
// check against all the tokens
int tidx = match_sorted_tokens(dstr,spos_bo,epos_bo);
if( tidx >= 0 )
++dresult[tidx];
spos = epos + 1;
epos = nchars;
++itr;
}
}
};
unsigned int NVText::tokens_counts( NVStrings& strs, NVStrings& tkns, const char* delimiter, unsigned int* results, bool todevice )
{
unsigned int count = strs.size();
unsigned int tcount = tkns.size();
if( results==0 || count==0 || tcount==0 )
return 0;
//
auto execpol = rmm::exec_policy(0);
unsigned int* d_results = results;
if( !todevice )
d_results = device_alloc<unsigned int>(tcount*count,0);
custring_view* d_delimiter = custring_from_host(delimiter);
// get the strings
rmm::device_vector<custring_view*> strings(count,nullptr);
rmm::device_vector<custring_view*> tokens(tcount,nullptr);
custring_view** d_strings = strings.data().get();
custring_view** d_tokens = tokens.data().get();
strs.create_custring_index(d_strings);
tkns.create_custring_index(d_tokens);
// sort the tokens
rmm::device_vector<int> token_indexes(tcount);
thrust::sequence(execpol->on(0), token_indexes.begin(), token_indexes.end());
int* d_token_indexes = token_indexes.data().get();
thrust::sort_by_key(execpol->on(0), d_tokens, d_tokens+tcount, d_token_indexes,
[] __device__( custring_view*& lhs, custring_view*& rhs ) {
if( lhs==0 || rhs==0 )
return (rhs!=0); // null < non-null
return lhs->compare(*rhs)<0;
});
// count the tokens
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
tokens_counts_fn(d_strings, d_tokens, tcount, d_token_indexes, d_delimiter, d_results) );
// done
if( !todevice )
{ // copy result back to host
CUDA_TRY( cudaMemcpyAsync(results,d_results,sizeof(unsigned int)*count*tcount,cudaMemcpyDeviceToHost))
RMM_FREE(d_results,0);
}
return 0;
}
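// A minimal usage sketch for tokens_counts(): the output is a row-major
// count x tcount matrix, results[i*tcount + j] being how often token j of
// `tkns` occurs in string i of `strs`. The sort_by_key above lets
// match_sorted_tokens() binary-search the token list while d_token_indexes maps
// each hit back to the caller's original column. The create_from_array/destroy
// calls are assumed NVStrings entry points.
static void tokens_counts_example()
{
    const char* data[]  = { "a b a", "b c" };
    const char* vocab[] = { "a", "b" };
    NVStrings* strs = NVStrings::create_from_array(data, 2);    // assumed factory
    NVStrings* tkns = NVStrings::create_from_array(vocab, 2);   // assumed factory
    unsigned int counts[4] = {0,0,0,0};
    NVText::tokens_counts(*strs, *tkns, " ", counts, false);    // {2,1, 0,1}
    NVStrings::destroy(tkns);
    NVStrings::destroy(strs);
}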
struct replace_tokens_fn : base_tokenator
{
custring_view_array d_strings;
custring_view_array d_tokens;
unsigned int token_count;
custring_view_array d_repls;
unsigned int repl_count;
custring_view* d_delimiter;
size_t* d_offsets;
bool bcompute_size_only;
char* d_buffer;
thrust::pair<const char*,size_t>* d_indexes;
replace_tokens_fn( custring_view_array d_strings, custring_view_array d_tokens, unsigned int token_count,
custring_view_array d_repls, unsigned int repl_count, custring_view* d_delimiter,
size_t* d_offsets, bool bcompute_size_only=true, char* d_buffer=nullptr,
thrust::pair<const char*,size_t>* d_indexes=nullptr )
: base_tokenator{d_delimiter}, d_strings(d_strings), d_tokens(d_tokens), token_count(token_count),
d_repls(d_repls), repl_count(repl_count), d_offsets(d_offsets), bcompute_size_only(bcompute_size_only),
d_buffer(d_buffer), d_indexes(d_indexes) {}
//
__device__ void operator()(unsigned int idx)
{
if( !bcompute_size_only )
{
d_indexes[idx].first = nullptr; // initialize to
d_indexes[idx].second = 0; // null string
}
custring_view* dstr = d_strings[idx];
if( !dstr )
return;
char* sptr = dstr->data(); // input buffer
char* buffer = nullptr; // output buffer
if( !bcompute_size_only )
buffer = d_buffer + d_offsets[idx];
char* optr = buffer; // running output pointer
int nbytes = dstr->size(), nchars = dstr->chars_count();
int lpos = 0, spos = 0, epos = nchars;
bool spaces = true;
auto itr = dstr->begin();
while( next_token(dstr,spaces,itr,spos,epos) )
{
int spos_bo = dstr->byte_offset_for(spos); // convert char pos
int epos_bo = dstr->byte_offset_for(epos); // to byte offset
// check against all the tokens
for( int tidx=0; tidx < token_count; ++tidx )
{
custring_view* d_token = d_tokens[tidx];
int length = epos_bo - spos_bo;
if( d_token &&
(length==d_token->size()) &&
(d_token->compare(dstr->data()+spos_bo,length)==0) )
{
custring_view* d_repl = (repl_count==1 ? d_repls[0] : d_repls[tidx]);
nbytes += (d_repl ? d_repl->size():0) - length;
if( !bcompute_size_only )
{
copy_and_incr(optr,sptr+lpos,spos_bo-lpos);
if( d_repl )
copy_and_incr(optr,d_repl->data(),d_repl->size());
lpos = epos_bo;
}
itr = custring_view::iterator(*dstr,epos);
break;
}
}
spos = epos + 1;
epos = nchars;
itr++;
}
// set result
if( bcompute_size_only )
d_offsets[idx] = nbytes;
else
{
memcpy( optr, sptr+lpos, dstr->size()-lpos );
d_indexes[idx].first = buffer;
d_indexes[idx].second = nbytes;
}
}
};
NVStrings* NVText::replace_tokens(NVStrings& strs, NVStrings& tgts, NVStrings& repls, const char* delimiter)
{
if( strs.size()==0 || tgts.size()==0 )
return strs.copy();
if( (repls.size() > 1) && (repls.size()!=tgts.size()) )
throw std::runtime_error("replace-tokens tokens and replacements must have the same number of strings");
auto execpol = rmm::exec_policy(0);
// go get the strings for all the parameters
unsigned int count = strs.size();
rmm::device_vector<custring_view*> strings(count,nullptr);
custring_view** d_strings = strings.data().get();
strs.create_custring_index(d_strings);
unsigned int token_count = tgts.size();
rmm::device_vector<custring_view*> tokens(token_count,nullptr);
custring_view** d_tokens = tokens.data().get();
tgts.create_custring_index(d_tokens);
unsigned int repl_count = repls.size();
rmm::device_vector<custring_view*> repl_strings(repl_count,nullptr);
custring_view** d_repls = repl_strings.data().get();
repls.create_custring_index(d_repls);
custring_view* d_delimiter = custring_from_host(delimiter);
rmm::device_vector<size_t> offsets(count,0);
size_t* d_offsets = offsets.data().get();
rmm::device_vector< thrust::pair<const char*,size_t> > indexes(count);
thrust::pair<const char*,size_t>* d_indexes = indexes.data().get();
char* d_buffer = nullptr;
// calculate size of the output, allocate and then do the operation
enum scan_and_operate { scan, operate };
auto op = scan;
while(true)
{
// 1st pass just calculates; 2nd pass will do the replace
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
replace_tokens_fn{d_strings, d_tokens, token_count, d_repls, repl_count, d_delimiter, d_offsets, (op==scan), d_buffer, d_indexes} );
if( op==operate )
break; // done after 2nd pass
op = operate;
// allocate memory for the output
size_t buffer_size = thrust::reduce(execpol->on(0), d_offsets, d_offsets+count);
if( buffer_size==0 )
return nullptr;
d_buffer = device_alloc<char>(buffer_size,0);
// convert lengths to offsets
thrust::exclusive_scan( execpol->on(0), offsets.begin(), offsets.end(), offsets.begin() );
}
return NVStrings::create_from_index((std::pair<const char*,size_t>*)d_indexes,count);
}
// Kernel operator for normalizing whitespace
struct normalize_spaces_fn : base_tokenator
{
custring_view_array d_strings;
size_t* d_offsets;
bool bcompute_size_only{true};
char* d_buffer;
thrust::pair<const char*,size_t>* d_indexes;
normalize_spaces_fn( custring_view_array d_strings, size_t* d_offsets,
bool bcompute_size_only=true, char* d_buffer=nullptr,
thrust::pair<const char*,size_t>* d_indexes=nullptr)
: d_strings(d_strings), d_offsets(d_offsets), bcompute_size_only(bcompute_size_only),
d_buffer(d_buffer), d_indexes(d_indexes) {}
//
__device__ void operator()(unsigned int idx)
{
if( !bcompute_size_only )
{
d_indexes[idx].first = nullptr; // initialize to
d_indexes[idx].second = 0; // null string
}
custring_view* dstr = d_strings[idx];
if( !dstr )
return;
char* sptr = dstr->data(); // input buffer
char* buffer = nullptr; // output buffer
if( !bcompute_size_only )
buffer = d_buffer + d_offsets[idx];
char* optr = buffer; // running output pointer
int nbytes = 0, spos = 0, epos = dstr->chars_count();
bool spaces = true;
auto itr = dstr->begin();
while( next_token(dstr,spaces,itr,spos,epos) )
{
int spos_bo = dstr->byte_offset_for(spos); // convert char pos
int epos_bo = dstr->byte_offset_for(epos); // to byte offset
nbytes += epos_bo - spos_bo + 1; // include space per token
if( !bcompute_size_only )
{
if( optr != buffer )
copy_and_incr(optr,(char*)" ",1); // add just one space
copy_and_incr(optr,sptr+spos_bo,epos_bo-spos_bo); // copy token
}
spos = epos + 1;
epos = dstr->chars_count();
itr++; // skip the first whitespace
}
// set result (remove extra space for last token)
if( bcompute_size_only )
d_offsets[idx] = (nbytes ? nbytes-1:0);
else
{
d_indexes[idx].first = buffer;
d_indexes[idx].second = (nbytes ? nbytes-1:0);
}
}
};
// Replaces a run of whitespace with a single space character.
// Also trims whitespace from the beginning and end of each string.
NVStrings* NVText::normalize_spaces(NVStrings& strs)
{
if( strs.size()==0 )
return strs.copy();
auto execpol = rmm::exec_policy(0);
// go get the strings for all the parameters
unsigned int count = strs.size();
rmm::device_vector<custring_view*> strings(count,nullptr);
custring_view** d_strings = strings.data().get();
strs.create_custring_index(d_strings);
// create working variables/memory
rmm::device_vector<size_t> offsets(count,0);
size_t* d_offsets = offsets.data().get();
rmm::device_vector< thrust::pair<const char*,size_t> > indexes(count);
thrust::pair<const char*,size_t>* d_indexes = indexes.data().get();
char* d_buffer = nullptr;
// calculate size of the output, allocate and then do the operation
enum scan_and_operate { scan, operate };
auto op = scan;
while(true)
{
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
normalize_spaces_fn{d_strings, d_offsets, (op==scan), d_buffer, d_indexes} );
if( op==operate )
break; // done after 2nd pass
op = operate;
// allocate memory for the output
size_t buffer_size = thrust::reduce(execpol->on(0), d_offsets, d_offsets+count);
if( buffer_size==0 )
return nullptr;
d_buffer = device_alloc<char>(buffer_size,0);
// convert lengths to offsets
thrust::exclusive_scan( execpol->on(0), offsets.begin(), offsets.end(), offsets.begin() );
}
return NVStrings::create_from_index((std::pair<const char*,size_t>*)d_indexes,count);
}
|
81666b58ccbd7fb66cb0917d520f3f346280bb45.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void labeling(const char *text, int *pos, int text_size){
int index = threadIdx.x*blockDim.y+threadIdx.y + blockDim.x*blockDim.y*(gridDim.y*blockIdx.x + blockIdx.y);
if (index >= text_size) {
return;
}
pos[index] = 0;
if (text[index] <= ' ')
return ;
for (int k = index; k >= 0; k--) {
if (text[k] <= ' ') {
pos[index] = index - k;
return;
}
}
pos[index] = index+1;
} | 81666b58ccbd7fb66cb0917d520f3f346280bb45.cu | #include "includes.h"
__global__ void labeling(const char *text, int *pos, int text_size){
int index = threadIdx.x*blockDim.y+threadIdx.y + blockDim.x*blockDim.y*(gridDim.y*blockIdx.x + blockIdx.y);
if (index >= text_size) {
return;
}
pos[index] = 0;
if (text[index] <= ' ')
return ;
for (int k = index; k >= 0; k--) {
if (text[k] <= ' ') {
pos[index] = index - k;
return;
}
}
pos[index] = index+1;
} |
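// For each character, labeling() stores its 1-based offset inside its word and
// 0 for whitespace/control characters, scanning left to the previous delimiter:
//   text = "ab cd"  ->  pos = {1,2,0,1,2}
// A minimal launch sketch for device buffers d_text/d_pos follows; any
// grid/block shape works as long as the flattened index above covers text_size.
static void labeling_launch_sketch(const char* d_text, int* d_pos, int text_size)
{
    dim3 block(32, 32);                                    // 1024 threads per block
    int per_block = block.x * block.y;
    dim3 grid((text_size + per_block - 1) / per_block, 1);
    labeling<<<grid, block>>>(d_text, d_pos, text_size);
    cudaDeviceSynchronize();                               // sketch only: just wait
}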
50227969d41d97e33d6c6f526075b8d0d921cfa6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.4.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
December 2013
@generated d Tue Dec 17 13:18:45 2013
*/
#include "common_magma.h"
#define inf_bs 32
#define max_bs 64
/* ====================================================================== */
/* inf-norm */
/* Computes row sums dwork[i] = sum( abs( A(i,:) )), i=0:n-1, for || A ||_inf,
* where n % inf_bs == 0 and A is stored lower.
* Has ceil( n / inf_bs ) blocks of (inf_bs x 4) threads each.
*/
__global__ void
dlansy_inf_kernel_special_l(
int n, const double* A, int lda, double *dwork )
{
#if (__CUDA_ARCH__ >= 200)
int tx = threadIdx.x;
int ty = threadIdx.y;
int ind = blockIdx.x*inf_bs + tx;
double res = 0.;
__shared__ double la[inf_bs][inf_bs+1];
A += ind;
A += ty * lda;
int break_d = blockIdx.x*inf_bs;
// loop over all 32x32 blocks left of the diagonal block
for(int i=0; i < break_d; i += inf_bs ) {
// 32x4 threads cooperatively load 32x32 block
#pragma unroll 8
for(int j=0; j < inf_bs; j += 4) {
la[tx][ty+j] = A[j*lda];
}
__syncthreads();
// compute 4 partial sums of each row
#pragma unroll 8
for(int j=0; j < 8; j++) {
res += fabs( la[tx][j+ty*8] );
}
A += lda*inf_bs;
__syncthreads();
}
// 32x4 threads cooperatively load 32x32 diagonal block
#pragma unroll 8
for(int j=0; j < inf_bs; j += 4)
la[ty+j][tx] = A[j*lda];
A += inf_bs;
__syncthreads();
// symmetrize block
// TODO make diagonal element real
#pragma unroll 8
for(int i=ty*8; i < (1+ty)*inf_bs/4; i++) {
if ( i < tx ) {
la[tx][i] = la[i][tx];
}
else
la[tx][i] = la[tx][i]; // TODO: not needed
}
__syncthreads();
// compute 4 partial sums of each row
#pragma unroll 8
for(int j=0; j < inf_bs/4; j++) {
res += fabs( la[tx][j+ty*8] );
}
break_d += inf_bs;
__syncthreads();
// loop over all 32x32 blocks below diagonal block
for(int i=break_d; i < n; i += inf_bs ) {
// 32x4 threads cooperatively load 32x32 block
#pragma unroll 8
for(int j=0; j < inf_bs; j += 4)
la[ty+j][tx] = A[j*lda];
A += inf_bs;
__syncthreads();
// compute 4 partial sums of each row
#pragma unroll 8
for(int j=0; j < inf_bs/4; j++) {
res += fabs( la[tx][j+ty*8] );
}
__syncthreads();
}
// store partial sums into shared memory
la[tx][ty] = MAGMA_D_MAKE( res, 0. );
__syncthreads();
// 32x1 threads compute final result of each row
if ( ty == 0 ) {
res = res
+ MAGMA_D_REAL( la[tx][1] )
+ MAGMA_D_REAL( la[tx][2] )
+ MAGMA_D_REAL( la[tx][3] );
dwork[ind] = res;
}
#endif /* (__CUDA_ARCH__ >= 200) */
}
/* Computes row sums dwork[i] = sum( abs( A(i,:) )), i=0:n-1, for || A ||_inf,
* where n is any size and A is stored lower */
__global__ void
dlansy_inf_kernel_generic_l(
int n, const double* A, int lda, double *dwork,
int n_full_block, int n_mod_bs )
{
#if (__CUDA_ARCH__ >= 200)
int tx = threadIdx.x;
int ty = threadIdx.y;
int ind = blockIdx.x*inf_bs + tx;
double res = 0.;
__shared__ double la[inf_bs][inf_bs+1];
if ( blockIdx.x == n_full_block ) {
/************************************************************************
-- Last (partial) block --
-- We will do something unusual here
-- Threads past end of matrix (i.e., ind >= n) are redundantly assigned
-- the last row (n-1). At the end, those results are ignored -- only
-- results for ind < n are saved into dwork.
-- For sufficiently large matrix the overhead will be very low
*************************************************************************/
if ( tx < n_mod_bs ) {
A += ( blockIdx.x*inf_bs + tx );
}
else {
A += ( blockIdx.x*inf_bs + n_mod_bs - 1); // redundantly do last row
}
A += ty * lda;
int break_d = blockIdx.x*inf_bs;
/*----------------------------
Go Right
-------------------------------*/
for(int i=0; i < break_d; i += inf_bs ) {
#pragma unroll 8
for(int j=0; j < inf_bs; j += 4) {
la[tx][ty+j] = A[j*lda];
}
__syncthreads();
#pragma unroll 8
for(int j=0; j < 8; j++) {
res += fabs( la[tx][j+ty*8] );
}
A += lda*inf_bs;
__syncthreads();
}
        /* we don't need to make results for rows >= n zero, as those computations will be discarded. */
if ( ty == 0 ) {
/*--------------------------------------------
             this thread computes the triangular part;
             the other threads wait, holding their values.
-----------------------------------------------*/
int j;
int count = 1; // TODO don't need initialization
if ( tx < n_mod_bs )
count = tx;
else
count = n_mod_bs;
for(j=0; j <= count; j++) {
res += fabs( A[j*lda] );
}
A += tx*lda;
count = 1;
for( ; j < n_mod_bs; j++) {
res += fabs( A[count] );
count++;
}
}
__syncthreads();
la[tx][ty]= MAGMA_D_MAKE( res, 0. );
__syncthreads();
/*--------------------------------------------------------
The leader accumulates all the results from his peer.
----------------------------------------------------------*/
if ( ty == 0 ) {
res = res
+ MAGMA_D_REAL( la[tx][1] )
+ MAGMA_D_REAL( la[tx][2] )
+ MAGMA_D_REAL( la[tx][3] );
if ( tx < n_mod_bs )
dwork[ind] = res;
}
}
else {
/*-----------------------------------
-- All the blocks but the last one --
-------------------------------------*/
A += ind;
A += ty * lda;
int break_d = blockIdx.x*inf_bs;
/*----------------------------
Go Right
-------------------------------*/
for(int i=0; i < break_d; i += inf_bs ) {
#pragma unroll 8
for(int j=0; j < inf_bs; j += 4) {
la[tx][ty+j] = A[j*lda];
}
__syncthreads();
#pragma unroll 8
for(int j=0; j < 8; j++) {
res += fabs( la[tx][j+ty*8] );
}
A += lda*inf_bs;
__syncthreads();
}
/*------------------------------------
Diagonal
Copy + Transpose lower triangle
--------------------------------------*/
#pragma unroll 8
for(int j=0; j < inf_bs; j += 4)
la[ty+j][tx] = A[j*lda];
A += inf_bs;
__syncthreads();
/*--------------------------------------------
Mirror Upper Triangle to Lower triangle
---------------------------------------------*/
#pragma unroll 8
for(int i=ty*8; i < (1+ty)*inf_bs/4; i++) {
if ( i < tx ) {
la[tx][i] = la[i][tx];
}
else
la[tx][i] = la[tx][i]; // TODO: not needed
}
__syncthreads();
/*--------------------------------
Do diagonal Computation
-----------------------------------*/
#pragma unroll 8
for(int j=0; j < inf_bs/4; j++) {
res += fabs( la[tx][j+ty*8] );
}
break_d += inf_bs;
__syncthreads();
n -= n_mod_bs;
/*-----------------------------
Go Down
-------------------------------*/
for(int i=break_d; i < n; i += inf_bs ) {
#pragma unroll 8
for(int j=0; j < inf_bs; j += 4)
la[ty+j][tx] = A[j*lda];
A += inf_bs;
__syncthreads();
#pragma unroll 8
for(int j=0; j < inf_bs/4; j++) {
res += fabs( la[tx][j+ty*8] );
}
__syncthreads();
}
/*---------------------------------------------
           handle the n_mod_bs remainder rows here.
           Symmetry gives us a benefit here.
-----------------------------------------------*/
A -= tx;
if ( tx < n_mod_bs ) {
A += tx;
}
else {
A += (n_mod_bs-1); /* Same as above */
}
#pragma unroll 8
for(int j=0; j < inf_bs; j += 4) {
if ( tx < n_mod_bs )
la[ty+j][tx] = A[j*lda]; //MAGMA_D_MUL( MAGMA_D_ONE, A[j*lda] ); // huh? just A[j*lda]?
else
la[ty+j][tx] = MAGMA_D_ZERO; //MAGMA_D_MUL( MAGMA_D_ZERO, A[j*lda] ); // huh? just 0?
}
__syncthreads();
/*----------------------------------------
What about doing some Zeroing here?
instead of zeroing before?
-----------------------------------------*/
#pragma unroll 8
for(int j=0; j < inf_bs/4; j++) {
res += fabs( la[tx][j+ty*8] );
}
__syncthreads();
la[tx][ty] = MAGMA_D_MAKE( res, 0. );
__syncthreads();
/*--------------------------------------------------------
The leader accumulates all the results from his peer.
----------------------------------------------------------*/
if ( ty == 0 ) {
res = res
+ MAGMA_D_REAL( la[tx][1] )
+ MAGMA_D_REAL( la[tx][2] )
+ MAGMA_D_REAL( la[tx][3] );
dwork[ind] = res;
}
}
#endif /* (__CUDA_ARCH__ >= 200) */
}
/* Computes row sums dwork[i] = sum( abs( A(i,:) )), i=0:n-1, for || A ||_inf,
* where n is any size and A is stored upper */
__global__ void
dlansy_inf_kernel_generic_u(
int n, const double* A, int lda, double *dwork,
int n_full_block, int n_mod_bs )
{
#if (__CUDA_ARCH__ >= 200)
int tx = threadIdx.x;
int ty = threadIdx.y;
int ind = blockIdx.x*inf_bs + tx;
double res = 0.;
__shared__ double la[inf_bs][inf_bs+1];
int blockIdxx = blockIdx.x;
if ( blockIdx.x == n_full_block ) {
/************************************************************************
-- Last block --
-- We will do something unusual here
-- For sufficiently large matrix the overhead will be very low
*************************************************************************/
ind = tx;
A += lda*(n-1);
if ( tx < n_mod_bs ) {
A += tx;
}
else {
A += (n_mod_bs - 1);
}
A -= ty * lda;
int break_d = blockIdx.x*inf_bs;
/*----------------------------
Go Right
-------------------------------*/
for(int i=0; i < break_d; i += inf_bs ) {
#pragma unroll 8
for(int j=0; j < inf_bs; j += 4) {
la[tx][ty+j] = A[-j*lda];
}
__syncthreads();
#pragma unroll 8
for(int j=0; j < 8; j++) {
res += fabs( la[tx][j+ty*8] );
}
A -= lda*inf_bs;
__syncthreads();
}
        /* we don't need to zero anything here, as those computations will be discarded. */
if ( ty == 0 ) {
/*--------------------------------------------
             this thread computes the triangular part;
             the other threads wait, holding their values.
-----------------------------------------------*/
int j;
int count = 1;
if ( tx < n_mod_bs )
count = n_mod_bs- tx;
else
count = n_mod_bs;
for(j=0; j < count; j++) {
res += fabs( A[-j*lda] );
}
A -= (count-1)*lda;
count = 1;
for( ; j < n_mod_bs; j++) {
res += fabs( A[-count] );
count++;
}
}
else {
}
__syncthreads();
la[tx][ty] = MAGMA_D_MAKE( res, 0. );
__syncthreads();
/*--------------------------------------------------------
The leader accumulates all the results from his peer.
----------------------------------------------------------*/
if ( ty == 0 ) {
res = res
+ MAGMA_D_REAL( la[tx][1] )
+ MAGMA_D_REAL( la[tx][2] )
+ MAGMA_D_REAL( la[tx][3] );
if ( tx < n_mod_bs )
dwork[ind] = res;
}
}
else {
/*-----------------------------------
-- All the blocks but the last one --
-- By the way this code can be optimized more.
-------------------------------------*/
ind = blockIdx.x*inf_bs + tx + n_mod_bs;
const double *A1 = A;
A += lda*(n-1);
A += ind;
A -= ty * lda;
int break_d = (n/inf_bs - blockIdxx - 1)*inf_bs;
/*----------------------------
Go Left
-------------------------------*/
for(int i=0; i < break_d; i += inf_bs ) {
#pragma unroll 8
for(int j=0; j < inf_bs; j += 4) {
la[tx][ty+j] = A[-j*lda];
}
__syncthreads();
#pragma unroll 8
for(int j=0; j < 8; j++) {
res += fabs( la[tx][j+ty*8] );
}
A -= lda*inf_bs;
__syncthreads();
}
/*------------------------------------
Diagonal
Copy + Transpose lower triangle
--------------------------------------*/
#pragma unroll 8
for(int j=0; j < inf_bs; j += 4) {
la[tx][31-ty-j] = A[ -j * lda];
}
A -= inf_bs;
__syncthreads();
/*--------------------------------------------
Mirror Upper Triangle to Lower triangle
---------------------------------------------*/
#pragma unroll 8
for(int i=ty*8; i < (1+ty)*inf_bs/4; i++) {
if ( i < tx ) {
la[tx][i] = la[i][tx];
}
else {
la[tx][i] = la[tx][i]; // TODO: not needed
}
}
__syncthreads();
/*--------------------------------
Do diagonal Computation
-----------------------------------*/
#pragma unroll 8
for(int j=0; j < inf_bs/4; j++) {
res += fabs( la[tx][j+ty*8] );
}
break_d += inf_bs;
__syncthreads();
n -= n_mod_bs;
/*-----------------------------
Go Up
-------------------------------*/
int i;
for( i=break_d; i < n; i += inf_bs ) {
#pragma unroll 8
for(int j=0; j < inf_bs; j += 4) {
la[ty+j][tx] = A[- j * lda];
}
A -= inf_bs;
__syncthreads();
#pragma unroll 8
for(int j=0; j < inf_bs/4; j++) {
res += fabs ( la[31-tx][j+ty*8] );
}
__syncthreads();
}
/*---------------------------------------------
           handle the n_mod_bs remainder rows here.
           Symmetry gives us a benefit here.
           This should really be done the other way around;
see dlansy_inf_kernel_generic_l code above
TODO compare performance with lower case and use that implementation if better.
-----------------------------------------------*/
A1 = A1 + n_mod_bs*lda + tx*lda;
if ( ty == 0 ) {
for( int j = 0; j < n_mod_bs; j++) {
res += fabs( A1[ j + lda * blockIdx.x * inf_bs ] );
}
}
__syncthreads();
la[tx][ty]= MAGMA_D_MAKE( res, 0);
__syncthreads();
/*--------------------------------------------------------
The leader accumulates all the results from his peer.
----------------------------------------------------------*/
if ( ty == 0 ) {
res = res
+ MAGMA_D_REAL( la[tx][1] )
+ MAGMA_D_REAL( la[tx][2] )
+ MAGMA_D_REAL( la[tx][3] );
dwork[ind] = res;
}
}
#endif /* (__CUDA_ARCH__ >= 200) */
}
/* Computes row sums dwork[i] = sum( abs( A(i,:) )), i=0:n-1, for || A ||_inf,
* where n % inf_bs == 0 and A is stored upper */
__global__ void
dlansy_inf_kernel_special_u(
int n, const double* A, int lda, double *dwork )
{
#if (__CUDA_ARCH__ >= 200)
int tx = threadIdx.x;
int ty = threadIdx.y;
int ind = blockIdx.x*inf_bs + tx;
double res = 0.;
/*
Reverse Computation ...
- Left
- Triangle
- Up
*/
A += lda*(n-1);
__shared__ double la[inf_bs][inf_bs+1];
A += ind;
A -= ty * lda;
int break_d = (n / inf_bs - blockIdx.x-1 )*inf_bs;
for(int i=0; i < break_d; i += inf_bs ) {
#pragma unroll 8
for(int j=0; j < inf_bs; j += 4) {
la[tx][ty+j] = A[-j*lda];
}
__syncthreads();
#pragma unroll 8
for(int j=0; j < 8; j++) {
res += fabs( la[tx][j+ty*8] );
}
A -= lda*inf_bs;
__syncthreads();
}
#pragma unroll 8
for(int j=0; j < inf_bs; j += 4)
la[tx][31-ty-j] = A[ -j * lda];
/* Look at the indexing changes */
A -= inf_bs;
__syncthreads();
#pragma unroll 8
for(int i=ty*8; i < (1+ty)*inf_bs/4; i++) {
if ( i < tx ) {
la[tx][i] = la[i][tx];
}
else {
la[tx][i] = la[tx][i]; // TODO: not needed
}
}
__syncthreads();
#pragma unroll 8
for(int j=0; j < inf_bs/4; j++) {
res += fabs( la[tx][j+ty*8] );
}
break_d += inf_bs;
__syncthreads();
for(int i=break_d; i < n; i += inf_bs ) {
#pragma unroll 8
for(int j=0; j < inf_bs; j += 4)
la[ty+j][tx] = A[ -j * lda];
A -= inf_bs;
__syncthreads();
#pragma unroll 8
for(int j=0; j < inf_bs/4; j++) {
res += fabs( la[31-tx][j+ty*8] );
}
__syncthreads();
}
la[tx][ty]= MAGMA_D_MAKE( res, 0. );
__syncthreads();
if ( ty == 0 ) {
res = res
+ MAGMA_D_REAL( la[tx][1] )
+ MAGMA_D_REAL( la[tx][2] )
+ MAGMA_D_REAL( la[tx][3] );
dwork[ind] = res;
}
#endif /* (__CUDA_ARCH__ >= 200) */
}
/* Computes row sums dwork[i] = sum( abs( A(i,:) )), i=0:n-1, for || A ||_inf */
extern "C" void
dlansy_inf(
magma_uplo_t uplo, int n, const double *A, int lda, double *dwork )
{
/* Note: The UPLO = 'U' Version can be optimized more. */
int blocks = (n - 1)/inf_bs + 1;
dim3 grid(blocks, 1, 1);
dim3 threads(inf_bs, 4, 1);
if ( n % inf_bs == 0 ) {
if ( uplo == 'L' || uplo == 'l') {
hipLaunchKernelGGL(( dlansy_inf_kernel_special_l), dim3(grid), dim3(threads), 0, magma_stream ,
n, A, lda, dwork );
}
else {
hipLaunchKernelGGL(( dlansy_inf_kernel_special_u), dim3(grid), dim3(threads), 0, magma_stream ,
n, A, lda, dwork);
}
}
else {
int n_full_block = (n - n % inf_bs) /inf_bs;
int n_mod_bs = n % inf_bs;
if ( uplo == 'L' || uplo == 'l') {
hipLaunchKernelGGL(( dlansy_inf_kernel_generic_l), dim3(grid), dim3(threads), 0, magma_stream ,
n, A, lda, dwork, n_full_block, n_mod_bs );
}
else {
hipLaunchKernelGGL(( dlansy_inf_kernel_generic_u), dim3(grid), dim3(threads), 0, magma_stream ,
n, A, lda, dwork, n_full_block, n_mod_bs );
}
}
}
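/* A simple way to sanity-check the kernels above: dwork[i] must equal
   sum_j |A(i,j)| over the full symmetric matrix, even though only one triangle
   is read. The host helper below rebuilds the row sums from a full host copy of
   A (column-major, as in MAGMA) purely for comparison; it is an illustration,
   not part of the library interface. */
static void dlansy_inf_reference(int n, const double* h_A, int lda, double* h_rowsum)
{
    for( int i=0; i < n; i++ ) {
        double s = 0.;
        for( int j=0; j < n; j++ )
            s += fabs( h_A[i + j*lda] );   // column-major indexing
        h_rowsum[i] = s;                   // compare against dwork[i]
    }
}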
/* ====================================================================== */
/* max-norm */
/* Computes dwork[i] = max( abs( A(i,0:i) )), i=0:n-1, for ||A||_max, where A is stored lower */
__global__ void
dlansy_max_kernel_l(
int n, const double* A, int lda, double *dwork )
{
int tx = threadIdx.x;
int ind = blockIdx.x * max_bs + tx;
double res = 0., res1;
int break_d = blockIdx.x * max_bs;
if (ind < n) {
A += ind;
// loop over blocks left of diagonal block
for(int i=0; i < break_d; i += max_bs ) {
#pragma unroll 8
for(int j=0; j < max_bs; j++) {
res1 = fabs( A[j*lda] );
res = fmax( res, res1 );
}
A += lda*max_bs;
}
// process diagonal block
for(int j=0; j <= tx; j++) {
res1 = fabs( A[j*lda] );
res = fmax( res, res1 );
}
dwork[ind] = res;
}
}
/* Computes dwork[i] = max( abs( A(i,0:i) )), i=0:n-1, for ||A||_max, where A is stored upper.
* TODO compare performance with lower case and use that implementation if better. */
__global__ void
dlansy_max_kernel_u(
int n, const double* A, int lda, double *dwork )
{
int ind = blockIdx.x * max_bs + threadIdx.x;
double res = 0.;
A += ind;
if (ind < n) {
for(int j=n-1; j >= ind; j--)
res = fmax( res, fabs( A[j*lda] ) );
dwork[ind] = res;
}
}
/* Computes dwork[i] = max( abs( A(i,:) )), i=0:n-1, for ||A||_max */
extern "C" void
dlansy_max(
magma_uplo_t uplo, int n, const double *A, int lda, double *dwork )
{
int blocks = (n - 1)/max_bs + 1;
dim3 grid(blocks, 1, 1);
dim3 threads(max_bs, 1, 1);
if ( uplo == 'L' || uplo == 'l' ) {
hipLaunchKernelGGL(( dlansy_max_kernel_l), dim3(grid), dim3(threads), 0, magma_stream ,
n, A, lda, dwork );
}
else {
hipLaunchKernelGGL(( dlansy_max_kernel_u), dim3(grid), dim3(threads), 0, magma_stream ,
n, A, lda, dwork );
}
}
/* ====================================================================== */
/*
Purpose
=======
DLANSY returns the value of the one norm, or the Frobenius norm, or
the infinity norm, or the element of largest absolute value of a
real symmetric matrix A.
DLANSY = ( max(abs(A(i,j))), NORM = 'M' or 'm'
(
( norm1(A), NORM = '1', 'O' or 'o' ** supported only for CUDA_ARCH >= 200
(
( normI(A), NORM = 'I' or 'i' ** supported only for CUDA_ARCH >= 200
(
( normF(A), NORM = 'F', 'f', 'E' or 'e' ** not yet supported
where norm1 denotes the one norm of a matrix (maximum column sum),
normI denotes the infinity norm of a matrix (maximum row sum) and
normF denotes the Frobenius norm of a matrix (square root of sum of squares).
Note that max(abs(A(i,j))) is not a consistent matrix norm.
Returns DLANSY < 0: if DLANSY = -i, the i-th argument had an illegal value.
Arguments:
==========
NORM (input) CHARACTER*1
Specifies the value to be returned in DLANSY as described above.
UPLO (input) CHARACTER*1
Specifies whether the upper or lower triangular part of the
symmetric matrix A is to be referenced.
= 'U': Upper triangular part of A is referenced
= 'L': Lower triangular part of A is referenced
N (input) INTEGER
The order of the matrix A. N >= 0. When N = 0, DLANSY is
set to zero.
A (input) DOUBLE PRECISION array on the GPU, dimension (LDA,N)
The symmetric matrix A. If UPLO = 'U', the leading n by n
upper triangular part of A contains the upper triangular part
of the matrix A, and the strictly lower triangular part of A
is not referenced. If UPLO = 'L', the leading n by n lower
triangular part of A contains the lower triangular part of
the matrix A, and the strictly upper triangular part of A is
not referenced. Note that the imaginary parts of the diagonal
elements need not be set and are assumed to be zero.
LDA (input) INTEGER
The leading dimension of the array A. LDA >= max(N,1).
DWORK (workspace) DOUBLE PRECISION array on the GPU, dimension (MAX(1,LWORK)),
where LWORK >= N.
NOTE: this is different than LAPACK, where WORK is only required
for norm1 and normI.
*/
extern "C" double
magmablas_dlansy(
magma_norm_t norm, magma_uplo_t uplo, magma_int_t n,
const double *A, magma_int_t lda, double *dwork )
{
magma_int_t info = 0;
magma_int_t arch = magma_getdevice_arch();
// 1-norm == inf-norm since A is symmetric
bool inf_norm = (norm == 'I' || norm == 'i' || norm == '1' || norm == 'O' || norm == 'o');
bool max_norm = (norm == 'M' || norm == 'm');
if ( ! max_norm && (! inf_norm || arch < 200) )
info = -1;
else if ( uplo != 'u' && uplo != 'U' && uplo != 'l' && uplo != 'L' )
info = -2;
else if ( n < 0 )
info = -3;
else if ( lda < n )
info = -5;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return info;
}
/* Quick return */
if ( n == 0 )
return 0;
double res = 0;
if ( inf_norm ) {
dlansy_inf( uplo, n, A, lda, dwork );
int i = hipblasIdamax( n, dwork, 1 ) - 1;
hipMemcpy( &res, &dwork[i], sizeof(double), hipMemcpyDeviceToHost );
}
else if ( max_norm ) {
dlansy_max( uplo, n, A, lda, dwork );
int i = hipblasIdamax( n, dwork, 1 ) - 1;
hipMemcpy( &res, &dwork[i], sizeof(double), hipMemcpyDeviceToHost );
}
return res;
}
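/* A minimal usage sketch for magmablas_dlansy(): because A is symmetric the one
   norm equals the infinity norm (maximum row sum), e.g. for A = [[2,-3],[-3,5]]
   norms 'I' and 'O' both give 3+5 = 8 while 'M' gives max|a_ij| = 5. dA is
   device memory holding the referenced triangle (lda >= n) and the workspace
   must provide at least n doubles, per the comment above. The character-style
   norm/uplo arguments follow the comparisons used throughout this file. */
static double dlansy_inf_norm_sketch(int n, const double* dA, int lda)
{
    double* dwork = NULL;
    hipMalloc( (void**)&dwork, n*sizeof(double) );          // LWORK >= N
    double res = magmablas_dlansy( 'I', 'L', n, dA, lda, dwork );
    hipFree( dwork );
    return res;
}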
| 50227969d41d97e33d6c6f526075b8d0d921cfa6.cu | /*
-- MAGMA (version 1.4.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
December 2013
@generated d Tue Dec 17 13:18:45 2013
*/
#include "common_magma.h"
#define inf_bs 32
#define max_bs 64
/* ====================================================================== */
/* inf-norm */
/* Computes row sums dwork[i] = sum( abs( A(i,:) )), i=0:n-1, for || A ||_inf,
* where n % inf_bs == 0 and A is stored lower.
* Has ceil( n / inf_bs ) blocks of (inf_bs x 4) threads each.
*/
__global__ void
dlansy_inf_kernel_special_l(
int n, const double* A, int lda, double *dwork )
{
#if (__CUDA_ARCH__ >= 200)
int tx = threadIdx.x;
int ty = threadIdx.y;
int ind = blockIdx.x*inf_bs + tx;
double res = 0.;
__shared__ double la[inf_bs][inf_bs+1];
A += ind;
A += ty * lda;
int break_d = blockIdx.x*inf_bs;
// loop over all 32x32 blocks left of the diagonal block
for(int i=0; i < break_d; i += inf_bs ) {
// 32x4 threads cooperatively load 32x32 block
#pragma unroll 8
for(int j=0; j < inf_bs; j += 4) {
la[tx][ty+j] = A[j*lda];
}
__syncthreads();
// compute 4 partial sums of each row
#pragma unroll 8
for(int j=0; j < 8; j++) {
res += fabs( la[tx][j+ty*8] );
}
A += lda*inf_bs;
__syncthreads();
}
// 32x4 threads cooperatively load 32x32 diagonal block
#pragma unroll 8
for(int j=0; j < inf_bs; j += 4)
la[ty+j][tx] = A[j*lda];
A += inf_bs;
__syncthreads();
// symmetrize block
// TODO make diagonal element real
#pragma unroll 8
for(int i=ty*8; i < (1+ty)*inf_bs/4; i++) {
if ( i < tx ) {
la[tx][i] = la[i][tx];
}
else
la[tx][i] = la[tx][i]; // TODO: not needed
}
__syncthreads();
// compute 4 partial sums of each row
#pragma unroll 8
for(int j=0; j < inf_bs/4; j++) {
res += fabs( la[tx][j+ty*8] );
}
break_d += inf_bs;
__syncthreads();
// loop over all 32x32 blocks below diagonal block
for(int i=break_d; i < n; i += inf_bs ) {
// 32x4 threads cooperatively load 32x32 block
#pragma unroll 8
for(int j=0; j < inf_bs; j += 4)
la[ty+j][tx] = A[j*lda];
A += inf_bs;
__syncthreads();
// compute 4 partial sums of each row
#pragma unroll 8
for(int j=0; j < inf_bs/4; j++) {
res += fabs( la[tx][j+ty*8] );
}
__syncthreads();
}
// store partial sums into shared memory
la[tx][ty] = MAGMA_D_MAKE( res, 0. );
__syncthreads();
// 32x1 threads compute final result of each row
if ( ty == 0 ) {
res = res
+ MAGMA_D_REAL( la[tx][1] )
+ MAGMA_D_REAL( la[tx][2] )
+ MAGMA_D_REAL( la[tx][3] );
dwork[ind] = res;
}
#endif /* (__CUDA_ARCH__ >= 200) */
}
/* Computes row sums dwork[i] = sum( abs( A(i,:) )), i=0:n-1, for || A ||_inf,
* where n is any size and A is stored lower */
__global__ void
dlansy_inf_kernel_generic_l(
int n, const double* A, int lda, double *dwork,
int n_full_block, int n_mod_bs )
{
#if (__CUDA_ARCH__ >= 200)
int tx = threadIdx.x;
int ty = threadIdx.y;
int ind = blockIdx.x*inf_bs + tx;
double res = 0.;
__shared__ double la[inf_bs][inf_bs+1];
if ( blockIdx.x == n_full_block ) {
/************************************************************************
-- Last (partial) block --
-- We will do something unusual here
-- Threads past end of matrix (i.e., ind >= n) are redundantly assigned
-- the last row (n-1). At the end, those results are ignored -- only
-- results for ind < n are saved into dwork.
-- For sufficiently large matrix the overhead will be very low
*************************************************************************/
if ( tx < n_mod_bs ) {
A += ( blockIdx.x*inf_bs + tx );
}
else {
A += ( blockIdx.x*inf_bs + n_mod_bs - 1); // redundantly do last row
}
A += ty * lda;
int break_d = blockIdx.x*inf_bs;
/*----------------------------
Go Right
-------------------------------*/
for(int i=0; i < break_d; i += inf_bs ) {
#pragma unroll 8
for(int j=0; j < inf_bs; j += 4) {
la[tx][ty+j] = A[j*lda];
}
__syncthreads();
#pragma unroll 8
for(int j=0; j < 8; j++) {
res += fabs( la[tx][j+ty*8] );
}
A += lda*inf_bs;
__syncthreads();
}
        /* we don't need to make results for rows >= n zero, as those computations will be discarded. */
if ( ty == 0 ) {
/*--------------------------------------------
             this thread computes the triangular part;
             the other threads wait, holding their values.
-----------------------------------------------*/
int j;
int count = 1; // TODO don't need initialization
if ( tx < n_mod_bs )
count = tx;
else
count = n_mod_bs;
for(j=0; j <= count; j++) {
res += fabs( A[j*lda] );
}
A += tx*lda;
count = 1;
for( ; j < n_mod_bs; j++) {
res += fabs( A[count] );
count++;
}
}
__syncthreads();
la[tx][ty]= MAGMA_D_MAKE( res, 0. );
__syncthreads();
/*--------------------------------------------------------
The leader accumulates all the results from his peer.
----------------------------------------------------------*/
if ( ty == 0 ) {
res = res
+ MAGMA_D_REAL( la[tx][1] )
+ MAGMA_D_REAL( la[tx][2] )
+ MAGMA_D_REAL( la[tx][3] );
if ( tx < n_mod_bs )
dwork[ind] = res;
}
}
else {
/*-----------------------------------
-- All the blocks but the last one --
-------------------------------------*/
A += ind;
A += ty * lda;
int break_d = blockIdx.x*inf_bs;
/*----------------------------
Go Right
-------------------------------*/
for(int i=0; i < break_d; i += inf_bs ) {
#pragma unroll 8
for(int j=0; j < inf_bs; j += 4) {
la[tx][ty+j] = A[j*lda];
}
__syncthreads();
#pragma unroll 8
for(int j=0; j < 8; j++) {
res += fabs( la[tx][j+ty*8] );
}
A += lda*inf_bs;
__syncthreads();
}
/*------------------------------------
Diagonal
Copy + Transpose lower triangle
--------------------------------------*/
#pragma unroll 8
for(int j=0; j < inf_bs; j += 4)
la[ty+j][tx] = A[j*lda];
A += inf_bs;
__syncthreads();
/*--------------------------------------------
Mirror Upper Triangle to Lower triangle
---------------------------------------------*/
#pragma unroll 8
for(int i=ty*8; i < (1+ty)*inf_bs/4; i++) {
if ( i < tx ) {
la[tx][i] = la[i][tx];
}
else
la[tx][i] = la[tx][i]; // TODO: not needed
}
__syncthreads();
/*--------------------------------
Do diagonal Computation
-----------------------------------*/
#pragma unroll 8
for(int j=0; j < inf_bs/4; j++) {
res += fabs( la[tx][j+ty*8] );
}
break_d += inf_bs;
__syncthreads();
n -= n_mod_bs;
/*-----------------------------
Go Down
-------------------------------*/
for(int i=break_d; i < n; i += inf_bs ) {
#pragma unroll 8
for(int j=0; j < inf_bs; j += 4)
la[ty+j][tx] = A[j*lda];
A += inf_bs;
__syncthreads();
#pragma unroll 8
for(int j=0; j < inf_bs/4; j++) {
res += fabs( la[tx][j+ty*8] );
}
__syncthreads();
}
/*---------------------------------------------
           handle the n_mod_bs remainder rows here.
           Symmetry gives us a benefit here.
-----------------------------------------------*/
A -= tx;
if ( tx < n_mod_bs ) {
A += tx;
}
else {
A += (n_mod_bs-1); /* Same as above */
}
#pragma unroll 8
for(int j=0; j < inf_bs; j += 4) {
if ( tx < n_mod_bs )
la[ty+j][tx] = A[j*lda]; //MAGMA_D_MUL( MAGMA_D_ONE, A[j*lda] ); // huh? just A[j*lda]?
else
la[ty+j][tx] = MAGMA_D_ZERO; //MAGMA_D_MUL( MAGMA_D_ZERO, A[j*lda] ); // huh? just 0?
}
__syncthreads();
/*----------------------------------------
What about doing some Zeroing here?
instead of zeroing before?
-----------------------------------------*/
#pragma unroll 8
for(int j=0; j < inf_bs/4; j++) {
res += fabs( la[tx][j+ty*8] );
}
__syncthreads();
la[tx][ty] = MAGMA_D_MAKE( res, 0. );
__syncthreads();
/*--------------------------------------------------------
The leader accumulates all the results from his peer.
----------------------------------------------------------*/
if ( ty == 0 ) {
res = res
+ MAGMA_D_REAL( la[tx][1] )
+ MAGMA_D_REAL( la[tx][2] )
+ MAGMA_D_REAL( la[tx][3] );
dwork[ind] = res;
}
}
#endif /* (__CUDA_ARCH__ >= 200) */
}
/* Computes row sums dwork[i] = sum( abs( A(i,:) )), i=0:n-1, for || A ||_inf,
* where n is any size and A is stored upper */
__global__ void
dlansy_inf_kernel_generic_u(
int n, const double* A, int lda, double *dwork,
int n_full_block, int n_mod_bs )
{
#if (__CUDA_ARCH__ >= 200)
int tx = threadIdx.x;
int ty = threadIdx.y;
int ind = blockIdx.x*inf_bs + tx;
double res = 0.;
__shared__ double la[inf_bs][inf_bs+1];
int blockIdxx = blockIdx.x;
if ( blockIdx.x == n_full_block ) {
/************************************************************************
-- Last block --
-- We will do something unusual here
-- For sufficiently large matrix the overhead will be very low
*************************************************************************/
ind = tx;
A += lda*(n-1);
if ( tx < n_mod_bs ) {
A += tx;
}
else {
A += (n_mod_bs - 1);
}
A -= ty * lda;
int break_d = blockIdx.x*inf_bs;
/*----------------------------
Go Right
-------------------------------*/
for(int i=0; i < break_d; i += inf_bs ) {
#pragma unroll 8
for(int j=0; j < inf_bs; j += 4) {
la[tx][ty+j] = A[-j*lda];
}
__syncthreads();
#pragma unroll 8
for(int j=0; j < 8; j++) {
res += fabs( la[tx][j+ty*8] );
}
A -= lda*inf_bs;
__syncthreads();
}
        /* No zeroing is needed here: those computations will be discarded. */
if ( ty == 0 ) {
            /*--------------------------------------------
                Thread ty == 0 computes the triangular part;
                the other threads keep their partial sums.
            -----------------------------------------------*/
int j;
int count = 1;
if ( tx < n_mod_bs )
count = n_mod_bs- tx;
else
count = n_mod_bs;
for(j=0; j < count; j++) {
res += fabs( A[-j*lda] );
}
A -= (count-1)*lda;
count = 1;
for( ; j < n_mod_bs; j++) {
res += fabs( A[-count] );
count++;
}
}
else {
}
__syncthreads();
la[tx][ty] = MAGMA_D_MAKE( res, 0. );
__syncthreads();
        /*--------------------------------------------------------
            The leader thread (ty == 0) accumulates the partial results of its peers.
        ----------------------------------------------------------*/
if ( ty == 0 ) {
res = res
+ MAGMA_D_REAL( la[tx][1] )
+ MAGMA_D_REAL( la[tx][2] )
+ MAGMA_D_REAL( la[tx][3] );
if ( tx < n_mod_bs )
dwork[ind] = res;
}
}
else {
/*-----------------------------------
-- All the blocks but the last one --
-- By the way this code can be optimized more.
-------------------------------------*/
ind = blockIdx.x*inf_bs + tx + n_mod_bs;
const double *A1 = A;
A += lda*(n-1);
A += ind;
A -= ty * lda;
int break_d = (n/inf_bs - blockIdxx - 1)*inf_bs;
/*----------------------------
Go Left
-------------------------------*/
for(int i=0; i < break_d; i += inf_bs ) {
#pragma unroll 8
for(int j=0; j < inf_bs; j += 4) {
la[tx][ty+j] = A[-j*lda];
}
__syncthreads();
#pragma unroll 8
for(int j=0; j < 8; j++) {
res += fabs( la[tx][j+ty*8] );
}
A -= lda*inf_bs;
__syncthreads();
}
/*------------------------------------
Diagonal
Copy + Transpose lower triangle
--------------------------------------*/
#pragma unroll 8
for(int j=0; j < inf_bs; j += 4) {
la[tx][31-ty-j] = A[ -j * lda];
}
A -= inf_bs;
__syncthreads();
/*--------------------------------------------
Mirror Upper Triangle to Lower triangle
---------------------------------------------*/
#pragma unroll 8
for(int i=ty*8; i < (1+ty)*inf_bs/4; i++) {
if ( i < tx ) {
la[tx][i] = la[i][tx];
}
else {
la[tx][i] = la[tx][i]; // TODO: not needed
}
}
__syncthreads();
/*--------------------------------
Do diagonal Computation
-----------------------------------*/
#pragma unroll 8
for(int j=0; j < inf_bs/4; j++) {
res += fabs( la[tx][j+ty*8] );
}
break_d += inf_bs;
__syncthreads();
n -= n_mod_bs;
/*-----------------------------
Go Up
-------------------------------*/
int i;
for( i=break_d; i < n; i += inf_bs ) {
#pragma unroll 8
for(int j=0; j < inf_bs; j += 4) {
la[ty+j][tx] = A[- j * lda];
}
A -= inf_bs;
__syncthreads();
#pragma unroll 8
for(int j=0; j < inf_bs/4; j++) {
res += fabs ( la[31-tx][j+ty*8] );
}
__syncthreads();
}
        /*---------------------------------------------
            Handle the n_mod_bs remainder rows here.
            Symmetry lets us reuse the transposed data;
            see the dlansy_inf_kernel_generic_l code above for the alternative layout.
            TODO compare performance with lower case and use that implementation if better.
        -----------------------------------------------*/
A1 = A1 + n_mod_bs*lda + tx*lda;
if ( ty == 0 ) {
for( int j = 0; j < n_mod_bs; j++) {
res += fabs( A1[ j + lda * blockIdx.x * inf_bs ] );
}
}
__syncthreads();
la[tx][ty]= MAGMA_D_MAKE( res, 0);
__syncthreads();
        /*--------------------------------------------------------
            The leader thread (ty == 0) accumulates the partial results of its peers.
        ----------------------------------------------------------*/
if ( ty == 0 ) {
res = res
+ MAGMA_D_REAL( la[tx][1] )
+ MAGMA_D_REAL( la[tx][2] )
+ MAGMA_D_REAL( la[tx][3] );
dwork[ind] = res;
}
}
#endif /* (__CUDA_ARCH__ >= 200) */
}
/* Computes row sums dwork[i] = sum( abs( A(i,:) )), i=0:n-1, for || A ||_inf,
* where n % inf_bs == 0 and A is stored upper */
__global__ void
dlansy_inf_kernel_special_u(
int n, const double* A, int lda, double *dwork )
{
#if (__CUDA_ARCH__ >= 200)
int tx = threadIdx.x;
int ty = threadIdx.y;
int ind = blockIdx.x*inf_bs + tx;
double res = 0.;
/*
Reverse Computation ...
- Left
- Triangle
- Up
*/
A += lda*(n-1);
__shared__ double la[inf_bs][inf_bs+1];
A += ind;
A -= ty * lda;
int break_d = (n / inf_bs - blockIdx.x-1 )*inf_bs;
for(int i=0; i < break_d; i += inf_bs ) {
#pragma unroll 8
for(int j=0; j < inf_bs; j += 4) {
la[tx][ty+j] = A[-j*lda];
}
__syncthreads();
#pragma unroll 8
for(int j=0; j < 8; j++) {
res += fabs( la[tx][j+ty*8] );
}
A -= lda*inf_bs;
__syncthreads();
}
#pragma unroll 8
for(int j=0; j < inf_bs; j += 4)
la[tx][31-ty-j] = A[ -j * lda];
/* Look at the indexing changes */
A -= inf_bs;
__syncthreads();
#pragma unroll 8
for(int i=ty*8; i < (1+ty)*inf_bs/4; i++) {
if ( i < tx ) {
la[tx][i] = la[i][tx];
}
else {
la[tx][i] = la[tx][i]; // TODO: not needed
}
}
__syncthreads();
#pragma unroll 8
for(int j=0; j < inf_bs/4; j++) {
res += fabs( la[tx][j+ty*8] );
}
break_d += inf_bs;
__syncthreads();
for(int i=break_d; i < n; i += inf_bs ) {
#pragma unroll 8
for(int j=0; j < inf_bs; j += 4)
la[ty+j][tx] = A[ -j * lda];
A -= inf_bs;
__syncthreads();
#pragma unroll 8
for(int j=0; j < inf_bs/4; j++) {
res += fabs( la[31-tx][j+ty*8] );
}
__syncthreads();
}
la[tx][ty]= MAGMA_D_MAKE( res, 0. );
__syncthreads();
if ( ty == 0 ) {
res = res
+ MAGMA_D_REAL( la[tx][1] )
+ MAGMA_D_REAL( la[tx][2] )
+ MAGMA_D_REAL( la[tx][3] );
dwork[ind] = res;
}
#endif /* (__CUDA_ARCH__ >= 200) */
}
/* Computes row sums dwork[i] = sum( abs( A(i,:) )), i=0:n-1, for || A ||_inf */
extern "C" void
dlansy_inf(
magma_uplo_t uplo, int n, const double *A, int lda, double *dwork )
{
/* Note: The UPLO = 'U' Version can be optimized more. */
int blocks = (n - 1)/inf_bs + 1;
dim3 grid(blocks, 1, 1);
dim3 threads(inf_bs, 4, 1);
if ( n % inf_bs == 0 ) {
if ( uplo == 'L' || uplo == 'l') {
dlansy_inf_kernel_special_l<<< grid, threads, 0, magma_stream >>>
( n, A, lda, dwork );
}
else {
dlansy_inf_kernel_special_u<<< grid, threads, 0, magma_stream >>>
( n, A, lda, dwork);
}
}
else {
int n_full_block = (n - n % inf_bs) /inf_bs;
int n_mod_bs = n % inf_bs;
if ( uplo == 'L' || uplo == 'l') {
dlansy_inf_kernel_generic_l<<< grid, threads, 0, magma_stream >>>
( n, A, lda, dwork, n_full_block, n_mod_bs );
}
else {
dlansy_inf_kernel_generic_u<<< grid, threads, 0, magma_stream >>>
( n, A, lda, dwork, n_full_block, n_mod_bs );
}
}
}
/* ====================================================================== */
/* max-norm */
/* Computes dwork[i] = max( abs( A(i,0:i) )), i=0:n-1, for ||A||_max, where A is stored lower */
__global__ void
dlansy_max_kernel_l(
int n, const double* A, int lda, double *dwork )
{
int tx = threadIdx.x;
int ind = blockIdx.x * max_bs + tx;
double res = 0., res1;
int break_d = blockIdx.x * max_bs;
if (ind < n) {
A += ind;
// loop over blocks left of diagonal block
for(int i=0; i < break_d; i += max_bs ) {
#pragma unroll 8
for(int j=0; j < max_bs; j++) {
res1 = fabs( A[j*lda] );
res = fmax( res, res1 );
}
A += lda*max_bs;
}
// process diagonal block
for(int j=0; j <= tx; j++) {
res1 = fabs( A[j*lda] );
res = fmax( res, res1 );
}
dwork[ind] = res;
}
}
/* Computes dwork[i] = max( abs( A(i,0:i) )), i=0:n-1, for ||A||_max, where A is stored upper.
* TODO compare performance with lower case and use that implementation if better. */
__global__ void
dlansy_max_kernel_u(
int n, const double* A, int lda, double *dwork )
{
int ind = blockIdx.x * max_bs + threadIdx.x;
double res = 0.;
A += ind;
if (ind < n) {
for(int j=n-1; j >= ind; j--)
res = fmax( res, fabs( A[j*lda] ) );
dwork[ind] = res;
}
}
/* Computes dwork[i] = max( abs( A(i,:) )), i=0:n-1, for ||A||_max */
extern "C" void
dlansy_max(
magma_uplo_t uplo, int n, const double *A, int lda, double *dwork )
{
int blocks = (n - 1)/max_bs + 1;
dim3 grid(blocks, 1, 1);
dim3 threads(max_bs, 1, 1);
if ( uplo == 'L' || uplo == 'l' ) {
dlansy_max_kernel_l<<< grid, threads, 0, magma_stream >>>
( n, A, lda, dwork );
}
else {
dlansy_max_kernel_u<<< grid, threads, 0, magma_stream >>>
( n, A, lda, dwork );
}
}
/* ====================================================================== */
/*
Purpose
=======
DLANSY returns the value of the one norm, or the Frobenius norm, or
the infinity norm, or the element of largest absolute value of a
real symmetric matrix A.
DLANSY = ( max(abs(A(i,j))), NORM = 'M' or 'm'
(
( norm1(A), NORM = '1', 'O' or 'o' ** supported only for CUDA_ARCH >= 200
(
( normI(A), NORM = 'I' or 'i' ** supported only for CUDA_ARCH >= 200
(
( normF(A), NORM = 'F', 'f', 'E' or 'e' ** not yet supported
where norm1 denotes the one norm of a matrix (maximum column sum),
normI denotes the infinity norm of a matrix (maximum row sum) and
normF denotes the Frobenius norm of a matrix (square root of sum of squares).
Note that max(abs(A(i,j))) is not a consistent matrix norm.
Returns DLANSY < 0: if DLANSY = -i, the i-th argument had an illegal value.
Arguments:
==========
NORM (input) CHARACTER*1
Specifies the value to be returned in DLANSY as described above.
UPLO (input) CHARACTER*1
Specifies whether the upper or lower triangular part of the
symmetric matrix A is to be referenced.
= 'U': Upper triangular part of A is referenced
= 'L': Lower triangular part of A is referenced
N (input) INTEGER
The order of the matrix A. N >= 0. When N = 0, DLANSY is
set to zero.
A (input) DOUBLE PRECISION array on the GPU, dimension (LDA,N)
The symmetric matrix A. If UPLO = 'U', the leading n by n
upper triangular part of A contains the upper triangular part
of the matrix A, and the strictly lower triangular part of A
is not referenced. If UPLO = 'L', the leading n by n lower
triangular part of A contains the lower triangular part of
the matrix A, and the strictly upper triangular part of A is
not referenced. Note that the imaginary parts of the diagonal
elements need not be set and are assumed to be zero.
LDA (input) INTEGER
The leading dimension of the array A. LDA >= max(N,1).
DWORK (workspace) DOUBLE PRECISION array on the GPU, dimension (MAX(1,LWORK)),
where LWORK >= N.
NOTE: this is different than LAPACK, where WORK is only required
for norm1 and normI.
*/
extern "C" double
magmablas_dlansy(
magma_norm_t norm, magma_uplo_t uplo, magma_int_t n,
const double *A, magma_int_t lda, double *dwork )
{
magma_int_t info = 0;
magma_int_t arch = magma_getdevice_arch();
// 1-norm == inf-norm since A is symmetric
bool inf_norm = (norm == 'I' || norm == 'i' || norm == '1' || norm == 'O' || norm == 'o');
bool max_norm = (norm == 'M' || norm == 'm');
if ( ! max_norm && (! inf_norm || arch < 200) )
info = -1;
else if ( uplo != 'u' && uplo != 'U' && uplo != 'l' && uplo != 'L' )
info = -2;
else if ( n < 0 )
info = -3;
else if ( lda < n )
info = -5;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return info;
}
/* Quick return */
if ( n == 0 )
return 0;
double res = 0;
if ( inf_norm ) {
dlansy_inf( uplo, n, A, lda, dwork );
int i = cublasIdamax( n, dwork, 1 ) - 1;
cudaMemcpy( &res, &dwork[i], sizeof(double), cudaMemcpyDeviceToHost );
}
else if ( max_norm ) {
dlansy_max( uplo, n, A, lda, dwork );
int i = cublasIdamax( n, dwork, 1 ) - 1;
cudaMemcpy( &res, &dwork[i], sizeof(double), cudaMemcpyDeviceToHost );
}
return res;
}
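/* Typical host-side call (sketch; assumes MAGMA is initialized and that
   dA (n-by-n symmetric, leading dimension ldda) and dwork (length >= n)
   are device buffers allocated by the caller, e.g. with magma_dmalloc):

       double norm_inf = magmablas_dlansy( 'I', 'L', n, dA, ldda, dwork );
       double norm_max = magmablas_dlansy( 'M', 'L', n, dA, ldda, dwork );

   'I'/'1'/'O' give the infinity/one norm (identical for a symmetric matrix),
   'M' gives the element of largest absolute value. */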
|
5bf488ecd19f0673e7fb04847850fe307604759d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <iostream>
#include <stdlib.h>
#include <time.h>
#define GpuErrorCheck(ans) { GpuAssert((ans), __FILE__, __LINE__); }
inline void GpuAssert(hipError_t code, const char* file, int line, bool abort = true)
{
if (code != hipSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
int DivRound(int n, int m) { return (n + m - 1) / m; }
void CpuSequentialScan(int* h_num, int numCount)
{
// print scan array
int sum = 0;
for (int i = 0; i < numCount; ++i)
{
sum += h_num[i];
if ((i < (numCount - 1)) &&
(i > (numCount - 5)))
{
std::cout << sum << ",";
}
}
sum -= h_num[numCount - 1];
std::cout << "\n\n";
}
void CpuCount(int* num, int numCount)
{
int countTable[16] = { 0 };
for (int i = 0; i < numCount; ++i)
{
++countTable[num[i]];
}
for (int i = 0; i < 16; ++i)
{
std::cout << countTable[i] << ",";
}
std::cout << "\n\n";
}
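// Scan: Blelloch-style work-efficient exclusive scan of the 2048-element shared
// array lds, executed by a 1024-thread block. The "bottom up" loop reduces
// pairwise sums up a binary tree; after the root is cleared to 0, the "top down"
// loop pushes prefix sums back down, leaving lds[k] = sum of the original lds[0..k-1].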
__inline__ __device__ void Scan(int* lds, int i)
{
int step;
// bottom up
#pragma unroll
for (step = 1; step < 2048; step *= 2)
{
if (i < 1024 / step)
{
int rightIdx = 2047 - 2 * i * step;
int leftIdx = rightIdx - step;
lds[rightIdx] += lds[leftIdx];
}
__syncthreads();
}
if (i == 1023)
{
lds[2047] = 0;
}
__syncthreads();
// top down
#pragma unroll
for (step = 1024; step >= 1; step /= 2)
{
if (i < 1024 / step)
{
int rightIdx = 2047 - 2 * i * step;
int leftIdx = rightIdx - step;
int left = lds[leftIdx];
int right = lds[rightIdx];
lds[leftIdx] = right;
lds[rightIdx] = left + right;
}
__syncthreads();
}
}
__global__ void PrefixScanMultiBlock(int* num, int* blockSum)
{
__shared__ int lds[2048];
int i = threadIdx.x;
int j = blockIdx.x;
int idx1 = i * 2;
int idx2 = i * 2 + 1;
int idx3 = j * 2048 + i * 2;
int idx4 = j * 2048 + i * 2 + 1;
lds[idx1] = num[idx3];
lds[idx2] = num[idx4];
__syncthreads();
Scan(lds, i);
if (i == 1023)
{
blockSum[j] = lds[2047] + num[2048 * j + 2047];
}
num[idx3] = lds[idx1];
num[idx4] = lds[idx2];
}
__global__ void PrefixScanSingleBlock(int* num)
{
__shared__ int lds[2048];
int i = threadIdx.x;
int j = blockIdx.x;
int idx1 = i * 2;
int idx2 = i * 2 + 1;
int idx3 = j * 2048 + i * 2;
int idx4 = j * 2048 + i * 2 + 1;
lds[idx1] = num[idx3];
lds[idx2] = num[idx4];
__syncthreads();
Scan(lds, i);
num[idx3] = lds[idx1];
num[idx4] = lds[idx2];
}
__global__ void PrefixScanAdd(int* num, int* blockSum)
{
int i = threadIdx.x;
int j = blockIdx.x;
int idx3 = j * 2048 + i * 2;
int idx4 = j * 2048 + i * 2 + 1;
int blocksum = blockSum[j];
num[idx3] += blocksum;
num[idx4] += blocksum;
}
void GpuScan(int* h_num, int numCount)
{
int* d_num;
int* d_blockSum;
int* h_blockSum;
// malloc and copy
h_blockSum = new int[2048];
GpuErrorCheck(hipMalloc((void**)& d_num, numCount * sizeof(int)));
GpuErrorCheck(hipMemcpy(d_num, h_num, numCount * sizeof(int), hipMemcpyHostToDevice));
GpuErrorCheck(hipMalloc((void**)& d_blockSum, 2048 * sizeof(int)));
GpuErrorCheck(hipMemset(d_blockSum, 0, 2048 * sizeof(int)));
// grid dim, block dim
dim3 gridDim(numCount / 2048, 1, 1);
dim3 blockDim(1024, 1, 1);
// dispatch
PrefixScanMultiBlock << <gridDim, blockDim >> > (d_num, d_blockSum);
PrefixScanSingleBlock << <1, blockDim >> > (d_blockSum);
PrefixScanAdd << <gridDim, blockDim >> > (d_num, d_blockSum);
GpuErrorCheck(hipDeviceSynchronize());
// copy to cpu
GpuErrorCheck(hipMemcpy(h_num, d_num, numCount * sizeof(int), hipMemcpyDeviceToHost));
GpuErrorCheck(hipMemcpy(h_blockSum, d_blockSum, 2048 * sizeof(int), hipMemcpyDeviceToHost));
GpuErrorCheck(hipPeekAtLastError());
// print
for (int i = numCount - 3; i < numCount; ++i)
{
std::cout << h_num[i] << ",";
}
std::cout << "\n\n";
// free
    delete[] h_blockSum;
hipFree(d_num);
hipFree(d_blockSum);
}
__global__ void CountMultiBlock(int* num, int* blockCount, int* numOffset)
{
__shared__ int lds[16];
if (threadIdx.x < 16)
{
lds[threadIdx.x] = 0;
}
__syncthreads();
int old1 = atomicAdd(lds + num[blockIdx.x * 2048 + threadIdx.x * 2], 1);
int old2 = atomicAdd(lds + num[blockIdx.x * 2048 + threadIdx.x * 2 + 1], 1);
numOffset[blockIdx.x * 2048 + threadIdx.x * 2] = old1;
numOffset[blockIdx.x * 2048 + threadIdx.x * 2 + 1] = old2;
__syncthreads();
if (threadIdx.x < 16)
{
blockCount[blockIdx.x * 16 + threadIdx.x] = lds[threadIdx.x];
}
}
__global__ void CountSum(int* blockCount, int* resultCount, int numBlock)
{
const int n = 128;
const int m = 16;
__shared__ int lds[n][m];
int i = threadIdx.x;
int j = threadIdx.y;
int k = blockIdx.x;
lds[i * 2][j] = 0;
lds[i * 2 + 1][j] = 0;
__syncthreads();
if (i * 2 < numBlock)
{
lds[i * 2][j] = blockCount[k * 2048 + (i * 2) * m + j];
lds[i * 2 + 1][j] = blockCount[k * 2048 + (i * 2 + 1) * m + j];
}
__syncthreads();
#pragma unroll
for (int step = 1; step < n; step *= 2)
{
if (i < (n / 2) / step)
{
int leftIdx = 2 * i * step;
int rightIdx = leftIdx + step;
lds[leftIdx][j] += lds[rightIdx][j];
}
__syncthreads();
}
if (i == 0)
{
resultCount[k * 16 + j] = lds[0][j];
}
}
void GpuCount(int* h_num, int* d_num, int* d_orderBuffer, int* d_numOffset, int numCount)
{
int* d_blockCount;
// int* h_blockCount;
int* d_resultCount;
int* h_resultCount;
// malloc and copy
int blockCountSize = DivRound(numCount, 2048) * 16;
//h_blockCount = new int[blockCountSize];
int* h_numOffset = new int[numCount];
h_resultCount = new int[16];
GpuErrorCheck(hipMalloc((void**)& d_blockCount, blockCountSize * sizeof(int)));
GpuErrorCheck(hipMemset(d_blockCount, 0, blockCountSize * sizeof(int)));
GpuErrorCheck(hipMalloc((void**)& d_resultCount, DivRound(DivRound(numCount, 2048), 128) * 16 * sizeof(int)));
GpuErrorCheck(hipMemset(d_resultCount, 0, DivRound(DivRound(numCount, 2048), 128) * 16 * sizeof(int)));
// dispatch
CountMultiBlock << <dim3(DivRound(numCount, 2048), 1, 1), dim3(1024, 1, 1) >> > (d_num, d_blockCount, d_numOffset);
GpuErrorCheck(hipDeviceSynchronize());
GpuErrorCheck(hipPeekAtLastError());
if (DivRound(DivRound(numCount, 2048), 128) > 1)
{
CountSum << <dim3(DivRound(DivRound(numCount, 2048), 128), 1, 1), dim3(64, 16, 1) >> > (d_blockCount, d_resultCount, DivRound(numCount, 2048));
GpuErrorCheck(hipDeviceSynchronize());
GpuErrorCheck(hipPeekAtLastError());
CountSum << <dim3(DivRound(DivRound(DivRound(numCount, 2048), 128), 128), 1, 1), dim3(64, 16, 1) >> > (d_resultCount, d_orderBuffer, DivRound(DivRound(numCount, 2048), 128));
GpuErrorCheck(hipDeviceSynchronize());
GpuErrorCheck(hipPeekAtLastError());
GpuErrorCheck(hipMemcpy(h_resultCount, d_orderBuffer, 16 * sizeof(int), hipMemcpyDeviceToHost));
}
else
{
CountSum << <dim3(DivRound(DivRound(numCount, 2048), 128), 1, 1), dim3(64, 16, 1) >> > (d_blockCount, d_orderBuffer, DivRound(numCount, 2048));
GpuErrorCheck(hipDeviceSynchronize());
GpuErrorCheck(hipPeekAtLastError());
GpuErrorCheck(hipMemcpy(h_resultCount, d_orderBuffer, 16 * sizeof(int), hipMemcpyDeviceToHost));
}
// copy to cpu
// GpuErrorCheck(hipMemcpy(h_blockCount, d_blockCount, blockCountSize * sizeof(int), hipMemcpyDeviceToHost));
GpuErrorCheck(hipMemcpy(h_numOffset, d_numOffset, numCount * sizeof(int), hipMemcpyDeviceToHost));
// print
// for (int i = 0; i < blockCountSize; ++i)
// {
// std::cout << h_blockCount[i] << ",";
// }
// std::cout << "\n\n";
// std::cout << "numOffset:\n";
// for (int i = 0; i < numCount; ++i)
// {
// std::cout << h_numOffset[i] << ",";
// }
// std::cout << "\n\n";
std::cout << "resultCount:\n";
for (int i = 0; i < 16; ++i)
{
std::cout << h_resultCount[i] << ",";
}
std::cout << "\n\n";
// free
    delete[] h_resultCount;
    delete[] h_numOffset;
//delete h_blockCount;
hipFree(d_blockCount);
hipFree(d_resultCount);
}
__global__ void PrefixScan16(int* num)
{
int laneId = threadIdx.x;
int v = num[laneId];
    // Only 16 threads are launched, so restrict the shuffle mask to lanes 0-15;
    // a full 0xffffffff mask would name lanes that never execute the intrinsic.
    int v1 = __shfl_sync(0xffff, v, laneId - 1);
    v = v1;
    if (laneId == 0) { v = 0; }
    v1 = __shfl_sync(0xffff, v, laneId - 1);
    if (laneId > 0) { v += v1; }
    v1 = __shfl_sync(0xffff, v, laneId - 2);
    if (laneId > 1) { v += v1; }
    v1 = __shfl_sync(0xffff, v, laneId - 4);
    if (laneId > 3) { v += v1; }
    v1 = __shfl_sync(0xffff, v, laneId - 8);
    if (laneId > 7) { v += v1; }
num[laneId] = v;
}
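// Reorder: scatter each value v to orderBuffer[v] (global start index of digit v)
// plus numOffset[x] (its arrival order within its own 2048-element block).
// Note: numOffset restarts at 0 in every block, so with more than one block two
// elements of the same digit can map to the same output slot; a per-block base
// offset (an exclusive scan of blockCount per digit) would be needed for a fully
// general multi-block sort.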
__global__ void Reorder(int* input, int* output, int* orderBuffer, int* numOffset)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int v = input[x];
int idx = orderBuffer[v] + numOffset[x];
output[idx] = v;
}
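// RadixSort: a single 4-bit counting-sort pass (the inputs are already in [0,16)):
//   1. CountMultiBlock - per-block histogram of the 16 digit values, plus each
//      element's arrival order within its block (numOffset).
//   2. CountSum        - tree-reduce the per-block histograms into one global
//      16-bin histogram (d_orderBuffer), via GpuCount.
//   3. PrefixScan16    - exclusive scan of that histogram: start index per digit.
//   4. Reorder         - scatter every element into its digit's output region.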
void RadixSort(int* h_num, int numCount)
{
int* d_orderBuffer;
int h_orderBuffer[16];
int* d_numOffset;
int* d_output;
int* h_output = new int[numCount];
int* d_num;
GpuErrorCheck(hipMalloc((void**)& d_num, numCount * sizeof(int)));
GpuErrorCheck(hipMemcpy(d_num, h_num, numCount * sizeof(int), hipMemcpyHostToDevice));
GpuErrorCheck(hipMalloc((void**)& d_orderBuffer, 16 * sizeof(int)));
GpuErrorCheck(hipMemset(d_orderBuffer, 0, 16 * sizeof(int)));
GpuErrorCheck(hipMalloc((void**)& d_numOffset, numCount * sizeof(int)));
GpuErrorCheck(hipMalloc((void**)& d_output, numCount * sizeof(int)));
GpuCount(h_num, d_num, d_orderBuffer, d_numOffset, numCount);
PrefixScan16 << <1, 16 >> > (d_orderBuffer);
GpuErrorCheck(hipDeviceSynchronize());
GpuErrorCheck(hipPeekAtLastError());
GpuErrorCheck(hipMemcpy(h_orderBuffer, d_orderBuffer, 16 * sizeof(int), hipMemcpyDeviceToHost));
std::cout << "orderBuffer:\n";
for (int i = 0; i < 16; ++i)
{
std::cout << h_orderBuffer[i] << ",";
}
std::cout << "\n\n";
Reorder << <dim3(DivRound(numCount, 1024), 1, 1), dim3(1024, 1, 1) >> > (d_num, d_output, d_orderBuffer, d_numOffset);
GpuErrorCheck(hipDeviceSynchronize());
GpuErrorCheck(hipPeekAtLastError());
GpuErrorCheck(hipMemcpy(h_output, d_output, numCount * sizeof(int), hipMemcpyDeviceToHost));
std::cout << "sorted:\n";
for (int i = 0; i < numCount; ++i)
{
std::cout << h_output[i] << ",";
}
std::cout << "\n\n";
    delete[] h_output;
    hipFree(d_num);
    hipFree(d_orderBuffer);
    hipFree(d_numOffset);
    hipFree(d_output);
}
int main()
{
srand(time(NULL));
// create cpu buffer
int numCount = 2048 * 2;
int* h_num = new int[numCount];
for (int i = 0; i < numCount; ++i) { h_num[i] = rand() % 16; }
// radix sort
RadixSort(h_num, numCount);
// cpu count
//CpuCount(h_num, numCount);
// gpu count
//GpuCount(h_num, numCount);
// cpu sequential scan
//CpuSequentialScan(h_num, numCount);
// gpu scan
//GpuScan(h_num, numCount);
// delete
    delete[] h_num;
return 0;
} | 5bf488ecd19f0673e7fb04847850fe307604759d.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cuda_runtime.h>
#include <stdio.h>
#include <iostream>
#include <stdlib.h>
#include <time.h>
#define GpuErrorCheck(ans) { GpuAssert((ans), __FILE__, __LINE__); }
inline void GpuAssert(cudaError_t code, const char* file, int line, bool abort = true)
{
if (code != cudaSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
int DivRound(int n, int m) { return (n + m - 1) / m; }
void CpuSequentialScan(int* h_num, int numCount)
{
// print scan array
int sum = 0;
for (int i = 0; i < numCount; ++i)
{
sum += h_num[i];
if ((i < (numCount - 1)) &&
(i > (numCount - 5)))
{
std::cout << sum << ",";
}
}
sum -= h_num[numCount - 1];
std::cout << "\n\n";
}
void CpuCount(int* num, int numCount)
{
int countTable[16] = { 0 };
for (int i = 0; i < numCount; ++i)
{
++countTable[num[i]];
}
for (int i = 0; i < 16; ++i)
{
std::cout << countTable[i] << ",";
}
std::cout << "\n\n";
}
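// Scan: Blelloch-style work-efficient exclusive scan of the 2048-element shared
// array lds, executed by a 1024-thread block. The "bottom up" loop reduces
// pairwise sums up a binary tree; after the root is cleared to 0, the "top down"
// loop pushes prefix sums back down, leaving lds[k] = sum of the original lds[0..k-1].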
__inline__ __device__ void Scan(int* lds, int i)
{
int step;
// bottom up
#pragma unroll
for (step = 1; step < 2048; step *= 2)
{
if (i < 1024 / step)
{
int rightIdx = 2047 - 2 * i * step;
int leftIdx = rightIdx - step;
lds[rightIdx] += lds[leftIdx];
}
__syncthreads();
}
if (i == 1023)
{
lds[2047] = 0;
}
__syncthreads();
// top down
#pragma unroll
for (step = 1024; step >= 1; step /= 2)
{
if (i < 1024 / step)
{
int rightIdx = 2047 - 2 * i * step;
int leftIdx = rightIdx - step;
int left = lds[leftIdx];
int right = lds[rightIdx];
lds[leftIdx] = right;
lds[rightIdx] = left + right;
}
__syncthreads();
}
}
__global__ void PrefixScanMultiBlock(int* num, int* blockSum)
{
__shared__ int lds[2048];
int i = threadIdx.x;
int j = blockIdx.x;
int idx1 = i * 2;
int idx2 = i * 2 + 1;
int idx3 = j * 2048 + i * 2;
int idx4 = j * 2048 + i * 2 + 1;
lds[idx1] = num[idx3];
lds[idx2] = num[idx4];
__syncthreads();
Scan(lds, i);
if (i == 1023)
{
blockSum[j] = lds[2047] + num[2048 * j + 2047];
}
num[idx3] = lds[idx1];
num[idx4] = lds[idx2];
}
__global__ void PrefixScanSingleBlock(int* num)
{
__shared__ int lds[2048];
int i = threadIdx.x;
int j = blockIdx.x;
int idx1 = i * 2;
int idx2 = i * 2 + 1;
int idx3 = j * 2048 + i * 2;
int idx4 = j * 2048 + i * 2 + 1;
lds[idx1] = num[idx3];
lds[idx2] = num[idx4];
__syncthreads();
Scan(lds, i);
num[idx3] = lds[idx1];
num[idx4] = lds[idx2];
}
__global__ void PrefixScanAdd(int* num, int* blockSum)
{
int i = threadIdx.x;
int j = blockIdx.x;
int idx3 = j * 2048 + i * 2;
int idx4 = j * 2048 + i * 2 + 1;
int blocksum = blockSum[j];
num[idx3] += blocksum;
num[idx4] += blocksum;
}
void GpuScan(int* h_num, int numCount)
{
int* d_num;
int* d_blockSum;
int* h_blockSum;
// malloc and copy
h_blockSum = new int[2048];
GpuErrorCheck(cudaMalloc((void**)& d_num, numCount * sizeof(int)));
GpuErrorCheck(cudaMemcpy(d_num, h_num, numCount * sizeof(int), cudaMemcpyHostToDevice));
GpuErrorCheck(cudaMalloc((void**)& d_blockSum, 2048 * sizeof(int)));
GpuErrorCheck(cudaMemset(d_blockSum, 0, 2048 * sizeof(int)));
// grid dim, block dim
dim3 gridDim(numCount / 2048, 1, 1);
dim3 blockDim(1024, 1, 1);
// dispatch
PrefixScanMultiBlock << <gridDim, blockDim >> > (d_num, d_blockSum);
PrefixScanSingleBlock << <1, blockDim >> > (d_blockSum);
PrefixScanAdd << <gridDim, blockDim >> > (d_num, d_blockSum);
GpuErrorCheck(cudaDeviceSynchronize());
// copy to cpu
GpuErrorCheck(cudaMemcpy(h_num, d_num, numCount * sizeof(int), cudaMemcpyDeviceToHost));
GpuErrorCheck(cudaMemcpy(h_blockSum, d_blockSum, 2048 * sizeof(int), cudaMemcpyDeviceToHost));
GpuErrorCheck(cudaPeekAtLastError());
// print
for (int i = numCount - 3; i < numCount; ++i)
{
std::cout << h_num[i] << ",";
}
std::cout << "\n\n";
// free
    delete[] h_blockSum;
cudaFree(d_num);
cudaFree(d_blockSum);
}
__global__ void CountMultiBlock(int* num, int* blockCount, int* numOffset)
{
__shared__ int lds[16];
if (threadIdx.x < 16)
{
lds[threadIdx.x] = 0;
}
__syncthreads();
int old1 = atomicAdd(lds + num[blockIdx.x * 2048 + threadIdx.x * 2], 1);
int old2 = atomicAdd(lds + num[blockIdx.x * 2048 + threadIdx.x * 2 + 1], 1);
numOffset[blockIdx.x * 2048 + threadIdx.x * 2] = old1;
numOffset[blockIdx.x * 2048 + threadIdx.x * 2 + 1] = old2;
__syncthreads();
if (threadIdx.x < 16)
{
blockCount[blockIdx.x * 16 + threadIdx.x] = lds[threadIdx.x];
}
}
__global__ void CountSum(int* blockCount, int* resultCount, int numBlock)
{
const int n = 128;
const int m = 16;
__shared__ int lds[n][m];
int i = threadIdx.x;
int j = threadIdx.y;
int k = blockIdx.x;
lds[i * 2][j] = 0;
lds[i * 2 + 1][j] = 0;
__syncthreads();
if (i * 2 < numBlock)
{
lds[i * 2][j] = blockCount[k * 2048 + (i * 2) * m + j];
lds[i * 2 + 1][j] = blockCount[k * 2048 + (i * 2 + 1) * m + j];
}
__syncthreads();
#pragma unroll
for (int step = 1; step < n; step *= 2)
{
if (i < (n / 2) / step)
{
int leftIdx = 2 * i * step;
int rightIdx = leftIdx + step;
lds[leftIdx][j] += lds[rightIdx][j];
}
__syncthreads();
}
if (i == 0)
{
resultCount[k * 16 + j] = lds[0][j];
}
}
void GpuCount(int* h_num, int* d_num, int* d_orderBuffer, int* d_numOffset, int numCount)
{
int* d_blockCount;
// int* h_blockCount;
int* d_resultCount;
int* h_resultCount;
// malloc and copy
int blockCountSize = DivRound(numCount, 2048) * 16;
//h_blockCount = new int[blockCountSize];
int* h_numOffset = new int[numCount];
h_resultCount = new int[16];
GpuErrorCheck(cudaMalloc((void**)& d_blockCount, blockCountSize * sizeof(int)));
GpuErrorCheck(cudaMemset(d_blockCount, 0, blockCountSize * sizeof(int)));
GpuErrorCheck(cudaMalloc((void**)& d_resultCount, DivRound(DivRound(numCount, 2048), 128) * 16 * sizeof(int)));
GpuErrorCheck(cudaMemset(d_resultCount, 0, DivRound(DivRound(numCount, 2048), 128) * 16 * sizeof(int)));
// dispatch
CountMultiBlock << <dim3(DivRound(numCount, 2048), 1, 1), dim3(1024, 1, 1) >> > (d_num, d_blockCount, d_numOffset);
GpuErrorCheck(cudaDeviceSynchronize());
GpuErrorCheck(cudaPeekAtLastError());
if (DivRound(DivRound(numCount, 2048), 128) > 1)
{
CountSum << <dim3(DivRound(DivRound(numCount, 2048), 128), 1, 1), dim3(64, 16, 1) >> > (d_blockCount, d_resultCount, DivRound(numCount, 2048));
GpuErrorCheck(cudaDeviceSynchronize());
GpuErrorCheck(cudaPeekAtLastError());
CountSum << <dim3(DivRound(DivRound(DivRound(numCount, 2048), 128), 128), 1, 1), dim3(64, 16, 1) >> > (d_resultCount, d_orderBuffer, DivRound(DivRound(numCount, 2048), 128));
GpuErrorCheck(cudaDeviceSynchronize());
GpuErrorCheck(cudaPeekAtLastError());
GpuErrorCheck(cudaMemcpy(h_resultCount, d_orderBuffer, 16 * sizeof(int), cudaMemcpyDeviceToHost));
}
else
{
CountSum << <dim3(DivRound(DivRound(numCount, 2048), 128), 1, 1), dim3(64, 16, 1) >> > (d_blockCount, d_orderBuffer, DivRound(numCount, 2048));
GpuErrorCheck(cudaDeviceSynchronize());
GpuErrorCheck(cudaPeekAtLastError());
GpuErrorCheck(cudaMemcpy(h_resultCount, d_orderBuffer, 16 * sizeof(int), cudaMemcpyDeviceToHost));
}
// copy to cpu
// GpuErrorCheck(cudaMemcpy(h_blockCount, d_blockCount, blockCountSize * sizeof(int), cudaMemcpyDeviceToHost));
GpuErrorCheck(cudaMemcpy(h_numOffset, d_numOffset, numCount * sizeof(int), cudaMemcpyDeviceToHost));
// print
// for (int i = 0; i < blockCountSize; ++i)
// {
// std::cout << h_blockCount[i] << ",";
// }
// std::cout << "\n\n";
// std::cout << "numOffset:\n";
// for (int i = 0; i < numCount; ++i)
// {
// std::cout << h_numOffset[i] << ",";
// }
// std::cout << "\n\n";
std::cout << "resultCount:\n";
for (int i = 0; i < 16; ++i)
{
std::cout << h_resultCount[i] << ",";
}
std::cout << "\n\n";
// free
    delete[] h_resultCount;
    delete[] h_numOffset;
//delete h_blockCount;
cudaFree(d_blockCount);
cudaFree(d_resultCount);
}
__global__ void PrefixScan16(int* num)
{
int laneId = threadIdx.x;
int v = num[laneId];
    // Only 16 threads are launched, so restrict the shuffle mask to lanes 0-15;
    // a full 0xffffffff mask would name lanes that never execute the intrinsic.
    int v1 = __shfl_sync(0xffff, v, laneId - 1);
    v = v1;
    if (laneId == 0) { v = 0; }
    v1 = __shfl_sync(0xffff, v, laneId - 1);
    if (laneId > 0) { v += v1; }
    v1 = __shfl_sync(0xffff, v, laneId - 2);
    if (laneId > 1) { v += v1; }
    v1 = __shfl_sync(0xffff, v, laneId - 4);
    if (laneId > 3) { v += v1; }
    v1 = __shfl_sync(0xffff, v, laneId - 8);
    if (laneId > 7) { v += v1; }
num[laneId] = v;
}
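// Reorder: scatter each value v to orderBuffer[v] (global start index of digit v)
// plus numOffset[x] (its arrival order within its own 2048-element block).
// Note: numOffset restarts at 0 in every block, so with more than one block two
// elements of the same digit can map to the same output slot; a per-block base
// offset (an exclusive scan of blockCount per digit) would be needed for a fully
// general multi-block sort.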
__global__ void Reorder(int* input, int* output, int* orderBuffer, int* numOffset)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int v = input[x];
int idx = orderBuffer[v] + numOffset[x];
output[idx] = v;
}
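// RadixSort: a single 4-bit counting-sort pass (the inputs are already in [0,16)):
//   1. CountMultiBlock - per-block histogram of the 16 digit values, plus each
//      element's arrival order within its block (numOffset).
//   2. CountSum        - tree-reduce the per-block histograms into one global
//      16-bin histogram (d_orderBuffer), via GpuCount.
//   3. PrefixScan16    - exclusive scan of that histogram: start index per digit.
//   4. Reorder         - scatter every element into its digit's output region.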
void RadixSort(int* h_num, int numCount)
{
int* d_orderBuffer;
int h_orderBuffer[16];
int* d_numOffset;
int* d_output;
int* h_output = new int[numCount];
int* d_num;
GpuErrorCheck(cudaMalloc((void**)& d_num, numCount * sizeof(int)));
GpuErrorCheck(cudaMemcpy(d_num, h_num, numCount * sizeof(int), cudaMemcpyHostToDevice));
GpuErrorCheck(cudaMalloc((void**)& d_orderBuffer, 16 * sizeof(int)));
GpuErrorCheck(cudaMemset(d_orderBuffer, 0, 16 * sizeof(int)));
GpuErrorCheck(cudaMalloc((void**)& d_numOffset, numCount * sizeof(int)));
GpuErrorCheck(cudaMalloc((void**)& d_output, numCount * sizeof(int)));
GpuCount(h_num, d_num, d_orderBuffer, d_numOffset, numCount);
PrefixScan16 << <1, 16 >> > (d_orderBuffer);
GpuErrorCheck(cudaDeviceSynchronize());
GpuErrorCheck(cudaPeekAtLastError());
GpuErrorCheck(cudaMemcpy(h_orderBuffer, d_orderBuffer, 16 * sizeof(int), cudaMemcpyDeviceToHost));
std::cout << "orderBuffer:\n";
for (int i = 0; i < 16; ++i)
{
std::cout << h_orderBuffer[i] << ",";
}
std::cout << "\n\n";
Reorder << <dim3(DivRound(numCount, 1024), 1, 1), dim3(1024, 1, 1) >> > (d_num, d_output, d_orderBuffer, d_numOffset);
GpuErrorCheck(cudaDeviceSynchronize());
GpuErrorCheck(cudaPeekAtLastError());
GpuErrorCheck(cudaMemcpy(h_output, d_output, numCount * sizeof(int), cudaMemcpyDeviceToHost));
std::cout << "sorted:\n";
for (int i = 0; i < numCount; ++i)
{
std::cout << h_output[i] << ",";
}
std::cout << "\n\n";
    delete[] h_output;
    cudaFree(d_num);
    cudaFree(d_orderBuffer);
    cudaFree(d_numOffset);
    cudaFree(d_output);
}
int main()
{
srand(time(NULL));
// create cpu buffer
int numCount = 2048 * 2;
int* h_num = new int[numCount];
for (int i = 0; i < numCount; ++i) { h_num[i] = rand() % 16; }
// radix sort
RadixSort(h_num, numCount);
// cpu count
//CpuCount(h_num, numCount);
// gpu count
//GpuCount(h_num, numCount);
// cpu sequential scan
//CpuSequentialScan(h_num, numCount);
// gpu scan
//GpuScan(h_num, numCount);
// delete
    delete[] h_num;
return 0;
} |
7676388ebe7d852dbb52ea81db1a7fafaa4d91a1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/extension.h"
#define CHECK_GPU_INPUT(x) PD_CHECK(x.is_gpu(), #x " must be a GPU Tensor.")
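// The kernels below use grid-stride loops: each thread starts at its global index
// and strides by the total number of launched threads, so one launch configuration
// is correct for tensors of any length num.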
template <typename data_t>
__global__ void relu_cuda_forward_kernel(const data_t* x,
data_t* y,
int64_t num) {
int64_t gid = blockIdx.x * blockDim.x + threadIdx.x;
for (int64_t i = gid; i < num; i += blockDim.x * gridDim.x) {
y[i] = x[i] > static_cast<data_t>(0.) ? x[i] : static_cast<data_t>(0.);
}
}
template <typename data_t>
__global__ void relu_cuda_backward_kernel(const data_t* dy,
const data_t* y,
data_t* dx,
int64_t num) {
int64_t gid = blockIdx.x * blockDim.x + threadIdx.x;
for (int64_t i = gid; i < num; i += blockDim.x * gridDim.x) {
dx[i] = dy[i] * (y[i] > static_cast<data_t>(0.) ? static_cast<data_t>(1.)
: static_cast<data_t>(0.));
}
}
template <typename data_t>
__global__ void relu_cuda_double_backward_kernel(const data_t* out_data,
const data_t* ddx_data,
data_t* ddout_data,
int64_t num) {
int64_t gid = blockIdx.x * blockDim.x + threadIdx.x;
for (int64_t i = gid; i < num; i += blockDim.x * gridDim.x) {
ddout_data[i] = ddx_data[i] * (out_data[i] > static_cast<data_t>(0.)
? static_cast<data_t>(1.)
: static_cast<data_t>(0.));
}
}
std::vector<paddle::Tensor> relu_cuda_forward(const paddle::Tensor& x) {
CHECK_GPU_INPUT(x);
auto out = paddle::empty_like(x);
PD_CHECK(x.place() == paddle::DefaultGPUPlace());
int64_t numel = x.numel();
int64_t block = 512;
int64_t grid = (numel + block - 1) / block;
PD_DISPATCH_FLOATING_AND_HALF_TYPES(
x.type(), "relu_cuda_forward_kernel", ([&] {
hipLaunchKernelGGL(( relu_cuda_forward_kernel<data_t>), dim3(grid), dim3(block), 0, x.stream(),
x.data<data_t>(), out.data<data_t>(), numel);
}));
return {out};
}
std::vector<paddle::Tensor> relu_cuda_backward(const paddle::Tensor& x,
const paddle::Tensor& out,
const paddle::Tensor& grad_out) {
CHECK_GPU_INPUT(x);
CHECK_GPU_INPUT(out);
CHECK_GPU_INPUT(grad_out);
auto grad_x = paddle::empty_like(x);
PD_CHECK(x.place() == paddle::DefaultGPUPlace());
int64_t numel = out.numel();
int64_t block = 512;
int64_t grid = (numel + block - 1) / block;
PD_DISPATCH_FLOATING_AND_HALF_TYPES(
out.type(), "relu_cuda_backward_kernel", ([&] {
hipLaunchKernelGGL(( relu_cuda_backward_kernel<data_t>), dim3(grid), dim3(block), 0, x.stream(),
grad_out.data<data_t>(),
out.data<data_t>(),
grad_x.mutable_data<data_t>(x.place()),
numel);
}));
return {grad_x};
}
std::vector<paddle::Tensor> relu_cuda_double_backward(
const paddle::Tensor& out, const paddle::Tensor& ddx) {
CHECK_GPU_INPUT(out);
CHECK_GPU_INPUT(ddx);
auto ddout = paddle::empty(out.shape(), out.dtype(), out.place());
int64_t numel = out.numel();
int64_t block = 512;
int64_t grid = (numel + block - 1) / block;
PD_DISPATCH_FLOATING_AND_HALF_TYPES(
out.type(), "relu_cuda_double_backward_kernel", ([&] {
hipLaunchKernelGGL(( relu_cuda_double_backward_kernel<data_t>)
, dim3(grid), dim3(block), 0, out.stream(),
out.data<data_t>(),
ddx.data<data_t>(),
ddout.mutable_data<data_t>(out.place()),
numel);
}));
return {ddout};
}
std::vector<paddle::Tensor> relu_cuda_backward_without_x(
const paddle::Tensor& out, const paddle::Tensor& grad_out) {
auto grad_x = paddle::empty(out.shape(), out.dtype(), out.place());
int numel = out.numel();
int block = 512;
int grid = (numel + block - 1) / block;
PD_DISPATCH_FLOATING_AND_HALF_TYPES(
out.type(), "relu_cuda_backward_kernel", ([&] {
hipLaunchKernelGGL(( relu_cuda_backward_kernel<data_t>), dim3(grid), dim3(block), 0, out.stream(),
grad_out.data<data_t>(),
out.data<data_t>(),
grad_x.mutable_data<data_t>(out.place()),
numel);
}));
return {grad_x};
}
void relu_cuda_forward_out(const paddle::Tensor& x, paddle::Tensor* out) {
int numel = x.numel();
int block = 512;
int grid = (numel + block - 1) / block;
out->reshape(x.shape());
PD_DISPATCH_FLOATING_AND_HALF_TYPES(
x.type(), "relu_cuda_forward_kernel", ([&] {
hipLaunchKernelGGL(( relu_cuda_forward_kernel<data_t>), dim3(grid), dim3(block), 0, x.stream(),
x.data<data_t>(), out->mutable_data<data_t>(x.place()), numel);
}));
}
void relu_cuda_backward_out(const paddle::Tensor& x,
const paddle::Tensor& out,
const paddle::Tensor& grad_out,
paddle::Tensor* grad_x) {
int numel = out.numel();
int block = 512;
int grid = (numel + block - 1) / block;
grad_x->reshape(x.shape());
PD_DISPATCH_FLOATING_AND_HALF_TYPES(
out.type(), "relu_cuda_backward_kernel", ([&] {
hipLaunchKernelGGL(( relu_cuda_backward_kernel<data_t>), dim3(grid), dim3(block), 0, x.stream(),
grad_out.data<data_t>(),
out.data<data_t>(),
grad_x->mutable_data<data_t>(x.place()),
numel);
}));
}
| 7676388ebe7d852dbb52ea81db1a7fafaa4d91a1.cu | // Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/extension.h"
#define CHECK_GPU_INPUT(x) PD_CHECK(x.is_gpu(), #x " must be a GPU Tensor.")
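// The kernels below use grid-stride loops: each thread starts at its global index
// and strides by the total number of launched threads, so one launch configuration
// is correct for tensors of any length num.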
template <typename data_t>
__global__ void relu_cuda_forward_kernel(const data_t* x,
data_t* y,
int64_t num) {
int64_t gid = blockIdx.x * blockDim.x + threadIdx.x;
for (int64_t i = gid; i < num; i += blockDim.x * gridDim.x) {
y[i] = x[i] > static_cast<data_t>(0.) ? x[i] : static_cast<data_t>(0.);
}
}
template <typename data_t>
__global__ void relu_cuda_backward_kernel(const data_t* dy,
const data_t* y,
data_t* dx,
int64_t num) {
int64_t gid = blockIdx.x * blockDim.x + threadIdx.x;
for (int64_t i = gid; i < num; i += blockDim.x * gridDim.x) {
dx[i] = dy[i] * (y[i] > static_cast<data_t>(0.) ? static_cast<data_t>(1.)
: static_cast<data_t>(0.));
}
}
template <typename data_t>
__global__ void relu_cuda_double_backward_kernel(const data_t* out_data,
const data_t* ddx_data,
data_t* ddout_data,
int64_t num) {
int64_t gid = blockIdx.x * blockDim.x + threadIdx.x;
for (int64_t i = gid; i < num; i += blockDim.x * gridDim.x) {
ddout_data[i] = ddx_data[i] * (out_data[i] > static_cast<data_t>(0.)
? static_cast<data_t>(1.)
: static_cast<data_t>(0.));
}
}
std::vector<paddle::Tensor> relu_cuda_forward(const paddle::Tensor& x) {
CHECK_GPU_INPUT(x);
auto out = paddle::empty_like(x);
PD_CHECK(x.place() == paddle::DefaultGPUPlace());
int64_t numel = x.numel();
int64_t block = 512;
int64_t grid = (numel + block - 1) / block;
PD_DISPATCH_FLOATING_AND_HALF_TYPES(
x.type(), "relu_cuda_forward_kernel", ([&] {
relu_cuda_forward_kernel<data_t><<<grid, block, 0, x.stream()>>>(
x.data<data_t>(), out.data<data_t>(), numel);
}));
return {out};
}
std::vector<paddle::Tensor> relu_cuda_backward(const paddle::Tensor& x,
const paddle::Tensor& out,
const paddle::Tensor& grad_out) {
CHECK_GPU_INPUT(x);
CHECK_GPU_INPUT(out);
CHECK_GPU_INPUT(grad_out);
auto grad_x = paddle::empty_like(x);
PD_CHECK(x.place() == paddle::DefaultGPUPlace());
int64_t numel = out.numel();
int64_t block = 512;
int64_t grid = (numel + block - 1) / block;
PD_DISPATCH_FLOATING_AND_HALF_TYPES(
out.type(), "relu_cuda_backward_kernel", ([&] {
relu_cuda_backward_kernel<data_t><<<grid, block, 0, x.stream()>>>(
grad_out.data<data_t>(),
out.data<data_t>(),
grad_x.mutable_data<data_t>(x.place()),
numel);
}));
return {grad_x};
}
std::vector<paddle::Tensor> relu_cuda_double_backward(
const paddle::Tensor& out, const paddle::Tensor& ddx) {
CHECK_GPU_INPUT(out);
CHECK_GPU_INPUT(ddx);
auto ddout = paddle::empty(out.shape(), out.dtype(), out.place());
int64_t numel = out.numel();
int64_t block = 512;
int64_t grid = (numel + block - 1) / block;
PD_DISPATCH_FLOATING_AND_HALF_TYPES(
out.type(), "relu_cuda_double_backward_kernel", ([&] {
relu_cuda_double_backward_kernel<data_t>
<<<grid, block, 0, out.stream()>>>(
out.data<data_t>(),
ddx.data<data_t>(),
ddout.mutable_data<data_t>(out.place()),
numel);
}));
return {ddout};
}
std::vector<paddle::Tensor> relu_cuda_backward_without_x(
const paddle::Tensor& out, const paddle::Tensor& grad_out) {
auto grad_x = paddle::empty(out.shape(), out.dtype(), out.place());
int numel = out.numel();
int block = 512;
int grid = (numel + block - 1) / block;
PD_DISPATCH_FLOATING_AND_HALF_TYPES(
out.type(), "relu_cuda_backward_kernel", ([&] {
relu_cuda_backward_kernel<data_t><<<grid, block, 0, out.stream()>>>(
grad_out.data<data_t>(),
out.data<data_t>(),
grad_x.mutable_data<data_t>(out.place()),
numel);
}));
return {grad_x};
}
void relu_cuda_forward_out(const paddle::Tensor& x, paddle::Tensor* out) {
int numel = x.numel();
int block = 512;
int grid = (numel + block - 1) / block;
out->reshape(x.shape());
PD_DISPATCH_FLOATING_AND_HALF_TYPES(
x.type(), "relu_cuda_forward_kernel", ([&] {
relu_cuda_forward_kernel<data_t><<<grid, block, 0, x.stream()>>>(
x.data<data_t>(), out->mutable_data<data_t>(x.place()), numel);
}));
}
void relu_cuda_backward_out(const paddle::Tensor& x,
const paddle::Tensor& out,
const paddle::Tensor& grad_out,
paddle::Tensor* grad_x) {
int numel = out.numel();
int block = 512;
int grid = (numel + block - 1) / block;
grad_x->reshape(x.shape());
PD_DISPATCH_FLOATING_AND_HALF_TYPES(
out.type(), "relu_cuda_backward_kernel", ([&] {
relu_cuda_backward_kernel<data_t><<<grid, block, 0, x.stream()>>>(
grad_out.data<data_t>(),
out.data<data_t>(),
grad_x->mutable_data<data_t>(x.place()),
numel);
}));
}
|
d9013b89e7f5747c5e58263580d461927d2b911e.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "sumArrayOnGPU.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *A = NULL;
hipMalloc(&A, XSIZE*YSIZE);
float *B = NULL;
hipMalloc(&B, XSIZE*YSIZE);
float *C = NULL;
hipMalloc(&C, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
    hipFree(0);
    hipLaunchKernelGGL((sumArrayOnGPU), dim3(gridBlock), dim3(threadBlock), 0, 0, A, B, C);
hipDeviceSynchronize();
    for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
    hipLaunchKernelGGL((sumArrayOnGPU), dim3(gridBlock), dim3(threadBlock), 0, 0, A, B, C);
}
auto start = steady_clock::now();
    for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
    hipLaunchKernelGGL((sumArrayOnGPU), dim3(gridBlock), dim3(threadBlock), 0, 0, A, B, C);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | d9013b89e7f5747c5e58263580d461927d2b911e.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "sumArrayOnGPU.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *A = NULL;
cudaMalloc(&A, XSIZE*YSIZE);
float *B = NULL;
cudaMalloc(&B, XSIZE*YSIZE);
float *C = NULL;
cudaMalloc(&C, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
sumArrayOnGPU<<<gridBlock,threadBlock>>>(A,B,C);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
sumArrayOnGPU<<<gridBlock,threadBlock>>>(A,B,C);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
sumArrayOnGPU<<<gridBlock,threadBlock>>>(A,B,C);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
keywordfinderV4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* keywordfinder.c - finds key words in a large file
*
* Created on: 19 Feb. 2019
* Author: Eric McCreath
*/
#include<stdio.h>
#include<stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <sys/time.h>
#include<cuda.h>
// this macro checks for errors in cuda calls
#define Err(ans) { gpucheck((ans), __FILE__, __LINE__); }
inline void gpucheck(hipError_t code, const char *file, int line) {
if (code != hipSuccess) {
fprintf(stderr, "GPU Err: %s %s %d\n", hipGetErrorString(code), file,
line);
exit(code);
}
}
struct timeval tv1, tv2;
void timestart() {
gettimeofday(&tv1, NULL);
}
void timestop() {
gettimeofday(&tv2, NULL);
}
void timereport() {
printf("%f",
(double) (tv2.tv_usec - tv1.tv_usec) / 1000000
+ (double) (tv2.tv_sec - tv1.tv_sec));
}
void err(const char *str) {
printf("error : %s\n", str);
exit(1);
}
int loadfile(char *str, char **data) {
struct stat filestat;
if (stat(str, &filestat) == -1)
err("problem stating file");
FILE *file;
if ((file = fopen(str, "r")) == NULL)
err("problem opening file");
Err(hipHostMalloc(data, filestat.st_size));
fread(*data, filestat.st_size, 1, file);
return filestat.st_size;
}
int count(char marker, char *data, int size) {
int i;
int sum = 0;
for (i = 0; i < size; i++) {
if (data[i] == marker)
sum++;
}
return sum;
}
struct find {
int pos;
int word;
};
__device__ int check(char *sdata, int datasize, int pos, int tidx, char *word) {
int i = 0;
while (word[i] != 0 && sdata[tidx + i] == word[i] && pos + i < datasize) {
i++;
}
if (word[i] == 0)
return 1;
return 0;
}
#define MAXKEYWORDSIZE 1024
__constant__ char keywords[MAXKEYWORDSIZE];
#define MAXKEYWORDS 100
__constant__ int wordsindex[MAXKEYWORDS];
#define MAXKEYLEN 20
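// findkeywords: each block stages blockDim.x bytes of the text in shared memory,
// plus up to MAXKEYLEN halo bytes from the next tile so a keyword that straddles
// the tile boundary can still be matched. Hits are appended to finds[] through an
// atomic counter; hits past maxfinds are counted but not stored.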
__global__ void findkeywords(char *data, int datasize, int numwords,
struct find *finds, int maxfinds, unsigned int *findcount) {
int i;
char *word;
int resultspot;
extern __shared__ char datashared[];
int pos = blockIdx.x * blockDim.x + threadIdx.x;
	datashared[threadIdx.x] = (pos < datasize) ? data[pos] : 0; // guard the tail block against reading past the end of the file
if (threadIdx.x < MAXKEYLEN && pos+blockDim.x < datasize) datashared[threadIdx.x + blockDim.x] = data[pos+blockDim.x];
__syncthreads();
// printf("pos : %d", pos);
for (i = 0; i < numwords; i++) {
word = keywords + wordsindex[i];
if (check(datashared, datasize, pos, threadIdx.x, word)) {
resultspot = atomicInc(findcount, datasize);
if (resultspot < maxfinds) {
finds[resultspot].pos = pos;
finds[resultspot].word = i;
}
}
}
// printf("fc : %d\n", *findcount);
// printf("efc : %d\n", *findcount);
}
int main(int argc, char *argv[]) {
if (argc != 3)
err("usage: keywordfinder textfile keywords");
// load the text files into memory
char *data_h, *data_d;
int datasize;
datasize = loadfile(argv[1], &data_h);
Err(hipMalloc(&data_d, datasize));
char *keywords_h;
int keywordssize;
keywordssize = loadfile(argv[2], &keywords_h);
//Err(hipMalloc(&keywords_d, keywordssize));
// obtain an index into the keywords. So "wordsindex[i]" is
// the position within "keywords" that keyword "i" starts.
int numwords = count('\n', keywords_h, keywordssize);
int *wordsindex_h;
Err(hipHostMalloc(&wordsindex_h, sizeof(int) * numwords));
//Err(hipMalloc(&wordsindex_d, sizeof(int) * numwords));
if (numwords > MAXKEYWORDS || keywordssize > MAXKEYWORDSIZE)
err("problem too many keywords for constant memory");
int i;
int pos = 0;
wordsindex_h[pos++] = 0;
for (i = 0; i < keywordssize; i++) {
if (keywords_h[i] == '\n') {
keywords_h[i] = 0;
if (pos < numwords)
wordsindex_h[pos++] = i + 1;
}
}
// display the key words
//for (int j=0;j<numwords;j++)
//printf("word : %d %s\n", wordsindex[j], &keywords[wordsindex[j]]);
// set aside some memory for the finds (we fix a maximum number of finds)
// A "struct find" is used to store a find, basically just a mapping between the key word index and the position.
int maxfinds = 2000;
struct find *finds_h, *finds_d;
Err(hipHostMalloc(&finds_h, maxfinds * sizeof(struct find)));
Err(hipMalloc(&finds_d, maxfinds * sizeof(struct find)));
// find the keywords
timestart();
Err(hipMemcpy(data_d, data_h, datasize, hipMemcpyHostToDevice));
Err(hipMemcpyToSymbol(keywords, keywords_h, keywordssize));
Err(hipMemcpyToSymbol(wordsindex, wordsindex_h, sizeof(int) * numwords));
unsigned int *findcount_d;
unsigned int findcount = 0;
Err(hipMalloc(&findcount_d, sizeof(unsigned int)));
Err(
hipMemcpy(findcount_d, &findcount, sizeof(unsigned int),
hipMemcpyHostToDevice));
int t = 256;
hipLaunchKernelGGL(( findkeywords), dim3((datasize-1)/t +1), dim3(t), t+MAXKEYLEN, 0, data_d, datasize, numwords, finds_d, maxfinds,
findcount_d);
Err(
hipMemcpy(&findcount, findcount_d, sizeof(unsigned int),
hipMemcpyDeviceToHost));
Err(
hipMemcpy(finds_h, finds_d,
sizeof(struct find) * min(findcount, maxfinds),
hipMemcpyDeviceToHost));
timestop();
// display the result
for (int k = 0; k < min(findcount, maxfinds); k++) {
printf("%s : %d\n", &keywords_h[wordsindex_h[finds_h[k].word]],
finds_h[k].pos);
}
// printf("time : ");
// timereport();
// printf("(s)\n");
return 0;
}
| keywordfinderV4.cu | /*
* keywordfinder.c - finds key words in a large file
*
* Created on: 19 Feb. 2019
* Author: Eric McCreath
*/
#include<stdio.h>
#include<stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <sys/time.h>
#include<cuda.h>
// this macro checks for errors in cuda calls
#define Err(ans) { gpucheck((ans), __FILE__, __LINE__); }
inline void gpucheck(cudaError_t code, const char *file, int line) {
if (code != cudaSuccess) {
fprintf(stderr, "GPU Err: %s %s %d\n", cudaGetErrorString(code), file,
line);
exit(code);
}
}
struct timeval tv1, tv2;
void timestart() {
gettimeofday(&tv1, NULL);
}
void timestop() {
gettimeofday(&tv2, NULL);
}
void timereport() {
printf("%f",
(double) (tv2.tv_usec - tv1.tv_usec) / 1000000
+ (double) (tv2.tv_sec - tv1.tv_sec));
}
void err(const char *str) {
printf("error : %s\n", str);
exit(1);
}
int loadfile(char *str, char **data) {
struct stat filestat;
if (stat(str, &filestat) == -1)
err("problem stating file");
FILE *file;
if ((file = fopen(str, "r")) == NULL)
err("problem opening file");
Err(cudaMallocHost(data, filestat.st_size));
fread(*data, filestat.st_size, 1, file);
return filestat.st_size;
}
int count(char marker, char *data, int size) {
int i;
int sum = 0;
for (i = 0; i < size; i++) {
if (data[i] == marker)
sum++;
}
return sum;
}
struct find {
int pos;
int word;
};
__device__ int check(char *sdata, int datasize, int pos, int tidx, char *word) {
int i = 0;
while (word[i] != 0 && sdata[tidx + i] == word[i] && pos + i < datasize) {
i++;
}
if (word[i] == 0)
return 1;
return 0;
}
#define MAXKEYWORDSIZE 1024
__constant__ char keywords[MAXKEYWORDSIZE];
#define MAXKEYWORDS 100
__constant__ int wordsindex[MAXKEYWORDS];
#define MAXKEYLEN 20
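// findkeywords: each block stages blockDim.x bytes of the text in shared memory,
// plus up to MAXKEYLEN halo bytes from the next tile so a keyword that straddles
// the tile boundary can still be matched. Hits are appended to finds[] through an
// atomic counter; hits past maxfinds are counted but not stored.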
__global__ void findkeywords(char *data, int datasize, int numwords,
struct find *finds, int maxfinds, unsigned int *findcount) {
int i;
char *word;
int resultspot;
extern __shared__ char datashared[];
int pos = blockIdx.x * blockDim.x + threadIdx.x;
	datashared[threadIdx.x] = (pos < datasize) ? data[pos] : 0; // guard the tail block against reading past the end of the file
if (threadIdx.x < MAXKEYLEN && pos+blockDim.x < datasize) datashared[threadIdx.x + blockDim.x] = data[pos+blockDim.x];
__syncthreads();
// printf("pos : %d", pos);
for (i = 0; i < numwords; i++) {
word = keywords + wordsindex[i];
if (check(datashared, datasize, pos, threadIdx.x, word)) {
resultspot = atomicInc(findcount, datasize);
if (resultspot < maxfinds) {
finds[resultspot].pos = pos;
finds[resultspot].word = i;
}
}
}
// printf("fc : %d\n", *findcount);
// printf("efc : %d\n", *findcount);
}
int main(int argc, char *argv[]) {
if (argc != 3)
err("usage: keywordfinder textfile keywords");
// load the text files into memory
char *data_h, *data_d;
int datasize;
datasize = loadfile(argv[1], &data_h);
Err(cudaMalloc(&data_d, datasize));
char *keywords_h;
int keywordssize;
keywordssize = loadfile(argv[2], &keywords_h);
//Err(cudaMalloc(&keywords_d, keywordssize));
// obtain an index into the keywords. So "wordsindex[i]" is
// the position within "keywords" that keyword "i" starts.
int numwords = count('\n', keywords_h, keywordssize);
int *wordsindex_h;
Err(cudaMallocHost(&wordsindex_h, sizeof(int) * numwords));
//Err(cudaMalloc(&wordsindex_d, sizeof(int) * numwords));
if (numwords > MAXKEYWORDS || keywordssize > MAXKEYWORDSIZE)
err("problem too many keywords for constant memory");
int i;
int pos = 0;
wordsindex_h[pos++] = 0;
for (i = 0; i < keywordssize; i++) {
if (keywords_h[i] == '\n') {
keywords_h[i] = 0;
if (pos < numwords)
wordsindex_h[pos++] = i + 1;
}
}
// display the key words
//for (int j=0;j<numwords;j++)
//printf("word : %d %s\n", wordsindex[j], &keywords[wordsindex[j]]);
// set aside some memory for the finds (we fix a maximum number of finds)
// A "struct find" is used to store a find, basically just a mapping between the key word index and the position.
int maxfinds = 2000;
struct find *finds_h, *finds_d;
Err(cudaMallocHost(&finds_h, maxfinds * sizeof(struct find)));
Err(cudaMalloc(&finds_d, maxfinds * sizeof(struct find)));
// find the keywords
timestart();
Err(cudaMemcpy(data_d, data_h, datasize, cudaMemcpyHostToDevice));
Err(cudaMemcpyToSymbol(keywords, keywords_h, keywordssize));
Err(cudaMemcpyToSymbol(wordsindex, wordsindex_h, sizeof(int) * numwords));
unsigned int *findcount_d;
unsigned int findcount = 0;
Err(cudaMalloc(&findcount_d, sizeof(unsigned int)));
Err(
cudaMemcpy(findcount_d, &findcount, sizeof(unsigned int),
cudaMemcpyHostToDevice));
int t = 256;
findkeywords<<<(datasize-1)/t +1, t, t+MAXKEYLEN>>>(data_d, datasize, numwords, finds_d, maxfinds,
findcount_d);
Err(
cudaMemcpy(&findcount, findcount_d, sizeof(unsigned int),
cudaMemcpyDeviceToHost));
Err(
cudaMemcpy(finds_h, finds_d,
sizeof(struct find) * min(findcount, maxfinds),
cudaMemcpyDeviceToHost));
timestop();
// display the result
for (int k = 0; k < min(findcount, maxfinds); k++) {
printf("%s : %d\n", &keywords_h[wordsindex_h[finds_h[k].word]],
finds_h[k].pos);
}
// printf("time : ");
// timereport();
// printf("(s)\n");
return 0;
}
|
e340b4700102146bf757b95130c928213a3730e6.hip | // !!! This is a file automatically generated by hipify!!!
#include "../THCTensorMathCompare.cuh"
#include "THHTensor.hpp"
#include "THHStream.hpp"
#include "../generic/THCTensorMathCompare.cu"
#include "../THCGenerateCharType.h"
| e340b4700102146bf757b95130c928213a3730e6.cu | #include "../THCTensorMathCompare.cuh"
#include "THCTensor.hpp"
#include "THCStream.hpp"
#include "../generic/THCTensorMathCompare.cu"
#include "../THCGenerateCharType.h"
|