| hip_filename (string, length 5–84) | hip_content (string, length 79–9.69M) | cuda_filename (string, length 4–83) | cuda_content (string, length 19–9.69M) |
|---|---|---|---|
f114cffcd17fef49b80c3847f480dfd0ad338097.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <hip/hip_runtime.h>
#include "select3Plugin.h"
#define CHECK(status) \
do \
{ \
auto ret = (status); \
if (ret != 0) \
{ \
std::cout << "Cuda failure: " << ret << std::endl; \
abort(); \
} \
} while (0)
using namespace nvinfer1;
using nvinfer1::plugin::RNNTSelectPlugin;
using nvinfer1::plugin::RNNTSelectPluginCreator;
REGISTER_TENSORRT_PLUGIN(RNNTSelectPluginCreator);
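// select3: for each batch example whose input_select flag is set, copy the hidden
// state, cell state, and winner tensors from the first candidate set (input0_*)
// into the outputs (isel_*); examples with the flag unset are left untouched.
// The input1_* pointers are accepted but not read by this kernel.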
__global__ void select3(int batchSize,
int size1,
int size2,
int size3,
bool* input_select,
half* input0_hidden,
half* input1_hidden,
half* input0_cell,
half* input1_cell,
int32_t* input0_winner,
int32_t* input1_winner,
half* isel_hidden,
half* isel_cell,
int32_t* isel_winner) {
int element = blockIdx.x * blockDim.x + threadIdx.x;
int example = blockIdx.y * blockDim.y + threadIdx.y;
if (example >= batchSize) return;
bool sel = input_select[example];
if (!sel) return;
if (element < size1) {
isel_hidden[example * size1 + element] = input0_hidden[example * size1 + element];
}
if (element < size2) {
isel_cell[example * size2 + element] = input0_cell[example * size2 + element];
}
if (element < size3) {
isel_winner[example * size3 + element] = input0_winner[example * size3 + element];
}
}
RNNTSelectPlugin::RNNTSelectPlugin(const PluginFieldCollection *fc) {
}
RNNTSelectPlugin::RNNTSelectPlugin(const void* data, size_t length) {}
const char* RNNTSelectPlugin::getPluginType() const noexcept
{
return "RNNTSelectPlugin";
}
const char* RNNTSelectPlugin::getPluginVersion() const noexcept
{
return "1";
}
void RNNTSelectPlugin::setPluginNamespace(const char* libNamespace) noexcept
{
mNamespace = libNamespace;
}
const char* RNNTSelectPlugin::getPluginNamespace() const noexcept
{
return mNamespace.c_str();
}
void RNNTSelectPlugin::destroy() noexcept
{
delete this;
}
IPluginV2DynamicExt* RNNTSelectPlugin::clone() const noexcept
{
size_t sz = getSerializationSize();
char* buff = (char*) malloc(getSerializationSize());
// serialize() here is only an assertion sanity check: this plugin is stateless, so its serialization size is zero
serialize(buff);
RNNTSelectPlugin* ret = new RNNTSelectPlugin(buff, sz);
free(buff);
return ret;
}
int RNNTSelectPlugin::getNbOutputs() const noexcept
{
return 3;
}
DimsExprs RNNTSelectPlugin::getOutputDimensions(
int outputIndex, const DimsExprs* inputs, int nbInputs, IExprBuilder& exprBuilder) noexcept
{
assert(outputIndex >= 0 && outputIndex < this->getNbOutputs());
assert(nbInputs == 7);
return inputs[outputIndex * 2 + 1];
}
bool RNNTSelectPlugin::supportsFormatCombination(
int pos, const PluginTensorDesc* inOut, int nbInputs, int nbOutputs) noexcept
{
if (inOut[pos].format != TensorFormat::kLINEAR)
{
return false;
}
if (nbInputs != 7 || nbOutputs != 3) {
printf("Wrong input or output count %d %d\n", nbInputs, nbOutputs);
return false;
}
// if (pos == 0 && inOut[pos].type != DataType::kBOOL) {
// return false;
// }
if (pos == 0 && inOut[pos].type != DataType::kINT32) {
return false;
}
if (pos >= 1 && pos < 5 && inOut[pos].type != DataType::kHALF) {
return false;
}
if (pos >= 5 && pos < 7 && inOut[pos].type != DataType::kINT32) {
return false;
}
if (pos >= 7 && pos < 9 && inOut[pos].type != DataType::kHALF) {
return false;
}
if (pos == 9 && inOut[pos].type != DataType::kINT32) {
return false;
}
return true;
}
void RNNTSelectPlugin::configurePlugin(
const DynamicPluginTensorDesc* in, int nbInputs, const DynamicPluginTensorDesc* out, int nbOutputs) noexcept
{
}
int RNNTSelectPlugin::initialize() noexcept
{
return hipSuccess;
}
void RNNTSelectPlugin::terminate() noexcept {}
size_t RNNTSelectPlugin::getWorkspaceSize(
const PluginTensorDesc* inputs, int nbInputs, const PluginTensorDesc* outputs, int nbOutputs) const noexcept
{
size_t size = 0;
return size;
}
// int RNNTSelectPlugin::enqueue(int batchSize, const void* const* inputs, void** outputs, void* workspace, hipStream_t
// stream) {
int RNNTSelectPlugin::enqueue(const PluginTensorDesc* inputDesc, const PluginTensorDesc* outputDesc,
const void* const* inputs, void* const* outputs, void* workspace, hipStream_t stream) noexcept
{
int batchSize = inputDesc[0].dims.d[0];
int size1 = inputDesc[1].dims.d[1] * inputDesc[1].dims.d[2];
int size2 = inputDesc[3].dims.d[1] * inputDesc[3].dims.d[2];
int size3 = inputDesc[5].dims.d[1] * inputDesc[5].dims.d[2];
// Isn't there a max int somewhere? Probably.
int maxSize = size1 > size2 ? size1 : size2;
maxSize = maxSize > size3 ? maxSize : size3;
dim3 blockDim = dim3(32, 8, 1);
dim3 gridDim = dim3((maxSize + blockDim.x - 1) / blockDim.x, (batchSize + blockDim.y - 1) / blockDim.y, 1);
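// One thread per (element, example) pair: the grid is sized with ceiling division so every
// element of the largest input is covered. hipLaunchKernelGGL(kernel, grid, block,
// sharedMemBytes, stream, args...) is HIP's equivalent of the CUDA launch
// kernel<<<grid, block, sharedMemBytes, stream>>>(args...).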
hipLaunchKernelGGL(( select3) , dim3(gridDim), dim3(blockDim), 0, stream , batchSize,
size1,
size2,
size3,
(bool*)inputs[0],
(half*)inputs[1],
(half*)inputs[2],
(half*)inputs[3],
(half*)inputs[4],
(int32_t*)inputs[5],
(int32_t*)inputs[6],
(half*)outputs[0],
(half*)outputs[1],
(int32_t*)outputs[2]);
return 0;
}
size_t RNNTSelectPlugin::getSerializationSize() const noexcept
{
size_t sz = 0;
return sz;
}
void RNNTSelectPlugin::serialize(void* buffer) const noexcept
{
// Use maybe_unused attribute when updating to CUDA_STANDARD C++17
#ifndef NDEBUG
char* d = static_cast<char*>(buffer);
auto *d_start = d;
#endif
assert(d == d_start + getSerializationSize());
}
nvinfer1::DataType RNNTSelectPlugin::getOutputDataType(
int index, const nvinfer1::DataType* inputTypes, int nbInputs) const noexcept
{
if (index < 2)
{
return DataType::kHALF;
}
else {
return DataType::kINT32;
}
}
template <typename T>
void RNNTSelectPlugin::write(char*& buffer, const T& val) const
{
*reinterpret_cast<T*>(buffer) = val;
buffer += sizeof(T);
}
template <typename T>
void RNNTSelectPlugin::read(const char*& buffer, T& val) const
{
val = *reinterpret_cast<const T*>(buffer);
buffer += sizeof(T);
}
const char* RNNTSelectPluginCreator::getPluginName() const noexcept
{
return "RNNTSelectPlugin";
}
const char* RNNTSelectPluginCreator::getPluginVersion() const noexcept
{
return "1";
}
const PluginFieldCollection* RNNTSelectPluginCreator::getFieldNames() noexcept
{
return nullptr;
}
void RNNTSelectPluginCreator::setPluginNamespace(const char* libNamespace) noexcept
{
mNamespace = libNamespace;
}
const char* RNNTSelectPluginCreator::getPluginNamespace() const noexcept
{
return mNamespace.c_str();
}
IPluginV2DynamicExt* RNNTSelectPluginCreator::createPlugin(const char* name, const PluginFieldCollection* fc) noexcept
{
return new RNNTSelectPlugin(fc);
}
IPluginV2DynamicExt* RNNTSelectPluginCreator::deserializePlugin(
const char* name, const void* serialData, size_t serialLength) noexcept
{
return new RNNTSelectPlugin(serialData, serialLength);
}
|
f114cffcd17fef49b80c3847f480dfd0ad338097.cu
|
/*
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuda.h>
#include "select3Plugin.h"
#define CHECK(status) \
do \
{ \
auto ret = (status); \
if (ret != 0) \
{ \
std::cout << "Cuda failure: " << ret << std::endl; \
abort(); \
} \
} while (0)
using namespace nvinfer1;
using nvinfer1::plugin::RNNTSelectPlugin;
using nvinfer1::plugin::RNNTSelectPluginCreator;
REGISTER_TENSORRT_PLUGIN(RNNTSelectPluginCreator);
__global__ void select3(int batchSize,
int size1,
int size2,
int size3,
bool* input_select,
half* input0_hidden,
half* input1_hidden,
half* input0_cell,
half* input1_cell,
int32_t* input0_winner,
int32_t* input1_winner,
half* isel_hidden,
half* isel_cell,
int32_t* isel_winner) {
int element = blockIdx.x * blockDim.x + threadIdx.x;
int example = blockIdx.y * blockDim.y + threadIdx.y;
if (example >= batchSize) return;
bool sel = input_select[example];
if (!sel) return;
if (element < size1) {
isel_hidden[example * size1 + element] = input0_hidden[example * size1 + element];
}
if (element < size2) {
isel_cell[example * size2 + element] = input0_cell[example * size2 + element];
}
if (element < size3) {
isel_winner[example * size3 + element] = input0_winner[example * size3 + element];
}
}
RNNTSelectPlugin::RNNTSelectPlugin(const PluginFieldCollection *fc) {
}
RNNTSelectPlugin::RNNTSelectPlugin(const void* data, size_t length) {}
const char* RNNTSelectPlugin::getPluginType() const noexcept
{
return "RNNTSelectPlugin";
}
const char* RNNTSelectPlugin::getPluginVersion() const noexcept
{
return "1";
}
void RNNTSelectPlugin::setPluginNamespace(const char* libNamespace) noexcept
{
mNamespace = libNamespace;
}
const char* RNNTSelectPlugin::getPluginNamespace() const noexcept
{
return mNamespace.c_str();
}
void RNNTSelectPlugin::destroy() noexcept
{
delete this;
}
IPluginV2DynamicExt* RNNTSelectPlugin::clone() const noexcept
{
size_t sz = getSerializationSize();
char* buff = (char*) malloc(getSerializationSize());
// serialize() here is only an assertion sanity check: this plugin is stateless, so its serialization size is zero
serialize(buff);
RNNTSelectPlugin* ret = new RNNTSelectPlugin(buff, sz);
free(buff);
return ret;
}
int RNNTSelectPlugin::getNbOutputs() const noexcept
{
return 3;
}
DimsExprs RNNTSelectPlugin::getOutputDimensions(
int outputIndex, const DimsExprs* inputs, int nbInputs, IExprBuilder& exprBuilder) noexcept
{
assert(outputIndex >= 0 && outputIndex < this->getNbOutputs());
assert(nbInputs == 7);
return inputs[outputIndex * 2 + 1];
}
bool RNNTSelectPlugin::supportsFormatCombination(
int pos, const PluginTensorDesc* inOut, int nbInputs, int nbOutputs) noexcept
{
if (inOut[pos].format != TensorFormat::kLINEAR)
{
return false;
}
if (nbInputs != 7 || nbOutputs != 3) {
printf("Wrong input or output count %d %d\n", nbInputs, nbOutputs);
return false;
}
// if (pos == 0 && inOut[pos].type != DataType::kBOOL) {
// return false;
// }
if (pos == 0 && inOut[pos].type != DataType::kINT32) {
return false;
}
if (pos >= 1 && pos < 5 && inOut[pos].type != DataType::kHALF) {
return false;
}
if (pos >= 5 && pos < 7 && inOut[pos].type != DataType::kINT32) {
return false;
}
if (pos >= 7 && pos < 9 && inOut[pos].type != DataType::kHALF) {
return false;
}
if (pos == 9 && inOut[pos].type != DataType::kINT32) {
return false;
}
return true;
}
void RNNTSelectPlugin::configurePlugin(
const DynamicPluginTensorDesc* in, int nbInputs, const DynamicPluginTensorDesc* out, int nbOutputs) noexcept
{
}
int RNNTSelectPlugin::initialize() noexcept
{
return cudaSuccess;
}
void RNNTSelectPlugin::terminate() noexcept {}
size_t RNNTSelectPlugin::getWorkspaceSize(
const PluginTensorDesc* inputs, int nbInputs, const PluginTensorDesc* outputs, int nbOutputs) const noexcept
{
size_t size = 0;
return size;
}
// int RNNTSelectPlugin::enqueue(int batchSize, const void* const* inputs, void** outputs, void* workspace, cudaStream_t
// stream) {
int RNNTSelectPlugin::enqueue(const PluginTensorDesc* inputDesc, const PluginTensorDesc* outputDesc,
const void* const* inputs, void* const* outputs, void* workspace, cudaStream_t stream) noexcept
{
int batchSize = inputDesc[0].dims.d[0];
int size1 = inputDesc[1].dims.d[1] * inputDesc[1].dims.d[2];
int size2 = inputDesc[3].dims.d[1] * inputDesc[3].dims.d[2];
int size3 = inputDesc[5].dims.d[1] * inputDesc[5].dims.d[2];
// Isn't there a max int somewhere? Probably.
int maxSize = size1 > size2 ? size1 : size2;
maxSize = maxSize > size3 ? maxSize : size3;
dim3 blockDim = dim3(32, 8, 1);
dim3 gridDim = dim3((maxSize + blockDim.x - 1) / blockDim.x, (batchSize + blockDim.y - 1) / blockDim.y, 1);
select3 <<< gridDim, blockDim, 0, stream >>> (batchSize,
size1,
size2,
size3,
(bool*)inputs[0],
(half*)inputs[1],
(half*)inputs[2],
(half*)inputs[3],
(half*)inputs[4],
(int32_t*)inputs[5],
(int32_t*)inputs[6],
(half*)outputs[0],
(half*)outputs[1],
(int32_t*)outputs[2]);
return 0;
}
size_t RNNTSelectPlugin::getSerializationSize() const noexcept
{
size_t sz = 0;
return sz;
}
void RNNTSelectPlugin::serialize(void* buffer) const noexcept
{
// Use maybe_unused attribute when updating to CUDA_STANDARD C++17
#ifndef NDEBUG
char* d = static_cast<char*>(buffer);
auto *d_start = d;
#endif
assert(d == d_start + getSerializationSize());
}
nvinfer1::DataType RNNTSelectPlugin::getOutputDataType(
int index, const nvinfer1::DataType* inputTypes, int nbInputs) const noexcept
{
if (index < 2)
{
return DataType::kHALF;
}
else {
return DataType::kINT32;
}
}
template <typename T>
void RNNTSelectPlugin::write(char*& buffer, const T& val) const
{
*reinterpret_cast<T*>(buffer) = val;
buffer += sizeof(T);
}
template <typename T>
void RNNTSelectPlugin::read(const char*& buffer, T& val) const
{
val = *reinterpret_cast<const T*>(buffer);
buffer += sizeof(T);
}
const char* RNNTSelectPluginCreator::getPluginName() const noexcept
{
return "RNNTSelectPlugin";
}
const char* RNNTSelectPluginCreator::getPluginVersion() const noexcept
{
return "1";
}
const PluginFieldCollection* RNNTSelectPluginCreator::getFieldNames() noexcept
{
return nullptr;
}
void RNNTSelectPluginCreator::setPluginNamespace(const char* libNamespace) noexcept
{
mNamespace = libNamespace;
}
const char* RNNTSelectPluginCreator::getPluginNamespace() const noexcept
{
return mNamespace.c_str();
}
IPluginV2DynamicExt* RNNTSelectPluginCreator::createPlugin(const char* name, const PluginFieldCollection* fc) noexcept
{
return new RNNTSelectPlugin(fc);
}
IPluginV2DynamicExt* RNNTSelectPluginCreator::deserializePlugin(
const char* name, const void* serialData, size_t serialLength) noexcept
{
return new RNNTSelectPlugin(serialData, serialLength);
}
|
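The pair above shows the main change hipify makes to this plugin: the CUDA triple-chevron launch in the `.cu` file becomes a `hipLaunchKernelGGL` call in the `.hip` file, while CUDA runtime identifiers (`cudaStream_t`, `cudaSuccess`, the `<cuda.h>` include) are renamed to their `hip*` equivalents. Below is a minimal, self-contained sketch of that launch correspondence; the `scale` kernel and `launchScale` wrapper are hypothetical and exist only for illustration.

```cpp
// Minimal sketch (not part of the dataset) contrasting the two launch syntaxes.
#include <hip/hip_runtime.h>

__global__ void scale(float* data, float factor, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) data[i] *= factor;
}

void launchScale(float* d_data, float factor, int n, hipStream_t stream)
{
    dim3 blockDim(256, 1, 1);
    dim3 gridDim((n + blockDim.x - 1) / blockDim.x, 1, 1);
    // CUDA form, as in the .cu file:
    //     scale<<<gridDim, blockDim, 0, stream>>>(d_data, factor, n);
    // HIP form emitted by hipify, as in the .hip file:
    hipLaunchKernelGGL(scale, gridDim, blockDim, 0, stream, d_data, factor, n);
}
```

Both forms pass the grid, block, dynamic shared-memory size, and stream; `hipLaunchKernelGGL` simply moves the kernel arguments into its trailing parameter list.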
e7c11097ca851f0574cd857b21b0d05a5e937b86.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* _reg_mutualinformation_gpu.cu
*
*
* Created by Marc Modat on 24/03/2009.
* Copyright (c) 2009, University College London. All rights reserved.
* Centre for Medical Image Computing (CMIC)
* See the LICENSE.txt file in the nifty_reg root folder
*
*/
#ifndef _REG_MUTUALINFORMATION_GPU_CU
#define _REG_MUTUALINFORMATION_GPU_CU
#include "_reg_blocksize_gpu.h"
#include "_reg_mutualinformation_gpu.h"
#include "_reg_mutualinformation_kernels.cu"
#include <iostream>
/// Called when we have two target and two source images
void reg_getEntropies2x2_gpu(nifti_image *targetImages,
nifti_image *resultImages,
//int type,
unsigned int *target_bins, // should be an array of size num_target_volumes
unsigned int *result_bins, // should be an array of size num_result_volumes
double *probaJointHistogram,
double *logJointHistogram,
float **logJointHistogram_d,
double *entropies,
int *mask)
{
// The joint histogram is filled using the CPU arrays
//Check the type of the target and source images
if(targetImages->datatype!=NIFTI_TYPE_FLOAT32 || resultImages->datatype!=NIFTI_TYPE_FLOAT32){
printf("[NiftyReg CUDA] reg_getEntropies2x2_gpu: This kernel should only be used floating images.\n");
exit(1);
}
unsigned int voxelNumber = targetImages->nx*targetImages->ny*targetImages->nz;
unsigned int binNumber = target_bins[0]*target_bins[1]*result_bins[0]*result_bins[1]+
target_bins[0]*target_bins[1]+result_bins[0]*result_bins[1];
float *ref1Ptr = static_cast<float *>(targetImages->data);
float *ref2Ptr = &ref1Ptr[voxelNumber];
float *res1Ptr = static_cast<float *>(resultImages->data);
float *res2Ptr = &res1Ptr[voxelNumber];
int *maskPtr = &mask[0];
memset(probaJointHistogram, 0, binNumber*sizeof(double));
double voxelSum=0.;
for(unsigned int i=0;i<voxelNumber;++i){
if(*maskPtr++>-1){
int val1 = static_cast<int>(*ref1Ptr);
int val2 = static_cast<int>(*ref2Ptr);
int val3 = static_cast<int>(*res1Ptr);
int val4 = static_cast<int>(*res2Ptr);
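// val==val is false only for NaN, so this condition rejects NaN intensities
// before they are used as histogram bin indices.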
if(val1==val1 && val2==val2 && val3==val3 && val4==val4 &&
val1>-1 && val1<(int)target_bins[0] && val2>-1 && val2<(int)target_bins[1] &&
val3>-1 && val3<(int)result_bins[0] && val4>-1 && val4<(int)result_bins[1]){
unsigned int index = ((val4*result_bins[0]+val3)*target_bins[1]+val2)*target_bins[0]+val1;
probaJointHistogram[index]++;
voxelSum++;
}
}
ref1Ptr++;
ref2Ptr++;
res1Ptr++;
res2Ptr++;
}
// The joint histogram is normalised and transferred to the device
float *logJointHistogram_float=NULL;
NR_CUDA_SAFE_CALL(hipHostMalloc(&logJointHistogram_float,binNumber*sizeof(float)));
for(unsigned int i=0;i<target_bins[0]*target_bins[1]*result_bins[0]*result_bins[1];++i)
logJointHistogram_float[i]=float(probaJointHistogram[i]/voxelSum);
NR_CUDA_SAFE_CALL(hipMemcpy(*logJointHistogram_d,logJointHistogram_float,binNumber*sizeof(float),hipMemcpyHostToDevice));
NR_CUDA_SAFE_CALL(hipHostFree(logJointHistogram_float));
float *tempHistogram=NULL;
NR_CUDA_SAFE_CALL(hipMalloc(&tempHistogram,binNumber*sizeof(float)));
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_firstTargetBin,&target_bins[0],sizeof(int)));
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_secondTargetBin,&target_bins[1],sizeof(int)));
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_firstResultBin,&result_bins[0],sizeof(int)));
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_secondResultBin,&result_bins[1],sizeof(int)));
// The joint histogram is smoothed along the x axis
NR_CUDA_SAFE_CALL(hipBindTexture(0, histogramTexture, *logJointHistogram_d, binNumber*sizeof(float)));
dim3 B1(Block_reg_smoothJointHistogramX,1,1);
const int gridSizesmoothJointHistogramX=(int)ceil(sqrtf((float)(target_bins[1]*result_bins[0]*result_bins[1])/(float)B1.x));
dim3 G1(gridSizesmoothJointHistogramX,gridSizesmoothJointHistogramX,1);
hipLaunchKernelGGL(( reg_smoothJointHistogramX_kernel) , dim3(G1), dim3(B1) , 0, 0, tempHistogram);
NR_CUDA_CHECK_KERNEL(G1,B1)
NR_CUDA_SAFE_CALL(hipUnbindTexture(histogramTexture));
// The joint histogram is smoothed along the y axis
NR_CUDA_SAFE_CALL(hipBindTexture(0, histogramTexture, tempHistogram, binNumber*sizeof(float)));
dim3 B2(Block_reg_smoothJointHistogramY,1,1);
const int gridSizesmoothJointHistogramY=(int)ceil(sqrtf((float)(target_bins[1]*result_bins[0]*result_bins[1])/(float)B2.x));
dim3 G2(gridSizesmoothJointHistogramY,gridSizesmoothJointHistogramY,1);
hipLaunchKernelGGL(( reg_smoothJointHistogramY_kernel) , dim3(G2), dim3(B2) , 0, 0, *logJointHistogram_d);
NR_CUDA_CHECK_KERNEL(G2,B2)
NR_CUDA_SAFE_CALL(hipUnbindTexture(histogramTexture));
// The joint histogram is smoothed along the z axis
NR_CUDA_SAFE_CALL(hipBindTexture(0, histogramTexture, *logJointHistogram_d, binNumber*sizeof(float)));
dim3 B3(Block_reg_smoothJointHistogramZ,1,1);
const int gridSizesmoothJointHistogramZ=(int)ceil(sqrtf((float)(target_bins[1]*result_bins[0]*result_bins[1])/(float)B3.x));
dim3 G3(gridSizesmoothJointHistogramZ,gridSizesmoothJointHistogramZ,1);
hipLaunchKernelGGL(( reg_smoothJointHistogramZ_kernel) , dim3(G3), dim3(B3) , 0, 0, tempHistogram);
NR_CUDA_CHECK_KERNEL(G3,B3)
NR_CUDA_SAFE_CALL(hipUnbindTexture(histogramTexture));
// The joint histogram is smoothed along the w axis
NR_CUDA_SAFE_CALL(hipBindTexture(0, histogramTexture, tempHistogram, binNumber*sizeof(float)));
dim3 B4(Block_reg_smoothJointHistogramW,1,1);
const int gridSizesmoothJointHistogramW=(int)ceil(sqrtf((float)(target_bins[1]*result_bins[0]*result_bins[1])/(float)B4.x));
dim3 G4(gridSizesmoothJointHistogramW,gridSizesmoothJointHistogramW,1);
hipLaunchKernelGGL(( reg_smoothJointHistogramW_kernel) , dim3(G4), dim3(B4) , 0, 0, *logJointHistogram_d);
NR_CUDA_CHECK_KERNEL(G4,B4)
NR_CUDA_SAFE_CALL(hipUnbindTexture(histogramTexture));
NR_CUDA_SAFE_CALL(hipFree(tempHistogram));
NR_CUDA_SAFE_CALL(hipHostMalloc(&logJointHistogram_float,binNumber*sizeof(float)));
NR_CUDA_SAFE_CALL(hipMemcpy(logJointHistogram_float,*logJointHistogram_d,binNumber*sizeof(float),hipMemcpyDeviceToHost));
for(unsigned int i=0;i<target_bins[0]*target_bins[1]*result_bins[0]*result_bins[1];++i)
probaJointHistogram[i]=logJointHistogram_float[i];
NR_CUDA_SAFE_CALL(hipHostFree(logJointHistogram_float));
// The 4D joint histogram is first marginalised along the x axis (target_bins[0])
float *temp3DHistogram=NULL;
NR_CUDA_SAFE_CALL(hipMalloc(&temp3DHistogram,target_bins[1]*result_bins[0]*result_bins[1]*sizeof(float)));
NR_CUDA_SAFE_CALL(hipBindTexture(0, histogramTexture, *logJointHistogram_d, binNumber*sizeof(float)));
dim3 B5(Block_reg_marginaliseTargetX,1,1);
const int gridSizesmoothJointHistogramA=(int)ceil(sqrtf((float)(target_bins[1]*result_bins[0]*result_bins[1])/(float)B5.x));
dim3 G5(gridSizesmoothJointHistogramA,gridSizesmoothJointHistogramA,1);
hipLaunchKernelGGL(( reg_marginaliseTargetX_kernel) , dim3(G5), dim3(B5) , 0, 0, temp3DHistogram);
NR_CUDA_CHECK_KERNEL(G5,B5)
NR_CUDA_SAFE_CALL(hipUnbindTexture(histogramTexture));
// The 3D joint histogram is then marginalised along the y axis (target_bins[1])
float *temp2DHistogram=NULL;
NR_CUDA_SAFE_CALL(hipMalloc(&temp2DHistogram,result_bins[0]*result_bins[1]*sizeof(float)));
NR_CUDA_SAFE_CALL(hipBindTexture(0, histogramTexture, temp3DHistogram, target_bins[1]*result_bins[0]*result_bins[1]*sizeof(float)));
dim3 B6(Block_reg_marginaliseTargetXY,1,1);
const int gridSizesmoothJointHistogramB=(int)ceil(sqrtf((float)(target_bins[1]*result_bins[0]*result_bins[1])/(float)B6.x));
dim3 G6(gridSizesmoothJointHistogramB,gridSizesmoothJointHistogramB,1);
hipLaunchKernelGGL(( reg_marginaliseTargetXY_kernel) , dim3(G6), dim3(B6) , 0, 0, temp2DHistogram);
NR_CUDA_CHECK_KERNEL(G6,B6)
NR_CUDA_SAFE_CALL(hipUnbindTexture(histogramTexture));
NR_CUDA_SAFE_CALL(hipFree(temp3DHistogram));
// Transfer the data via a temporary array of floats: we cannot copy it directly into
// probaJointHistogram, which is an array of doubles, as hipMemcpy would reinterpret the bytes and produce unpredictable results
const int total_target_entries = target_bins[0] * target_bins[1];
const int total_result_entries = result_bins[0] * result_bins[1];
const int num_probabilities = total_target_entries * total_result_entries;
int offset = num_probabilities + total_target_entries;
float *temp2DHistogram_h = new float[total_result_entries];
hipMemcpy(temp2DHistogram_h,temp2DHistogram,total_result_entries*sizeof(float), hipMemcpyDeviceToHost);
for (int i = 0; i < total_result_entries; ++i) {
probaJointHistogram[offset + i] = temp2DHistogram_h[i];
}
delete[] temp2DHistogram_h;
NR_CUDA_SAFE_CALL(hipFree(temp2DHistogram));
// Now marginalise over the result axes.
// First over W axes. (result_bins[1])
temp3DHistogram=NULL;
NR_CUDA_SAFE_CALL(hipMalloc(&temp3DHistogram, target_bins[0]*target_bins[1]*result_bins[0]*sizeof(float)));
NR_CUDA_SAFE_CALL(hipBindTexture(0, histogramTexture, *logJointHistogram_d, binNumber*sizeof(float)));
dim3 B7(Block_reg_marginaliseResultX,1,1);
const int gridSizesmoothJointHistogramC=(int)ceil(sqrtf((float)(target_bins[1]*result_bins[0]*result_bins[1])/(float)B7.x));
dim3 G7(gridSizesmoothJointHistogramC,gridSizesmoothJointHistogramC,1);
hipLaunchKernelGGL(( reg_marginaliseResultX_kernel) , dim3(G7), dim3(B7) , 0, 0, temp3DHistogram);
NR_CUDA_CHECK_KERNEL(G7,B7)
NR_CUDA_SAFE_CALL(hipUnbindTexture(histogramTexture));
// Now over Z axes. (result_bins[0])
temp2DHistogram=NULL;
NR_CUDA_SAFE_CALL(hipMalloc(&temp2DHistogram,target_bins[0]*target_bins[1]*sizeof(float)));
NR_CUDA_SAFE_CALL(hipBindTexture(0, histogramTexture, temp3DHistogram, target_bins[0]*target_bins[1]*result_bins[0]*sizeof(float)));
dim3 B8(Block_reg_marginaliseResultXY,1,1);
const int gridSizesmoothJointHistogramD=(int)ceil(sqrtf((float)(target_bins[1]*result_bins[0]*result_bins[1])/(float)B8.x));
dim3 G8(gridSizesmoothJointHistogramD,gridSizesmoothJointHistogramD,1);
hipLaunchKernelGGL(( reg_marginaliseResultXY_kernel) , dim3(G8), dim3(B8) , 0, 0, temp2DHistogram);
NR_CUDA_CHECK_KERNEL(G8,B8)
NR_CUDA_SAFE_CALL(hipUnbindTexture(histogramTexture));
hipFree(temp3DHistogram);
// Transfer the data to CPU
temp2DHistogram_h = new float[total_target_entries];
hipMemcpy(temp2DHistogram_h,temp2DHistogram,total_target_entries*sizeof(float), hipMemcpyDeviceToHost);
for (int i = 0; i < total_target_entries; ++i) {
probaJointHistogram[num_probabilities + i] = temp2DHistogram_h[i];
}
delete[] temp2DHistogram_h;
hipFree(temp2DHistogram);
// The next bits could be done on the GPU, but there is not much performance gain and it is
// better to do the log and accumulation in double precision.
// Generate joint entropy
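// Shannon entropy of the smoothed joint histogram: H = -sum_i p_i * log(p_i),
// with the `if (current_value)` guard treating 0 * log(0) as 0.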
float current_value, current_log;
double joint_entropy = 0.0;
for (int i = 0; i < num_probabilities; ++i)
{
current_value = probaJointHistogram[i];
current_log = 0.0;
if (current_value) current_log = log(current_value);
joint_entropy -= current_value * current_log;
logJointHistogram[i] = current_log;
}
// Generate target entropy
double *log_joint_target = &logJointHistogram[num_probabilities];
double target_entropy = 0.0;
for (int i = 0; i < total_target_entries; ++i)
{
current_value = probaJointHistogram[num_probabilities + i];
current_log = 0.0;
if (current_value) current_log = log(current_value);
target_entropy -= current_value * current_log;
log_joint_target[i] = current_log;
}
// Generate result entropy
double *log_joint_result = &logJointHistogram[num_probabilities+total_target_entries];
double result_entropy = 0.0;
for (int i = 0; i < total_result_entries; ++i)
{
current_value = probaJointHistogram[num_probabilities + total_target_entries + i];
current_log = 0.0;
if (current_value) current_log = log(current_value);
result_entropy -= current_value * current_log;
log_joint_result[i] = current_log;
}
entropies[0] = target_entropy;
entropies[1] = result_entropy;
entropies[2] = joint_entropy;
entropies[3] = voxelSum;
}
/// Called when we only have one target and one source image
void reg_getVoxelBasedNMIGradientUsingPW_gpu( nifti_image *targetImage,
nifti_image *resultImage,
hipArray **targetImageArray_d,
float **resultImageArray_d,
float4 **resultGradientArray_d,
float **logJointHistogram_d,
float4 **voxelNMIGradientArray_d,
int **mask_d,
int activeVoxelNumber,
double *entropies,
int refBinning,
int floBinning)
{
if(resultImage!=resultImage)
printf("Useless lines to avoid a warning");
const int voxelNumber = targetImage->nx*targetImage->ny*targetImage->nz;
const int3 imageSize=make_int3(targetImage->nx,targetImage->ny,targetImage->nz);
const int binNumber = refBinning*floBinning+refBinning+floBinning;
const float4 entropies_h=make_float4((float)entropies[0],(float)entropies[1],(float)entropies[2],(float)entropies[3]);
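// entropies[] holds {H(target), H(result), H(joint), voxel count}; the normalised
// mutual information is NMI = (H(target) + H(result)) / H(joint).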
const float NMI = (float)((entropies[0]+entropies[1])/entropies[2]);
// Bind Symbols
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_VoxelNumber,&voxelNumber,sizeof(int)));
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_ImageSize,&imageSize,sizeof(int3)));
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_firstTargetBin,&refBinning,sizeof(int)));
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_firstResultBin,&floBinning,sizeof(int)));
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_Entropies,&entropies_h,sizeof(float4)));
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_NMI,&NMI,sizeof(float)));
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_ActiveVoxelNumber,&activeVoxelNumber,sizeof(int)));
// Texture bindingcurrentFloating
//Bind target image array to a 3D texture
firstTargetImageTexture.normalized = true;
firstTargetImageTexture.filterMode = hipFilterModeLinear;
firstTargetImageTexture.addressMode[0] = hipAddressModeWrap;
firstTargetImageTexture.addressMode[1] = hipAddressModeWrap;
firstTargetImageTexture.addressMode[2] = hipAddressModeWrap;
hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>();
NR_CUDA_SAFE_CALL(hipBindTextureToArray(firstTargetImageTexture, *targetImageArray_d, channelDesc))
NR_CUDA_SAFE_CALL(hipBindTexture(0, firstResultImageTexture, *resultImageArray_d, voxelNumber*sizeof(float)));
NR_CUDA_SAFE_CALL(hipBindTexture(0, firstResultImageGradientTexture, *resultGradientArray_d, voxelNumber*sizeof(float4)));
NR_CUDA_SAFE_CALL(hipBindTexture(0, histogramTexture, *logJointHistogram_d, binNumber*sizeof(float)));
NR_CUDA_SAFE_CALL(hipBindTexture(0, maskTexture, *mask_d, activeVoxelNumber*sizeof(int)));
NR_CUDA_SAFE_CALL(hipMemset(*voxelNMIGradientArray_d, 0, voxelNumber*sizeof(float4)));
const unsigned int Grid_reg_getVoxelBasedNMIGradientUsingPW =
(unsigned int)ceil(sqrtf((float)activeVoxelNumber/(float)Block_reg_getVoxelBasedNMIGradientUsingPW));
dim3 B1(Block_reg_getVoxelBasedNMIGradientUsingPW,1,1);
dim3 G1(Grid_reg_getVoxelBasedNMIGradientUsingPW,Grid_reg_getVoxelBasedNMIGradientUsingPW,1);
hipLaunchKernelGGL(( reg_getVoxelBasedNMIGradientUsingPW_kernel) , dim3(G1), dim3(B1) , 0, 0, *voxelNMIGradientArray_d);
NR_CUDA_CHECK_KERNEL(G1,B1)
NR_CUDA_SAFE_CALL(hipUnbindTexture(firstTargetImageTexture));
NR_CUDA_SAFE_CALL(hipUnbindTexture(firstResultImageTexture));
NR_CUDA_SAFE_CALL(hipUnbindTexture(firstResultImageGradientTexture));
NR_CUDA_SAFE_CALL(hipUnbindTexture(histogramTexture));
NR_CUDA_SAFE_CALL(hipUnbindTexture(maskTexture));
}
/// Called when we have two target and two source images
void reg_getVoxelBasedNMIGradientUsingPW2x2_gpu(nifti_image *targetImage,
nifti_image *resultImage,
hipArray **targetImageArray1_d,
hipArray **targetImageArray2_d,
float **resultImageArray1_d,
float **resultImageArray2_d,
float4 **resultGradientArray1_d,
float4 **resultGradientArray2_d,
float **logJointHistogram_d,
float4 **voxelNMIGradientArray_d,
int **mask_d,
int activeVoxelNumber,
double *entropies,
unsigned int *targetBinning,
unsigned int *resultBinning)
{
if (targetImage->nt != 2 || resultImage->nt != 2) {
printf("[NiftyReg CUDA] reg_getVoxelBasedNMIGradientUsingPW2x2_gpu: This kernel should only be used with two target and source images\n");
return;
}
const int voxelNumber = targetImage->nx*targetImage->ny*targetImage->nz;
const int3 imageSize=make_int3(targetImage->nx,targetImage->ny,targetImage->nz);
const float4 entropies_h=make_float4((float)entropies[0],(float)entropies[1],(float)entropies[2],(float)entropies[3]);
const float NMI = (float)((entropies[0]+entropies[1])/entropies[2]);
const int binNumber = targetBinning[0]*targetBinning[1]*resultBinning[0]*resultBinning[1] + (targetBinning[0]*targetBinning[1]) + (resultBinning[0]*resultBinning[1]);
// Bind Symbols
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_VoxelNumber,&voxelNumber,sizeof(int)));
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_ImageSize,&imageSize,sizeof(int3)));
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_firstTargetBin,&targetBinning[0],sizeof(int)));
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_secondTargetBin,&targetBinning[1],sizeof(int)));
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_firstResultBin,&resultBinning[0],sizeof(int)));
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_secondResultBin,&resultBinning[1],sizeof(int)));
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_Entropies,&entropies_h,sizeof(float4)));
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_NMI,&NMI,sizeof(float)));
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_ActiveVoxelNumber,&activeVoxelNumber,sizeof(int)));
// Texture binding
firstTargetImageTexture.normalized = true;
firstTargetImageTexture.filterMode = hipFilterModeLinear;
firstTargetImageTexture.addressMode[0] = hipAddressModeWrap;
firstTargetImageTexture.addressMode[1] = hipAddressModeWrap;
firstTargetImageTexture.addressMode[2] = hipAddressModeWrap;
hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>();
NR_CUDA_SAFE_CALL(hipBindTextureToArray(firstTargetImageTexture, *targetImageArray1_d, channelDesc))
NR_CUDA_SAFE_CALL(hipBindTextureToArray(secondTargetImageTexture, *targetImageArray2_d, channelDesc))
NR_CUDA_SAFE_CALL(hipBindTexture(0, firstResultImageTexture, *resultImageArray1_d, voxelNumber*sizeof(float)));
NR_CUDA_SAFE_CALL(hipBindTexture(0, secondResultImageTexture, *resultImageArray2_d, voxelNumber*sizeof(float)));
NR_CUDA_SAFE_CALL(hipBindTexture(0, firstResultImageGradientTexture, *resultGradientArray1_d, voxelNumber*sizeof(float4)));
NR_CUDA_SAFE_CALL(hipBindTexture(0, secondResultImageGradientTexture, *resultGradientArray2_d, voxelNumber*sizeof(float4)));
NR_CUDA_SAFE_CALL(hipBindTexture(0, histogramTexture, *logJointHistogram_d, binNumber*sizeof(float)));
NR_CUDA_SAFE_CALL(hipBindTexture(0, maskTexture, *mask_d, activeVoxelNumber*sizeof(int)));
NR_CUDA_SAFE_CALL(hipMemset(*voxelNMIGradientArray_d, 0, voxelNumber*sizeof(float4)));
const unsigned int Grid_reg_getVoxelBasedNMIGradientUsingPW2x2 =
(unsigned int)ceil(sqrtf((float)activeVoxelNumber/(float)Block_reg_getVoxelBasedNMIGradientUsingPW2x2));
dim3 B1(Block_reg_getVoxelBasedNMIGradientUsingPW2x2,1,1);
dim3 G1(Grid_reg_getVoxelBasedNMIGradientUsingPW2x2,Grid_reg_getVoxelBasedNMIGradientUsingPW2x2,1);
hipLaunchKernelGGL(( reg_getVoxelBasedNMIGradientUsingPW2x2_kernel) , dim3(G1), dim3(B1) , 0, 0, *voxelNMIGradientArray_d);
NR_CUDA_CHECK_KERNEL(G1,B1)
NR_CUDA_SAFE_CALL(hipUnbindTexture(firstTargetImageTexture));
NR_CUDA_SAFE_CALL(hipUnbindTexture(secondTargetImageTexture));
NR_CUDA_SAFE_CALL(hipUnbindTexture(firstResultImageTexture));
NR_CUDA_SAFE_CALL(hipUnbindTexture(secondResultImageTexture));
NR_CUDA_SAFE_CALL(hipUnbindTexture(firstResultImageGradientTexture));
NR_CUDA_SAFE_CALL(hipUnbindTexture(secondResultImageGradientTexture));
NR_CUDA_SAFE_CALL(hipUnbindTexture(histogramTexture));
NR_CUDA_SAFE_CALL(hipUnbindTexture(maskTexture));
}
#endif
|
e7c11097ca851f0574cd857b21b0d05a5e937b86.cu
|
/*
* _reg_mutualinformation_gpu.cu
*
*
* Created by Marc Modat on 24/03/2009.
* Copyright (c) 2009, University College London. All rights reserved.
* Centre for Medical Image Computing (CMIC)
* See the LICENSE.txt file in the nifty_reg root folder
*
*/
#ifndef _REG_MUTUALINFORMATION_GPU_CU
#define _REG_MUTUALINFORMATION_GPU_CU
#include "_reg_blocksize_gpu.h"
#include "_reg_mutualinformation_gpu.h"
#include "_reg_mutualinformation_kernels.cu"
#include <iostream>
/// Called when we have two target and two source images
void reg_getEntropies2x2_gpu(nifti_image *targetImages,
nifti_image *resultImages,
//int type,
unsigned int *target_bins, // should be an array of size num_target_volumes
unsigned int *result_bins, // should be an array of size num_result_volumes
double *probaJointHistogram,
double *logJointHistogram,
float **logJointHistogram_d,
double *entropies,
int *mask)
{
// The joint histogram is filled using the CPU arrays
//Check the type of the target and source images
if(targetImages->datatype!=NIFTI_TYPE_FLOAT32 || resultImages->datatype!=NIFTI_TYPE_FLOAT32){
printf("[NiftyReg CUDA] reg_getEntropies2x2_gpu: This kernel should only be used floating images.\n");
exit(1);
}
unsigned int voxelNumber = targetImages->nx*targetImages->ny*targetImages->nz;
unsigned int binNumber = target_bins[0]*target_bins[1]*result_bins[0]*result_bins[1]+
target_bins[0]*target_bins[1]+result_bins[0]*result_bins[1];
float *ref1Ptr = static_cast<float *>(targetImages->data);
float *ref2Ptr = &ref1Ptr[voxelNumber];
float *res1Ptr = static_cast<float *>(resultImages->data);
float *res2Ptr = &res1Ptr[voxelNumber];
int *maskPtr = &mask[0];
memset(probaJointHistogram, 0, binNumber*sizeof(double));
double voxelSum=0.;
for(unsigned int i=0;i<voxelNumber;++i){
if(*maskPtr++>-1){
int val1 = static_cast<int>(*ref1Ptr);
int val2 = static_cast<int>(*ref2Ptr);
int val3 = static_cast<int>(*res1Ptr);
int val4 = static_cast<int>(*res2Ptr);
if(val1==val1 && val2==val2 && val3==val3 && val4==val4 &&
val1>-1 && val1<(int)target_bins[0] && val2>-1 && val2<(int)target_bins[1] &&
val3>-1 && val3<(int)result_bins[0] && val4>-1 && val4<(int)result_bins[1]){
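// Row-major flattening of the 4D bin coordinate (val1, val2, val3, val4), with val1
// (target_bins[0]) varying fastest and val4 (result_bins[1]) varying slowest.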
unsigned int index = ((val4*result_bins[0]+val3)*target_bins[1]+val2)*target_bins[0]+val1;
probaJointHistogram[index]++;
voxelSum++;
}
}
ref1Ptr++;
ref2Ptr++;
res1Ptr++;
res2Ptr++;
}
// The joint histogram is normalised and transferred to the device
float *logJointHistogram_float=NULL;
NR_CUDA_SAFE_CALL(cudaMallocHost(&logJointHistogram_float,binNumber*sizeof(float)));
for(unsigned int i=0;i<target_bins[0]*target_bins[1]*result_bins[0]*result_bins[1];++i)
logJointHistogram_float[i]=float(probaJointHistogram[i]/voxelSum);
NR_CUDA_SAFE_CALL(cudaMemcpy(*logJointHistogram_d,logJointHistogram_float,binNumber*sizeof(float),cudaMemcpyHostToDevice));
NR_CUDA_SAFE_CALL(cudaFreeHost(logJointHistogram_float));
float *tempHistogram=NULL;
NR_CUDA_SAFE_CALL(cudaMalloc(&tempHistogram,binNumber*sizeof(float)));
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_firstTargetBin,&target_bins[0],sizeof(int)));
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_secondTargetBin,&target_bins[1],sizeof(int)));
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_firstResultBin,&result_bins[0],sizeof(int)));
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_secondResultBin,&result_bins[1],sizeof(int)));
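// The four smoothing passes below ping-pong between *logJointHistogram_d and tempHistogram:
// each pass binds the current buffer to histogramTexture and writes the smoothed result into
// the other buffer, so the final smoothed histogram ends up back in *logJointHistogram_d.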
// The joint histogram is smoothed along the x axis
NR_CUDA_SAFE_CALL(cudaBindTexture(0, histogramTexture, *logJointHistogram_d, binNumber*sizeof(float)));
dim3 B1(Block_reg_smoothJointHistogramX,1,1);
const int gridSizesmoothJointHistogramX=(int)ceil(sqrtf((float)(target_bins[1]*result_bins[0]*result_bins[1])/(float)B1.x));
dim3 G1(gridSizesmoothJointHistogramX,gridSizesmoothJointHistogramX,1);
reg_smoothJointHistogramX_kernel <<< G1, B1 >>> (tempHistogram);
NR_CUDA_CHECK_KERNEL(G1,B1)
NR_CUDA_SAFE_CALL(cudaUnbindTexture(histogramTexture));
// The joint histogram is smoothed along the y axis
NR_CUDA_SAFE_CALL(cudaBindTexture(0, histogramTexture, tempHistogram, binNumber*sizeof(float)));
dim3 B2(Block_reg_smoothJointHistogramY,1,1);
const int gridSizesmoothJointHistogramY=(int)ceil(sqrtf((float)(target_bins[1]*result_bins[0]*result_bins[1])/(float)B2.x));
dim3 G2(gridSizesmoothJointHistogramY,gridSizesmoothJointHistogramY,1);
reg_smoothJointHistogramY_kernel <<< G2, B2 >>> (*logJointHistogram_d);
NR_CUDA_CHECK_KERNEL(G2,B2)
NR_CUDA_SAFE_CALL(cudaUnbindTexture(histogramTexture));
// The joint histogram is smoothed along the z axis
NR_CUDA_SAFE_CALL(cudaBindTexture(0, histogramTexture, *logJointHistogram_d, binNumber*sizeof(float)));
dim3 B3(Block_reg_smoothJointHistogramZ,1,1);
const int gridSizesmoothJointHistogramZ=(int)ceil(sqrtf((float)(target_bins[1]*result_bins[0]*result_bins[1])/(float)B3.x));
dim3 G3(gridSizesmoothJointHistogramZ,gridSizesmoothJointHistogramZ,1);
reg_smoothJointHistogramZ_kernel <<< G3, B3 >>> (tempHistogram);
NR_CUDA_CHECK_KERNEL(G3,B3)
NR_CUDA_SAFE_CALL(cudaUnbindTexture(histogramTexture));
// The joint histogram is smoothed along the w axis
NR_CUDA_SAFE_CALL(cudaBindTexture(0, histogramTexture, tempHistogram, binNumber*sizeof(float)));
dim3 B4(Block_reg_smoothJointHistogramW,1,1);
const int gridSizesmoothJointHistogramW=(int)ceil(sqrtf((float)(target_bins[1]*result_bins[0]*result_bins[1])/(float)B4.x));
dim3 G4(gridSizesmoothJointHistogramW,gridSizesmoothJointHistogramW,1);
reg_smoothJointHistogramW_kernel <<< G4, B4 >>> (*logJointHistogram_d);
NR_CUDA_CHECK_KERNEL(G4,B4)
NR_CUDA_SAFE_CALL(cudaUnbindTexture(histogramTexture));
NR_CUDA_SAFE_CALL(cudaFree(tempHistogram));
NR_CUDA_SAFE_CALL(cudaMallocHost(&logJointHistogram_float,binNumber*sizeof(float)));
NR_CUDA_SAFE_CALL(cudaMemcpy(logJointHistogram_float,*logJointHistogram_d,binNumber*sizeof(float),cudaMemcpyDeviceToHost));
for(unsigned int i=0;i<target_bins[0]*target_bins[1]*result_bins[0]*result_bins[1];++i)
probaJointHistogram[i]=logJointHistogram_float[i];
NR_CUDA_SAFE_CALL(cudaFreeHost(logJointHistogram_float));
// The 4D joint histogram is first marginalised along the x axis (target_bins[0])
float *temp3DHistogram=NULL;
NR_CUDA_SAFE_CALL(cudaMalloc(&temp3DHistogram,target_bins[1]*result_bins[0]*result_bins[1]*sizeof(float)));
NR_CUDA_SAFE_CALL(cudaBindTexture(0, histogramTexture, *logJointHistogram_d, binNumber*sizeof(float)));
dim3 B5(Block_reg_marginaliseTargetX,1,1);
const int gridSizesmoothJointHistogramA=(int)ceil(sqrtf((float)(target_bins[1]*result_bins[0]*result_bins[1])/(float)B5.x));
dim3 G5(gridSizesmoothJointHistogramA,gridSizesmoothJointHistogramA,1);
reg_marginaliseTargetX_kernel <<< G5, B5 >>> (temp3DHistogram);
NR_CUDA_CHECK_KERNEL(G5,B5)
NR_CUDA_SAFE_CALL(cudaUnbindTexture(histogramTexture));
// The 3D joint histogram is then marginalised along the y axis (target_bins[1])
float *temp2DHistogram=NULL;
NR_CUDA_SAFE_CALL(cudaMalloc(&temp2DHistogram,result_bins[0]*result_bins[1]*sizeof(float)));
NR_CUDA_SAFE_CALL(cudaBindTexture(0, histogramTexture, temp3DHistogram, target_bins[1]*result_bins[0]*result_bins[1]*sizeof(float)));
dim3 B6(Block_reg_marginaliseTargetXY,1,1);
const int gridSizesmoothJointHistogramB=(int)ceil(sqrtf((float)(target_bins[1]*result_bins[0]*result_bins[1])/(float)B6.x));
dim3 G6(gridSizesmoothJointHistogramB,gridSizesmoothJointHistogramB,1);
reg_marginaliseTargetXY_kernel <<< G6, B6 >>> (temp2DHistogram);
NR_CUDA_CHECK_KERNEL(G6,B6)
NR_CUDA_SAFE_CALL(cudaUnbindTexture(histogramTexture));
NR_CUDA_SAFE_CALL(cudaFree(temp3DHistogram));
// Transfer the data via a temporary array of floats: we cannot copy it directly into
// probaJointHistogram, which is an array of doubles, as cudaMemcpy would reinterpret the bytes and produce unpredictable results
const int total_target_entries = target_bins[0] * target_bins[1];
const int total_result_entries = result_bins[0] * result_bins[1];
const int num_probabilities = total_target_entries * total_result_entries;
int offset = num_probabilities + total_target_entries;
float *temp2DHistogram_h = new float[total_result_entries];
cudaMemcpy(temp2DHistogram_h,temp2DHistogram,total_result_entries*sizeof(float), cudaMemcpyDeviceToHost);
for (int i = 0; i < total_result_entries; ++i) {
probaJointHistogram[offset + i] = temp2DHistogram_h[i];
}
delete[] temp2DHistogram_h;
NR_CUDA_SAFE_CALL(cudaFree(temp2DHistogram));
// Now marginalise over the result axes.
// First over W axes. (result_bins[1])
temp3DHistogram=NULL;
NR_CUDA_SAFE_CALL(cudaMalloc(&temp3DHistogram, target_bins[0]*target_bins[1]*result_bins[0]*sizeof(float)));
NR_CUDA_SAFE_CALL(cudaBindTexture(0, histogramTexture, *logJointHistogram_d, binNumber*sizeof(float)));
dim3 B7(Block_reg_marginaliseResultX,1,1);
const int gridSizesmoothJointHistogramC=(int)ceil(sqrtf((float)(target_bins[1]*result_bins[0]*result_bins[1])/(float)B7.x));
dim3 G7(gridSizesmoothJointHistogramC,gridSizesmoothJointHistogramC,1);
reg_marginaliseResultX_kernel <<< G7, B7 >>> (temp3DHistogram);
NR_CUDA_CHECK_KERNEL(G7,B7)
NR_CUDA_SAFE_CALL(cudaUnbindTexture(histogramTexture));
// Now over Z axes. (result_bins[0])
temp2DHistogram=NULL;
NR_CUDA_SAFE_CALL(cudaMalloc(&temp2DHistogram,target_bins[0]*target_bins[1]*sizeof(float)));
NR_CUDA_SAFE_CALL(cudaBindTexture(0, histogramTexture, temp3DHistogram, target_bins[0]*target_bins[1]*result_bins[0]*sizeof(float)));
dim3 B8(Block_reg_marginaliseResultXY,1,1);
const int gridSizesmoothJointHistogramD=(int)ceil(sqrtf((float)(target_bins[1]*result_bins[0]*result_bins[1])/(float)B8.x));
dim3 G8(gridSizesmoothJointHistogramD,gridSizesmoothJointHistogramD,1);
reg_marginaliseResultXY_kernel <<< G8, B8 >>> (temp2DHistogram);
NR_CUDA_CHECK_KERNEL(G8,B8)
NR_CUDA_SAFE_CALL(cudaUnbindTexture(histogramTexture));
cudaFree(temp3DHistogram);
// Transfer the data to CPU
temp2DHistogram_h = new float[total_target_entries];
cudaMemcpy(temp2DHistogram_h,temp2DHistogram,total_target_entries*sizeof(float), cudaMemcpyDeviceToHost);
for (int i = 0; i < total_target_entries; ++i) {
probaJointHistogram[num_probabilities + i] = temp2DHistogram_h[i];
}
delete[] temp2DHistogram_h;
cudaFree(temp2DHistogram);
// The next bits could be done on the GPU, but there is not much performance gain and it is
// better to do the log and accumulation in double precision.
// Generate joint entropy
float current_value, current_log;
double joint_entropy = 0.0;
for (int i = 0; i < num_probabilities; ++i)
{
current_value = probaJointHistogram[i];
current_log = 0.0;
if (current_value) current_log = log(current_value);
joint_entropy -= current_value * current_log;
logJointHistogram[i] = current_log;
}
// Generate target entropy
double *log_joint_target = &logJointHistogram[num_probabilities];
double target_entropy = 0.0;
for (int i = 0; i < total_target_entries; ++i)
{
current_value = probaJointHistogram[num_probabilities + i];
current_log = 0.0;
if (current_value) current_log = log(current_value);
target_entropy -= current_value * current_log;
log_joint_target[i] = current_log;
}
// Generate result entropy
double *log_joint_result = &logJointHistogram[num_probabilities+total_target_entries];
double result_entropy = 0.0;
for (int i = 0; i < total_result_entries; ++i)
{
current_value = probaJointHistogram[num_probabilities + total_target_entries + i];
current_log = 0.0;
if (current_value) current_log = log(current_value);
result_entropy -= current_value * current_log;
log_joint_result[i] = current_log;
}
entropies[0] = target_entropy;
entropies[1] = result_entropy;
entropies[2] = joint_entropy;
entropies[3] = voxelSum;
}
/// Called when we only have one target and one source image
void reg_getVoxelBasedNMIGradientUsingPW_gpu( nifti_image *targetImage,
nifti_image *resultImage,
cudaArray **targetImageArray_d,
float **resultImageArray_d,
float4 **resultGradientArray_d,
float **logJointHistogram_d,
float4 **voxelNMIGradientArray_d,
int **mask_d,
int activeVoxelNumber,
double *entropies,
int refBinning,
int floBinning)
{
if(resultImage!=resultImage)
printf("Useless lines to avoid a warning");
const int voxelNumber = targetImage->nx*targetImage->ny*targetImage->nz;
const int3 imageSize=make_int3(targetImage->nx,targetImage->ny,targetImage->nz);
const int binNumber = refBinning*floBinning+refBinning+floBinning;
const float4 entropies_h=make_float4((float)entropies[0],(float)entropies[1],(float)entropies[2],(float)entropies[3]);
const float NMI = (float)((entropies[0]+entropies[1])/entropies[2]);
// Bind Symbols
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_VoxelNumber,&voxelNumber,sizeof(int)));
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ImageSize,&imageSize,sizeof(int3)));
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_firstTargetBin,&refBinning,sizeof(int)));
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_firstResultBin,&floBinning,sizeof(int)));
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_Entropies,&entropies_h,sizeof(float4)));
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_NMI,&NMI,sizeof(float)));
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ActiveVoxelNumber,&activeVoxelNumber,sizeof(int)));
// Texture bindingcurrentFloating
//Bind target image array to a 3D texture
firstTargetImageTexture.normalized = true;
firstTargetImageTexture.filterMode = cudaFilterModeLinear;
firstTargetImageTexture.addressMode[0] = cudaAddressModeWrap;
firstTargetImageTexture.addressMode[1] = cudaAddressModeWrap;
firstTargetImageTexture.addressMode[2] = cudaAddressModeWrap;
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>();
NR_CUDA_SAFE_CALL(cudaBindTextureToArray(firstTargetImageTexture, *targetImageArray_d, channelDesc))
NR_CUDA_SAFE_CALL(cudaBindTexture(0, firstResultImageTexture, *resultImageArray_d, voxelNumber*sizeof(float)));
NR_CUDA_SAFE_CALL(cudaBindTexture(0, firstResultImageGradientTexture, *resultGradientArray_d, voxelNumber*sizeof(float4)));
NR_CUDA_SAFE_CALL(cudaBindTexture(0, histogramTexture, *logJointHistogram_d, binNumber*sizeof(float)));
NR_CUDA_SAFE_CALL(cudaBindTexture(0, maskTexture, *mask_d, activeVoxelNumber*sizeof(int)));
NR_CUDA_SAFE_CALL(cudaMemset(*voxelNMIGradientArray_d, 0, voxelNumber*sizeof(float4)));
const unsigned int Grid_reg_getVoxelBasedNMIGradientUsingPW =
(unsigned int)ceil(sqrtf((float)activeVoxelNumber/(float)Block_reg_getVoxelBasedNMIGradientUsingPW));
dim3 B1(Block_reg_getVoxelBasedNMIGradientUsingPW,1,1);
dim3 G1(Grid_reg_getVoxelBasedNMIGradientUsingPW,Grid_reg_getVoxelBasedNMIGradientUsingPW,1);
reg_getVoxelBasedNMIGradientUsingPW_kernel <<< G1, B1 >>> (*voxelNMIGradientArray_d);
NR_CUDA_CHECK_KERNEL(G1,B1)
NR_CUDA_SAFE_CALL(cudaUnbindTexture(firstTargetImageTexture));
NR_CUDA_SAFE_CALL(cudaUnbindTexture(firstResultImageTexture));
NR_CUDA_SAFE_CALL(cudaUnbindTexture(firstResultImageGradientTexture));
NR_CUDA_SAFE_CALL(cudaUnbindTexture(histogramTexture));
NR_CUDA_SAFE_CALL(cudaUnbindTexture(maskTexture));
}
/// Called when we have two target and two source images
void reg_getVoxelBasedNMIGradientUsingPW2x2_gpu(nifti_image *targetImage,
nifti_image *resultImage,
cudaArray **targetImageArray1_d,
cudaArray **targetImageArray2_d,
float **resultImageArray1_d,
float **resultImageArray2_d,
float4 **resultGradientArray1_d,
float4 **resultGradientArray2_d,
float **logJointHistogram_d,
float4 **voxelNMIGradientArray_d,
int **mask_d,
int activeVoxelNumber,
double *entropies,
unsigned int *targetBinning,
unsigned int *resultBinning)
{
if (targetImage->nt != 2 || resultImage->nt != 2) {
printf("[NiftyReg CUDA] reg_getVoxelBasedNMIGradientUsingPW2x2_gpu: This kernel should only be used with two target and source images\n");
return;
}
const int voxelNumber = targetImage->nx*targetImage->ny*targetImage->nz;
const int3 imageSize=make_int3(targetImage->nx,targetImage->ny,targetImage->nz);
const float4 entropies_h=make_float4((float)entropies[0],(float)entropies[1],(float)entropies[2],(float)entropies[3]);
const float NMI = (float)((entropies[0]+entropies[1])/entropies[2]);
const int binNumber = targetBinning[0]*targetBinning[1]*resultBinning[0]*resultBinning[1] + (targetBinning[0]*targetBinning[1]) + (resultBinning[0]*resultBinning[1]);
// Bind Symbols
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_VoxelNumber,&voxelNumber,sizeof(int)));
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ImageSize,&imageSize,sizeof(int3)));
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_firstTargetBin,&targetBinning[0],sizeof(int)));
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_secondTargetBin,&targetBinning[1],sizeof(int)));
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_firstResultBin,&resultBinning[0],sizeof(int)));
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_secondResultBin,&resultBinning[1],sizeof(int)));
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_Entropies,&entropies_h,sizeof(float4)));
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_NMI,&NMI,sizeof(float)));
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ActiveVoxelNumber,&activeVoxelNumber,sizeof(int)));
// Texture binding
firstTargetImageTexture.normalized = true;
firstTargetImageTexture.filterMode = cudaFilterModeLinear;
firstTargetImageTexture.addressMode[0] = cudaAddressModeWrap;
firstTargetImageTexture.addressMode[1] = cudaAddressModeWrap;
firstTargetImageTexture.addressMode[2] = cudaAddressModeWrap;
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>();
NR_CUDA_SAFE_CALL(cudaBindTextureToArray(firstTargetImageTexture, *targetImageArray1_d, channelDesc))
NR_CUDA_SAFE_CALL(cudaBindTextureToArray(secondTargetImageTexture, *targetImageArray2_d, channelDesc))
NR_CUDA_SAFE_CALL(cudaBindTexture(0, firstResultImageTexture, *resultImageArray1_d, voxelNumber*sizeof(float)));
NR_CUDA_SAFE_CALL(cudaBindTexture(0, secondResultImageTexture, *resultImageArray2_d, voxelNumber*sizeof(float)));
NR_CUDA_SAFE_CALL(cudaBindTexture(0, firstResultImageGradientTexture, *resultGradientArray1_d, voxelNumber*sizeof(float4)));
NR_CUDA_SAFE_CALL(cudaBindTexture(0, secondResultImageGradientTexture, *resultGradientArray2_d, voxelNumber*sizeof(float4)));
NR_CUDA_SAFE_CALL(cudaBindTexture(0, histogramTexture, *logJointHistogram_d, binNumber*sizeof(float)));
NR_CUDA_SAFE_CALL(cudaBindTexture(0, maskTexture, *mask_d, activeVoxelNumber*sizeof(int)));
NR_CUDA_SAFE_CALL(cudaMemset(*voxelNMIGradientArray_d, 0, voxelNumber*sizeof(float4)));
const unsigned int Grid_reg_getVoxelBasedNMIGradientUsingPW2x2 =
(unsigned int)ceil(sqrtf((float)activeVoxelNumber/(float)Block_reg_getVoxelBasedNMIGradientUsingPW2x2));
dim3 B1(Block_reg_getVoxelBasedNMIGradientUsingPW2x2,1,1);
dim3 G1(Grid_reg_getVoxelBasedNMIGradientUsingPW2x2,Grid_reg_getVoxelBasedNMIGradientUsingPW2x2,1);
reg_getVoxelBasedNMIGradientUsingPW2x2_kernel <<< G1, B1 >>> (*voxelNMIGradientArray_d);
NR_CUDA_CHECK_KERNEL(G1,B1)
NR_CUDA_SAFE_CALL(cudaUnbindTexture(firstTargetImageTexture));
NR_CUDA_SAFE_CALL(cudaUnbindTexture(secondTargetImageTexture));
NR_CUDA_SAFE_CALL(cudaUnbindTexture(firstResultImageTexture));
NR_CUDA_SAFE_CALL(cudaUnbindTexture(secondResultImageTexture));
NR_CUDA_SAFE_CALL(cudaUnbindTexture(firstResultImageGradientTexture));
NR_CUDA_SAFE_CALL(cudaUnbindTexture(secondResultImageGradientTexture));
NR_CUDA_SAFE_CALL(cudaUnbindTexture(histogramTexture));
NR_CUDA_SAFE_CALL(cudaUnbindTexture(maskTexture));
}
#endif
|
966d660cd7b530a97592d84be8edc7f8472b0d27.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/NativeFunctions.h>
#include <ATen/Dispatch.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/hip/detail/KernelUtils.h>
#include <ATen/native/TensorIterator.h>
#include <aten/src/ATen/TensorUtils.h>
#include <ATen/hip/detail/KernelUtils.h>
#include <ATen/native/hip/Loops.cuh>
constexpr float EPSILON = 1e-12;
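// EPSILON floors the (1 - input) * input denominator in the BCE backward kernel below,
// so the gradient stays finite when the predicted probability is exactly 0 or 1.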
namespace {
using namespace at;
void binary_cross_entropy_backward_out_kernel(Tensor& grad_input, const Tensor& grad, const Tensor& input, const Tensor& target) {
at::TensorIterator iter = TensorIteratorConfig()
.add_output(grad_input)
.add_input(grad)
.add_input(input)
.add_input(target)
.build();
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.common_dtype(), "binary_cross_entropy_backward_out_cuda", [&]() {
at::native::gpu_kernel(iter, [] GPU_LAMBDA (
scalar_t grad_val,
scalar_t input_val,
scalar_t target_val
) -> scalar_t {
const scalar_t one = 1;
const scalar_t epsilon = EPSILON;
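// For L = -(t*log(x) + (1-t)*log(1-x)), dL/dx = (x - t) / (x * (1 - x));
// the denominator is clamped to EPSILON to avoid division by zero.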
scalar_t grad_input_denominator = max(
(one - input_val) * input_val,
epsilon
);
return grad_val * (input_val - target_val) / grad_input_denominator;
}
);
});
}
} // namespace
namespace at { namespace native {
Tensor kl_div_backward_cuda(const Tensor& grad, const Tensor& input, const Tensor& target, int64_t reduction, bool log_target) {
auto grad_input = at::empty_like(input);
if (!log_target) {
TensorIterator iter = TensorIteratorConfig()
.add_output(grad_input)
.add_input(target)
.add_input(grad)
.build();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "kl_div_backward_cuda", [&]() {
scalar_t inv = (reduction == at::Reduction::Mean) ? scalar_t(1.0 / input.numel()) : scalar_t(1.0);
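// Pointwise kl_div(input, target) = target * (log(target) - input), so d/d(input) = -target;
// the gradient is multiplied by grad and, for 'mean' reduction, by 1/numel().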
gpu_kernel(iter,
[inv] GPU_LAMBDA (scalar_t target_val, scalar_t grad_val) {
return (target_val > 0) ? scalar_t(-target_val * grad_val * inv) : scalar_t(0.0);
});
});
}
else {
grad_input = -at::exp(target) * grad;
if (reduction == at::Reduction::Mean) {
grad_input /= input.numel();
}
}
return grad_input;
}
Tensor binary_cross_entropy_cuda(const Tensor& input, const Tensor& target, const c10::optional<Tensor>& weight_opt, int64_t reduction) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt);
const Tensor& weight = *weight_maybe_owned;
Tensor loss = at::empty_like(input);
return at::native::binary_cross_entropy_out_cuda(
input, target, weight, reduction, loss);
}
Tensor& binary_cross_entropy_out_cuda(const Tensor& input, const Tensor& target, const c10::optional<Tensor>& weight_opt, int64_t reduction, Tensor& loss) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt);
const Tensor& weight = *weight_maybe_owned;
Tensor loss_squeezed = at::squeeze(loss);
TensorIterator iter = TensorIteratorConfig()
.add_output(loss_squeezed)
.add_owned_input(at::squeeze(input))
.add_owned_input(at::squeeze(target))
.build();
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.common_dtype(), "binary_cross_entropy_out_cuda", [&]() {
gpu_kernel(iter,
[] GPU_LAMBDA (scalar_t input_val, scalar_t target_val) -> scalar_t {
const scalar_t zero = 0;
const scalar_t one = 1;
const scalar_t neg_100 = -100;
CUDA_KERNEL_ASSERT(input_val >= zero && input_val <= one);
scalar_t log_input_val = ::log(input_val);
scalar_t log_1_minus_input_val = ::log(one - input_val);
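// Clamp both logs at -100 so the loss stays finite when input_val is exactly 0 or 1
// (the log would otherwise be -inf).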
log_input_val = ::max(log_input_val, neg_100);
log_1_minus_input_val = ::max(log_1_minus_input_val, neg_100);
return ((target_val - one) * log_1_minus_input_val) - (target_val * log_input_val);
}
);
});
if (weight.defined()) {
loss.mul_(weight);
}
if (reduction != at::Reduction::None) {
Tensor loss_reduced;
if (reduction == at::Reduction::Mean) {
loss_reduced = loss.mean();
} else if (reduction == at::Reduction::Sum) {
loss_reduced = loss.sum();
}
loss.resize_as_(loss_reduced).copy_(loss_reduced);
}
return loss;
}
Tensor binary_cross_entropy_backward_cuda(const Tensor& grad, const Tensor& input, const Tensor& target, const c10::optional<Tensor>& weight_opt, int64_t reduction) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt);
const Tensor& weight = *weight_maybe_owned;
Tensor grad_input = at::empty_like(input);
return at::native::binary_cross_entropy_backward_out_cuda(
grad, input, target, weight, reduction, grad_input);
}
Tensor& binary_cross_entropy_backward_out_cuda(const Tensor& grad, const Tensor& input, const Tensor& target, const c10::optional<Tensor>& weight_opt, int64_t reduction, Tensor& grad_input) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt);
const Tensor& weight = *weight_maybe_owned;
Tensor grad_expand = grad.expand_as(input);
binary_cross_entropy_backward_out_kernel(grad_input, grad_expand, input, target);
if (weight.defined()) {
grad_input.mul_(weight);
}
if (reduction == at::Reduction::Mean) {
grad_input.div_(input.numel());
}
return grad_input;
}
// -----------------------------------
// nll_loss
// -----------------------------------
namespace {
const int NLL_LOSS_THREADS = 32;
template <typename scalar_t>
__global__ void nll_loss_forward_no_reduce_cuda_kernel(
int64_t batch_size,
PackedTensorAccessor64<scalar_t, 2> input,
int64_t* target,
scalar_t* output,
scalar_t* weights,
int n_classes,
int ignore_index) {
CUDA_KERNEL_LOOP(index, batch_size) {
int cur_target = target[index];
if (cur_target == ignore_index) {
output[index] = static_cast<scalar_t>(0);
continue;
}
CUDA_KERNEL_ASSERT(cur_target >= 0 && cur_target < n_classes);
auto cur_weight =
weights != nullptr ? weights[cur_target] : static_cast<scalar_t>(1);
output[index] = -cur_weight * input[index][cur_target];
}
}
template <typename scalar_t>
__global__ void nll_loss_forward_reduce_cuda_kernel_1d(
scalar_t* output,
scalar_t* total_weight,
scalar_t* input,
int64_t* target,
scalar_t* weights,
bool size_average,
int n_classes,
int64_t ignore_index) {
  CUDA_KERNEL_ASSERT(threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0);
int t = static_cast<int>(*target);
if (t != static_cast<int>(ignore_index)) {
CUDA_KERNEL_ASSERT(t >= 0 && t < n_classes);
scalar_t cur_weight =
weights != nullptr ? weights[t] : static_cast<scalar_t>(1);
*output = -cur_weight * input[t];
*total_weight = cur_weight;
if (size_average && *total_weight > 0) {
*output /= *total_weight;
}
}
}
template <typename scalar_t, typename accscalar_t>
__global__ void nll_loss_forward_reduce_cuda_kernel_2d(
scalar_t* output,
scalar_t* total_weight,
scalar_t* input,
int64_t* target,
scalar_t* weights,
bool size_average,
int nframe,
int ndim,
int n_classes,
int64_t ignore_index) {
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
__shared__ accscalar_t sh_inputs[NLL_LOSS_THREADS],
acc_weight[NLL_LOSS_THREADS];
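  // Launched as a single block of NLL_LOSS_THREADS threads: each thread strides over the batch,
  // accumulating a partial loss and weight sum in shared memory; thread 0 then combines the
  // partials and applies the mean reduction when requested.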
sh_inputs[threadIdx.x] = static_cast<accscalar_t>(0);
acc_weight[threadIdx.x] = static_cast<accscalar_t>(0);
for (int i = threadIdx.x; i < nframe; i += NLL_LOSS_THREADS) {
int t = target[i];
if (t != static_cast<int>(ignore_index)) {
CUDA_KERNEL_ASSERT(t >= 0 && t < n_classes);
scalar_t cur_weight =
weights != nullptr ? weights[t] : static_cast<scalar_t>(1);
sh_inputs[threadIdx.x] -= input[i * ndim + t] * cur_weight;
acc_weight[threadIdx.x] += cur_weight;
}
}
__syncthreads();
if (threadIdx.x == 0) {
accscalar_t output_acc = 0;
accscalar_t total_weight_acc = 0;
for (int i = 0; i < NLL_LOSS_THREADS; ++i) {
output_acc += sh_inputs[i];
total_weight_acc += acc_weight[i];
}
*total_weight = static_cast<scalar_t>(total_weight_acc);
if (size_average && nframe == 0) {
// Mean reduction on empty tensors produces NaN
*output = std::numeric_limits<double>::quiet_NaN();
} else if (size_average && total_weight_acc != 0) {
*output = static_cast<scalar_t>(output_acc / total_weight_acc);
} else {
*output = static_cast<scalar_t>(output_acc);
}
}
}
void nll_loss_forward_out_cuda_template(
Tensor& output,
Tensor& total_weight,
const Tensor& input,
const Tensor& target,
const c10::optional<Tensor>& weight_opt,
int64_t reduction,
int64_t ignore_index) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> weight_maybe_owned =
at::borrow_from_optional_tensor(weight_opt);
const Tensor& weight = *weight_maybe_owned;
TORCH_CHECK(
target.dim() == 1,
"1D target tensor expected, multi-target not supported");
int64_t n_classes = input.size(-1);
int64_t n_dims = input.dim();
TORCH_CHECK(n_dims > 0 && n_dims <= 2, "input tensor should be 1D or 2D");
int64_t batch_size = n_dims == 1 ? 1 : input.size(0);
int64_t num_targets = target.size(0);
TORCH_CHECK(
batch_size == num_targets,
"size mismatch (got input: ",
input.sizes(),
", target: ",
target.sizes(),
")")
TORCH_CHECK(
!weight.defined() || (weight.dim() == 1 && weight.numel() == n_classes),
"weight tensor should be defined either for all ",
n_classes,
" classes or no classes"
" but got weight tensor of shape: ",
weight.sizes());
auto weight_ = weight.defined() ? weight.contiguous() : weight;
  if (reduction == Reduction::None && n_dims == 2) {
output.resize_({batch_size});
if (batch_size == 0) {
// This guards from unnecessary operations and launching CUDA kernel with
// 0 blocks.
return;
}
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
input.scalar_type(),
"nll_loss_forward_no_reduce_cuda_kernel",
[&] {
hipLaunchKernelGGL(( nll_loss_forward_no_reduce_cuda_kernel<scalar_t>)
, dim3(at::cuda::detail::GET_BLOCKS(batch_size)),
dim3(at::cuda::detail::CUDA_NUM_THREADS),
0,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
batch_size,
input.packed_accessor64<scalar_t, 2>(),
target.data_ptr<int64_t>(),
output.data_ptr<scalar_t>(),
weight_.defined() ? weight_.data_ptr<scalar_t>() : nullptr,
n_classes,
ignore_index);
C10_HIP_KERNEL_LAUNCH_CHECK();
});
return;
}
output.resize_({});
total_weight.resize_({});
auto input_ = input.contiguous();
auto target_ = target.contiguous();
if (n_dims == 1) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
input.scalar_type(),
"nll_loss_forward_reduce_cuda_kernel_1d",
[&] {
hipLaunchKernelGGL(( nll_loss_forward_reduce_cuda_kernel_1d<scalar_t>)
, dim3(1), dim3(1), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
output.data_ptr<scalar_t>(),
total_weight.data_ptr<scalar_t>(),
input_.data_ptr<scalar_t>(),
target_.data_ptr<int64_t>(),
weight_.defined() ? weight_.data_ptr<scalar_t>() : nullptr,
reduction == at::Reduction::Mean,
n_classes,
ignore_index);
C10_HIP_KERNEL_LAUNCH_CHECK();
});
} else if (n_dims == 2) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
input.scalar_type(),
"nll_loss_forward_reduce_cuda_kernel_2d",
[&] {
hipLaunchKernelGGL(( nll_loss_forward_reduce_cuda_kernel_2d<scalar_t, float>)
, dim3(1), dim3(NLL_LOSS_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
output.data_ptr<scalar_t>(),
total_weight.data_ptr<scalar_t>(),
input_.data_ptr<scalar_t>(),
target_.data_ptr<int64_t>(),
weight_.defined() ? weight_.data_ptr<scalar_t>() : nullptr,
reduction == at::Reduction::Mean,
input.size(0),
input.size(1),
n_classes,
ignore_index);
C10_HIP_KERNEL_LAUNCH_CHECK();
});
}
}
template <typename scalar_t>
__global__ void nll_loss_backward_no_reduce_cuda_kernel(
int batch_size,
int64_t *target,
PackedTensorAccessor64<scalar_t, 1> grad_output,
PackedTensorAccessor64<scalar_t, 2> grad_input,
scalar_t *weights,
int n_classes,
int ignore_index) {
CUDA_KERNEL_LOOP(index, batch_size) {
int cur_target = target[index];
if (cur_target == ignore_index) {
continue;
}
CUDA_KERNEL_ASSERT(cur_target >= 0 && cur_target < n_classes);
scalar_t weight = weights != nullptr ? weights[cur_target] : static_cast<scalar_t>(1);
grad_input[index][cur_target] = -weight * grad_output[index];
}
};
template <typename scalar_t>
__global__ void nll_loss_backward_reduce_cuda_kernel_1d(
scalar_t *grad_input,
scalar_t *grad_output,
scalar_t *weights,
int64_t *target,
scalar_t *total_weight,
bool size_average,
int n_classes,
int64_t ignore_index
) {
if (*total_weight <= 0) {
return;
}
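  // For mean reduction the incoming gradient is scaled by 1/total_weight; ignored targets keep a zero gradient.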
scalar_t norm = size_average ? (static_cast<scalar_t>(1) / *total_weight) : static_cast<scalar_t>(1);
int t = static_cast<int>(*target);
if (t != static_cast<int>(ignore_index)) {
CUDA_KERNEL_ASSERT(t >= 0 && t < n_classes);
grad_input[t] = -(weights != nullptr ? weights[t] : static_cast<scalar_t>(1)) * norm * grad_output[0];
}
};
template <typename scalar_t>
__global__ void nll_loss_backward_reduce_cuda_kernel_2d(
scalar_t* grad_input,
scalar_t* grad_output,
int64_t* target,
scalar_t* weights,
scalar_t* total_weight,
bool size_average,
int nframe,
int ndim,
int n_classes,
int64_t ignore_index) {
if (*total_weight <= 0) {
return;
}
scalar_t norm = size_average ? (static_cast<scalar_t>(1) / *total_weight) : static_cast<scalar_t>(1);
for (int i = threadIdx.x; i < nframe; i += NLL_LOSS_THREADS) {
int t = target[i];
if (t != static_cast<int>(ignore_index)) {
CUDA_KERNEL_ASSERT(t >= 0 && t < n_classes);
grad_input[i * ndim + t] = -(weights != nullptr ? weights[t] : static_cast<scalar_t>(1)) * norm * grad_output[0];
}
}
};
void nll_loss_backward_out_cuda_template(
Tensor& grad_input,
const Tensor& grad_output,
const Tensor& input,
const Tensor& target,
const Tensor& total_weight,
const c10::optional<Tensor>& weight_opt,
int64_t reduction,
int64_t ignore_index) {
c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt);
const Tensor& weight = *weight_maybe_owned;
TORCH_CHECK(
target.dim() == 1,
"1D target tensor expected, multi-target not supported");
int64_t n_dims = input.dim();
TORCH_CHECK(
n_dims > 0 && n_dims <= 2, "input tensor should be 1D or 2D");
int64_t n_classes = input.size(-1);
int64_t batch_size = n_dims == 1 ? 1 : input.size(0);
int64_t num_targets = target.size(0);
TORCH_CHECK(
batch_size == num_targets,
"size mismatch (got input: ",
input.sizes(),
", target: ",
target.sizes(),
")")
TORCH_CHECK(
!weight.defined() || (weight.dim() == 1 && weight.numel() == n_classes),
"weight tensor should be defined either for all or no classes");
TORCH_CHECK(grad_input.is_contiguous(), "grad_input must be contiguous");
auto weight_ = weight.defined() ? weight.contiguous() : weight;
if (reduction == at::Reduction::None && n_dims == 2) {
check_dim_size(grad_output, 1, 0, batch_size);
if (batch_size == 0) {
// This guards from unnecessary operations and launching CUDA kernel with 0 blocks.
return;
}
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
input.scalar_type(),
"nll_loss_backward_no_reduce_cuda_kernel",
[&] {
hipLaunchKernelGGL(( nll_loss_backward_no_reduce_cuda_kernel<scalar_t>)
, dim3(at::cuda::detail::GET_BLOCKS(batch_size)),
dim3(at::cuda::detail::CUDA_NUM_THREADS),
0,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
batch_size,
target.data_ptr<int64_t>(),
grad_output.packed_accessor64<scalar_t, 1>(),
grad_input.packed_accessor64<scalar_t, 2>(),
weight.defined() ? weight_.data_ptr<scalar_t>() : nullptr,
n_classes,
ignore_index);
C10_HIP_KERNEL_LAUNCH_CHECK();
});
return;
}
auto target_ = target.contiguous();
TORCH_CHECK(grad_output.numel() == 1);
if (n_dims == 1) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
input.scalar_type(),
"nll_loss_backward_reduce_cuda_kernel_1d",
[&] {
hipLaunchKernelGGL(( nll_loss_backward_reduce_cuda_kernel_1d<scalar_t>)
, dim3(1), dim3(1), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
grad_input.data_ptr<scalar_t>(),
grad_output.data_ptr<scalar_t>(),
weight.defined() ? weight_.data_ptr<scalar_t>() : nullptr,
target.data_ptr<int64_t>(),
total_weight.data_ptr<scalar_t>(),
reduction == at::Reduction::Mean,
n_classes,
ignore_index
);
C10_HIP_KERNEL_LAUNCH_CHECK();
});
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
input.scalar_type(),
"nll_loss_backward_reduce_cuda_kernel_2d",
[&] {
hipLaunchKernelGGL(( nll_loss_backward_reduce_cuda_kernel_2d<scalar_t>)
, dim3(1), dim3(NLL_LOSS_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
grad_input.data_ptr<scalar_t>(),
grad_output.data_ptr<scalar_t>(),
target.data_ptr<int64_t>(),
weight.defined() ? weight_.data_ptr<scalar_t>() : nullptr,
total_weight.data_ptr<scalar_t>(),
reduction == at::Reduction::Mean,
input.size(0),
input.size(1),
n_classes,
ignore_index);
C10_HIP_KERNEL_LAUNCH_CHECK();
});
}
}
} // namespace
std::tuple<Tensor&, Tensor&> nll_loss_forward_out_cuda(
const Tensor& self,
const Tensor& target,
const c10::optional<Tensor>& weight_opt,
int64_t reduction,
int64_t ignore_index,
Tensor& output,
Tensor& total_weight) {
nll_loss_forward_out_cuda_template(
output, total_weight, self, target, weight_opt, reduction, ignore_index);
return std::tuple<Tensor&, Tensor&>(output, total_weight);
}
std::tuple<Tensor, Tensor> nll_loss_forward_cuda(
const Tensor& self,
const Tensor& target,
const c10::optional<Tensor>& weight_opt,
int64_t reduction,
int64_t ignore_index) {
auto output = at::empty({0}, self.options());
auto total_weight = at::empty({0}, self.options());
nll_loss_forward_out_cuda_template(
output, total_weight, self, target, weight_opt, reduction, ignore_index);
return std::make_tuple(output, total_weight);
}
Tensor& nll_loss_backward_out_cuda(const Tensor& grad_output,
const Tensor& self,
const Tensor& target,
const c10::optional<Tensor>& weight_opt,
int64_t reduction,
int64_t ignore_index,
const Tensor& total_weight,
Tensor& grad_input) {
grad_input.resize_as_(self);
grad_input.zero_();
nll_loss_backward_out_cuda_template(
grad_input,
grad_output,
self,
target,
total_weight,
weight_opt,
reduction,
ignore_index);
return grad_input;
}
Tensor nll_loss_backward_cuda(
const Tensor& grad_output,
const Tensor& self,
const Tensor& target, const c10::optional<Tensor>& weight_opt,
int64_t reduction,
int64_t ignore_index,
const Tensor& total_weight) {
auto grad_input = at::zeros_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
nll_loss_backward_out_cuda_template(
grad_input,
grad_output,
self,
target,
total_weight,
weight_opt,
reduction,
ignore_index);
return grad_input;
}
}} // namespace at::native
|
966d660cd7b530a97592d84be8edc7f8472b0d27.cu
|
#include <ATen/ATen.h>
#include <ATen/NativeFunctions.h>
#include <ATen/Dispatch.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/cuda/detail/KernelUtils.h>
#include <ATen/native/TensorIterator.h>
#include <aten/src/ATen/TensorUtils.h>
#include <ATen/native/cuda/Loops.cuh>
constexpr float EPSILON = 1e-12;
namespace {
using namespace at;
void binary_cross_entropy_backward_out_kernel(Tensor& grad_input, const Tensor& grad, const Tensor& input, const Tensor& target) {
at::TensorIterator iter = TensorIteratorConfig()
.add_output(grad_input)
.add_input(grad)
.add_input(input)
.add_input(target)
.build();
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.common_dtype(), "binary_cross_entropy_backward_out_cuda", [&]() {
at::native::gpu_kernel(iter, [] GPU_LAMBDA (
scalar_t grad_val,
scalar_t input_val,
scalar_t target_val
) -> scalar_t {
const scalar_t one = 1;
const scalar_t epsilon = EPSILON;
scalar_t grad_input_denominator = max(
(one - input_val) * input_val,
epsilon
);
return grad_val * (input_val - target_val) / grad_input_denominator;
}
);
});
}
} // namespace
namespace at { namespace native {
Tensor kl_div_backward_cuda(const Tensor& grad, const Tensor& input, const Tensor& target, int64_t reduction, bool log_target) {
auto grad_input = at::empty_like(input);
if (!log_target) {
TensorIterator iter = TensorIteratorConfig()
.add_output(grad_input)
.add_input(target)
.add_input(grad)
.build();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "kl_div_backward_cuda", [&]() {
scalar_t inv = (reduction == at::Reduction::Mean) ? scalar_t(1.0 / input.numel()) : scalar_t(1.0);
gpu_kernel(iter,
[inv] GPU_LAMBDA (scalar_t target_val, scalar_t grad_val) {
return (target_val > 0) ? scalar_t(-target_val * grad_val * inv) : scalar_t(0.0);
});
});
}
else {
grad_input = -at::exp(target) * grad;
if (reduction == at::Reduction::Mean) {
grad_input /= input.numel();
}
}
return grad_input;
}
Tensor binary_cross_entropy_cuda(const Tensor& input, const Tensor& target, const c10::optional<Tensor>& weight_opt, int64_t reduction) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt);
const Tensor& weight = *weight_maybe_owned;
Tensor loss = at::empty_like(input);
return at::native::binary_cross_entropy_out_cuda(
input, target, weight, reduction, loss);
}
Tensor& binary_cross_entropy_out_cuda(const Tensor& input, const Tensor& target, const c10::optional<Tensor>& weight_opt, int64_t reduction, Tensor& loss) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt);
const Tensor& weight = *weight_maybe_owned;
Tensor loss_squeezed = at::squeeze(loss);
TensorIterator iter = TensorIteratorConfig()
.add_output(loss_squeezed)
.add_owned_input(at::squeeze(input))
.add_owned_input(at::squeeze(target))
.build();
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.common_dtype(), "binary_cross_entropy_out_cuda", [&]() {
gpu_kernel(iter,
[] GPU_LAMBDA (scalar_t input_val, scalar_t target_val) -> scalar_t {
const scalar_t zero = 0;
const scalar_t one = 1;
const scalar_t neg_100 = -100;
CUDA_KERNEL_ASSERT(input_val >= zero && input_val <= one);
scalar_t log_input_val = std::log(input_val);
scalar_t log_1_minus_input_val = std::log(one - input_val);
log_input_val = std::max(log_input_val, neg_100);
log_1_minus_input_val = std::max(log_1_minus_input_val, neg_100);
return ((target_val - one) * log_1_minus_input_val) - (target_val * log_input_val);
}
);
});
if (weight.defined()) {
loss.mul_(weight);
}
if (reduction != at::Reduction::None) {
Tensor loss_reduced;
if (reduction == at::Reduction::Mean) {
loss_reduced = loss.mean();
} else if (reduction == at::Reduction::Sum) {
loss_reduced = loss.sum();
}
loss.resize_as_(loss_reduced).copy_(loss_reduced);
}
return loss;
}
Tensor binary_cross_entropy_backward_cuda(const Tensor& grad, const Tensor& input, const Tensor& target, const c10::optional<Tensor>& weight_opt, int64_t reduction) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt);
const Tensor& weight = *weight_maybe_owned;
Tensor grad_input = at::empty_like(input);
return at::native::binary_cross_entropy_backward_out_cuda(
grad, input, target, weight, reduction, grad_input);
}
Tensor& binary_cross_entropy_backward_out_cuda(const Tensor& grad, const Tensor& input, const Tensor& target, const c10::optional<Tensor>& weight_opt, int64_t reduction, Tensor& grad_input) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt);
const Tensor& weight = *weight_maybe_owned;
Tensor grad_expand = grad.expand_as(input);
binary_cross_entropy_backward_out_kernel(grad_input, grad_expand, input, target);
if (weight.defined()) {
grad_input.mul_(weight);
}
if (reduction == at::Reduction::Mean) {
grad_input.div_(input.numel());
}
return grad_input;
}
// -----------------------------------
// nll_loss
// -----------------------------------
namespace {
const int NLL_LOSS_THREADS = 32;
template <typename scalar_t>
__global__ void nll_loss_forward_no_reduce_cuda_kernel(
int64_t batch_size,
PackedTensorAccessor64<scalar_t, 2> input,
int64_t* target,
scalar_t* output,
scalar_t* weights,
int n_classes,
int ignore_index) {
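  // Grid-stride loop over the batch; samples whose target equals ignore_index contribute zero loss.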
CUDA_KERNEL_LOOP(index, batch_size) {
int cur_target = target[index];
if (cur_target == ignore_index) {
output[index] = static_cast<scalar_t>(0);
continue;
}
CUDA_KERNEL_ASSERT(cur_target >= 0 && cur_target < n_classes);
auto cur_weight =
weights != nullptr ? weights[cur_target] : static_cast<scalar_t>(1);
output[index] = -cur_weight * input[index][cur_target];
}
}
template <typename scalar_t>
__global__ void nll_loss_forward_reduce_cuda_kernel_1d(
scalar_t* output,
scalar_t* total_weight,
scalar_t* input,
int64_t* target,
scalar_t* weights,
bool size_average,
int n_classes,
int64_t ignore_index) {
  CUDA_KERNEL_ASSERT(threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0);
int t = static_cast<int>(*target);
if (t != static_cast<int>(ignore_index)) {
CUDA_KERNEL_ASSERT(t >= 0 && t < n_classes);
scalar_t cur_weight =
weights != nullptr ? weights[t] : static_cast<scalar_t>(1);
*output = -cur_weight * input[t];
*total_weight = cur_weight;
if (size_average && *total_weight > 0) {
*output /= *total_weight;
}
}
}
template <typename scalar_t, typename accscalar_t>
__global__ void nll_loss_forward_reduce_cuda_kernel_2d(
scalar_t* output,
scalar_t* total_weight,
scalar_t* input,
int64_t* target,
scalar_t* weights,
bool size_average,
int nframe,
int ndim,
int n_classes,
int64_t ignore_index) {
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
__shared__ accscalar_t sh_inputs[NLL_LOSS_THREADS],
acc_weight[NLL_LOSS_THREADS];
sh_inputs[threadIdx.x] = static_cast<accscalar_t>(0);
acc_weight[threadIdx.x] = static_cast<accscalar_t>(0);
for (int i = threadIdx.x; i < nframe; i += NLL_LOSS_THREADS) {
int t = target[i];
if (t != static_cast<int>(ignore_index)) {
CUDA_KERNEL_ASSERT(t >= 0 && t < n_classes);
scalar_t cur_weight =
weights != nullptr ? weights[t] : static_cast<scalar_t>(1);
sh_inputs[threadIdx.x] -= input[i * ndim + t] * cur_weight;
acc_weight[threadIdx.x] += cur_weight;
}
}
__syncthreads();
if (threadIdx.x == 0) {
accscalar_t output_acc = 0;
accscalar_t total_weight_acc = 0;
for (int i = 0; i < NLL_LOSS_THREADS; ++i) {
output_acc += sh_inputs[i];
total_weight_acc += acc_weight[i];
}
*total_weight = static_cast<scalar_t>(total_weight_acc);
if (size_average && nframe == 0) {
// Mean reduction on empty tensors produces NaN
*output = std::numeric_limits<double>::quiet_NaN();
} else if (size_average && total_weight_acc != 0) {
*output = static_cast<scalar_t>(output_acc / total_weight_acc);
} else {
*output = static_cast<scalar_t>(output_acc);
}
}
}
void nll_loss_forward_out_cuda_template(
Tensor& output,
Tensor& total_weight,
const Tensor& input,
const Tensor& target,
const c10::optional<Tensor>& weight_opt,
int64_t reduction,
int64_t ignore_index) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> weight_maybe_owned =
at::borrow_from_optional_tensor(weight_opt);
const Tensor& weight = *weight_maybe_owned;
TORCH_CHECK(
target.dim() == 1,
"1D target tensor expected, multi-target not supported");
int64_t n_classes = input.size(-1);
int64_t n_dims = input.dim();
TORCH_CHECK(n_dims > 0 && n_dims <= 2, "input tensor should be 1D or 2D");
int64_t batch_size = n_dims == 1 ? 1 : input.size(0);
int64_t num_targets = target.size(0);
TORCH_CHECK(
batch_size == num_targets,
"size mismatch (got input: ",
input.sizes(),
", target: ",
target.sizes(),
")")
TORCH_CHECK(
!weight.defined() || (weight.dim() == 1 && weight.numel() == n_classes),
"weight tensor should be defined either for all ",
n_classes,
" classes or no classes"
" but got weight tensor of shape: ",
weight.sizes());
auto weight_ = weight.defined() ? weight.contiguous() : weight;
  if (reduction == Reduction::None && n_dims == 2) {
output.resize_({batch_size});
if (batch_size == 0) {
// This guards from unnecessary operations and launching CUDA kernel with
// 0 blocks.
return;
}
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
input.scalar_type(),
"nll_loss_forward_no_reduce_cuda_kernel",
[&] {
nll_loss_forward_no_reduce_cuda_kernel<scalar_t>
<<<at::cuda::detail::GET_BLOCKS(batch_size),
at::cuda::detail::CUDA_NUM_THREADS,
0,
at::cuda::getCurrentCUDAStream()>>>(
batch_size,
input.packed_accessor64<scalar_t, 2>(),
target.data_ptr<int64_t>(),
output.data_ptr<scalar_t>(),
weight_.defined() ? weight_.data_ptr<scalar_t>() : nullptr,
n_classes,
ignore_index);
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
return;
}
output.resize_({});
total_weight.resize_({});
auto input_ = input.contiguous();
auto target_ = target.contiguous();
if (n_dims == 1) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
input.scalar_type(),
"nll_loss_forward_reduce_cuda_kernel_1d",
[&] {
nll_loss_forward_reduce_cuda_kernel_1d<scalar_t>
<<<1, 1, 0, at::cuda::getCurrentCUDAStream()>>>(
output.data_ptr<scalar_t>(),
total_weight.data_ptr<scalar_t>(),
input_.data_ptr<scalar_t>(),
target_.data_ptr<int64_t>(),
weight_.defined() ? weight_.data_ptr<scalar_t>() : nullptr,
reduction == at::Reduction::Mean,
n_classes,
ignore_index);
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
} else if (n_dims == 2) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
input.scalar_type(),
"nll_loss_forward_reduce_cuda_kernel_2d",
[&] {
nll_loss_forward_reduce_cuda_kernel_2d<scalar_t, float>
<<<1, NLL_LOSS_THREADS, 0, at::cuda::getCurrentCUDAStream()>>>(
output.data_ptr<scalar_t>(),
total_weight.data_ptr<scalar_t>(),
input_.data_ptr<scalar_t>(),
target_.data_ptr<int64_t>(),
weight_.defined() ? weight_.data_ptr<scalar_t>() : nullptr,
reduction == at::Reduction::Mean,
input.size(0),
input.size(1),
n_classes,
ignore_index);
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
}
}
template <typename scalar_t>
__global__ void nll_loss_backward_no_reduce_cuda_kernel(
int batch_size,
int64_t *target,
PackedTensorAccessor64<scalar_t, 1> grad_output,
PackedTensorAccessor64<scalar_t, 2> grad_input,
scalar_t *weights,
int n_classes,
int ignore_index) {
CUDA_KERNEL_LOOP(index, batch_size) {
int cur_target = target[index];
if (cur_target == ignore_index) {
continue;
}
CUDA_KERNEL_ASSERT(cur_target >= 0 && cur_target < n_classes);
scalar_t weight = weights != nullptr ? weights[cur_target] : static_cast<scalar_t>(1);
grad_input[index][cur_target] = -weight * grad_output[index];
}
};
template <typename scalar_t>
__global__ void nll_loss_backward_reduce_cuda_kernel_1d(
scalar_t *grad_input,
scalar_t *grad_output,
scalar_t *weights,
int64_t *target,
scalar_t *total_weight,
bool size_average,
int n_classes,
int64_t ignore_index
) {
if (*total_weight <= 0) {
return;
}
scalar_t norm = size_average ? (static_cast<scalar_t>(1) / *total_weight) : static_cast<scalar_t>(1);
int t = static_cast<int>(*target);
if (t != static_cast<int>(ignore_index)) {
CUDA_KERNEL_ASSERT(t >= 0 && t < n_classes);
grad_input[t] = -(weights != nullptr ? weights[t] : static_cast<scalar_t>(1)) * norm * grad_output[0];
}
};
template <typename scalar_t>
__global__ void nll_loss_backward_reduce_cuda_kernel_2d(
scalar_t* grad_input,
scalar_t* grad_output,
int64_t* target,
scalar_t* weights,
scalar_t* total_weight,
bool size_average,
int nframe,
int ndim,
int n_classes,
int64_t ignore_index) {
if (*total_weight <= 0) {
return;
}
scalar_t norm = size_average ? (static_cast<scalar_t>(1) / *total_weight) : static_cast<scalar_t>(1);
for (int i = threadIdx.x; i < nframe; i += NLL_LOSS_THREADS) {
int t = target[i];
if (t != static_cast<int>(ignore_index)) {
CUDA_KERNEL_ASSERT(t >= 0 && t < n_classes);
grad_input[i * ndim + t] = -(weights != nullptr ? weights[t] : static_cast<scalar_t>(1)) * norm * grad_output[0];
}
}
};
void nll_loss_backward_out_cuda_template(
Tensor& grad_input,
const Tensor& grad_output,
const Tensor& input,
const Tensor& target,
const Tensor& total_weight,
const c10::optional<Tensor>& weight_opt,
int64_t reduction,
int64_t ignore_index) {
c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt);
const Tensor& weight = *weight_maybe_owned;
TORCH_CHECK(
target.dim() == 1,
"1D target tensor expected, multi-target not supported");
int64_t n_dims = input.dim();
TORCH_CHECK(
n_dims > 0 && n_dims <= 2, "input tensor should be 1D or 2D");
int64_t n_classes = input.size(-1);
int64_t batch_size = n_dims == 1 ? 1 : input.size(0);
int64_t num_targets = target.size(0);
TORCH_CHECK(
batch_size == num_targets,
"size mismatch (got input: ",
input.sizes(),
", target: ",
target.sizes(),
")")
TORCH_CHECK(
!weight.defined() || (weight.dim() == 1 && weight.numel() == n_classes),
"weight tensor should be defined either for all or no classes");
TORCH_CHECK(grad_input.is_contiguous(), "grad_input must be contiguous");
auto weight_ = weight.defined() ? weight.contiguous() : weight;
if (reduction == at::Reduction::None && n_dims == 2) {
check_dim_size(grad_output, 1, 0, batch_size);
if (batch_size == 0) {
// This guards from unnecessary operations and launching CUDA kernel with 0 blocks.
return;
}
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
input.scalar_type(),
"nll_loss_backward_no_reduce_cuda_kernel",
[&] {
nll_loss_backward_no_reduce_cuda_kernel<scalar_t>
<<<at::cuda::detail::GET_BLOCKS(batch_size),
at::cuda::detail::CUDA_NUM_THREADS,
0,
at::cuda::getCurrentCUDAStream()>>>(
batch_size,
target.data_ptr<int64_t>(),
grad_output.packed_accessor64<scalar_t, 1>(),
grad_input.packed_accessor64<scalar_t, 2>(),
weight.defined() ? weight_.data_ptr<scalar_t>() : nullptr,
n_classes,
ignore_index);
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
return;
}
auto target_ = target.contiguous();
TORCH_CHECK(grad_output.numel() == 1);
if (n_dims == 1) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
input.scalar_type(),
"nll_loss_backward_reduce_cuda_kernel_1d",
[&] {
nll_loss_backward_reduce_cuda_kernel_1d<scalar_t>
<<<1, 1, 0, at::cuda::getCurrentCUDAStream()>>>(
grad_input.data_ptr<scalar_t>(),
grad_output.data_ptr<scalar_t>(),
weight.defined() ? weight_.data_ptr<scalar_t>() : nullptr,
target.data_ptr<int64_t>(),
total_weight.data_ptr<scalar_t>(),
reduction == at::Reduction::Mean,
n_classes,
ignore_index
);
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
input.scalar_type(),
"nll_loss_backward_reduce_cuda_kernel_2d",
[&] {
nll_loss_backward_reduce_cuda_kernel_2d<scalar_t>
<<<1, NLL_LOSS_THREADS, 0, at::cuda::getCurrentCUDAStream()>>>(
grad_input.data_ptr<scalar_t>(),
grad_output.data_ptr<scalar_t>(),
target.data_ptr<int64_t>(),
weight.defined() ? weight_.data_ptr<scalar_t>() : nullptr,
total_weight.data_ptr<scalar_t>(),
reduction == at::Reduction::Mean,
input.size(0),
input.size(1),
n_classes,
ignore_index);
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
}
}
} // namespace
std::tuple<Tensor&, Tensor&> nll_loss_forward_out_cuda(
const Tensor& self,
const Tensor& target,
const c10::optional<Tensor>& weight_opt,
int64_t reduction,
int64_t ignore_index,
Tensor& output,
Tensor& total_weight) {
nll_loss_forward_out_cuda_template(
output, total_weight, self, target, weight_opt, reduction, ignore_index);
return std::tuple<Tensor&, Tensor&>(output, total_weight);
}
std::tuple<Tensor, Tensor> nll_loss_forward_cuda(
const Tensor& self,
const Tensor& target,
const c10::optional<Tensor>& weight_opt,
int64_t reduction,
int64_t ignore_index) {
auto output = at::empty({0}, self.options());
auto total_weight = at::empty({0}, self.options());
nll_loss_forward_out_cuda_template(
output, total_weight, self, target, weight_opt, reduction, ignore_index);
return std::make_tuple(output, total_weight);
}
Tensor& nll_loss_backward_out_cuda(const Tensor& grad_output,
const Tensor& self,
const Tensor& target,
const c10::optional<Tensor>& weight_opt,
int64_t reduction,
int64_t ignore_index,
const Tensor& total_weight,
Tensor& grad_input) {
grad_input.resize_as_(self);
grad_input.zero_();
nll_loss_backward_out_cuda_template(
grad_input,
grad_output,
self,
target,
total_weight,
weight_opt,
reduction,
ignore_index);
return grad_input;
}
Tensor nll_loss_backward_cuda(
const Tensor& grad_output,
const Tensor& self,
const Tensor& target, const c10::optional<Tensor>& weight_opt,
int64_t reduction,
int64_t ignore_index,
const Tensor& total_weight) {
auto grad_input = at::zeros_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
nll_loss_backward_out_cuda_template(
grad_input,
grad_output,
self,
target,
total_weight,
weight_opt,
reduction,
ignore_index);
return grad_input;
}
}} // namespace at::native
|
cf672fb66c3397ef28c8c7f411671c9f05510965.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/scalar/scalar_device_view.cuh>
#include <cudf/search.hpp>
#include <cudf/table/row_operators.cuh>
#include <cudf/table/table_device_view.cuh>
#include <cudf/table/table_view.hpp>
#include <cudf/detail/iterator.cuh>
#include <cudf/detail/search.hpp>
#include <hash/unordered_multiset.cuh>
#include <rmm/thrust_rmm_allocator.h>
#include <cudf/strings/detail/utilities.hpp>
#include <thrust/binary_search.h>
#include <thrust/logical.h>
namespace cudf {
namespace {
template <typename DataIterator,
typename ValuesIterator,
typename OutputIterator,
typename Comparator>
void launch_search(DataIterator it_data,
ValuesIterator it_vals,
size_type data_size,
size_type values_size,
OutputIterator it_output,
Comparator comp,
bool find_first,
hipStream_t stream)
{
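  // find_first selects thrust::lower_bound, which yields the first position at which each value
  // could be inserted while keeping the data ordered; otherwise thrust::upper_bound yields the
  // last such position.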
if (find_first) {
thrust::lower_bound(rmm::exec_policy(stream)->on(stream),
it_data,
it_data + data_size,
it_vals,
it_vals + values_size,
it_output,
comp);
} else {
thrust::upper_bound(rmm::exec_policy(stream)->on(stream),
it_data,
it_data + data_size,
it_vals,
it_vals + values_size,
it_output,
comp);
}
}
std::unique_ptr<column> search_ordered(table_view const& t,
table_view const& values,
bool find_first,
std::vector<order> const& column_order,
std::vector<null_order> const& null_precedence,
rmm::mr::device_memory_resource* mr,
hipStream_t stream = 0)
{
// Allocate result column
std::unique_ptr<column> result = make_numeric_column(
data_type{type_to_id<size_type>()}, values.num_rows(), mask_state::UNALLOCATED, stream, mr);
mutable_column_view result_view = result.get()->mutable_view();
// Handle empty inputs
if (t.num_rows() == 0) {
CUDA_TRY(hipMemset(result_view.data<size_type>(), 0, values.num_rows() * sizeof(size_type)));
return result;
}
if (not column_order.empty()) {
CUDF_EXPECTS(static_cast<std::size_t>(t.num_columns()) == column_order.size(),
"Mismatch between number of columns and column order.");
}
if (not null_precedence.empty()) {
CUDF_EXPECTS(static_cast<std::size_t>(t.num_columns()) == null_precedence.size(),
"Mismatch between number of columns and null precedence.");
}
auto d_t = table_device_view::create(t, stream);
auto d_values = table_device_view::create(values, stream);
auto count_it = thrust::make_counting_iterator<size_type>(0);
rmm::device_vector<order> d_column_order(column_order.begin(), column_order.end());
rmm::device_vector<null_order> d_null_precedence(null_precedence.begin(), null_precedence.end());
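  // The counting iterators stand in for row indices; the lexicographic row comparator performs the
  // actual comparison. Its table arguments are swapped for upper_bound because thrust invokes the
  // comparator as comp(value, element) there, versus comp(element, value) for lower_bound.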
if (has_nulls(t) or has_nulls(values)) {
auto ineq_op =
(find_first)
? row_lexicographic_comparator<true>(
*d_t, *d_values, d_column_order.data().get(), d_null_precedence.data().get())
: row_lexicographic_comparator<true>(
*d_values, *d_t, d_column_order.data().get(), d_null_precedence.data().get());
launch_search(count_it,
count_it,
t.num_rows(),
values.num_rows(),
result_view.data<size_type>(),
ineq_op,
find_first,
stream);
} else {
auto ineq_op =
(find_first)
? row_lexicographic_comparator<false>(
*d_t, *d_values, d_column_order.data().get(), d_null_precedence.data().get())
: row_lexicographic_comparator<false>(
*d_values, *d_t, d_column_order.data().get(), d_null_precedence.data().get());
launch_search(count_it,
count_it,
t.num_rows(),
values.num_rows(),
result_view.data<size_type>(),
ineq_op,
find_first,
stream);
}
return result;
}
struct contains_scalar_dispatch {
template <typename Element>
bool operator()(column_view const& col,
scalar const& value,
hipStream_t stream,
rmm::mr::device_memory_resource* mr)
{
using ScalarType = cudf::scalar_type_t<Element>;
auto d_col = column_device_view::create(col, stream);
auto s = static_cast<const ScalarType*>(&value);
if (col.has_nulls()) {
auto found_iter = thrust::find(rmm::exec_policy(stream)->on(stream),
d_col->pair_begin<Element, true>(),
d_col->pair_end<Element, true>(),
thrust::make_pair(s->value(), true));
return found_iter != d_col->pair_end<Element, true>();
} else {
auto found_iter = thrust::find(rmm::exec_policy(stream)->on(stream),
d_col->begin<Element>(),
d_col->end<Element>(),
s->value());
return found_iter != d_col->end<Element>();
}
}
};
template <>
bool contains_scalar_dispatch::operator()<cudf::dictionary32>(column_view const& col,
scalar const& value,
hipStream_t stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FAIL("dictionary type not supported yet");
}
template <>
bool contains_scalar_dispatch::operator()<cudf::list_view>(column_view const& col,
scalar const& value,
hipStream_t stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FAIL("list_view type not supported yet");
}
template <>
bool contains_scalar_dispatch::operator()<cudf::struct_view>(column_view const& col,
scalar const& value,
hipStream_t stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FAIL("struct_view type not supported yet");
}
} // namespace
namespace detail {
bool contains(column_view const& col,
scalar const& value,
rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
CUDF_EXPECTS(col.type() == value.type(), "DTYPE mismatch");
if (col.size() == 0) { return false; }
if (not value.is_valid()) { return col.has_nulls(); }
return cudf::type_dispatcher(col.type(), contains_scalar_dispatch{}, col, value, stream, mr);
}
struct multi_contains_dispatch {
template <typename Element>
std::unique_ptr<column> operator()(column_view const& haystack,
column_view const& needles,
rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
std::unique_ptr<column> result = make_numeric_column(data_type{type_to_id<bool>()},
haystack.size(),
copy_bitmask(haystack),
haystack.null_count(),
stream,
mr);
if (haystack.size() == 0) { return result; }
mutable_column_view result_view = result.get()->mutable_view();
if (needles.size() == 0) {
thrust::fill(rmm::exec_policy(stream)->on(stream),
result_view.begin<bool>(),
result_view.end<bool>(),
false);
return result;
}
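    // Build an unordered multiset over the needles once, then test every haystack element against it;
    // null haystack entries are reported as contained (true).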
auto hash_set = cudf::detail::unordered_multiset<Element>::create(needles, stream);
auto device_hash_set = hash_set.to_device();
auto d_haystack_ptr = column_device_view::create(haystack, stream);
auto d_haystack = *d_haystack_ptr;
if (haystack.has_nulls()) {
thrust::transform(rmm::exec_policy(stream)->on(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(haystack.size()),
result_view.begin<bool>(),
[device_hash_set, d_haystack] __device__(size_t index) {
return d_haystack.is_null_nocheck(index) ||
device_hash_set.contains(d_haystack.element<Element>(index));
});
} else {
thrust::transform(rmm::exec_policy(stream)->on(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(haystack.size()),
result_view.begin<bool>(),
[device_hash_set, d_haystack] __device__(size_t index) {
return device_hash_set.contains(d_haystack.element<Element>(index));
});
}
return result;
}
};
template <>
std::unique_ptr<column> multi_contains_dispatch::operator()<dictionary32>(
column_view const& haystack,
column_view const& needles,
rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
CUDF_FAIL("dictionary type not supported");
}
template <>
std::unique_ptr<column> multi_contains_dispatch::operator()<list_view>(
column_view const& haystack,
column_view const& needles,
rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
CUDF_FAIL("list_view type not supported");
}
template <>
std::unique_ptr<column> multi_contains_dispatch::operator()<struct_view>(
column_view const& haystack,
column_view const& needles,
rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
CUDF_FAIL("struct_view type not supported");
}
std::unique_ptr<column> contains(column_view const& haystack,
column_view const& needles,
rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
CUDF_EXPECTS(haystack.type() == needles.type(), "DTYPE mismatch");
return cudf::type_dispatcher(
haystack.type(), multi_contains_dispatch{}, haystack, needles, mr, stream);
}
std::unique_ptr<column> lower_bound(table_view const& t,
table_view const& values,
std::vector<order> const& column_order,
std::vector<null_order> const& null_precedence,
rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
return search_ordered(t, values, true, column_order, null_precedence, mr, stream);
}
std::unique_ptr<column> upper_bound(table_view const& t,
table_view const& values,
std::vector<order> const& column_order,
std::vector<null_order> const& null_precedence,
rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
return search_ordered(t, values, false, column_order, null_precedence, mr, stream);
}
} // namespace detail
// external APIs
std::unique_ptr<column> lower_bound(table_view const& t,
table_view const& values,
std::vector<order> const& column_order,
std::vector<null_order> const& null_precedence,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::lower_bound(t, values, column_order, null_precedence, mr);
}
std::unique_ptr<column> upper_bound(table_view const& t,
table_view const& values,
std::vector<order> const& column_order,
std::vector<null_order> const& null_precedence,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::upper_bound(t, values, column_order, null_precedence, mr);
}
bool contains(column_view const& col, scalar const& value, rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::contains(col, value, mr);
}
std::unique_ptr<column> contains(column_view const& haystack,
column_view const& needles,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::contains(haystack, needles, mr);
}
} // namespace cudf
|
cf672fb66c3397ef28c8c7f411671c9f05510965.cu
|
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/scalar/scalar_device_view.cuh>
#include <cudf/search.hpp>
#include <cudf/table/row_operators.cuh>
#include <cudf/table/table_device_view.cuh>
#include <cudf/table/table_view.hpp>
#include <cudf/detail/iterator.cuh>
#include <cudf/detail/search.hpp>
#include <hash/unordered_multiset.cuh>
#include <rmm/thrust_rmm_allocator.h>
#include <cudf/strings/detail/utilities.hpp>
#include <thrust/binary_search.h>
#include <thrust/logical.h>
namespace cudf {
namespace {
template <typename DataIterator,
typename ValuesIterator,
typename OutputIterator,
typename Comparator>
void launch_search(DataIterator it_data,
ValuesIterator it_vals,
size_type data_size,
size_type values_size,
OutputIterator it_output,
Comparator comp,
bool find_first,
cudaStream_t stream)
{
if (find_first) {
thrust::lower_bound(rmm::exec_policy(stream)->on(stream),
it_data,
it_data + data_size,
it_vals,
it_vals + values_size,
it_output,
comp);
} else {
thrust::upper_bound(rmm::exec_policy(stream)->on(stream),
it_data,
it_data + data_size,
it_vals,
it_vals + values_size,
it_output,
comp);
}
}
std::unique_ptr<column> search_ordered(table_view const& t,
table_view const& values,
bool find_first,
std::vector<order> const& column_order,
std::vector<null_order> const& null_precedence,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream = 0)
{
// Allocate result column
std::unique_ptr<column> result = make_numeric_column(
data_type{type_to_id<size_type>()}, values.num_rows(), mask_state::UNALLOCATED, stream, mr);
mutable_column_view result_view = result.get()->mutable_view();
// Handle empty inputs
if (t.num_rows() == 0) {
CUDA_TRY(cudaMemset(result_view.data<size_type>(), 0, values.num_rows() * sizeof(size_type)));
return result;
}
if (not column_order.empty()) {
CUDF_EXPECTS(static_cast<std::size_t>(t.num_columns()) == column_order.size(),
"Mismatch between number of columns and column order.");
}
if (not null_precedence.empty()) {
CUDF_EXPECTS(static_cast<std::size_t>(t.num_columns()) == null_precedence.size(),
"Mismatch between number of columns and null precedence.");
}
auto d_t = table_device_view::create(t, stream);
auto d_values = table_device_view::create(values, stream);
auto count_it = thrust::make_counting_iterator<size_type>(0);
rmm::device_vector<order> d_column_order(column_order.begin(), column_order.end());
rmm::device_vector<null_order> d_null_precedence(null_precedence.begin(), null_precedence.end());
if (has_nulls(t) or has_nulls(values)) {
auto ineq_op =
(find_first)
? row_lexicographic_comparator<true>(
*d_t, *d_values, d_column_order.data().get(), d_null_precedence.data().get())
: row_lexicographic_comparator<true>(
*d_values, *d_t, d_column_order.data().get(), d_null_precedence.data().get());
launch_search(count_it,
count_it,
t.num_rows(),
values.num_rows(),
result_view.data<size_type>(),
ineq_op,
find_first,
stream);
} else {
auto ineq_op =
(find_first)
? row_lexicographic_comparator<false>(
*d_t, *d_values, d_column_order.data().get(), d_null_precedence.data().get())
: row_lexicographic_comparator<false>(
*d_values, *d_t, d_column_order.data().get(), d_null_precedence.data().get());
launch_search(count_it,
count_it,
t.num_rows(),
values.num_rows(),
result_view.data<size_type>(),
ineq_op,
find_first,
stream);
}
return result;
}
struct contains_scalar_dispatch {
template <typename Element>
bool operator()(column_view const& col,
scalar const& value,
cudaStream_t stream,
rmm::mr::device_memory_resource* mr)
{
using ScalarType = cudf::scalar_type_t<Element>;
auto d_col = column_device_view::create(col, stream);
auto s = static_cast<const ScalarType*>(&value);
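    // With nulls present, search the (value, validity) pair iterators for (scalar, true) so null rows
    // can never match; otherwise search the raw values directly.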
if (col.has_nulls()) {
auto found_iter = thrust::find(rmm::exec_policy(stream)->on(stream),
d_col->pair_begin<Element, true>(),
d_col->pair_end<Element, true>(),
thrust::make_pair(s->value(), true));
return found_iter != d_col->pair_end<Element, true>();
} else {
auto found_iter = thrust::find(rmm::exec_policy(stream)->on(stream),
d_col->begin<Element>(),
d_col->end<Element>(),
s->value());
return found_iter != d_col->end<Element>();
}
}
};
template <>
bool contains_scalar_dispatch::operator()<cudf::dictionary32>(column_view const& col,
scalar const& value,
cudaStream_t stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FAIL("dictionary type not supported yet");
}
template <>
bool contains_scalar_dispatch::operator()<cudf::list_view>(column_view const& col,
scalar const& value,
cudaStream_t stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FAIL("list_view type not supported yet");
}
template <>
bool contains_scalar_dispatch::operator()<cudf::struct_view>(column_view const& col,
scalar const& value,
cudaStream_t stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FAIL("struct_view type not supported yet");
}
} // namespace
namespace detail {
bool contains(column_view const& col,
scalar const& value,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream)
{
CUDF_EXPECTS(col.type() == value.type(), "DTYPE mismatch");
if (col.size() == 0) { return false; }
if (not value.is_valid()) { return col.has_nulls(); }
return cudf::type_dispatcher(col.type(), contains_scalar_dispatch{}, col, value, stream, mr);
}
struct multi_contains_dispatch {
template <typename Element>
std::unique_ptr<column> operator()(column_view const& haystack,
column_view const& needles,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream)
{
std::unique_ptr<column> result = make_numeric_column(data_type{type_to_id<bool>()},
haystack.size(),
copy_bitmask(haystack),
haystack.null_count(),
stream,
mr);
if (haystack.size() == 0) { return result; }
mutable_column_view result_view = result.get()->mutable_view();
if (needles.size() == 0) {
thrust::fill(rmm::exec_policy(stream)->on(stream),
result_view.begin<bool>(),
result_view.end<bool>(),
false);
return result;
}
auto hash_set = cudf::detail::unordered_multiset<Element>::create(needles, stream);
auto device_hash_set = hash_set.to_device();
auto d_haystack_ptr = column_device_view::create(haystack, stream);
auto d_haystack = *d_haystack_ptr;
if (haystack.has_nulls()) {
thrust::transform(rmm::exec_policy(stream)->on(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(haystack.size()),
result_view.begin<bool>(),
[device_hash_set, d_haystack] __device__(size_t index) {
return d_haystack.is_null_nocheck(index) ||
device_hash_set.contains(d_haystack.element<Element>(index));
});
} else {
thrust::transform(rmm::exec_policy(stream)->on(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(haystack.size()),
result_view.begin<bool>(),
[device_hash_set, d_haystack] __device__(size_t index) {
return device_hash_set.contains(d_haystack.element<Element>(index));
});
}
return result;
}
};
template <>
std::unique_ptr<column> multi_contains_dispatch::operator()<dictionary32>(
column_view const& haystack,
column_view const& needles,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream)
{
CUDF_FAIL("dictionary type not supported");
}
template <>
std::unique_ptr<column> multi_contains_dispatch::operator()<list_view>(
column_view const& haystack,
column_view const& needles,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream)
{
CUDF_FAIL("list_view type not supported");
}
template <>
std::unique_ptr<column> multi_contains_dispatch::operator()<struct_view>(
column_view const& haystack,
column_view const& needles,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream)
{
CUDF_FAIL("struct_view type not supported");
}
std::unique_ptr<column> contains(column_view const& haystack,
column_view const& needles,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream)
{
CUDF_EXPECTS(haystack.type() == needles.type(), "DTYPE mismatch");
return cudf::type_dispatcher(
haystack.type(), multi_contains_dispatch{}, haystack, needles, mr, stream);
}
std::unique_ptr<column> lower_bound(table_view const& t,
table_view const& values,
std::vector<order> const& column_order,
std::vector<null_order> const& null_precedence,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream)
{
return search_ordered(t, values, true, column_order, null_precedence, mr, stream);
}
std::unique_ptr<column> upper_bound(table_view const& t,
table_view const& values,
std::vector<order> const& column_order,
std::vector<null_order> const& null_precedence,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream)
{
return search_ordered(t, values, false, column_order, null_precedence, mr, stream);
}
} // namespace detail
// external APIs
std::unique_ptr<column> lower_bound(table_view const& t,
table_view const& values,
std::vector<order> const& column_order,
std::vector<null_order> const& null_precedence,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::lower_bound(t, values, column_order, null_precedence, mr);
}
std::unique_ptr<column> upper_bound(table_view const& t,
table_view const& values,
std::vector<order> const& column_order,
std::vector<null_order> const& null_precedence,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::upper_bound(t, values, column_order, null_precedence, mr);
}
bool contains(column_view const& col, scalar const& value, rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::contains(col, value, mr);
}
std::unique_ptr<column> contains(column_view const& haystack,
column_view const& needles,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::contains(haystack, needles, mr);
}
} // namespace cudf
|
0b0b695e8033dc6b14066e5d219e52421875ef76.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.4.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
December 2013
@generated d Tue Dec 17 13:18:45 2013
*/
#include "common_magma.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
#define BLOCK_SIZEx 32
#define BLOCK_SIZEy 16
#define PRECISION_d
// ----------------------------------------
// Does sum reduction of array x, leaving total in x[0].
// Contents of x are destroyed in the process.
// With k threads, can reduce array up to 2*k in size.
// Assumes number of threads <= 1024 (which is max number of threads up to CUDA capability 3.0)
// Having n as template parameter allows compiler to evaluate some conditions at compile time.
template< int n >
__device__ void sum_reduce( /*int n,*/ int i, double* x )
{
__syncthreads();
if ( n > 1024 ) { if ( i < 1024 && i + 1024 < n ) { x[i] += x[i+1024]; } __syncthreads(); }
if ( n > 512 ) { if ( i < 512 && i + 512 < n ) { x[i] += x[i+ 512]; } __syncthreads(); }
if ( n > 256 ) { if ( i < 256 && i + 256 < n ) { x[i] += x[i+ 256]; } __syncthreads(); }
if ( n > 128 ) { if ( i < 128 && i + 128 < n ) { x[i] += x[i+ 128]; } __syncthreads(); }
if ( n > 64 ) { if ( i < 64 && i + 64 < n ) { x[i] += x[i+ 64]; } __syncthreads(); }
if ( n > 32 ) { if ( i < 32 && i + 32 < n ) { x[i] += x[i+ 32]; } __syncthreads(); }
// probably don't need __syncthreads for < 16 threads
// because of implicit warp level synchronization.
if ( n > 16 ) { if ( i < 16 && i + 16 < n ) { x[i] += x[i+ 16]; } __syncthreads(); }
if ( n > 8 ) { if ( i < 8 && i + 8 < n ) { x[i] += x[i+ 8]; } __syncthreads(); }
if ( n > 4 ) { if ( i < 4 && i + 4 < n ) { x[i] += x[i+ 4]; } __syncthreads(); }
if ( n > 2 ) { if ( i < 2 && i + 2 < n ) { x[i] += x[i+ 2]; } __syncthreads(); }
if ( n > 1 ) { if ( i < 1 && i + 1 < n ) { x[i] += x[i+ 1]; } __syncthreads(); }
}
// end sum_reduce
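// Used below as, e.g., sum_reduce< BLOCK_SIZE >( threadIdx.x, sum ) once every thread has written
// its partial sum into the shared array.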
template< int n >
__device__ void sum_reduce_2d( /*int n,*/ int i, int c, double x[][BLOCK_SIZEy+1] )
{
__syncthreads();
if ( n > 1024 ) { if ( i < 1024 && i + 1024 < n ) { x[i][c] += x[i+1024][c]; } __syncthreads(); }
if ( n > 512 ) { if ( i < 512 && i + 512 < n ) { x[i][c] += x[i+ 512][c]; } __syncthreads(); }
if ( n > 256 ) { if ( i < 256 && i + 256 < n ) { x[i][c] += x[i+ 256][c]; } __syncthreads(); }
if ( n > 128 ) { if ( i < 128 && i + 128 < n ) { x[i][c] += x[i+ 128][c]; } __syncthreads(); }
if ( n > 64 ) { if ( i < 64 && i + 64 < n ) { x[i][c] += x[i+ 64][c]; } __syncthreads(); }
if ( n > 32 ) { if ( i < 32 && i + 32 < n ) { x[i][c] += x[i+ 32][c]; } __syncthreads(); }
// probably don't need __syncthreads for < 16 threads
// because of implicit warp level synchronization.
if ( n > 16 ) { if ( i < 16 && i + 16 < n ) { x[i][c] += x[i+ 16][c]; } __syncthreads(); }
if ( n > 8 ) { if ( i < 8 && i + 8 < n ) { x[i][c] += x[i+ 8][c]; } __syncthreads(); }
if ( n > 4 ) { if ( i < 4 && i + 4 < n ) { x[i][c] += x[i+ 4][c]; } __syncthreads(); }
if ( n > 2 ) { if ( i < 2 && i + 2 < n ) { x[i][c] += x[i+ 2][c]; } __syncthreads(); }
if ( n > 1 ) { if ( i < 1 && i + 1 < n ) { x[i][c] += x[i+ 1][c]; } __syncthreads(); }
}
// end sum_reduce
//==============================================================================
__global__ void
magmablas_dnrm2_kernel( int m, double *da, int ldda, double *dxnorm )
{
const int i = threadIdx.x;
double *dx = da + blockIdx.x * ldda;
__shared__ double sum[ BLOCK_SIZE ];
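    // One thread block per column of da: each thread accumulates a strided partial sum of squares,
    // the block reduces the partials in shared memory, and thread 0 writes the square root.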
double re, lsum;
// get norm of dx
lsum = 0;
for( int j = i; j < m; j += BLOCK_SIZE ) {
#if (defined(PRECISION_s) || defined(PRECISION_d))
re = dx[j];
lsum += re*re;
#else
re = MAGMA_D_REAL( dx[j] );
double im = MAGMA_D_IMAG( dx[j] );
lsum += re*re + im*im;
#endif
}
sum[i] = lsum;
sum_reduce< BLOCK_SIZE >( i, sum );
if (i==0)
dxnorm[blockIdx.x] = sqrt(sum[0]);
}
//==============================================================================
__global__ void
magmablas_dnrm2_check_kernel( int m, double *da, int ldda, double *dxnorm,
double *lsticc )
{
const int i = threadIdx.x;
double *dx = da + blockIdx.x * ldda;
__shared__ double sum[ BLOCK_SIZE ];
double re, lsum;
// get norm of dx only if lsticc[blockIdx+1] != 0
if( lsticc[blockIdx.x + 1] == 0 ) return;
lsum = 0;
for( int j = i; j < m; j += BLOCK_SIZE ) {
#if (defined(PRECISION_s) || defined(PRECISION_d))
re = dx[j];
lsum += re*re;
#else
re = MAGMA_D_REAL( dx[j] );
double im = MAGMA_D_IMAG( dx[j] );
lsum += re*re + im*im;
#endif
}
sum[i] = lsum;
sum_reduce< BLOCK_SIZE >( i, sum );
if (i==0)
dxnorm[blockIdx.x] = sqrt(sum[0]);
}
extern "C" void
magmablas_dnrm2_check(
magma_int_t m, magma_int_t n, double *da, magma_int_t ldda,
double *dxnorm, double *lsticc)
{
dim3 blocks( n );
dim3 threads( BLOCK_SIZE );
hipLaunchKernelGGL(( magmablas_dnrm2_check_kernel), dim3(blocks), dim3(threads) , 0, 0, m, da, ldda, dxnorm, lsticc );
}
//==============================================================================
__global__ void
magmablas_dnrm2_smkernel( int m, int n, double *da, int ldda,
double *dxnorm )
{
const int i = threadIdx.x, c= threadIdx.y;
__shared__ double sum[ BLOCK_SIZEx ][ BLOCK_SIZEy + 1];
double re, lsum;
for( int k = c; k < n; k+= BLOCK_SIZEy)
{
double *dx = da + k * ldda;
// get norm of dx
lsum = 0;
for( int j = i; j < m; j += BLOCK_SIZEx ) {
#if (defined(PRECISION_s) || defined(PRECISION_d))
re = dx[j];
lsum += re*re;
#else
re = MAGMA_D_REAL( dx[j] );
double im = MAGMA_D_IMAG( dx[j] );
lsum += re*re + im*im;
#endif
}
sum[i][c] = lsum;
sum_reduce_2d< BLOCK_SIZEx >( i, c, sum );
if (i==0)
dxnorm[k] = sqrt(sum[0][c]);
__syncthreads();
}
}
//==============================================================================
/*
Compute the dnrm2 of each column of m-by-n matrix dA.
The resulting norms are written in the dxnorm array.
This routine uses only one SM (block).
*/
extern "C" void
magmablas_dnrm2_sm(
magma_int_t m, magma_int_t n, double *da, magma_int_t ldda,
double *dxnorm)
{
dim3 blocks( 1 );
dim3 threads( BLOCK_SIZEx, BLOCK_SIZEy );
hipLaunchKernelGGL(( magmablas_dnrm2_smkernel), dim3(blocks), dim3(threads), 0, magma_stream , m, n, da, ldda, dxnorm );
}
//==============================================================================
static
__device__ void dsum_reduce( int n, int i, double* x )
{
__syncthreads();
if ( n > 1024 ) { if ( i < 1024 && i + 1024 < n ) { x[i] += x[i+1024]; } __syncthreads(); }
if ( n > 512 ) { if ( i < 512 && i + 512 < n ) { x[i] += x[i+ 512]; } __syncthreads(); }
if ( n > 256 ) { if ( i < 256 && i + 256 < n ) { x[i] += x[i+ 256]; } __syncthreads(); }
if ( n > 128 ) { if ( i < 128 && i + 128 < n ) { x[i] += x[i+ 128]; } __syncthreads(); }
if ( n > 64 ) { if ( i < 64 && i + 64 < n ) { x[i] += x[i+ 64]; } __syncthreads(); }
if ( n > 32 ) { if ( i < 32 && i + 32 < n ) { x[i] += x[i+ 32]; } __syncthreads(); }
// probably don't need __syncthreads for < 16 threads
// because of implicit warp level synchronization.
if ( n > 16 ) { if ( i < 16 && i + 16 < n ) { x[i] += x[i+ 16]; } __syncthreads(); }
if ( n > 8 ) { if ( i < 8 && i + 8 < n ) { x[i] += x[i+ 8]; } __syncthreads(); }
if ( n > 4 ) { if ( i < 4 && i + 4 < n ) { x[i] += x[i+ 4]; } __syncthreads(); }
if ( n > 2 ) { if ( i < 2 && i + 2 < n ) { x[i] += x[i+ 2]; } __syncthreads(); }
if ( n > 1 ) { if ( i < 1 && i + 1 < n ) { x[i] += x[i+ 1]; } __syncthreads(); }
}
// end sum_reduce
__global__ void
magma_dnrm2_adjust_kernel(double *xnorm, double *c)
{
const int i = threadIdx.x;
__shared__ double sum[ BLOCK_SIZE ];
double temp;
temp = MAGMA_D_ABS( c[i] ) / xnorm[0];
sum[i] = -temp * temp;
dsum_reduce( blockDim.x, i, sum );
__syncthreads();
if (i==0)
xnorm[0] = xnorm[0] * sqrt(1+sum[0]);
}
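// In effect this computes
//     xnorm[0] <- xnorm[0] * sqrt( 1 - sum_i (|c[i]| / xnorm[0])^2 )
//               = sqrt( xnorm[0]^2 - sum_i |c[i]|^2 ),  i = 0 .. blockDim.x-1,
// i.e. the norm with the contribution of the blockDim.x entries of c removed.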
/*
Adjust the norm of c to give the norm of c[k+1:], assuming that
c was changed with orthogonal transformations.
*/
extern "C" void
magmablas_dnrm2_adjust(magma_int_t k, double *xnorm, double *c)
{
hipLaunchKernelGGL(( magma_dnrm2_adjust_kernel), dim3(1), dim3(k), 0, magma_stream , xnorm, c);
}
//==============================================================================
#define BS 256
__global__ void
magma_dnrm2_row_check_adjust_kernel(int n, double tol, double *xnorm, double *xnorm2,
double *c, int ldc, double *lsticc)
{
const int i = threadIdx.x + blockIdx.x*BS;
lsticc[i+1] = 0;
if (i<n){
double temp = MAGMA_D_ABS( c[i*ldc] ) / xnorm[i];
temp = max( 0.0, ((1.0 + temp) * (1.0 - temp)) );
double temp2 = xnorm[i] / xnorm2[i];
temp2 = temp * (temp2 * temp2);
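// temp2 compares the downdated norm against the original column norm xnorm2;
// if it drops to tol or below, the cheap downdate is deemed unreliable and the
// column is flagged in lsticc so magmablas_dnrm2_check can recompute its norm.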
if (temp2 <= tol) {
lsticc[i+1] = 1;
} else {
xnorm[i] *= sqrt(temp);
}
}
if( i==0 ) lsticc[0] = 0;
dsum_reduce( blockDim.x, i, lsticc );
}
/*
Adjust the norm of c[,1:k] to give the norm of c[k+1:,1:k], assuming that
c was changed with orthogonal transformations.
It also does checks for QP3
*/
extern "C" void
magmablas_dnrm2_row_check_adjust(
magma_int_t k, double tol, double *xnorm, double *xnorm2,
double *c, magma_int_t ldc, double *lsticc)
{
int nblocks = (k+BS-1)/BS;
hipLaunchKernelGGL(( magma_dnrm2_row_check_adjust_kernel), dim3(nblocks), dim3(BS) , 0, 0, k, tol, xnorm, xnorm2, c, ldc, lsticc);
}
//==============================================================================
/*
Compute the dnrm2 of each column of m-by-n matrix dA.
The resulting norms are written in the dxnorm array.
The computation can be done using n blocks (default) or on one SM (commented).
*/
extern "C" void
magmablas_dnrm2_cols(
magma_int_t m, magma_int_t n,
double *da, magma_int_t ldda,
double *dxnorm)
{
dim3 blocks( n );
dim3 threads( BLOCK_SIZE );
hipLaunchKernelGGL(( magmablas_dnrm2_kernel), dim3(blocks), dim3(threads), 0, magma_stream , m, da, ldda, dxnorm );
// The following would do the computation on one SM
// magmablas_dnrm2_sm(m, n, da, ldda, dxnorm);
}
//==============================================================================
|
0b0b695e8033dc6b14066e5d219e52421875ef76.cu
|
/*
-- MAGMA (version 1.4.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
December 2013
@generated d Tue Dec 17 13:18:45 2013
*/
#include "common_magma.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
#define BLOCK_SIZEx 32
#define BLOCK_SIZEy 16
#define PRECISION_d
// ----------------------------------------
// Does sum reduction of array x, leaving total in x[0].
// Contents of x are destroyed in the process.
// With k threads, can reduce array up to 2*k in size.
// Assumes number of threads <= 1024 (which is max number of threads up to CUDA capability 3.0)
// Having n as template parameter allows compiler to evaluate some conditions at compile time.
template< int n >
__device__ void sum_reduce( /*int n,*/ int i, double* x )
{
__syncthreads();
if ( n > 1024 ) { if ( i < 1024 && i + 1024 < n ) { x[i] += x[i+1024]; } __syncthreads(); }
if ( n > 512 ) { if ( i < 512 && i + 512 < n ) { x[i] += x[i+ 512]; } __syncthreads(); }
if ( n > 256 ) { if ( i < 256 && i + 256 < n ) { x[i] += x[i+ 256]; } __syncthreads(); }
if ( n > 128 ) { if ( i < 128 && i + 128 < n ) { x[i] += x[i+ 128]; } __syncthreads(); }
if ( n > 64 ) { if ( i < 64 && i + 64 < n ) { x[i] += x[i+ 64]; } __syncthreads(); }
if ( n > 32 ) { if ( i < 32 && i + 32 < n ) { x[i] += x[i+ 32]; } __syncthreads(); }
// probably don't need __syncthreads for < 16 threads
// because of implicit warp level synchronization.
if ( n > 16 ) { if ( i < 16 && i + 16 < n ) { x[i] += x[i+ 16]; } __syncthreads(); }
if ( n > 8 ) { if ( i < 8 && i + 8 < n ) { x[i] += x[i+ 8]; } __syncthreads(); }
if ( n > 4 ) { if ( i < 4 && i + 4 < n ) { x[i] += x[i+ 4]; } __syncthreads(); }
if ( n > 2 ) { if ( i < 2 && i + 2 < n ) { x[i] += x[i+ 2]; } __syncthreads(); }
if ( n > 1 ) { if ( i < 1 && i + 1 < n ) { x[i] += x[i+ 1]; } __syncthreads(); }
}
// end sum_reduce
template< int n >
__device__ void sum_reduce_2d( /*int n,*/ int i, int c, double x[][BLOCK_SIZEy+1] )
{
__syncthreads();
if ( n > 1024 ) { if ( i < 1024 && i + 1024 < n ) { x[i][c] += x[i+1024][c]; } __syncthreads(); }
if ( n > 512 ) { if ( i < 512 && i + 512 < n ) { x[i][c] += x[i+ 512][c]; } __syncthreads(); }
if ( n > 256 ) { if ( i < 256 && i + 256 < n ) { x[i][c] += x[i+ 256][c]; } __syncthreads(); }
if ( n > 128 ) { if ( i < 128 && i + 128 < n ) { x[i][c] += x[i+ 128][c]; } __syncthreads(); }
if ( n > 64 ) { if ( i < 64 && i + 64 < n ) { x[i][c] += x[i+ 64][c]; } __syncthreads(); }
if ( n > 32 ) { if ( i < 32 && i + 32 < n ) { x[i][c] += x[i+ 32][c]; } __syncthreads(); }
// probably don't need __syncthreads for < 16 threads
// because of implicit warp level synchronization.
if ( n > 16 ) { if ( i < 16 && i + 16 < n ) { x[i][c] += x[i+ 16][c]; } __syncthreads(); }
if ( n > 8 ) { if ( i < 8 && i + 8 < n ) { x[i][c] += x[i+ 8][c]; } __syncthreads(); }
if ( n > 4 ) { if ( i < 4 && i + 4 < n ) { x[i][c] += x[i+ 4][c]; } __syncthreads(); }
if ( n > 2 ) { if ( i < 2 && i + 2 < n ) { x[i][c] += x[i+ 2][c]; } __syncthreads(); }
if ( n > 1 ) { if ( i < 1 && i + 1 < n ) { x[i][c] += x[i+ 1][c]; } __syncthreads(); }
}
// end sum_reduce
//==============================================================================
__global__ void
magmablas_dnrm2_kernel( int m, double *da, int ldda, double *dxnorm )
{
const int i = threadIdx.x;
double *dx = da + blockIdx.x * ldda;
__shared__ double sum[ BLOCK_SIZE ];
double re, lsum;
// get norm of dx
lsum = 0;
for( int j = i; j < m; j += BLOCK_SIZE ) {
#if (defined(PRECISION_s) || defined(PRECISION_d))
re = dx[j];
lsum += re*re;
#else
re = MAGMA_D_REAL( dx[j] );
double im = MAGMA_D_IMAG( dx[j] );
lsum += re*re + im*im;
#endif
}
sum[i] = lsum;
sum_reduce< BLOCK_SIZE >( i, sum );
if (i==0)
dxnorm[blockIdx.x] = sqrt(sum[0]);
}
//==============================================================================
__global__ void
magmablas_dnrm2_check_kernel( int m, double *da, int ldda, double *dxnorm,
double *lsticc )
{
const int i = threadIdx.x;
double *dx = da + blockIdx.x * ldda;
__shared__ double sum[ BLOCK_SIZE ];
double re, lsum;
// get norm of dx only if lsticc[blockIdx+1] != 0
if( lsticc[blockIdx.x + 1] == 0 ) return;
lsum = 0;
for( int j = i; j < m; j += BLOCK_SIZE ) {
#if (defined(PRECISION_s) || defined(PRECISION_d))
re = dx[j];
lsum += re*re;
#else
re = MAGMA_D_REAL( dx[j] );
double im = MAGMA_D_IMAG( dx[j] );
lsum += re*re + im*im;
#endif
}
sum[i] = lsum;
sum_reduce< BLOCK_SIZE >( i, sum );
if (i==0)
dxnorm[blockIdx.x] = sqrt(sum[0]);
}
extern "C" void
magmablas_dnrm2_check(
magma_int_t m, magma_int_t n, double *da, magma_int_t ldda,
double *dxnorm, double *lsticc)
{
dim3 blocks( n );
dim3 threads( BLOCK_SIZE );
magmablas_dnrm2_check_kernel<<< blocks, threads >>>( m, da, ldda, dxnorm, lsticc );
}
//==============================================================================
__global__ void
magmablas_dnrm2_smkernel( int m, int n, double *da, int ldda,
double *dxnorm )
{
const int i = threadIdx.x, c= threadIdx.y;
__shared__ double sum[ BLOCK_SIZEx ][ BLOCK_SIZEy + 1];
double re, lsum;
for( int k = c; k < n; k+= BLOCK_SIZEy)
{
double *dx = da + k * ldda;
// get norm of dx
lsum = 0;
for( int j = i; j < m; j += BLOCK_SIZEx ) {
#if (defined(PRECISION_s) || defined(PRECISION_d))
re = dx[j];
lsum += re*re;
#else
re = MAGMA_D_REAL( dx[j] );
double im = MAGMA_D_IMAG( dx[j] );
lsum += re*re + im*im;
#endif
}
sum[i][c] = lsum;
sum_reduce_2d< BLOCK_SIZEx >( i, c, sum );
if (i==0)
dxnorm[k] = sqrt(sum[0][c]);
__syncthreads();
}
}
//==============================================================================
/*
Compute the dnrm2 of each column of m-by-n matrix dA.
The resulting norms are written in the dxnorm array.
This routine uses only one SM (block).
*/
extern "C" void
magmablas_dnrm2_sm(
magma_int_t m, magma_int_t n, double *da, magma_int_t ldda,
double *dxnorm)
{
dim3 blocks( 1 );
dim3 threads( BLOCK_SIZEx, BLOCK_SIZEy );
magmablas_dnrm2_smkernel<<< blocks, threads, 0, magma_stream >>>( m, n, da, ldda, dxnorm );
}
//==============================================================================
static
__device__ void dsum_reduce( int n, int i, double* x )
{
__syncthreads();
if ( n > 1024 ) { if ( i < 1024 && i + 1024 < n ) { x[i] += x[i+1024]; } __syncthreads(); }
if ( n > 512 ) { if ( i < 512 && i + 512 < n ) { x[i] += x[i+ 512]; } __syncthreads(); }
if ( n > 256 ) { if ( i < 256 && i + 256 < n ) { x[i] += x[i+ 256]; } __syncthreads(); }
if ( n > 128 ) { if ( i < 128 && i + 128 < n ) { x[i] += x[i+ 128]; } __syncthreads(); }
if ( n > 64 ) { if ( i < 64 && i + 64 < n ) { x[i] += x[i+ 64]; } __syncthreads(); }
if ( n > 32 ) { if ( i < 32 && i + 32 < n ) { x[i] += x[i+ 32]; } __syncthreads(); }
// probably don't need __syncthreads for < 16 threads
// because of implicit warp level synchronization.
if ( n > 16 ) { if ( i < 16 && i + 16 < n ) { x[i] += x[i+ 16]; } __syncthreads(); }
if ( n > 8 ) { if ( i < 8 && i + 8 < n ) { x[i] += x[i+ 8]; } __syncthreads(); }
if ( n > 4 ) { if ( i < 4 && i + 4 < n ) { x[i] += x[i+ 4]; } __syncthreads(); }
if ( n > 2 ) { if ( i < 2 && i + 2 < n ) { x[i] += x[i+ 2]; } __syncthreads(); }
if ( n > 1 ) { if ( i < 1 && i + 1 < n ) { x[i] += x[i+ 1]; } __syncthreads(); }
}
// end sum_reduce
__global__ void
magma_dnrm2_adjust_kernel(double *xnorm, double *c)
{
const int i = threadIdx.x;
__shared__ double sum[ BLOCK_SIZE ];
double temp;
temp = MAGMA_D_ABS( c[i] ) / xnorm[0];
sum[i] = -temp * temp;
dsum_reduce( blockDim.x, i, sum );
__syncthreads();
if (i==0)
xnorm[0] = xnorm[0] * sqrt(1+sum[0]);
}
/*
Adjust the norm of c to give the norm of c[k+1:], assuming that
c was changed with orthogonal transformations.
*/
extern "C" void
magmablas_dnrm2_adjust(magma_int_t k, double *xnorm, double *c)
{
magma_dnrm2_adjust_kernel<<< 1, k, 0, magma_stream >>> (xnorm, c);
}
//==============================================================================
#define BS 256
__global__ void
magma_dnrm2_row_check_adjust_kernel(int n, double tol, double *xnorm, double *xnorm2,
double *c, int ldc, double *lsticc)
{
const int i = threadIdx.x + blockIdx.x*BS;
lsticc[i+1] = 0;
if (i<n){
double temp = MAGMA_D_ABS( c[i*ldc] ) / xnorm[i];
temp = max( 0.0, ((1.0 + temp) * (1.0 - temp)) );
double temp2 = xnorm[i] / xnorm2[i];
temp2 = temp * (temp2 * temp2);
if (temp2 <= tol) {
lsticc[i+1] = 1;
} else {
xnorm[i] *= sqrt(temp);
}
}
if( i==0 ) lsticc[0] = 0;
dsum_reduce( blockDim.x, i, lsticc );
}
/*
Adjust the norm of c[,1:k] to give the norm of c[k+1:,1:k], assuming that
c was changed with orthogonal transformations.
It also does checks for QP3
*/
extern "C" void
magmablas_dnrm2_row_check_adjust(
magma_int_t k, double tol, double *xnorm, double *xnorm2,
double *c, magma_int_t ldc, double *lsticc)
{
int nblocks = (k+BS-1)/BS;
magma_dnrm2_row_check_adjust_kernel<<< nblocks, BS >>> (k, tol, xnorm, xnorm2, c, ldc, lsticc);
}
//==============================================================================
/*
Compute the dnrm2 of each column of m-by-n matrix dA.
The resulting norms are written in the dxnorm array.
The computation can be done using n blocks (default) or on one SM (commented).
*/
extern "C" void
magmablas_dnrm2_cols(
magma_int_t m, magma_int_t n,
double *da, magma_int_t ldda,
double *dxnorm)
{
dim3 blocks( n );
dim3 threads( BLOCK_SIZE );
magmablas_dnrm2_kernel<<< blocks, threads, 0, magma_stream >>>( m, da, ldda, dxnorm );
// The following would do the computation on one SM
// magmablas_dnrm2_sm(m, n, da, ldda, dxnorm);
}
//==============================================================================
|
5f07d17c2790f94db6dd9236623925dada80224b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <hip/hip_fp16.h>
#include <algorithm>
#include "paddle/fluid/inference/tensorrt/plugin/split_op_plugin.h"
#include "paddle/fluid/inference/tensorrt/plugin/trt_plugin_factory.h"
namespace paddle {
namespace inference {
namespace tensorrt {
namespace plugin {
template <typename T>
__device__ int upper_bound(T const* vals, int n, T const& key) {
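// Binary search: returns the index of the first element of vals[0..n) that is
// strictly greater than key, e.g. vals = {0, 3, 5}, key = 4 -> 2, so the caller's
// "upper_bound(...) - 1" picks segment 1 (axis rows 3..4).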
int i = 0;
while (n > 0) {
int m = n / 2;
int j = i + m;
if (!(key < vals[j])) {
i = j + 1;
n -= m + 1;
} else {
n = m;
}
}
return i;
}
nvinfer1::Dims SplitPlugin::getOutputDimensions(
int index, const nvinfer1::Dims* input_dims, int num_inputs) {
PADDLE_ENFORCE_EQ(num_inputs, 1,
platform::errors::InvalidArgument(
"Invalid number of inputs of split TRT plugin. "
"Expected 1, received %d.",
num_inputs));
PADDLE_ENFORCE_LT(
index, this->getNbOutputs(),
platform::errors::InvalidArgument(
"Index of output should be less than the total number of outputs in "
"split TensorRT plugin. Received index = %d >= total outputs = %d",
index, this->getNbOutputs()));
nvinfer1::Dims output_dims = input_dims[0];
output_dims.d[axis_] = output_length_.at(index);
return output_dims;
}
void SplitPlugin::shareData(const SplitPlugin* another) {
outer_rows_ = another->outer_rows_;
inner_cols_ = another->inner_cols_;
same_shape_ = another->same_shape_;
axis_shape_ = another->axis_shape_;
d_segment_offsets_ = another->d_segment_offsets_;
segment_offsets_ = another->segment_offsets_;
d_output_ptrs_.resize(another->d_output_ptrs_.size(), nullptr);
}
int SplitPlugin::initialize() {
PADDLE_ENFORCE_LE(axis_, nvinfer1::Dims::MAX_DIMS,
platform::errors::InvalidArgument(
"Axis dimension exceeds max dimension in TensorRT. "
"Received axis = %d > MAX_DIMS = %d",
axis_, nvinfer1::Dims::MAX_DIMS));
// notice input dims is [C, H, W]
nvinfer1::Dims dims = this->getInputDims(0);
outer_rows_ = 1;
inner_cols_ = 1;
for (int i = 0; i < axis_; ++i) {
outer_rows_ *= dims.d[i];
}
for (int i = axis_ + 1; i < dims.nbDims; ++i) {
inner_cols_ *= dims.d[i];
}
same_shape_ = true;
std::vector<int> segment_offsets(1, 0);
for (int i = 0; i < this->getNbOutputs(); ++i) {
if (output_length_[i] != output_length_[0]) {
same_shape_ = false;
}
segment_offsets.push_back(segment_offsets.back() + output_length_[i]);
}
axis_shape_ = dims.d[axis_];
d_segment_offsets_ = segment_offsets;
segment_offsets_ = std::move(segment_offsets);
d_output_ptrs_.resize(this->getNbOutputs(), nullptr);
return 0;
}
// nothing to release according to initialize
void SplitPlugin::terminate() {}
// The following part of the code refers to onnx-tensorrt
// https://github.com/onnx/onnx-tensorrt/blob/master/Split.cu
template <typename T>
__global__ void split_kernel(int nsegment,
int const* __restrict__ segment_offsets,
T const* __restrict__ idata, T* const* odatas,
int inner_cols, int axis_shape, int outer_rows) {
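// Grid-stride 3D loop: element (z, src_y, x) of the concatenated input
// (outer_rows x axis_shape x inner_cols, inner_cols fastest) is copied to
// position (z, dst_y, x) of the output tensor that owns axis row src_y.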
int x0 = threadIdx.x + blockIdx.x * blockDim.x;
int src_y0 = threadIdx.y + blockIdx.y * blockDim.y;
int z0 = threadIdx.z + blockIdx.z * blockDim.z;
for (int z = z0; z < outer_rows; z += blockDim.z * gridDim.z) {
for (int src_y = src_y0; src_y < axis_shape;
src_y += blockDim.y * gridDim.y) {
for (int x = x0; x < inner_cols; x += blockDim.x * gridDim.x) {
int segment = upper_bound(segment_offsets, nsegment, src_y) - 1;
int dst_y = src_y - segment_offsets[segment];
int dst_ny = segment_offsets[segment + 1] - segment_offsets[segment];
odatas[segment][x + inner_cols * (dst_y + dst_ny * z)] =
idata[x + inner_cols * (src_y + axis_shape * z)];
}
}
}
}
int SplitPlugin::enqueue(int batchSize, const void* const* inputs,
#if IS_TRT_VERSION_LT(8000)
void** outputs, void* workspace, hipStream_t stream) {
#else
void* const* outputs, void* workspace,
hipStream_t stream) {
#endif
const int* d_segment_offsets_ptr =
thrust::raw_pointer_cast(&d_segment_offsets_[0]);
float const* input_ptr = reinterpret_cast<float const*>(inputs[0]);
float* const* h_odatas = reinterpret_cast<float* const*>(outputs);
float** output_ptrs = thrust::raw_pointer_cast(&d_output_ptrs_[0]);
PADDLE_ENFORCE_CUDA_SUCCESS(hipMemcpyAsync(
output_ptrs, h_odatas, d_output_ptrs_.size() * sizeof(float*),
hipMemcpyHostToDevice, stream));
int outer_rows = outer_rows_ * batchSize;
dim3 block(32, 16);
dim3 grid(::min((inner_cols_ - 1) / block.x + 1, 65535u),
::min((axis_shape_ - 1) / block.y + 1, 65535u),
::min((outer_rows_ - 1) / block.z + 1, 65535u));
hipLaunchKernelGGL(( split_kernel), dim3(grid), dim3(block), 0, stream,
d_segment_offsets_.size(), d_segment_offsets_ptr, input_ptr, output_ptrs,
inner_cols_, axis_shape_, outer_rows);
return hipGetLastError() != hipSuccess;
}
// Dynamic Plugin below.
#if IS_TRT_VERSION_GE(6000)
int SplitPluginDynamic::initialize() { return 0; }
size_t SplitPluginDynamic::getSerializationSize() const {
return SerializedSize(axis_) + SerializedSize(output_length_) +
SerializedSize(with_fp16_);
}
void SplitPluginDynamic::serialize(void* buffer) const {
SerializeValue(&buffer, axis_);
SerializeValue(&buffer, output_length_);
SerializeValue(&buffer, with_fp16_);
}
nvinfer1::DimsExprs SplitPluginDynamic::getOutputDimensions(
int output_index, const nvinfer1::DimsExprs* inputs, int nb_inputs,
nvinfer1::IExprBuilder& expr_builder) {
PADDLE_ENFORCE_EQ(nb_inputs, 1,
platform::errors::InvalidArgument(
"The Split plugin should be only one input."));
PADDLE_ENFORCE_LT(output_index, output_length_.size(),
platform::errors::InvalidArgument(
"When GetOutputDimensions, the index(%d) should not "
"greater the num(%d) of the outpus.",
output_index, output_length_.size()));
nvinfer1::DimsExprs output_dims = inputs[0];
output_dims.d[axis_] = expr_builder.constant(output_length_.at(output_index));
return output_dims;
}
bool SplitPluginDynamic::supportsFormatCombination(
int pos, const nvinfer1::PluginTensorDesc* in_out, int nb_inputs,
int nb_outputs) {
PADDLE_ENFORCE_NOT_NULL(
in_out, platform::errors::InvalidArgument(
"The input of split plugin should not be nullptr."));
PADDLE_ENFORCE_LT(
pos, nb_inputs + nb_outputs,
platform::errors::InvalidArgument("The pos(%d) should be less than the "
"num(%d) of the input and the output.",
pos, nb_inputs + nb_outputs));
(in_out && pos < (nb_inputs + nb_outputs));
const nvinfer1::PluginTensorDesc& in = in_out[pos];
if (pos == 0) {
if (with_fp16_) {
return (in.type == nvinfer1::DataType::kFLOAT ||
in.type == nvinfer1::DataType::kHALF) &&
(in.format == nvinfer1::TensorFormat::kLINEAR);
} else {
return (in.type == nvinfer1::DataType::kFLOAT) &&
(in.format == nvinfer1::TensorFormat::kLINEAR);
}
}
const nvinfer1::PluginTensorDesc& prev = in_out[pos - 1];
// output
return in.type == prev.type && in.format == prev.format;
}
nvinfer1::DataType SplitPluginDynamic::getOutputDataType(
int index, const nvinfer1::DataType* input_types, int nb_inputs) const {
return input_types[0];
}
int SplitPluginDynamic::enqueue(const nvinfer1::PluginTensorDesc* input_desc,
const nvinfer1::PluginTensorDesc* output_desc,
const void* const* inputs, void* const* outputs,
void* workspace, hipStream_t stream) {
auto input_dims = input_desc[0].dims;
int outer_rows = 1;
int inner_cols = 1;
// with batch
for (int i = 0; i < axis_; i++) {
outer_rows *= input_dims.d[i];
}
for (int i = axis_ + 1; i < input_dims.nbDims; i++) {
inner_cols *= input_dims.d[i];
}
std::vector<int> segment_offsets(1, 0);
for (int i = 0; i < this->getNbOutputs(); i++) {
segment_offsets.push_back(segment_offsets.back() + output_length_[i]);
}
int axis_shape = input_dims.d[axis_];
thrust::device_vector<int> d_segment_offsets = segment_offsets;
const int* d_segment_offsets_ptr =
thrust::raw_pointer_cast(&d_segment_offsets[0]);
dim3 block(32, 16);
dim3 grid(::min((inner_cols - 1) / block.x + 1, 65535u),
::min((axis_shape - 1) / block.y + 1, 65535u),
::min((outer_rows - 1) / block.z + 1, 65535u));
auto input_type = input_desc[0].type;
if (input_type == nvinfer1::DataType::kFLOAT) {
VLOG(1) << "TRT Plugin DataType selected. Split-->fp32";
thrust::device_vector<float*> d_output_ptrs;
d_output_ptrs.resize(this->getNbOutputs(), nullptr);
const float* input_ptr = static_cast<const float*>(inputs[0]);
float* const* h_odatas = reinterpret_cast<float* const*>(outputs);
float** output_ptrs = thrust::raw_pointer_cast(&d_output_ptrs[0]);
PADDLE_ENFORCE_CUDA_SUCCESS(hipMemcpyAsync(
output_ptrs, h_odatas, d_output_ptrs.size() * sizeof(float*),
hipMemcpyHostToDevice, stream));
hipLaunchKernelGGL(( split_kernel), dim3(grid), dim3(block), 0, stream,
d_segment_offsets.size(), d_segment_offsets_ptr, input_ptr, output_ptrs,
inner_cols, axis_shape, outer_rows);
} else if (input_type == nvinfer1::DataType::kHALF) {
VLOG(1) << "TRT Plugin DataType selected. Split-->fp16";
thrust::device_vector<half*> d_output_ptrs;
d_output_ptrs.resize(this->getNbOutputs(), nullptr);
const half* input_ptr = static_cast<const half*>(inputs[0]);
half* const* h_odatas = reinterpret_cast<half* const*>(outputs);
half** output_ptrs = thrust::raw_pointer_cast(&d_output_ptrs[0]);
PADDLE_ENFORCE_CUDA_SUCCESS(hipMemcpyAsync(
output_ptrs, h_odatas, d_output_ptrs.size() * sizeof(half*),
hipMemcpyHostToDevice, stream));
hipLaunchKernelGGL(( split_kernel), dim3(grid), dim3(block), 0, stream,
d_segment_offsets.size(), d_segment_offsets_ptr, input_ptr, output_ptrs,
inner_cols, axis_shape, outer_rows);
}
return hipGetLastError() != hipSuccess;
}
#endif
} // namespace plugin
} // namespace tensorrt
} // namespace inference
} // namespace paddle
|
5f07d17c2790f94db6dd9236623925dada80224b.cu
|
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <cuda_fp16.h>
#include <algorithm>
#include "paddle/fluid/inference/tensorrt/plugin/split_op_plugin.h"
#include "paddle/fluid/inference/tensorrt/plugin/trt_plugin_factory.h"
namespace paddle {
namespace inference {
namespace tensorrt {
namespace plugin {
template <typename T>
__device__ int upper_bound(T const* vals, int n, T const& key) {
int i = 0;
while (n > 0) {
int m = n / 2;
int j = i + m;
if (!(key < vals[j])) {
i = j + 1;
n -= m + 1;
} else {
n = m;
}
}
return i;
}
nvinfer1::Dims SplitPlugin::getOutputDimensions(
int index, const nvinfer1::Dims* input_dims, int num_inputs) {
PADDLE_ENFORCE_EQ(num_inputs, 1,
platform::errors::InvalidArgument(
"Invalid number of inputs of split TRT plugin. "
"Expected 1, received %d.",
num_inputs));
PADDLE_ENFORCE_LT(
index, this->getNbOutputs(),
platform::errors::InvalidArgument(
"Index of output should be less than the total number of outputs in "
"split TensorRT plugin. Received index = %d >= total outputs = %d",
index, this->getNbOutputs()));
nvinfer1::Dims output_dims = input_dims[0];
output_dims.d[axis_] = output_length_.at(index);
return output_dims;
}
void SplitPlugin::shareData(const SplitPlugin* another) {
outer_rows_ = another->outer_rows_;
inner_cols_ = another->inner_cols_;
same_shape_ = another->same_shape_;
axis_shape_ = another->axis_shape_;
d_segment_offsets_ = another->d_segment_offsets_;
segment_offsets_ = another->segment_offsets_;
d_output_ptrs_.resize(another->d_output_ptrs_.size(), nullptr);
}
int SplitPlugin::initialize() {
PADDLE_ENFORCE_LE(axis_, nvinfer1::Dims::MAX_DIMS,
platform::errors::InvalidArgument(
"Axis dimension exceeds max dimension in TensorRT. "
"Received axis = %d > MAX_DIMS = %d",
axis_, nvinfer1::Dims::MAX_DIMS));
// notice input dims is [C, H, W]
nvinfer1::Dims dims = this->getInputDims(0);
outer_rows_ = 1;
inner_cols_ = 1;
for (int i = 0; i < axis_; ++i) {
outer_rows_ *= dims.d[i];
}
for (int i = axis_ + 1; i < dims.nbDims; ++i) {
inner_cols_ *= dims.d[i];
}
same_shape_ = true;
std::vector<int> segment_offsets(1, 0);
for (int i = 0; i < this->getNbOutputs(); ++i) {
if (output_length_[i] != output_length_[0]) {
same_shape_ = false;
}
segment_offsets.push_back(segment_offsets.back() + output_length_[i]);
}
axis_shape_ = dims.d[axis_];
d_segment_offsets_ = segment_offsets;
segment_offsets_ = std::move(segment_offsets);
d_output_ptrs_.resize(this->getNbOutputs(), nullptr);
return 0;
}
// nothing to release according to initialize
void SplitPlugin::terminate() {}
// The following part of the code refers to onnx-tensorrt
// https://github.com/onnx/onnx-tensorrt/blob/master/Split.cu
template <typename T>
__global__ void split_kernel(int nsegment,
int const* __restrict__ segment_offsets,
T const* __restrict__ idata, T* const* odatas,
int inner_cols, int axis_shape, int outer_rows) {
int x0 = threadIdx.x + blockIdx.x * blockDim.x;
int src_y0 = threadIdx.y + blockIdx.y * blockDim.y;
int z0 = threadIdx.z + blockIdx.z * blockDim.z;
for (int z = z0; z < outer_rows; z += blockDim.z * gridDim.z) {
for (int src_y = src_y0; src_y < axis_shape;
src_y += blockDim.y * gridDim.y) {
for (int x = x0; x < inner_cols; x += blockDim.x * gridDim.x) {
int segment = upper_bound(segment_offsets, nsegment, src_y) - 1;
int dst_y = src_y - segment_offsets[segment];
int dst_ny = segment_offsets[segment + 1] - segment_offsets[segment];
odatas[segment][x + inner_cols * (dst_y + dst_ny * z)] =
idata[x + inner_cols * (src_y + axis_shape * z)];
}
}
}
}
int SplitPlugin::enqueue(int batchSize, const void* const* inputs,
#if IS_TRT_VERSION_LT(8000)
void** outputs, void* workspace, cudaStream_t stream) {
#else
void* const* outputs, void* workspace,
cudaStream_t stream) {
#endif
const int* d_segment_offsets_ptr =
thrust::raw_pointer_cast(&d_segment_offsets_[0]);
float const* input_ptr = reinterpret_cast<float const*>(inputs[0]);
float* const* h_odatas = reinterpret_cast<float* const*>(outputs);
float** output_ptrs = thrust::raw_pointer_cast(&d_output_ptrs_[0]);
PADDLE_ENFORCE_CUDA_SUCCESS(cudaMemcpyAsync(
output_ptrs, h_odatas, d_output_ptrs_.size() * sizeof(float*),
cudaMemcpyHostToDevice, stream));
int outer_rows = outer_rows_ * batchSize;
dim3 block(32, 16);
dim3 grid(std::min((inner_cols_ - 1) / block.x + 1, 65535u),
std::min((axis_shape_ - 1) / block.y + 1, 65535u),
std::min((outer_rows_ - 1) / block.z + 1, 65535u));
split_kernel<<<grid, block, 0, stream>>>(
d_segment_offsets_.size(), d_segment_offsets_ptr, input_ptr, output_ptrs,
inner_cols_, axis_shape_, outer_rows);
return cudaGetLastError() != cudaSuccess;
}
// Dynamic Plugin below.
#if IS_TRT_VERSION_GE(6000)
int SplitPluginDynamic::initialize() { return 0; }
size_t SplitPluginDynamic::getSerializationSize() const {
return SerializedSize(axis_) + SerializedSize(output_length_) +
SerializedSize(with_fp16_);
}
void SplitPluginDynamic::serialize(void* buffer) const {
SerializeValue(&buffer, axis_);
SerializeValue(&buffer, output_length_);
SerializeValue(&buffer, with_fp16_);
}
nvinfer1::DimsExprs SplitPluginDynamic::getOutputDimensions(
int output_index, const nvinfer1::DimsExprs* inputs, int nb_inputs,
nvinfer1::IExprBuilder& expr_builder) {
PADDLE_ENFORCE_EQ(nb_inputs, 1,
platform::errors::InvalidArgument(
"The Split plugin should be only one input."));
PADDLE_ENFORCE_LT(output_index, output_length_.size(),
platform::errors::InvalidArgument(
"When GetOutputDimensions, the index(%d) should not "
"greater the num(%d) of the outpus.",
output_index, output_length_.size()));
nvinfer1::DimsExprs output_dims = inputs[0];
output_dims.d[axis_] = expr_builder.constant(output_length_.at(output_index));
return output_dims;
}
bool SplitPluginDynamic::supportsFormatCombination(
int pos, const nvinfer1::PluginTensorDesc* in_out, int nb_inputs,
int nb_outputs) {
PADDLE_ENFORCE_NOT_NULL(
in_out, platform::errors::InvalidArgument(
"The input of split plugin should not be nullptr."));
PADDLE_ENFORCE_LT(
pos, nb_inputs + nb_outputs,
platform::errors::InvalidArgument("The pos(%d) should be less than the "
"num(%d) of the input and the output.",
pos, nb_inputs + nb_outputs));
(in_out && pos < (nb_inputs + nb_outputs));
const nvinfer1::PluginTensorDesc& in = in_out[pos];
if (pos == 0) {
if (with_fp16_) {
return (in.type == nvinfer1::DataType::kFLOAT ||
in.type == nvinfer1::DataType::kHALF) &&
(in.format == nvinfer1::TensorFormat::kLINEAR);
} else {
return (in.type == nvinfer1::DataType::kFLOAT) &&
(in.format == nvinfer1::TensorFormat::kLINEAR);
}
}
const nvinfer1::PluginTensorDesc& prev = in_out[pos - 1];
// output
return in.type == prev.type && in.format == prev.format;
}
nvinfer1::DataType SplitPluginDynamic::getOutputDataType(
int index, const nvinfer1::DataType* input_types, int nb_inputs) const {
return input_types[0];
}
int SplitPluginDynamic::enqueue(const nvinfer1::PluginTensorDesc* input_desc,
const nvinfer1::PluginTensorDesc* output_desc,
const void* const* inputs, void* const* outputs,
void* workspace, cudaStream_t stream) {
auto input_dims = input_desc[0].dims;
int outer_rows = 1;
int inner_cols = 1;
// with batch
for (int i = 0; i < axis_; i++) {
outer_rows *= input_dims.d[i];
}
for (int i = axis_ + 1; i < input_dims.nbDims; i++) {
inner_cols *= input_dims.d[i];
}
std::vector<int> segment_offsets(1, 0);
for (int i = 0; i < this->getNbOutputs(); i++) {
segment_offsets.push_back(segment_offsets.back() + output_length_[i]);
}
int axis_shape = input_dims.d[axis_];
thrust::device_vector<int> d_segment_offsets = segment_offsets;
const int* d_segment_offsets_ptr =
thrust::raw_pointer_cast(&d_segment_offsets[0]);
dim3 block(32, 16);
dim3 grid(std::min((inner_cols - 1) / block.x + 1, 65535u),
std::min((axis_shape - 1) / block.y + 1, 65535u),
std::min((outer_rows - 1) / block.z + 1, 65535u));
auto input_type = input_desc[0].type;
if (input_type == nvinfer1::DataType::kFLOAT) {
VLOG(1) << "TRT Plugin DataType selected. Split-->fp32";
thrust::device_vector<float*> d_output_ptrs;
d_output_ptrs.resize(this->getNbOutputs(), nullptr);
const float* input_ptr = static_cast<const float*>(inputs[0]);
float* const* h_odatas = reinterpret_cast<float* const*>(outputs);
float** output_ptrs = thrust::raw_pointer_cast(&d_output_ptrs[0]);
PADDLE_ENFORCE_CUDA_SUCCESS(cudaMemcpyAsync(
output_ptrs, h_odatas, d_output_ptrs.size() * sizeof(float*),
cudaMemcpyHostToDevice, stream));
split_kernel<<<grid, block, 0, stream>>>(
d_segment_offsets.size(), d_segment_offsets_ptr, input_ptr, output_ptrs,
inner_cols, axis_shape, outer_rows);
} else if (input_type == nvinfer1::DataType::kHALF) {
VLOG(1) << "TRT Plugin DataType selected. Split-->fp16";
thrust::device_vector<half*> d_output_ptrs;
d_output_ptrs.resize(this->getNbOutputs(), nullptr);
const half* input_ptr = static_cast<const half*>(inputs[0]);
half* const* h_odatas = reinterpret_cast<half* const*>(outputs);
half** output_ptrs = thrust::raw_pointer_cast(&d_output_ptrs[0]);
PADDLE_ENFORCE_CUDA_SUCCESS(cudaMemcpyAsync(
output_ptrs, h_odatas, d_output_ptrs.size() * sizeof(half*),
cudaMemcpyHostToDevice, stream));
split_kernel<<<grid, block, 0, stream>>>(
d_segment_offsets.size(), d_segment_offsets_ptr, input_ptr, output_ptrs,
inner_cols, axis_shape, outer_rows);
}
return cudaGetLastError() != cudaSuccess;
}
#endif
} // namespace plugin
} // namespace tensorrt
} // namespace inference
} // namespace paddle
|
image_blur.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
__global__ void imageblur( int* inputImage, int* outputImage, int filterSize, double* filter, int imageRow, int imageCol){
int pixelx = blockIdx.x * blockDim.x + threadIdx.x;
int pixely = blockIdx.y * blockDim.y + threadIdx.y;
double blur_value = 0.0;
if (pixelx >= imageCol || pixely >= imageRow) {
return;
}
//multiply with blur kernel
for (int finalx = 0; finalx < filterSize; finalx++) {
for (int finaly = 0; finaly < filterSize; finaly++) {
int imagex = pixelx + finalx - filterSize / 2 ;
int imagey = pixely + finaly - filterSize / 2;
int imagePixel;
if(imagex < 0 || imagex >= imageCol || imagey < 0 || imagey >= imageRow){
imagePixel = 0;
} else {
imagePixel = inputImage[imagey*imageCol+imagex];
}
blur_value += (filter[finaly*filterSize+finalx] * imagePixel);
}
}
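//normalize by the filter weight sum (1+2+1 + 2+3+2 + 1+2+1 = 15)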
outputImage[pixely*imageCol+pixelx] = (int)(blur_value/15.0);
}
int main(int argc, char const *argv[]) {
int imagex = 3, imagey = 3;
int numberOfPixels = imagex*imagey*sizeof(int);
int *d_image = 0; int *d_blurImage = 0; double *d_filter = 0; //device
int *h_image = 0; int *h_blurImage = 0; double *h_filter = 0; //host
//malloc memory device and host
h_image = (int*)malloc(numberOfPixels);
hipMalloc((void**)&d_image, numberOfPixels);
h_blurImage = (int*)malloc(numberOfPixels);
hipMalloc((void**)&d_blurImage, numberOfPixels);
h_filter = (double*)malloc(9*sizeof(double));
hipMalloc((void**)&d_filter, 9*sizeof(double));
if(h_image == 0 || d_image == 0 || h_blurImage == 0 || d_blurImage == 0){
printf("Could not allocate memory");
return 1;
}
//Initialise Filter
h_filter[0] = 1.0; h_filter[1] = 2.0; h_filter[2] = 1.0;
h_filter[3] = 2.0; h_filter[4] = 3.0; h_filter[5] = 2.0;
h_filter[6] = 1.0; h_filter[7] = 2.0; h_filter[8] = 1.0;
// Randomly Initialize Image
srand(time(NULL));
for(int i = 0; i < (imagex*imagey); i++){
h_image[i] = (rand() % 256);
}
//Copy host memory to device
hipMemcpy( d_image, h_image, numberOfPixels, hipMemcpyHostToDevice);
hipMemcpy( d_filter, h_filter, 9*sizeof(double), hipMemcpyHostToDevice);
const dim3 blockSize(4,4,1);
const dim3 gridSize(imagex/blockSize.x+1,imagey/blockSize.y+1,1);
//Call
hipLaunchKernelGGL(( imageblur), dim3(gridSize), dim3(blockSize), 0, 0, d_image, d_blurImage, 3, d_filter, imagey, imagex);
//copy blurred image to host
hipMemcpy(h_blurImage, d_blurImage, numberOfPixels, hipMemcpyDeviceToHost);
printf("Image : \n");
for(int i = 0; i < imagex; i++){
for(int j = 0; j < imagey; j++){
printf("%d ", h_image[i*imagex + j]);
}
printf("\n");
}
printf("Blur Image: \n");
for(int i = 0; i < imagex; i++){
for(int j = 0; j < imagey; j++){
printf("%d ", h_blurImage[i*imagex + j]);
}
printf("\n");
}
//Clean Memory
free(h_image); free(h_blurImage); free(h_filter);
hipFree(d_image); hipFree(d_blurImage); hipFree(d_filter);
return 0;
}
|
image_blur.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
__global__ void imageblur( int* inputImage, int* outputImage, int filterSize, double* filter, int imageRow, int imageCol){
int pixelx = blockIdx.x * blockDim.x + threadIdx.x;
int pixely = blockIdx.y * blockDim.y + threadIdx.y;
double blur_value = 0.0;
if (pixelx >= imageCol || pixely >= imageRow) {
return;
}
//multiply with blur kernel
for (int finalx = 0; finalx < filterSize; finalx++) {
for (int finaly = 0; finaly < filterSize; finaly++) {
int imagex = pixelx + finalx - filterSize / 2 ;
int imagey = pixely + finaly - filterSize / 2;
int imagePixel;
if(imagex < 0 || imagex >= imageCol || imagey < 0 || imagey >= imageRow){
imagePixel = 0;
} else {
imagePixel = inputImage[imagey*imageCol+imagex];
}
blur_value += (filter[finaly*filterSize+finalx] * imagePixel);
}
}
outputImage[pixely*imageCol+pixelx] = (int)(blur_value/15.0);
}
int main(int argc, char const *argv[]) {
int imagex = 3, imagey = 3;
int numberOfPixels = imagex*imagey*sizeof(int);
int *d_image = 0; int *d_blurImage = 0; double *d_filter = 0; //device
int *h_image = 0; int *h_blurImage = 0; double *h_filter = 0; //host
//malloc memory device and host
h_image = (int*)malloc(numberOfPixels);
cudaMalloc((void**)&d_image, numberOfPixels);
h_blurImage = (int*)malloc(numberOfPixels);
cudaMalloc((void**)&d_blurImage, numberOfPixels);
h_filter = (double*)malloc(9*sizeof(double));
cudaMalloc((void**)&d_filter, 9*sizeof(double));
if(h_image == 0 || d_image == 0 || h_blurImage == 0 || d_blurImage == 0){
printf("Could not allocate memory");
return 1;
}
//Initialise Filter
h_filter[0] = 1.0; h_filter[1] = 2.0; h_filter[2] = 1.0;
h_filter[3] = 2.0; h_filter[4] = 3.0; h_filter[5] = 2.0;
h_filter[6] = 1.0; h_filter[7] = 2.0; h_filter[8] = 1.0;
// Randomly Initialize Image
srand(time(NULL));
for(int i = 0; i < (imagex*imagey); i++){
h_image[i] = (rand() % 256);
}
//Copy host memory to device
cudaMemcpy( d_image, h_image, numberOfPixels, cudaMemcpyHostToDevice);
cudaMemcpy( d_filter, h_filter, 9*sizeof(double), cudaMemcpyHostToDevice);
const dim3 blockSize(4,4,1);
const dim3 gridSize(imagex/blockSize.x+1,imagey/blockSize.y+1,1);
//Call
imageblur<<<gridSize, blockSize>>>(d_image, d_blurImage, 3, d_filter, imagey, imagex);
//copy blurred image to host
cudaMemcpy(h_blurImage, d_blurImage, numberOfPixels, cudaMemcpyDeviceToHost);
printf("Image : \n");
for(int i = 0; i < imagex; i++){
for(int j = 0; j < imagey; j++){
printf("%d ", h_image[i*imagex + j]);
}
printf("\n");
}
printf("Blur Image: \n");
for(int i = 0; i < imagex; i++){
for(int j = 0; j < imagey; j++){
printf("%d ", h_blurImage[i*imagex + j]);
}
printf("\n");
}
//Clean Memory
free(h_image); free(h_blurImage); free(h_filter);
cudaFree(d_image); cudaFree(d_blurImage); cudaFree(d_filter);
return 0;
}
|
78ceba72f67bc9242dcab8c2bd8a25fbeba1e911.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "ErrorCheck.h"
#include <iostream>
#include <hip/hip_runtime.h>
using namespace std;
ErrorCheck::ErrorCheck()
{
}
void ErrorCheck::chk(std::string msg)
{
error = hipGetLastError();
if (error != hipSuccess)
{
cout << msg << " : " << error;
cout << " " << hipGetErrorString(error) << endl;
}
}
void ErrorCheck::chk()
{
chk(str.str());
str.str("");
}
|
78ceba72f67bc9242dcab8c2bd8a25fbeba1e911.cu
|
#include "ErrorCheck.h"
#include <iostream>
#include <cuda.h>
using namespace std;
ErrorCheck::ErrorCheck()
{
}
void ErrorCheck::chk(std::string msg)
{
error = cudaGetLastError();
if (error != cudaSuccess)
{
cout << msg << " : " << error;
cout << " " << cudaGetErrorString(error) << endl;
}
}
void ErrorCheck::chk()
{
chk(str.str());
str.str("");
}
|
70791be1ee20ed7001c90bbd877b43bf8b927b7e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <math.h>
#define N (2048*2048)
#define THREAD_PER_BLOCK 512
__global__ void reverse( int *a, int *b) {
// calculate index and reverse array
int index=threadIdx.x+blockIdx.x*blockDim.x;
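// no bounds check needed: N (2048*2048) is an exact multiple of THREAD_PER_BLOCK (512),
// so the launch covers the array exactly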
b[index] = a[N-1-index];
}
void random_ints(int *p, int n) {
// fill vector with random numbers
int i;
for(i=0; i<n; i++) {
p[i]=rand();
}
}
int main( void ) {
int *a, *b, *c; // host copies of a, b, c
int *dev_a, *dev_b; // device copies of a, b, c
int size = N * sizeof( int ); // we need space for N integers
int i;
// allocate device copies of a, b, c
hipMalloc( (void**)&dev_a, size );
hipMalloc( (void**)&dev_b, size );
a = (int*)malloc( size );
b = (int*)malloc( size );
c = (int*)malloc( size );
random_ints( a, N );
// copy inputs to device
hipMemcpy( dev_a, a, size, hipMemcpyHostToDevice );
// launch reverse kernel with N threads
hipLaunchKernelGGL(( reverse), dim3(N/THREAD_PER_BLOCK), dim3(THREAD_PER_BLOCK) , 0, 0, dev_a, dev_b );
// copy device result back to host copy of c
hipMemcpy( c, dev_b, size, hipMemcpyDeviceToHost );
// check if result is correct
for(i=0; i<N; i++) {
b[i] = a[N-1-i];
if (b[i] != c[i]) {
printf("ERROR!\n" );
break;
}
if (i==N-1) {
printf("CORRECT!\n");
}
}
// free memory
free( a ); free( b ); free( c );
hipFree( dev_a );
hipFree( dev_b );
return 0;
}
|
70791be1ee20ed7001c90bbd877b43bf8b927b7e.cu
|
#include <stdio.h>
#include <math.h>
#define N (2048*2048)
#define THREAD_PER_BLOCK 512
__global__ void reverse( int *a, int *b) {
// calculate index and reverse array
int index=threadIdx.x+blockIdx.x*blockDim.x;
b[index] = a[N-1-index];
}
void random_ints(int *p, int n) {
// fill vector with random numbers
int i;
for(i=0; i<n; i++) {
p[i]=rand();
}
}
int main( void ) {
int *a, *b, *c; // host copies of a, b, c
int *dev_a, *dev_b; // device copies of a, b, c
int size = N * sizeof( int ); // we need space for N integers
int i;
// allocate device copies of a, b, c
cudaMalloc( (void**)&dev_a, size );
cudaMalloc( (void**)&dev_b, size );
a = (int*)malloc( size );
b = (int*)malloc( size );
c = (int*)malloc( size );
random_ints( a, N );
// copy inputs to device
cudaMemcpy( dev_a, a, size, cudaMemcpyHostToDevice );
// launch reverse kernel with N threads
reverse<<< N/THREAD_PER_BLOCK, THREAD_PER_BLOCK >>>( dev_a, dev_b );
// copy device result back to host copy of c
cudaMemcpy( c, dev_b, size, cudaMemcpyDeviceToHost );
// check if result is correct
for(i=0; i<N; i++) {
b[i] = a[N-1-i];
if (b[i] != c[i]) {
printf("ERROR!\n" );
break;
}
if (i==N-1) {
printf("CORRECT!\n");
}
}
// free memory
free( a ); free( b ); free( c );
cudaFree( dev_a );
cudaFree( dev_b );
return 0;
}
|
e51dbd71204c8c08df3087b8f0f74da2a2c15e71.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2011-2013 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "maxout_layer_updater_cuda.h"
#include <hip/hip_runtime.h>
#include "util_cuda.h"
#include "neural_network_cuda_exception.h"
#include "../maxout_layer.h"
#include "../nn_types.h"
__global__ void maxout_upd_kernel(
float * __restrict output,
int * __restrict max_feature_map_positions,
const float * __restrict input,
int neuron_count_per_feature_map,
int input_feature_map_count,
int output_feature_map_count,
int feature_map_subsampling_size,
int entry_count)
{
int neuron_id = blockIdx.x * blockDim.x + threadIdx.x;
int output_feature_map_id = blockIdx.y * blockDim.y + threadIdx.y;
int entry_id = blockIdx.z * blockDim.z + threadIdx.z;
if ((neuron_id < neuron_count_per_feature_map) && (output_feature_map_id < output_feature_map_count) && (entry_id < entry_count))
{
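// maxout forward: output map f takes the max over the feature_map_subsampling_size
// input maps {f, f + output_feature_map_count, f + 2*output_feature_map_count, ...};
// the winning position is recorded for the backward pass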
int input_offset = (entry_id * input_feature_map_count + output_feature_map_id) * neuron_count_per_feature_map + neuron_id;
float max_val = input[input_offset];
int max_pos = 0;
for(int i = 1; i < feature_map_subsampling_size; ++i)
{
input_offset += output_feature_map_count * neuron_count_per_feature_map;
float new_val = input[input_offset];
if (new_val > max_val)
{
max_val = new_val;
max_pos = i;
}
}
int output_offset = (entry_id * output_feature_map_count + output_feature_map_id) * neuron_count_per_feature_map + neuron_id;
output[output_offset] = max_val;
max_feature_map_positions[output_offset] = max_pos;
}
}
__global__ void maxout_deriviative_upd_kernel(
float * __restrict input_errors,
const int * __restrict max_feature_map_positions,
const float * __restrict output_errors,
int neuron_count_per_feature_map,
int input_feature_map_count,
int output_feature_map_count,
int feature_map_subsampling_size,
int entry_count)
{
int neuron_id = blockIdx.x * blockDim.x + threadIdx.x;
int output_feature_map_id = blockIdx.y * blockDim.y + threadIdx.y;
int entry_id = blockIdx.z * blockDim.z + threadIdx.z;
if ((neuron_id < neuron_count_per_feature_map) && (output_feature_map_id < output_feature_map_count) && (entry_id < entry_count))
{
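// maxout backward: the output gradient is routed only to the input map that won
// the forward max; all other maps in the group receive zero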
int output_offset = (entry_id * output_feature_map_count + output_feature_map_id) * neuron_count_per_feature_map + neuron_id;
int max_feature_map = max_feature_map_positions[output_offset];
float output_error = output_errors[output_offset];
int input_offset = (entry_id * input_feature_map_count + output_feature_map_id) * neuron_count_per_feature_map + neuron_id;
for(int i = 0; i < feature_map_subsampling_size; ++i)
{
input_errors[input_offset] = ((i == max_feature_map) ? output_error : 0.0F);
input_offset += output_feature_map_count * neuron_count_per_feature_map;
}
}
}
namespace nnforge
{
namespace cuda
{
maxout_layer_updater_cuda::maxout_layer_updater_cuda()
{
}
maxout_layer_updater_cuda::~maxout_layer_updater_cuda()
{
}
void maxout_layer_updater_cuda::enqueue_test(
unsigned int offset_input_entry_id,
hipStream_t stream_id,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
const std::vector<cuda_linear_buffer_device_smart_ptr>& data,
const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer,
cuda_linear_buffer_device_smart_ptr output_neurons_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects,
unsigned int entry_count)
{
const float * input = *input_neurons_buffer;
float * output = *output_neurons_buffer;
int * max_feature_map_positions = *additional_buffers[0];
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
output_elem_count_per_feature_map,
output_configuration_specific.feature_map_count,
entry_count);
hipLaunchKernelGGL(( maxout_upd_kernel), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id,
output,
max_feature_map_positions,
input,
output_elem_count_per_feature_map,
input_configuration_specific.feature_map_count,
output_configuration_specific.feature_map_count,
feature_map_subsampling_size,
entry_count);
}
void maxout_layer_updater_cuda::enqueue_backprop(
hipStream_t stream_id,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
const std::vector<cuda_linear_buffer_device_smart_ptr>& data,
const_cuda_linear_buffer_device_smart_ptr output_neurons_buffer,
const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer,
cuda_linear_buffer_device_smart_ptr output_errors_buffer,
cuda_linear_buffer_device_smart_ptr input_errors_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects,
unsigned int entry_count)
{
const float * output_errors = *output_errors_buffer;
int * max_feature_map_positions = *additional_buffers[0];
float * input_errors = *input_errors_buffer;
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
output_elem_count_per_feature_map,
output_configuration_specific.feature_map_count,
entry_count);
hipLaunchKernelGGL(( maxout_deriviative_upd_kernel), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id,
input_errors,
max_feature_map_positions,
output_errors,
output_elem_count_per_feature_map,
input_configuration_specific.feature_map_count,
output_configuration_specific.feature_map_count,
feature_map_subsampling_size,
entry_count);
}
void maxout_layer_updater_cuda::updater_configured()
{
if (!different_input)
throw neural_network_exception("maxout_layer_updater_cuda is not able to run using the same input");
nnforge_shared_ptr<const maxout_layer> layer_derived = nnforge_dynamic_pointer_cast<const maxout_layer>(layer_schema);
feature_map_subsampling_size = layer_derived->feature_map_subsampling_size;
}
bool maxout_layer_updater_cuda::is_in_place_backprop() const
{
return false;
}
std::vector<size_t> maxout_layer_updater_cuda::get_sizes_of_additional_buffers_per_entry() const
{
std::vector<size_t> res;
res.push_back(output_elem_count_per_entry * sizeof(float));
return res;
}
}
}
|
e51dbd71204c8c08df3087b8f0f74da2a2c15e71.cu
|
/*
* Copyright 2011-2013 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "maxout_layer_updater_cuda.h"
#include <cuda_runtime.h>
#include "util_cuda.h"
#include "neural_network_cuda_exception.h"
#include "../maxout_layer.h"
#include "../nn_types.h"
__global__ void maxout_upd_kernel(
float * __restrict output,
int * __restrict max_feature_map_positions,
const float * __restrict input,
int neuron_count_per_feature_map,
int input_feature_map_count,
int output_feature_map_count,
int feature_map_subsampling_size,
int entry_count)
{
int neuron_id = blockIdx.x * blockDim.x + threadIdx.x;
int output_feature_map_id = blockIdx.y * blockDim.y + threadIdx.y;
int entry_id = blockIdx.z * blockDim.z + threadIdx.z;
if ((neuron_id < neuron_count_per_feature_map) && (output_feature_map_id < output_feature_map_count) && (entry_id < entry_count))
{
int input_offset = (entry_id * input_feature_map_count + output_feature_map_id) * neuron_count_per_feature_map + neuron_id;
float max_val = input[input_offset];
int max_pos = 0;
for(int i = 1; i < feature_map_subsampling_size; ++i)
{
input_offset += output_feature_map_count * neuron_count_per_feature_map;
float new_val = input[input_offset];
if (new_val > max_val)
{
max_val = new_val;
max_pos = i;
}
}
int output_offset = (entry_id * output_feature_map_count + output_feature_map_id) * neuron_count_per_feature_map + neuron_id;
output[output_offset] = max_val;
max_feature_map_positions[output_offset] = max_pos;
}
}
__global__ void maxout_deriviative_upd_kernel(
float * __restrict input_errors,
const int * __restrict max_feature_map_positions,
const float * __restrict output_errors,
int neuron_count_per_feature_map,
int input_feature_map_count,
int output_feature_map_count,
int feature_map_subsampling_size,
int entry_count)
{
int neuron_id = blockIdx.x * blockDim.x + threadIdx.x;
int output_feature_map_id = blockIdx.y * blockDim.y + threadIdx.y;
int entry_id = blockIdx.z * blockDim.z + threadIdx.z;
if ((neuron_id < neuron_count_per_feature_map) && (output_feature_map_id < output_feature_map_count) && (entry_id < entry_count))
{
int output_offset = (entry_id * output_feature_map_count + output_feature_map_id) * neuron_count_per_feature_map + neuron_id;
int max_feature_map = max_feature_map_positions[output_offset];
float output_error = output_errors[output_offset];
int input_offset = (entry_id * input_feature_map_count + output_feature_map_id) * neuron_count_per_feature_map + neuron_id;
for(int i = 0; i < feature_map_subsampling_size; ++i)
{
input_errors[input_offset] = ((i == max_feature_map) ? output_error : 0.0F);
input_offset += output_feature_map_count * neuron_count_per_feature_map;
}
}
}
namespace nnforge
{
namespace cuda
{
maxout_layer_updater_cuda::maxout_layer_updater_cuda()
{
}
maxout_layer_updater_cuda::~maxout_layer_updater_cuda()
{
}
void maxout_layer_updater_cuda::enqueue_test(
unsigned int offset_input_entry_id,
cudaStream_t stream_id,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
const std::vector<cuda_linear_buffer_device_smart_ptr>& data,
const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer,
cuda_linear_buffer_device_smart_ptr output_neurons_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects,
unsigned int entry_count)
{
const float * input = *input_neurons_buffer;
float * output = *output_neurons_buffer;
int * max_feature_map_positions = *additional_buffers[0];
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
output_elem_count_per_feature_map,
output_configuration_specific.feature_map_count,
entry_count);
maxout_upd_kernel<<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>(
output,
max_feature_map_positions,
input,
output_elem_count_per_feature_map,
input_configuration_specific.feature_map_count,
output_configuration_specific.feature_map_count,
feature_map_subsampling_size,
entry_count);
}
void maxout_layer_updater_cuda::enqueue_backprop(
cudaStream_t stream_id,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
const std::vector<cuda_linear_buffer_device_smart_ptr>& data,
const_cuda_linear_buffer_device_smart_ptr output_neurons_buffer,
const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer,
cuda_linear_buffer_device_smart_ptr output_errors_buffer,
cuda_linear_buffer_device_smart_ptr input_errors_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects,
unsigned int entry_count)
{
const float * output_errors = *output_errors_buffer;
int * max_feature_map_positions = *additional_buffers[0];
float * input_errors = *input_errors_buffer;
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
output_elem_count_per_feature_map,
output_configuration_specific.feature_map_count,
entry_count);
maxout_deriviative_upd_kernel<<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>(
input_errors,
max_feature_map_positions,
output_errors,
output_elem_count_per_feature_map,
input_configuration_specific.feature_map_count,
output_configuration_specific.feature_map_count,
feature_map_subsampling_size,
entry_count);
}
void maxout_layer_updater_cuda::updater_configured()
{
if (!different_input)
throw neural_network_exception("maxout_layer_updater_cuda is not able to run using the same input");
nnforge_shared_ptr<const maxout_layer> layer_derived = nnforge_dynamic_pointer_cast<const maxout_layer>(layer_schema);
feature_map_subsampling_size = layer_derived->feature_map_subsampling_size;
}
bool maxout_layer_updater_cuda::is_in_place_backprop() const
{
return false;
}
std::vector<size_t> maxout_layer_updater_cuda::get_sizes_of_additional_buffers_per_entry() const
{
std::vector<size_t> res;
res.push_back(output_elem_count_per_entry * sizeof(float));
return res;
}
}
}
|
a0a30b499936836f5c305578220cd07a3341c384.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hipfft.h>
#include <thrust/device_vector.h>
#include <thrust/extrema.h>
#include <thrust/pair.h>
#include "Utils.hpp"
#include <iostream>
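// Single-threaded kernel (launched with one block of one thread): thresholds the buffer at the
// midpoint between its min and max, copies every above-threshold pulse (widened by left/right
// tails scaled by the pulse width) into output_buffer, and writes a NaN between pulses.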
__global__ void extract_pulses(hipfftReal* input_buffer, hipfftReal* output_buffer, const size_t samples_per_buffer, const size_t left_tail, const size_t right_tail) {
hipfftReal min_elem = input_buffer[0];
hipfftReal max_elem = input_buffer[0];
for (size_t i = 1; i < samples_per_buffer; ++i) {
if (input_buffer[i] < min_elem) min_elem = input_buffer[i];
else if (input_buffer[i] > max_elem) max_elem = input_buffer[i];
}
const hipfftReal thresh = (min_elem + max_elem) / 2.f;
size_t i = 0;
size_t output_idx = 0;
while (i < samples_per_buffer) {
hipfftReal curr = input_buffer[i];
// If we are not above the pulse peak threshold, keep searching
if (curr < thresh) {
++i;
continue;
}
// curr is at or above the threshold now
bool found_pulse = false;
size_t j = i + 1;
for (; j < samples_per_buffer; ++j) {
hipfftReal test = input_buffer[j];
// Keep searching until we dip below the threshold again
if (test < thresh) {
--j;
found_pulse = true;
break;
}
}
if (!found_pulse) break;
// We now have a pulse of width (j-i)+1 in idx units
const size_t mid_idx = (i + j) / 2;
const size_t fwhm = j - i + 1;
const size_t left_end = left_tail * fwhm > mid_idx ? 0 : mid_idx - left_tail * fwhm;
const size_t right_end = right_tail * fwhm + mid_idx >= samples_per_buffer ? samples_per_buffer - 1 : mid_idx + right_tail * fwhm;
const size_t pulse_size = right_end - left_end;
// Copy the peak to the output buffer
for (size_t k = 0; k < pulse_size; ++k) {
if ((output_idx + k) < samples_per_buffer && (left_end + k) < samples_per_buffer) {
output_buffer[output_idx + k] = input_buffer[left_end + k];
}
}
// Add a NaN between pulses to delimit them
output_idx += pulse_size + 1;
if (output_idx < samples_per_buffer) {
output_buffer[output_idx++] = nanf("");
}
// Skip to the end of the pulse
i = j + 1;
}
// *data_size = output_idx;
}
// TODO: Split this up into many kernel launches?
// __global__ void extract_pulses(hipfftReal* input_buffer, hipfftReal* output_buffer, const size_t samples_per_buffer, const size_t left_tail, const size_t right_tail, size_t* data_size) {
__host__ void gpu_extract_pulses(hipfftReal* input_buffer, hipfftReal* output_buffer, const size_t samples_per_buffer, const size_t left_tail, const size_t right_tail, size_t* data_size) {
hipLaunchKernelGGL(( extract_pulses), dim3(1), dim3(1), 0, 0, input_buffer, output_buffer, samples_per_buffer, left_tail, right_tail);
}
|
a0a30b499936836f5c305578220cd07a3341c384.cu
|
#include <cuda_runtime.h>
#include <cufft.h>
#include <thrust/device_vector.h>
#include <thrust/extrema.h>
#include <thrust/pair.h>
#include "Utils.hpp"
#include <iostream>
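// Single-threaded kernel (launched <<<1, 1>>>): thresholds the buffer at the midpoint between its
// min and max, copies every above-threshold pulse (widened by left/right tails scaled by the pulse
// width) into output_buffer, and writes a NaN between pulses.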
__global__ void extract_pulses(cufftReal* input_buffer, cufftReal* output_buffer, const size_t samples_per_buffer, const size_t left_tail, const size_t right_tail) {
cufftReal min_elem = input_buffer[0];
cufftReal max_elem = input_buffer[0];
for (size_t i = 1; i < samples_per_buffer; ++i) {
if (input_buffer[i] < min_elem) min_elem = input_buffer[i];
else if (input_buffer[i] > max_elem) max_elem = input_buffer[i];
}
const cufftReal thresh = (min_elem + max_elem) / 2.f;
size_t i = 0;
size_t output_idx = 0;
while (i < samples_per_buffer) {
cufftReal curr = input_buffer[i];
// If we are not above the pulse peak threshold, keep searching
if (curr < thresh) {
++i;
continue;
}
// curr is at or above the threshold now
bool found_pulse = false;
size_t j = i + 1;
for (; j < samples_per_buffer; ++j) {
cufftReal test = input_buffer[j];
// Keep searching until we dip below the threshold again
if (test < thresh) {
--j;
found_pulse = true;
break;
}
}
if (!found_pulse) break;
// We now have a pulse of width (j-i)+1 in idx units
const size_t mid_idx = (i + j) / 2;
const size_t fwhm = j - i + 1;
const size_t left_end = left_tail * fwhm > mid_idx ? 0 : mid_idx - left_tail * fwhm;
const size_t right_end = right_tail * fwhm + mid_idx >= samples_per_buffer ? samples_per_buffer - 1 : mid_idx + right_tail * fwhm;
const size_t pulse_size = right_end - left_end;
// Copy the peak to the output buffer
for (size_t k = 0; k < pulse_size; ++k) {
if ((output_idx + k) < samples_per_buffer && (left_end + k) < samples_per_buffer) {
output_buffer[output_idx + k] = input_buffer[left_end + k];
}
}
// Add a NaN between pulses to delimit them
output_idx += pulse_size + 1;
if (output_idx < samples_per_buffer) {
output_buffer[output_idx++] = nanf("");
}
// Skip to the end of the pulse
i = j + 1;
}
// *data_size = output_idx;
}
// TODO: Split this up into many kernel launches?
// __global__ void extract_pulses(cufftReal* input_buffer, cufftReal* output_buffer, const size_t samples_per_buffer, const size_t left_tail, const size_t right_tail, size_t* data_size) {
__host__ void gpu_extract_pulses(cufftReal* input_buffer, cufftReal* output_buffer, const size_t samples_per_buffer, const size_t left_tail, const size_t right_tail, size_t* data_size) {
extract_pulses<<<1, 1>>>(input_buffer, output_buffer, samples_per_buffer, left_tail, right_tail);
}
|
391774d18f02145e1cacfdaf95b84d1eaa085da0.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "convolutionGPUkernel_2D.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
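// Benchmark driver: for each matrix size and block shape, rounds the grid up to cover the matrix,
// does one launch plus a sync and 10 warm-up launches, then times 1000 launches of
// convolutionGPUkernel_2D (no sync before the end timestamp, so this mostly measures launch cost)
// and prints [microseconds, (BLOCKX,BLOCKY), (XSIZE,YSIZE)].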
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *M = NULL;
hipMalloc(&M, XSIZE*YSIZE);
int *mascara = NULL;
hipMalloc(&mascara, XSIZE*YSIZE);
int *resultado = NULL;
hipMalloc(&resultado, XSIZE*YSIZE);
int m = 2;
int n = XSIZE*YSIZE;
int widthM = XSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((convolutionGPUkernel_2D), dim3(gridBlock), dim3(threadBlock), 0, 0, M, mascara, resultado, m, n, widthM);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((convolutionGPUkernel_2D), dim3(gridBlock), dim3(threadBlock), 0, 0, M, mascara, resultado, m, n, widthM);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((convolutionGPUkernel_2D), dim3(gridBlock), dim3(threadBlock), 0, 0, M, mascara, resultado, m, n, widthM);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
391774d18f02145e1cacfdaf95b84d1eaa085da0.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "convolutionGPUkernel_2D.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
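// Benchmark driver: for each matrix size and block shape, rounds the grid up to cover the matrix,
// does one launch plus a sync and 10 warm-up launches, then times 1000 launches of
// convolutionGPUkernel_2D (no sync before the end timestamp, so this mostly measures launch cost)
// and prints [microseconds, (BLOCKX,BLOCKY), (XSIZE,YSIZE)].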
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *M = NULL;
cudaMalloc(&M, XSIZE*YSIZE);
int *mascara = NULL;
cudaMalloc(&mascara, XSIZE*YSIZE);
int *resultado = NULL;
cudaMalloc(&resultado, XSIZE*YSIZE);
int m = 2;
int n = XSIZE*YSIZE;
int widthM = XSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
convolutionGPUkernel_2D<<<gridBlock,threadBlock>>>(M,mascara,resultado,m,n,widthM);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
convolutionGPUkernel_2D<<<gridBlock,threadBlock>>>(M,mascara,resultado,m,n,widthM);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
convolutionGPUkernel_2D<<<gridBlock,threadBlock>>>(M,mascara,resultado,m,n,widthM);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
93479e2385009e6ded1f51ceea4b057eae202c7d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// ==========================================================================
// SeqAn - The Library for Sequence Analysis
// ==========================================================================
// Copyright (c) 2006-2016, Knut Reinert, FU Berlin
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of Knut Reinert or the FU Berlin nor the names of
// its contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL KNUT REINERT OR THE FU BERLIN BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
// OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
// DAMAGE.
//
// ==========================================================================
// Author: Enrico Siragusa <[email protected]>
// ==========================================================================
#include <seqan/basic.h>
#include <seqan/sequence.h>
#include <thrust/count.h>
#include "test_cuda_common.h"
using namespace seqan;
// ============================================================================
// Types
// ============================================================================
typedef TagList<String<char, Alloc<> >,
TagList<String<Dna, Alloc<> >,
TagList<String<Dna5, Alloc<> >
// TagList<String<Dna, Packed<> >
> > > //>
StringTypes;
// TODO(esiragusa): test StringSets.
//typedef TagList<StringSet<CharString, Owner<ConcatDirect<> > >,
// TagList<StringSet<DnaString, Owner<ConcatDirect<> > >
// > >
// TStringSetTypes;
// TODO(esiragusa): use metaprogramming algebra.
//typedef Product<StringTypes, Owner<ConcatDirect<> > >::Type TStringSetTypes;
// ============================================================================
// Classes
// ============================================================================
// ----------------------------------------------------------------------------
// Class CudaSequenceTest
// ----------------------------------------------------------------------------
template <typename TType>
class CudaSequenceTest : public Test
{
public:
typedef TType TString;
typedef typename Device<TString>::Type TCudaString;
typedef typename Value<TString>::Type TAlphabet;
TString str;
CudaSequenceTest() :
str("ACGTACGTACGTACGTACGTACGTACGTACGTACGTACGT")
{}
};
SEQAN_TYPED_TEST_CASE(CudaSequenceTest, StringTypes);
// ============================================================================
// Tests
// ============================================================================
// ----------------------------------------------------------------------------
// Test assign()
// ----------------------------------------------------------------------------
SEQAN_TYPED_TEST(CudaSequenceTest, Assign)
{
typedef typename TestFixture::TString TString;
typedef typename TestFixture::TCudaString TCudaString;
typedef typename TestFixture::TAlphabet TAlphabet;
hipDeviceReset();
TCudaString cudaStr;
assign(cudaStr, this->str);
SEQAN_ASSERT_EQ(length(cudaStr), length(this->str));
SEQAN_ASSERT_EQ(thrust::count(begin(cudaStr, Standard()), end(cudaStr, Standard()), TAlphabet('A')), 10u);
SEQAN_ASSERT_EQ(thrust::count(begin(cudaStr, Standard()), end(cudaStr, Standard()), TAlphabet('C')), 10u);
SEQAN_ASSERT_EQ(thrust::count(begin(cudaStr, Standard()), end(cudaStr, Standard()), TAlphabet('G')), 10u);
SEQAN_ASSERT_EQ(thrust::count(begin(cudaStr, Standard()), end(cudaStr, Standard()), TAlphabet('T')), 10u);
// TString str;
// assign(cudaStr, str);
// SEQAN_ASSERT_EQ(str, this->str);
}
// ----------------------------------------------------------------------------
// Test getValue()
// ----------------------------------------------------------------------------
SEQAN_TYPED_TEST(CudaSequenceTest, GetValue)
{
typedef typename TestFixture::TString TString;
typedef typename TestFixture::TCudaString TCudaString;
typedef typename View<TCudaString>::Type TCudaStringView;
typedef typename Size<TString>::Type TSize;
hipDeviceReset();
TCudaString cudaStr;
assign(cudaStr, this->str);
TCudaStringView cudaStrView = view(cudaStr);
for (TSize pos = 0; pos < length(this->str); pos++)
{
hipLaunchKernelGGL(( testGetValue), dim3(1),dim3(1), 0, 0, cudaStrView, pos, getValue(this->str, pos));
hipDeviceSynchronize();
SEQAN_ASSERT_EQ(hipGetLastError(), hipSuccess);
}
}
// ============================================================================
// Register Tests
// ============================================================================
int main(int argc, char const ** argv)
{
TestSystem::init(argc, argv);
return TestSystem::runAll();
}
|
93479e2385009e6ded1f51ceea4b057eae202c7d.cu
|
// ==========================================================================
// SeqAn - The Library for Sequence Analysis
// ==========================================================================
// Copyright (c) 2006-2016, Knut Reinert, FU Berlin
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of Knut Reinert or the FU Berlin nor the names of
// its contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL KNUT REINERT OR THE FU BERLIN BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
// OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
// DAMAGE.
//
// ==========================================================================
// Author: Enrico Siragusa <[email protected]>
// ==========================================================================
#include <seqan/basic.h>
#include <seqan/sequence.h>
#include <thrust/count.h>
#include "test_cuda_common.h"
using namespace seqan;
// ============================================================================
// Types
// ============================================================================
typedef TagList<String<char, Alloc<> >,
TagList<String<Dna, Alloc<> >,
TagList<String<Dna5, Alloc<> >
// TagList<String<Dna, Packed<> >
> > > //>
StringTypes;
// TODO(esiragusa): test StringSets.
//typedef TagList<StringSet<CharString, Owner<ConcatDirect<> > >,
// TagList<StringSet<DnaString, Owner<ConcatDirect<> > >
// > >
// TStringSetTypes;
// TODO(esiragusa): use metaprogramming algebra.
//typedef Product<StringTypes, Owner<ConcatDirect<> > >::Type TStringSetTypes;
// ============================================================================
// Classes
// ============================================================================
// ----------------------------------------------------------------------------
// Class CudaSequenceTest
// ----------------------------------------------------------------------------
template <typename TType>
class CudaSequenceTest : public Test
{
public:
typedef TType TString;
typedef typename Device<TString>::Type TCudaString;
typedef typename Value<TString>::Type TAlphabet;
TString str;
CudaSequenceTest() :
str("ACGTACGTACGTACGTACGTACGTACGTACGTACGTACGT")
{}
};
SEQAN_TYPED_TEST_CASE(CudaSequenceTest, StringTypes);
// ============================================================================
// Tests
// ============================================================================
// ----------------------------------------------------------------------------
// Test assign()
// ----------------------------------------------------------------------------
SEQAN_TYPED_TEST(CudaSequenceTest, Assign)
{
typedef typename TestFixture::TString TString;
typedef typename TestFixture::TCudaString TCudaString;
typedef typename TestFixture::TAlphabet TAlphabet;
cudaDeviceReset();
TCudaString cudaStr;
assign(cudaStr, this->str);
SEQAN_ASSERT_EQ(length(cudaStr), length(this->str));
SEQAN_ASSERT_EQ(thrust::count(begin(cudaStr, Standard()), end(cudaStr, Standard()), TAlphabet('A')), 10u);
SEQAN_ASSERT_EQ(thrust::count(begin(cudaStr, Standard()), end(cudaStr, Standard()), TAlphabet('C')), 10u);
SEQAN_ASSERT_EQ(thrust::count(begin(cudaStr, Standard()), end(cudaStr, Standard()), TAlphabet('G')), 10u);
SEQAN_ASSERT_EQ(thrust::count(begin(cudaStr, Standard()), end(cudaStr, Standard()), TAlphabet('T')), 10u);
// TString str;
// assign(cudaStr, str);
// SEQAN_ASSERT_EQ(str, this->str);
}
// ----------------------------------------------------------------------------
// Test getValue()
// ----------------------------------------------------------------------------
SEQAN_TYPED_TEST(CudaSequenceTest, GetValue)
{
typedef typename TestFixture::TString TString;
typedef typename TestFixture::TCudaString TCudaString;
typedef typename View<TCudaString>::Type TCudaStringView;
typedef typename Size<TString>::Type TSize;
cudaDeviceReset();
TCudaString cudaStr;
assign(cudaStr, this->str);
TCudaStringView cudaStrView = view(cudaStr);
for (TSize pos = 0; pos < length(this->str); pos++)
{
testGetValue<<<1,1>>>(cudaStrView, pos, getValue(this->str, pos));
cudaDeviceSynchronize();
SEQAN_ASSERT_EQ(cudaGetLastError(), cudaSuccess);
}
}
// ============================================================================
// Register Tests
// ============================================================================
int main(int argc, char const ** argv)
{
TestSystem::init(argc, argv);
return TestSystem::runAll();
}
|
2372bd772c3382890e21aae6eaa46f1de6485976.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <hip/hip_runtime.h>
#include "Graph.h"
#define MAX_THREAD_COUNT 1024
#define CEIL(a, b) ((a - 1) / b + 1)
using namespace std;
#define catchCudaError(error) { gpuAssert((error), __FILE__, __LINE__); }
float device_time_taken;
// Catch Cuda errors
inline void gpuAssert(hipError_t error, const char *file, int line, bool abort = false)
{
if (error != hipSuccess)
{
printf("\n====== Cuda Error Code %i ======\n %s in CUDA %s\n", error, hipGetErrorString(error));
printf("\nIn file :%s\nOn line: %d", file, line);
if(abort)
exit(-1);
}
}
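// Brandes-style betweenness centrality computed by a single thread block: for every source s, a
// level-synchronous BFS accumulates shortest-path counts (sigma) and distances, then a reverse
// sweep over the BFS levels accumulates dependencies; contributions are halved because every
// shortest path is seen from both endpoints.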
__global__ void betweennessCentralityKernel(Graph *graph, double *bwCentrality, int nodeCount,
int *sigma, int *distance, double *dependency) {
int idx = threadIdx.x;
if(idx >= nodeCount)
return;
__shared__ int s;
__shared__ int current_depth;
__shared__ bool done;
if(idx == 0) {
s = -1;
// printf("Progress... %3d%%", 0);
}
__syncthreads();
while(s < nodeCount -1)
{
if(idx == 0)
{
++s;
// printf("\rProgress... %5.2f%%", (s+1)*100.0/nodeCount);
done = false;
current_depth = -1;
}
__syncthreads();
for(int v=idx; v<nodeCount; v+=blockDim.x)
{
if(v == s)
{
distance[v] = 0;
sigma[v] = 1;
}
else
{
distance[v] = INT_MAX;
sigma[v] = 0;
}
dependency[v] = 0.0;
}
__syncthreads();
// BFS
while(!done)
{
if(idx == 0){
current_depth++;
}
done = true;
__syncthreads();
for(int v=idx; v<nodeCount; v+=blockDim.x)
{
if(distance[v] == current_depth)
{
for(int r = graph->adjacencyListPointers[v]; r < graph->adjacencyListPointers[v + 1]; r++)
{
int w = graph->adjacencyList[r];
if(distance[w] == INT_MAX)
{
distance[w] = distance[v] + 1;
done = false;
}
if(distance[w] == (distance[v] + 1))
{
atomicAdd(&sigma[w], sigma[v]);
}
}
}
}
__syncthreads();
}
// Reverse BFS
while(current_depth)
{
if(idx == 0){
current_depth--;
}
__syncthreads();
for(int v=idx; v<nodeCount; v+=blockDim.x)
{
if(distance[v] == current_depth)
{
for(int r = graph->adjacencyListPointers[v]; r < graph->adjacencyListPointers[v + 1]; r++)
{
int w = graph->adjacencyList[r];
if(distance[w] == (distance[v] + 1))
{
if (sigma[w] != 0)
dependency[v] += (sigma[v] * 1.0 / sigma[w]) * (1 + dependency[w]);
}
}
if (v != s)
{
// Each shortest path is counted twice. So, each partial shortest path dependency is halved.
bwCentrality[v] += dependency[v] / 2;
}
}
}
__syncthreads();
}
}
}
double *betweennessCentrality(Graph *graph, int nodeCount)
{
double *bwCentrality = new double[nodeCount]();
double *device_bwCentrality, *dependency;
int *sigma, *distance;
//TODO: Allocate device memory for bwCentrality
catchCudaError(hipMalloc((void **)&device_bwCentrality, sizeof(double) * nodeCount));
catchCudaError(hipMalloc((void **)&sigma, sizeof(int) * nodeCount));
catchCudaError(hipMalloc((void **)&distance, sizeof(int) * nodeCount));
catchCudaError(hipMalloc((void **)&dependency, sizeof(double) * nodeCount));
catchCudaError(hipMemcpy(device_bwCentrality, bwCentrality, sizeof(double) * nodeCount, hipMemcpyHostToDevice));
// Timer
hipEvent_t device_start, device_end;
catchCudaError(hipEventCreate(&device_start));
catchCudaError(hipEventCreate(&device_end));
catchCudaError(hipEventRecord(device_start));
hipLaunchKernelGGL(( betweennessCentralityKernel), dim3(1), dim3(MAX_THREAD_COUNT), 0, 0, graph, device_bwCentrality, nodeCount, sigma, distance, dependency);
hipDeviceSynchronize();
//End of progress bar
cout << endl;
// Timer
catchCudaError(hipEventRecord(device_end));
catchCudaError(hipEventSynchronize(device_end));
hipEventElapsedTime(&device_time_taken, device_start, device_end);
// Copy back and free memory
catchCudaError(hipMemcpy(bwCentrality, device_bwCentrality, sizeof(double) * nodeCount, hipMemcpyDeviceToHost));
catchCudaError(hipFree(device_bwCentrality));
catchCudaError(hipFree(sigma));
catchCudaError(hipFree(dependency));
catchCudaError(hipFree(distance));
return bwCentrality;
}
int main(int argc, char *argv[])
{
if (argc < 2)
{
cout << "Usage: " << argv[0] << " <graph_input_file> [output_file]\n";
return 0;
}
freopen(argv[1], "r", stdin);
Graph *host_graph = new Graph();
Graph *device_graph;
catchCudaError(hipMalloc((void **)&device_graph, sizeof(Graph)));
host_graph->readGraph();
int nodeCount = host_graph->getNodeCount();
int edgeCount = host_graph->getEdgeCount();
catchCudaError(hipMemcpy(device_graph, host_graph, sizeof(Graph), hipMemcpyHostToDevice));
// Copy Adjacency List to device
int *adjacencyList;
// Allocate device memory and copy
catchCudaError(hipMalloc((void **)&adjacencyList, sizeof(int) * (2 * edgeCount + 1)));
catchCudaError(hipMemcpy(adjacencyList, host_graph->adjacencyList, sizeof(int) * (2 * edgeCount + 1), hipMemcpyHostToDevice));
// Update the pointer to this, in device_graph
catchCudaError(hipMemcpy(&(device_graph->adjacencyList), &adjacencyList, sizeof(int *), hipMemcpyHostToDevice));
// Copy Adjacency List Pointers to device
int *adjacencyListPointers;
// Allocate device memory and copy
catchCudaError(hipMalloc((void **)&adjacencyListPointers, sizeof(int) * (nodeCount + 1)));
catchCudaError(hipMemcpy(adjacencyListPointers, host_graph->adjacencyListPointers, sizeof(int) * (nodeCount + 1), hipMemcpyHostToDevice));
// Update the pointer to this, in device_graph
catchCudaError(hipMemcpy(&(device_graph->adjacencyListPointers), &adjacencyListPointers, sizeof(int *), hipMemcpyHostToDevice));
double *bwCentrality = betweennessCentrality(device_graph, nodeCount);
double maxBetweenness = -1;
for (int i = 0; i < nodeCount; i++)
{
maxBetweenness = max(maxBetweenness, bwCentrality[i]);
}
printf("%f", device_time_taken);
if (argc == 3)
{
freopen(argv[2], "w", stdout);
for (int i = 0; i < nodeCount; i++)
cout << bwCentrality[i] << " ";
cout << endl;
}
// Free all memory
delete[] bwCentrality;
catchCudaError(hipFree(adjacencyList));
catchCudaError(hipFree(adjacencyListPointers));
catchCudaError(hipFree(device_graph));
}
|
2372bd772c3382890e21aae6eaa46f1de6485976.cu
|
#include <iostream>
#include <cuda.h>
#include "Graph.h"
#define MAX_THREAD_COUNT 1024
#define CEIL(a, b) ((a - 1) / b + 1)
using namespace std;
#define catchCudaError(error) { gpuAssert((error), __FILE__, __LINE__); }
float device_time_taken;
// Catch Cuda errors
inline void gpuAssert(cudaError_t error, const char *file, int line, bool abort = false)
{
if (error != cudaSuccess)
{
printf("\n====== Cuda Error Code %i ======\n %s in CUDA %s\n", error, cudaGetErrorString(error));
printf("\nIn file :%s\nOn line: %d", file, line);
if(abort)
exit(-1);
}
}
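// Brandes-style betweenness centrality computed by a single thread block: for every source s, a
// level-synchronous BFS accumulates shortest-path counts (sigma) and distances, then a reverse
// sweep over the BFS levels accumulates dependencies; contributions are halved because every
// shortest path is seen from both endpoints.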
__global__ void betweennessCentralityKernel(Graph *graph, double *bwCentrality, int nodeCount,
int *sigma, int *distance, double *dependency) {
int idx = threadIdx.x;
if(idx >= nodeCount)
return;
__shared__ int s;
__shared__ int current_depth;
__shared__ bool done;
if(idx == 0) {
s = -1;
// printf("Progress... %3d%%", 0);
}
__syncthreads();
while(s < nodeCount -1)
{
if(idx == 0)
{
++s;
// printf("\rProgress... %5.2f%%", (s+1)*100.0/nodeCount);
done = false;
current_depth = -1;
}
__syncthreads();
for(int v=idx; v<nodeCount; v+=blockDim.x)
{
if(v == s)
{
distance[v] = 0;
sigma[v] = 1;
}
else
{
distance[v] = INT_MAX;
sigma[v] = 0;
}
dependency[v] = 0.0;
}
__syncthreads();
// BFS
while(!done)
{
if(idx == 0){
current_depth++;
}
done = true;
__syncthreads();
for(int v=idx; v<nodeCount; v+=blockDim.x)
{
if(distance[v] == current_depth)
{
for(int r = graph->adjacencyListPointers[v]; r < graph->adjacencyListPointers[v + 1]; r++)
{
int w = graph->adjacencyList[r];
if(distance[w] == INT_MAX)
{
distance[w] = distance[v] + 1;
done = false;
}
if(distance[w] == (distance[v] + 1))
{
atomicAdd(&sigma[w], sigma[v]);
}
}
}
}
__syncthreads();
}
// Reverse BFS
while(current_depth)
{
if(idx == 0){
current_depth--;
}
__syncthreads();
for(int v=idx; v<nodeCount; v+=blockDim.x)
{
if(distance[v] == current_depth)
{
for(int r = graph->adjacencyListPointers[v]; r < graph->adjacencyListPointers[v + 1]; r++)
{
int w = graph->adjacencyList[r];
if(distance[w] == (distance[v] + 1))
{
if (sigma[w] != 0)
dependency[v] += (sigma[v] * 1.0 / sigma[w]) * (1 + dependency[w]);
}
}
if (v != s)
{
// Each shortest path is counted twice. So, each partial shortest path dependency is halved.
bwCentrality[v] += dependency[v] / 2;
}
}
}
__syncthreads();
}
}
}
double *betweennessCentrality(Graph *graph, int nodeCount)
{
double *bwCentrality = new double[nodeCount]();
double *device_bwCentrality, *dependency;
int *sigma, *distance;
//TODO: Allocate device memory for bwCentrality
catchCudaError(cudaMalloc((void **)&device_bwCentrality, sizeof(double) * nodeCount));
catchCudaError(cudaMalloc((void **)&sigma, sizeof(int) * nodeCount));
catchCudaError(cudaMalloc((void **)&distance, sizeof(int) * nodeCount));
catchCudaError(cudaMalloc((void **)&dependency, sizeof(double) * nodeCount));
catchCudaError(cudaMemcpy(device_bwCentrality, bwCentrality, sizeof(double) * nodeCount, cudaMemcpyHostToDevice));
// Timer
cudaEvent_t device_start, device_end;
catchCudaError(cudaEventCreate(&device_start));
catchCudaError(cudaEventCreate(&device_end));
catchCudaError(cudaEventRecord(device_start));
betweennessCentralityKernel<<<1, MAX_THREAD_COUNT>>>(graph, device_bwCentrality, nodeCount, sigma, distance, dependency);
cudaDeviceSynchronize();
//End of progress bar
cout << endl;
// Timer
catchCudaError(cudaEventRecord(device_end));
catchCudaError(cudaEventSynchronize(device_end));
cudaEventElapsedTime(&device_time_taken, device_start, device_end);
// Copy back and free memory
catchCudaError(cudaMemcpy(bwCentrality, device_bwCentrality, sizeof(double) * nodeCount, cudaMemcpyDeviceToHost));
catchCudaError(cudaFree(device_bwCentrality));
catchCudaError(cudaFree(sigma));
catchCudaError(cudaFree(dependency));
catchCudaError(cudaFree(distance));
return bwCentrality;
}
int main(int argc, char *argv[])
{
if (argc < 2)
{
cout << "Usage: " << argv[0] << " <graph_input_file> [output_file]\n";
return 0;
}
freopen(argv[1], "r", stdin);
Graph *host_graph = new Graph();
Graph *device_graph;
catchCudaError(cudaMalloc((void **)&device_graph, sizeof(Graph)));
host_graph->readGraph();
int nodeCount = host_graph->getNodeCount();
int edgeCount = host_graph->getEdgeCount();
catchCudaError(cudaMemcpy(device_graph, host_graph, sizeof(Graph), cudaMemcpyHostToDevice));
// Copy Adjacency List to device
int *adjacencyList;
// Allocate device memory and copy
catchCudaError(cudaMalloc((void **)&adjacencyList, sizeof(int) * (2 * edgeCount + 1)));
catchCudaError(cudaMemcpy(adjacencyList, host_graph->adjacencyList, sizeof(int) * (2 * edgeCount + 1), cudaMemcpyHostToDevice));
// Update the pointer to this, in device_graph
catchCudaError(cudaMemcpy(&(device_graph->adjacencyList), &adjacencyList, sizeof(int *), cudaMemcpyHostToDevice));
// Copy Adjacency List Pointers to device
int *adjacencyListPointers;
// Allocate device memory and copy
catchCudaError(cudaMalloc((void **)&adjacencyListPointers, sizeof(int) * (nodeCount + 1)));
catchCudaError(cudaMemcpy(adjacencyListPointers, host_graph->adjacencyListPointers, sizeof(int) * (nodeCount + 1), cudaMemcpyHostToDevice));
// Update the pointer to this, in device_graph
catchCudaError(cudaMemcpy(&(device_graph->adjacencyListPointers), &adjacencyListPointers, sizeof(int *), cudaMemcpyHostToDevice));
double *bwCentrality = betweennessCentrality(device_graph, nodeCount);
double maxBetweenness = -1;
for (int i = 0; i < nodeCount; i++)
{
maxBetweenness = max(maxBetweenness, bwCentrality[i]);
}
printf("%f", device_time_taken);
if (argc == 3)
{
freopen(argv[2], "w", stdout);
for (int i = 0; i < nodeCount; i++)
cout << bwCentrality[i] << " ";
cout << endl;
}
// Free all memory
delete[] bwCentrality;
catchCudaError(cudaFree(adjacencyList));
catchCudaError(cudaFree(adjacencyListPointers));
catchCudaError(cudaFree(device_graph));
}
|
23138d6ac80e779883bf36b407fa76d1118fd232.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "transform.cuh"
#include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh>
#include <catboost/cuda/cuda_util/kernel/operators.cuh>
#include <catboost/cuda/cuda_lib/kernel/arch.cuh>
#include <contrib/libs/cub/cub/block/block_radix_sort.cuh>
namespace NKernel {
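// The element-wise kernels below use a grid-stride loop: the grid is capped at
// TArchProps::MaxBlockCount() and each thread advances by gridDim.x * blockDim.x until the
// whole vector of length `size` has been processed.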
template <typename T>
__global__ void AddVectorImpl(T *x, const T *y, ui64 size) {
ui64 i = blockIdx.x * blockDim.x + threadIdx.x;
while (i < size) {
const T y0 = __ldg(y + i);
const T x0 = __ldg(x + i);
const T r0 = y0 + x0;
WriteThrough(x + i, r0);
i += gridDim.x * blockDim.x;
}
}
template <typename T>
void AddVector(T *x, const T *y, ui64 size, TCudaStream stream) {
const ui32 blockSize = 512;
const ui64 numBlocks = min((size + blockSize - 1) / blockSize,
(ui64)TArchProps::MaxBlockCount());
AddVectorImpl<T> << < numBlocks, blockSize, 0, stream >> > (x, y, size);
}
template <typename T>
__global__ void AddVectorImpl(T *x, const T y, ui64 size) {
ui64 i = blockIdx.x * blockDim.x + threadIdx.x;
while (i < size) {
const T x0 = __ldg(x + i);
const T r0 = y + x0;
WriteThrough(x + i, r0);
i += gridDim.x * blockDim.x;
}
}
template <typename T>
void AddVector(T *x, const T y, ui64 size, TCudaStream stream) {
const ui32 blockSize = 512;
const ui64 numBlocks = min((size + blockSize - 1) / blockSize,
(ui64)TArchProps::MaxBlockCount());
AddVectorImpl<T> << < numBlocks, blockSize, 0, stream >> > (x, y, size);
}
template <typename T>
__global__ void SubtractVectorImpl(T *x, const T *y, ui64 size) {
ui64 i = blockIdx.x * blockDim.x + threadIdx.x;
while (i < size) {
const T y0 = __ldg(y + i);
const T x0 = __ldg(x + i);
const T r0 = x0 - y0;
WriteThrough(x + i, r0);
i += gridDim.x * blockDim.x;
}
}
template <typename T>
__global__ void SubtractVectorImpl(T *x, const T y, ui64 size) {
ui64 i = blockIdx.x * blockDim.x + threadIdx.x;
while (i < size) {
const T x0 = __ldg(x + i);
const T r0 = x0 - y;
WriteThrough(x + i, r0);
i += gridDim.x * blockDim.x;
}
}
template <typename T>
void SubtractVector(T *x, const T *y, ui64 size, TCudaStream stream) {
const ui32 blockSize = 512;
const ui64 numBlocks = min((size + blockSize - 1) / blockSize,
(ui64)TArchProps::MaxBlockCount());
SubtractVectorImpl<T> << < numBlocks, blockSize, 0, stream >> > (x, y, size);
}
template <typename T>
void SubtractVector(T *x, const T y, ui64 size, TCudaStream stream) {
const ui32 blockSize = 512;
const ui64 numBlocks = min((size + blockSize - 1) / blockSize,
(ui64)TArchProps::MaxBlockCount());
SubtractVectorImpl<T> << < numBlocks, blockSize, 0, stream >> > (x, y, size);
}
template <typename T>
__global__ void MultiplyVectorImpl(T *x, const T *y, ui64 size) {
ui64 i = blockIdx.x * blockDim.x + threadIdx.x;
while (i < size) {
const T y0 = __ldg(y + i);
const T x0 = __ldg(x + i);
const T r0 = y0 * x0;
WriteThrough(x + i, r0);
i += gridDim.x * blockDim.x;
}
}
template <typename T>
void MultiplyVector(T *x, const T *y, ui64 size, TCudaStream stream) {
const ui32 blockSize = 512;
const ui64 numBlocks = min((size + blockSize - 1) / blockSize,
(ui64)TArchProps::MaxBlockCount());
MultiplyVectorImpl<T> << < numBlocks, blockSize, 0, stream >> > (x, y, size);
}
template <typename T>
__global__ void MultiplyVectorImpl(T *x, const T c, ui64 size) {
ui64 i = blockIdx.x * blockDim.x + threadIdx.x;
while (i < size) {
T x0 = __ldg(x + i);
T r0 = x0 * c;
WriteThrough(x + i, r0);
i += gridDim.x * blockDim.x;
}
}
template <typename T>
void MultiplyVector(T *x, const T c, ui64 size, TCudaStream stream) {
const ui32 blockSize = 512;
const ui64 numBlocks = min((size + blockSize - 1) / blockSize,
(ui64)TArchProps::MaxBlockCount());
MultiplyVectorImpl<T> << < numBlocks, blockSize, 0, stream >> > (x, c, size);
}
template <typename T>
__global__ void DivideVectorImpl(T *x, const T *y, bool skipZeroes, ui64 size) {
ui64 i = blockIdx.x * blockDim.x + threadIdx.x;
while (i < size) {
T x0 = x[i];
T y0 = y[i];
T r0 = ZeroAwareDivide(x0, y0, skipZeroes);
x[i] = r0;
i += gridDim.x * blockDim.x;
}
}
template <typename T>
__global__ void DivideVectorImpl(T *x, const T y, bool skipZeroes, ui64 size) {
ui64 i = blockIdx.x * blockDim.x + threadIdx.x;
while (i < size) {
T x0 = x[i];
T r0 = ZeroAwareDivide(x0, y, skipZeroes);
x[i] = r0;
i += gridDim.x * blockDim.x;
}
}
template <typename T>
void DivideVector(T *x, const T *y, ui64 size, bool skipZeroes, TCudaStream stream) {
const ui32 blockSize = 512;
const ui64 numBlocks = min((size + blockSize - 1) / blockSize,
(ui64)TArchProps::MaxBlockCount());
DivideVectorImpl<T> << < numBlocks, blockSize, 0, stream >> > (x, y, skipZeroes, size);
}
template <typename T>
void DivideVector(T *x, const T y, ui64 size, bool skipZeroes, TCudaStream stream) {
const ui32 blockSize = 512;
const ui64 numBlocks = min((size + blockSize - 1) / blockSize,
(ui64)TArchProps::MaxBlockCount());
DivideVectorImpl<T> << < numBlocks, blockSize, 0, stream >> > (x, y, skipZeroes, size);
}
template <typename T>
__global__ void ExpVectorImpl(T *x, ui64 size) {
ui64 i = blockIdx.x * blockDim.x + threadIdx.x;
while (i < size) {
T val = __ldg(x + i);
x[i] = __expf(val);
i += gridDim.x * blockDim.x;
}
}
template <typename T>
void ExpVector(T *x, ui64 size, TCudaStream stream) {
const ui32 blockSize = 512;
const ui64 numBlocks = min((size + blockSize - 1) / blockSize, (ui64)TArchProps::MaxBlockCount());
ExpVectorImpl<T> << < numBlocks, blockSize, 0, stream >> > (x, size);
}
template <typename T, typename Index>
__global__ void GatherImpl(T *dst, const T *src, const Index *map, Index size,
int columnCount, ui64 dstColumnAlignSize, ui64 srcColumnAlignSize) {
Index i = blockIdx.x * blockDim.x + threadIdx.x;
while (i < size) {
Index m = __ldg(map + i);
for (int column = 0; column < columnCount; ++column) {
WriteThrough(dst + i + column * dstColumnAlignSize, StreamLoad(src + m + column * srcColumnAlignSize));
}
i += gridDim.x * blockDim.x;
}
}
template <typename T, typename Index>
void Gather(T *dst, const T *src, const Index* map, ui64 size, int columnCount, ui64 dstColumnAlignSize, ui64 srcColumnAlignSize, TCudaStream stream) {
const ui64 blockSize = 256;
const ui64 numBlocks = min((size + blockSize - 1) / blockSize, (ui64)TArchProps::MaxBlockCount());
if (numBlocks) {
GatherImpl<T, Index> << < numBlocks, blockSize, 0, stream >> > (dst, src, map, (Index)size, columnCount, dstColumnAlignSize, srcColumnAlignSize);
}
}
template <typename T, typename Index>
__global__ void GatherWithMaskImpl(T *dst, const T *src, const Index *map, Index size, Index mask) {
Index i = blockIdx.x * blockDim.x + threadIdx.x;
while (i < size) {
Index m = StreamLoad(map + i) & mask;
WriteThrough(dst + i, StreamLoad(src + m));
i += gridDim.x * blockDim.x;
}
}
template <typename T, typename Index>
void GatherWithMask(T *dst, const T *src, const Index* map, ui64 size, Index mask, TCudaStream stream) {
const ui64 blockSize = 256;
const ui64 numBlocks = min((size + blockSize - 1) / blockSize, (ui64)TArchProps::MaxBlockCount());
if (numBlocks) {
GatherWithMaskImpl<T, Index> << < numBlocks, blockSize, 0, stream >> > (dst, src, map, (Index)size, mask);
}
}
template <typename T, typename Index>
__global__ void ScatterImpl(T* dst, const T* src, const Index* map, Index size, int columnCount, ui64 dstColumnAlignSize, ui64 srcColumnALignSize) {
Index i = blockIdx.x * blockDim.x + threadIdx.x;
while (i < size) {
Index m = __ldg(map + i);
for (int column = 0; column < columnCount; ++column) {
WriteThrough(dst + m + dstColumnAlignSize * column, StreamLoad(src + i + srcColumnALignSize * column));
}
i += gridDim.x * blockDim.x;
}
}
template <typename T, typename Index>
void Scatter(T *dst, const T *src, const Index* map, ui64 size, int columnCount, ui64 dstColumnAlignSize, ui64 srcColumnAlignSize, TCudaStream stream) {
const ui32 blockSize = 256;
const ui64 numBlocks = min((size + blockSize - 1) / blockSize, (ui64)TArchProps::MaxBlockCount());
if (numBlocks) {
ScatterImpl<T, Index> << < numBlocks, blockSize, 0, stream >> > (dst, src, map, (Index)size, columnCount, dstColumnAlignSize, srcColumnAlignSize);
}
}
template <typename T, typename Index>
__global__ void ScatterWithMaskImpl(T* dst, const T* src, const Index* map, Index size, Index mask) {
Index i = blockIdx.x * blockDim.x + threadIdx.x;
while (i < size) {
Index m = StreamLoad(map + i) & mask;
WriteThrough(dst + m, StreamLoad(src + i));
i += gridDim.x * blockDim.x;
}
}
template <typename T, typename Index>
void ScatterWithMask(T *dst, const T *src, const Index* map, ui64 size, Index mask, TCudaStream stream) {
const ui32 blockSize = 256;
const ui64 numBlocks = min((size + blockSize - 1) / blockSize, (ui64)TArchProps::MaxBlockCount());
if (numBlocks) {
ScatterWithMaskImpl<T, Index> << < numBlocks, blockSize, 0, stream >> > (dst, src, map, (Index)size, mask);
}
}
template <typename T>
__global__ void ReverseImpl(T *data, ui64 size) {
ui64 i = blockIdx.x * blockDim.x + threadIdx.x;
ui64 half = size / 2;
while (i < half) {
T a = data[i];
T b = data[size - i - 1];
data[i] = b;
data[size - i - 1] = a;
i += gridDim.x * blockDim.x;
}
}
template <typename T>
void Reverse(T* data, ui64 size, TCudaStream stream) {
const ui32 blockSize = 256;
const ui64 numBlocks = min(((size + 1) / 2 + blockSize - 1) / blockSize, (ui64)TArchProps::MaxBlockCount());
ReverseImpl<T> << < numBlocks, blockSize, 0, stream >> > (data, size);
}
#define BIN_OP_VECTOR_TEMPL(Type) \
template void AddVector<Type>(Type *x, const Type *y, ui64 size, TCudaStream stream);\
template void AddVector<Type>(Type *x, Type y, ui64 size, TCudaStream stream);\
template void SubtractVector<Type>(Type *x, const Type *y, ui64 size, TCudaStream stream);\
template void SubtractVector<Type>(Type *x, Type y, ui64 size, TCudaStream stream); \
template void MultiplyVector<Type>(Type *x, const Type* y, ui64 size, TCudaStream stream);\
template void MultiplyVector<Type>(Type *x, Type y, ui64 size, TCudaStream stream);\
template void DivideVector<Type>(Type *x, const Type* y, ui64 size, bool skipZeroes, TCudaStream stream);\
template void DivideVector<Type>(Type *x, Type y, ui64 size, bool skipZeroes, TCudaStream stream);\
BIN_OP_VECTOR_TEMPL(int)
BIN_OP_VECTOR_TEMPL(float)
BIN_OP_VECTOR_TEMPL(ui32)
BIN_OP_VECTOR_TEMPL(double)
BIN_OP_VECTOR_TEMPL(ui8)
BIN_OP_VECTOR_TEMPL(uint2)
BIN_OP_VECTOR_TEMPL(ui16)
#define FUNC_VECTOR_TEMPL(Type) \
template void ExpVector<Type>(Type *x, ui64 size, TCudaStream stream);\
FUNC_VECTOR_TEMPL(float)
#define GATHER_SCATTER_TEMPL(Type, IndexType) \
template void Gather<Type, IndexType>(Type *dst, const Type *src, const IndexType* map, ui64 size, int columnCount, ui64, ui64, TCudaStream stream); \
template void Scatter<Type, IndexType>(Type *dst, const Type *src, const IndexType* map, ui64 size, int, ui64, ui64, TCudaStream stream); \
template void GatherWithMask<Type, IndexType>(Type *dst, const Type *src, const IndexType* map, ui64 size, IndexType mask, TCudaStream stream); \
template void ScatterWithMask<Type, IndexType>(Type *dst, const Type *src, const IndexType* map, ui64 size, IndexType mask, TCudaStream stream);
GATHER_SCATTER_TEMPL(int, ui32)
GATHER_SCATTER_TEMPL(ui8, ui32)
GATHER_SCATTER_TEMPL(uint2, ui32)
GATHER_SCATTER_TEMPL(ui32, ui32)
GATHER_SCATTER_TEMPL(float, ui32)
GATHER_SCATTER_TEMPL(bool, ui32)
#define REVERSE_VECTOR_TEMPL(Type) \
template void Reverse<Type>(Type *x, ui64 size, TCudaStream stream);
REVERSE_VECTOR_TEMPL(char)
REVERSE_VECTOR_TEMPL(float)
REVERSE_VECTOR_TEMPL(unsigned char)
REVERSE_VECTOR_TEMPL(short)
REVERSE_VECTOR_TEMPL(ui16)
REVERSE_VECTOR_TEMPL(int)
REVERSE_VECTOR_TEMPL(ui32)
// PowVector
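// In-place variant: x[i] = pow(base, x[i]), i.e. the vector elements are the exponents.
// An out-of-place overload writing into y follows below.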
template <typename T>
__global__ void PowVectorImpl(T* const x, const T base, const ui64 size) {
ui64 i = blockIdx.x * blockDim.x + threadIdx.x;
while (i < size) {
x[i] = pow(base, x[i]);
i += gridDim.x * blockDim.x;
}
}
template <typename T>
void PowVector(T* const x, const ui64 size, const T base, const TCudaStream stream) {
const ui32 blockSize = 512;
const ui64 numBlocks = Min(
(size + blockSize - 1) / blockSize,
(ui64)TArchProps::MaxBlockCount());
hipLaunchKernelGGL(( PowVectorImpl<T>), dim3(numBlocks), dim3(blockSize), 0, stream, x, base, size);
}
#define Y_CATBOOST_CUDA_F_IMPL(T) \
template void PowVector<T>(T* x, ui64 size, T base, TCudaStream stream);
Y_MAP_ARGS(
Y_CATBOOST_CUDA_F_IMPL,
float);
#undef Y_CATBOOST_CUDA_F_IMPL
// PowVector
template <typename T>
__global__ void PowVectorImpl(const T* const x, const T base, const ui64 size, T* y) {
ui64 i = blockIdx.x * blockDim.x + threadIdx.x;
while (i < size) {
y[i] = pow(base, x[i]);
i += gridDim.x * blockDim.x;
}
}
template <typename T>
void PowVector(const T* x, const ui64 size, const T base, T* y, const TCudaStream stream) {
const ui32 blockSize = 512;
const ui64 numBlocks = Min(
(size + blockSize - 1) / blockSize,
(ui64)TArchProps::MaxBlockCount());
hipLaunchKernelGGL(( PowVectorImpl<T>), dim3(numBlocks), dim3(blockSize), 0, stream, x, base, size, y);
}
#define Y_CATBOOST_CUDA_F_IMPL(T) \
template void PowVector<T>(const T* x, ui64 size, T base, T* y, TCudaStream stream);
Y_MAP_ARGS(
Y_CATBOOST_CUDA_F_IMPL,
float);
#undef Y_CATBOOST_CUDA_F_IMPL
}
|
23138d6ac80e779883bf36b407fa76d1118fd232.cu
|
#include "transform.cuh"
#include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh>
#include <catboost/cuda/cuda_util/kernel/operators.cuh>
#include <catboost/cuda/cuda_lib/kernel/arch.cuh>
#include <contrib/libs/cub/cub/block/block_radix_sort.cuh>
namespace NKernel {
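// The element-wise kernels below use a grid-stride loop: the grid is capped at
// TArchProps::MaxBlockCount() and each thread advances by gridDim.x * blockDim.x until the
// whole vector of length `size` has been processed.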
template <typename T>
__global__ void AddVectorImpl(T *x, const T *y, ui64 size) {
ui64 i = blockIdx.x * blockDim.x + threadIdx.x;
while (i < size) {
const T y0 = __ldg(y + i);
const T x0 = __ldg(x + i);
const T r0 = y0 + x0;
WriteThrough(x + i, r0);
i += gridDim.x * blockDim.x;
}
}
template <typename T>
void AddVector(T *x, const T *y, ui64 size, TCudaStream stream) {
const ui32 blockSize = 512;
const ui64 numBlocks = min((size + blockSize - 1) / blockSize,
(ui64)TArchProps::MaxBlockCount());
AddVectorImpl<T> << < numBlocks, blockSize, 0, stream >> > (x, y, size);
}
template <typename T>
__global__ void AddVectorImpl(T *x, const T y, ui64 size) {
ui64 i = blockIdx.x * blockDim.x + threadIdx.x;
while (i < size) {
const T x0 = __ldg(x + i);
const T r0 = y + x0;
WriteThrough(x + i, r0);
i += gridDim.x * blockDim.x;
}
}
template <typename T>
void AddVector(T *x, const T y, ui64 size, TCudaStream stream) {
const ui32 blockSize = 512;
const ui64 numBlocks = min((size + blockSize - 1) / blockSize,
(ui64)TArchProps::MaxBlockCount());
AddVectorImpl<T> << < numBlocks, blockSize, 0, stream >> > (x, y, size);
}
template <typename T>
__global__ void SubtractVectorImpl(T *x, const T *y, ui64 size) {
ui64 i = blockIdx.x * blockDim.x + threadIdx.x;
while (i < size) {
const T y0 = __ldg(y + i);
const T x0 = __ldg(x + i);
const T r0 = x0 - y0;
WriteThrough(x + i, r0);
i += gridDim.x * blockDim.x;
}
}
template <typename T>
__global__ void SubtractVectorImpl(T *x, const T y, ui64 size) {
ui64 i = blockIdx.x * blockDim.x + threadIdx.x;
while (i < size) {
const T x0 = __ldg(x + i);
const T r0 = x0 - y;
WriteThrough(x + i, r0);
i += gridDim.x * blockDim.x;
}
}
template <typename T>
void SubtractVector(T *x, const T *y, ui64 size, TCudaStream stream) {
const ui32 blockSize = 512;
const ui64 numBlocks = min((size + blockSize - 1) / blockSize,
(ui64)TArchProps::MaxBlockCount());
SubtractVectorImpl<T> << < numBlocks, blockSize, 0, stream >> > (x, y, size);
}
template <typename T>
void SubtractVector(T *x, const T y, ui64 size, TCudaStream stream) {
const ui32 blockSize = 512;
const ui64 numBlocks = min((size + blockSize - 1) / blockSize,
(ui64)TArchProps::MaxBlockCount());
SubtractVectorImpl<T> << < numBlocks, blockSize, 0, stream >> > (x, y, size);
}
template <typename T>
__global__ void MultiplyVectorImpl(T *x, const T *y, ui64 size) {
ui64 i = blockIdx.x * blockDim.x + threadIdx.x;
while (i < size) {
const T y0 = __ldg(y + i);
const T x0 = __ldg(x + i);
const T r0 = y0 * x0;
WriteThrough(x + i, r0);
i += gridDim.x * blockDim.x;
}
}
template <typename T>
void MultiplyVector(T *x, const T *y, ui64 size, TCudaStream stream) {
const ui32 blockSize = 512;
const ui64 numBlocks = min((size + blockSize - 1) / blockSize,
(ui64)TArchProps::MaxBlockCount());
MultiplyVectorImpl<T> << < numBlocks, blockSize, 0, stream >> > (x, y, size);
}
template <typename T>
__global__ void MultiplyVectorImpl(T *x, const T c, ui64 size) {
ui64 i = blockIdx.x * blockDim.x + threadIdx.x;
while (i < size) {
T x0 = __ldg(x + i);
T r0 = x0 * c;
WriteThrough(x + i, r0);
i += gridDim.x * blockDim.x;
}
}
template <typename T>
void MultiplyVector(T *x, const T c, ui64 size, TCudaStream stream) {
const ui32 blockSize = 512;
const ui64 numBlocks = min((size + blockSize - 1) / blockSize,
(ui64)TArchProps::MaxBlockCount());
MultiplyVectorImpl<T> << < numBlocks, blockSize, 0, stream >> > (x, c, size);
}
template <typename T>
__global__ void DivideVectorImpl(T *x, const T *y, bool skipZeroes, ui64 size) {
ui64 i = blockIdx.x * blockDim.x + threadIdx.x;
while (i < size) {
T x0 = x[i];
T y0 = y[i];
T r0 = ZeroAwareDivide(x0, y0, skipZeroes);
x[i] = r0;
i += gridDim.x * blockDim.x;
}
}
template <typename T>
__global__ void DivideVectorImpl(T *x, const T y, bool skipZeroes, ui64 size) {
ui64 i = blockIdx.x * blockDim.x + threadIdx.x;
while (i < size) {
T x0 = x[i];
T r0 = ZeroAwareDivide(x0, y, skipZeroes);
x[i] = r0;
i += gridDim.x * blockDim.x;
}
}
template <typename T>
void DivideVector(T *x, const T *y, ui64 size, bool skipZeroes, TCudaStream stream) {
const ui32 blockSize = 512;
const ui64 numBlocks = min((size + blockSize - 1) / blockSize,
(ui64)TArchProps::MaxBlockCount());
DivideVectorImpl<T> << < numBlocks, blockSize, 0, stream >> > (x, y, skipZeroes, size);
}
template <typename T>
void DivideVector(T *x, const T y, ui64 size, bool skipZeroes, TCudaStream stream) {
const ui32 blockSize = 512;
const ui64 numBlocks = min((size + blockSize - 1) / blockSize,
(ui64)TArchProps::MaxBlockCount());
DivideVectorImpl<T> << < numBlocks, blockSize, 0, stream >> > (x, y, skipZeroes, size);
}
template <typename T>
__global__ void ExpVectorImpl(T *x, ui64 size) {
ui64 i = blockIdx.x * blockDim.x + threadIdx.x;
while (i < size) {
T val = __ldg(x + i);
x[i] = __expf(val);
i += gridDim.x * blockDim.x;
}
}
template <typename T>
void ExpVector(T *x, ui64 size, TCudaStream stream) {
const ui32 blockSize = 512;
const ui64 numBlocks = min((size + blockSize - 1) / blockSize, (ui64)TArchProps::MaxBlockCount());
ExpVectorImpl<T> << < numBlocks, blockSize, 0, stream >> > (x, size);
}
template <typename T, typename Index>
__global__ void GatherImpl(T *dst, const T *src, const Index *map, Index size,
int columnCount, ui64 dstColumnAlignSize, ui64 srcColumnAlignSize) {
Index i = blockIdx.x * blockDim.x + threadIdx.x;
while (i < size) {
Index m = __ldg(map + i);
for (int column = 0; column < columnCount; ++column) {
WriteThrough(dst + i + column * dstColumnAlignSize, StreamLoad(src + m + column * srcColumnAlignSize));
}
i += gridDim.x * blockDim.x;
}
}
template <typename T, typename Index>
void Gather(T *dst, const T *src, const Index* map, ui64 size, int columnCount, ui64 dstColumnAlignSize, ui64 srcColumnAlignSize, TCudaStream stream) {
const ui64 blockSize = 256;
const ui64 numBlocks = min((size + blockSize - 1) / blockSize, (ui64)TArchProps::MaxBlockCount());
if (numBlocks) {
GatherImpl<T, Index> << < numBlocks, blockSize, 0, stream >> > (dst, src, map, (Index)size, columnCount, dstColumnAlignSize, srcColumnAlignSize);
}
}
template <typename T, typename Index>
__global__ void GatherWithMaskImpl(T *dst, const T *src, const Index *map, Index size, Index mask) {
Index i = blockIdx.x * blockDim.x + threadIdx.x;
while (i < size) {
Index m = StreamLoad(map + i) & mask;
WriteThrough(dst + i, StreamLoad(src + m));
i += gridDim.x * blockDim.x;
}
}
template <typename T, typename Index>
void GatherWithMask(T *dst, const T *src, const Index* map, ui64 size, Index mask, TCudaStream stream) {
const ui64 blockSize = 256;
const ui64 numBlocks = min((size + blockSize - 1) / blockSize, (ui64)TArchProps::MaxBlockCount());
if (numBlocks) {
GatherWithMaskImpl<T, Index> << < numBlocks, blockSize, 0, stream >> > (dst, src, map, (Index)size, mask);
}
}
template <typename T, typename Index>
__global__ void ScatterImpl(T* dst, const T* src, const Index* map, Index size, int columnCount, ui64 dstColumnAlignSize, ui64 srcColumnALignSize) {
Index i = blockIdx.x * blockDim.x + threadIdx.x;
while (i < size) {
Index m = __ldg(map + i);
for (int column = 0; column < columnCount; ++column) {
WriteThrough(dst + m + dstColumnAlignSize * column, StreamLoad(src + i + srcColumnALignSize * column));
}
i += gridDim.x * blockDim.x;
}
}
template <typename T, typename Index>
void Scatter(T *dst, const T *src, const Index* map, ui64 size, int columnCount, ui64 dstColumnAlignSize, ui64 srcColumnAlignSize, TCudaStream stream) {
const ui32 blockSize = 256;
const ui64 numBlocks = min((size + blockSize - 1) / blockSize, (ui64)TArchProps::MaxBlockCount());
if (numBlocks) {
ScatterImpl<T, Index> << < numBlocks, blockSize, 0, stream >> > (dst, src, map, (Index)size, columnCount, dstColumnAlignSize, srcColumnAlignSize);
}
}
template <typename T, typename Index>
__global__ void ScatterWithMaskImpl(T* dst, const T* src, const Index* map, Index size, Index mask) {
Index i = blockIdx.x * blockDim.x + threadIdx.x;
while (i < size) {
Index m = StreamLoad(map + i) & mask;
WriteThrough(dst + m, StreamLoad(src + i));
i += gridDim.x * blockDim.x;
}
}
template <typename T, typename Index>
void ScatterWithMask(T *dst, const T *src, const Index* map, ui64 size, Index mask, TCudaStream stream) {
const ui32 blockSize = 256;
const ui64 numBlocks = min((size + blockSize - 1) / blockSize, (ui64)TArchProps::MaxBlockCount());
if (numBlocks) {
ScatterWithMaskImpl<T, Index> << < numBlocks, blockSize, 0, stream >> > (dst, src, map, (Index)size, mask);
}
}
template <typename T>
__global__ void ReverseImpl(T *data, ui64 size) {
ui64 i = blockIdx.x * blockDim.x + threadIdx.x;
ui64 half = size / 2;
while (i < half) {
T a = data[i];
T b = data[size - i - 1];
data[i] = b;
data[size - i - 1] = a;
i += gridDim.x * blockDim.x;
}
}
template <typename T>
void Reverse(T* data, ui64 size, TCudaStream stream) {
const ui32 blockSize = 256;
const ui64 numBlocks = min(((size + 1) / 2 + blockSize - 1) / blockSize, (ui64)TArchProps::MaxBlockCount());
ReverseImpl<T> << < numBlocks, blockSize, 0, stream >> > (data, size);
}
#define BIN_OP_VECTOR_TEMPL(Type) \
template void AddVector<Type>(Type *x, const Type *y, ui64 size, TCudaStream stream);\
template void AddVector<Type>(Type *x, Type y, ui64 size, TCudaStream stream);\
template void SubtractVector<Type>(Type *x, const Type *y, ui64 size, TCudaStream stream);\
template void SubtractVector<Type>(Type *x, Type y, ui64 size, TCudaStream stream); \
template void MultiplyVector<Type>(Type *x, const Type* y, ui64 size, TCudaStream stream);\
template void MultiplyVector<Type>(Type *x, Type y, ui64 size, TCudaStream stream);\
template void DivideVector<Type>(Type *x, const Type* y, ui64 size, bool skipZeroes, TCudaStream stream);\
template void DivideVector<Type>(Type *x, Type y, ui64 size, bool skipZeroes, TCudaStream stream);
BIN_OP_VECTOR_TEMPL(int)
BIN_OP_VECTOR_TEMPL(float)
BIN_OP_VECTOR_TEMPL(ui32)
BIN_OP_VECTOR_TEMPL(double)
BIN_OP_VECTOR_TEMPL(ui8)
BIN_OP_VECTOR_TEMPL(uint2)
BIN_OP_VECTOR_TEMPL(ui16)
#define FUNC_VECTOR_TEMPL(Type) \
template void ExpVector<Type>(Type *x, ui64 size, TCudaStream stream);
FUNC_VECTOR_TEMPL(float)
#define GATHER_SCATTER_TEMPL(Type, IndexType) \
template void Gather<Type, IndexType>(Type *dst, const Type *src, const IndexType* map, ui64 size, int columnCount, ui64, ui64, TCudaStream stream); \
template void Scatter<Type, IndexType>(Type *dst, const Type *src, const IndexType* map, ui64 size, int, ui64, ui64, TCudaStream stream); \
template void GatherWithMask<Type, IndexType>(Type *dst, const Type *src, const IndexType* map, ui64 size, IndexType mask, TCudaStream stream); \
template void ScatterWithMask<Type, IndexType>(Type *dst, const Type *src, const IndexType* map, ui64 size, IndexType mask, TCudaStream stream);
GATHER_SCATTER_TEMPL(int, ui32)
GATHER_SCATTER_TEMPL(ui8, ui32)
GATHER_SCATTER_TEMPL(uint2, ui32)
GATHER_SCATTER_TEMPL(ui32, ui32)
GATHER_SCATTER_TEMPL(float, ui32)
GATHER_SCATTER_TEMPL(bool, ui32)
#define REVERSE_VECTOR_TEMPL(Type) \
template void Reverse<Type>(Type *x, ui64 size, TCudaStream stream);
REVERSE_VECTOR_TEMPL(char)
REVERSE_VECTOR_TEMPL(float)
REVERSE_VECTOR_TEMPL(unsigned char)
REVERSE_VECTOR_TEMPL(short)
REVERSE_VECTOR_TEMPL(ui16)
REVERSE_VECTOR_TEMPL(int)
REVERSE_VECTOR_TEMPL(ui32)
// PowVector
template <typename T>
__global__ void PowVectorImpl(T* const x, const T base, const ui64 size) {
ui64 i = blockIdx.x * blockDim.x + threadIdx.x;
while (i < size) {
x[i] = pow(base, x[i]);
i += gridDim.x * blockDim.x;
}
}
template <typename T>
void PowVector(T* const x, const ui64 size, const T base, const TCudaStream stream) {
const ui32 blockSize = 512;
const ui64 numBlocks = Min(
(size + blockSize - 1) / blockSize,
(ui64)TArchProps::MaxBlockCount());
PowVectorImpl<T><<<numBlocks, blockSize, 0, stream>>>(x, base, size);
}
#define Y_CATBOOST_CUDA_F_IMPL(T) \
template void PowVector<T>(T* x, ui64 size, T base, TCudaStream stream);
Y_MAP_ARGS(
Y_CATBOOST_CUDA_F_IMPL,
float);
#undef Y_CATBOOST_CUDA_F_IMPL
// PowVector
template <typename T>
__global__ void PowVectorImpl(const T* const x, const T base, const ui64 size, T* y) {
ui64 i = blockIdx.x * blockDim.x + threadIdx.x;
while (i < size) {
y[i] = pow(base, x[i]);
i += gridDim.x * blockDim.x;
}
}
template <typename T>
void PowVector(const T* x, const ui64 size, const T base, T* y, const TCudaStream stream) {
const ui32 blockSize = 512;
const ui64 numBlocks = Min(
(size + blockSize - 1) / blockSize,
(ui64)TArchProps::MaxBlockCount());
PowVectorImpl<T><<<numBlocks, blockSize, 0, stream>>>(x, base, size, y);
}
#define Y_CATBOOST_CUDA_F_IMPL(T) \
template void PowVector<T>(const T* x, ui64 size, T base, T* y, TCudaStream stream);
Y_MAP_ARGS(
Y_CATBOOST_CUDA_F_IMPL,
float);
#undef Y_CATBOOST_CUDA_F_IMPL
}
|
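The Gather/Scatter kernels above lean on library-internal helpers (StreamLoad, WriteThrough, TArchProps::MaxBlockCount) for cached loads and grid sizing. As a rough, self-contained illustration of the same grid-stride gather-with-mask pattern in plain CUDA, here is a minimal sketch; the kernel name, the reverse-permutation map, and the power-of-two mask are made up for the example and are not part of the library code.
#include <cstdio>
#include <vector>
#include <cuda_runtime.h>

// Grid-stride gather through a masked index map: dst[i] = src[map[i] & mask].
__global__ void GatherWithMaskSketch(float* dst, const float* src,
                                     const unsigned int* map,
                                     unsigned int size, unsigned int mask) {
    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    while (i < size) {
        unsigned int m = map[i] & mask;  // strip any tag bits stored above the index
        dst[i] = src[m];
        i += gridDim.x * blockDim.x;
    }
}

int main() {
    const unsigned int n = 1 << 20;            // power of two, so (n - 1) works as a mask
    std::vector<unsigned int> hMap(n);
    for (unsigned int i = 0; i < n; ++i) {
        hMap[i] = n - 1 - i;                   // reverse permutation as a stand-in index map
    }
    float *src = nullptr, *dst = nullptr;
    unsigned int* map = nullptr;
    cudaMalloc(&src, n * sizeof(float));
    cudaMalloc(&dst, n * sizeof(float));
    cudaMalloc(&map, n * sizeof(unsigned int));
    cudaMemset(src, 0, n * sizeof(float));
    cudaMemcpy(map, hMap.data(), n * sizeof(unsigned int), cudaMemcpyHostToDevice);
    const unsigned int blockSize = 256;
    const unsigned int numBlocks = (n + blockSize - 1) / blockSize;
    GatherWithMaskSketch<<<numBlocks, blockSize>>>(dst, src, map, n, n - 1);
    cudaDeviceSynchronize();
    cudaFree(src); cudaFree(dst); cudaFree(map);
    return 0;
}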
b99cb8224d1e800448dd19077d108ca5bf931055.hip
|
// !!! This is a file automatically generated by hipify!!!
/* --------------------------------------------------------------------
OPTIMIZED CODE MAKING USE OF REGISTERS + SHARED MEMORY
----------------------------------------------------------------------*/
#include <stdio.h>
#include "hip/hip_runtime.h"
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
#define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1)
#include <rocm_smi/rocm_smi.h>
#include <assert.h>
void check_error (const char* message) {
hipError_t error = hipGetLastError ();
if (error != hipSuccess) {
printf ("CUDA error : %s, %s\n", message, hipGetErrorString (error));
exit(-1);
}
}
__global__ void cheby (double h2inv, double a, double b, double c, double d, double * __restrict__ RHS, double * __restrict__ Ap, double * __restrict__ Dinv, double * __restrict__ Ac, double * __restrict__ out, int L, int M, int N, double * __restrict__ out1) {
//Determining the block's indices
int blockdim_i= (int)(blockDim.x);
int i0 = (int)(blockIdx.x)*(blockdim_i-4);
int i = max (i0, 0) + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j-4);
int j = max (j0, 0) + (int)(threadIdx.y);
//Declarations
double reg_Ac_m2=0, reg_Ac_m1=0, __shared__ sh_Ac_c0[16][32];
double reg_Ap_m1=0;
double reg_Dinv_m2=0, reg_Dinv_m1=0;
double reg_RHS_m2=0, reg_RHS_m1=0;
double reg_out1_m2=0, __shared__ sh_out1_m1[16][32];
double reg_temp1_m1=0, reg_temp1_c0=0, reg_temp1_p1=0;
double reg_temp2_m2=0, reg_temp2_m1=0, reg_temp2_c0=0;
//Value Initialization
if (j <= min (j0+blockdim_j-1, M-1) & i <= min (i0+blockdim_i-1, N-1)) {
sh_Ac_c0[j-j0][i-i0] = 0;
sh_out1_m1[j-j0][i-i0] = 0;
}
//Rest of the computation
for (int k=0; k<=L-1; ++k) {
//Fetch new plane
sh_Ac_c0[j-j0][i-i0] = Ac[k*M*N + j*N + i];
reg_Ap_m1 = Ap[max(k-1,0)*M*N + j*N + i];
reg_Dinv_m1 = Dinv[max(k-1,0)*M*N + j*N + i];
reg_RHS_m1 = RHS[max(k-1,0)*M*N + j*N + i];
__syncthreads ();
if (j >= max (j0+1, 1) & j <= min (j0+blockdim_j-2, M-2) & i >= max (i0+1, 1) & i <= min (i0+blockdim_i-2, N-2)) {
reg_temp1_p1 -= (h2inv * (((0.03f * (((sh_Ac_c0[j-j0-1][i-i0-1] + sh_Ac_c0[j-j0-1][i-i0+1]) + sh_Ac_c0[j-j0+1][i-i0-1]) + sh_Ac_c0[j-j0+1][i-i0+1])) + (0.1f * (((sh_Ac_c0[j-j0-1][i-i0] + sh_Ac_c0[j-j0][i-i0-1]) + sh_Ac_c0[j-j0][i-i0+1]) + sh_Ac_c0[j-j0+1][i-i0]))) + (0.46f * sh_Ac_c0[j-j0][i-i0])));
reg_temp1_c0 += (sh_Ac_c0[j-j0][i-i0] - (h2inv * (((0.1f * (((sh_Ac_c0[j-j0-1][i-i0-1] + sh_Ac_c0[j-j0-1][i-i0+1]) + sh_Ac_c0[j-j0+1][i-i0-1]) + sh_Ac_c0[j-j0+1][i-i0+1])) + (0.46f * (((sh_Ac_c0[j-j0-1][i-i0] + sh_Ac_c0[j-j0][i-i0-1]) + sh_Ac_c0[j-j0][i-i0+1]) + sh_Ac_c0[j-j0+1][i-i0]))) - (4.26f * sh_Ac_c0[j-j0][i-i0]))));
reg_temp1_m1 -= (h2inv * (((0.03f * (((sh_Ac_c0[j-j0-1][i-i0-1] + sh_Ac_c0[j-j0-1][i-i0+1]) + sh_Ac_c0[j-j0+1][i-i0-1]) + sh_Ac_c0[j-j0+1][i-i0+1])) + (0.1f * (((sh_Ac_c0[j-j0-1][i-i0] + sh_Ac_c0[j-j0][i-i0-1]) + sh_Ac_c0[j-j0][i-i0+1]) + sh_Ac_c0[j-j0+1][i-i0]))) + (0.46f * sh_Ac_c0[j-j0][i-i0])));
sh_out1_m1[j-j0][i-i0] = ((reg_Ac_m1 + (a * (reg_Ac_m1 - reg_Ap_m1))) + ((b * reg_Dinv_m1) * (reg_RHS_m1 - reg_temp1_m1)));
out1[max(k-1,0)*M*N+j*N+i] = sh_out1_m1[j-j0][i-i0];
}
__syncthreads ();
if (j >= max (j0+2, 1) & j <= min (j0+blockdim_j-3, M-2) & i >= max (i0+2, 1) & i <= min (i0+blockdim_i-3, N-2)) {
reg_temp2_c0 -= (h2inv * (((0.03f * (((sh_out1_m1[j-j0-1][i-i0-1] + sh_out1_m1[j-j0-1][i-i0+1]) + sh_out1_m1[j-j0+1][i-i0-1]) + sh_out1_m1[j-j0+1][i-i0+1])) + (0.1f * (((sh_out1_m1[j-j0-1][i-i0] + sh_out1_m1[j-j0][i-i0-1]) + sh_out1_m1[j-j0][i-i0+1]) + sh_out1_m1[j-j0+1][i-i0]))) + (0.46f * sh_out1_m1[j-j0][i-i0])));
reg_temp2_m1 += (sh_out1_m1[j-j0][i-i0] - (h2inv * (((0.1f * (((sh_out1_m1[j-j0-1][i-i0-1] + sh_out1_m1[j-j0-1][i-i0+1]) + sh_out1_m1[j-j0+1][i-i0-1]) + sh_out1_m1[j-j0+1][i-i0+1])) + (0.46f * (((sh_out1_m1[j-j0-1][i-i0] + sh_out1_m1[j-j0][i-i0-1]) + sh_out1_m1[j-j0][i-i0+1]) + sh_out1_m1[j-j0+1][i-i0]))) - (4.26f * sh_out1_m1[j-j0][i-i0]))));
reg_temp2_m2 -= (h2inv * (((0.03f * (((sh_out1_m1[j-j0-1][i-i0-1] + sh_out1_m1[j-j0-1][i-i0+1]) + sh_out1_m1[j-j0+1][i-i0-1]) + sh_out1_m1[j-j0+1][i-i0+1])) + (0.1f * (((sh_out1_m1[j-j0-1][i-i0] + sh_out1_m1[j-j0][i-i0-1]) + sh_out1_m1[j-j0][i-i0+1]) + sh_out1_m1[j-j0+1][i-i0]))) + (0.46f * sh_out1_m1[j-j0][i-i0])));
out[max(k-2,0)*M*N + j*N + i] = ((reg_out1_m2 + (c * (reg_out1_m2 - reg_Ac_m2))) + ((d * reg_Dinv_m2) * (reg_RHS_m2 - reg_temp2_m2)));
}
__syncthreads ();
//Value rotation
reg_Ac_m2 = reg_Ac_m1;
reg_Ac_m1 = sh_Ac_c0[j-j0][i-i0];
sh_Ac_c0[j-j0][i-i0] = 0;
reg_Dinv_m2 = reg_Dinv_m1;
reg_Dinv_m1 = 0;
reg_RHS_m2 = reg_RHS_m1;
reg_RHS_m1 = 0;
reg_out1_m2 = sh_out1_m1[j-j0][i-i0];
sh_out1_m1[j-j0][i-i0] = 0;
reg_temp1_m1 = reg_temp1_c0;
reg_temp1_c0 = reg_temp1_p1;
reg_temp1_p1 = 0;
reg_temp2_m2 = reg_temp2_m1;
reg_temp2_m1 = reg_temp2_c0;
reg_temp2_c0 = 0;
__syncthreads ();
}
}
extern "C" void host_code (double *h_Ac, double *h_Ap, double *h_Dinv, double *h_RHS, double *h_out, double *c1, double *c2, double h2inv, int L, int M, int N) {
double *Ac;
hipMalloc (&Ac, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for Ac\n");
hipMemcpy (Ac, h_Ac, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *Ap;
hipMalloc (&Ap, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for Ap\n");
hipMemcpy (Ap, h_Ap, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *Dinv;
hipMalloc (&Dinv, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for Dinv\n");
hipMemcpy (Dinv, h_Dinv, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *RHS;
hipMalloc (&RHS, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for RHS\n");
hipMemcpy (RHS, h_RHS, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *out;
hipMalloc (&out, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for out\n");
double *out1;
hipMalloc (&out1, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for out1\n");
double *out2;
hipMalloc (&out2, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for out2\n");
double *out3;
hipMalloc (&out3, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for out3\n");
dim3 blockconfig_1 (32, 16, 1);
dim3 gridconfig_1 (ceil(N, blockconfig_1.x-4), ceil(M, blockconfig_1.y-4), 1);
dim3 blockconfig_2 (32, 16, 1);
dim3 gridconfig_2 (ceil(N, blockconfig_2.x-4), ceil(M, blockconfig_2.y-4), 1);
// hipify left the NVML power queries behind; use the ROCm SMI equivalents so the file compiles against <rocm_smi/rocm_smi.h>.
uint64_t power1, power2;
rsmi_status_t result;
uint32_t device = 0;
result = rsmi_init(0);
assert(RSMI_STATUS_SUCCESS == result);
// Note: the NVML power-management-mode query has no direct ROCm SMI counterpart and is dropped here.
result = rsmi_dev_power_ave_get(device, 0, &power1);
assert(RSMI_STATUS_SUCCESS == result);
hipDeviceSynchronize();
for (int x=0; x<500; x++) {
hipLaunchKernelGGL(( cheby) , dim3(gridconfig_1), dim3(blockconfig_1), 0, 0, h2inv, c1[0], c2[0], c1[1], c2[1], RHS, Ap, Dinv, Ac, out2, L, M, N, out1);
hipLaunchKernelGGL(( cheby) , dim3(gridconfig_2), dim3(blockconfig_2), 0, 0, h2inv, c1[2], c2[2], c1[3], c2[3], RHS, out1, Dinv, out2, out, L, M, N, out3);
}
hipDeviceSynchronize();
result = rsmi_dev_power_ave_get(device, 0, &power2);
assert(RSMI_STATUS_SUCCESS == result);
power2 -= power1;
printf("%lu\n", (unsigned long) power2);
rsmi_shut_down();
hipMemcpy (h_out, out, sizeof(double)*L*M*N, hipMemcpyDeviceToHost);
}
|
b99cb8224d1e800448dd19077d108ca5bf931055.cu
|
/* --------------------------------------------------------------------
OPTIMIZED CODE MAKING USE OF REGISTERS + SHARED MEMORY
----------------------------------------------------------------------*/
#include <stdio.h>
#include "cuda.h"
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
#define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1)
#include <nvml.h>
#include <assert.h>
void check_error (const char* message) {
cudaError_t error = cudaGetLastError ();
if (error != cudaSuccess) {
printf ("CUDA error : %s, %s\n", message, cudaGetErrorString (error));
exit(-1);
}
}
__global__ void cheby (double h2inv, double a, double b, double c, double d, double * __restrict__ RHS, double * __restrict__ Ap, double * __restrict__ Dinv, double * __restrict__ Ac, double * __restrict__ out, int L, int M, int N, double * __restrict__ out1) {
//Determining the block's indices
int blockdim_i= (int)(blockDim.x);
int i0 = (int)(blockIdx.x)*(blockdim_i-4);
int i = max (i0, 0) + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j-4);
int j = max (j0, 0) + (int)(threadIdx.y);
//Declarations
double reg_Ac_m2=0, reg_Ac_m1=0, __shared__ sh_Ac_c0[16][32];
double reg_Ap_m1=0;
double reg_Dinv_m2=0, reg_Dinv_m1=0;
double reg_RHS_m2=0, reg_RHS_m1=0;
double reg_out1_m2=0, __shared__ sh_out1_m1[16][32];
double reg_temp1_m1=0, reg_temp1_c0=0, reg_temp1_p1=0;
double reg_temp2_m2=0, reg_temp2_m1=0, reg_temp2_c0=0;
//Value Initialization
if (j <= min (j0+blockdim_j-1, M-1) & i <= min (i0+blockdim_i-1, N-1)) {
sh_Ac_c0[j-j0][i-i0] = 0;
sh_out1_m1[j-j0][i-i0] = 0;
}
//Rest of the computation
for (int k=0; k<=L-1; ++k) {
//Fetch new plane
sh_Ac_c0[j-j0][i-i0] = Ac[k*M*N + j*N + i];
reg_Ap_m1 = Ap[max(k-1,0)*M*N + j*N + i];
reg_Dinv_m1 = Dinv[max(k-1,0)*M*N + j*N + i];
reg_RHS_m1 = RHS[max(k-1,0)*M*N + j*N + i];
__syncthreads ();
if (j >= max (j0+1, 1) & j <= min (j0+blockdim_j-2, M-2) & i >= max (i0+1, 1) & i <= min (i0+blockdim_i-2, N-2)) {
reg_temp1_p1 -= (h2inv * (((0.03f * (((sh_Ac_c0[j-j0-1][i-i0-1] + sh_Ac_c0[j-j0-1][i-i0+1]) + sh_Ac_c0[j-j0+1][i-i0-1]) + sh_Ac_c0[j-j0+1][i-i0+1])) + (0.1f * (((sh_Ac_c0[j-j0-1][i-i0] + sh_Ac_c0[j-j0][i-i0-1]) + sh_Ac_c0[j-j0][i-i0+1]) + sh_Ac_c0[j-j0+1][i-i0]))) + (0.46f * sh_Ac_c0[j-j0][i-i0])));
reg_temp1_c0 += (sh_Ac_c0[j-j0][i-i0] - (h2inv * (((0.1f * (((sh_Ac_c0[j-j0-1][i-i0-1] + sh_Ac_c0[j-j0-1][i-i0+1]) + sh_Ac_c0[j-j0+1][i-i0-1]) + sh_Ac_c0[j-j0+1][i-i0+1])) + (0.46f * (((sh_Ac_c0[j-j0-1][i-i0] + sh_Ac_c0[j-j0][i-i0-1]) + sh_Ac_c0[j-j0][i-i0+1]) + sh_Ac_c0[j-j0+1][i-i0]))) - (4.26f * sh_Ac_c0[j-j0][i-i0]))));
reg_temp1_m1 -= (h2inv * (((0.03f * (((sh_Ac_c0[j-j0-1][i-i0-1] + sh_Ac_c0[j-j0-1][i-i0+1]) + sh_Ac_c0[j-j0+1][i-i0-1]) + sh_Ac_c0[j-j0+1][i-i0+1])) + (0.1f * (((sh_Ac_c0[j-j0-1][i-i0] + sh_Ac_c0[j-j0][i-i0-1]) + sh_Ac_c0[j-j0][i-i0+1]) + sh_Ac_c0[j-j0+1][i-i0]))) + (0.46f * sh_Ac_c0[j-j0][i-i0])));
sh_out1_m1[j-j0][i-i0] = ((reg_Ac_m1 + (a * (reg_Ac_m1 - reg_Ap_m1))) + ((b * reg_Dinv_m1) * (reg_RHS_m1 - reg_temp1_m1)));
out1[max(k-1,0)*M*N+j*N+i] = sh_out1_m1[j-j0][i-i0];
}
__syncthreads ();
if (j >= max (j0+2, 1) & j <= min (j0+blockdim_j-3, M-2) & i >= max (i0+2, 1) & i <= min (i0+blockdim_i-3, N-2)) {
reg_temp2_c0 -= (h2inv * (((0.03f * (((sh_out1_m1[j-j0-1][i-i0-1] + sh_out1_m1[j-j0-1][i-i0+1]) + sh_out1_m1[j-j0+1][i-i0-1]) + sh_out1_m1[j-j0+1][i-i0+1])) + (0.1f * (((sh_out1_m1[j-j0-1][i-i0] + sh_out1_m1[j-j0][i-i0-1]) + sh_out1_m1[j-j0][i-i0+1]) + sh_out1_m1[j-j0+1][i-i0]))) + (0.46f * sh_out1_m1[j-j0][i-i0])));
reg_temp2_m1 += (sh_out1_m1[j-j0][i-i0] - (h2inv * (((0.1f * (((sh_out1_m1[j-j0-1][i-i0-1] + sh_out1_m1[j-j0-1][i-i0+1]) + sh_out1_m1[j-j0+1][i-i0-1]) + sh_out1_m1[j-j0+1][i-i0+1])) + (0.46f * (((sh_out1_m1[j-j0-1][i-i0] + sh_out1_m1[j-j0][i-i0-1]) + sh_out1_m1[j-j0][i-i0+1]) + sh_out1_m1[j-j0+1][i-i0]))) - (4.26f * sh_out1_m1[j-j0][i-i0]))));
reg_temp2_m2 -= (h2inv * (((0.03f * (((sh_out1_m1[j-j0-1][i-i0-1] + sh_out1_m1[j-j0-1][i-i0+1]) + sh_out1_m1[j-j0+1][i-i0-1]) + sh_out1_m1[j-j0+1][i-i0+1])) + (0.1f * (((sh_out1_m1[j-j0-1][i-i0] + sh_out1_m1[j-j0][i-i0-1]) + sh_out1_m1[j-j0][i-i0+1]) + sh_out1_m1[j-j0+1][i-i0]))) + (0.46f * sh_out1_m1[j-j0][i-i0])));
out[max(k-2,0)*M*N + j*N + i] = ((reg_out1_m2 + (c * (reg_out1_m2 - reg_Ac_m2))) + ((d * reg_Dinv_m2) * (reg_RHS_m2 - reg_temp2_m2)));
}
__syncthreads ();
//Value rotation
reg_Ac_m2 = reg_Ac_m1;
reg_Ac_m1 = sh_Ac_c0[j-j0][i-i0];
sh_Ac_c0[j-j0][i-i0] = 0;
reg_Dinv_m2 = reg_Dinv_m1;
reg_Dinv_m1 = 0;
reg_RHS_m2 = reg_RHS_m1;
reg_RHS_m1 = 0;
reg_out1_m2 = sh_out1_m1[j-j0][i-i0];
sh_out1_m1[j-j0][i-i0] = 0;
reg_temp1_m1 = reg_temp1_c0;
reg_temp1_c0 = reg_temp1_p1;
reg_temp1_p1 = 0;
reg_temp2_m2 = reg_temp2_m1;
reg_temp2_m1 = reg_temp2_c0;
reg_temp2_c0 = 0;
__syncthreads ();
}
}
extern "C" void host_code (double *h_Ac, double *h_Ap, double *h_Dinv, double *h_RHS, double *h_out, double *c1, double *c2, double h2inv, int L, int M, int N) {
double *Ac;
cudaMalloc (&Ac, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for Ac\n");
cudaMemcpy (Ac, h_Ac, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *Ap;
cudaMalloc (&Ap, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for Ap\n");
cudaMemcpy (Ap, h_Ap, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *Dinv;
cudaMalloc (&Dinv, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for Dinv\n");
cudaMemcpy (Dinv, h_Dinv, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *RHS;
cudaMalloc (&RHS, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for RHS\n");
cudaMemcpy (RHS, h_RHS, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *out;
cudaMalloc (&out, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for out\n");
double *out1;
cudaMalloc (&out1, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for out1\n");
double *out2;
cudaMalloc (&out2, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for out2\n");
double *out3;
cudaMalloc (&out3, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for out3\n");
dim3 blockconfig_1 (32, 16, 1);
dim3 gridconfig_1 (ceil(N, blockconfig_1.x-4), ceil(M, blockconfig_1.y-4), 1);
dim3 blockconfig_2 (32, 16, 1);
dim3 gridconfig_2 (ceil(N, blockconfig_2.x-4), ceil(M, blockconfig_2.y-4), 1);
unsigned int power1, power2;
nvmlReturn_t result;
nvmlDevice_t device;
nvmlEnableState_t mode;
result=nvmlInit();
result = nvmlDeviceGetHandleByIndex(0, &device);
assert(NVML_SUCCESS == result);
result=nvmlDeviceGetPowerManagementMode(device, &mode);
printf("enabled = %d\n", mode);
result=nvmlDeviceGetPowerUsage(device,&power1);
assert(NVML_SUCCESS == result);
cudaDeviceSynchronize();
for (int x=0; x<500; x++) {
cheby <<<gridconfig_1, blockconfig_1>>> (h2inv, c1[0], c2[0], c1[1], c2[1], RHS, Ap, Dinv, Ac, out2, L, M, N, out1);
cheby <<<gridconfig_2, blockconfig_2>>> (h2inv, c1[2], c2[2], c1[3], c2[3], RHS, out1, Dinv, out2, out, L, M, N, out3);
}
cudaDeviceSynchronize();
result=nvmlDeviceGetPowerUsage(device,&power2);
assert(NVML_SUCCESS == result);
power2 -= power1;
printf("%u\n", power2);
nvmlShutdown();
cudaMemcpy (h_out, out, sizeof(double)*L*M*N, cudaMemcpyDeviceToHost);
}
|
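The host code in this pair brackets 500 kernel-launch iterations with NVML power queries and prints the difference. A stripped-down sketch of just that measurement pattern is below; it assumes an NVIDIA toolchain with nvml.h available (link with -lnvidia-ml), and the busy-work kernel is a placeholder, not the cheby stencil.
#include <cstdio>
#include <nvml.h>
#include <cuda_runtime.h>

__global__ void busy_kernel(float* x, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        float v = x[i];
        for (int k = 0; k < 1000; ++k) v = v * 1.000001f + 0.5f;
        x[i] = v;
    }
}

int main() {
    int n = 1 << 22;
    float* x;
    cudaMalloc(&x, n * sizeof(float));

    nvmlDevice_t dev;
    unsigned int mw_before = 0, mw_after = 0;
    nvmlInit();
    nvmlDeviceGetHandleByIndex(0, &dev);

    cudaDeviceSynchronize();
    nvmlDeviceGetPowerUsage(dev, &mw_before);          // reported in milliwatts
    for (int it = 0; it < 500; ++it)
        busy_kernel<<<(n + 255) / 256, 256>>>(x, n);
    cudaDeviceSynchronize();
    nvmlDeviceGetPowerUsage(dev, &mw_after);

    printf("power delta: %u mW\n", mw_after - mw_before);
    nvmlShutdown();
    cudaFree(x);
    return 0;
}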
e17ad95728de788056f56427e6fad00dd4a91371.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <tests/utilities/legacy/cudf_test_fixtures.h>
#include <thrust/device_vector.h>
#include <cudf/cudf.h>
#include <hash/concurrent_unordered_multimap.cuh>
#include <gtest/gtest.h>
#include <iostream>
#include <limits>
#include <vector>
#include <cstdlib>
// This is necessary to do a parametrized typed-test over multiple template
// arguments
template <typename Key, typename Value>
struct KeyValueTypes {
using key_type = Key;
using value_type = Value;
};
// A new instance of this class will be created for each *TEST(MultimapTest,
// ...) Put all repeated stuff for each test here
template <class T>
class MultimapTest : public GdfTest {
public:
using key_type = typename T::key_type;
using value_type = typename T::value_type;
using size_type = int;
using multimap_type =
concurrent_unordered_multimap<key_type, value_type, size_type,
std::numeric_limits<key_type>::max(),
std::numeric_limits<value_type>::max()>;
std::unique_ptr<multimap_type, std::function<void(multimap_type*)>> the_map;
const key_type unused_key = std::numeric_limits<key_type>::max();
const value_type unused_value = std::numeric_limits<value_type>::max();
const size_type size;
MultimapTest(const size_type hash_table_size = 100)
: the_map(multimap_type::create(hash_table_size)),
size(hash_table_size) {
CUDA_TRY(hipStreamSynchronize(0));
}
~MultimapTest() {}
};
// Google Test can only do a parameterized typed-test over a single type, so we
// have to nest multiple types inside of the KeyValueTypes struct above
// KeyValueTypes<type1, type2> implies key_type = type1, value_type = type2
// This list is the types across which Google Test will run our tests
typedef ::testing::Types<
KeyValueTypes<int, int>, KeyValueTypes<int, long long int>,
KeyValueTypes<int, unsigned long long int>,
KeyValueTypes<unsigned long long int, int>,
KeyValueTypes<unsigned long long int, long long int>,
KeyValueTypes<unsigned long long int, unsigned long long int>>
Implementations;
TYPED_TEST_CASE(MultimapTest, Implementations);
TYPED_TEST(MultimapTest, InitialState) {
using key_type = typename TypeParam::key_type;
using value_type = typename TypeParam::value_type;
auto begin = this->the_map->begin();
auto end = this->the_map->end();
EXPECT_NE(begin, end);
}
TYPED_TEST(MultimapTest, CheckUnusedValues) {
EXPECT_EQ(this->the_map->get_unused_key(), this->unused_key);
auto begin = this->the_map->begin();
EXPECT_EQ(begin->first, this->unused_key);
EXPECT_EQ(begin->second, this->unused_value);
}
|
e17ad95728de788056f56427e6fad00dd4a91371.cu
|
/*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <tests/utilities/legacy/cudf_test_fixtures.h>
#include <thrust/device_vector.h>
#include <cudf/cudf.h>
#include <hash/concurrent_unordered_multimap.cuh>
#include <gtest/gtest.h>
#include <iostream>
#include <limits>
#include <vector>
#include <cstdlib>
// This is necessary to do a parametrized typed-test over multiple template
// arguments
template <typename Key, typename Value>
struct KeyValueTypes {
using key_type = Key;
using value_type = Value;
};
// A new instance of this class will be created for each *TEST(MultimapTest,
// ...) Put all repeated stuff for each test here
template <class T>
class MultimapTest : public GdfTest {
public:
using key_type = typename T::key_type;
using value_type = typename T::value_type;
using size_type = int;
using multimap_type =
concurrent_unordered_multimap<key_type, value_type, size_type,
std::numeric_limits<key_type>::max(),
std::numeric_limits<value_type>::max()>;
std::unique_ptr<multimap_type, std::function<void(multimap_type*)>> the_map;
const key_type unused_key = std::numeric_limits<key_type>::max();
const value_type unused_value = std::numeric_limits<value_type>::max();
const size_type size;
MultimapTest(const size_type hash_table_size = 100)
: the_map(multimap_type::create(hash_table_size)),
size(hash_table_size) {
CUDA_TRY(cudaStreamSynchronize(0));
}
~MultimapTest() {}
};
// Google Test can only do a parameterized typed-test over a single type, so we
// have to nest multiple types inside of the KeyValueTypes struct above
// KeyValueTypes<type1, type2> implies key_type = type1, value_type = type2
// This list is the types across which Google Test will run our tests
typedef ::testing::Types<
KeyValueTypes<int, int>, KeyValueTypes<int, long long int>,
KeyValueTypes<int, unsigned long long int>,
KeyValueTypes<unsigned long long int, int>,
KeyValueTypes<unsigned long long int, long long int>,
KeyValueTypes<unsigned long long int, unsigned long long int>>
Implementations;
TYPED_TEST_CASE(MultimapTest, Implementations);
TYPED_TEST(MultimapTest, InitialState) {
using key_type = typename TypeParam::key_type;
using value_type = typename TypeParam::value_type;
auto begin = this->the_map->begin();
auto end = this->the_map->end();
EXPECT_NE(begin, end);
}
TYPED_TEST(MultimapTest, CheckUnusedValues) {
EXPECT_EQ(this->the_map->get_unused_key(), this->unused_key);
auto begin = this->the_map->begin();
EXPECT_EQ(begin->first, this->unused_key);
EXPECT_EQ(begin->second, this->unused_value);
}
|
42f70f70ede539736a71fab0e0bca0292ff9cb77.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <ATen/Dispatch.h>
#include <ATen/ExpandUtils.h>
#include <ATen/NativeFunctions.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/AccumulateType.h>
#include <ATen/CUDAGenerator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/hip/DistributionTemplates.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <hiprand/hiprand_kernel.h>
#include <utility>
#include <functional>
#include <ATen/native/Distributions.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/TensorIterator.h>
#include <ATen/LegacyTHFunctionsCUDA.h>
#include <THH/THHGeneral.h>
#include <THH/THHApply.cuh>
#include <THH/THHDeviceUtils.cuh>
#include <cstdint>
#include <limits>
#include <utility>
#include <type_traits>
namespace at { namespace native {
void uniform_kernel_cuda(TensorIterator& iter, double from_, double to_, Generator* gen_) {
auto gen = get_generator_or_default<CUDAGenerator>(gen_, cuda::detail::getDefaultCUDAGenerator());
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "uniform_cuda", [&] {
auto from = static_cast<scalar_t>(from_);
auto to = static_cast<scalar_t>(to_);
TORCH_CHECK(from <= to,
"uniform_ expects to return a [from, to) range, but found from=", from,
" > to=", to);
TORCH_CHECK((to - from) <= std::numeric_limits<scalar_t>::max(),
"uniform_ expects to-from <= std::numeric_limits<", toString(iter.dtype()),
">::max(), but found to=", to, " and from=", from,
" which result in to-from to exceed the limit");
using accscalar_t = at::acc_type<scalar_t, true>;
auto range = static_cast<accscalar_t>(to-from);
from = static_cast<accscalar_t>(from);
// define lambda to reverse bounds, multiply 'range' and add 'from_'
auto uniform_func = [range, from] __device__ (accscalar_t rand) {
// reverse the bounds of hiprand4 from (0, 1] to [0, 1)
// Note that this method is from legacy THCTensorRandom and is likely to give
// you more 0-s, since the probability of getting 1-s is higher than that of 0-s and
// by reversing the bounds, we are flipping the probabilities of 1-s and 0-s.
auto reverse_bound_rand = rand == static_cast<accscalar_t>(1.0) ? static_cast<accscalar_t>(0.0) : rand;
return static_cast<scalar_t>(reverse_bound_rand * range + from);
};
if (std::is_same<scalar_t, double>::value) {
distribution_nullary_kernel<scalar_t, accscalar_t, curand4_engine_calls/2>(iter,
gen,
[] __device__ (hiprandStatePhilox4_32_10_t* state) { return hiprand_uniform2_double(state); },
uniform_func);
} else {
distribution_nullary_kernel<scalar_t, accscalar_t, curand4_engine_calls>(iter,
gen,
[] __device__ (hiprandStatePhilox4_32_10_t* state) { return hiprand_uniform4(state); },
uniform_func);
}
});
}
Tensor& uniform_cuda_(Tensor& self, double from, double to, Generator* gen) {
auto iter = TensorIterator::nullary_op(self);
uniform_kernel_cuda(iter, from, to, gen);
return self;
}
}} // namespace at::native
|
42f70f70ede539736a71fab0e0bca0292ff9cb77.cu
|
#include <ATen/Dispatch.h>
#include <ATen/ExpandUtils.h>
#include <ATen/NativeFunctions.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/AccumulateType.h>
#include <ATen/CUDAGenerator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/cuda/DistributionTemplates.h>
#include <curand.h>
#include <curand_kernel.h>
#include <curand_philox4x32_x.h>
#include <utility>
#include <functional>
#include <ATen/native/Distributions.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/TensorIterator.h>
#include <ATen/LegacyTHFunctionsCUDA.h>
#include <THC/THCGeneral.h>
#include <THC/THCApply.cuh>
#include <THC/THCDeviceUtils.cuh>
#include <cstdint>
#include <limits>
#include <utility>
#include <type_traits>
namespace at { namespace native {
void uniform_kernel_cuda(TensorIterator& iter, double from_, double to_, Generator* gen_) {
auto gen = get_generator_or_default<CUDAGenerator>(gen_, cuda::detail::getDefaultCUDAGenerator());
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "uniform_cuda", [&] {
auto from = static_cast<scalar_t>(from_);
auto to = static_cast<scalar_t>(to_);
TORCH_CHECK(from <= to,
"uniform_ expects to return a [from, to) range, but found from=", from,
" > to=", to);
TORCH_CHECK((to - from) <= std::numeric_limits<scalar_t>::max(),
"uniform_ expects to-from <= std::numeric_limits<", toString(iter.dtype()),
">::max(), but found to=", to, " and from=", from,
" which result in to-from to exceed the limit");
using accscalar_t = at::acc_type<scalar_t, true>;
auto range = static_cast<accscalar_t>(to-from);
from = static_cast<accscalar_t>(from);
// define lambda to reverse bounds, multiply 'range' and add 'from_'
auto uniform_func = [range, from] __device__ (accscalar_t rand) {
// reverse the bounds of curand4 from (0, 1] to [0, 1)
// Note that this method is from legacy THCTensorRandom and is likely to give
// you more 0-s, since the probability of getting 1-s is higher than that of 0-s and
// by reversing the bounds, we are flipping the probabilities of 1-s and 0-s.
auto reverse_bound_rand = rand == static_cast<accscalar_t>(1.0) ? static_cast<accscalar_t>(0.0) : rand;
return static_cast<scalar_t>(reverse_bound_rand * range + from);
};
if (std::is_same<scalar_t, double>::value) {
distribution_nullary_kernel<scalar_t, accscalar_t, curand4_engine_calls/2>(iter,
gen,
[] __device__ (curandStatePhilox4_32_10_t* state) { return curand_uniform2_double(state); },
uniform_func);
} else {
distribution_nullary_kernel<scalar_t, accscalar_t, curand4_engine_calls>(iter,
gen,
[] __device__ (curandStatePhilox4_32_10_t* state) { return curand_uniform4(state); },
uniform_func);
}
});
}
Tensor& uniform_cuda_(Tensor& self, double from, double to, Generator* gen) {
auto iter = TensorIterator::nullary_op(self);
uniform_kernel_cuda(iter, from, to, gen);
return self;
}
}} // namespace at::native
|
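The lambda comment above notes that the generator returns values in (0, 1], so the bound is reversed to [0, 1) before scaling into [from, to). The same transform in a standalone kernel with a per-thread Philox state, rather than ATen's distribution_nullary_kernel machinery, might look like the sketch below; the kernel name and launch shape are illustrative only.
#include <cstdio>
#include <cuda_runtime.h>
#include <curand_kernel.h>

// Fill out[0..n) with uniform samples in [from, to).
__global__ void uniform_range_sketch(float* out, int n, float from, float to,
                                     unsigned long long seed) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n) return;
    curandStatePhilox4_32_10_t state;
    curand_init(seed, /*subsequence=*/i, /*offset=*/0, &state);
    float r = curand_uniform(&state);            // (0, 1]
    float rev = (r == 1.0f) ? 0.0f : r;          // reverse the bound: [0, 1)
    out[i] = rev * (to - from) + from;           // [from, to)
}

int main() {
    const int n = 1024;
    float* d_out;
    cudaMalloc(&d_out, n * sizeof(float));
    uniform_range_sketch<<<(n + 255) / 256, 256>>>(d_out, n, -2.0f, 3.0f, 42ULL);
    cudaDeviceSynchronize();
    cudaFree(d_out);
    return 0;
}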
3fb154cd12d0bfe7a6dc67f816e6c6a540f2fc5a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@generated from zlarfx.cu normal z -> s, Tue Sep 2 12:38:15 2014
*/
#include "common_magma.h"
#include "commonblas_s.h"
#include "magma_templates.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
#define BLOCK_SIZEx 32
#define BLOCK_SIZEy 16
//==============================================================================
__global__
void magma_slarfx_kernel( int m, float *v, float *tau,
float *c, int ldc, float *xnorm,
float *T, int it )
{
if ( !MAGMA_S_EQUAL(*tau, MAGMA_S_ZERO) ) {
const int tx = threadIdx.x;
//float *dc = c + (blockIdx.x-it-1) * ldc;
float *dc = c + (blockIdx.x) * ldc;
__shared__ float sum[ BLOCK_SIZE ];
float lsum;
/* NOTE HERE C is the C at position C(i, 0)
* if blockIdx.x<it it performs the V(i:n,i)' * V(i:n,1:i-1)' used for computing T
* if blockIdx.x>it it performs w := v' * C */
lsum = MAGMA_S_ZERO;
for( int j = tx; j < m; j += BLOCK_SIZE ){
if (j==0){
lsum += MAGMA_S_MUL( MAGMA_S_ONE, dc[j] );
v[j] = MAGMA_S_ONE;
}
else
lsum += MAGMA_S_MUL( MAGMA_S_CNJG( v[j] ), dc[j] );
}
sum[tx] = lsum;
magma_sum_reduce< BLOCK_SIZE >( tx, sum );
/* C := C - v * w */
__syncthreads();
float z__1 = - MAGMA_S_CNJG(*tau) * sum[0];
if (blockIdx.x>it){
for( int j = m-tx-1; j>=0 ; j -= BLOCK_SIZE )
dc[j] += z__1 * v[j];
__syncthreads();
/* Adjust the rest of the column norms */
/*
if (tx==0){
float temp = MAGMA_S_ABS( dc[0] ) / xnorm[blockIdx.x-it-1];
temp = (temp + 1.) * (1. - temp);
xnorm[blockIdx.x-it-1] = xnorm[blockIdx.x-it-1] * sqrt(temp);
}
*/
}
else
{
if (blockIdx.x==it)
*(T+it) = *tau;
else
*(T+blockIdx.x) = MAGMA_S_CNJG(z__1);
}
}
else if (blockIdx.x<=it)// in case tau is zero, set the corresponding column of T to zero
{
*(T+blockIdx.x) = MAGMA_S_ZERO;
}
}
//==============================================================================
extern "C"
__global__
void magma_strmv_kernel(const float *T, int ldt, float *t)
{
const int tx = threadIdx.x;
T += tx;
__shared__ float tlocal[ BLOCK_SIZE ];
float res = MAGMA_S_MAKE(0., 0.);
tlocal[tx] = t[tx];
__syncthreads();
#pragma unroll
for(int j=0; j<blockDim.x; j++)
res += T[j*ldt]*tlocal[j];
t[tx] = res;
}
extern "C"
__global__
void magma_strmv_kernel2(const float *T, int ldt, float *t,
float *y, float *tau)
{
const int tx = threadIdx.x;
T += blockIdx.x;
__shared__ float sum[ 128 ];
sum[tx] = T[tx*ldt]*t[tx];
magma_sum_reduce_n(blockDim.x, tx, sum);
__syncthreads();
if (tx==0){
y[blockIdx.x] = sum[0];
if (blockIdx.x==0)
y[gridDim.x] = tau[0];
}
}
//==============================================================================
extern "C"
__global__
void magma_strmv_tkernel(float *T, int ldt, float *t, float *y)
{
const int tx = threadIdx.x;
T += blockIdx.x*ldt;
__shared__ float sum[ 128 ];
sum[tx] = MAGMA_S_CNJG(T[tx])*t[tx];
magma_sum_reduce_n(blockDim.x, tx, sum);
__syncthreads();
if (tx==0)
y[blockIdx.x] = sum[0];
}
//==============================================================================
/*
Apply a real elementary reflector H to a real M-by-N
matrix C from the left. H is represented in the form
H = I - tau * v * v'
where tau is a real scalar and v is a real vector.
If tau = 0, then H is taken to be the unit matrix.
To apply H' (the conjugate transpose of H), supply conjg(tau)
instead of tau.
The norms of v(:, 1:n) are given as input in xnorm(1:n). On exit, the norms
are adjusted to hold the norms of v(2:m,2:n). This is a difference from
LAPACK's slarf routine.
*/
extern "C" void
magma_slarfx_gpu(magma_int_t m, magma_int_t n, float *v, float *tau,
float *c, magma_int_t ldc, float *xnorm,
float *T, magma_int_t i, float *work )
{
magma_int_t N = n + i + 1;
if (i==0)
hipLaunchKernelGGL(( magma_slarfx_kernel), dim3(N), dim3(BLOCK_SIZE), 0, magma_stream , m, v, tau, c, ldc, xnorm, T+i*N, i);
else
hipLaunchKernelGGL(( magma_slarfx_kernel), dim3(N), dim3(BLOCK_SIZE), 0, magma_stream , m, v, tau, c, ldc, xnorm, work, i);
if (i > 0){
//magma_strmv_kernel<<< 1, i, 0, magma_stream >>>( T, N, T+i*N);
hipLaunchKernelGGL(( magma_strmv_kernel2), dim3(i), dim3(i), 0, magma_stream , T, N, work, T+i*N, tau);
}
}
//==============================================================================
|
3fb154cd12d0bfe7a6dc67f816e6c6a540f2fc5a.cu
|
/*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@generated from zlarfx.cu normal z -> s, Tue Sep 2 12:38:15 2014
*/
#include "common_magma.h"
#include "commonblas_s.h"
#include "magma_templates.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
#define BLOCK_SIZEx 32
#define BLOCK_SIZEy 16
//==============================================================================
__global__
void magma_slarfx_kernel( int m, float *v, float *tau,
float *c, int ldc, float *xnorm,
float *T, int it )
{
if ( !MAGMA_S_EQUAL(*tau, MAGMA_S_ZERO) ) {
const int tx = threadIdx.x;
//float *dc = c + (blockIdx.x-it-1) * ldc;
float *dc = c + (blockIdx.x) * ldc;
__shared__ float sum[ BLOCK_SIZE ];
float lsum;
/* NOTE HERE C is the C at position C(i, 0)
* if blockIdx.x<it it performs the V(i:n,i)' * V(i:n,1:i-1)' used for computing T
* if blockIdx.x>it it performs w := v' * C */
lsum = MAGMA_S_ZERO;
for( int j = tx; j < m; j += BLOCK_SIZE ){
if (j==0){
lsum += MAGMA_S_MUL( MAGMA_S_ONE, dc[j] );
v[j] = MAGMA_S_ONE;
}
else
lsum += MAGMA_S_MUL( MAGMA_S_CNJG( v[j] ), dc[j] );
}
sum[tx] = lsum;
magma_sum_reduce< BLOCK_SIZE >( tx, sum );
/* C := C - v * w */
__syncthreads();
float z__1 = - MAGMA_S_CNJG(*tau) * sum[0];
if (blockIdx.x>it){
for( int j = m-tx-1; j>=0 ; j -= BLOCK_SIZE )
dc[j] += z__1 * v[j];
__syncthreads();
/* Adjust the rest of the column norms */
/*
if (tx==0){
float temp = MAGMA_S_ABS( dc[0] ) / xnorm[blockIdx.x-it-1];
temp = (temp + 1.) * (1. - temp);
xnorm[blockIdx.x-it-1] = xnorm[blockIdx.x-it-1] * sqrt(temp);
}
*/
}
else
{
if (blockIdx.x==it)
*(T+it) = *tau;
else
*(T+blockIdx.x) = MAGMA_S_CNJG(z__1);
}
}
else if (blockIdx.x<=it)// in case tau is zero, set the corresponding column of T to zero
{
*(T+blockIdx.x) = MAGMA_S_ZERO;
}
}
//==============================================================================
extern "C"
__global__
void magma_strmv_kernel(const float *T, int ldt, float *t)
{
const int tx = threadIdx.x;
T += tx;
__shared__ float tlocal[ BLOCK_SIZE ];
float res = MAGMA_S_MAKE(0., 0.);
tlocal[tx] = t[tx];
__syncthreads();
#pragma unroll
for(int j=0; j<blockDim.x; j++)
res += T[j*ldt]*tlocal[j];
t[tx] = res;
}
extern "C"
__global__
void magma_strmv_kernel2(const float *T, int ldt, float *t,
float *y, float *tau)
{
const int tx = threadIdx.x;
T += blockIdx.x;
__shared__ float sum[ 128 ];
sum[tx] = T[tx*ldt]*t[tx];
magma_sum_reduce_n(blockDim.x, tx, sum);
__syncthreads();
if (tx==0){
y[blockIdx.x] = sum[0];
if (blockIdx.x==0)
y[gridDim.x] = tau[0];
}
}
//==============================================================================
extern "C"
__global__
void magma_strmv_tkernel(float *T, int ldt, float *t, float *y)
{
const int tx = threadIdx.x;
T += blockIdx.x*ldt;
__shared__ float sum[ 128 ];
sum[tx] = MAGMA_S_CNJG(T[tx])*t[tx];
magma_sum_reduce_n(blockDim.x, tx, sum);
__syncthreads();
if (tx==0)
y[blockIdx.x] = sum[0];
}
//==============================================================================
/*
Apply a real elementary reflector H to a real M-by-N
matrix C from the left. H is represented in the form
H = I - tau * v * v'
where tau is a real scalar and v is a real vector.
If tau = 0, then H is taken to be the unit matrix.
To apply H' (the conjugate transpose of H), supply conjg(tau)
instead of tau.
The norms of v(:, 1:n) are given as input in xnorm(1:n). On exit, the norms
are adjusted to hold the norms of v(2:m,2:n). This is a difference from
LAPACK's slarf routine.
*/
extern "C" void
magma_slarfx_gpu(magma_int_t m, magma_int_t n, float *v, float *tau,
float *c, magma_int_t ldc, float *xnorm,
float *T, magma_int_t i, float *work )
{
magma_int_t N = n + i + 1;
if (i==0)
magma_slarfx_kernel<<< N, BLOCK_SIZE, 0, magma_stream >>>( m, v, tau, c, ldc, xnorm, T+i*N, i);
else
magma_slarfx_kernel<<< N, BLOCK_SIZE, 0, magma_stream >>>( m, v, tau, c, ldc, xnorm, work, i);
if (i > 0){
//magma_strmv_kernel<<< 1, i, 0, magma_stream >>>( T, N, T+i*N);
magma_strmv_kernel2<<< i, i, 0, magma_stream >>>( T, N, work, T+i*N, tau);
}
}
//==============================================================================
|
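magma_slarfx_kernel accumulates a per-thread partial sum of v' * C and then collapses it with magma_sum_reduce, a block-wide shared-memory reduction helper from magma_templates.h. A minimal sketch of that reduction step in plain CUDA is below; the helper and kernel names are invented for the example and this is not the MAGMA implementation.
#include <cstdio>
#include <cuda_runtime.h>

#define BLOCK 256

// Tree reduction of s[0..BLOCK) into s[0]; every thread in the block must call this.
__device__ void block_sum_reduce(int tx, float* s) {
    for (int stride = BLOCK / 2; stride > 0; stride >>= 1) {
        __syncthreads();
        if (tx < stride) s[tx] += s[tx + stride];
    }
    __syncthreads();
}

// One block computes the dot product of v and c (the "w := v' * C" step for one column).
__global__ void dot_one_column(const float* v, const float* c, float* out, int m) {
    __shared__ float sum[BLOCK];
    int tx = threadIdx.x;
    float lsum = 0.0f;
    for (int j = tx; j < m; j += BLOCK) lsum += v[j] * c[j];
    sum[tx] = lsum;
    block_sum_reduce(tx, sum);
    if (tx == 0) *out = sum[0];
}

int main() {
    const int m = 10000;
    float *v, *c, *out;
    cudaMalloc(&v, m * sizeof(float));
    cudaMalloc(&c, m * sizeof(float));
    cudaMalloc(&out, sizeof(float));
    cudaMemset(v, 0, m * sizeof(float));
    cudaMemset(c, 0, m * sizeof(float));
    dot_one_column<<<1, BLOCK>>>(v, c, out, m);
    cudaDeviceSynchronize();
    cudaFree(v); cudaFree(c); cudaFree(out);
    return 0;
}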
6d36921d8611e9c56a97f15961c852efd087f41b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Test program based on:
https://www.thomas-krenn.com/de/wiki/CUDA_Programmierung
*/
#include<stdio.h>
#include<cuda.h>
#include<stdlib.h>
// Vars
// Host-Vars
int* h_A;
int* h_B;
int* h_C;
// Device-Vars
int* d_A;
int* d_B;
int* d_C;
// Prototypes
void RandomInit(int* data, int n);
int CheckResults(int* A, int* B, int* C, int n);
// Kernel
__global__ void VecAdd(const int* A, const int* B, int* C, int N) {
// Get the index
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < N)
C[i] = A[i] + B[i];
}
int main(void) {
printf("Vector addtion\n");
//int i;
int N = 100000 * 1000;
size_t size = N * sizeof(int);
// Allocate memory on the host
h_A = (int*)malloc(size);
h_B = (int*)malloc(size);
h_C = (int*)malloc(size);
// Random Init
RandomInit(h_A, N);
RandomInit(h_B, N);
// Allocate memory on the device
hipMalloc((void**)&d_A, size);
hipMalloc((void**)&d_B, size);
hipMalloc((void**)&d_C, size);
// Copy the vectors to the device
hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice);
// Kernel launch
// An Nvidia GTX 1080 TI has 1024 threads per block
int threadsPerBlock = 1024;
int blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock;
printf("BlocksPerGrid = %i, ThreadsPerBlock = %i\n\n", blocksPerGrid, threadsPerBlock);
hipLaunchKernelGGL(( VecAdd), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_A, d_B, d_C, N);
// Wait for the device
hipDeviceSynchronize();
// Copy the result to the host
hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost);
// Check the results
if (CheckResults(h_A, h_B, h_C, N) == 0)
printf("Alles ok!\n");
else
printf("Fehler\n");
// Free the memory
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
free(h_A);
free(h_B);
free(h_C);
return 0;
}
// Fill the vector with random numbers
void RandomInit(int* data, int n) {
for (int i = 0; i < n; i++)
data[i] = rand() % (int) 100;
}
// Check the result
int CheckResults(int* A, int* B, int* C, int n) {
int i;
for (i = 0; i < n; i++) {
if ((A[i]+B[i]) != C[i])
return -1;
}
return 0;
}
|
6d36921d8611e9c56a97f15961c852efd087f41b.cu
|
/*
Test program based on:
https://www.thomas-krenn.com/de/wiki/CUDA_Programmierung
*/
#include<stdio.h>
#include<cuda.h>
#include<stdlib.h>
// Vars
// Host-Vars
int* h_A;
int* h_B;
int* h_C;
// Device-Vars
int* d_A;
int* d_B;
int* d_C;
// Prototypes
void RandomInit(int* data, int n);
int CheckResults(int* A, int* B, int* C, int n);
// Kernel
__global__ void VecAdd(const int* A, const int* B, int* C, int N) {
// Get the index
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < N)
C[i] = A[i] + B[i];
}
int main(void) {
printf("Vector addtion\n");
//int i;
int N = 100000 * 1000;
size_t size = N * sizeof(int);
// Allocate memory on the host
h_A = (int*)malloc(size);
h_B = (int*)malloc(size);
h_C = (int*)malloc(size);
// Random Init
RandomInit(h_A, N);
RandomInit(h_B, N);
// Allocate memory on the device
cudaMalloc((void**)&d_A, size);
cudaMalloc((void**)&d_B, size);
cudaMalloc((void**)&d_C, size);
// Copy the vectors to the device
cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
// Kernel launch
// An Nvidia GTX 1080 TI has 1024 threads per block
int threadsPerBlock = 1024;
int blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock;
printf("BlocksPerGrid = %i, ThreadsPerBlock = %i\n\n", blocksPerGrid, threadsPerBlock);
VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
// Wait for the device
cudaDeviceSynchronize();
// Copy the result to the host
cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
// Check the results
if (CheckResults(h_A, h_B, h_C, N) == 0)
printf("Alles ok!\n");
else
printf("Fehler\n");
// Free the memory
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
free(h_A);
free(h_B);
free(h_C);
return 0;
}
// Fill the vector with random numbers
void RandomInit(int* data, int n) {
for (int i = 0; i < n; i++)
data[i] = rand() % (int) 100;
}
// Check the result
int CheckResults(int* A, int* B, int* C, int n) {
int i;
for (i = 0; i < n; i++) {
if ((A[i]+B[i]) != C[i])
return -1;
}
return 0;
}
|
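The vector-addition example above verifies results on the host but never checks CUDA API return codes or launch errors. A small sketch of the usual checking pattern around the same kind of launch follows; the CHECK_CUDA macro is ad hoc, not part of the original program.
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

#define CHECK_CUDA(call)                                                      \
    do {                                                                      \
        cudaError_t err_ = (call);                                            \
        if (err_ != cudaSuccess) {                                            \
            fprintf(stderr, "CUDA error %s at %s:%d\n",                       \
                    cudaGetErrorString(err_), __FILE__, __LINE__);            \
            exit(EXIT_FAILURE);                                               \
        }                                                                     \
    } while (0)

__global__ void VecAddChecked(const int* A, const int* B, int* C, int N) {
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < N) C[i] = A[i] + B[i];
}

int main() {
    const int N = 1 << 20;
    int *d_A, *d_B, *d_C;
    CHECK_CUDA(cudaMalloc(&d_A, N * sizeof(int)));
    CHECK_CUDA(cudaMalloc(&d_B, N * sizeof(int)));
    CHECK_CUDA(cudaMalloc(&d_C, N * sizeof(int)));
    VecAddChecked<<<(N + 1023) / 1024, 1024>>>(d_A, d_B, d_C, N);
    CHECK_CUDA(cudaGetLastError());        // catches bad launch configurations
    CHECK_CUDA(cudaDeviceSynchronize());   // catches errors raised during execution
    CHECK_CUDA(cudaFree(d_A));
    CHECK_CUDA(cudaFree(d_B));
    CHECK_CUDA(cudaFree(d_C));
    return 0;
}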
7500ffe554832571b9563228f387f36e024d7dd9.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// fermi
// Avoid mangling of function names
extern "C" {
__global__ void matmulKernel (int n, int m, int p, float* c, const float* a, const float* b);
}
__global__ void matmulKernel (int n, int m, int p, float* c, const float* a, const float* b) {
const int ttj = threadIdx.x;
const int wtj = threadIdx.y;
const int bj = blockIdx.x;
const int bi = blockIdx.y;
__shared__ float l_a[2048];
float sums[16];
for (int ei = 0; ei < 16; ei++) {
sums[ei] = 0.0;
}
for (int l = 0; l < p / 128; l++) {
for (int ei = 0; ei < 16; ei++) {
l_a[32 * wtj + ttj + 128 * ei] = a[32 * wtj + ttj + 128 * l + (ei
+ 16 * bi) * (128 * (p / 128))];
}
__syncthreads();
for (int k2 = 0; k2 < p / (p / 128); k2++) {
const float bkj = b[128 * bj + (32 * wtj + ttj) + (l * p / (p /
128) + k2) * m];
for (int ei = 0; ei < 16; ei++) {
sums[ei] += l_a[k2 + 128 * ei] * bkj;
}
}
__syncthreads();
}
for (int ei = 0; ei < 16; ei++) {
c[32 * wtj + ttj + 128 * bj + (ei + 16 * bi) * (128 * (m / 128))] +=
sums[ei];
}
}
|
7500ffe554832571b9563228f387f36e024d7dd9.cu
|
// fermi
// Avoid mangling of function names
extern "C" {
__global__ void matmulKernel (int n, int m, int p, float* c, const float* a, const float* b);
}
__global__ void matmulKernel (int n, int m, int p, float* c, const float* a, const float* b) {
const int ttj = threadIdx.x;
const int wtj = threadIdx.y;
const int bj = blockIdx.x;
const int bi = blockIdx.y;
__shared__ float l_a[2048];
float sums[16];
for (int ei = 0; ei < 16; ei++) {
sums[ei] = 0.0;
}
for (int l = 0; l < p / 128; l++) {
for (int ei = 0; ei < 16; ei++) {
l_a[32 * wtj + ttj + 128 * ei] = a[32 * wtj + ttj + 128 * l + (ei
+ 16 * bi) * (128 * (p / 128))];
}
__syncthreads();
for (int k2 = 0; k2 < p / (p / 128); k2++) {
const float bkj = b[128 * bj + (32 * wtj + ttj) + (l * p / (p /
128) + k2) * m];
for (int ei = 0; ei < 16; ei++) {
sums[ei] += l_a[k2 + 128 * ei] * bkj;
}
}
__syncthreads();
}
for (int ei = 0; ei < 16; ei++) {
c[32 * wtj + ttj + 128 * bj + (ei + 16 * bi) * (128 * (m / 128))] +=
sums[ei];
}
}
|
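matmulKernel above stages 16 rows of A per block in shared memory and keeps 16 running sums per thread in registers, assuming m and p are multiples of 128. For contrast, a much simpler square-tile shared-memory version of the same C += A * B update is sketched below; it assumes n, m, and p are multiples of TILE and is not the Fermi-tuned variant above.
#include <cuda_runtime.h>

#define TILE 16

// C (n x m) += A (n x p) * B (p x m), row-major; n, m, p assumed multiples of TILE.
// Launch as: matmul_tiled_sketch<<<dim3(m / TILE, n / TILE), dim3(TILE, TILE)>>>(n, m, p, C, A, B);
__global__ void matmul_tiled_sketch(int n, int m, int p,
                                    float* C, const float* A, const float* B) {
    __shared__ float sA[TILE][TILE];
    __shared__ float sB[TILE][TILE];
    int row = blockIdx.y * TILE + threadIdx.y;
    int col = blockIdx.x * TILE + threadIdx.x;
    float acc = 0.0f;
    for (int t = 0; t < p / TILE; ++t) {
        sA[threadIdx.y][threadIdx.x] = A[row * p + t * TILE + threadIdx.x];
        sB[threadIdx.y][threadIdx.x] = B[(t * TILE + threadIdx.y) * m + col];
        __syncthreads();
        for (int k = 0; k < TILE; ++k)
            acc += sA[threadIdx.y][k] * sB[k][threadIdx.x];
        __syncthreads();
    }
    C[row * m + col] += acc;
}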
06f81eb9e9c6c857e77dd29e7c03097643c90515.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hipcub/hipcub.hpp>
#include <cstdlib>
#include <sys/time.h>
#include <unistd.h>
using namespace std;
#define cudaSucceeded(ans) { cudaAssert((ans), __FILE__, __LINE__); }
inline void cudaAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess) {
std::cerr << "cudaAssert failed: "
<< hipGetErrorString(code)
<< file << ":" << line
<< std::endl;
if (abort) {
exit(code);
}
}
}
static struct timeval t_start, t_end;
void start_timing() {
cudaSucceeded(hipDeviceSynchronize());
gettimeofday(&t_start, NULL);
}
void end_timing() {
cudaSucceeded(hipDeviceSynchronize());
gettimeofday(&t_end, NULL);
}
int get_us() {
return (t_end.tv_sec*1000000+t_end.tv_usec) - (t_start.tv_sec*1000000+t_start.tv_usec);
}
class make_option {
private:
__host__ __device__ static
float horner(float x) {
float c1 = 0.31938153, c2 = -0.356563782, c3 = 1.781477937, c4 = -1.821255978, c5 = 1.330274429;
return x * (c1 + x * (c2 + x * (c3 + x * (c4 + x * c5))));
}
__host__ __device__ static
float cnd0(float d) {
float k = 1.0 / (1.0 + 0.2316419 * abs(d));
float p = horner(k);
float rsqrt2pi = 0.39894228040143267793994605993438;
return rsqrt2pi * exp(-0.5*d*d) * p;
}
__host__ __device__ static
float cnd(float d) {
float c = cnd0(d);
return 0.0 < d ? 1.0 - c : c;
}
int i;
const double *d_rs;
const double *d_vs;
const int days;
typedef make_option self_type;
public:
__host__ __device__
make_option(int i, const double *d_rs, const double *d_vs, int days) :
i(i), d_rs(d_rs), d_vs(d_vs), days(days) {}
typedef std::random_access_iterator_tag iterator_category;
typedef double value_type;
typedef int difference_type;
typedef double* pointer;
typedef double reference;
__host__ __device__
double value_at(int i) const {
int option = i / days;
int day = i % days;
double r = d_rs[option];
double v = d_vs[option];
bool call = day % 2 == 0;
double price = 58 + 5 * (1+day)/double(days);
double strike = 65;
double years = (1+day)/365.0;
double v_sqrtT = v * sqrt(years);
double d1 = (log(price / strike) + (r + 0.5 * v * v) * years) / v_sqrtT;
double d2 = d1 - v_sqrtT;
double cndD1 = cnd(d1);
double cndD2 = cnd(d2);
double x_expRT = strike * exp(-r * years);
if (call) {
return price * cndD1 - x_expRT * cndD2;
} else {
return x_expRT * (1.0 - cndD2) - price * (1.0 - cndD1);
}
}
__device__
double operator*() const {
return value_at(i);
}
__host__ __device__ self_type operator++(int)
{
self_type retval = *this;
i++;
return retval;
}
__host__ __device__ __forceinline__ self_type operator++()
{
i++;
return *this;
}
template <typename Distance>
__host__ __device__ __forceinline__ self_type operator+(Distance n) const
{
self_type retval(i + int(n), d_rs, d_vs, days);
return retval;
}
template <typename Distance>
__host__ __device__ __forceinline__ self_type& operator+=(Distance n)
{
i += (int) n;
return *this;
}
template <typename Distance>
__host__ __device__ __forceinline__ self_type operator-(Distance n) const
{
self_type retval(i - (int)n, d_rs, d_vs, days);
return retval;
}
template <typename Distance>
__host__ __device__ __forceinline__ self_type& operator-=(Distance n)
{
i -= n;
return *this;
}
template <typename Distance>
__host__ __device__ __forceinline__ reference operator[](Distance n) const
{
return value_at(i+(int)n);
}
};
int main(int argc, char** argv) {
int num_segments = pow(2,atoi(argv[1]));
int segment_size = pow(2,atoi(argv[2]));
cerr << num_segments << " segments of " << segment_size << " elements each" << endl;
int *h_offsets = new int[num_segments+1];
for (int i = 0; i < num_segments+1; i++) {
h_offsets[i] = i * segment_size;
}
double *h_rs = new double[num_segments];
double *h_vs = new double[num_segments];
srand(31337);
for (int i = 0; i < num_segments; i++) {
h_rs[i] = rand()/double(RAND_MAX);
h_vs[i] = rand()/double(RAND_MAX);
}
int *d_offsets;
double *d_rs;
double *d_vs;
int *d_out;
cudaSucceeded(hipMalloc(&d_offsets, (num_segments+1)*sizeof(int)));
cudaSucceeded(hipMalloc(&d_rs, num_segments*sizeof(double)));
cudaSucceeded(hipMalloc(&d_vs, num_segments*sizeof(double)));
cudaSucceeded(hipMalloc(&d_out, num_segments*sizeof(double)));
cudaSucceeded(hipMemcpy(d_offsets, h_offsets, (num_segments+1)*sizeof(int),
hipMemcpyHostToDevice));
cudaSucceeded(hipMemcpy(d_rs, h_rs, num_segments*sizeof(double),
hipMemcpyHostToDevice));
cudaSucceeded(hipMemcpy(d_vs, h_vs, num_segments*sizeof(double),
hipMemcpyHostToDevice));
cudaSucceeded(hipDeviceSynchronize());
void *d_temp_storage = NULL;
// Now time.
static const int num_runs = 100;
int total_us = 0;
size_t temp_storage_bytes = 0;
cudaSucceeded(hipcub::DeviceSegmentedReduce::Sum
(d_temp_storage, temp_storage_bytes,
make_option(0, d_rs, d_vs, segment_size), d_out,
num_segments,
d_offsets, d_offsets + 1));
cudaSucceeded(hipMalloc(&d_out, num_segments*sizeof(int)));
cudaSucceeded(hipMalloc(&d_temp_storage, temp_storage_bytes));
for (int i = 0; i < num_runs; i++) {
start_timing();
cudaSucceeded(hipcub::DeviceSegmentedReduce::Sum
(d_temp_storage, temp_storage_bytes,
make_option(0, d_rs, d_vs, segment_size), d_out,
num_segments,
d_offsets, d_offsets + 1));
end_timing();
total_us += get_us();
}
cerr << total_us/num_runs << "us" << endl;
if (!isatty(1)) {
cout << total_us/num_runs;
}
int * h_out = new int[num_segments];
cudaSucceeded(hipMemcpy(h_out, d_out, num_segments*sizeof(int),
hipMemcpyDeviceToHost));
// No validation; trust CUB.
return 0;
}
|
06f81eb9e9c6c857e77dd29e7c03097643c90515.cu
|
#include <cub/cub.cuh>
#include <cstdlib>
#include <sys/time.h>
#include <unistd.h>
using namespace std;
#define cudaSucceeded(ans) { cudaAssert((ans), __FILE__, __LINE__); }
inline void cudaAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess) {
std::cerr << "cudaAssert failed: "
<< cudaGetErrorString(code)
<< file << ":" << line
<< std::endl;
if (abort) {
exit(code);
}
}
}
static struct timeval t_start, t_end;
void start_timing() {
cudaSucceeded(cudaDeviceSynchronize());
gettimeofday(&t_start, NULL);
}
void end_timing() {
cudaSucceeded(cudaDeviceSynchronize());
gettimeofday(&t_end, NULL);
}
int get_us() {
return (t_end.tv_sec*1000000+t_end.tv_usec) - (t_start.tv_sec*1000000+t_start.tv_usec);
}
class make_option {
private:
__host__ __device__ static
float horner(float x) {
float c1 = 0.31938153, c2 = -0.356563782, c3 = 1.781477937, c4 = -1.821255978, c5 = 1.330274429;
return x * (c1 + x * (c2 + x * (c3 + x * (c4 + x * c5))));
}
__host__ __device__ static
float cnd0(float d) {
float k = 1.0 / (1.0 + 0.2316419 * abs(d));
float p = horner(k);
float rsqrt2pi = 0.39894228040143267793994605993438;
return rsqrt2pi * exp(-0.5*d*d) * p;
}
__host__ __device__ static
float cnd(float d) {
float c = cnd0(d);
return 0.0 < d ? 1.0 - c : c;
}
int i;
const double *d_rs;
const double *d_vs;
const int days;
typedef make_option self_type;
public:
__host__ __device__
make_option(int i, const double *d_rs, const double *d_vs, int days) :
i(i), d_rs(d_rs), d_vs(d_vs), days(days) {}
typedef std::random_access_iterator_tag iterator_category;
typedef double value_type;
typedef int difference_type;
typedef double* pointer;
typedef double reference;
__host__ __device__
double value_at(int i) const {
int option = i / days;
int day = i % days;
double r = d_rs[option];
double v = d_vs[option];
bool call = day % 2 == 0;
double price = 58 + 5 * (1+day)/double(days);
double strike = 65;
double years = (1+day)/365.0;
double v_sqrtT = v * sqrt(years);
double d1 = (log(price / strike) + (r + 0.5 * v * v) * years) / v_sqrtT;
double d2 = d1 - v_sqrtT;
double cndD1 = cnd(d1);
double cndD2 = cnd(d2);
double x_expRT = strike * exp(-r * years);
if (call) {
return price * cndD1 - x_expRT * cndD2;
} else {
return x_expRT * (1.0 - cndD2) - price * (1.0 - cndD1);
}
}
__device__
double operator*() const {
return value_at(i);
}
__host__ __device__ self_type operator++(int)
{
self_type retval = *this;
i++;
return retval;
}
__host__ __device__ __forceinline__ self_type operator++()
{
i++;
return *this;
}
template <typename Distance>
__host__ __device__ __forceinline__ self_type operator+(Distance n) const
{
self_type retval(i + int(n), d_rs, d_vs, days);
return retval;
}
template <typename Distance>
__host__ __device__ __forceinline__ self_type& operator+=(Distance n)
{
i += (int) n;
return *this;
}
template <typename Distance>
__host__ __device__ __forceinline__ self_type operator-(Distance n) const
{
self_type retval(i - (int)n, d_rs, d_vs, days);
return retval;
}
template <typename Distance>
__host__ __device__ __forceinline__ self_type& operator-=(Distance n)
{
i -= n;
return *this;
}
template <typename Distance>
__host__ __device__ __forceinline__ reference operator[](Distance n) const
{
return value_at(i+(int)n);
}
};
int main(int argc, char** argv) {
int num_segments = pow(2,atoi(argv[1]));
int segment_size = pow(2,atoi(argv[2]));
cerr << num_segments << " segments of " << segment_size << " elements each" << endl;
int *h_offsets = new int[num_segments+1];
for (int i = 0; i < num_segments+1; i++) {
h_offsets[i] = i * segment_size;
}
double *h_rs = new double[num_segments];
double *h_vs = new double[num_segments];
srand(31337);
for (int i = 0; i < num_segments; i++) {
h_rs[i] = rand()/double(RAND_MAX);
h_vs[i] = rand()/double(RAND_MAX);
}
int *d_offsets;
double *d_rs;
double *d_vs;
int *d_out;
cudaSucceeded(cudaMalloc(&d_offsets, (num_segments+1)*sizeof(int)));
cudaSucceeded(cudaMalloc(&d_rs, num_segments*sizeof(double)));
cudaSucceeded(cudaMalloc(&d_vs, num_segments*sizeof(double)));
cudaSucceeded(cudaMalloc(&d_out, num_segments*sizeof(double)));
cudaSucceeded(cudaMemcpy(d_offsets, h_offsets, (num_segments+1)*sizeof(int),
cudaMemcpyHostToDevice));
cudaSucceeded(cudaMemcpy(d_rs, h_rs, num_segments*sizeof(double),
cudaMemcpyHostToDevice));
cudaSucceeded(cudaMemcpy(d_vs, h_vs, num_segments*sizeof(double),
cudaMemcpyHostToDevice));
cudaSucceeded(cudaDeviceSynchronize());
void *d_temp_storage = NULL;
// Now time.
static const int num_runs = 100;
int total_us = 0;
size_t temp_storage_bytes = 0;
cudaSucceeded(cub::DeviceSegmentedReduce::Sum
(d_temp_storage, temp_storage_bytes,
make_option(0, d_rs, d_vs, segment_size), d_out,
num_segments,
d_offsets, d_offsets + 1));
cudaSucceeded(cudaMalloc(&d_out, num_segments*sizeof(int)));
cudaSucceeded(cudaMalloc(&d_temp_storage, temp_storage_bytes));
for (int i = 0; i < num_runs; i++) {
start_timing();
cudaSucceeded(cub::DeviceSegmentedReduce::Sum
(d_temp_storage, temp_storage_bytes,
make_option(0, d_rs, d_vs, segment_size), d_out,
num_segments,
d_offsets, d_offsets + 1));
end_timing();
total_us += get_us();
}
cerr << total_us/num_runs << "us" << endl;
if (!isatty(1)) {
cout << total_us/num_runs;
}
int * h_out = new int[num_segments];
cudaSucceeded(cudaMemcpy(h_out, d_out, num_segments*sizeof(int),
cudaMemcpyDeviceToHost));
// No validation; trust CUB.
return 0;
}
|
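The benchmark above feeds cub::DeviceSegmentedReduce::Sum a custom iterator that prices options on the fly; the CUB call pattern itself is the usual two-phase one (a size query with a NULL temp buffer, then the real run). The same pattern on a plain device array, as a minimal sketch with the same begin/end offset convention:
#include <cub/cub.cuh>
#include <cuda_runtime.h>
#include <vector>
#include <cstdio>

int main() {
    const int num_segments = 4, segment_size = 8;
    const int n = num_segments * segment_size;

    std::vector<float> h_in(n, 1.0f);
    std::vector<int> h_offsets(num_segments + 1);
    for (int i = 0; i <= num_segments; ++i) h_offsets[i] = i * segment_size;

    float *d_in, *d_out;
    int* d_offsets;
    cudaMalloc(&d_in, n * sizeof(float));
    cudaMalloc(&d_out, num_segments * sizeof(float));
    cudaMalloc(&d_offsets, (num_segments + 1) * sizeof(int));
    cudaMemcpy(d_in, h_in.data(), n * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_offsets, h_offsets.data(), (num_segments + 1) * sizeof(int), cudaMemcpyHostToDevice);

    // Phase 1: ask CUB how much temporary storage it needs.
    void* d_temp = NULL;
    size_t temp_bytes = 0;
    cub::DeviceSegmentedReduce::Sum(d_temp, temp_bytes, d_in, d_out,
                                    num_segments, d_offsets, d_offsets + 1);
    cudaMalloc(&d_temp, temp_bytes);

    // Phase 2: run the reduction for real.
    cub::DeviceSegmentedReduce::Sum(d_temp, temp_bytes, d_in, d_out,
                                    num_segments, d_offsets, d_offsets + 1);
    cudaDeviceSynchronize();

    std::vector<float> h_out(num_segments);
    cudaMemcpy(h_out.data(), d_out, num_segments * sizeof(float), cudaMemcpyDeviceToHost);
    for (int i = 0; i < num_segments; ++i) printf("segment %d sum = %f\n", i, h_out[i]);

    cudaFree(d_in); cudaFree(d_out); cudaFree(d_offsets); cudaFree(d_temp);
    return 0;
}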
7ee4bd787303ffe9066bff9f15524d60aaf88001.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "SetMatrixVauleMinMaxY.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *matrix = NULL;
hipMalloc(&matrix, XSIZE*YSIZE*sizeof(float));
int cols = YSIZE;
int size = XSIZE*YSIZE;
int id_min = 1;
int id_max = 1;
float value = 2;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((SetMatrixVauleMinMaxY), dim3(gridBlock), dim3(threadBlock), 0, 0, matrix,cols,size,id_min,id_max,value);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((SetMatrixVauleMinMaxY), dim3(gridBlock), dim3(threadBlock), 0, 0, matrix,cols,size,id_min,id_max,value);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((SetMatrixVauleMinMaxY), dim3(gridBlock), dim3(threadBlock), 0, 0, matrix,cols,size,id_min,id_max,value);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
7ee4bd787303ffe9066bff9f15524d60aaf88001.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "SetMatrixVauleMinMaxY.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *matrix = NULL;
cudaMalloc(&matrix, XSIZE*YSIZE*sizeof(float));
int cols = YSIZE;
int size = XSIZE*YSIZE;
int id_min = 1;
int id_max = 1;
float value = 2;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
SetMatrixVauleMinMaxY<<<gridBlock,threadBlock>>>(matrix,cols,size,id_min,id_max,value);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
SetMatrixVauleMinMaxY<<<gridBlock,threadBlock>>>(matrix,cols,size,id_min,id_max,value);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
SetMatrixVauleMinMaxY<<<gridBlock,threadBlock>>>(matrix,cols,size,id_min,id_max,value);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
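// --- Illustrative sketch (not part of the generated benchmark above) ---
// The two while-loops above pad iXSIZE/iYSIZE up to the next multiple of the
// block dimensions before dividing. The usual one-line equivalent is a ceiling
// division; make_grid_sketch is a hypothetical helper shown only to illustrate that.
#include <cuda_runtime.h>
static dim3 make_grid_sketch(int xsize, int ysize, int blockx, int blocky) {
  // ceil(xsize/blockx) x ceil(ysize/blocky) blocks, without mutating the sizes
  return dim3((xsize + blockx - 1) / blockx, (ysize + blocky - 1) / blocky);
}
// --- End of sketch ---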
|
14bd36d2dd8020ecbc2c0318e6cef1c16dc7f019.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
inline hipError_t checkCuda(hipError_t result)
{
if (result != hipSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s\n", hipGetErrorString(result));
}
return result;
}
__global__ void square_block(float *array, int k, int n) {
int index = k * blockIdx.x * blockDim.x + threadIdx.x*k;
int endIndex = k * blockIdx.x * blockDim.x + (threadIdx.x + 1)*k;
for (int i = index; i < endIndex; i++) {
if (i < n) {
array[i] = sqrt(array[i]);
//printf("blockIdx.x=%d , blockDim.x=%d , ti=%d, index=%d, i=%d\n", blockIdx.x, blockDim.x, threadIdx.x, index, i);
}
}
}
void square_cpu(float *array, int n) {
for (int i = 0; i < n; i++) {
array[i] = sqrt(array[i]);
}
}
void initArray(float *a, int size){
int i;
for(i=0; i<size; i++){
a[i] = i*0.01;
}
}
int main(int argc, char**argv) {
int N = atoi(argv[1]);
int threadsPerBlock = atoi(argv[2]);
int k = atoi(argv[3]);
if ((threadsPerBlock % 32 != 0) || (threadsPerBlock > 1024)) {
printf("threadsPerBlock must be multiple of 32 and less than 1024");
exit(1);
}
int size = N * sizeof(float);
float *x = (float*) malloc(size);
float *y = (float*) malloc(size);
initArray(x, N);
memcpy(y, x, size);
clock_t tStart = clock();
square_cpu(y, N);
printf("Time taken by Host: %.6fs\n", (double)(clock() - tStart) / CLOCKS_PER_SEC);
float *xd;
checkCuda( hipMalloc(&xd, size) );
checkCuda( hipMemcpy(xd, x, size, hipMemcpyHostToDevice) );
// Call square kernel
int blocksPerGrid = ((N + k - 1)/k + threadsPerBlock - 1)/threadsPerBlock; // ceil(N/k) threads, each handling k elements
tStart = clock();
hipLaunchKernelGGL(( square_block), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, xd, k, N);
hipDeviceSynchronize();
printf("Time taken by GPU: %.6fs\n", (double)(clock() - tStart) / CLOCKS_PER_SEC);
checkCuda( hipMemcpy(x, xd, size, hipMemcpyDeviceToHost) );
// Error Checking
for (int i = 0; i < N; i++) {
//printf("%d %f %f \n", i, x[i], y[i]);
if (x[i] != y[i]) {
printf("%d %f %f INVALID RESULTS \n", i, x[i], y[i]);
goto finalize;
}
}
printf("Successfull Sum\n");
finalize:
free(x);
free(y);
checkCuda(hipFree(xd));
return 0;
}
|
14bd36d2dd8020ecbc2c0318e6cef1c16dc7f019.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
inline cudaError_t checkCuda(cudaError_t result)
{
if (result != cudaSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result));
}
return result;
}
__global__ void square_block(float *array, int k, int n) {
int index = k * blockIdx.x * blockDim.x + threadIdx.x*k;
int endIndex = k * blockIdx.x * blockDim.x + (threadIdx.x + 1)*k;
for (int i = index; i < endIndex; i++) {
if (i < n) {
array[i] = sqrt(array[i]);
//printf("blockIdx.x=%d , blockDim.x=%d , ti=%d, index=%d, i=%d\n", blockIdx.x, blockDim.x, threadIdx.x, index, i);
}
}
}
void square_cpu(float *array, int n) {
for (int i = 0; i < n; i++) {
array[i] = sqrt(array[i]);
}
}
void initArray(float *a, int size){
int i;
for(i=0; i<size; i++){
a[i] = i*0.01;
}
}
int main(int argc, char**argv) {
int N = atoi(argv[1]);
int threadsPerBlock = atoi(argv[2]);
int k = atoi(argv[3]);
if ((threadsPerBlock % 32 != 0) || (threadsPerBlock > 1024)) {
printf("threadsPerBlock must be multiple of 32 and less than 1024");
exit(1);
}
int size = N * sizeof(float);
float *x = (float*) malloc(size);
float *y = (float*) malloc(size);
initArray(x, N);
memcpy(y, x, size);
clock_t tStart = clock();
square_cpu(y, N);
printf("Time taken by Host: %.6fs\n", (double)(clock() - tStart) / CLOCKS_PER_SEC);
float *xd;
checkCuda( cudaMalloc(&xd, size) );
checkCuda( cudaMemcpy(xd, x, size, cudaMemcpyHostToDevice) );
// Call square kernel
int blocksPerGrid = ((N + k - 1)/k + threadsPerBlock - 1)/threadsPerBlock; // ceil(N/k) threads, each handling k elements
tStart = clock();
square_block<<<blocksPerGrid, threadsPerBlock>>>(xd, k, N);
cudaDeviceSynchronize();
printf("Time taken by GPU: %.6fs\n", (double)(clock() - tStart) / CLOCKS_PER_SEC);
checkCuda( cudaMemcpy(x, xd, size, cudaMemcpyDeviceToHost) );
// Error Checking
for (int i = 0; i < N; i++) {
//printf("%d %f %f \n", i, x[i], y[i]);
if (x[i] != y[i]) {
printf("%d %f %f INVALID RESULTS \n", i, x[i], y[i]);
goto finalize;
}
}
printf("Successfull Sum\n");
finalize:
free(x);
free(y);
checkCuda(cudaFree(xd));
return 0;
}
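// --- Illustrative sketch (not part of the example above) ---
// square_block assigns each thread a contiguous chunk of k elements. An
// alternative that needs no k parameter and covers any N regardless of the
// launch size is a grid-stride loop; this hypothetical kernel assumes the
// same in-place square root is wanted.
__global__ void square_grid_stride_sketch(float *array, int n) {
  // Each thread starts at its global index and advances by the total number
  // of launched threads until the end of the array.
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
    array[i] = sqrtf(array[i]);
  }
}
// --- End of sketch ---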
|
40b6c0b9bc8c4d1a4f3e463622b0dd7907983f19.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// richu shaji abraham richursa
using namespace std;
__global__ void print(int *d_predicateArrry,int numberOfElements)
{
for(int i=0;i<numberOfElements;i++)
{
printf("index = %d value = %d\n",i,d_predicateArrry[i]);
}
}
|
40b6c0b9bc8c4d1a4f3e463622b0dd7907983f19.cu
|
#include "includes.h"
// richu shaji abraham richursa
using namespace std;
__global__ void print(int *d_predicateArrry,int numberOfElements)
{
for(int i=0;i<numberOfElements;i++)
{
printf("index = %d value = %d\n",i,d_predicateArrry[i]);
}
}
|
4216e5ea5af5584cde9a75ab135451d44a35e26b.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "hipcub/hipcub.hpp"
#include <vector>
#include "kernel.h"
#include "bboxUtils.h"
#include "cub_helper.h"
template <typename T_SCORE>
pluginStatus_t sortScoresPerImage_gpu(
hipStream_t stream,
const int num_images,
const int num_items_per_image,
void* unsorted_scores,
void* unsorted_bbox_indices,
void* sorted_scores,
void* sorted_bbox_indices,
void* workspace)
{
void* d_offsets = workspace;
void* cubWorkspace = nextWorkspacePtr((int8_t*) d_offsets, (num_images + 1) * sizeof(int));
setUniformOffsets(stream, num_images, num_items_per_image, (int*) d_offsets);
const int arrayLen = num_images * num_items_per_image;
size_t temp_storage_bytes = cubSortPairsWorkspaceSize<T_SCORE, int>(arrayLen, num_images);
hipcub::DeviceSegmentedRadixSort::SortPairsDescending(
cubWorkspace, temp_storage_bytes,
(const T_SCORE*) (unsorted_scores), (T_SCORE*) (sorted_scores),
(const int*) (unsorted_bbox_indices), (int*) (sorted_bbox_indices),
arrayLen, num_images,
(const int*) d_offsets, (const int*) d_offsets + 1,
0, sizeof(T_SCORE) * 8,
stream);
CSC(hipGetLastError(), STATUS_FAILURE);
return STATUS_SUCCESS;
}
// sortScoresPerImage LAUNCH CONFIG
typedef pluginStatus_t (*sspiFunc)(hipStream_t,
const int,
const int,
void*,
void*,
void*,
void*,
void*);
struct sspiLaunchConfig
{
DataType t_score;
sspiFunc function;
sspiLaunchConfig(DataType t_score)
: t_score(t_score)
{
}
sspiLaunchConfig(DataType t_score, sspiFunc function)
: t_score(t_score)
, function(function)
{
}
bool operator==(const sspiLaunchConfig& other)
{
return t_score == other.t_score;
}
};
static std::vector<sspiLaunchConfig> sspiFuncVec;
bool sspiInit()
{
sspiFuncVec.push_back(sspiLaunchConfig(DataType::kFLOAT,
sortScoresPerImage_gpu<float>));
return true;
}
static bool initialized = sspiInit();
pluginStatus_t sortScoresPerImage(
hipStream_t stream,
const int num_images,
const int num_items_per_image,
const DataType DT_SCORE,
void* unsorted_scores,
void* unsorted_bbox_indices,
void* sorted_scores,
void* sorted_bbox_indices,
void* workspace)
{
sspiLaunchConfig lc = sspiLaunchConfig(DT_SCORE);
for (unsigned i = 0; i < sspiFuncVec.size(); ++i)
{
if (lc == sspiFuncVec[i])
{
DEBUG_PRINTF("sortScoresPerImage kernel %d\n", i);
return sspiFuncVec[i].function(stream,
num_images,
num_items_per_image,
unsorted_scores,
unsorted_bbox_indices,
sorted_scores,
sorted_bbox_indices,
workspace);
}
}
return STATUS_BAD_PARAM;
}
size_t sortScoresPerImageWorkspaceSize(
const int num_images,
const int num_items_per_image,
const DataType DT_SCORE)
{
const int arrayLen = num_images * num_items_per_image;
size_t wss[2];
wss[0] = (num_images + 1) * sizeof(int); // offsets
if (DT_SCORE == DataType::kFLOAT)
{
wss[1] = cubSortPairsWorkspaceSize<float, int>(arrayLen, num_images); // cub workspace
}
else
{
printf("SCORE type not supported.\n");
return (size_t) -1;
}
return calculateTotalWorkspaceSize(wss, 2);
}
|
4216e5ea5af5584cde9a75ab135451d44a35e26b.cu
|
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "cub/cub.cuh"
#include <vector>
#include "kernel.h"
#include "bboxUtils.h"
#include "cub_helper.h"
template <typename T_SCORE>
pluginStatus_t sortScoresPerImage_gpu(
cudaStream_t stream,
const int num_images,
const int num_items_per_image,
void* unsorted_scores,
void* unsorted_bbox_indices,
void* sorted_scores,
void* sorted_bbox_indices,
void* workspace)
{
void* d_offsets = workspace;
void* cubWorkspace = nextWorkspacePtr((int8_t*) d_offsets, (num_images + 1) * sizeof(int));
setUniformOffsets(stream, num_images, num_items_per_image, (int*) d_offsets);
const int arrayLen = num_images * num_items_per_image;
size_t temp_storage_bytes = cubSortPairsWorkspaceSize<T_SCORE, int>(arrayLen, num_images);
cub::DeviceSegmentedRadixSort::SortPairsDescending(
cubWorkspace, temp_storage_bytes,
(const T_SCORE*) (unsorted_scores), (T_SCORE*) (sorted_scores),
(const int*) (unsorted_bbox_indices), (int*) (sorted_bbox_indices),
arrayLen, num_images,
(const int*) d_offsets, (const int*) d_offsets + 1,
0, sizeof(T_SCORE) * 8,
stream);
CSC(cudaGetLastError(), STATUS_FAILURE);
return STATUS_SUCCESS;
}
// sortScoresPerImage LAUNCH CONFIG
typedef pluginStatus_t (*sspiFunc)(cudaStream_t,
const int,
const int,
void*,
void*,
void*,
void*,
void*);
struct sspiLaunchConfig
{
DataType t_score;
sspiFunc function;
sspiLaunchConfig(DataType t_score)
: t_score(t_score)
{
}
sspiLaunchConfig(DataType t_score, sspiFunc function)
: t_score(t_score)
, function(function)
{
}
bool operator==(const sspiLaunchConfig& other)
{
return t_score == other.t_score;
}
};
static std::vector<sspiLaunchConfig> sspiFuncVec;
bool sspiInit()
{
sspiFuncVec.push_back(sspiLaunchConfig(DataType::kFLOAT,
sortScoresPerImage_gpu<float>));
return true;
}
static bool initialized = sspiInit();
pluginStatus_t sortScoresPerImage(
cudaStream_t stream,
const int num_images,
const int num_items_per_image,
const DataType DT_SCORE,
void* unsorted_scores,
void* unsorted_bbox_indices,
void* sorted_scores,
void* sorted_bbox_indices,
void* workspace)
{
sspiLaunchConfig lc = sspiLaunchConfig(DT_SCORE);
for (unsigned i = 0; i < sspiFuncVec.size(); ++i)
{
if (lc == sspiFuncVec[i])
{
DEBUG_PRINTF("sortScoresPerImage kernel %d\n", i);
return sspiFuncVec[i].function(stream,
num_images,
num_items_per_image,
unsorted_scores,
unsorted_bbox_indices,
sorted_scores,
sorted_bbox_indices,
workspace);
}
}
return STATUS_BAD_PARAM;
}
size_t sortScoresPerImageWorkspaceSize(
const int num_images,
const int num_items_per_image,
const DataType DT_SCORE)
{
const int arrayLen = num_images * num_items_per_image;
size_t wss[2];
wss[0] = (num_images + 1) * sizeof(int); // offsets
if (DT_SCORE == DataType::kFLOAT)
{
wss[1] = cubSortPairsWorkspaceSize<float, int>(arrayLen, num_images); // cub workspace
}
else
{
printf("SCORE type not supported.\n");
return (size_t) -1;
}
return calculateTotalWorkspaceSize(wss, 2);
}
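// --- Illustrative sketch (not part of the plugin code above) ---
// cubSortPairsWorkspaceSize() comes from cub_helper.h, which is not shown
// here; presumably it is the standard CUB size query, i.e. the same sort call
// issued with a NULL temp-storage pointer so that only the required scratch
// size is written. A stand-alone version of that query might look like this
// (the helper name and signature are assumptions).
#include <cub/cub.cuh>
template <typename KeyT, typename ValueT>
size_t sortPairsWorkspaceSizeSketch(int num_items, int num_segments, const int* d_offsets)
{
    size_t temp_bytes = 0;
    // With d_temp_storage == NULL, CUB only computes temp_bytes and returns.
    cub::DeviceSegmentedRadixSort::SortPairsDescending(
        (void*) NULL, temp_bytes,
        (const KeyT*) NULL, (KeyT*) NULL,
        (const ValueT*) NULL, (ValueT*) NULL,
        num_items, num_segments,
        d_offsets, d_offsets + 1,
        0, sizeof(KeyT) * 8);
    return temp_bytes;
}
// --- End of sketch ---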
|
11c16f969c56917973d1e60b670d6dc2d7123c38.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__ void force_aux2(long n, double mx, double my, double *r_gpu, double *f_gpu, float *sinr_gpu, float *cosr_gpu)
{
long tid;
tid=threadIdx.x+blockIdx.x*blockDim.x;
while (tid<n)
{
f_gpu[tid]=cosr_gpu[tid]*my-sinr_gpu[tid]*mx;
tid+=blockDim.x*gridDim.x;
};
return;
}
|
11c16f969c56917973d1e60b670d6dc2d7123c38.cu
|
__global__ void force_aux2(long n, double mx, double my, double *r_gpu, double *f_gpu, float *sinr_gpu, float *cosr_gpu)
{
long tid;
tid=threadIdx.x+blockIdx.x*blockDim.x;
while (tid<n)
{
f_gpu[tid]=cosr_gpu[tid]*my-sinr_gpu[tid]*mx;
tid+=blockDim.x*gridDim.x;
};
return;
}
|
c66ada4149996f47eec69f3dc41a99293fa03868.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <sys/time.h>
#include <time.h>
int timeval_subtract(struct timeval *result, struct timeval *t2, struct timeval *t1)
{
unsigned int resolution=1000000;
long int diff = (t2->tv_usec + resolution * t2->tv_sec) - (t1->tv_usec + resolution * t1->tv_sec);
result->tv_sec = diff / resolution;
result->tv_usec = diff % resolution;
return (diff<0);
}
#include "CudaUtilProj.cu.h"
__global__ void
trivial_map(int* inp_d, MyInt4* inp_lift, int inp_size) {
const unsigned int gid = blockIdx.x*blockDim.x + threadIdx.x;
if(gid < inp_size) {
int el = inp_d[gid];
MyInt4 res(el,el,el,el);
if(el < 0) { res.x = 0; res.y = 0; res.z = 0; }
inp_lift[gid] = res;
}
}
int MsspProblem(int block_size, int inp_size) {
int mem_size = inp_size*sizeof(MyInt4);
int *inp_h = (int*)malloc(inp_size*sizeof(int));
int *inp_d; hipMalloc((void**)&inp_d , inp_size*sizeof(int));
MyInt4 *inp_lift; hipMalloc((void**)&inp_lift, mem_size);
MyInt4 *res_d; hipMalloc((void**)&res_d, mem_size);
for(int i = 0; i < inp_size; i+=9) {
inp_h[i ] = -15;
inp_h[i+1 ] = 2;
inp_h[i+2 ] = -1;
inp_h[i+3 ] = 3;
inp_h[i+4 ] = -2;
inp_h[i+5 ] = 4;
inp_h[i+6 ] = 3;
inp_h[i+7 ] = 1;
inp_h[i+8 ] = -4;
}
inp_h[(inp_size/9)*9 - 18 + 7] = 2;
int num_blocks = ( (inp_size % block_size) == 0) ?
inp_size / block_size :
inp_size / block_size + 1 ;
unsigned long int elapsed;
struct timeval t_start, t_end, t_diff;
gettimeofday(&t_start, NULL);
hipMemcpy(inp_d, inp_h, inp_size*sizeof(int), hipMemcpyHostToDevice);
{ // KERNELS
// 1. apply map, i.e., lift each element x to
// (max(x,0),max(x,0), max(x,0), x)
hipLaunchKernelGGL(( trivial_map), dim3(num_blocks), dim3(block_size) , 0, 0, inp_d, inp_lift, inp_size);
hipDeviceSynchronize();
// 2. apply scan with the given operator, i.e.,
// write the apply operator in class MsspOP in
// ScanKernels.cu.h and call scanInc from ScanHost.cu.h
scanInc< MsspOp,MyInt4 > ( block_size, inp_size, inp_lift, res_d );
hipDeviceSynchronize();
}
MyInt4 res_h(0,0,0,0);
// 3. copy back only the last element of the res_d array (of size sizeof(MyInt4))
hipMemcpy(&res_h, res_d+inp_size-1, sizeof(MyInt4), hipMemcpyDeviceToHost);
gettimeofday(&t_end, NULL);
timeval_subtract(&t_diff, &t_end, &t_start);
elapsed = (t_diff.tv_sec*1e6+t_diff.tv_usec);
printf("MSSP version runs in: %lu microsecs\n", elapsed);
printf("RESULT is: %d %d %d %d\n", res_h.x, res_h.y, res_h.z, res_h.w);
if(res_h.x == 11) {
printf("MSSP VALID EXECUTION!\n");
} else {
printf("MSSP INVALID EXECUTION!\n");
}
free(inp_h);
hipFree(inp_d);
hipFree(inp_lift);
hipFree(res_d);
return 1;
}
int scanIncTest() {
const unsigned int num_threads = 8353455;
const unsigned int block_size = 512;
unsigned int mem_size = num_threads * sizeof(int);
int* h_in = (int*) malloc(mem_size);
int* h_out = (int*) malloc(mem_size);
int* flags_h = (int*) malloc(num_threads*sizeof(int));
int sgm_size = 123;
{ // init segments and flags
for(unsigned int i=0; i<num_threads; i++) {
h_in [i] = 1;
flags_h[i] = (i % sgm_size == 0) ? 1 : 0;
}
}
unsigned long int elapsed;
struct timeval t_start, t_end, t_diff;
gettimeofday(&t_start, NULL);
{ // calling inclusive (segmented) scan
int* d_in;
int* d_out;
int* flags_d;
hipMalloc((void**)&d_in , mem_size);
hipMalloc((void**)&d_out, mem_size);
hipMalloc((void**)&flags_d, num_threads*sizeof(int));
// copy host memory to device
hipMemcpy(d_in, h_in, mem_size, hipMemcpyHostToDevice);
hipMemcpy(flags_d, flags_h, num_threads*sizeof(int), hipMemcpyHostToDevice);
// execute kernel
sgmScanInc< Add<int>,int > ( block_size, num_threads, d_in, flags_d, d_out );
// copy host memory to device
hipMemcpy(h_out, d_out, mem_size, hipMemcpyDeviceToHost);
// cleanup memory
hipFree(d_in );
hipFree(d_out);
hipFree(flags_d);
}
gettimeofday(&t_end, NULL);
timeval_subtract(&t_diff, &t_end, &t_start);
elapsed = (t_diff.tv_sec*1e6+t_diff.tv_usec);
printf("Scan Inclusive on GPU runs in: %lu microsecs\n", elapsed);
{ // validation
bool success = true;
int accum = 0;
for(int i=0; i<num_threads; i++) {
// segmented inclusive scan of ones: expected value restarts at 1 at each segment start
if (i % sgm_size == 0) accum = 1;
else accum += 1;
if ( accum != h_out[i] ) {
success = false;
printf("Scan Inclusive Violation: %.1d should be %.1d\n", h_out[i], accum);
}
// for scan exclusive test
// accum += 1;
}
if(success) printf("\nScan Exclusive + VALID RESULT!\n");
else printf("\nScan Exclusive + INVALID RESULT!\n");
}
// cleanup memory
free(h_in );
free(h_out);
free(flags_h);
return 0;
}
void randomInit(float* data, int size) {
for (int i = 0; i < size; ++i)
data[i] = rand() / (float)RAND_MAX;
}
template<class T>
bool validateTranspose(float* A,float* trA, unsigned int rowsA, unsigned int colsA){
bool valid = true;
for(unsigned int i = 0; i < rowsA; i++) {
for(unsigned int j = 0; j < colsA; j++) {
if(trA[j*rowsA + i] != A[i*colsA + j]) {
printf("row: %d, col: %d, A: %.4f, trA: %.4f\n",
i, j, A[i*colsA + j], trA[j*rowsA + i] );
valid = false;
break;
}
}
if(!valid) break;
}
if (valid) printf("GPU TRANSPOSITION VALID!\n");
else printf("GPU TRANSPOSITION INVALID!\n");
return valid;
}
void testTranspose() {
// set seed for rand()
srand(2006);
const unsigned int HEIGHT_A = 1024*8;
const unsigned int WIDTH_A = 1024*8;
// 1. allocate host memory for the two matrices
size_t size_A = WIDTH_A * HEIGHT_A;
size_t mem_size_A = sizeof(float) * size_A;
float* h_A = (float*) malloc(mem_size_A);
float* h_B = (float*) malloc(mem_size_A);
// 2. initialize host memory
randomInit(h_A, size_A);
// 3. allocate device memory
float* d_A;
float* d_B;
hipMalloc((void**) &d_A, mem_size_A);
hipMalloc((void**) &d_B, mem_size_A);
// 4. copy host memory to device
hipMemcpy(d_A, h_A, mem_size_A, hipMemcpyHostToDevice);
{
unsigned long int elapsed;
struct timeval t_start, t_end, t_diff;
gettimeofday(&t_start, NULL);
//transpose<float, TILE>( d_A, d_B, HEIGHT_A, WIDTH_A );
transpose<float, 32>( d_A, d_B, HEIGHT_A, WIDTH_A );
gettimeofday(&t_end, NULL);
timeval_subtract(&t_diff, &t_end, &t_start);
elapsed = (t_diff.tv_sec*1e6+t_diff.tv_usec);
printf("Transpose on GPU runs in: %lu microsecs\n", elapsed);
// copy result from device to host
hipMemcpy(h_B, d_B, mem_size_A, hipMemcpyDeviceToHost);
// 12. validate
//validateTranspose<float>( h_A, h_B, HEIGHT_A, WIDTH_A );
validateTranspose<float>( h_A, h_B, HEIGHT_A, WIDTH_A );
}
// clean up memory
free(h_A);
free(h_B);
hipFree(d_A);
hipFree(d_B);
}
int main(int argc, char** argv) {
const unsigned int mssp_list_size = 8353455;
const unsigned int block_size = 256;
scanIncTest();
MsspProblem(block_size, mssp_list_size);
testTranspose();
}
|
c66ada4149996f47eec69f3dc41a99293fa03868.cu
|
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <sys/time.h>
#include <time.h>
int timeval_subtract(struct timeval *result, struct timeval *t2, struct timeval *t1)
{
unsigned int resolution=1000000;
long int diff = (t2->tv_usec + resolution * t2->tv_sec) - (t1->tv_usec + resolution * t1->tv_sec);
result->tv_sec = diff / resolution;
result->tv_usec = diff % resolution;
return (diff<0);
}
#include "CudaUtilProj.cu.h"
__global__ void
trivial_map(int* inp_d, MyInt4* inp_lift, int inp_size) {
const unsigned int gid = blockIdx.x*blockDim.x + threadIdx.x;
if(gid < inp_size) {
int el = inp_d[gid];
MyInt4 res(el,el,el,el);
if(el < 0) { res.x = 0; res.y = 0; res.z = 0; }
inp_lift[gid] = res;
}
}
int MsspProblem(int block_size, int inp_size) {
int mem_size = inp_size*sizeof(MyInt4);
int *inp_h = (int*)malloc(inp_size*sizeof(int));
int *inp_d; cudaMalloc((void**)&inp_d , inp_size*sizeof(int));
MyInt4 *inp_lift; cudaMalloc((void**)&inp_lift, mem_size);
MyInt4 *res_d; cudaMalloc((void**)&res_d, mem_size);
for(int i = 0; i < inp_size; i+=9) {
inp_h[i ] = -15;
inp_h[i+1 ] = 2;
inp_h[i+2 ] = -1;
inp_h[i+3 ] = 3;
inp_h[i+4 ] = -2;
inp_h[i+5 ] = 4;
inp_h[i+6 ] = 3;
inp_h[i+7 ] = 1;
inp_h[i+8 ] = -4;
}
inp_h[(inp_size/9)*9 - 18 + 7] = 2;
int num_blocks = ( (inp_size % block_size) == 0) ?
inp_size / block_size :
inp_size / block_size + 1 ;
unsigned long int elapsed;
struct timeval t_start, t_end, t_diff;
gettimeofday(&t_start, NULL);
cudaMemcpy(inp_d, inp_h, inp_size*sizeof(int), cudaMemcpyHostToDevice);
{ // KERNELS
// 1. apply map, i.e., lift each element x to
// (max(x,0),max(x,0), max(x,0), x)
trivial_map<<< num_blocks, block_size >>>(inp_d, inp_lift, inp_size);
cudaThreadSynchronize();
// 2. apply scan with the given operator, i.e.,
// write the apply operator in class MsspOP in
// ScanKernels.cu.h and call scanInc from ScanHost.cu.h
scanInc< MsspOp,MyInt4 > ( block_size, inp_size, inp_lift, res_d );
cudaThreadSynchronize();
}
MyInt4 res_h(0,0,0,0);
// 3. copy back only the last element of the res_d array (of size sizeof(MyInt4))
cudaMemcpy(&res_h, res_d+inp_size-1, sizeof(MyInt4), cudaMemcpyDeviceToHost);
gettimeofday(&t_end, NULL);
timeval_subtract(&t_diff, &t_end, &t_start);
elapsed = (t_diff.tv_sec*1e6+t_diff.tv_usec);
printf("MSSP version runs in: %lu microsecs\n", elapsed);
printf("RESULT is: %d %d %d %d\n", res_h.x, res_h.y, res_h.z, res_h.w);
if(res_h.x == 11) {
printf("MSSP VALID EXECUTION!\n");
} else {
printf("MSSP INVALID EXECUTION!\n");
}
free(inp_h);
cudaFree(inp_d);
cudaFree(inp_lift);
cudaFree(res_d);
return 1;
}
int scanIncTest() {
const unsigned int num_threads = 8353455;
const unsigned int block_size = 512;
unsigned int mem_size = num_threads * sizeof(int);
int* h_in = (int*) malloc(mem_size);
int* h_out = (int*) malloc(mem_size);
int* flags_h = (int*) malloc(num_threads*sizeof(int));
int sgm_size = 123;
{ // init segments and flags
for(unsigned int i=0; i<num_threads; i++) {
h_in [i] = 1;
flags_h[i] = (i % sgm_size == 0) ? 1 : 0;
}
}
unsigned long int elapsed;
struct timeval t_start, t_end, t_diff;
gettimeofday(&t_start, NULL);
{ // calling inclusive (segmented) scan
int* d_in;
int* d_out;
int* flags_d;
cudaMalloc((void**)&d_in , mem_size);
cudaMalloc((void**)&d_out, mem_size);
cudaMalloc((void**)&flags_d, num_threads*sizeof(int));
// copy host memory to device
cudaMemcpy(d_in, h_in, mem_size, cudaMemcpyHostToDevice);
cudaMemcpy(flags_d, flags_h, num_threads*sizeof(int), cudaMemcpyHostToDevice);
// execute kernel
sgmScanInc< Add<int>,int > ( block_size, num_threads, d_in, flags_d, d_out );
// copy host memory to device
cudaMemcpy(h_out, d_out, mem_size, cudaMemcpyDeviceToHost);
// cleanup memory
cudaFree(d_in );
cudaFree(d_out);
cudaFree(flags_d);
}
gettimeofday(&t_end, NULL);
timeval_subtract(&t_diff, &t_end, &t_start);
elapsed = (t_diff.tv_sec*1e6+t_diff.tv_usec);
printf("Scan Inclusive on GPU runs in: %lu microsecs\n", elapsed);
{ // validation
bool success = true;
int accum = 0;
for(int i=0; i<num_threads; i++) {
// segmented inclusive scan of ones: expected value restarts at 1 at each segment start
if (i % sgm_size == 0) accum = 1;
else accum += 1;
if ( accum != h_out[i] ) {
success = false;
printf("Scan Inclusive Violation: %.1d should be %.1d\n", h_out[i], accum);
}
// for scan exclusive test
// accum += 1;
}
if(success) printf("\nScan Exclusive + VALID RESULT!\n");
else printf("\nScan Exclusive + INVALID RESULT!\n");
}
// cleanup memory
free(h_in );
free(h_out);
free(flags_h);
return 0;
}
void randomInit(float* data, int size) {
for (int i = 0; i < size; ++i)
data[i] = rand() / (float)RAND_MAX;
}
template<class T>
bool validateTranspose(float* A,float* trA, unsigned int rowsA, unsigned int colsA){
bool valid = true;
for(unsigned int i = 0; i < rowsA; i++) {
for(unsigned int j = 0; j < colsA; j++) {
if(trA[j*rowsA + i] != A[i*colsA + j]) {
printf("row: %d, col: %d, A: %.4f, trA: %.4f\n",
i, j, A[i*colsA + j], trA[j*rowsA + i] );
valid = false;
break;
}
}
if(!valid) break;
}
if (valid) printf("GPU TRANSPOSITION VALID!\n");
else printf("GPU TRANSPOSITION INVALID!\n");
return valid;
}
void testTranspose() {
// set seed for rand()
srand(2006);
const unsigned int HEIGHT_A = 1024*8;
const unsigned int WIDTH_A = 1024*8;
// 1. allocate host memory for the two matrices
size_t size_A = WIDTH_A * HEIGHT_A;
size_t mem_size_A = sizeof(float) * size_A;
float* h_A = (float*) malloc(mem_size_A);
float* h_B = (float*) malloc(mem_size_A);
// 2. initialize host memory
randomInit(h_A, size_A);
// 3. allocate device memory
float* d_A;
float* d_B;
cudaMalloc((void**) &d_A, mem_size_A);
cudaMalloc((void**) &d_B, mem_size_A);
// 4. copy host memory to device
cudaMemcpy(d_A, h_A, mem_size_A, cudaMemcpyHostToDevice);
{
unsigned long int elapsed;
struct timeval t_start, t_end, t_diff;
gettimeofday(&t_start, NULL);
//transpose<float, TILE>( d_A, d_B, HEIGHT_A, WIDTH_A );
transpose<float, 32>( d_A, d_B, HEIGHT_A, WIDTH_A );
gettimeofday(&t_end, NULL);
timeval_subtract(&t_diff, &t_end, &t_start);
elapsed = (t_diff.tv_sec*1e6+t_diff.tv_usec);
printf("Transpose on GPU runs in: %lu microsecs\n", elapsed);
// copy result from device to host
cudaMemcpy(h_B, d_B, mem_size_A, cudaMemcpyDeviceToHost);
// 12. validate
//validateTranspose<float>( h_A, h_B, HEIGHT_A, WIDTH_A );
validateTranspose<float>( h_A, h_B, HEIGHT_A, WIDTH_A );
}
// clean up memory
free(h_A);
free(h_B);
cudaFree(d_A);
cudaFree(d_B);
}
int main(int argc, char** argv) {
const unsigned int mssp_list_size = 8353455;
const unsigned int block_size = 256;
scanIncTest();
MsspProblem(block_size, mssp_list_size);
testTranspose();
}
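// --- Illustrative sketch (not part of the program above) ---
// MyInt4 and MsspOp are defined in CudaUtilProj.cu.h / ScanKernels.cu.h, which
// are not shown. The standard associative operator for maximum segment sum
// combines 4-tuples of (best segment sum, best prefix sum, best suffix sum,
// total sum); assuming MyInt4 stores them as (x, y, z, w) -- which matches the
// lift performed by trivial_map above -- the combine step would look like this.
struct MyInt4Sketch { int x, y, z, w; };
static __host__ __device__ inline int imaxSketch(int a, int b) { return a > b ? a : b; }
static __host__ __device__ inline MyInt4Sketch msspCombineSketch(MyInt4Sketch a, MyInt4Sketch b) {
    MyInt4Sketch r;
    r.w = a.w + b.w;                                   // total sum
    r.y = imaxSketch(a.y, a.w + b.y);                  // best prefix: stay in a, or all of a plus b's prefix
    r.z = imaxSketch(b.z, b.w + a.z);                  // best suffix: stay in b, or all of b plus a's suffix
    r.x = imaxSketch(imaxSketch(a.x, b.x), a.z + b.y); // best segment: inside a, inside b, or spanning the join
    return r;
}
// --- End of sketch ---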
|
143a976faf9e5c8ba20f70dddf1d4262633451fd.hip
|
// !!! This is a file automatically generated by hipify!!!
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#include <hip/hip_runtime.h>
#include <hip/hip_fp16.h>
#include "functors.hpp"
#include "types.hpp"
#include "vector_traits.hpp"
#include "grid_stride_range.hpp"
#include "execution.hpp"
#include "../cuda4dnn/csl/stream.hpp"
#include "../cuda4dnn/csl/span.hpp"
#include "../cuda4dnn/kernels/scale_shift.hpp"
#include <opencv2/core.hpp>
#include <cstddef>
using namespace cv::dnn::cuda4dnn::csl;
using namespace cv::dnn::cuda4dnn::csl::device;
namespace cv { namespace dnn { namespace cuda4dnn { namespace kernels {
namespace raw {
template <class T, class ActivationOp, std::size_t N>
__global__ void generic_op_vec(Span<T> output, View<T> input, const typename ActivationOp::Params params) {
using vector_type = get_vector_type_t<T, N>;
auto output_vPtr = vector_type::get_pointer(output.data());
auto input_vPtr = vector_type::get_pointer(input.data());
ActivationOp activation_op(params);
for (auto i : grid_stride_range(output.size() / vector_type::size())) {
vector_type vec;
v_load(vec, input_vPtr[i]);
for (int j = 0; j < vector_type::size(); j++)
vec.data[j] = activation_op(vec.data[j]);
v_store(output_vPtr[i], vec);
}
}
template <class T, std::size_t N>
__global__ void axiswise_relu_vec(Span<T> output, View<T> input, size_type inner_size, View<T> slope) {
using vector_type = get_vector_type_t<T, N>;
auto output_vPtr = vector_type::get_pointer(output.data());
auto input_vPtr = vector_type::get_pointer(input.data());
for (auto i : grid_stride_range(output.size() / vector_type::size())) {
const index_type c = (i / inner_size) % slope.size();
vector_type vec;
v_load(vec, input_vPtr[i]);
for (int j = 0; j < vector_type::size(); j++)
vec.data[j] = vec.data[j] > T(0) ? vec.data[j] : vec.data[j] * slope[c];
v_store(output_vPtr[i], vec);
}
}
} /* namespace raw */
template <class T, class ActivationOp, std::size_t N> static
void launch_vectorized_generic_op(const Stream& stream, Span<T> output, View<T> input, const typename ActivationOp::Params& params) {
CV_Assert(is_fully_aligned<T>(output, N));
CV_Assert(is_fully_aligned<T>(input, N));
auto kernel = raw::generic_op_vec<T, ActivationOp, N>;
auto policy = make_policy(kernel, output.size() / N, 0, stream);
launch_kernel(kernel, policy, output, input, params);
}
template <class T, class ActivationOp> static
void generic_op(const Stream& stream, Span<T> output, View<T> input, const typename ActivationOp::Params& params = {}) {
CV_Assert(input.size() == output.size());
if (is_fully_aligned<T>(output, 4) && is_fully_aligned<T>(input, 4)) {
launch_vectorized_generic_op<T, ActivationOp, 4>(stream, output, input, params);
} else if (is_fully_aligned<T>(output, 2) && is_fully_aligned<T>(input, 2)) {
launch_vectorized_generic_op<T, ActivationOp, 2>(stream, output, input, params);
} else {
launch_vectorized_generic_op<T, ActivationOp, 1>(stream, output, input, params);
}
}
template <class T>
void relu(const Stream& stream, Span<T> output, View<T> input, T slope) {
generic_op<T, ReLUFunctor<T>>(stream, output, input, {slope});
}
template <class T>
void clipped_relu(const Stream& stream, Span<T> output, View<T> input, T floor, T ceiling) {
CV_Assert(static_cast<double>(floor) <= static_cast<double>(ceiling));
generic_op<T, ClippedReLUFunctor<T>>(stream, output, input, {floor, ceiling});
}
template <class T>
void tanh(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, TanHFunctor<T>>(stream, output, input);
}
template <class T>
void swish(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, SwishFunctor<T>>(stream, output, input);
}
template <class T>
void mish(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, MishFunctor<T>>(stream, output, input);
}
template <class T>
void sigmoid(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, SigmoidFunctor<T>>(stream, output, input);
}
template <class T>
void elu(const Stream& stream, Span<T> output, View<T> input, T alpha) {
generic_op<T, ELUFunctor<T>>(stream, output, input, {alpha});
}
template <class T>
void bnll(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, BNLLFunctor<T>>(stream, output, input);
}
template <class T>
void ceil(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, CeilFunctor<T>>(stream, output, input);
}
template <class T>
void floor(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, FloorFunctor<T>>(stream, output, input);
}
template <class T>
void log(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, LogFunctor<T>>(stream, output, input);
}
template <class T>
void rint(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, RintFunctor<T>>(stream, output, input);
}
template <class T>
void sqrt(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, SqrtFunctor<T>>(stream, output, input);
}
template <class T>
void not_k(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, NotFunctor<T>>(stream, output, input);
}
template <class T>
void acos(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, AcosFunctor<T>>(stream, output, input);
}
template <class T>
void acosh(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, AcoshFunctor<T>>(stream, output, input);
}
template <class T>
void asin(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, AsinFunctor<T>>(stream, output, input);
}
template <class T>
void asinh(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, AsinhFunctor<T>>(stream, output, input);
}
template <class T>
void atan(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, AtanFunctor<T>>(stream, output, input);
}
template <class T>
void atanh(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, AtanhFunctor<T>>(stream, output, input);
}
template <class T>
void cos(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, CosFunctor<T>>(stream, output, input);
}
template <class T>
void cosh(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, CoshFunctor<T>>(stream, output, input);
}
template <class T>
void erf(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, ErfFunctor<T>>(stream, output, input);
}
template <class T>
void hardswish(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, HardSwishFunctor<T>>(stream, output, input);
}
template <class T>
void sin(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, SinFunctor<T>>(stream, output, input);
}
template <class T>
void sinh(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, SinhFunctor<T>>(stream, output, input);
}
template <class T>
void softplus(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, SoftplusFunctor<T>>(stream, output, input);
}
template <class T>
void softsign(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, SoftsignFunctor<T>>(stream, output, input);
}
template <class T>
void tan(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, TanFunctor<T>>(stream, output, input);
}
template <class T>
void celu(const Stream& stream, Span<T> output, View<T> input, T alpha) {
generic_op<T, CeluFunctor<T>>(stream, output, input, {alpha});
}
template <class T>
void hardsigmoid(const Stream& stream, Span<T> output, View<T> input, T alpha, T beta) {
generic_op<T, HardSigmoidFunctor<T>>(stream, output, input, {alpha, beta});
}
template <class T>
void selu(const Stream& stream, Span<T> output, View<T> input, T alpha, T gamma) {
generic_op<T, SeluFunctor<T>>(stream, output, input, {alpha, gamma});
}
template <class T>
void gelu(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, GeluFunctor<T>>(stream, output, input);
}
template <class T>
void sign(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, SignFunctor<T>>(stream, output, input);
}
template <class T>
void shrink(const Stream& stream, Span<T> output, View<T> input, T bias, T lambd) {
generic_op<T, ShrinkFunctor<T>>(stream, output, input, {bias, lambd});
}
template <class T>
void reciprocal(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, ReciprocalFunctor<T>>(stream, output, input);
}
template <class T>
void thresholdedrelu(const Stream& stream, Span<T> output, View<T> input, T alpha) {
generic_op<T, ThresholdedReluFunctor<T>>(stream, output, input, {alpha});
}
template <class T>
void abs(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, AbsFunctor<T>>(stream, output, input);
}
template <class T>
void power(const Stream& stream, Span<T> output, View<T> input, T exp, T scale, T shift) {
CV_Assert(input.size() == output.size());
if (static_cast<float>(exp) == 1.0f) {
scale1_with_bias1(stream, output, input, scale, shift);
return;
}
generic_op<T, PowerFunctor<T>>(stream, output, input, {exp, scale, shift});
}
template <class T>
void exp(const Stream& stream, Span<T> output, View<T> input, T normScale, T normShift) {
generic_op<T, ExpFunctor<T>>(stream, output, input, {normScale, normShift});
}
#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 530)
template void relu<__half>(const Stream&, Span<__half>, View<__half>, __half);
template void clipped_relu<__half>(const Stream&, Span<__half>, View<__half>, __half, __half);
template void tanh<__half>(const Stream&, Span<__half>, View<__half>);
template void swish<__half>(const Stream&, Span<__half>, View<__half>);
template void mish<__half>(const Stream&, Span<__half>, View<__half>);
template void sigmoid<__half>(const Stream&, Span<__half>, View<__half>);
template void elu<__half>(const Stream&, Span<__half>, View<__half>, __half);
template void abs<__half>(const Stream& stream, Span<__half> output, View<__half> input);
template void bnll<__half>(const Stream&, Span<__half>, View<__half>);
template void ceil<__half>(const Stream&, Span<__half>, View<__half>);
template void floor<__half>(const Stream&, Span<__half>, View<__half>);
template void log<__half>(const Stream&, Span<__half>, View<__half>);
template void rint<__half>(const Stream&, Span<__half>, View<__half>);
template void sqrt<__half>(const Stream&, Span<__half>, View<__half>);
template void not_k<__half>(const Stream&, Span<__half>, View<__half>);
template void acos<__half>(const Stream&, Span<__half>, View<__half>);
template void acosh<__half>(const Stream&, Span<__half>, View<__half>);
template void asin<__half>(const Stream&, Span<__half>, View<__half>);
template void asinh<__half>(const Stream&, Span<__half>, View<__half>);
template void atan<__half>(const Stream&, Span<__half>, View<__half>);
template void atanh<__half>(const Stream&, Span<__half>, View<__half>);
template void cos<__half>(const Stream&, Span<__half>, View<__half>);
template void cosh<__half>(const Stream&, Span<__half>, View<__half>);
template void erf<__half>(const Stream&, Span<__half>, View<__half>);
template void hardswish<__half>(const Stream&, Span<__half>, View<__half>);
template void sin<__half>(const Stream&, Span<__half>, View<__half>);
template void sinh<__half>(const Stream&, Span<__half>, View<__half>);
template void softplus<__half>(const Stream&, Span<__half>, View<__half>);
template void softsign<__half>(const Stream&, Span<__half>, View<__half>);
template void tan<__half>(const Stream&, Span<__half>, View<__half>);
template void celu<__half>(const Stream&, Span<__half>, View<__half>, __half);
template void hardsigmoid<__half>(const Stream&, Span<__half>, View<__half>, __half, __half);
template void selu<__half>(const Stream&, Span<__half>, View<__half>, __half, __half);
template void gelu<__half>(const Stream&, Span<__half>, View<__half>);
template void thresholdedrelu<__half>(const Stream&, Span<__half>, View<__half>, __half);
template void power<__half>(const Stream&, Span<__half>, View<__half>, __half, __half, __half);
template void exp<__half>(const Stream&, Span<__half>, View<__half>, __half, __half);
template void sign<__half>(const Stream&, Span<__half>, View<__half>);
template void shrink<__half>(const Stream&, Span<__half>, View<__half>, __half, __half);
template void reciprocal<__half>(const Stream&, Span<__half>, View<__half>);
#endif
template void relu<float>(const Stream&, Span<float>, View<float>, float);
template void clipped_relu<float>(const Stream&, Span<float>, View<float>, float, float);
template void tanh<float>(const Stream&, Span<float>, View<float>);
template void swish<float>(const Stream&, Span<float>, View<float>);
template void mish<float>(const Stream&, Span<float>, View<float>);
template void sigmoid<float>(const Stream&, Span<float>, View<float>);
template void elu<float>(const Stream&, Span<float>, View<float>, float);
template void abs<float>(const Stream& stream, Span<float> output, View<float> input);
template void bnll<float>(const Stream&, Span<float>, View<float>);
template void ceil<float>(const Stream&, Span<float>, View<float>);
template void floor<float>(const Stream&, Span<float>, View<float>);
template void log<float>(const Stream&, Span<float>, View<float>);
template void rint<float>(const Stream&, Span<float>, View<float>);
template void sqrt<float>(const Stream&, Span<float>, View<float>);
template void not_k<float>(const Stream&, Span<float>, View<float>);
template void acos<float>(const Stream&, Span<float>, View<float>);
template void acosh<float>(const Stream&, Span<float>, View<float>);
template void asin<float>(const Stream&, Span<float>, View<float>);
template void asinh<float>(const Stream&, Span<float>, View<float>);
template void atan<float>(const Stream&, Span<float>, View<float>);
template void atanh<float>(const Stream&, Span<float>, View<float>);
template void cos<float>(const Stream&, Span<float>, View<float>);
template void cosh<float>(const Stream&, Span<float>, View<float>);
template void erf<float>(const Stream&, Span<float>, View<float>);
template void hardswish<float>(const Stream&, Span<float>, View<float>);
template void sin<float>(const Stream&, Span<float>, View<float>);
template void sinh<float>(const Stream&, Span<float>, View<float>);
template void softplus<float>(const Stream&, Span<float>, View<float>);
template void softsign<float>(const Stream&, Span<float>, View<float>);
template void tan<float>(const Stream&, Span<float>, View<float>);
template void celu<float>(const Stream&, Span<float>, View<float>, float);
template void hardsigmoid<float>(const Stream&, Span<float>, View<float>, float, float);
template void selu<float>(const Stream&, Span<float>, View<float>, float, float);
template void gelu<float>(const Stream&, Span<float>, View<float>);
template void thresholdedrelu<float>(const Stream&, Span<float>, View<float>, float);
template void power<float>(const Stream&, Span<float>, View<float>, float, float, float);
template void exp<float>(const Stream&, Span<float>, View<float>, float, float);
template void sign<float>(const Stream&, Span<float>, View<float>);
template void shrink<float>(const Stream&, Span<float>, View<float>, float, float);
template void reciprocal<float>(const Stream&, Span<float>, View<float>);
template <class T, std::size_t N> static
void launch_vectorized_axiswise_relu(const Stream& stream, Span<T> output, View<T> input, std::size_t inner_size, View<T> slope) {
CV_Assert(is_fully_aligned<T>(output, N));
CV_Assert(is_fully_aligned<T>(input, N));
CV_Assert(inner_size % N == 0);
auto kernel = raw::axiswise_relu_vec<T, N>;
auto policy = make_policy(kernel, output.size() / N, 0, stream);
launch_kernel(kernel, policy, output, input, inner_size / N, slope);
}
template <class T>
void axiswise_relu(const Stream& stream, Span<T> output, View<T> input, std::size_t inner_size, View<T> slope) {
CV_Assert(input.size() == output.size());
if (is_fully_aligned<T>(output, 4) && is_fully_aligned<T>(input, 4) && inner_size % 4 == 0) {
launch_vectorized_axiswise_relu<T, 4>(stream, output, input, inner_size, slope);
} else if (is_fully_aligned<T>(output, 2) && is_fully_aligned<T>(input, 2) && inner_size % 2 == 0) {
launch_vectorized_axiswise_relu<T, 2>(stream, output, input, inner_size, slope);
} else {
launch_vectorized_axiswise_relu<T, 1>(stream, output, input, inner_size, slope);
}
}
#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 530)
template void axiswise_relu<__half>(const Stream&, Span<__half>, View<__half>, std::size_t, View<__half>);
#endif
template void axiswise_relu<float>(const Stream&, Span<float>, View<float>, std::size_t, View<float>);
}}}} /* namespace cv::dnn::cuda4dnn::kernels */
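// --- Illustrative sketch (not part of the OpenCV source above) ---
// The activation functors (ReLUFunctor, PowerFunctor, ...) live in
// functors.hpp, which is not included here. From generic_op_vec one can read
// off the interface a functor must provide: a nested Params type, a
// __device__ constructor taking Params, and a __device__ operator(). A
// hypothetical leaky-ReLU-style functor with that shape could look like this.
template <class T>
struct LeakyReLUFunctorSketch {
    struct Params {
        T slope;   /* slope applied to negative inputs */
    };
    __device__ LeakyReLUFunctorSketch(const Params& params) : slope(params.slope) { }
    __device__ T operator()(T value) const {
        return value > T(0) ? value : value * slope;
    }
    T slope;
};
// --- End of sketch ---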
|
143a976faf9e5c8ba20f70dddf1d4262633451fd.cu
|
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#include <cuda_runtime.h>
#include <cuda_fp16.h>
#include "functors.hpp"
#include "types.hpp"
#include "vector_traits.hpp"
#include "grid_stride_range.hpp"
#include "execution.hpp"
#include "../cuda4dnn/csl/stream.hpp"
#include "../cuda4dnn/csl/span.hpp"
#include "../cuda4dnn/kernels/scale_shift.hpp"
#include <opencv2/core.hpp>
#include <cstddef>
using namespace cv::dnn::cuda4dnn::csl;
using namespace cv::dnn::cuda4dnn::csl::device;
namespace cv { namespace dnn { namespace cuda4dnn { namespace kernels {
namespace raw {
template <class T, class ActivationOp, std::size_t N>
__global__ void generic_op_vec(Span<T> output, View<T> input, const typename ActivationOp::Params params) {
using vector_type = get_vector_type_t<T, N>;
auto output_vPtr = vector_type::get_pointer(output.data());
auto input_vPtr = vector_type::get_pointer(input.data());
ActivationOp activation_op(params);
for (auto i : grid_stride_range(output.size() / vector_type::size())) {
vector_type vec;
v_load(vec, input_vPtr[i]);
for (int j = 0; j < vector_type::size(); j++)
vec.data[j] = activation_op(vec.data[j]);
v_store(output_vPtr[i], vec);
}
}
template <class T, std::size_t N>
__global__ void axiswise_relu_vec(Span<T> output, View<T> input, size_type inner_size, View<T> slope) {
using vector_type = get_vector_type_t<T, N>;
auto output_vPtr = vector_type::get_pointer(output.data());
auto input_vPtr = vector_type::get_pointer(input.data());
for (auto i : grid_stride_range(output.size() / vector_type::size())) {
const index_type c = (i / inner_size) % slope.size();
vector_type vec;
v_load(vec, input_vPtr[i]);
for (int j = 0; j < vector_type::size(); j++)
vec.data[j] = vec.data[j] > T(0) ? vec.data[j] : vec.data[j] * slope[c];
v_store(output_vPtr[i], vec);
}
}
} /* namespace raw */
template <class T, class ActivationOp, std::size_t N> static
void launch_vectorized_generic_op(const Stream& stream, Span<T> output, View<T> input, const typename ActivationOp::Params& params) {
CV_Assert(is_fully_aligned<T>(output, N));
CV_Assert(is_fully_aligned<T>(input, N));
auto kernel = raw::generic_op_vec<T, ActivationOp, N>;
auto policy = make_policy(kernel, output.size() / N, 0, stream);
launch_kernel(kernel, policy, output, input, params);
}
template <class T, class ActivationOp> static
void generic_op(const Stream& stream, Span<T> output, View<T> input, const typename ActivationOp::Params& params = {}) {
CV_Assert(input.size() == output.size());
if (is_fully_aligned<T>(output, 4) && is_fully_aligned<T>(input, 4)) {
launch_vectorized_generic_op<T, ActivationOp, 4>(stream, output, input, params);
} else if (is_fully_aligned<T>(output, 2) && is_fully_aligned<T>(input, 2)) {
launch_vectorized_generic_op<T, ActivationOp, 2>(stream, output, input, params);
} else {
launch_vectorized_generic_op<T, ActivationOp, 1>(stream, output, input, params);
}
}
template <class T>
void relu(const Stream& stream, Span<T> output, View<T> input, T slope) {
generic_op<T, ReLUFunctor<T>>(stream, output, input, {slope});
}
template <class T>
void clipped_relu(const Stream& stream, Span<T> output, View<T> input, T floor, T ceiling) {
CV_Assert(static_cast<double>(floor) <= static_cast<double>(ceiling));
generic_op<T, ClippedReLUFunctor<T>>(stream, output, input, {floor, ceiling});
}
template <class T>
void tanh(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, TanHFunctor<T>>(stream, output, input);
}
template <class T>
void swish(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, SwishFunctor<T>>(stream, output, input);
}
template <class T>
void mish(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, MishFunctor<T>>(stream, output, input);
}
template <class T>
void sigmoid(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, SigmoidFunctor<T>>(stream, output, input);
}
template <class T>
void elu(const Stream& stream, Span<T> output, View<T> input, T alpha) {
generic_op<T, ELUFunctor<T>>(stream, output, input, {alpha});
}
template <class T>
void bnll(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, BNLLFunctor<T>>(stream, output, input);
}
template <class T>
void ceil(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, CeilFunctor<T>>(stream, output, input);
}
template <class T>
void floor(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, FloorFunctor<T>>(stream, output, input);
}
template <class T>
void log(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, LogFunctor<T>>(stream, output, input);
}
template <class T>
void rint(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, RintFunctor<T>>(stream, output, input);
}
template <class T>
void sqrt(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, SqrtFunctor<T>>(stream, output, input);
}
template <class T>
void not_k(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, NotFunctor<T>>(stream, output, input);
}
template <class T>
void acos(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, AcosFunctor<T>>(stream, output, input);
}
template <class T>
void acosh(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, AcoshFunctor<T>>(stream, output, input);
}
template <class T>
void asin(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, AsinFunctor<T>>(stream, output, input);
}
template <class T>
void asinh(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, AsinhFunctor<T>>(stream, output, input);
}
template <class T>
void atan(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, AtanFunctor<T>>(stream, output, input);
}
template <class T>
void atanh(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, AtanhFunctor<T>>(stream, output, input);
}
template <class T>
void cos(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, CosFunctor<T>>(stream, output, input);
}
template <class T>
void cosh(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, CoshFunctor<T>>(stream, output, input);
}
template <class T>
void erf(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, ErfFunctor<T>>(stream, output, input);
}
template <class T>
void hardswish(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, HardSwishFunctor<T>>(stream, output, input);
}
template <class T>
void sin(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, SinFunctor<T>>(stream, output, input);
}
template <class T>
void sinh(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, SinhFunctor<T>>(stream, output, input);
}
template <class T>
void softplus(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, SoftplusFunctor<T>>(stream, output, input);
}
template <class T>
void softsign(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, SoftsignFunctor<T>>(stream, output, input);
}
template <class T>
void tan(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, TanFunctor<T>>(stream, output, input);
}
template <class T>
void celu(const Stream& stream, Span<T> output, View<T> input, T alpha) {
generic_op<T, CeluFunctor<T>>(stream, output, input, {alpha});
}
template <class T>
void hardsigmoid(const Stream& stream, Span<T> output, View<T> input, T alpha, T beta) {
generic_op<T, HardSigmoidFunctor<T>>(stream, output, input, {alpha, beta});
}
template <class T>
void selu(const Stream& stream, Span<T> output, View<T> input, T alpha, T gamma) {
generic_op<T, SeluFunctor<T>>(stream, output, input, {alpha, gamma});
}
template <class T>
void gelu(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, GeluFunctor<T>>(stream, output, input);
}
template <class T>
void sign(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, SignFunctor<T>>(stream, output, input);
}
template <class T>
void shrink(const Stream& stream, Span<T> output, View<T> input, T bias, T lambd) {
generic_op<T, ShrinkFunctor<T>>(stream, output, input, {bias, lambd});
}
template <class T>
void reciprocal(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, ReciprocalFunctor<T>>(stream, output, input);
}
template <class T>
void thresholdedrelu(const Stream& stream, Span<T> output, View<T> input, T alpha) {
generic_op<T, ThresholdedReluFunctor<T>>(stream, output, input, {alpha});
}
template <class T>
void abs(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, AbsFunctor<T>>(stream, output, input);
}
template <class T>
void power(const Stream& stream, Span<T> output, View<T> input, T exp, T scale, T shift) {
CV_Assert(input.size() == output.size());
if (static_cast<float>(exp) == 1.0f) {
scale1_with_bias1(stream, output, input, scale, shift);
return;
}
generic_op<T, PowerFunctor<T>>(stream, output, input, {exp, scale, shift});
}
template <class T>
void exp(const Stream& stream, Span<T> output, View<T> input, T normScale, T normShift) {
generic_op<T, ExpFunctor<T>>(stream, output, input, {normScale, normShift});
}
#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 530)
template void relu<__half>(const Stream&, Span<__half>, View<__half>, __half);
template void clipped_relu<__half>(const Stream&, Span<__half>, View<__half>, __half, __half);
template void tanh<__half>(const Stream&, Span<__half>, View<__half>);
template void swish<__half>(const Stream&, Span<__half>, View<__half>);
template void mish<__half>(const Stream&, Span<__half>, View<__half>);
template void sigmoid<__half>(const Stream&, Span<__half>, View<__half>);
template void elu<__half>(const Stream&, Span<__half>, View<__half>, __half);
template void abs<__half>(const Stream& stream, Span<__half> output, View<__half> input);
template void bnll<__half>(const Stream&, Span<__half>, View<__half>);
template void ceil<__half>(const Stream&, Span<__half>, View<__half>);
template void floor<__half>(const Stream&, Span<__half>, View<__half>);
template void log<__half>(const Stream&, Span<__half>, View<__half>);
template void rint<__half>(const Stream&, Span<__half>, View<__half>);
template void sqrt<__half>(const Stream&, Span<__half>, View<__half>);
template void not_k<__half>(const Stream&, Span<__half>, View<__half>);
template void acos<__half>(const Stream&, Span<__half>, View<__half>);
template void acosh<__half>(const Stream&, Span<__half>, View<__half>);
template void asin<__half>(const Stream&, Span<__half>, View<__half>);
template void asinh<__half>(const Stream&, Span<__half>, View<__half>);
template void atan<__half>(const Stream&, Span<__half>, View<__half>);
template void atanh<__half>(const Stream&, Span<__half>, View<__half>);
template void cos<__half>(const Stream&, Span<__half>, View<__half>);
template void cosh<__half>(const Stream&, Span<__half>, View<__half>);
template void erf<__half>(const Stream&, Span<__half>, View<__half>);
template void hardswish<__half>(const Stream&, Span<__half>, View<__half>);
template void sin<__half>(const Stream&, Span<__half>, View<__half>);
template void sinh<__half>(const Stream&, Span<__half>, View<__half>);
template void softplus<__half>(const Stream&, Span<__half>, View<__half>);
template void softsign<__half>(const Stream&, Span<__half>, View<__half>);
template void tan<__half>(const Stream&, Span<__half>, View<__half>);
template void celu<__half>(const Stream&, Span<__half>, View<__half>, __half);
template void hardsigmoid<__half>(const Stream&, Span<__half>, View<__half>, __half, __half);
template void selu<__half>(const Stream&, Span<__half>, View<__half>, __half, __half);
template void gelu<__half>(const Stream&, Span<__half>, View<__half>);
template void thresholdedrelu<__half>(const Stream&, Span<__half>, View<__half>, __half);
template void power<__half>(const Stream&, Span<__half>, View<__half>, __half, __half, __half);
template void exp<__half>(const Stream&, Span<__half>, View<__half>, __half, __half);
template void sign<__half>(const Stream&, Span<__half>, View<__half>);
template void shrink<__half>(const Stream&, Span<__half>, View<__half>, __half, __half);
template void reciprocal<__half>(const Stream&, Span<__half>, View<__half>);
#endif
template void relu<float>(const Stream&, Span<float>, View<float>, float);
template void clipped_relu<float>(const Stream&, Span<float>, View<float>, float, float);
template void tanh<float>(const Stream&, Span<float>, View<float>);
template void swish<float>(const Stream&, Span<float>, View<float>);
template void mish<float>(const Stream&, Span<float>, View<float>);
template void sigmoid<float>(const Stream&, Span<float>, View<float>);
template void elu<float>(const Stream&, Span<float>, View<float>, float);
template void abs<float>(const Stream& stream, Span<float> output, View<float> input);
template void bnll<float>(const Stream&, Span<float>, View<float>);
template void ceil<float>(const Stream&, Span<float>, View<float>);
template void floor<float>(const Stream&, Span<float>, View<float>);
template void log<float>(const Stream&, Span<float>, View<float>);
template void rint<float>(const Stream&, Span<float>, View<float>);
template void sqrt<float>(const Stream&, Span<float>, View<float>);
template void not_k<float>(const Stream&, Span<float>, View<float>);
template void acos<float>(const Stream&, Span<float>, View<float>);
template void acosh<float>(const Stream&, Span<float>, View<float>);
template void asin<float>(const Stream&, Span<float>, View<float>);
template void asinh<float>(const Stream&, Span<float>, View<float>);
template void atan<float>(const Stream&, Span<float>, View<float>);
template void atanh<float>(const Stream&, Span<float>, View<float>);
template void cos<float>(const Stream&, Span<float>, View<float>);
template void cosh<float>(const Stream&, Span<float>, View<float>);
template void erf<float>(const Stream&, Span<float>, View<float>);
template void hardswish<float>(const Stream&, Span<float>, View<float>);
template void sin<float>(const Stream&, Span<float>, View<float>);
template void sinh<float>(const Stream&, Span<float>, View<float>);
template void softplus<float>(const Stream&, Span<float>, View<float>);
template void softsign<float>(const Stream&, Span<float>, View<float>);
template void tan<float>(const Stream&, Span<float>, View<float>);
template void celu<float>(const Stream&, Span<float>, View<float>, float);
template void hardsigmoid<float>(const Stream&, Span<float>, View<float>, float, float);
template void selu<float>(const Stream&, Span<float>, View<float>, float, float);
template void gelu<float>(const Stream&, Span<float>, View<float>);
template void thresholdedrelu<float>(const Stream&, Span<float>, View<float>, float);
template void power<float>(const Stream&, Span<float>, View<float>, float, float, float);
template void exp<float>(const Stream&, Span<float>, View<float>, float, float);
template void sign<float>(const Stream&, Span<float>, View<float>);
template void shrink<float>(const Stream&, Span<float>, View<float>, float, float);
template void reciprocal<float>(const Stream&, Span<float>, View<float>);
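// Axis-wise (channel-wise) ReLU: the dispatcher below picks the widest vector width (4, 2, or 1) for which both tensors are fully aligned and the inner size divides evenly, then launches the corresponding vectorized kernel.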
template <class T, std::size_t N> static
void launch_vectorized_axiswise_relu(const Stream& stream, Span<T> output, View<T> input, std::size_t inner_size, View<T> slope) {
CV_Assert(is_fully_aligned<T>(output, N));
CV_Assert(is_fully_aligned<T>(input, N));
CV_Assert(inner_size % N == 0);
auto kernel = raw::axiswise_relu_vec<T, N>;
auto policy = make_policy(kernel, output.size() / N, 0, stream);
launch_kernel(kernel, policy, output, input, inner_size / N, slope);
}
template <class T>
void axiswise_relu(const Stream& stream, Span<T> output, View<T> input, std::size_t inner_size, View<T> slope) {
CV_Assert(input.size() == output.size());
if (is_fully_aligned<T>(output, 4) && is_fully_aligned<T>(input, 4) && inner_size % 4 == 0) {
launch_vectorized_axiswise_relu<T, 4>(stream, output, input, inner_size, slope);
} else if (is_fully_aligned<T>(output, 2) && is_fully_aligned<T>(input, 2) && inner_size % 2 == 0) {
launch_vectorized_axiswise_relu<T, 2>(stream, output, input, inner_size, slope);
} else {
launch_vectorized_axiswise_relu<T, 1>(stream, output, input, inner_size, slope);
}
}
#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 530)
template void axiswise_relu<__half>(const Stream&, Span<__half>, View<__half>, std::size_t, View<__half>);
#endif
template void axiswise_relu<float>(const Stream&, Span<float>, View<float>, std::size_t, View<float>);
}}}} /* namespace cv::dnn::cuda4dnn::kernels */
|
da7ebbb484e22e9327a05caf26f83b2125e651fd.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cugl_ros/apps/dynamic_fusion/cuda_internal.h>
#include <cugl_ros/apps/dynamic_fusion/cuda_impl/reduce.cuh>
namespace dynfu
{
namespace gpu
{
texture<float, 1, hipReadModeElementType> vec_texture;
__device__ uint blocks_done_new = 0;
__device__ float global_value_new = 0.f;
__device__ float output_value_new;
//////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////
void bindVecTexture(const DeviceArray<float> &vec_data)
{
hipChannelFormatDesc vec_desc = hipCreateChannelDesc<float>();
checkCudaErrors(hipBindTexture(0, vec_texture, reinterpret_cast<const float *>(vec_data.getDevicePtr()), vec_desc));
}
void unbindVecTexture()
{
checkCudaErrors(hipUnbindTexture(vec_texture));
}
//////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////
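// Inverts a 6x6 block in place via Gauss-Jordan elimination with scaled partial pivoting on the augmented [A | I] system (each row holds 12 floats). Returns false if a zero row or a (near-)singular pivot is encountered.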
__device__
bool computeMat6x6Inverse(float *s[6])
{
float scp[6];
#pragma unroll
for (int k = 0; k < 6; k++)
{
scp[k] = fabsf(s[k][0]);
for (int j = 1; j < 6; j++)
{
if (fabsf(s[k][j]) > scp[k])
{
scp[k] = fabsf(s[k][j]);
}
}
if (scp[k] == 0.f)
return false;
}
int pivot_to;
float scp_max;
#pragma unroll
for (int k = 0; k < 6; k++)
{
// select pivot row
pivot_to = k;
scp_max = fabsf(s[k][k] / scp[k]);
// find out which row should be on top
for (int p = k + 1; p < 6; p++)
{
if (fabsf(s[p][k] / scp[p]) > scp_max)
{
scp_max = fabsf(s[p][k] / scp[p]);
pivot_to = p;
}
}
// Pivot if necessary
if (pivot_to != k)
{
float *tmprow;
tmprow = s[k];
s[k] = s[pivot_to];
s[pivot_to] = tmprow;
float tmpscp;
tmpscp = scp[k];
scp[k] = scp[pivot_to];
scp[pivot_to] = tmpscp;
}
float mjk;
// perform gaussian elimination
for (int j = k + 1; j < 6; j++)
{
mjk = s[j][k] / s[k][k];
s[j][k] = 0.f;
for (int jj = k + 1; jj < 12; jj++)
{
s[j][jj] -= mjk * s[k][jj];
}
}
}
if (fabsf(s[5][5]) < 5e-3)
{
return false; // singular matrix!
}
//
// Now we have an upper triangular matrix.
//
// x x x x x x | y y y y y y
// 0 x x x x x | y y y y y y
// 0 0 x x x x | y y y y y y
// 0 0 0 x x x | y y y y y y
// 0 0 0 0 x x | y y y y y y
// 0 0 0 0 0 x | y y y y y y
//
// we'll back substitute to get the inverse
//
// 1 0 0 0 0 0 | z z z z z z
// 0 1 0 0 0 0 | z z z z z z
// 0 0 1 0 0 0 | z z z z z z
// 0 0 0 1 0 0 | z z z z z z
// 0 0 0 0 1 0 | z z z z z z
// 0 0 0 0 0 1 | z z z z z z
//
float mjk;
for (int k = 5; k > 0; k--)
{
for (int j = k - 1; j > -1; j--)
{
mjk = s[j][k] / s[k][k];
for (int jj= j + 1; jj < 12; jj++)
{
s[j][jj] -= mjk * s[k][jj];
}
}
}
return true;
}
__device__
void writeDiagBlockInverse(float *src[6], int entry_start, float *dst)
{
int entry = entry_start;
for (int row = 0; row < 6; row++)
{
for (int col = 0; col < 6; col++)
{
dst[entry++] = src[row][col+6] / src[row][row];
}
}
}
__device__
void writeDiagInverse(int entry_start, const float *src, float *dst)
{
int entry = entry_start;
for (int row = 0; row < 6; row++)
{
for (int col = 0; col < 6; col++)
{
if (row == col)
{
float val = src[entry];
float val_inv = (fabsf(val) < 1e-4) ? 0.f : (1.f / val);
dst[entry] = val_inv;
}
else
{
dst[entry] = 0.f;
}
entry++;
}
}
}
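// Builds a block-Jacobi preconditioner: damping is added to the diagonal of each 6x6 block (doubled for the first three rows), the block is inverted, and if the inversion fails the kernel falls back to inverting only the diagonal entries.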
__global__
void computePrecondKernel(int precond_mat_block_size,
float damping,
const SpMatParamList spmat_param_list,
SpMatDataList spmat_data_list)
{
const int stride = blockDim.x * gridDim.x;
// grid stride loop
for (int i = blockDim.x * blockIdx.x + threadIdx.x;
i < precond_mat_block_size;
i += stride)
{
float r1[12], r2[12], r3[12], r4[12], r5[12], r6[12];
float *s[6];
s[0] = &r1[0];
s[1] = &r2[0];
s[2] = &r3[0];
s[3] = &r4[0];
s[4] = &r5[0];
s[5] = &r6[0];
// initialization
int diag_mat_entry_start = 36 * i;
int diag_mat_entry = diag_mat_entry_start;
for (int row = 0; row < 6; row++)
{
#pragma unroll
for (int col = 0; col < 6; col++)
{
if (row == col)
{
float inc = (row < 3) ? (2.f * damping) : damping;
spmat_data_list.diagonal[diag_mat_entry] += inc;
s[row][col] = spmat_data_list.diagonal[diag_mat_entry];
s[row][col+6] = 1.f;
}
else
{
s[row][col] = spmat_data_list.diagonal[diag_mat_entry];
s[row][col+6] = 0.f;
}
diag_mat_entry++;
}
}
bool find_inverse = computeMat6x6Inverse(s);
if (find_inverse)
{
writeDiagBlockInverse(s, diag_mat_entry_start, spmat_data_list.precond);
}
else
{
writeDiagInverse(diag_mat_entry_start, spmat_data_list.diagonal, spmat_data_list.precond);
}
} // grid stride loop
}
void computePreconditioner(float damping,
const SpMatParamList &spmat_param_list,
SpMatDataList &spmat_data_list)
{
int precond_mat_block_size = static_cast<int>(spmat_param_list.rows / spmat_param_list.block_dim);
int block = 256;
int grid = min(divUp(precond_mat_block_size, block), 512);
hipLaunchKernelGGL(( computePrecondKernel), dim3(grid), dim3(block), 0, 0, precond_mat_block_size,
damping,
spmat_param_list,
spmat_data_list);
checkCudaErrors(hipGetLastError());
}
//////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////
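// PCG initialization: with the right-hand side b bound to vec_texture, sets x = 0, r = b, z = M^-1 * b (block-diagonal preconditioner) and p = z, then block-reduces the initial delta = dot(r, z) into output_value_new.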
__global__
void initPcgSolverKernel(const SpMatParamList spmat_param_list,
const SpMatDataList spmat_data_list,
DeviceArrayHandle<float> x,
DeviceArrayHandle<float> r,
DeviceArrayHandle<float> z,
DeviceArrayHandle<float> p)
{
const int stride = blockDim.x * gridDim.x;
float rz = 0.f;
// grid stride loop
for (int i = blockDim.x * blockIdx.x + threadIdx.x;
i < spmat_param_list.rows;
i += stride)
{
float z_val = 0.f;
int diag_mat_col_start = (i / spmat_param_list.block_dim) * spmat_param_list.block_dim;
#pragma unroll
for (int k = 0; k < spmat_param_list.block_dim; k++)
{
int diag_mat_entry = spmat_param_list.block_dim * i + k; // row major
int diag_mat_col = diag_mat_col_start + k;
float precond_val = spmat_data_list.precond[diag_mat_entry];
float b_val = tex1Dfetch(vec_texture, diag_mat_col);
z_val += precond_val * b_val;
}
float r_val = tex1Dfetch(vec_texture, i);
x.at(i) = 0.f;
r.at(i) = r_val;
z.at(i) = z_val;
p.at(i) = z_val;
rz += r_val * z_val;
} // grid stride loop
__syncthreads();
rz = blockReduceSum(rz);
if (threadIdx.x == 0)
{
atomicAdd(&global_value_new, rz);
unsigned int total_blocks = gridDim.x;
unsigned int value = atomicInc(&blocks_done_new, total_blocks);
// last block
if (value == total_blocks - 1)
{
output_value_new = global_value_new;
global_value_new = 0.f;
blocks_done_new = 0;
}
}
}
float initPcgSolver(const SpMatParamList &spmat_param_list,
const SpMatDataList &spmat_data_list,
const DeviceArray<float> &b,
DeviceArray<float> &x,
DeviceArray<float> &r,
DeviceArray<float> &z,
DeviceArray<float> &p)
{
bindVecTexture(b);
int block = 256;
int grid = min(divUp(static_cast<int>(spmat_param_list.pad_rows), block), 512);
hipLaunchKernelGGL(( initPcgSolverKernel), dim3(grid), dim3(block), 0, 0, spmat_param_list,
spmat_data_list,
x.getHandle(),
r.getHandle(),
z.getHandle(),
p.getHandle());
checkCudaErrors(hipGetLastError());
unbindVecTexture();
float delta;
checkCudaErrors(hipMemcpyFromSymbol(&delta, output_value_new, sizeof(delta)));
return delta;
}
//////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////
__global__
void runMergedPcgStep1(const SpMatParamList spmat_param_list,
const SpMatDataList spmat_data_list,
DeviceArrayHandle<float> y)
{
const int stride = blockDim.x * gridDim.x;
float py = 0.f;
// grid stride loop
for (int i = blockDim.x * blockIdx.x + threadIdx.x;
i < spmat_param_list.rows;
i += stride)
{
float y_val = 0.f;
// off-diagonal
uint index = spmat_data_list.offsets[i];
uint index_end = spmat_data_list.offsets[i + warpSize];
while (index < index_end)
{
int col_begin = spmat_data_list.col_indices[index];
int col_end = col_begin + spmat_param_list.block_dim;
if (col_begin == 0xFFFFFFFF)
break;
#pragma unroll
for (int col = col_begin; col < col_end; col++)
{
float p_val = tex1Dfetch(vec_texture, col);
y_val += spmat_data_list.data[index] * p_val;
index += warpSize;
}
}
// diagonal
int diag_mat_col_start = (i / spmat_param_list.block_dim) * spmat_param_list.block_dim;
#pragma unroll
for (int k = 0; k < spmat_param_list.block_dim; k++)
{
int diag_mat_entry = spmat_param_list.block_dim * i + k;
int diag_mat_col = diag_mat_col_start + k;
float diag_val = spmat_data_list.diagonal[diag_mat_entry];
float p_val = tex1Dfetch(vec_texture, diag_mat_col);
y_val += diag_val * p_val;
}
y.at(i) = y_val;
float p_val = tex1Dfetch(vec_texture, i);
py += p_val * y_val;
} // grid stride loop
__syncthreads();
py = blockReduceSum(py);
if (threadIdx.x == 0)
{
atomicAdd(&global_value_new, py);
unsigned int total_blocks = gridDim.x;
unsigned int value = atomicInc(&blocks_done_new, total_blocks);
// last block
if (value == total_blocks - 1)
{
output_value_new = global_value_new;
global_value_new = 0.f;
blocks_done_new = 0;
}
}
}
__global__
void runMergedPcgStep2(float alpha,
int rows,
const DeviceArrayHandle<float> p,
const DeviceArrayHandle<float> y,
DeviceArrayHandle<float> x,
DeviceArrayHandle<float> r)
{
const int stride = blockDim.x * gridDim.x;
// grid stride loop
for (int i = blockDim.x * blockIdx.x + threadIdx.x;
i < rows;
i += stride)
{
float x_val = x.at(i) + alpha * p.at(i);
float r_val = r.at(i) - alpha * y.at(i);
x.at(i) = x_val;
r.at(i) = r_val;
} // grid stride loop
}
__global__
void runMergedPcgStep3(const SpMatParamList spmat_param_list,
const SpMatDataList spmat_data_list,
DeviceArrayHandle<float> z)
{
const int stride = blockDim.x * gridDim.x;
float rz = 0.f;
// grid stride loop
for (int i = blockDim.x * blockIdx.x + threadIdx.x;
i < spmat_param_list.rows;
i += stride)
{
float z_val = 0.f;
int diag_mat_col_start = (i / spmat_param_list.block_dim) * spmat_param_list.block_dim;
#pragma unroll
for (int k = 0; k < spmat_param_list.block_dim; k++)
{
int diag_mat_entry = spmat_param_list.block_dim * i + k;
int diag_mat_col = diag_mat_col_start + k;
float precond_val = spmat_data_list.precond[diag_mat_entry];
float r_val = tex1Dfetch(vec_texture, diag_mat_col);
z_val += precond_val * r_val;
}
z.at(i) = z_val;
float r_val = tex1Dfetch(vec_texture, i);
rz += r_val * z_val;
} // grid stride loop
__syncthreads();
rz = blockReduceSum(rz);
if (threadIdx.x == 0)
{
atomicAdd(&global_value_new, rz);
unsigned int total_blocks = gridDim.x;
unsigned int value = atomicInc(&blocks_done_new, total_blocks);
// last block
if (value == total_blocks - 1)
{
output_value_new = global_value_new;
global_value_new = 0.f;
blocks_done_new = 0;
}
}
}
__global__
void runMergedPcgStep4(float beta,
int rows,
const DeviceArrayHandle<float> z,
DeviceArrayHandle<float> p)
{
const int stride = blockDim.x * gridDim.x;
// grid stride loop
for (int i = blockDim.x * blockIdx.x + threadIdx.x;
i < rows;
i += stride)
{
float p_val = z.at(i) + beta * p.at(i);
p.at(i) = p_val;
} // grid stride loop
}
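// One merged PCG iteration, split across four kernels:
//   step 1: y = A * p (warp-interleaved off-diagonal entries plus the dense 6x6 diagonal blocks) and reduce dot(p, y)
//   step 2: x += alpha * p, r -= alpha * y, with alpha = delta_old / dot(p, y)
//   step 3: z = M^-1 * r and reduce delta_new = dot(r, z)
//   step 4: p = z + beta * p, with beta = delta_new / delta_old
// vec_texture is rebound to whichever vector (p, then r) the next kernel needs to read.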
float iteratePcgSolver(float delta_old,
const SpMatParamList &spmat_param_list,
const SpMatDataList &spmat_data_list,
const DeviceArray<float> &b,
DeviceArray<float> &x,
DeviceArray<float> &y,
DeviceArray<float> &r,
DeviceArray<float> &z,
DeviceArray<float> &p)
{
bindVecTexture(p);
int block = 256;
int grid = min(divUp(static_cast<int>(spmat_param_list.pad_rows), block), 512);
hipLaunchKernelGGL(( runMergedPcgStep1), dim3(grid), dim3(block), 0, 0, spmat_param_list,
spmat_data_list,
y.getHandle());
checkCudaErrors(hipGetLastError());
unbindVecTexture();
float py_dot;
checkCudaErrors(hipMemcpyFromSymbol(&py_dot, output_value_new, sizeof(py_dot)));
float alpha = delta_old / py_dot;
hipLaunchKernelGGL(( runMergedPcgStep2), dim3(grid), dim3(block), 0, 0, alpha,
static_cast<int>(spmat_param_list.rows),
p.getHandle(),
y.getHandle(),
x.getHandle(),
r.getHandle());
checkCudaErrors(hipGetLastError());
bindVecTexture(r);
hipLaunchKernelGGL(( runMergedPcgStep3), dim3(grid), dim3(block), 0, 0, spmat_param_list,
spmat_data_list,
z.getHandle());
checkCudaErrors(hipGetLastError());
unbindVecTexture();
float delta_new;
checkCudaErrors(hipMemcpyFromSymbol(&delta_new, output_value_new, sizeof(delta_new)));
float beta = delta_new / delta_old;
hipLaunchKernelGGL(( runMergedPcgStep4), dim3(grid), dim3(block), 0, 0, beta,
static_cast<int>(spmat_param_list.rows),
z.getHandle(),
p.getHandle());
checkCudaErrors(hipGetLastError());
return delta_new;
}
} // namespace gpu
} // namespace dynfu
|
da7ebbb484e22e9327a05caf26f83b2125e651fd.cu
|
#include <cugl_ros/apps/dynamic_fusion/cuda_internal.h>
#include <cugl_ros/apps/dynamic_fusion/cuda_impl/reduce.cuh>
namespace dynfu
{
namespace gpu
{
texture<float, 1, cudaReadModeElementType> vec_texture;
__device__ uint blocks_done_new = 0;
__device__ float global_value_new = 0.f;
__device__ float output_value_new;
//////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////
void bindVecTexture(const DeviceArray<float> &vec_data)
{
cudaChannelFormatDesc vec_desc = cudaCreateChannelDesc<float>();
checkCudaErrors(cudaBindTexture(0, vec_texture, reinterpret_cast<const float *>(vec_data.getDevicePtr()), vec_desc));
}
void unbindVecTexture()
{
checkCudaErrors(cudaUnbindTexture(vec_texture));
}
//////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////
__device__
bool computeMat6x6Inverse(float *s[6])
{
float scp[6];
#pragma unroll
for (int k = 0; k < 6; k++)
{
scp[k] = fabsf(s[k][0]);
for (int j = 1; j < 6; j++)
{
if (fabsf(s[k][j]) > scp[k])
{
scp[k] = fabsf(s[k][j]);
}
}
if (scp[k] == 0.f)
return false;
}
int pivot_to;
float scp_max;
#pragma unroll
for (int k = 0; k < 6; k++)
{
// select pivot row
pivot_to = k;
scp_max = fabsf(s[k][k] / scp[k]);
// find out which row should be on top
for (int p = k + 1; p < 6; p++)
{
if (fabsf(s[p][k] / scp[p]) > scp_max)
{
scp_max = fabsf(s[p][k] / scp[p]);
pivot_to = p;
}
}
// Pivot if necessary
if (pivot_to != k)
{
float *tmprow;
tmprow = s[k];
s[k] = s[pivot_to];
s[pivot_to] = tmprow;
float tmpscp;
tmpscp = scp[k];
scp[k] = scp[pivot_to];
scp[pivot_to] = tmpscp;
}
float mjk;
// perform gaussian elimination
for (int j = k + 1; j < 6; j++)
{
mjk = s[j][k] / s[k][k];
s[j][k] = 0.f;
for (int jj = k + 1; jj < 12; jj++)
{
s[j][jj] -= mjk * s[k][jj];
}
}
}
if (fabsf(s[5][5]) < 5e-3)
{
return false; // singular matrix!
}
//
// Now we have an upper triangular matrix.
//
// x x x x x x | y y y y y y
// 0 x x x x x | y y y y y y
// 0 0 x x x x | y y y y y y
// 0 0 0 x x x | y y y y y y
// 0 0 0 0 x x | y y y y y y
// 0 0 0 0 0 x | y y y y y y
//
// we'll back substitute to get the inverse
//
// 1 0 0 0 0 0 | z z z z z z
// 0 1 0 0 0 0 | z z z z z z
// 0 0 1 0 0 0 | z z z z z z
// 0 0 0 1 0 0 | z z z z z z
// 0 0 0 0 1 0 | z z z z z z
// 0 0 0 0 0 1 | z z z z z z
//
float mjk;
for (int k = 5; k > 0; k--)
{
for (int j = k - 1; j > -1; j--)
{
mjk = s[j][k] / s[k][k];
for (int jj= j + 1; jj < 12; jj++)
{
s[j][jj] -= mjk * s[k][jj];
}
}
}
return true;
}
__device__
void writeDiagBlockInverse(float *src[6], int entry_start, float *dst)
{
int entry = entry_start;
for (int row = 0; row < 6; row++)
{
for (int col = 0; col < 6; col++)
{
dst[entry++] = src[row][col+6] / src[row][row];
}
}
}
__device__
void writeDiagInverse(int entry_start, const float *src, float *dst)
{
int entry = entry_start;
for (int row = 0; row < 6; row++)
{
for (int col = 0; col < 6; col++)
{
if (row == col)
{
float val = src[entry];
float val_inv = (fabsf(val) < 1e-4) ? 0.f : (1.f / val);
dst[entry] = val_inv;
}
else
{
dst[entry] = 0.f;
}
entry++;
}
}
}
__global__
void computePrecondKernel(int precond_mat_block_size,
float damping,
const SpMatParamList spmat_param_list,
SpMatDataList spmat_data_list)
{
const int stride = blockDim.x * gridDim.x;
// grid stride loop
for (int i = blockDim.x * blockIdx.x + threadIdx.x;
i < precond_mat_block_size;
i += stride)
{
float r1[12], r2[12], r3[12], r4[12], r5[12], r6[12];
float *s[6];
s[0] = &r1[0];
s[1] = &r2[0];
s[2] = &r3[0];
s[3] = &r4[0];
s[4] = &r5[0];
s[5] = &r6[0];
// initialization
int diag_mat_entry_start = 36 * i;
int diag_mat_entry = diag_mat_entry_start;
for (int row = 0; row < 6; row++)
{
#pragma unroll
for (int col = 0; col < 6; col++)
{
if (row == col)
{
float inc = (row < 3) ? (2.f * damping) : damping;
spmat_data_list.diagonal[diag_mat_entry] += inc;
s[row][col] = spmat_data_list.diagonal[diag_mat_entry];
s[row][col+6] = 1.f;
}
else
{
s[row][col] = spmat_data_list.diagonal[diag_mat_entry];
s[row][col+6] = 0.f;
}
diag_mat_entry++;
}
}
bool find_inverse = computeMat6x6Inverse(s);
if (find_inverse)
{
writeDiagBlockInverse(s, diag_mat_entry_start, spmat_data_list.precond);
}
else
{
writeDiagInverse(diag_mat_entry_start, spmat_data_list.diagonal, spmat_data_list.precond);
}
} // grid stride loop
}
void computePreconditioner(float damping,
const SpMatParamList &spmat_param_list,
SpMatDataList &spmat_data_list)
{
int precond_mat_block_size = static_cast<int>(spmat_param_list.rows / spmat_param_list.block_dim);
int block = 256;
int grid = min(divUp(precond_mat_block_size, block), 512);
computePrecondKernel<<<grid, block>>>(precond_mat_block_size,
damping,
spmat_param_list,
spmat_data_list);
checkCudaErrors(cudaGetLastError());
}
//////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////
__global__
void initPcgSolverKernel(const SpMatParamList spmat_param_list,
const SpMatDataList spmat_data_list,
DeviceArrayHandle<float> x,
DeviceArrayHandle<float> r,
DeviceArrayHandle<float> z,
DeviceArrayHandle<float> p)
{
const int stride = blockDim.x * gridDim.x;
float rz = 0.f;
// grid stride loop
for (int i = blockDim.x * blockIdx.x + threadIdx.x;
i < spmat_param_list.rows;
i += stride)
{
float z_val = 0.f;
int diag_mat_col_start = (i / spmat_param_list.block_dim) * spmat_param_list.block_dim;
#pragma unroll
for (int k = 0; k < spmat_param_list.block_dim; k++)
{
int diag_mat_entry = spmat_param_list.block_dim * i + k; // row major
int diag_mat_col = diag_mat_col_start + k;
float precond_val = spmat_data_list.precond[diag_mat_entry];
float b_val = tex1Dfetch(vec_texture, diag_mat_col);
z_val += precond_val * b_val;
}
float r_val = tex1Dfetch(vec_texture, i);
x.at(i) = 0.f;
r.at(i) = r_val;
z.at(i) = z_val;
p.at(i) = z_val;
rz += r_val * z_val;
} // grid stride loop
__syncthreads();
rz = blockReduceSum(rz);
if (threadIdx.x == 0)
{
atomicAdd(&global_value_new, rz);
unsigned int total_blocks = gridDim.x;
unsigned int value = atomicInc(&blocks_done_new, total_blocks);
// last block
if (value == total_blocks - 1)
{
output_value_new = global_value_new;
global_value_new = 0.f;
blocks_done_new = 0;
}
}
}
float initPcgSolver(const SpMatParamList &spmat_param_list,
const SpMatDataList &spmat_data_list,
const DeviceArray<float> &b,
DeviceArray<float> &x,
DeviceArray<float> &r,
DeviceArray<float> &z,
DeviceArray<float> &p)
{
bindVecTexture(b);
int block = 256;
int grid = min(divUp(static_cast<int>(spmat_param_list.pad_rows), block), 512);
initPcgSolverKernel<<<grid, block>>>(spmat_param_list,
spmat_data_list,
x.getHandle(),
r.getHandle(),
z.getHandle(),
p.getHandle());
checkCudaErrors(cudaGetLastError());
unbindVecTexture();
float delta;
checkCudaErrors(cudaMemcpyFromSymbol(&delta, output_value_new, sizeof(delta)));
return delta;
}
//////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////
__global__
void runMergedPcgStep1(const SpMatParamList spmat_param_list,
const SpMatDataList spmat_data_list,
DeviceArrayHandle<float> y)
{
const int stride = blockDim.x * gridDim.x;
float py = 0.f;
// grid stride loop
for (int i = blockDim.x * blockIdx.x + threadIdx.x;
i < spmat_param_list.rows;
i += stride)
{
float y_val = 0.f;
// off-diagonal
uint index = spmat_data_list.offsets[i];
uint index_end = spmat_data_list.offsets[i + warpSize];
while (index < index_end)
{
int col_begin = spmat_data_list.col_indices[index];
int col_end = col_begin + spmat_param_list.block_dim;
if (col_begin == 0xFFFFFFFF)
break;
#pragma unroll
for (int col = col_begin; col < col_end; col++)
{
float p_val = tex1Dfetch(vec_texture, col);
y_val += spmat_data_list.data[index] * p_val;
index += warpSize;
}
}
// diagonal
int diag_mat_col_start = (i / spmat_param_list.block_dim) * spmat_param_list.block_dim;
#pragma unroll
for (int k = 0; k < spmat_param_list.block_dim; k++)
{
int diag_mat_entry = spmat_param_list.block_dim * i + k;
int diag_mat_col = diag_mat_col_start + k;
float diag_val = spmat_data_list.diagonal[diag_mat_entry];
float p_val = tex1Dfetch(vec_texture, diag_mat_col);
y_val += diag_val * p_val;
}
y.at(i) = y_val;
float p_val = tex1Dfetch(vec_texture, i);
py += p_val * y_val;
} // grid stride loop
__syncthreads();
py = blockReduceSum(py);
if (threadIdx.x == 0)
{
atomicAdd(&global_value_new, py);
unsigned int total_blocks = gridDim.x;
unsigned int value = atomicInc(&blocks_done_new, total_blocks);
// last block
if (value == total_blocks - 1)
{
output_value_new = global_value_new;
global_value_new = 0.f;
blocks_done_new = 0;
}
}
}
__global__
void runMergedPcgStep2(float alpha,
int rows,
const DeviceArrayHandle<float> p,
const DeviceArrayHandle<float> y,
DeviceArrayHandle<float> x,
DeviceArrayHandle<float> r)
{
const int stride = blockDim.x * gridDim.x;
// grid stride loop
for (int i = blockDim.x * blockIdx.x + threadIdx.x;
i < rows;
i += stride)
{
float x_val = x.at(i) + alpha * p.at(i);
float r_val = r.at(i) - alpha * y.at(i);
x.at(i) = x_val;
r.at(i) = r_val;
} // grid stride loop
}
__global__
void runMergedPcgStep3(const SpMatParamList spmat_param_list,
const SpMatDataList spmat_data_list,
DeviceArrayHandle<float> z)
{
const int stride = blockDim.x * gridDim.x;
float rz = 0.f;
// grid stride loop
for (int i = blockDim.x * blockIdx.x + threadIdx.x;
i < spmat_param_list.rows;
i += stride)
{
float z_val = 0.f;
int diag_mat_col_start = (i / spmat_param_list.block_dim) * spmat_param_list.block_dim;
#pragma unroll
for (int k = 0; k < spmat_param_list.block_dim; k++)
{
int diag_mat_entry = spmat_param_list.block_dim * i + k;
int diag_mat_col = diag_mat_col_start + k;
float precond_val = spmat_data_list.precond[diag_mat_entry];
float r_val = tex1Dfetch(vec_texture, diag_mat_col);
z_val += precond_val * r_val;
}
z.at(i) = z_val;
float r_val = tex1Dfetch(vec_texture, i);
rz += r_val * z_val;
} // grid stride loop
__syncthreads();
rz = blockReduceSum(rz);
if (threadIdx.x == 0)
{
atomicAdd(&global_value_new, rz);
unsigned int total_blocks = gridDim.x;
unsigned int value = atomicInc(&blocks_done_new, total_blocks);
// last block
if (value == total_blocks - 1)
{
output_value_new = global_value_new;
global_value_new = 0.f;
blocks_done_new = 0;
}
}
}
__global__
void runMergedPcgStep4(float beta,
int rows,
const DeviceArrayHandle<float> z,
DeviceArrayHandle<float> p)
{
const int stride = blockDim.x * gridDim.x;
// grid stride loop
for (int i = blockDim.x * blockIdx.x + threadIdx.x;
i < rows;
i += stride)
{
float p_val = z.at(i) + beta * p.at(i);
p.at(i) = p_val;
} // grid stride loop
}
float iteratePcgSolver(float delta_old,
const SpMatParamList &spmat_param_list,
const SpMatDataList &spmat_data_list,
const DeviceArray<float> &b,
DeviceArray<float> &x,
DeviceArray<float> &y,
DeviceArray<float> &r,
DeviceArray<float> &z,
DeviceArray<float> &p)
{
bindVecTexture(p);
int block = 256;
int grid = min(divUp(static_cast<int>(spmat_param_list.pad_rows), block), 512);
runMergedPcgStep1<<<grid, block>>>(spmat_param_list,
spmat_data_list,
y.getHandle());
checkCudaErrors(cudaGetLastError());
unbindVecTexture();
float py_dot;
checkCudaErrors(cudaMemcpyFromSymbol(&py_dot, output_value_new, sizeof(py_dot)));
float alpha = delta_old / py_dot;
runMergedPcgStep2<<<grid, block>>>(alpha,
static_cast<int>(spmat_param_list.rows),
p.getHandle(),
y.getHandle(),
x.getHandle(),
r.getHandle());
checkCudaErrors(cudaGetLastError());
bindVecTexture(r);
runMergedPcgStep3<<<grid, block>>>(spmat_param_list,
spmat_data_list,
z.getHandle());
checkCudaErrors(cudaGetLastError());
unbindVecTexture();
float delta_new;
checkCudaErrors(cudaMemcpyFromSymbol(&delta_new, output_value_new, sizeof(delta_new)));
float beta = delta_new / delta_old;
runMergedPcgStep4<<<grid, block>>>(beta,
static_cast<int>(spmat_param_list.rows),
z.getHandle(),
p.getHandle());
checkCudaErrors(cudaGetLastError());
return delta_new;
}
} // namespace gpu
} // namespace dynfu
|
74e87c4c05294e65d3e9e690e38b2381f7599c98.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <hipfft.h>
#include "psv.h"
#include "const.h"
#include "hip/hip_runtime.h"
#include "plot.h"
#include "malloc.h"
int main(int argc , char *argv[]) {
//output file name
char *oname="opsv";
char *wname="wpsv";
int i,j;
int calculate,out,wav;
if(argc==2){
calculate=0;
out=0;
wav=0;
for(i=0;argv[1][i]!='\0';i++){
if(argv[1][i]=='o') out=1;
else if(argv[1][i]=='w') wav=1;
else if(argv[1][i]=='c') calculate=1;
}
}
else{
calculate=1;
out=0;
wav=1;
}
//calculate
if(calculate){
FILE *wfile=fopen(wname,"w");
FILE *ofile=fopen(oname,"w");
// dimension
float **sxx=cudaMat(nx,ny),**sxy=cudaMat(nx,ny),**syy=cudaMat(nx,ny);
float **den=cudaMat(nx2,ny2),**rig=cudaMat(nx2,ny2),**lam=cudaMat(nx2,ny2);
float **ux=cudaMat(nx,ny),**uy=cudaMat(nx,ny);
float **vx=cudaMat(nx,ny),**vy=cudaMat(nx,ny);
float **up=floatMat(nx,ny),**us=floatMat(nx,ny);
float **dxux=cudaMat(nx,ny),**dxuy=cudaMat(nx,ny);
float **dyux=cudaMat(nx,ny),**dyuy=cudaMat(nx,ny);
float **dxvx=cudaMat(nx,ny),**dxvy=cudaMat(nx,ny);
float **dyvx=cudaMat(nx,ny),**dyvy=cudaMat(nx,ny);
float **dxsxx=cudaMat(nx,ny),**dxsxy=cudaMat(nx,ny);
float **dysxy=cudaMat(nx,ny),**dysyy=cudaMat(nx,ny);
float **ggg=cudaMat(nx,ny);
float **dvp=floatMat(2,nx),**dvs=floatMat(2,nx),**dden=floatMat(2,nx);
float **cxwork=cudaMat(nx,ny),**cywork=cudaMat(nx,ny);
float **uxall=floatMat(nst,ntskp),**uyall=floatMat(nst,ntskp);
float *gx=floatVec(nx),*gy=floatVec(ny);
int *istx=intVec(nst),*isty=intVec(nst),**imap=intMat(nx,ny);
for(i=0;i<nst;i++){
istx[i]=i*4+1;
isty[i]=na+1;
}
// velocity structure
float VPB = 6.9;
float VSB = 3.9;
float ROB = 3.1;
float RRIGB = ROB * VSB*VSB;
float RLANB = ROB * VPB*VPB - 2.0 * RRIGB;
float RDENB = ROB;
for(i=0;i<nx2;i++){
for(j=0;j<ny2;j++){
rig[i][j]=RRIGB;
den[i][j]=RDENB;
lam[i][j]=RLANB;
}
}
for(i=0;i<nx;i++){
for(j=0;j<ny;j++){
imap[i][j]=0;
}
}
for(i=0;i<nst;i++){
imap[istx[i]][isty[i]]=7;
}
// initialize
int kx=nbegi2(nx);
int ky=nbegi2(ny);
for(i=0;i<nx;i++){
gx[i]=dx*(i+1);
}
for(i=0;i<ny;i++){
gy[i]=dy*(i+1);
}
float ftmax=t0+at*2;
clear(vx,nx,ny,0.0);
clear(vy,nx,ny,0.0);
clear(ux,nx,ny,0.0);
clear(uy,nx,ny,0.0);
clear(sxx,nx,ny,0.0);
clear(sxy,nx,ny,0.0);
clear(syy,nx,ny,0.0);
// absorbing boundary condition
float apara=0.015;
float gg;
for(i=0;i<nx;i++){
for(j=0;j<ny;j++){
if(i+1<nxa){
gg=exp(-pow(apara*(nxa-i-1),2));
}
else if(i+1>(nx-nxa+1)){
gg=exp(-pow(apara*(i-nx+nxa),2));
}
else if(j+1>(ny-nya+1)){
gg=exp(-pow(apara*(j-ny+nya),2));
}
else{
gg=1.0;
}
ggg[i][j]=gg;
}
}
hipfftHandle plan;
hipfftComplex *data;
int dimension[1]={nx};
hipfftPlanMany(&plan,1,dimension,NULL,1,1,NULL,1,1,HIPFFT_C2C,ny*2);
hipMallocManaged((void**)&data, sizeof(hipfftComplex)*nx*ny*2);
//time step start
int ntw=0;
int ntt=0;
float t;
clock_t start0;
float c0=9.0/8.0;
float c1=1.0/24.0;
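// c0 and c1 are the standard 4th-order staggered-grid finite-difference coefficients (9/8 and 1/24).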
start0=clock();
for(int it=0;it<ntmax;it++){
if(it%((int)ntmax/10)==0) printf("%d%%\n",10*it/((int)ntmax/10));
ntt++;
t=dt*it;
ntw++;
diffxspm(vx,dxvx,vy,dxvy,plan,data,nx,ny,dx);
hipLaunchKernelGGL(( cudaFinidyyx), dim3(2*nx*nbt),dim3(ny/nbt), 0, 0, vy,dyvy,vx,dyvx,nx,ny,dx,dy,dt,c0,c1);
hipDeviceSynchronize();
hipLaunchKernelGGL(( cudaPrep), dim3(nx*nbt),dim3(ny/nbt), 0, 0, sxx,syy,sxy,lam,rig,ggg,dxvx,dxvy,dyvx,dyvy);
hipDeviceSynchronize();
diffxspm(sxy,dxsxy,sxx,dxsxx,plan,data,nx,ny,dx);
hipLaunchKernelGGL(( cudaFinidyyx), dim3(2*nx*nbt),dim3(ny/nbt), 0, 0, sxy,dysxy,syy,dysyy,nx,ny,dx,dy,dt,c0,c1);
hipDeviceSynchronize();
hipLaunchKernelGGL(( cudaCalc), dim3(nx*nbt),dim3(ny/nbt), 0, 0, vx,vy,ux,uy,dxsxx,dxsxy,dysxy,dysyy,ggg,den,t,
ftmax,rmxx,rmxy,rmyx,rmyy,fxx,fzz,dpxx,dpzz);
hipDeviceSynchronize();
if(ntt==nskip){
int isx,isy,it1;
for(int ns=0;ns<nst;ns++){
ntt=0;
isx=istx[ns]-1;
isy=isty[ns]-1;
it1=(it+1)/nskip;
uxall[ns][it1]=ux[isx][isy];
uyall[ns][it1]=uy[isx][isy];
}
}
if(ntw==nwrite){
ntw=0;
diffxspm(ux,dxux,uy,dxuy,plan,data,nx,ny,dx);
hipLaunchKernelGGL(( cudaFinidyyx), dim3(2*nx*nbt),dim3(ny/nbt), 0, 0, uy,dyuy,ux,dyux,nx,ny,dx,dy,dt,c0,c1);
hipDeviceSynchronize();
for(i=0;i<nx;i++){
for(j=0;j<ny;j++){
up[i][j]=dxux[i][j]+dyuy[i][j];
us[i][j]=dxuy[i][j]-dyux[i][j];
}
}
fprintMat(ofile,up,nx,ny);
fprintMat(ofile,us,nx,ny);
}
}
fprintMat(wfile,uxall,nst,ntskp);
fprintMat(wfile,uyall,nst,ntskp);
printf("100%%\n%.2f\n",(double)(clock()-start0)/CLOCKS_PER_SEC);
}
if(out){
snapPSV(oname);
}
if(wav){
wavePSV(wname);
}
return 0;
}
|
74e87c4c05294e65d3e9e690e38b2381f7599c98.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <cufft.h>
#include "psv.h"
#include "const.h"
#include "cuda.h"
#include "plot.h"
#include "malloc.h"
int main(int argc , char *argv[]) {
//output file name
char *oname="opsv";
char *wname="wpsv";
int i,j;
int calculate,out,wav;
if(argc==2){
calculate=0;
out=0;
wav=0;
for(i=0;argv[1][i]!='\0';i++){
if(argv[1][i]=='o') out=1;
else if(argv[1][i]=='w') wav=1;
else if(argv[1][i]=='c') calculate=1;
}
}
else{
calculate=1;
out=0;
wav=1;
}
//calculate
if(calculate){
FILE *wfile=fopen(wname,"w");
FILE *ofile=fopen(oname,"w");
// dimension
float **sxx=cudaMat(nx,ny),**sxy=cudaMat(nx,ny),**syy=cudaMat(nx,ny);
float **den=cudaMat(nx2,ny2),**rig=cudaMat(nx2,ny2),**lam=cudaMat(nx2,ny2);
float **ux=cudaMat(nx,ny),**uy=cudaMat(nx,ny);
float **vx=cudaMat(nx,ny),**vy=cudaMat(nx,ny);
float **up=floatMat(nx,ny),**us=floatMat(nx,ny);
float **dxux=cudaMat(nx,ny),**dxuy=cudaMat(nx,ny);
float **dyux=cudaMat(nx,ny),**dyuy=cudaMat(nx,ny);
float **dxvx=cudaMat(nx,ny),**dxvy=cudaMat(nx,ny);
float **dyvx=cudaMat(nx,ny),**dyvy=cudaMat(nx,ny);
float **dxsxx=cudaMat(nx,ny),**dxsxy=cudaMat(nx,ny);
float **dysxy=cudaMat(nx,ny),**dysyy=cudaMat(nx,ny);
float **ggg=cudaMat(nx,ny);
float **dvp=floatMat(2,nx),**dvs=floatMat(2,nx),**dden=floatMat(2,nx);
float **cxwork=cudaMat(nx,ny),**cywork=cudaMat(nx,ny);
float **uxall=floatMat(nst,ntskp),**uyall=floatMat(nst,ntskp);
float *gx=floatVec(nx),*gy=floatVec(ny);
int *istx=intVec(nst),*isty=intVec(nst),**imap=intMat(nx,ny);
for(i=0;i<nst;i++){
istx[i]=i*4+1;
isty[i]=na+1;
}
// velocity structure
float VPB = 6.9;
float VSB = 3.9;
float ROB = 3.1;
float RRIGB = ROB * VSB*VSB;
float RLANB = ROB * VPB*VPB - 2.0 * RRIGB;
float RDENB = ROB;
for(i=0;i<nx2;i++){
for(j=0;j<ny2;j++){
rig[i][j]=RRIGB;
den[i][j]=RDENB;
lam[i][j]=RLANB;
}
}
for(i=0;i<nx;i++){
for(j=0;j<ny;j++){
imap[i][j]=0;
}
}
for(i=0;i<nst;i++){
imap[istx[i]][isty[i]]=7;
}
// initialize
int kx=nbegi2(nx);
int ky=nbegi2(ny);
for(i=0;i<nx;i++){
gx[i]=dx*(i+1);
}
for(i=0;i<ny;i++){
gy[i]=dy*(i+1);
}
float ftmax=t0+at*2;
clear(vx,nx,ny,0.0);
clear(vy,nx,ny,0.0);
clear(ux,nx,ny,0.0);
clear(uy,nx,ny,0.0);
clear(sxx,nx,ny,0.0);
clear(sxy,nx,ny,0.0);
clear(syy,nx,ny,0.0);
// absorbing boundary condition
float apara=0.015;
float gg;
for(i=0;i<nx;i++){
for(j=0;j<ny;j++){
if(i+1<nxa){
gg=exp(-pow(apara*(nxa-i-1),2));
}
else if(i+1>(nx-nxa+1)){
gg=exp(-pow(apara*(i-nx+nxa),2));
}
else if(j+1>(ny-nya+1)){
gg=exp(-pow(apara*(j-ny+nya),2));
}
else{
gg=1.0;
}
ggg[i][j]=gg;
}
}
cufftHandle plan;
cufftComplex *data;
int dimension[1]={nx};
cufftPlanMany(&plan,1,dimension,NULL,1,1,NULL,1,1,CUFFT_C2C,ny*2);
cudaMallocManaged((void**)&data, sizeof(cufftComplex)*nx*ny*2);
//time step start
int ntw=0;
int ntt=0;
float t;
clock_t start0;
float c0=9.0/8.0;
float c1=1.0/24.0;
start0=clock();
for(int it=0;it<ntmax;it++){
if(it%((int)ntmax/10)==0) printf("%d%%\n",10*it/((int)ntmax/10));
ntt++;
t=dt*it;
ntw++;
diffxspm(vx,dxvx,vy,dxvy,plan,data,nx,ny,dx);
cudaFinidyyx<<<2*nx*nbt,ny/nbt>>>(vy,dyvy,vx,dyvx,nx,ny,dx,dy,dt,c0,c1);
cudaDeviceSynchronize();
cudaPrep<<<nx*nbt,ny/nbt>>>(sxx,syy,sxy,lam,rig,ggg,dxvx,dxvy,dyvx,dyvy);
cudaDeviceSynchronize();
diffxspm(sxy,dxsxy,sxx,dxsxx,plan,data,nx,ny,dx);
cudaFinidyyx<<<2*nx*nbt,ny/nbt>>>(sxy,dysxy,syy,dysyy,nx,ny,dx,dy,dt,c0,c1);
cudaDeviceSynchronize();
cudaCalc<<<nx*nbt,ny/nbt>>>(vx,vy,ux,uy,dxsxx,dxsxy,dysxy,dysyy,ggg,den,t,
ftmax,rmxx,rmxy,rmyx,rmyy,fxx,fzz,dpxx,dpzz);
cudaDeviceSynchronize();
if(ntt==nskip){
int isx,isy,it1;
for(int ns=0;ns<nst;ns++){
ntt=0;
isx=istx[ns]-1;
isy=isty[ns]-1;
it1=(it+1)/nskip;
uxall[ns][it1]=ux[isx][isy];
uyall[ns][it1]=uy[isx][isy];
}
}
if(ntw==nwrite){
ntw=0;
diffxspm(ux,dxux,uy,dxuy,plan,data,nx,ny,dx);
cudaFinidyyx<<<2*nx*nbt,ny/nbt>>>(uy,dyuy,ux,dyux,nx,ny,dx,dy,dt,c0,c1);
cudaDeviceSynchronize();
for(i=0;i<nx;i++){
for(j=0;j<ny;j++){
up[i][j]=dxux[i][j]+dyuy[i][j];
us[i][j]=dxuy[i][j]-dyux[i][j];
}
}
fprintMat(ofile,up,nx,ny);
fprintMat(ofile,us,nx,ny);
}
}
fprintMat(wfile,uxall,nst,ntskp);
fprintMat(wfile,uyall,nst,ntskp);
printf("100%%\n%.2f\n",(double)(clock()-start0)/CLOCKS_PER_SEC);
}
if(out){
snapPSV(oname);
}
if(wav){
wavePSV(wname);
}
return 0;
}
|
3ad3ea2f78c882db64fe360a62cee538760e29d2.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "hiprand/hiprand.h"
#include "rocblas.h"
extern "C" {
#include "dropout_layer.h"
#include "hip/hip_runtime.h"
#include "utils.h"
}
#if GPU
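// Inverted-dropout kernel: elements whose random draw falls below prob are zeroed, and the survivors are multiplied by scale (normally 1/(1-prob)) so the expected activation is unchanged.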
__global__ void yoloswag420blazeit360noscope(float *input, int size, float *rand, float prob, float scale)
{
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(id < size) input[id] = (rand[id] < prob) ? 0 : input[id]*scale;
}
void forward_dropout_layer_gpu(dropout_layer layer, network net)
{
if (!net.train) return;
int size = layer.inputs*layer.batch;
cuda_random(layer.rand_gpu, size);
/*
int i;
for(i = 0; i < size; ++i){
layer.rand[i] = rand_uniform();
}
cuda_push_array(layer.rand_gpu, layer.rand, size);
*/
hipLaunchKernelGGL(( yoloswag420blazeit360noscope), dim3(cuda_gridsize(size)), dim3(BLOCK), 0, 0, net.input_gpu, size, layer.rand_gpu, layer.probability, layer.scale);
check_error(hipPeekAtLastError());
}
void backward_dropout_layer_gpu(dropout_layer layer, network net)
{
if(!net.delta_gpu) return;
int size = layer.inputs*layer.batch;
hipLaunchKernelGGL(( yoloswag420blazeit360noscope), dim3(cuda_gridsize(size)), dim3(BLOCK), 0, 0, net.delta_gpu, size, layer.rand_gpu, layer.probability, layer.scale);
check_error(hipPeekAtLastError());
}
#endif
|
3ad3ea2f78c882db64fe360a62cee538760e29d2.cu
|
#include "cuda_runtime.h"
#include "curand.h"
#include "cublas_v2.h"
extern "C" {
#include "dropout_layer.h"
#include "cuda.h"
#include "utils.h"
}
#if GPU
__global__ void yoloswag420blazeit360noscope(float *input, int size, float *rand, float prob, float scale)
{
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(id < size) input[id] = (rand[id] < prob) ? 0 : input[id]*scale;
}
void forward_dropout_layer_gpu(dropout_layer layer, network net)
{
if (!net.train) return;
int size = layer.inputs*layer.batch;
cuda_random(layer.rand_gpu, size);
/*
int i;
for(i = 0; i < size; ++i){
layer.rand[i] = rand_uniform();
}
cuda_push_array(layer.rand_gpu, layer.rand, size);
*/
yoloswag420blazeit360noscope<<<cuda_gridsize(size), BLOCK>>>(net.input_gpu, size, layer.rand_gpu, layer.probability, layer.scale);
check_error(cudaPeekAtLastError());
}
void backward_dropout_layer_gpu(dropout_layer layer, network net)
{
if(!net.delta_gpu) return;
int size = layer.inputs*layer.batch;
yoloswag420blazeit360noscope<<<cuda_gridsize(size), BLOCK>>>(net.delta_gpu, size, layer.rand_gpu, layer.probability, layer.scale);
check_error(cudaPeekAtLastError());
}
#endif
|
435bd2c86449639da8e9c307867482dd209e9dc2.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <thrust/sort.h>
/*
nvcc -O3 -arch=sm_30 -o cuda_monkey monkey.cu
*/
unsigned int print2Smallest(unsigned int *arr, unsigned int arr_size)
{
unsigned int i, first, second;
/* There should be at least two elements */
if (arr_size < 2)
{
printf(" Invalid Input ");
return 0;
}
// Error was here, before we had INT_MAX which is too low for >9 sailors
first = second = UINT_MAX;
for (i = 0; i < arr_size ; i ++)
{
/* If current element is smaller than first
then update both first and second */
if (arr[i] < first)
{
second = first;
first = arr[i];
}
/* If arr[i] is in between first and second
then update second */
else if (arr[i] < second && arr[i] != first)
second = arr[i];
}
if (second == UINT_MAX)
return first;
else
return second;
}
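// Brute-force search of the monkey-and-coconuts puzzle: each odd candidate count (i + extra) must survive `sailors` rounds in which one coconut per monkey is given away and the remainder splits evenly into `sailors` shares, with the final pile also divisible by `sailors`.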
__global__
void monkey(unsigned long long int *coconuts, unsigned long long int extra, unsigned int *the_solutions, unsigned int *found, unsigned int sailors, unsigned int monkeys, unsigned int n)
{
if (found[0] == 0){
unsigned int j;
for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i<n; i+=blockDim.x*gridDim.x){
coconuts[i] = i + extra;
if (coconuts[i]%2!=0){
// Go through the number of sailors
for (j=0; j<sailors;j++){
// One for each monkey
coconuts[i] -= monkeys;
if (coconuts[i] % sailors != 0){
break;
}
coconuts[i] -= coconuts[i]/sailors;
}
if (coconuts[i] % sailors == 0){
found[0] = 1;
the_solutions[i] = i;
}
}
}
}
}
// Main method
int main()
{
clock_t start, diff;
// Size of array.
unsigned int SIZE = pow(2,25);
// CPU memory pointers
unsigned long long int *h_coc, da_solu=0;
unsigned int *h_found, *h_solutions;
// GPU memory pointers
unsigned long long int *d_coc, extra = 0;
unsigned int *d_found, *d_solutions;
// Allocate the space, CPU
h_coc = (unsigned long long int *)malloc(SIZE*sizeof(unsigned long long int));
//h_solutions = (unsigned int *)malloc(SIZE*sizeof(unsigned int));
hipHostMalloc((void**)&h_solutions, SIZE*sizeof(unsigned int), hipHostMallocDefault);
h_found = (unsigned int *)malloc(1*sizeof(unsigned int));
// Choose to run on secondary GPU
hipSetDevice(1);
// Allocate the space, GPU
hipMalloc(&d_coc, SIZE*sizeof(unsigned long long int));
hipMalloc(&d_found, 1*sizeof(unsigned int));
hipMalloc(&d_solutions, SIZE*sizeof(unsigned int));
// hipMemset can be used for initializing data (say, all zeros); it is about 10 times faster than copying a zeroed array with hipMemcpy because it is done on the GPU directly.
hipMemset(d_solutions, 0, SIZE*sizeof(unsigned int));
unsigned int monkeys = 1;
unsigned int max_sailors = 5;
// Start timer
start = clock();
/*
If the number of monkeys is even, the solution will be even; otherwise it will be odd.
Ideally the kernel would therefore only search even (or only odd) candidates.
At the moment we pass the current offset of coconuts to search from in a very speed-friendly but crude way; this should be done more cleanly.
Restricting the search to one parity would cut the workload in half, so it should take roughly half as long.
*/
// Run the loop
for (unsigned int sailors=2; sailors<max_sailors+1;sailors++){
printf("Running %u sailors, %u monkeys", sailors, monkeys);
// Send back that we want to look for a new solution
h_found[0] = 0;
hipMemset(d_found, 0, 1*sizeof(unsigned int));
// Run this loop until a solution is found for this sailor & monkey combination
while (h_found[0] == 0){
// Calling kernel (gridsize, blocksize)
hipLaunchKernelGGL(( monkey), dim3((SIZE + 255) / 256), dim3(256), 0, 0, d_coc, extra, d_solutions, d_found, sailors, monkeys, SIZE);
// Copy back result (Device to Host).
hipMemcpy(h_found, d_found, 1*sizeof(unsigned int), hipMemcpyDeviceToHost);
if (h_found[0] == 1){
// Copy back result (Device to Host). This is pinned memory so +6 Gb/s
hipMemcpy(h_solutions, d_solutions, SIZE*sizeof(unsigned int), hipMemcpyDeviceToHost);
//hipMemcpyAsync(h_solutions, d_solutions, SIZE*sizeof(unsigned int), hipMemcpyDeviceToHost, 0);
//hipDeviceSynchronize();
// Get second smallest in solutions array and recast
// possibly do this on gpu as well
da_solu = (unsigned long long int) print2Smallest(h_solutions, SIZE);
printf("\nSolution: %llu coconuts to begin with\n\n", da_solu+extra);
if (sailors != max_sailors){
// Set solution array to zero again
hipMemset(d_solutions, 0, SIZE*sizeof(unsigned int));
}
}
else{
extra +=SIZE;
//printf(".");
}
}
// Assume that result for 5 sailors is larger than for 4 sailors and so on..
extra += da_solu;
}
// watch -n 0.5 "nvidia-settings -q GPUUtilization -q useddedicatedgpumemory"
// Print execution time
diff = clock() - start;
double totalt = (double)diff/CLOCKS_PER_SEC;
printf("Totalt: %f s\n", totalt);
// Free the allocated memory
free(h_coc);
free(h_found);
//free(h_solutions);
// Pinned memory needs to be released with the command
hipHostFree(h_solutions);
// Free GPU memory
hipFree(d_coc);
hipFree(d_found);
hipFree(d_solutions);
// hipDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling hipDeviceReset causes all profile data to be
// flushed before the application exits
hipDeviceReset();
return 0;
}
|
435bd2c86449639da8e9c307867482dd209e9dc2.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <thrust/sort.h>
/*
nvcc -O3 -arch=sm_30 -o cuda_monkey monkey.cu
*/
unsigned int print2Smallest(unsigned int *arr, unsigned int arr_size)
{
unsigned int i, first, second;
/* There should be at least two elements */
if (arr_size < 2)
{
printf(" Invalid Input ");
return 0;
}
// Error was here, before we had INT_MAX which is too low for >9 sailors
first = second = UINT_MAX;
for (i = 0; i < arr_size ; i ++)
{
/* If current element is smaller than first
then update both first and second */
if (arr[i] < first)
{
second = first;
first = arr[i];
}
/* If arr[i] is in between first and second
then update second */
else if (arr[i] < second && arr[i] != first)
second = arr[i];
}
if (second == UINT_MAX)
return first;
else
return second;
}
__global__
void monkey(unsigned long long int *coconuts, unsigned long long int extra, unsigned int *the_solutions, unsigned int *found, unsigned int sailors, unsigned int monkeys, unsigned int n)
{
if (found[0] == 0){
unsigned int j;
for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i<n; i+=blockDim.x*gridDim.x){
coconuts[i] = i + extra;
if (coconuts[i]%2!=0){
// Go through the number of sailors
for (j=0; j<sailors;j++){
// One for each monkey
coconuts[i] -= monkeys;
if (coconuts[i] % sailors != 0){
break;
}
coconuts[i] -= coconuts[i]/sailors;
}
if (coconuts[i] % sailors == 0){
found[0] = 1;
the_solutions[i] = i;
}
}
}
}
}
// Main method
int main()
{
clock_t start, diff;
// Size of array.
unsigned int SIZE = pow(2,25);
// CPU memory pointers
unsigned long long int *h_coc, da_solu=0;
unsigned int *h_found, *h_solutions;
// GPU memory pointers
unsigned long long int *d_coc, extra = 0;
unsigned int *d_found, *d_solutions;
// Allocate the space, CPU
h_coc = (unsigned long long int *)malloc(SIZE*sizeof(unsigned long long int));
//h_solutions = (unsigned int *)malloc(SIZE*sizeof(unsigned int));
cudaHostAlloc((void**)&h_solutions, SIZE*sizeof(unsigned int), cudaHostAllocDefault);
h_found = (unsigned int *)malloc(1*sizeof(unsigned int));
// Choose to run on secondary GPU
cudaSetDevice(1);
// Allocate the space, GPU
cudaMalloc(&d_coc, SIZE*sizeof(unsigned long long int));
cudaMalloc(&d_found, 1*sizeof(unsigned int));
cudaMalloc(&d_solutions, SIZE*sizeof(unsigned int));
// cudaMemset can be used for initializing data (say, all zeros); it is about 10 times faster than copying a zeroed array with cudaMemcpy because it is done on the GPU directly.
cudaMemset(d_solutions, 0, SIZE*sizeof(unsigned int));
unsigned int monkeys = 1;
unsigned int max_sailors = 5;
// Start timer
start = clock();
/*
If the number of monkeys is even, the solution will be even; otherwise it will be odd.
Ideally the kernel would therefore only search even (or only odd) candidates.
At the moment we pass the current offset of coconuts to search from in a very speed-friendly but crude way; this should be done more cleanly.
Restricting the search to one parity would cut the workload in half, so it should take roughly half as long.
*/
// Run the loop
for (unsigned int sailors=2; sailors<max_sailors+1;sailors++){
printf("Running %u sailors, %u monkeys", sailors, monkeys);
// Send back that we want to look for a new solution
h_found[0] = 0;
cudaMemset(d_found, 0, 1*sizeof(unsigned int));
// Run this loop until a solution is found for this sailor & monkey combination
while (h_found[0] == 0){
// Calling kernel (gridsize, blocksize)
monkey<<<(SIZE + 255) / 256, 256>>>(d_coc, extra, d_solutions, d_found, sailors, monkeys, SIZE);
// Copy back result (Device to Host).
cudaMemcpy(h_found, d_found, 1*sizeof(unsigned int), cudaMemcpyDeviceToHost);
if (h_found[0] == 1){
// Copy back result (Device to Host). This is pinned memory so +6 Gb/s
cudaMemcpy(h_solutions, d_solutions, SIZE*sizeof(unsigned int), cudaMemcpyDeviceToHost);
//cudaMemcpyAsync(h_solutions, d_solutions, SIZE*sizeof(unsigned int), cudaMemcpyDeviceToHost, 0);
//cudaDeviceSynchronize();
// Get second smallest in solutions array and recast
// possibly do this on gpu as well
da_solu = (unsigned long long int) print2Smallest(h_solutions, SIZE);
printf("\nSolution: %llu coconuts to begin with\n\n", da_solu+extra);
if (sailors != max_sailors){
// Set solution array to zero again
cudaMemset(d_solutions, 0, SIZE*sizeof(unsigned int));
}
}
else{
extra +=SIZE;
//printf(".");
}
}
// Assume that result for 5 sailors is larger than for 4 sailors and so on..
extra += da_solu;
}
// watch -n 0.5 "nvidia-settings -q GPUUtilization -q useddedicatedgpumemory"
// Print execution time
diff = clock() - start;
double totalt = (double)diff/CLOCKS_PER_SEC;
printf("Totalt: %f s\n", totalt);
// Free the allocated memory
free(h_coc);
free(h_found);
//free(h_solutions);
// Pinned memory needs to be released with the command
cudaFreeHost(h_solutions);
// Free GPU memory
cudaFree(d_coc);
cudaFree(d_found);
cudaFree(d_solutions);
// cudaDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling cudaDeviceReset causes all profile data to be
// flushed before the application exits
cudaDeviceReset();
return 0;
}
|
ffd8f6866165d025f518bc441eada175d82132bb.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "orttraining/training_ops/cuda/reduction/all_impl.h"
#include <thrust/logical.h>
#include <thrust/functional.h>
#include <thrust/execution_policy.h>
#ifdef _WIN32
#pragma warning(disable : 4244)
#endif
namespace onnxruntime {
namespace cuda {
__global__ void assign_true(bool* ptr) {
*ptr = true;
}
__global__ void assign_false(bool* ptr) {
*ptr = false;
}
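// Reduces the boolean input with thrust::all_of on the given stream, then writes the result via a one-thread kernel so the output stays on the device.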
template<>
void LaunchAllKernel(hipStream_t stream, const bool* data, const int size, bool* output) {
if(thrust::all_of(thrust::hip::par.on(stream), data, data + size, thrust::identity<bool>())) {
hipLaunchKernelGGL(( assign_true), dim3(1), dim3(1), 0, stream, output);
}
else
{
hipLaunchKernelGGL(( assign_false), dim3(1), dim3(1), 0, stream, output);
}
}
} // namespace cuda
} // namespace onnxruntime
|
ffd8f6866165d025f518bc441eada175d82132bb.cu
|
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "orttraining/training_ops/cuda/reduction/all_impl.h"
#include <thrust/logical.h>
#include <thrust/functional.h>
#include <thrust/execution_policy.h>
#ifdef _WIN32
#pragma warning(disable : 4244)
#endif
namespace onnxruntime {
namespace cuda {
__global__ void assign_true(bool* ptr) {
*ptr = true;
}
__global__ void assign_false(bool* ptr) {
*ptr = false;
}
template<>
void LaunchAllKernel(cudaStream_t stream, const bool* data, const int size, bool* output) {
if(thrust::all_of(thrust::cuda::par.on(stream), data, data + size, thrust::identity<bool>())) {
assign_true<<<1, 1, 0, stream>>>(output);
}
else
{
assign_false<<<1, 1, 0, stream>>>(output);
}
}
} // namespace cuda
} // namespace onnxruntime
|
af6cd7c3498ce57250e7831a09f64618132786cd.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "resize.h"
__global__ static void resizeBatchKernel(const uint8_t *p_Src, int nSrcPitch, int nSrcHeight,
uint8_t *p_dst, int nDstWidth, int nDstHeight) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int tidd = blockIdx.y * blockDim.y + threadIdx.y;
uchar3 rgb;
int nDstW = nDstWidth;
int nDstH = nDstHeight;
int yScale = nSrcHeight / nDstHeight;
int xScale = 3 * (nSrcPitch / nDstWidth);
if (tid < nDstW && tidd < nDstH) {
int j = tidd * yScale * nSrcPitch * 3;
int k = tid * xScale;
rgb.x = p_Src[j + k + 0];
rgb.y = p_Src[j + k + 1];
rgb.z = p_Src[j + k + 2];
k = tid * 3;
j = tidd * nDstWidth * 3;
p_dst[j + k + 0] = rgb.x;
p_dst[j + k + 1] = rgb.y;
p_dst[j + k + 2] = rgb.z;
}
}
void resizeBatch(uint8_t *dpSrc, int nSrcPitch, int nSrcHeight, uint8_t *dpDst, int nDstWidth, int nDstHeight,
hipStream_t stram) {
dim3 blocks(32, 32, 1);
dim3 grids((nSrcPitch + blocks.x - 1) / blocks.x, (((nSrcHeight * 3) + blocks.y) - 1) / blocks.y, 1);
resizeBatchKernel << <grids, blocks, 0, stram >> > (dpSrc, nSrcPitch, nSrcHeight, dpDst, nDstWidth, nDstHeight);
}
__global__ static void resizeRGBKernel(const uint16_t *p_Src, int nSrcPitch, int nSrcHeight,
uint8_t *p_dst, int nDstWidth, int nDstHeight, int *lookupTable_cuda) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int tidd = blockIdx.y * blockDim.y + threadIdx.y;
ushort3 rgb;
int nDstW = nDstWidth;
int nDstH = nDstHeight;
int yScale = nSrcHeight / nDstHeight;
int xScale = 3 * (nSrcPitch / nDstWidth);
if (tid < nDstW && tidd < nDstH) {
int j = tidd * yScale * nSrcPitch * 3;
int k = tid * xScale;
rgb.x = p_Src[j + k + 0];
rgb.y = p_Src[j + k + 1];
rgb.z = p_Src[j + k + 2];
k = tid * 3;
j = tidd * nDstWidth * 3;
p_dst[j + k + 0] = lookupTable_cuda[rgb.x];
p_dst[j + k + 1] = lookupTable_cuda[rgb.y];
p_dst[j + k + 2] = lookupTable_cuda[rgb.z];
}
}
void resizeRGB(uint16_t *dpSrc, int nSrcPitch, int nSrcHeight, uint8_t *dpDst, int nDstWidth, int nDstHeight,
    int *lookupTable_cuda, hipStream_t stream) {
    dim3 blocks(32, 32, 1);
    dim3 grids((nSrcPitch + blocks.x - 1) / blocks.x, ((nSrcHeight * 3) + blocks.y - 1) / blocks.y, 1);
    resizeRGBKernel<<<grids, blocks, 0, stream>>>(dpSrc, nSrcPitch, nSrcHeight, dpDst, nDstWidth, nDstHeight, lookupTable_cuda);
}
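// resizeBatchKernel (packed 10-bit -> 16-bit planar): unpacks three 10-bit samples per 32-bit
// source word, decimates by scale = nSrcHeight / nDstHeight (only 2, 4 and 6 are handled), and
// writes a full-width Y plane followed by half-width U and V planes into p_dst.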
__global__ static void resizeBatchKernel(const uint16_t *p_Src, int nSrcPitch, int nSrcHeight,
uint16_t *p_dst, int nDstWidth, int nDstHeight) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int tidd = blockIdx.y * blockDim.y + threadIdx.y;
uint4 pF;
int scale = nSrcHeight / nDstHeight;
if (scale == 4) {
uint32_t v0, y0, u0, y2, u1, y1, u2, y3, v1, y5, v2, y4;
int nDstH = nDstHeight;
int nDstW = nDstWidth / 6;
if (tid < nDstW && tidd < nDstH) {
int j = tidd * nSrcPitch * scale;
int k = tid * 32;
pF.x = (uint32_t)p_Src[j + k + 0] + ((uint32_t)p_Src[j + k + 1] << 16);
pF.w = (uint32_t)p_Src[j + k + 6];
v0 = (uint32_t)((pF.x & 0x3FF00000) >> 20);
y0 = (uint32_t)((pF.x & 0x000FFC00) >> 10);
u0 = (uint32_t)(pF.x & 0x000003FF);
y1 = (uint32_t)(pF.w & 0x000003FF);
pF.y = (uint32_t)p_Src[j + k + 10] + ((uint32_t)p_Src[j + k + 11] << 16);
pF.z = (uint32_t)p_Src[j + k + 12];
y2 = (uint32_t)((pF.y & 0x3FF00000) >> 20);
u1 = (uint32_t)((pF.y & 0x000FFC00) >> 10);
v1 = (uint32_t)(pF.z & 0x000003FF);
pF.x = (uint32_t)p_Src[j + k + 16] + ((uint32_t)p_Src[j + k + 17] << 16);
pF.z = ((uint32_t)p_Src[j + k + 21] << 16);
pF.w = (uint32_t)p_Src[j + k + 22] + ((uint32_t)p_Src[j + k + 23] << 16);
y3 = (uint32_t)((pF.x & 0x000FFC00) >> 10);
u2 = (uint32_t)((pF.z & 0x3FF00000) >> 20);
v2 = (uint32_t)((pF.w & 0x000FFC00) >> 10);
y4 = (uint32_t)(pF.w & 0x000003FF);
pF.y = ((uint32_t)p_Src[j + k + 27] << 16);
y5 = (uint32_t)((pF.y & 0x3FF00000) >> 20);
k = tid * 6;
j = tidd * nDstWidth;
p_dst[j + k + 0] = y0;
p_dst[j + k + 1] = y1;
p_dst[j + k + 2] = y2;
p_dst[j + k + 3] = y3;
p_dst[j + k + 4] = y4;
p_dst[j + k + 5] = y5;
k = tid * 3;
j = tidd * nDstWidth / 2 + nDstWidth * nDstHeight;
p_dst[j + k + 0] = u0;
p_dst[j + k + 1] = u1;
p_dst[j + k + 2] = u2;
j = tidd * nDstWidth / 2 + nDstWidth * nDstHeight * 3 / 2;
p_dst[j + k + 0] = v0;
p_dst[j + k + 1] = v1;
p_dst[j + k + 2] = v2;
}
} else if (scale == 6) {
uint32_t v0, y0, u0, y1;
int nDstH = nDstHeight;
int nDstW = nDstWidth / 2;
if (tid < nDstW && tidd < nDstH) {
int j = tidd * nSrcPitch * scale;
int k = tid * 16;
pF.x = (uint32_t)p_Src[j + k + 0] + ((uint32_t)p_Src[j + k + 1] << 16);
v0 = (uint32_t)((pF.x & 0x3FF00000) >> 20);
y0 = (uint32_t)((pF.x & 0x000FFC00) >> 10);
u0 = (uint32_t)(pF.x & 0x000003FF);
pF.x = (uint32_t)p_Src[j + k + 8] + ((uint32_t)p_Src[j + k + 9] << 16);
y1 = (uint32_t)((pF.x & 0x000FFC00) >> 10);
k = tid * 2;
j = tidd * nDstWidth;
p_dst[j + k + 0] = y0;
p_dst[j + k + 1] = y1;
k = tid;
j = tidd * nDstWidth / 2 + nDstWidth * nDstHeight;
p_dst[j + k + 0] = u0;
j = tidd * nDstWidth / 2 + nDstWidth * nDstHeight * 3 / 2;
p_dst[j + k + 0] = v0;
}
} else if (scale == 2) {
uint32_t v0, y0, u0, y2, u1, y1, u2, y3, v1, y5, v2, y4;
int nDstH = nDstHeight;
int nDstW = nDstWidth / 6;
if (tid < nDstW && tidd < nDstH) {
int j = tidd * nSrcPitch * scale;
int k = tid * 16;
pF.x = (uint32_t)p_Src[j + k + 0] + ((uint32_t)p_Src[j + k + 1] << 16);
pF.y = ((uint32_t)p_Src[j + k + 3] << 16);
pF.z = ((uint32_t)p_Src[j + k + 5] << 16);
pF.w = (uint32_t)p_Src[j + k + 6] + ((uint32_t)p_Src[j + k + 7] << 16);
v0 = (uint32_t)((pF.x & 0x3FF00000) >> 20);
y0 = (uint32_t)((pF.x & 0x000FFC00) >> 10);
u0 = (uint32_t)(pF.x & 0x000003FF);
y1 = (uint32_t)((pF.y & 0x3FF00000) >> 20);
u1 = (uint32_t)((pF.z & 0x3FF00000) >> 20);
v1 = (uint32_t)((pF.w & 0x000FFC00) >> 10);
y2 = (uint32_t)(pF.w & 0x000003FF);
pF.x = (uint32_t)p_Src[j + k + 8] + ((uint32_t)p_Src[j + k + 9] << 16);
pF.y = (uint32_t)p_Src[j + k + 10] + ((uint32_t)p_Src[j + k + 11] << 16);
pF.z = (uint32_t)p_Src[j + k + 12];
pF.w = (uint32_t)p_Src[j + k + 14];
y3 = (uint32_t)((pF.x & 0x000FFC00) >> 10);
y4 = (uint32_t)((pF.y & 0x3FF00000) >> 20);
u2 = (uint32_t)((pF.y & 0x000FFC00) >> 10);
v2 = (uint32_t)(pF.z & 0x000003FF);
y5 = (uint32_t)(pF.w & 0x000003FF);
k = tid * 6;
j = tidd * nDstWidth;
p_dst[j + k + 0] = y0;
p_dst[j + k + 1] = y1;
p_dst[j + k + 2] = y2;
p_dst[j + k + 3] = y3;
p_dst[j + k + 4] = y4;
p_dst[j + k + 5] = y5;
k = tid * 3;
j = tidd * nDstWidth / 2 + nDstWidth * nDstHeight;
p_dst[j + k + 0] = u0;
p_dst[j + k + 1] = u1;
p_dst[j + k + 2] = u2;
j = tidd * nDstWidth / 2 + nDstWidth * nDstHeight * 3 / 2;
p_dst[j + k + 0] = v0;
p_dst[j + k + 1] = v1;
p_dst[j + k + 2] = v2;
}
}
}
void resizeBatch(uint16_t *dpSrc, int nSrcPitch, int nSrcHeight, uint16_t *dpDst, int nDstWidth, int nDstHeight,
    hipStream_t stream) {
    dim3 blocks(32, 16, 1);
    dim3 grids((nSrcPitch + blocks.x - 1) / blocks.x, (nSrcHeight + blocks.y - 1) / blocks.y, 1);
    resizeBatchKernel<<<grids, blocks, 0, stream>>>(dpSrc, nSrcPitch, nSrcHeight, dpDst, nDstWidth, nDstHeight);
}
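// resizeBatchKernel (packed 10-bit -> 8-bit planar): same unpacking and decimation as above, but
// every 10-bit sample is mapped through lookupTable_cuda before being stored as an 8-bit value in
// a single planar Y/U/V buffer.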
__global__ static void resizeBatchKernel(const uint16_t *p_Src, int nSrcPitch, int nSrcHeight,
uint8_t *dpDst, int nDstWidth, int nDstHeight, int *lookupTable_cuda) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int tidd = blockIdx.y * blockDim.y + threadIdx.y;
uint4 pF;
int scale = nSrcHeight / nDstHeight;
if (scale == 4) {
uint32_t v0, y0, u0, y2, u1, y1, u2, y3, v1, y5, v2, y4;
int nDstH = nDstHeight;
int nDstW = nDstWidth / 6;
if (tid < nDstW && tidd < nDstH) {
int j = tidd * nSrcPitch * scale;
int k = tid * 32;
pF.x = (uint32_t)p_Src[j + k + 0] + ((uint32_t)p_Src[j + k + 1] << 16);
pF.w = (uint32_t)p_Src[j + k + 6];
v0 = (uint32_t)((pF.x & 0x3FF00000) >> 20);
y0 = (uint32_t)((pF.x & 0x000FFC00) >> 10);
u0 = (uint32_t)(pF.x & 0x000003FF);
y1 = (uint32_t)(pF.w & 0x000003FF);
pF.y = (uint32_t)p_Src[j + k + 10] + ((uint32_t)p_Src[j + k + 11] << 16);
pF.z = (uint32_t)p_Src[j + k + 12];
y2 = (uint32_t)((pF.y & 0x3FF00000) >> 20);
u1 = (uint32_t)((pF.y & 0x000FFC00) >> 10);
v1 = (uint32_t)(pF.z & 0x000003FF);
pF.x = (uint32_t)p_Src[j + k + 16] + ((uint32_t)p_Src[j + k + 17] << 16);
pF.z = ((uint32_t)p_Src[j + k + 21] << 16);
pF.w = (uint32_t)p_Src[j + k + 22] + ((uint32_t)p_Src[j + k + 23] << 16);
y3 = (uint32_t)((pF.x & 0x000FFC00) >> 10);
u2 = (uint32_t)((pF.z & 0x3FF00000) >> 20);
v2 = (uint32_t)((pF.w & 0x000FFC00) >> 10);
y4 = (uint32_t)(pF.w & 0x000003FF);
pF.y = ((uint32_t)p_Src[j + k + 27] << 16);
y5 = (uint32_t)((pF.y & 0x3FF00000) >> 20);
k = tid * 6;
j = tidd * nDstWidth;
dpDst[j + k + 0] = lookupTable_cuda[y0];
dpDst[j + k + 1] = lookupTable_cuda[y1];
dpDst[j + k + 2] = lookupTable_cuda[y2];
dpDst[j + k + 3] = lookupTable_cuda[y3];
dpDst[j + k + 4] = lookupTable_cuda[y4];
dpDst[j + k + 5] = lookupTable_cuda[y5];
k = tid * 3;
j = tidd * nDstWidth / 2 + nDstWidth * nDstHeight;
dpDst[j + k + 0] = lookupTable_cuda[u0];
dpDst[j + k + 1] = lookupTable_cuda[u1];
dpDst[j + k + 2] = lookupTable_cuda[u2];
j = tidd * nDstWidth / 2 + nDstWidth * nDstHeight * 3 / 2;
dpDst[j + k + 0] = lookupTable_cuda[v0];
dpDst[j + k + 1] = lookupTable_cuda[v1];
dpDst[j + k + 2] = lookupTable_cuda[v2];
}
}
else if (scale == 6) {
uint32_t v0, y0, u0, y1;
int nDstH = nDstHeight;
int nDstW = nDstWidth / 2;
if (tid < nDstW && tidd < nDstH) {
int j = tidd * nSrcPitch * scale;
int k = tid * 16;
pF.x = (uint32_t)p_Src[j + k + 0] + ((uint32_t)p_Src[j + k + 1] << 16);
v0 = (uint32_t)((pF.x & 0x3FF00000) >> 20);
y0 = (uint32_t)((pF.x & 0x000FFC00) >> 10);
u0 = (uint32_t)(pF.x & 0x000003FF);
pF.x = (uint32_t)p_Src[j + k + 8] + ((uint32_t)p_Src[j + k + 9] << 16);
y1 = (uint32_t)((pF.x & 0x000FFC00) >> 10);
k = tid * 2;
j = tidd * nDstWidth;
dpDst[j + k + 0] = lookupTable_cuda[y0];
dpDst[j + k + 1] = lookupTable_cuda[y1];
k = tid;
j = tidd * nDstWidth / 2 + nDstWidth * nDstHeight;
dpDst[j + k + 0] = lookupTable_cuda[u0];
j = tidd * nDstWidth / 2 + nDstWidth * nDstHeight * 3 / 2;
dpDst[j + k + 0] = lookupTable_cuda[v0];
}
}
else if (scale == 2) {
uint32_t v0, y0, u0, y2, u1, y1, u2, y3, v1, y5, v2, y4;
int nDstH = nDstHeight;
int nDstW = nDstWidth / 6;
if (tid < nDstW && tidd < nDstH) {
int j = tidd * nSrcPitch * scale;
int k = tid * 16;
pF.x = (uint32_t)p_Src[j + k + 0] + ((uint32_t)p_Src[j + k + 1] << 16);
pF.y = ((uint32_t)p_Src[j + k + 3] << 16);
pF.z = ((uint32_t)p_Src[j + k + 5] << 16);
pF.w = (uint32_t)p_Src[j + k + 6] + ((uint32_t)p_Src[j + k + 7] << 16);
v0 = (uint32_t)((pF.x & 0x3FF00000) >> 20);
y0 = (uint32_t)((pF.x & 0x000FFC00) >> 10);
u0 = (uint32_t)(pF.x & 0x000003FF);
y1 = (uint32_t)((pF.y & 0x3FF00000) >> 20);
u1 = (uint32_t)((pF.z & 0x3FF00000) >> 20);
v1 = (uint32_t)((pF.w & 0x000FFC00) >> 10);
y2 = (uint32_t)(pF.w & 0x000003FF);
pF.x = (uint32_t)p_Src[j + k + 8] + ((uint32_t)p_Src[j + k + 9] << 16);
pF.y = (uint32_t)p_Src[j + k + 10] + ((uint32_t)p_Src[j + k + 11] << 16);
pF.z = (uint32_t)p_Src[j + k + 12];
pF.w = (uint32_t)p_Src[j + k + 14];
y3 = (uint32_t)((pF.x & 0x000FFC00) >> 10);
y4 = (uint32_t)((pF.y & 0x3FF00000) >> 20);
u2 = (uint32_t)((pF.y & 0x000FFC00) >> 10);
v2 = (uint32_t)(pF.z & 0x000003FF);
y5 = (uint32_t)(pF.w & 0x000003FF);
k = tid * 6;
j = tidd * nDstWidth;
dpDst[j + k + 0] = lookupTable_cuda[y0];
dpDst[j + k + 1] = lookupTable_cuda[y1];
dpDst[j + k + 2] = lookupTable_cuda[y2];
dpDst[j + k + 3] = lookupTable_cuda[y3];
dpDst[j + k + 4] = lookupTable_cuda[y4];
dpDst[j + k + 5] = lookupTable_cuda[y5];
k = tid * 3;
j = tidd * nDstWidth / 2 + nDstWidth * nDstHeight;
dpDst[j + k + 0] = lookupTable_cuda[u0];
dpDst[j + k + 1] = lookupTable_cuda[u1];
dpDst[j + k + 2] = lookupTable_cuda[u2];
j = tidd * nDstWidth / 2 + nDstWidth * nDstHeight * 3 / 2;
dpDst[j + k + 0] = lookupTable_cuda[v0];
dpDst[j + k + 1] = lookupTable_cuda[v1];
dpDst[j + k + 2] = lookupTable_cuda[v2];
}
}
}
void resizeBatch(uint16_t *dpSrc, int nSrcPitch, int nSrcHeight, uint8_t *dpDst,
    int nDstWidth, int nDstHeight, int *lookupTable_cuda, hipStream_t stream) {
    dim3 blocks(32, 16, 1);
    dim3 grids((nSrcPitch + blocks.x - 1) / blocks.x, (nSrcHeight + blocks.y - 1) / blocks.y, 1);
    resizeBatchKernel<<<grids, blocks, 0, stream>>>(dpSrc, nSrcPitch, nSrcHeight,
        dpDst, nDstWidth, nDstHeight, lookupTable_cuda);
}
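// resizeBatchKernel (packed 10-bit -> three 8-bit planes): as above, but Y, U and V samples are
// written to the separate dpDst0, dpDst1 and dpDst2 buffers supplied by the caller.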
__global__ static void resizeBatchKernel(const uint16_t *p_Src, int nSrcPitch, int nSrcHeight,
uint8_t *dpDst0, uint8_t *dpDst1, uint8_t *dpDst2, int nDstWidth, int nDstHeight, int *lookupTable_cuda) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int tidd = blockIdx.y * blockDim.y + threadIdx.y;
uint4 pF;
int scale = nSrcHeight / nDstHeight;
if (scale == 4) {
uint32_t v0, y0, u0, y2, u1, y1, u2, y3, v1, y5, v2, y4;
int nDstH = nDstHeight;
int nDstW = nDstWidth / 6;
if (tid < nDstW && tidd < nDstH) {
int j = tidd * nSrcPitch * scale;
int k = tid * 32;
pF.x = (uint32_t)p_Src[j + k + 0] + ((uint32_t)p_Src[j + k + 1] << 16);
pF.w = (uint32_t)p_Src[j + k + 6];
v0 = (uint32_t)((pF.x & 0x3FF00000) >> 20);
y0 = (uint32_t)((pF.x & 0x000FFC00) >> 10);
u0 = (uint32_t)(pF.x & 0x000003FF);
y1 = (uint32_t)(pF.w & 0x000003FF);
pF.y = (uint32_t)p_Src[j + k + 10] + ((uint32_t)p_Src[j + k + 11] << 16);
pF.z = (uint32_t)p_Src[j + k + 12];
y2 = (uint32_t)((pF.y & 0x3FF00000) >> 20);
u1 = (uint32_t)((pF.y & 0x000FFC00) >> 10);
v1 = (uint32_t)(pF.z & 0x000003FF);
pF.x = (uint32_t)p_Src[j + k + 16] + ((uint32_t)p_Src[j + k + 17] << 16);
pF.z = ((uint32_t)p_Src[j + k + 21] << 16);
pF.w = (uint32_t)p_Src[j + k + 22] + ((uint32_t)p_Src[j + k + 23] << 16);
y3 = (uint32_t)((pF.x & 0x000FFC00) >> 10);
u2 = (uint32_t)((pF.z & 0x3FF00000) >> 20);
v2 = (uint32_t)((pF.w & 0x000FFC00) >> 10);
y4 = (uint32_t)(pF.w & 0x000003FF);
pF.y = ((uint32_t)p_Src[j + k + 27] << 16);
y5 = (uint32_t)((pF.y & 0x3FF00000) >> 20);
k = tid * 6;
j = tidd * nDstWidth;
dpDst0[j + k + 0] = lookupTable_cuda[y0];
dpDst0[j + k + 1] = lookupTable_cuda[y1];
dpDst0[j + k + 2] = lookupTable_cuda[y2];
dpDst0[j + k + 3] = lookupTable_cuda[y3];
dpDst0[j + k + 4] = lookupTable_cuda[y4];
dpDst0[j + k + 5] = lookupTable_cuda[y5];
k = tid * 3;
j = tidd * nDstWidth / 2;
dpDst1[j + k + 0] = lookupTable_cuda[u0];
dpDst1[j + k + 1] = lookupTable_cuda[u1];
dpDst1[j + k + 2] = lookupTable_cuda[u2];
dpDst2[j + k + 0] = lookupTable_cuda[v0];
dpDst2[j + k + 1] = lookupTable_cuda[v1];
dpDst2[j + k + 2] = lookupTable_cuda[v2];
}
} else if (scale == 6) {
uint32_t v0, y0, u0, y1;
int nDstH = nDstHeight;
int nDstW = nDstWidth / 2;
if (tid < nDstW && tidd < nDstH) {
int j = tidd * nSrcPitch * scale;
int k = tid * 16;
pF.x = (uint32_t)p_Src[j + k + 0] + ((uint32_t)p_Src[j + k + 1] << 16);
v0 = (uint32_t)((pF.x & 0x3FF00000) >> 20);
y0 = (uint32_t)((pF.x & 0x000FFC00) >> 10);
u0 = (uint32_t)(pF.x & 0x000003FF);
pF.x = (uint32_t)p_Src[j + k + 8] + ((uint32_t)p_Src[j + k + 9] << 16);
y1 = (uint32_t)((pF.x & 0x000FFC00) >> 10);
k = tid * 2;
j = tidd * nDstWidth;
dpDst0[j + k + 0] = lookupTable_cuda[y0];
dpDst0[j + k + 1] = lookupTable_cuda[y1];
k = tid;
j = tidd * nDstWidth / 2;
dpDst1[j + k + 0] = lookupTable_cuda[u0];
dpDst2[j + k + 0] = lookupTable_cuda[v0];
}
} else if (scale == 2) {
uint32_t v0, y0, u0, y2, u1, y1, u2, y3, v1, y5, v2, y4;
int nDstH = nDstHeight;
int nDstW = nDstWidth / 6;
if (tid < nDstW && tidd < nDstH) {
int j = tidd * nSrcPitch * scale;
int k = tid * 16;
pF.x = (uint32_t)p_Src[j + k + 0] + ((uint32_t)p_Src[j + k + 1] << 16);
pF.y = ((uint32_t)p_Src[j + k + 3] << 16);
pF.z = ((uint32_t)p_Src[j + k + 5] << 16);
pF.w = (uint32_t)p_Src[j + k + 6] + ((uint32_t)p_Src[j + k + 7] << 16);
v0 = (uint32_t)((pF.x & 0x3FF00000) >> 20);
y0 = (uint32_t)((pF.x & 0x000FFC00) >> 10);
u0 = (uint32_t)(pF.x & 0x000003FF);
y1 = (uint32_t)((pF.y & 0x3FF00000) >> 20);
u1 = (uint32_t)((pF.z & 0x3FF00000) >> 20);
v1 = (uint32_t)((pF.w & 0x000FFC00) >> 10);
y2 = (uint32_t)(pF.w & 0x000003FF);
pF.x = (uint32_t)p_Src[j + k + 8] + ((uint32_t)p_Src[j + k + 9] << 16);
pF.y = (uint32_t)p_Src[j + k + 10] + ((uint32_t)p_Src[j + k + 11] << 16);
pF.z = (uint32_t)p_Src[j + k + 12];
pF.w = (uint32_t)p_Src[j + k + 14];
y3 = (uint32_t)((pF.x & 0x000FFC00) >> 10);
y4 = (uint32_t)((pF.y & 0x3FF00000) >> 20);
u2 = (uint32_t)((pF.y & 0x000FFC00) >> 10);
v2 = (uint32_t)(pF.z & 0x000003FF);
y5 = (uint32_t)(pF.w & 0x000003FF);
k = tid * 6;
j = tidd * nDstWidth;
dpDst0[j + k + 0] = lookupTable_cuda[y0];
dpDst0[j + k + 1] = lookupTable_cuda[y1];
dpDst0[j + k + 2] = lookupTable_cuda[y2];
dpDst0[j + k + 3] = lookupTable_cuda[y3];
dpDst0[j + k + 4] = lookupTable_cuda[y4];
dpDst0[j + k + 5] = lookupTable_cuda[y5];
k = tid * 3;
j = tidd * nDstWidth / 2;
dpDst1[j + k + 0] = lookupTable_cuda[u0];
dpDst1[j + k + 1] = lookupTable_cuda[u1];
dpDst1[j + k + 2] = lookupTable_cuda[u2];
dpDst2[j + k + 0] = lookupTable_cuda[v0];
dpDst2[j + k + 1] = lookupTable_cuda[v1];
dpDst2[j + k + 2] = lookupTable_cuda[v2];
}
}
}
void resizeBatch(uint16_t *dpSrc, int nSrcPitch, int nSrcHeight, uint8_t *dpDst0, uint8_t *dpDst1, uint8_t *dpDst2,
    int nDstWidth, int nDstHeight, int *lookupTable_cuda, hipStream_t stream) {
    dim3 blocks(32, 16, 1);
    dim3 grids((nSrcPitch + blocks.x - 1) / blocks.x, (nSrcHeight + blocks.y - 1) / blocks.y, 1);
    resizeBatchKernel<<<grids, blocks, 0, stream>>>(dpSrc, nSrcPitch, nSrcHeight,
        dpDst0, dpDst1, dpDst2, nDstWidth, nDstHeight, lookupTable_cuda);
}
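// Usage sketch (illustrative only, not part of the original API): downscale an 8-bit interleaved
// RGB frame by 2x on a caller-provided stream. d_src and d_dst are assumed to be device buffers of
// srcW * srcH * 3 and (srcW / 2) * (srcH / 2) * 3 bytes; the dimensions are placeholders.
//
//   const int srcW = 1920, srcH = 1080;
//   resizeBatch(d_src, srcW, srcH, d_dst, srcW / 2, srcH / 2, stream);
//   hipStreamSynchronize(stream);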
|
af6cd7c3498ce57250e7831a09f64618132786cd.cu
|
#include <cuda.h>
#include <cuda_runtime.h>
#include "resize.h"
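// Point-sampling (decimation) downscale kernels and their launchers (see resize.h):
//  - 8-bit interleaved RGB  -> 8-bit interleaved RGB
//  - 16-bit interleaved RGB -> 8-bit interleaved RGB through lookupTable_cuda
//  - packed 10-bit 4:2:2 video (three 10-bit samples per 32-bit word, apparently V210-style)
//    -> planar Y/U/V output as 16-bit samples, as 8-bit samples through lookupTable_cuda, or
//       split into three separate 8-bit planes.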
__global__ static void resizeBatchKernel(const uint8_t *p_Src, int nSrcPitch, int nSrcHeight,
uint8_t *p_dst, int nDstWidth, int nDstHeight) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int tidd = blockIdx.y * blockDim.y + threadIdx.y;
uchar3 rgb;
int nDstW = nDstWidth;
int nDstH = nDstHeight;
int yScale = nSrcHeight / nDstHeight;
int xScale = 3 * (nSrcPitch / nDstWidth);
if (tid < nDstW && tidd < nDstH) {
int j = tidd * yScale * nSrcPitch * 3;
int k = tid * xScale;
rgb.x = p_Src[j + k + 0];
rgb.y = p_Src[j + k + 1];
rgb.z = p_Src[j + k + 2];
k = tid * 3;
j = tidd * nDstWidth * 3;
p_dst[j + k + 0] = rgb.x;
p_dst[j + k + 1] = rgb.y;
p_dst[j + k + 2] = rgb.z;
}
}
void resizeBatch(uint8_t *dpSrc, int nSrcPitch, int nSrcHeight, uint8_t *dpDst, int nDstWidth, int nDstHeight,
    cudaStream_t stream) {
    dim3 blocks(32, 32, 1);
    dim3 grids((nSrcPitch + blocks.x - 1) / blocks.x, ((nSrcHeight * 3) + blocks.y - 1) / blocks.y, 1);
    resizeBatchKernel<<<grids, blocks, 0, stream>>>(dpSrc, nSrcPitch, nSrcHeight, dpDst, nDstWidth, nDstHeight);
}
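// resizeRGBKernel: point-samples 16-bit interleaved RGB and maps each channel through
// lookupTable_cuda (presumably a 10-bit -> 8-bit tone curve) to produce 8-bit interleaved RGB.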
__global__ static void resizeRGBKernel(const uint16_t *p_Src, int nSrcPitch, int nSrcHeight,
uint8_t *p_dst, int nDstWidth, int nDstHeight, int *lookupTable_cuda) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int tidd = blockIdx.y * blockDim.y + threadIdx.y;
ushort3 rgb;
int nDstW = nDstWidth;
int nDstH = nDstHeight;
int yScale = nSrcHeight / nDstHeight;
int xScale = 3 * (nSrcPitch / nDstWidth);
if (tid < nDstW && tidd < nDstH) {
int j = tidd * yScale * nSrcPitch * 3;
int k = tid * xScale;
rgb.x = p_Src[j + k + 0];
rgb.y = p_Src[j + k + 1];
rgb.z = p_Src[j + k + 2];
k = tid * 3;
j = tidd * nDstWidth * 3;
p_dst[j + k + 0] = lookupTable_cuda[rgb.x];
p_dst[j + k + 1] = lookupTable_cuda[rgb.y];
p_dst[j + k + 2] = lookupTable_cuda[rgb.z];
}
}
void resizeRGB(uint16_t *dpSrc, int nSrcPitch, int nSrcHeight, uint8_t *dpDst, int nDstWidth, int nDstHeight,
    int *lookupTable_cuda, cudaStream_t stream) {
    dim3 blocks(32, 32, 1);
    dim3 grids((nSrcPitch + blocks.x - 1) / blocks.x, ((nSrcHeight * 3) + blocks.y - 1) / blocks.y, 1);
    resizeRGBKernel<<<grids, blocks, 0, stream>>>(dpSrc, nSrcPitch, nSrcHeight, dpDst, nDstWidth, nDstHeight, lookupTable_cuda);
}
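// resizeBatchKernel (packed 10-bit -> 16-bit planar): unpacks three 10-bit samples per 32-bit
// source word, decimates by scale = nSrcHeight / nDstHeight (only 2, 4 and 6 are handled), and
// writes a full-width Y plane followed by half-width U and V planes into p_dst.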
__global__ static void resizeBatchKernel(const uint16_t *p_Src, int nSrcPitch, int nSrcHeight,
uint16_t *p_dst, int nDstWidth, int nDstHeight) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int tidd = blockIdx.y * blockDim.y + threadIdx.y;
uint4 pF;
int scale = nSrcHeight / nDstHeight;
if (scale == 4) {
uint32_t v0, y0, u0, y2, u1, y1, u2, y3, v1, y5, v2, y4;
int nDstH = nDstHeight;
int nDstW = nDstWidth / 6;
if (tid < nDstW && tidd < nDstH) {
int j = tidd * nSrcPitch * scale;
int k = tid * 32;
pF.x = (uint32_t)p_Src[j + k + 0] + ((uint32_t)p_Src[j + k + 1] << 16);
pF.w = (uint32_t)p_Src[j + k + 6];
v0 = (uint32_t)((pF.x & 0x3FF00000) >> 20);
y0 = (uint32_t)((pF.x & 0x000FFC00) >> 10);
u0 = (uint32_t)(pF.x & 0x000003FF);
y1 = (uint32_t)(pF.w & 0x000003FF);
pF.y = (uint32_t)p_Src[j + k + 10] + ((uint32_t)p_Src[j + k + 11] << 16);
pF.z = (uint32_t)p_Src[j + k + 12];
y2 = (uint32_t)((pF.y & 0x3FF00000) >> 20);
u1 = (uint32_t)((pF.y & 0x000FFC00) >> 10);
v1 = (uint32_t)(pF.z & 0x000003FF);
pF.x = (uint32_t)p_Src[j + k + 16] + ((uint32_t)p_Src[j + k + 17] << 16);
pF.z = ((uint32_t)p_Src[j + k + 21] << 16);
pF.w = (uint32_t)p_Src[j + k + 22] + ((uint32_t)p_Src[j + k + 23] << 16);
y3 = (uint32_t)((pF.x & 0x000FFC00) >> 10);
u2 = (uint32_t)((pF.z & 0x3FF00000) >> 20);
v2 = (uint32_t)((pF.w & 0x000FFC00) >> 10);
y4 = (uint32_t)(pF.w & 0x000003FF);
pF.y = ((uint32_t)p_Src[j + k + 27] << 16);
y5 = (uint32_t)((pF.y & 0x3FF00000) >> 20);
k = tid * 6;
j = tidd * nDstWidth;
p_dst[j + k + 0] = y0;
p_dst[j + k + 1] = y1;
p_dst[j + k + 2] = y2;
p_dst[j + k + 3] = y3;
p_dst[j + k + 4] = y4;
p_dst[j + k + 5] = y5;
k = tid * 3;
j = tidd * nDstWidth / 2 + nDstWidth * nDstHeight;
p_dst[j + k + 0] = u0;
p_dst[j + k + 1] = u1;
p_dst[j + k + 2] = u2;
j = tidd * nDstWidth / 2 + nDstWidth * nDstHeight * 3 / 2;
p_dst[j + k + 0] = v0;
p_dst[j + k + 1] = v1;
p_dst[j + k + 2] = v2;
}
} else if (scale == 6) {
uint32_t v0, y0, u0, y1;
int nDstH = nDstHeight;
int nDstW = nDstWidth / 2;
if (tid < nDstW && tidd < nDstH) {
int j = tidd * nSrcPitch * scale;
int k = tid * 16;
pF.x = (uint32_t)p_Src[j + k + 0] + ((uint32_t)p_Src[j + k + 1] << 16);
v0 = (uint32_t)((pF.x & 0x3FF00000) >> 20);
y0 = (uint32_t)((pF.x & 0x000FFC00) >> 10);
u0 = (uint32_t)(pF.x & 0x000003FF);
pF.x = (uint32_t)p_Src[j + k + 8] + ((uint32_t)p_Src[j + k + 9] << 16);
y1 = (uint32_t)((pF.x & 0x000FFC00) >> 10);
k = tid * 2;
j = tidd * nDstWidth;
p_dst[j + k + 0] = y0;
p_dst[j + k + 1] = y1;
k = tid;
j = tidd * nDstWidth / 2 + nDstWidth * nDstHeight;
p_dst[j + k + 0] = u0;
j = tidd * nDstWidth / 2 + nDstWidth * nDstHeight * 3 / 2;
p_dst[j + k + 0] = v0;
}
} else if (scale == 2) {
uint32_t v0, y0, u0, y2, u1, y1, u2, y3, v1, y5, v2, y4;
int nDstH = nDstHeight;
int nDstW = nDstWidth / 6;
if (tid < nDstW && tidd < nDstH) {
int j = tidd * nSrcPitch * scale;
int k = tid * 16;
pF.x = (uint32_t)p_Src[j + k + 0] + ((uint32_t)p_Src[j + k + 1] << 16);
pF.y = ((uint32_t)p_Src[j + k + 3] << 16);
pF.z = ((uint32_t)p_Src[j + k + 5] << 16);
pF.w = (uint32_t)p_Src[j + k + 6] + ((uint32_t)p_Src[j + k + 7] << 16);
v0 = (uint32_t)((pF.x & 0x3FF00000) >> 20);
y0 = (uint32_t)((pF.x & 0x000FFC00) >> 10);
u0 = (uint32_t)(pF.x & 0x000003FF);
y1 = (uint32_t)((pF.y & 0x3FF00000) >> 20);
u1 = (uint32_t)((pF.z & 0x3FF00000) >> 20);
v1 = (uint32_t)((pF.w & 0x000FFC00) >> 10);
y2 = (uint32_t)(pF.w & 0x000003FF);
pF.x = (uint32_t)p_Src[j + k + 8] + ((uint32_t)p_Src[j + k + 9] << 16);
pF.y = (uint32_t)p_Src[j + k + 10] + ((uint32_t)p_Src[j + k + 11] << 16);
pF.z = (uint32_t)p_Src[j + k + 12];
pF.w = (uint32_t)p_Src[j + k + 14];
y3 = (uint32_t)((pF.x & 0x000FFC00) >> 10);
y4 = (uint32_t)((pF.y & 0x3FF00000) >> 20);
u2 = (uint32_t)((pF.y & 0x000FFC00) >> 10);
v2 = (uint32_t)(pF.z & 0x000003FF);
y5 = (uint32_t)(pF.w & 0x000003FF);
k = tid * 6;
j = tidd * nDstWidth;
p_dst[j + k + 0] = y0;
p_dst[j + k + 1] = y1;
p_dst[j + k + 2] = y2;
p_dst[j + k + 3] = y3;
p_dst[j + k + 4] = y4;
p_dst[j + k + 5] = y5;
k = tid * 3;
j = tidd * nDstWidth / 2 + nDstWidth * nDstHeight;
p_dst[j + k + 0] = u0;
p_dst[j + k + 1] = u1;
p_dst[j + k + 2] = u2;
j = tidd * nDstWidth / 2 + nDstWidth * nDstHeight * 3 / 2;
p_dst[j + k + 0] = v0;
p_dst[j + k + 1] = v1;
p_dst[j + k + 2] = v2;
}
}
}
void resizeBatch(uint16_t *dpSrc, int nSrcPitch, int nSrcHeight, uint16_t *dpDst, int nDstWidth, int nDstHeight,
    cudaStream_t stream) {
    dim3 blocks(32, 16, 1);
    dim3 grids((nSrcPitch + blocks.x - 1) / blocks.x, (nSrcHeight + blocks.y - 1) / blocks.y, 1);
    resizeBatchKernel<<<grids, blocks, 0, stream>>>(dpSrc, nSrcPitch, nSrcHeight, dpDst, nDstWidth, nDstHeight);
}
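// resizeBatchKernel (packed 10-bit -> 8-bit planar): same unpacking and decimation as above, but
// every 10-bit sample is mapped through lookupTable_cuda before being stored as an 8-bit value in
// a single planar Y/U/V buffer.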
__global__ static void resizeBatchKernel(const uint16_t *p_Src, int nSrcPitch, int nSrcHeight,
uint8_t *dpDst, int nDstWidth, int nDstHeight, int *lookupTable_cuda) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int tidd = blockIdx.y * blockDim.y + threadIdx.y;
uint4 pF;
int scale = nSrcHeight / nDstHeight;
if (scale == 4) {
uint32_t v0, y0, u0, y2, u1, y1, u2, y3, v1, y5, v2, y4;
int nDstH = nDstHeight;
int nDstW = nDstWidth / 6;
if (tid < nDstW && tidd < nDstH) {
int j = tidd * nSrcPitch * scale;
int k = tid * 32;
pF.x = (uint32_t)p_Src[j + k + 0] + ((uint32_t)p_Src[j + k + 1] << 16);
pF.w = (uint32_t)p_Src[j + k + 6];
v0 = (uint32_t)((pF.x & 0x3FF00000) >> 20);
y0 = (uint32_t)((pF.x & 0x000FFC00) >> 10);
u0 = (uint32_t)(pF.x & 0x000003FF);
y1 = (uint32_t)(pF.w & 0x000003FF);
pF.y = (uint32_t)p_Src[j + k + 10] + ((uint32_t)p_Src[j + k + 11] << 16);
pF.z = (uint32_t)p_Src[j + k + 12];
y2 = (uint32_t)((pF.y & 0x3FF00000) >> 20);
u1 = (uint32_t)((pF.y & 0x000FFC00) >> 10);
v1 = (uint32_t)(pF.z & 0x000003FF);
pF.x = (uint32_t)p_Src[j + k + 16] + ((uint32_t)p_Src[j + k + 17] << 16);
pF.z = ((uint32_t)p_Src[j + k + 21] << 16);
pF.w = (uint32_t)p_Src[j + k + 22] + ((uint32_t)p_Src[j + k + 23] << 16);
y3 = (uint32_t)((pF.x & 0x000FFC00) >> 10);
u2 = (uint32_t)((pF.z & 0x3FF00000) >> 20);
v2 = (uint32_t)((pF.w & 0x000FFC00) >> 10);
y4 = (uint32_t)(pF.w & 0x000003FF);
pF.y = ((uint32_t)p_Src[j + k + 27] << 16);
y5 = (uint32_t)((pF.y & 0x3FF00000) >> 20);
k = tid * 6;
j = tidd * nDstWidth;
dpDst[j + k + 0] = lookupTable_cuda[y0];
dpDst[j + k + 1] = lookupTable_cuda[y1];
dpDst[j + k + 2] = lookupTable_cuda[y2];
dpDst[j + k + 3] = lookupTable_cuda[y3];
dpDst[j + k + 4] = lookupTable_cuda[y4];
dpDst[j + k + 5] = lookupTable_cuda[y5];
k = tid * 3;
j = tidd * nDstWidth / 2 + nDstWidth * nDstHeight;
dpDst[j + k + 0] = lookupTable_cuda[u0];
dpDst[j + k + 1] = lookupTable_cuda[u1];
dpDst[j + k + 2] = lookupTable_cuda[u2];
j = tidd * nDstWidth / 2 + nDstWidth * nDstHeight * 3 / 2;
dpDst[j + k + 0] = lookupTable_cuda[v0];
dpDst[j + k + 1] = lookupTable_cuda[v1];
dpDst[j + k + 2] = lookupTable_cuda[v2];
}
}
else if (scale == 6) {
uint32_t v0, y0, u0, y1;
int nDstH = nDstHeight;
int nDstW = nDstWidth / 2;
if (tid < nDstW && tidd < nDstH) {
int j = tidd * nSrcPitch * scale;
int k = tid * 16;
pF.x = (uint32_t)p_Src[j + k + 0] + ((uint32_t)p_Src[j + k + 1] << 16);
v0 = (uint32_t)((pF.x & 0x3FF00000) >> 20);
y0 = (uint32_t)((pF.x & 0x000FFC00) >> 10);
u0 = (uint32_t)(pF.x & 0x000003FF);
pF.x = (uint32_t)p_Src[j + k + 8] + ((uint32_t)p_Src[j + k + 9] << 16);
y1 = (uint32_t)((pF.x & 0x000FFC00) >> 10);
k = tid * 2;
j = tidd * nDstWidth;
dpDst[j + k + 0] = lookupTable_cuda[y0];
dpDst[j + k + 1] = lookupTable_cuda[y1];
k = tid;
j = tidd * nDstWidth / 2 + nDstWidth * nDstHeight;
dpDst[j + k + 0] = lookupTable_cuda[u0];
j = tidd * nDstWidth / 2 + nDstWidth * nDstHeight * 3 / 2;
dpDst[j + k + 0] = lookupTable_cuda[v0];
}
}
else if (scale == 2) {
uint32_t v0, y0, u0, y2, u1, y1, u2, y3, v1, y5, v2, y4;
int nDstH = nDstHeight;
int nDstW = nDstWidth / 6;
if (tid < nDstW && tidd < nDstH) {
int j = tidd * nSrcPitch * scale;
int k = tid * 16;
pF.x = (uint32_t)p_Src[j + k + 0] + ((uint32_t)p_Src[j + k + 1] << 16);
pF.y = ((uint32_t)p_Src[j + k + 3] << 16);
pF.z = ((uint32_t)p_Src[j + k + 5] << 16);
pF.w = (uint32_t)p_Src[j + k + 6] + ((uint32_t)p_Src[j + k + 7] << 16);
v0 = (uint32_t)((pF.x & 0x3FF00000) >> 20);
y0 = (uint32_t)((pF.x & 0x000FFC00) >> 10);
u0 = (uint32_t)(pF.x & 0x000003FF);
y1 = (uint32_t)((pF.y & 0x3FF00000) >> 20);
u1 = (uint32_t)((pF.z & 0x3FF00000) >> 20);
v1 = (uint32_t)((pF.w & 0x000FFC00) >> 10);
y2 = (uint32_t)(pF.w & 0x000003FF);
pF.x = (uint32_t)p_Src[j + k + 8] + ((uint32_t)p_Src[j + k + 9] << 16);
pF.y = (uint32_t)p_Src[j + k + 10] + ((uint32_t)p_Src[j + k + 11] << 16);
pF.z = (uint32_t)p_Src[j + k + 12];
pF.w = (uint32_t)p_Src[j + k + 14];
y3 = (uint32_t)((pF.x & 0x000FFC00) >> 10);
y4 = (uint32_t)((pF.y & 0x3FF00000) >> 20);
u2 = (uint32_t)((pF.y & 0x000FFC00) >> 10);
v2 = (uint32_t)(pF.z & 0x000003FF);
y5 = (uint32_t)(pF.w & 0x000003FF);
k = tid * 6;
j = tidd * nDstWidth;
dpDst[j + k + 0] = lookupTable_cuda[y0];
dpDst[j + k + 1] = lookupTable_cuda[y1];
dpDst[j + k + 2] = lookupTable_cuda[y2];
dpDst[j + k + 3] = lookupTable_cuda[y3];
dpDst[j + k + 4] = lookupTable_cuda[y4];
dpDst[j + k + 5] = lookupTable_cuda[y5];
k = tid * 3;
j = tidd * nDstWidth / 2 + nDstWidth * nDstHeight;
dpDst[j + k + 0] = lookupTable_cuda[u0];
dpDst[j + k + 1] = lookupTable_cuda[u1];
dpDst[j + k + 2] = lookupTable_cuda[u2];
j = tidd * nDstWidth / 2 + nDstWidth * nDstHeight * 3 / 2;
dpDst[j + k + 0] = lookupTable_cuda[v0];
dpDst[j + k + 1] = lookupTable_cuda[v1];
dpDst[j + k + 2] = lookupTable_cuda[v2];
}
}
}
void resizeBatch(uint16_t *dpSrc, int nSrcPitch, int nSrcHeight, uint8_t *dpDst,
    int nDstWidth, int nDstHeight, int *lookupTable_cuda, cudaStream_t stream) {
    dim3 blocks(32, 16, 1);
    dim3 grids((nSrcPitch + blocks.x - 1) / blocks.x, (nSrcHeight + blocks.y - 1) / blocks.y, 1);
    resizeBatchKernel<<<grids, blocks, 0, stream>>>(dpSrc, nSrcPitch, nSrcHeight,
        dpDst, nDstWidth, nDstHeight, lookupTable_cuda);
}
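// resizeBatchKernel (packed 10-bit -> three 8-bit planes): as above, but Y, U and V samples are
// written to the separate dpDst0, dpDst1 and dpDst2 buffers supplied by the caller.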
__global__ static void resizeBatchKernel(const uint16_t *p_Src, int nSrcPitch, int nSrcHeight,
uint8_t *dpDst0, uint8_t *dpDst1, uint8_t *dpDst2, int nDstWidth, int nDstHeight, int *lookupTable_cuda) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int tidd = blockIdx.y * blockDim.y + threadIdx.y;
uint4 pF;
int scale = nSrcHeight / nDstHeight;
if (scale == 4) {
uint32_t v0, y0, u0, y2, u1, y1, u2, y3, v1, y5, v2, y4;
int nDstH = nDstHeight;
int nDstW = nDstWidth / 6;
if (tid < nDstW && tidd < nDstH) {
int j = tidd * nSrcPitch * scale;
int k = tid * 32;
pF.x = (uint32_t)p_Src[j + k + 0] + ((uint32_t)p_Src[j + k + 1] << 16);
pF.w = (uint32_t)p_Src[j + k + 6];
v0 = (uint32_t)((pF.x & 0x3FF00000) >> 20);
y0 = (uint32_t)((pF.x & 0x000FFC00) >> 10);
u0 = (uint32_t)(pF.x & 0x000003FF);
y1 = (uint32_t)(pF.w & 0x000003FF);
pF.y = (uint32_t)p_Src[j + k + 10] + ((uint32_t)p_Src[j + k + 11] << 16);
pF.z = (uint32_t)p_Src[j + k + 12];
y2 = (uint32_t)((pF.y & 0x3FF00000) >> 20);
u1 = (uint32_t)((pF.y & 0x000FFC00) >> 10);
v1 = (uint32_t)(pF.z & 0x000003FF);
pF.x = (uint32_t)p_Src[j + k + 16] + ((uint32_t)p_Src[j + k + 17] << 16);
pF.z = ((uint32_t)p_Src[j + k + 21] << 16);
pF.w = (uint32_t)p_Src[j + k + 22] + ((uint32_t)p_Src[j + k + 23] << 16);
y3 = (uint32_t)((pF.x & 0x000FFC00) >> 10);
u2 = (uint32_t)((pF.z & 0x3FF00000) >> 20);
v2 = (uint32_t)((pF.w & 0x000FFC00) >> 10);
y4 = (uint32_t)(pF.w & 0x000003FF);
pF.y = ((uint32_t)p_Src[j + k + 27] << 16);
y5 = (uint32_t)((pF.y & 0x3FF00000) >> 20);
k = tid * 6;
j = tidd * nDstWidth;
dpDst0[j + k + 0] = lookupTable_cuda[y0];
dpDst0[j + k + 1] = lookupTable_cuda[y1];
dpDst0[j + k + 2] = lookupTable_cuda[y2];
dpDst0[j + k + 3] = lookupTable_cuda[y3];
dpDst0[j + k + 4] = lookupTable_cuda[y4];
dpDst0[j + k + 5] = lookupTable_cuda[y5];
k = tid * 3;
j = tidd * nDstWidth / 2;
dpDst1[j + k + 0] = lookupTable_cuda[u0];
dpDst1[j + k + 1] = lookupTable_cuda[u1];
dpDst1[j + k + 2] = lookupTable_cuda[u2];
dpDst2[j + k + 0] = lookupTable_cuda[v0];
dpDst2[j + k + 1] = lookupTable_cuda[v1];
dpDst2[j + k + 2] = lookupTable_cuda[v2];
}
} else if (scale == 6) {
uint32_t v0, y0, u0, y1;
int nDstH = nDstHeight;
int nDstW = nDstWidth / 2;
if (tid < nDstW && tidd < nDstH) {
int j = tidd * nSrcPitch * scale;
int k = tid * 16;
pF.x = (uint32_t)p_Src[j + k + 0] + ((uint32_t)p_Src[j + k + 1] << 16);
v0 = (uint32_t)((pF.x & 0x3FF00000) >> 20);
y0 = (uint32_t)((pF.x & 0x000FFC00) >> 10);
u0 = (uint32_t)(pF.x & 0x000003FF);
pF.x = (uint32_t)p_Src[j + k + 8] + ((uint32_t)p_Src[j + k + 9] << 16);
y1 = (uint32_t)((pF.x & 0x000FFC00) >> 10);
k = tid * 2;
j = tidd * nDstWidth;
dpDst0[j + k + 0] = lookupTable_cuda[y0];
dpDst0[j + k + 1] = lookupTable_cuda[y1];
k = tid;
j = tidd * nDstWidth / 2;
dpDst1[j + k + 0] = lookupTable_cuda[u0];
dpDst2[j + k + 0] = lookupTable_cuda[v0];
}
} else if (scale == 2) {
uint32_t v0, y0, u0, y2, u1, y1, u2, y3, v1, y5, v2, y4;
int nDstH = nDstHeight;
int nDstW = nDstWidth / 6;
if (tid < nDstW && tidd < nDstH) {
int j = tidd * nSrcPitch * scale;
int k = tid * 16;
pF.x = (uint32_t)p_Src[j + k + 0] + ((uint32_t)p_Src[j + k + 1] << 16);
pF.y = ((uint32_t)p_Src[j + k + 3] << 16);
pF.z = ((uint32_t)p_Src[j + k + 5] << 16);
pF.w = (uint32_t)p_Src[j + k + 6] + ((uint32_t)p_Src[j + k + 7] << 16);
v0 = (uint32_t)((pF.x & 0x3FF00000) >> 20);
y0 = (uint32_t)((pF.x & 0x000FFC00) >> 10);
u0 = (uint32_t)(pF.x & 0x000003FF);
y1 = (uint32_t)((pF.y & 0x3FF00000) >> 20);
u1 = (uint32_t)((pF.z & 0x3FF00000) >> 20);
v1 = (uint32_t)((pF.w & 0x000FFC00) >> 10);
y2 = (uint32_t)(pF.w & 0x000003FF);
pF.x = (uint32_t)p_Src[j + k + 8] + ((uint32_t)p_Src[j + k + 9] << 16);
pF.y = (uint32_t)p_Src[j + k + 10] + ((uint32_t)p_Src[j + k + 11] << 16);
pF.z = (uint32_t)p_Src[j + k + 12];
pF.w = (uint32_t)p_Src[j + k + 14];
y3 = (uint32_t)((pF.x & 0x000FFC00) >> 10);
y4 = (uint32_t)((pF.y & 0x3FF00000) >> 20);
u2 = (uint32_t)((pF.y & 0x000FFC00) >> 10);
v2 = (uint32_t)(pF.z & 0x000003FF);
y5 = (uint32_t)(pF.w & 0x000003FF);
k = tid * 6;
j = tidd * nDstWidth;
dpDst0[j + k + 0] = lookupTable_cuda[y0];
dpDst0[j + k + 1] = lookupTable_cuda[y1];
dpDst0[j + k + 2] = lookupTable_cuda[y2];
dpDst0[j + k + 3] = lookupTable_cuda[y3];
dpDst0[j + k + 4] = lookupTable_cuda[y4];
dpDst0[j + k + 5] = lookupTable_cuda[y5];
k = tid * 3;
j = tidd * nDstWidth / 2;
dpDst1[j + k + 0] = lookupTable_cuda[u0];
dpDst1[j + k + 1] = lookupTable_cuda[u1];
dpDst1[j + k + 2] = lookupTable_cuda[u2];
dpDst2[j + k + 0] = lookupTable_cuda[v0];
dpDst2[j + k + 1] = lookupTable_cuda[v1];
dpDst2[j + k + 2] = lookupTable_cuda[v2];
}
}
}
void resizeBatch(uint16_t *dpSrc, int nSrcPitch, int nSrcHeight, uint8_t *dpDst0, uint8_t *dpDst1, uint8_t *dpDst2,
    int nDstWidth, int nDstHeight, int *lookupTable_cuda, cudaStream_t stream) {
    dim3 blocks(32, 16, 1);
    dim3 grids((nSrcPitch + blocks.x - 1) / blocks.x, (nSrcHeight + blocks.y - 1) / blocks.y, 1);
    resizeBatchKernel<<<grids, blocks, 0, stream>>>(dpSrc, nSrcPitch, nSrcHeight,
        dpDst0, dpDst1, dpDst2, nDstWidth, nDstHeight, lookupTable_cuda);
}
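// Usage sketch (illustrative only, not part of the original API): downscale an 8-bit interleaved
// RGB frame by 2x on a caller-provided stream. d_src and d_dst are assumed to be device buffers of
// srcW * srcH * 3 and (srcW / 2) * (srcH / 2) * 3 bytes; the dimensions are placeholders.
//
//   const int srcW = 1920, srcH = 1080;
//   resizeBatch(d_src, srcW, srcH, d_dst, srcW / 2, srcH / 2, stream);
//   cudaStreamSynchronize(stream);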
|
ff56a27eaad273e2e2463a2b430aea59e4ce7f56.hip
|
// !!! This is a file automatically generated by hipify!!!
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
//
#include <op_boilerplate.h>
#include <loops/broadcasting_bool.h>
#include <loops/legacy_ops.h>
#include <types/types.h>
#include <Environment.h>
#include <hip/hip_runtime.h>
#include <string>
#include <stdexcept>
#include <StringUtils.h>
using namespace simdOps;
//////////////////////////////////////////////////////////////////////////
template<typename X, typename Z, typename OpClass>
static __global__ void broadcastBoolSimple(
void *x,
Nd4jLong *xShapeInfo,
void *y,
Nd4jLong *yShapeInfo,
void *z,
Nd4jLong *zShapeInfo,
int *dimension,
int dimensionLength, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *tadOnlyShapeInfoZ, Nd4jLong *tadOffsetsZ) {
functions::broadcast::BroadcastBool<X, Z>::template transformCuda<OpClass>(x,xShapeInfo,y,yShapeInfo,z,zShapeInfo,dimension,dimensionLength,tadOnlyShapeInfo,tadOffsets,tadOnlyShapeInfoZ,tadOffsetsZ);
}
//////////////////////////////////////////////////////////////////////////
template<typename X, typename Z, typename OpClass>
static __global__ void broadcastBoolInverseSimple(
void *x,
Nd4jLong *xShapeInfo,
void *y,
Nd4jLong *yShapeInfo,
void *z,
Nd4jLong *zShapeInfo,
int *dimension,
int dimensionLength, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *tadOnlyShapeInfoZ, Nd4jLong *tadOffsetsZ) {
functions::broadcast::BroadcastBool<X, Z>::template transformInverseCuda<OpClass>(x,xShapeInfo,y,yShapeInfo,z,zShapeInfo,dimension,dimensionLength,tadOnlyShapeInfo,tadOffsets,tadOnlyShapeInfoZ,tadOffsetsZ);
}
namespace functions {
namespace broadcast {
//////////////////////////////////////////////////////////////////////////
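// Host-side entry points: execBroadcast picks the concrete boolean op at runtime via
// DISPATCH_BY_OPNUM_TT and forwards to the matching intermediateBroadcast instantiation, which
// launches broadcastBoolSimple on the caller's stream; the *Inverse* counterparts do the same
// for broadcastBoolInverseSimple.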
template<typename X, typename Z>
template <typename OpClass>
__host__ void BroadcastBool<X,Z>::intermediateBroadcast(dim3 launchDims, hipStream_t *stream, void *x, Nd4jLong *xShapeInfo, void *y, Nd4jLong *yShapeInfo, void *z, Nd4jLong *zShapeInfo, int *dimension, int dimensionLength, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *tadOnlyShapeInfoZ, Nd4jLong *tadOffsetsZ) {
hipLaunchKernelGGL(( broadcastBoolSimple<X, Z, OpClass>), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, x, xShapeInfo, y, yShapeInfo, z, zShapeInfo, dimension, dimensionLength, tadOnlyShapeInfo, tadOffsets, tadOnlyShapeInfoZ, tadOffsetsZ);
nd4j::DebugHelper::checkErrorCode(stream, "intermediateBroadcastBool(...) failed");
}
//////////////////////////////////////////////////////////////////////////
template<typename X, typename Y>
__host__ void BroadcastBool<X,Y>::execBroadcast(dim3 launchDims, hipStream_t *stream, int opNum, void *x, Nd4jLong *xShapeInfo, void *y, Nd4jLong *yShapeInfo, void *z, Nd4jLong *zShapeInfo, int *dimension, int dimensionLength, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *tadOnlyShapeInfoZ, Nd4jLong *tadOffsetsZ) {
DISPATCH_BY_OPNUM_TT(intermediateBroadcast, PARAMS(launchDims, stream, x, xShapeInfo, y, yShapeInfo, z, zShapeInfo, dimension, dimensionLength, tadOnlyShapeInfo, tadOffsets, tadOnlyShapeInfoZ, tadOffsetsZ), OPS_A(BROADCAST_BOOL_OPS))
DEBUG_KERNEL(stream, opNum);
}
//////////////////////////////////////////////////////////////////////////
template<typename X, typename Z>
template <typename OpClass>
__host__ void BroadcastBool<X,Z>::intermediateInverseBroadcast(dim3 launchDims, hipStream_t *stream, void *x, Nd4jLong *xShapeInfo, void *y, Nd4jLong *yShapeInfo, void *z, Nd4jLong *zShapeInfo, int *dimension, int dimensionLength, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *tadOnlyShapeInfoZ, Nd4jLong *tadOffsetsZ) {
hipLaunchKernelGGL(( broadcastBoolInverseSimple<X, Z, OpClass>), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, x, xShapeInfo, y, yShapeInfo, z, zShapeInfo, dimension, dimensionLength, tadOnlyShapeInfo, tadOffsets, tadOnlyShapeInfoZ, tadOffsetsZ);
nd4j::DebugHelper::checkErrorCode(stream, "intermediateBroadcastBool(...) failed");
}
//////////////////////////////////////////////////////////////////////////
template<typename X, typename Y>
__host__ void BroadcastBool<X,Y>::execInverseBroadcast(dim3 launchDims, hipStream_t *stream, int opNum, void *x, Nd4jLong *xShapeInfo, void *y, Nd4jLong *yShapeInfo, void *z, Nd4jLong *zShapeInfo, int *dimension, int dimensionLength, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *tadOnlyShapeInfoZ, Nd4jLong *tadOffsetsZ) {
DISPATCH_BY_OPNUM_TT(intermediateInverseBroadcast, PARAMS(launchDims, stream, x, xShapeInfo, y, yShapeInfo, z, zShapeInfo, dimension, dimensionLength, tadOnlyShapeInfo, tadOffsets, tadOnlyShapeInfoZ, tadOffsetsZ), OPS_A(BROADCAST_BOOL_OPS))
DEBUG_KERNEL(stream, opNum);
}
//////////////////////////////////////////////////////////////////////////
template<typename X, typename Z>
template <typename OpType>
__device__ void BroadcastBool<X,Z>::transformInverseCuda(
void *vx, Nd4jLong *xShapeInfo,
void *vy, Nd4jLong *yShapeInfo,
void *vz, Nd4jLong *zShapeInfo,
int *dimension, int dimensionLength,
Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *tadOnlyShapeInfoZ, Nd4jLong *tadOffsetsZ) {
if (tadOnlyShapeInfoZ == nullptr) {
tadOnlyShapeInfoZ = tadOnlyShapeInfo;
tadOffsetsZ = tadOffsets;
}
auto x = reinterpret_cast<X*>(vx);
auto y = reinterpret_cast<X*>(vy);
auto z = reinterpret_cast<Z*>(vz);
//decompose in to several sub tads after
//moving all dimensions (in sorted order)
//to the back.
//permuted version of the x shape info for setting up the tad problem
__shared__ Nd4jLong tadLength;
__shared__ Nd4jLong tadEWS;
__shared__ int numTads;
__shared__ Nd4jLong xEWS;
__shared__ Nd4jLong zEWS;
if (threadIdx.x == 0) {
tadLength = shape::length(tadOnlyShapeInfo);//shape::tadLength(xShapeInfo, dimension, dimensionLength);
tadEWS = shape::elementWiseStride(tadOnlyShapeInfo);
numTads = shape::length(yShapeInfo) / tadLength;
xEWS = shape::elementWiseStride(xShapeInfo);
zEWS = shape::elementWiseStride(tadOnlyShapeInfoZ);
}
__syncthreads();
for (int r = blockIdx.x; r < numTads; r += gridDim.x) {
auto rZ = z + tadOffsetsZ[r];
auto rY = y + tadOffsets[r];
if(tadEWS > 0 && zEWS > 0 && xEWS > 0 && dimensionLength == 1) {
for (int i = threadIdx.x; i < tadLength; i+= blockDim.x)
rZ[i * zEWS] = OpType::op(x[i * xEWS], rY[i * tadEWS]);
}
else {
// it is expected that x and z tads and y array all have the same length
for (Nd4jLong i = threadIdx.x; i < tadLength; i+= blockDim.x) {
auto xOffset = shape::getIndexOffset(i, xShapeInfo, tadLength);
auto yOffset = shape::getIndexOffset(i, tadOnlyShapeInfo, tadLength);
auto zOffset = shape::getIndexOffset(i, tadOnlyShapeInfoZ, tadLength);
rZ[zOffset] = OpType::op(x[xOffset], rY[yOffset]);
}
}
}
}
//////////////////////////////////////////////////////////////////////////
template<typename X, typename Z>
template <typename OpType>
__device__ void BroadcastBool<X,Z>::transformCuda(
void *vx, Nd4jLong *xShapeInfo,
void *vy, Nd4jLong *yShapeInfo,
void *vz, Nd4jLong *zShapeInfo,
int *dimension, int dimensionLength,
Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *tadOnlyShapeInfoZ, Nd4jLong *tadOffsetsZ) {
if (tadOnlyShapeInfoZ == nullptr) {
tadOnlyShapeInfoZ = tadOnlyShapeInfo;
tadOffsetsZ = tadOffsets;
}
auto x = reinterpret_cast<X*>(vx);
auto y = reinterpret_cast<X*>(vy);
auto z = reinterpret_cast<Z*>(vz);
//decompose in to several sub tads after
//moving all dimensions (in sorted order)
//to the back.
//permuted version of the x shape info for setting up the tad problem
__shared__ Nd4jLong tadLength;
__shared__ Nd4jLong tadEWS;
__shared__ int numTads;
__shared__ Nd4jLong yEWS;
__shared__ Nd4jLong zEWS;
if (threadIdx.x == 0) {
tadLength = shape::length(tadOnlyShapeInfo);//shape::tadLength(xShapeInfo, dimension, dimensionLength);
tadEWS = shape::elementWiseStride(tadOnlyShapeInfo);
numTads = shape::length(xShapeInfo) / tadLength;
yEWS = shape::elementWiseStride(yShapeInfo);
zEWS = shape::elementWiseStride(tadOnlyShapeInfoZ);
}
__syncthreads();
__shared__ Z *rZ;
__shared__ X *rX;
for (int r = blockIdx.x; r < numTads; r += gridDim.x) {
if (threadIdx.x == 0) {
rZ = z + tadOffsetsZ[r];
rX = x + tadOffsets[r];
}
__syncthreads();
if(tadEWS > 0 && zEWS > 0 && yEWS > 0 && dimensionLength == 1) {
for (int i = threadIdx.x; i < tadLength; i+= blockDim.x)
rZ[i * zEWS] = OpType::op(rX[i * tadEWS], y[i * yEWS]);
}
else {
// it is expected that x and z tads and y array all have the same length
for (Nd4jLong i = threadIdx.x; i < tadLength; i+= blockDim.x) {
auto xOffset = shape::getIndexOffset(i, tadOnlyShapeInfo, tadLength);
auto yOffset = shape::getIndexOffset(i, yShapeInfo, tadLength);
auto zOffset = shape::getIndexOffset(i, tadOnlyShapeInfoZ, tadLength);
rZ[zOffset] = OpType::op(rX[xOffset], y[yOffset]);
}
}
}
}
BUILD_DOUBLE_TEMPLATE(template class ND4J_EXPORT BroadcastBool, , LIBND4J_TYPES, BOOL_TYPES);
}
}
|
ff56a27eaad273e2e2463a2b430aea59e4ce7f56.cu
|
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
//
#include <op_boilerplate.h>
#include <loops/broadcasting_bool.h>
#include <loops/legacy_ops.h>
#include <types/types.h>
#include <Environment.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <string>
#include <stdexcept>
#include <StringUtils.h>
using namespace simdOps;
//////////////////////////////////////////////////////////////////////////
template<typename X, typename Z, typename OpClass>
static __global__ void broadcastBoolSimple(
void *x,
Nd4jLong *xShapeInfo,
void *y,
Nd4jLong *yShapeInfo,
void *z,
Nd4jLong *zShapeInfo,
int *dimension,
int dimensionLength, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *tadOnlyShapeInfoZ, Nd4jLong *tadOffsetsZ) {
functions::broadcast::BroadcastBool<X, Z>::template transformCuda<OpClass>(x,xShapeInfo,y,yShapeInfo,z,zShapeInfo,dimension,dimensionLength,tadOnlyShapeInfo,tadOffsets,tadOnlyShapeInfoZ,tadOffsetsZ);
}
//////////////////////////////////////////////////////////////////////////
template<typename X, typename Z, typename OpClass>
static __global__ void broadcastBoolInverseSimple(
void *x,
Nd4jLong *xShapeInfo,
void *y,
Nd4jLong *yShapeInfo,
void *z,
Nd4jLong *zShapeInfo,
int *dimension,
int dimensionLength, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *tadOnlyShapeInfoZ, Nd4jLong *tadOffsetsZ) {
functions::broadcast::BroadcastBool<X, Z>::template transformInverseCuda<OpClass>(x,xShapeInfo,y,yShapeInfo,z,zShapeInfo,dimension,dimensionLength,tadOnlyShapeInfo,tadOffsets,tadOnlyShapeInfoZ,tadOffsetsZ);
}
namespace functions {
namespace broadcast {
//////////////////////////////////////////////////////////////////////////
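// Host-side entry points: execBroadcast picks the concrete boolean op at runtime via
// DISPATCH_BY_OPNUM_TT and forwards to the matching intermediateBroadcast instantiation, which
// launches broadcastBoolSimple on the caller's stream; the *Inverse* counterparts do the same
// for broadcastBoolInverseSimple.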
template<typename X, typename Z>
template <typename OpClass>
__host__ void BroadcastBool<X,Z>::intermediateBroadcast(dim3 launchDims, cudaStream_t *stream, void *x, Nd4jLong *xShapeInfo, void *y, Nd4jLong *yShapeInfo, void *z, Nd4jLong *zShapeInfo, int *dimension, int dimensionLength, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *tadOnlyShapeInfoZ, Nd4jLong *tadOffsetsZ) {
broadcastBoolSimple<X, Z, OpClass><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(x, xShapeInfo, y, yShapeInfo, z, zShapeInfo, dimension, dimensionLength, tadOnlyShapeInfo, tadOffsets, tadOnlyShapeInfoZ, tadOffsetsZ);
nd4j::DebugHelper::checkErrorCode(stream, "intermediateBroadcastBool(...) failed");
}
//////////////////////////////////////////////////////////////////////////
template<typename X, typename Y>
__host__ void BroadcastBool<X,Y>::execBroadcast(dim3 launchDims, cudaStream_t *stream, int opNum, void *x, Nd4jLong *xShapeInfo, void *y, Nd4jLong *yShapeInfo, void *z, Nd4jLong *zShapeInfo, int *dimension, int dimensionLength, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *tadOnlyShapeInfoZ, Nd4jLong *tadOffsetsZ) {
DISPATCH_BY_OPNUM_TT(intermediateBroadcast, PARAMS(launchDims, stream, x, xShapeInfo, y, yShapeInfo, z, zShapeInfo, dimension, dimensionLength, tadOnlyShapeInfo, tadOffsets, tadOnlyShapeInfoZ, tadOffsetsZ), OPS_A(BROADCAST_BOOL_OPS))
DEBUG_KERNEL(stream, opNum);
}
//////////////////////////////////////////////////////////////////////////
template<typename X, typename Z>
template <typename OpClass>
__host__ void BroadcastBool<X,Z>::intermediateInverseBroadcast(dim3 launchDims, cudaStream_t *stream, void *x, Nd4jLong *xShapeInfo, void *y, Nd4jLong *yShapeInfo, void *z, Nd4jLong *zShapeInfo, int *dimension, int dimensionLength, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *tadOnlyShapeInfoZ, Nd4jLong *tadOffsetsZ) {
broadcastBoolInverseSimple<X, Z, OpClass><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(x, xShapeInfo, y, yShapeInfo, z, zShapeInfo, dimension, dimensionLength, tadOnlyShapeInfo, tadOffsets, tadOnlyShapeInfoZ, tadOffsetsZ);
nd4j::DebugHelper::checkErrorCode(stream, "intermediateBroadcastBool(...) failed");
}
//////////////////////////////////////////////////////////////////////////
template<typename X, typename Y>
__host__ void BroadcastBool<X,Y>::execInverseBroadcast(dim3 launchDims, cudaStream_t *stream, int opNum, void *x, Nd4jLong *xShapeInfo, void *y, Nd4jLong *yShapeInfo, void *z, Nd4jLong *zShapeInfo, int *dimension, int dimensionLength, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *tadOnlyShapeInfoZ, Nd4jLong *tadOffsetsZ) {
DISPATCH_BY_OPNUM_TT(intermediateInverseBroadcast, PARAMS(launchDims, stream, x, xShapeInfo, y, yShapeInfo, z, zShapeInfo, dimension, dimensionLength, tadOnlyShapeInfo, tadOffsets, tadOnlyShapeInfoZ, tadOffsetsZ), OPS_A(BROADCAST_BOOL_OPS))
DEBUG_KERNEL(stream, opNum);
}
//////////////////////////////////////////////////////////////////////////
template<typename X, typename Z>
template <typename OpType>
__device__ void BroadcastBool<X,Z>::transformInverseCuda(
void *vx, Nd4jLong *xShapeInfo,
void *vy, Nd4jLong *yShapeInfo,
void *vz, Nd4jLong *zShapeInfo,
int *dimension, int dimensionLength,
Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *tadOnlyShapeInfoZ, Nd4jLong *tadOffsetsZ) {
if (tadOnlyShapeInfoZ == nullptr) {
tadOnlyShapeInfoZ = tadOnlyShapeInfo;
tadOffsetsZ = tadOffsets;
}
auto x = reinterpret_cast<X*>(vx);
auto y = reinterpret_cast<X*>(vy);
auto z = reinterpret_cast<Z*>(vz);
//decompose in to several sub tads after
//moving all dimensions (in sorted order)
//to the back.
//permuted version of the x shape info for setting up the tad problem
__shared__ Nd4jLong tadLength;
__shared__ Nd4jLong tadEWS;
__shared__ int numTads;
__shared__ Nd4jLong xEWS;
__shared__ Nd4jLong zEWS;
if (threadIdx.x == 0) {
tadLength = shape::length(tadOnlyShapeInfo);//shape::tadLength(xShapeInfo, dimension, dimensionLength);
tadEWS = shape::elementWiseStride(tadOnlyShapeInfo);
numTads = shape::length(yShapeInfo) / tadLength;
xEWS = shape::elementWiseStride(xShapeInfo);
zEWS = shape::elementWiseStride(tadOnlyShapeInfoZ);
}
__syncthreads();
for (int r = blockIdx.x; r < numTads; r += gridDim.x) {
auto rZ = z + tadOffsetsZ[r];
auto rY = y + tadOffsets[r];
if(tadEWS > 0 && zEWS > 0 && xEWS > 0 && dimensionLength == 1) {
for (int i = threadIdx.x; i < tadLength; i+= blockDim.x)
rZ[i * zEWS] = OpType::op(x[i * xEWS], rY[i * tadEWS]);
}
else {
// it is expected that x and z tads and y array all have the same length
for (Nd4jLong i = threadIdx.x; i < tadLength; i+= blockDim.x) {
auto xOffset = shape::getIndexOffset(i, xShapeInfo, tadLength);
auto yOffset = shape::getIndexOffset(i, tadOnlyShapeInfo, tadLength);
auto zOffset = shape::getIndexOffset(i, tadOnlyShapeInfoZ, tadLength);
rZ[zOffset] = OpType::op(x[xOffset], rY[yOffset]);
}
}
}
}
//////////////////////////////////////////////////////////////////////////
template<typename X, typename Z>
template <typename OpType>
__device__ void BroadcastBool<X,Z>::transformCuda(
void *vx, Nd4jLong *xShapeInfo,
void *vy, Nd4jLong *yShapeInfo,
void *vz, Nd4jLong *zShapeInfo,
int *dimension, int dimensionLength,
Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *tadOnlyShapeInfoZ, Nd4jLong *tadOffsetsZ) {
if (tadOnlyShapeInfoZ == nullptr) {
tadOnlyShapeInfoZ = tadOnlyShapeInfo;
tadOffsetsZ = tadOffsets;
}
auto x = reinterpret_cast<X*>(vx);
auto y = reinterpret_cast<X*>(vy);
auto z = reinterpret_cast<Z*>(vz);
//decompose in to several sub tads after
//moving all dimensions (in sorted order)
//to the back.
//permuted version of the x shape info for setting up the tad problem
__shared__ Nd4jLong tadLength;
__shared__ Nd4jLong tadEWS;
__shared__ int numTads;
__shared__ Nd4jLong yEWS;
__shared__ Nd4jLong zEWS;
if (threadIdx.x == 0) {
tadLength = shape::length(tadOnlyShapeInfo);//shape::tadLength(xShapeInfo, dimension, dimensionLength);
tadEWS = shape::elementWiseStride(tadOnlyShapeInfo);
numTads = shape::length(xShapeInfo) / tadLength;
yEWS = shape::elementWiseStride(yShapeInfo);
zEWS = shape::elementWiseStride(tadOnlyShapeInfoZ);
}
__syncthreads();
__shared__ Z *rZ;
__shared__ X *rX;
for (int r = blockIdx.x; r < numTads; r += gridDim.x) {
if (threadIdx.x == 0) {
rZ = z + tadOffsetsZ[r];
rX = x + tadOffsets[r];
}
__syncthreads();
if(tadEWS > 0 && zEWS > 0 && yEWS > 0 && dimensionLength == 1) {
for (int i = threadIdx.x; i < tadLength; i+= blockDim.x)
rZ[i * zEWS] = OpType::op(rX[i * tadEWS], y[i * yEWS]);
}
else {
// it is expected that x and z tads and y array all have the same length
for (Nd4jLong i = threadIdx.x; i < tadLength; i+= blockDim.x) {
auto xOffset = shape::getIndexOffset(i, tadOnlyShapeInfo, tadLength);
auto yOffset = shape::getIndexOffset(i, yShapeInfo, tadLength);
auto zOffset = shape::getIndexOffset(i, tadOnlyShapeInfoZ, tadLength);
rZ[zOffset] = OpType::op(rX[xOffset], y[yOffset]);
}
}
}
}
BUILD_DOUBLE_TEMPLATE(template class ND4J_EXPORT BroadcastBool, , LIBND4J_TYPES, BOOL_TYPES);
}
}
|
8e9910132b3c76ad9ded9d3de9ca8b080d85dc51.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Implicit_restart_Arnoldi.h"
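// Device helpers for the implicitly restarted Arnoldi driver: conversions between separate
// real/imaginary arrays and cublasComplex storage, column permutation of device matrices by a
// sorted index list, assembly of eigenproblem residual vectors, and the host entry points
// Implicit_restart_Arnoldi_GPU_data(_Matrix_Exponent).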
__global__ void real_to_cublasComplex_kernel(int N, real *vec_source_re, real *vec_source_im, cublasComplex *vec_dest){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i<N){
vec_dest[i].x=vec_source_re[i];
vec_dest[i].y=vec_source_im[i];
}
}
__global__ void real_to_cublasComplex_kernel(int N, real *vec_source_re, cublasComplex *vec_dest){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i<N){
vec_dest[i].x=vec_source_re[i];
vec_dest[i].y=0.0;
}
}
__global__ void cublasComplex_to_real_kernel(int N, cublasComplex *vec_source, real *vec_dest_re, real *vec_dest_im){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i<N){
vec_dest_re[i]=vec_source[i].x;
vec_dest_im[i]=vec_source[i].y;
}
}
void real_complex_to_cublas_complex(int N, complex real* cpu_complex, cublasComplex *gpu_complex){
real *cpu_real, *cpu_imag, *gpu_real, *gpu_imag;
Arnoldi::device_allocate_all_real(N,1, 1, 2,&gpu_real, &gpu_imag);
Arnoldi::allocate_real(N, 1, 1, 2,&cpu_real, &cpu_imag);
for(int j=0;j<N;j++){
cpu_real[j]=creal(cpu_complex[j]);
cpu_imag[j]=cimag(cpu_complex[j]);
}
Arnoldi::to_device_from_host_real_cpy(gpu_real, cpu_real, N, 1,1);
Arnoldi::to_device_from_host_real_cpy(gpu_imag, cpu_imag, N, 1,1);
dim3 threads(BLOCKSIZE);
int blocks_x=(N+BLOCKSIZE)/BLOCKSIZE;
dim3 blocks(blocks_x);
hipLaunchKernelGGL(( real_to_cublasComplex_kernel), dim3(blocks), dim3(threads), 0, 0, N, gpu_real, gpu_imag, gpu_complex);
Arnoldi::deallocate_real(2,cpu_real, cpu_imag);
Arnoldi::device_deallocate_all_real(2, gpu_real, gpu_imag);
}
void real_device_to_cublas_complex(int N, real* gpu_real, cublasComplex *gpu_complex){
dim3 threads(BLOCKSIZE);
int blocks_x=(N+BLOCKSIZE)/BLOCKSIZE;
dim3 blocks(blocks_x);
hipLaunchKernelGGL(( real_to_cublasComplex_kernel), dim3(blocks), dim3(threads), 0, 0, N, gpu_real, gpu_complex);
}
void cublas_complex_to_complex_real(int N, cublasComplex *gpu_complex, complex real* cpu_complex){
real *cpu_real, *cpu_imag, *gpu_real, *gpu_imag;
Arnoldi::device_allocate_all_real(N,1, 1, 2,&gpu_real, &gpu_imag);
Arnoldi::allocate_real(N, 1, 1, 2,&cpu_real, &cpu_imag);
dim3 threads(BLOCKSIZE);
int blocks_x=(N+BLOCKSIZE)/BLOCKSIZE;
dim3 blocks(blocks_x);
hipLaunchKernelGGL(( cublasComplex_to_real_kernel), dim3(blocks), dim3(threads), 0, 0, N, gpu_complex, gpu_real, gpu_imag);
Arnoldi::to_host_from_device_real_cpy(cpu_real, gpu_real, N, 1,1);
Arnoldi::to_host_from_device_real_cpy(cpu_imag, gpu_imag, N, 1,1);
for(int j=0;j<N;j++){
cpu_complex[j]=cpu_real[j]+I*cpu_imag[j];
}
Arnoldi::deallocate_real(2,cpu_real, cpu_imag);
Arnoldi::device_deallocate_all_real(2, gpu_real, gpu_imag);
}
void cublas_complex_to_device_real(int N, cublasComplex *gpu_complex, real* gpu_real, real* gpu_imag){
dim3 threads(BLOCKSIZE);
int blocks_x=(N+BLOCKSIZE)/BLOCKSIZE;
dim3 blocks(blocks_x);
hipLaunchKernelGGL(( cublasComplex_to_real_kernel), dim3(blocks), dim3(threads), 0, 0, N, gpu_complex, gpu_real, gpu_imag);
}
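// permute_matrix_colums_kernel: one thread per matrix row copies that row into vec_dest with the
// columns reordered according to sorted_list_d (element addressing via the I2 macro from the
// project headers).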
__global__ void permute_matrix_colums_kernel(int MatrixRaw, int coloms, int *sorted_list_d, cublasComplex *vec_source, cublasComplex *vec_dest){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i<MatrixRaw){
for(int j=0;j<coloms;j++){
int index=sorted_list_d[j];
vec_dest[I2(i,j,MatrixRaw)]=vec_source[I2(i,index,MatrixRaw)];
}
}
}
void permute_matrix_colums(int MatrixRaw, int coloms, int *sorted_list_d, cublasComplex *vec_source, cublasComplex *vec_dest){
dim3 threads(BLOCKSIZE);
int blocks_x=(MatrixRaw+BLOCKSIZE)/BLOCKSIZE;
dim3 blocks(blocks_x);
hipLaunchKernelGGL(( permute_matrix_colums_kernel), dim3(blocks), dim3(threads), 0, 0, MatrixRaw, coloms, sorted_list_d, vec_source, vec_dest);
}
__global__ void RHS_of_eigenproblem_real_device_kernel(int N, real lambda_real, real* Vec_real, real lambda_imag, real* Vec_imag, real *Vec_res){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i<N){
Vec_res[i]=lambda_real*Vec_real[i]-lambda_imag*Vec_imag[i];
}
}
__global__ void RHS_of_eigenproblem_imag_device_kernel(int N, real lambda_real, real* Vec_real, real lambda_imag, real* Vec_imag, real *Vec_res){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i<N){
Vec_res[i]=lambda_imag*Vec_real[i]+lambda_real*Vec_imag[i];
}
}
void RHS_of_eigenproblem_device_real(int N, real lambda_real, real* Vec_real, real lambda_imag, real* Vec_imag, real *Vec_res){
dim3 threads(BLOCKSIZE);
int blocks_x=(N+BLOCKSIZE)/BLOCKSIZE;
dim3 blocks(blocks_x);
hipLaunchKernelGGL(( RHS_of_eigenproblem_real_device_kernel), dim3(blocks), dim3(threads), 0, 0, N, lambda_real, Vec_real, lambda_imag, Vec_imag, Vec_res);
}
void RHS_of_eigenproblem_device_imag(int N, real lambda_real, real* Vec_real, real lambda_imag, real* Vec_imag, real *Vec_res){
dim3 threads(BLOCKSIZE);
int blocks_x=(N+BLOCKSIZE)/BLOCKSIZE;
dim3 blocks(blocks_x);
hipLaunchKernelGGL(( RHS_of_eigenproblem_imag_device_kernel), dim3(blocks), dim3(threads), 0, 0, N, lambda_real, Vec_real, lambda_imag, Vec_imag, Vec_res);
}
__global__ void Residual_eigenproblem_device_kernel(int N, real* Vl_r_d, real* Vr_r_d, real* Vec_res){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i<N){
Vec_res[i]=Vl_r_d[i]-Vr_r_d[i];
}
}
void Residual_eigenproblem_device(int N, real* Vl_r_d, real* Vr_r_d, real* Vre_d){
dim3 threads(BLOCKSIZE);
int blocks_x=(N+BLOCKSIZE)/BLOCKSIZE;
dim3 blocks(blocks_x);
hipLaunchKernelGGL(( Residual_eigenproblem_device_kernel), dim3(blocks), dim3(threads), 0, 0, N, Vl_r_d, Vr_r_d, Vre_d);
}
void get_upper_matrix_part_host(int N_source, real *source_matrix, real *dest_matrix, int N_dist){
for(int i=0;i<N_dist;i++)
for(int j=0;j<N_dist;j++){
dest_matrix[I2(i,j,N_dist)]=source_matrix[I2(i,j,N_source)];
}
}
void permute_matrix_colums(int MatrixRaw, int coloms, int *sorted_list, real complex *vec_source, real complex *vec_dest){
for(int i=0;i<MatrixRaw;i++){
for(int j=0;j<coloms;j++){
int index=sorted_list[j];
vec_dest[I2(i,j,MatrixRaw)]=vec_source[I2(i,index,MatrixRaw)];
}
}
}
//which:
// "LR" - largest real, "LM" - largest magnitude
//
real Implicit_restart_Arnoldi_GPU_data(hipblasHandle_t handle, bool verbose, int N, user_map_vector Axb, void *user_struct, real *vec_f_d, char which[2], int k, int m, complex real* eigenvaluesA, real tol, int max_iter, real *eigenvectors_real_d, real *eigenvectors_imag_d, int BLASThreads){
//wrapper without an external routine such as the matrix exponent
real ritz_norm=1.0;
ritz_norm=Implicit_restart_Arnoldi_GPU_data_Matrix_Exponent(handle, verbose, N, Axb, user_struct, Axb, user_struct, vec_f_d, which, which, k, m, eigenvaluesA, tol, max_iter, eigenvectors_real_d, eigenvectors_imag_d, BLASThreads);
return ritz_norm;
}
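// Editor's note: a minimal, commented-out call sketch for the wrapper above. The operator
// my_operator, its context my_ctx and the subspace sizes are hypothetical placeholders,
// not names defined in this project.
//
//   char which[3] = "LR";               // ask for eigenvalues with largest real part
//   real complex eigs[6];               // room for k = 6 requested eigenvalues
//   real ritz = Implicit_restart_Arnoldi_GPU_data(handle, true, N, my_operator, &my_ctx,
//                    vec_f_d, which, 6, 24, eigs, 1.0e-9, 500,
//                    NULL, NULL, 4);    // NULL eigenvector buffers -> allocated internally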
real Implicit_restart_Arnoldi_GPU_data_Matrix_Exponent(hipblasHandle_t handle, bool verbose, int N, user_map_vector Axb_exponent_invert, void *user_struct_exponent_invert, user_map_vector Axb, void *user_struct, real *vec_f_d, char which[2], char which_exponent[2], int k, int m, complex real* eigenvaluesA, real tol, int max_iter, real *eigenvectors_real_d, real *eigenvectors_imag_d, int BLASThreads){
openblas_set_num_threads(BLASThreads); //sets number of threads to be used by OpenBLAS
real *vec_c=new real[m];
real *vec_h=new real[m];
real *vec_q=new real[m];
real *H=new real[m*m];
real *R=new real[m*m];
real *Q=new real[m*m];
real *H1=new real[m*m];
real *H2=new real[m*m];
matrixZero(m, m, H);
matrixZero(m, m, R);
matrixZero(m, m, Q);
matrixZero(m, m, H1);
matrixZero(m, m, H2);
real complex *eigenvectorsH=new real complex[m*m];
real complex *eigenvaluesH=new real complex[m*m];
real complex *eigenvectorsH_kk=new real complex[k*k];
real complex *eigenvectorsH_kk_sorted=new real complex[k*k];
real complex *eigenvaluesH_kk=new real complex[k*k];
real *ritz_vector=new real[m];
real *V_d, *V1_d, *Q_d; //matrixes on GPU
real *vec_f1_d, *vec_v_d, *vec_w_d, *vec_c_d, *vec_h_d, *vec_q_d; //vectors on GPU
//real *Vl_r_d, *Vl_i_d, *Vr_r_d, *Vr_i_d, *Vre_d, *Vim_d; //vectors on GPU for eigenvector residuals
//real *eigenvectors_real_d, *eigenvectors_imag_d; //Matrix Eigenvectors
bool external_eigenvectors=true;
if(eigenvectors_real_d==NULL){
external_eigenvectors=false;
Arnoldi::device_allocate_all_real(N,k, 1, 2, &eigenvectors_real_d, &eigenvectors_imag_d);
}
Arnoldi::device_allocate_all_real(N,m, 1, 2, &V_d, &V1_d);
Arnoldi::device_allocate_all_real(N, 1,1, 3, &vec_f1_d, &vec_w_d, &vec_v_d);
Arnoldi::device_allocate_all_real(m, 1,1, 3, &vec_c_d, &vec_h_d, &vec_q_d);
Arnoldi::device_allocate_all_real(m,m, 1, 1, &Q_d);
//Arnoldi::device_allocate_all_real(N, 1,1, 6, &Vl_r_d, &Vl_i_d, &Vr_r_d, &Vr_i_d, &Vre_d, &Vim_d);
//sets initial guesses for Krylov vectors
Arnoldi::set_initial_Krylov_vector_value_GPU(N, vec_f1_d);
Arnoldi::set_initial_Krylov_vector_value_GPU(N, vec_v_d);
Arnoldi::set_initial_Krylov_vector_value_GPU(N, vec_w_d);
// Allocate memory for eigenvectors.
cublasComplex *eigenvectorsH_d, *eigenvectorsA_d, *eigenvectorsA_unsorted_d, *eigenvectorsA_sorted_d;
eigenvectorsH_d=Arnoldi::device_allocate_complex(k, k, 1);
eigenvectorsA_d=Arnoldi::device_allocate_complex(N, k, 1);
eigenvectorsA_unsorted_d=Arnoldi::device_allocate_complex(N, k, 1);
eigenvectorsA_sorted_d=Arnoldi::device_allocate_complex(N, k, 1);
// hipblasHandle_t handle; //init cublas
// hipblasStatus_t ret;
// ret = hipblasCreate(&handle);
// Arnoldi::checkError(ret, " hipblasCreate(). ");
int k0=1;
int iterations=0;
real ritz_norm=1.0;
timer_start();
while(((iterations++)<max_iter)&&(ritz_norm>tol)){
Arnoldi_driver(handle, N, Axb_exponent_invert, user_struct_exponent_invert, V_d, H, vec_f_d, k0-1, m, vec_v_d, vec_w_d, vec_c_d, vec_h_d, vec_h); //Build orthogonal Krylov subspace
select_shifts(m, H, which_exponent, eigenvectorsH, eigenvaluesH, ritz_vector); //select basis shifts depending on 'which'
QR_shifts(k, m, Q, H, eigenvaluesH, &k0); //Do QR shifts of basis. Returns active eigenvalue indexes and Q-matrix for basis shift
real vec_f_norm=Arnoldi::vector_norm2_GPU(handle, N, vec_f_d);
for(int i=0;i<k0;i++){
ritz_vector[i]=ritz_vector[i]*vec_f_norm;
}
get_matrix_colomn(m, m, Q, vec_q, k0);
real hl=H[I2(k0,k0-1,m)];
real ql=Q[I2(m-1,k0-1,m)];
//f = V*vec_q*hl + f*ql;
Arnoldi::to_device_from_host_real_cpy(vec_q_d, vec_q, m, 1,1); //vec_q -> vec_q_d
Arnoldi::matrixMultVector_GPU(handle, N, V_d, m, hl, vec_q_d, ql, vec_f_d);
//matrixMultVector(N, V, m, hl, vec_q, ql, vec_f1, vec_f); //GG
//FIXME: Q should be applied only as an m x k0 matrix to the N x m submatrix of V;
//as a workaround, the trailing columns of Q are reset to identity columns below.
for(int i=0;i<m;i++){
for(int j=k0;j<m;j++){
Q[I2(i,j,m)]=1.0*delta(i,j);
}
}
//Copy matrix Q to GPU memory here.
Arnoldi::to_device_from_host_real_cpy(Q_d, Q, m, m, 1); //Q -> Q_d
Arnoldi::matrixMultMatrix_GPU(handle, N, m, m, V_d, 1.0, Q_d, 0.0, V1_d); //OK
//matrix_copy(N, m, V1, V); //GG
Arnoldi::vector_copy_GPU(handle, N*m, V1_d, V_d);
ritz_norm=vector_normC(k0,ritz_vector);
if(verbose){
printf("it=%i, ritz norms=", iterations);
for(int ll=0;ll<k0;ll++){
printf("%0.3le ",(double)ritz_vector[ll]);
}
printf("\n");
}
else{
// if(iterations%50==0)
// printf("it=%i, ritz norm_C=%.05e \n", iterations, ritz_norm);
}
}
timer_stop();
timer_print();
if(verbose)
printf("\ncomputing original map eigenvectors and eigenvalues...\n");
//test Schur!
real *Q_Schur=new real[k*k];
real *H_Schur=new real[k*k];
//get_upper_matrix_part_host(m, H, H_Schur, k);
for(int i=0;i<k;i++){
for(int j=0;j<k;j++){
H_Schur[I2(i,j,k)]=H[I2(i,j,m)];
}
}
print_matrix("H_pre.dat", k, k, H_Schur);
//check pre-Galerkin eigenvalues of H matrix
real complex *HC1=new real complex[k*k];
for(int i=0;i<k;i++){
for(int j=0;j<k;j++){
HC1[I2(i,j,k)]=H_Schur[I2(i,j,k)]+0.0*I;
//HC[I2(i,j,k)]=H[I2(i,j,m)]+0.0*I;
}
}
MatrixComplexEigensystem(eigenvectorsH_kk, eigenvaluesH_kk, HC1, k);
delete [] HC1;
printf("\n Eigenvalues of H matrix before Galerkin projection:\n");
for(int i=0;i<k;i++){
real ritz_val=ritz_vector[i];
printf("\n%.08le,%.08le,ritz:,%.04le", (double) creal(eigenvaluesH_kk[i]), (double) cimag(eigenvaluesH_kk[i]), (double)ritz_val );
}
//check ends
Schur_Hessinberg_matrix(H_Schur, k, Q_Schur); //returns Q as an orthogonal matrix whose columns are the Schur vectors; the input matrix is overwritten by an upper quasi-triangular matrix (its Schur form)
print_matrix("H_Schur.dat", k, k, H_Schur);
print_matrix("Q_Schur.dat", k, k, Q_Schur);
//compute eigenvectors
//[Q,R] = schur(H(1:ko,1:ko));
//V = V(:,1:ko)*Q; <--- eigenvectors
//R= V'*(A*V);
//eigens=eig(R); <--- eigenvalues
//residual: resid = norm(A*V - V*R);
real *Q_Schur_d;
real *Vcolres_d, *VRres_d;
//real *V1_temp=new real[N*k];
Arnoldi::device_allocate_all_real(k, k, 1, 1, &Q_Schur_d);
Arnoldi::device_allocate_all_real(N, k, 1, 2, &Vcolres_d, &VRres_d);
Arnoldi::to_device_from_host_real_cpy(Q_Schur_d, Q_Schur, k, k,1);
Arnoldi::matrixMultMatrix_GPU(handle, N, k, k, V_d, 1.0, Q_Schur_d, 0.0, V1_d); //Vectors are in V1_d!!!
//Arnoldi::to_host_from_device_real_cpy(V1_temp, V1_d, N, k, 1);
//print_matrix("V1_d.dat", N, k, V1_temp);
//form Vcolres_d=A*V1_d
for(int i=0;i<k;i++){
Axb(user_struct, &V1_d[i*N], &Vcolres_d[i*N]);
Arnoldi::check_for_nans("IRA: Schur basis projeciton out", N, &Vcolres_d[i*N]);
}
Arnoldi::matrixTMultMatrix_GPU(handle, k, k, N, V1_d, 1.0, Vcolres_d, 0.0, Q_Schur_d); //Vectors are in V1_d!!! Q_Schur_d := R in matlab
// Arnoldi::to_host_from_device_real_cpy(V1_temp, Vcolres_d, N, k, 1);
// print_matrix("Vcol_d.dat", N, k, V1_temp);
//delete [] V1_temp;
//check residual!
real *residualAV=new real[k];
for(int i=0;i<k;i++){
Axb(user_struct, &V1_d[i*N], &Vcolres_d[i*N]);
Arnoldi::check_for_nans("IRA: Schur basis projeciton out in residual", N, &Vcolres_d[i*N]);
}
Arnoldi::matrixMultMatrix_GPU(handle, N, k, k, V1_d, 1.0, Q_Schur_d, 0.0, VRres_d);
for(int i=0;i<k;i++){
Arnoldi::vectors_add_GPU(handle, N, -1.0, &Vcolres_d[i*N], &VRres_d[i*N]);
residualAV[i]=Arnoldi::vector_norm2_GPU(handle, N, &VRres_d[i*N]);
}
//done
Arnoldi::to_host_from_device_real_cpy(H_Schur, Q_Schur_d, k, k, 1);
//print_matrix("RRR.dat", k, k, H_Schur);
Arnoldi::device_deallocate_all_real(3, Q_Schur_d,Vcolres_d, VRres_d);
//170820 stopped here!!!
real complex *HC=new real complex[k*k];
for(int i=0;i<k;i++){
for(int j=0;j<k;j++){
HC[I2(i,j,k)]=H_Schur[I2(i,j,k)]+0.0*I;
//HC[I2(i,j,k)]=H[I2(i,j,m)]+0.0*I;
}
}
MatrixComplexEigensystem(eigenvectorsH_kk, eigenvaluesH_kk, HC, k);
delete [] HC;
delete [] Q_Schur;
delete [] H_Schur;
int *sorted_list=new int[k];
int *sorted_list_d=Arnoldi::device_allocate_int(k, 1, 1);
get_sorted_index(k, which, eigenvaluesH_kk, sorted_list);
//sort eigenvectors of the Schur form of the Hessenberg matrix
permute_matrix_colums(k, k, sorted_list, eigenvectorsH_kk, eigenvectorsH_kk_sorted);
// Now store EigenvectorsH to GPU as cublasComplex.
real_complex_to_cublas_complex(k*k, eigenvectorsH_kk_sorted, eigenvectorsH_d);
real_device_to_cublas_complex(N*k, V_d, eigenvectorsA_unsorted_d);
Arnoldi::to_device_from_host_int_cpy(sorted_list_d, sorted_list, k, 1, 1);
permute_matrix_colums(N, k, sorted_list_d, eigenvectorsA_unsorted_d, eigenvectorsA_sorted_d);
Arnoldi::matrixMultComplexMatrix_GPU(handle, N, k, k, eigenvectorsA_sorted_d, eigenvectorsH_d, eigenvectorsA_d); //here eigenvectorsA_d contain sorted eigenvectors of original problem
hipFree(sorted_list_d);
delete [] sorted_list;
hipFree(eigenvectorsH_d);
hipFree(eigenvectorsA_unsorted_d);
hipFree(eigenvectorsA_sorted_d);
if(verbose)
printf("\ndone\n");
printf("\nNumber of correct eigenvalues=%i Eigenvalues: \n", k);
for(int i=0;i<k;i++){
real ritz_val=ritz_vector[i];
printf("\n%.08le,%.08le,residual:,%.04le", (double) creal(eigenvaluesH_kk[i]), (double) cimag(eigenvaluesH_kk[i]), (double)residualAV[i] );
}
printf("\n");
delete [] residualAV;
//get Real and Imag parts of eigenvectors
cublas_complex_to_device_real(N*k, eigenvectorsA_d, eigenvectors_real_d, eigenvectors_imag_d);
bool do_plot=true;
if((verbose)&&(do_plot)){
printf("plotting output matrixes and vectors...\n");
real *vec_f_local=new real[N];
real *V_local=new real[N*m];
real *V1_local=new real[N*m];
Arnoldi::to_host_from_device_real_cpy(vec_f_local, vec_f_d, N, 1, 1); //vec_f_d -> vec_f
Arnoldi::to_host_from_device_real_cpy(V_local, V_d, N, m, 1); //vec_V_d -> vec_V
Arnoldi::to_host_from_device_real_cpy(V1_local, V1_d, N, m, 1);
real complex *eigenvectorsA=new real complex[N*k];
cublas_complex_to_complex_real(N*k, eigenvectorsA_d, eigenvectorsA);
real *V_real_local=new real[N*k];
real *V_imag_local=new real[N*k];
Arnoldi::to_host_from_device_real_cpy(V_real_local, eigenvectors_real_d, N, k, 1);
Arnoldi::to_host_from_device_real_cpy(V_imag_local, eigenvectors_imag_d, N, k, 1);
print_matrix("EigVecA.dat", N, k, eigenvectorsA);
print_matrix("V1.dat", N, k, V1_local);
print_matrix("V_real.dat", N, k, V_real_local);//eigenvectors_real_d
print_matrix("V_imag.dat", N, k, V_imag_local);//eigenvectors_imag_d
print_matrix("V.dat", N, k, V_local);
print_matrix("H.dat", m, m, H);
print_matrix("H1.dat", m, m, H1);
print_matrix("H2.dat", m, m, H2);
print_matrix("R.dat", m, m, R);
print_matrix("Q.dat", m, m, Q);
print_matrix("EigVecH.dat", k, k, eigenvectorsH_kk_sorted);
print_vector("EigH.dat", k, eigenvaluesH_kk);
print_vector("f.dat", N, vec_f_local);
delete [] eigenvectorsA;
delete [] vec_f_local;
delete [] V_local;
delete [] V1_local;
delete [] V_real_local;
delete [] V_imag_local;
printf("done\n");
}
hipFree(eigenvectorsA_d);
if(!external_eigenvectors){
hipFree(eigenvectors_real_d);
hipFree(eigenvectors_imag_d);
}
Arnoldi::device_deallocate_all_real(9, V_d, V1_d, vec_f1_d, vec_w_d, vec_v_d, vec_c_d, vec_h_d, vec_q_d, Q_d);
//Arnoldi::device_deallocate_all_real(6, Vl_r_d, Vl_i_d, Vr_r_d, Vr_i_d, Vre_d, Vim_d);
//free cublas
//hipblasDestroy(handle);
delete [] vec_c; delete [] vec_h; delete [] vec_q;
delete [] H; delete [] R; delete [] Q; delete [] H1; delete [] H2;
delete [] eigenvectorsH; delete [] eigenvaluesH; delete [] eigenvectorsH_kk;
delete [] eigenvectorsH_kk_sorted; delete [] eigenvaluesH_kk; delete [] ritz_vector;
return ritz_norm;
}
|
8e9910132b3c76ad9ded9d3de9ca8b080d85dc51.cu
|
#include "Implicit_restart_Arnoldi.h"
__global__ void real_to_cublasComplex_kernel(int N, real *vec_source_re, real *vec_source_im, cublasComplex *vec_dest){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i<N){
vec_dest[i].x=vec_source_re[i];
vec_dest[i].y=vec_source_im[i];
}
}
__global__ void real_to_cublasComplex_kernel(int N, real *vec_source_re, cublasComplex *vec_dest){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i<N){
vec_dest[i].x=vec_source_re[i];
vec_dest[i].y=0.0;
}
}
__global__ void cublasComplex_to_real_kernel(int N, cublasComplex *vec_source, real *vec_dest_re, real *vec_dest_im){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i<N){
vec_dest_re[i]=vec_source[i].x;
vec_dest_im[i]=vec_source[i].y;
}
}
void real_complex_to_cublas_complex(int N, complex real* cpu_complex, cublasComplex *gpu_complex){
real *cpu_real, *cpu_imag, *gpu_real, *gpu_imag;
Arnoldi::device_allocate_all_real(N,1, 1, 2,&gpu_real, &gpu_imag);
Arnoldi::allocate_real(N, 1, 1, 2,&cpu_real, &cpu_imag);
for(int j=0;j<N;j++){
cpu_real[j]=creal(cpu_complex[j]);
cpu_imag[j]=cimag(cpu_complex[j]);
}
Arnoldi::to_device_from_host_real_cpy(gpu_real, cpu_real, N, 1,1);
Arnoldi::to_device_from_host_real_cpy(gpu_imag, cpu_imag, N, 1,1);
dim3 threads(BLOCKSIZE);
int blocks_x=(N+BLOCKSIZE)/BLOCKSIZE;
dim3 blocks(blocks_x);
real_to_cublasComplex_kernel<<< blocks, threads>>>(N, gpu_real, gpu_imag, gpu_complex);
Arnoldi::deallocate_real(2,cpu_real, cpu_imag);
Arnoldi::device_deallocate_all_real(2, gpu_real, gpu_imag);
}
void real_device_to_cublas_complex(int N, real* gpu_real, cublasComplex *gpu_complex){
dim3 threads(BLOCKSIZE);
int blocks_x=(N+BLOCKSIZE)/BLOCKSIZE;
dim3 blocks(blocks_x);
real_to_cublasComplex_kernel<<< blocks, threads>>>(N, gpu_real, gpu_complex);
}
void cublas_complex_to_complex_real(int N, cublasComplex *gpu_complex, complex real* cpu_complex){
real *cpu_real, *cpu_imag, *gpu_real, *gpu_imag;
Arnoldi::device_allocate_all_real(N,1, 1, 2,&gpu_real, &gpu_imag);
Arnoldi::allocate_real(N, 1, 1, 2,&cpu_real, &cpu_imag);
dim3 threads(BLOCKSIZE);
int blocks_x=(N+BLOCKSIZE)/BLOCKSIZE;
dim3 blocks(blocks_x);
cublasComplex_to_real_kernel<<< blocks, threads>>>(N, gpu_complex, gpu_real, gpu_imag);
Arnoldi::to_host_from_device_real_cpy(cpu_real, gpu_real, N, 1,1);
Arnoldi::to_host_from_device_real_cpy(cpu_imag, gpu_imag, N, 1,1);
for(int j=0;j<N;j++){
cpu_complex[j]=cpu_real[j]+I*cpu_imag[j];
}
Arnoldi::deallocate_real(2,cpu_real, cpu_imag);
Arnoldi::device_deallocate_all_real(2, gpu_real, gpu_imag);
}
void cublas_complex_to_device_real(int N, cublasComplex *gpu_complex, real* gpu_real, real* gpu_imag){
dim3 threads(BLOCKSIZE);
int blocks_x=(N+BLOCKSIZE)/BLOCKSIZE;
dim3 blocks(blocks_x);
cublasComplex_to_real_kernel<<< blocks, threads>>>(N, gpu_complex, gpu_real, gpu_imag);
}
__global__ void permute_matrix_colums_kernel(int MatrixRaw, int coloms, int *sorted_list_d, cublasComplex *vec_source, cublasComplex *vec_dest){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i<MatrixRaw){
for(int j=0;j<coloms;j++){
int index=sorted_list_d[j];
vec_dest[I2(i,j,MatrixRaw)]=vec_source[I2(i,index,MatrixRaw)];
}
}
}
void permute_matrix_colums(int MatrixRaw, int coloms, int *sorted_list_d, cublasComplex *vec_source, cublasComplex *vec_dest){
dim3 threads(BLOCKSIZE);
int blocks_x=(MatrixRaw+BLOCKSIZE)/BLOCKSIZE;
dim3 blocks(blocks_x);
permute_matrix_colums_kernel<<< blocks, threads>>>(MatrixRaw, coloms, sorted_list_d, vec_source, vec_dest);
}
__global__ void RHS_of_eigenproblem_real_device_kernel(int N, real lambda_real, real* Vec_real, real lambda_imag, real* Vec_imag, real *Vec_res){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i<N){
Vec_res[i]=lambda_real*Vec_real[i]-lambda_imag*Vec_imag[i];
}
}
__global__ void RHS_of_eigenproblem_imag_device_kernel(int N, real lambda_real, real* Vec_real, real lambda_imag, real* Vec_imag, real *Vec_res){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i<N){
Vec_res[i]=lambda_imag*Vec_real[i]+lambda_real*Vec_imag[i];
}
}
void RHS_of_eigenproblem_device_real(int N, real lambda_real, real* Vec_real, real lambda_imag, real* Vec_imag, real *Vec_res){
dim3 threads(BLOCKSIZE);
int blocks_x=(N+BLOCKSIZE)/BLOCKSIZE;
dim3 blocks(blocks_x);
RHS_of_eigenproblem_real_device_kernel<<< blocks, threads>>>(N, lambda_real, Vec_real, lambda_imag, Vec_imag, Vec_res);
}
void RHS_of_eigenproblem_device_imag(int N, real lambda_real, real* Vec_real, real lambda_imag, real* Vec_imag, real *Vec_res){
dim3 threads(BLOCKSIZE);
int blocks_x=(N+BLOCKSIZE)/BLOCKSIZE;
dim3 blocks(blocks_x);
RHS_of_eigenproblem_imag_device_kernel<<< blocks, threads>>>(N, lambda_real, Vec_real, lambda_imag, Vec_imag, Vec_res);
}
__global__ void Residual_eigenproblem_device_kernel(int N, real* Vl_r_d, real* Vr_r_d, real* Vec_res){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i<N){
Vec_res[i]=Vl_r_d[i]-Vr_r_d[i];
}
}
void Residual_eigenproblem_device(int N, real* Vl_r_d, real* Vr_r_d, real* Vre_d){
dim3 threads(BLOCKSIZE);
int blocks_x=(N+BLOCKSIZE)/BLOCKSIZE;
dim3 blocks(blocks_x);
Residual_eigenproblem_device_kernel<<< blocks, threads>>>(N, Vl_r_d, Vr_r_d, Vre_d);
}
void get_upper_matrix_part_host(int N_source, real *source_matrix, real *dest_matrix, int N_dist){
for(int i=0;i<N_dist;i++)
for(int j=0;j<N_dist;j++){
dest_matrix[I2(i,j,N_dist)]=source_matrix[I2(i,j,N_source)];
}
}
void permute_matrix_colums(int MatrixRaw, int coloms, int *sorted_list, real complex *vec_source, real complex *vec_dest){
for(int i=0;i<MatrixRaw;i++){
for(int j=0;j<coloms;j++){
int index=sorted_list[j];
vec_dest[I2(i,j,MatrixRaw)]=vec_source[I2(i,index,MatrixRaw)];
}
}
}
//which:
// "LR" - largest real, "LM" - largest magnitude
//
real Implicit_restart_Arnoldi_GPU_data(cublasHandle_t handle, bool verbose, int N, user_map_vector Axb, void *user_struct, real *vec_f_d, char which[2], int k, int m, complex real* eigenvaluesA, real tol, int max_iter, real *eigenvectors_real_d, real *eigenvectors_imag_d, int BLASThreads){
//wrapper without an external routine such as the matrix exponent
real ritz_norm=1.0;
ritz_norm=Implicit_restart_Arnoldi_GPU_data_Matrix_Exponent(handle, verbose, N, Axb, user_struct, Axb, user_struct, vec_f_d, which, which, k, m, eigenvaluesA, tol, max_iter, eigenvectors_real_d, eigenvectors_imag_d, BLASThreads);
return ritz_norm;
}
real Implicit_restart_Arnoldi_GPU_data_Matrix_Exponent(cublasHandle_t handle, bool verbose, int N, user_map_vector Axb_exponent_invert, void *user_struct_exponent_invert, user_map_vector Axb, void *user_struct, real *vec_f_d, char which[2], char which_exponent[2], int k, int m, complex real* eigenvaluesA, real tol, int max_iter, real *eigenvectors_real_d, real *eigenvectors_imag_d, int BLASThreads){
openblas_set_num_threads(BLASThreads); //sets number of threads to be used by OpenBLAS
real *vec_c=new real[m];
real *vec_h=new real[m];
real *vec_q=new real[m];
real *H=new real[m*m];
real *R=new real[m*m];
real *Q=new real[m*m];
real *H1=new real[m*m];
real *H2=new real[m*m];
matrixZero(m, m, H);
matrixZero(m, m, R);
matrixZero(m, m, Q);
matrixZero(m, m, H1);
matrixZero(m, m, H2);
real complex *eigenvectorsH=new real complex[m*m];
real complex *eigenvaluesH=new real complex[m*m];
real complex *eigenvectorsH_kk=new real complex[k*k];
real complex *eigenvectorsH_kk_sorted=new real complex[k*k];
real complex *eigenvaluesH_kk=new real complex[k*k];
real *ritz_vector=new real[m];
real *V_d, *V1_d, *Q_d; //matrixes on GPU
real *vec_f1_d, *vec_v_d, *vec_w_d, *vec_c_d, *vec_h_d, *vec_q_d; //vectors on GPU
//real *Vl_r_d, *Vl_i_d, *Vr_r_d, *Vr_i_d, *Vre_d, *Vim_d; //vectors on GPU for eigenvector residuals
//real *eigenvectors_real_d, *eigenvectors_imag_d; //Matrix Eigenvectors
bool external_eigenvectors=true;
if(eigenvectors_real_d==NULL){
external_eigenvectors=false;
Arnoldi::device_allocate_all_real(N,k, 1, 2, &eigenvectors_real_d, &eigenvectors_imag_d);
}
Arnoldi::device_allocate_all_real(N,m, 1, 2, &V_d, &V1_d);
Arnoldi::device_allocate_all_real(N, 1,1, 3, &vec_f1_d, &vec_w_d, &vec_v_d);
Arnoldi::device_allocate_all_real(m, 1,1, 3, &vec_c_d, &vec_h_d, &vec_q_d);
Arnoldi::device_allocate_all_real(m,m, 1, 1, &Q_d);
//Arnoldi::device_allocate_all_real(N, 1,1, 6, &Vl_r_d, &Vl_i_d, &Vr_r_d, &Vr_i_d, &Vre_d, &Vim_d);
//sets initial guesses for Krylov vectors
Arnoldi::set_initial_Krylov_vector_value_GPU(N, vec_f1_d);
Arnoldi::set_initial_Krylov_vector_value_GPU(N, vec_v_d);
Arnoldi::set_initial_Krylov_vector_value_GPU(N, vec_w_d);
// Allocate memory for eigenvectors.
cublasComplex *eigenvectorsH_d, *eigenvectorsA_d, *eigenvectorsA_unsorted_d, *eigenvectorsA_sorted_d;
eigenvectorsH_d=Arnoldi::device_allocate_complex(k, k, 1);
eigenvectorsA_d=Arnoldi::device_allocate_complex(N, k, 1);
eigenvectorsA_unsorted_d=Arnoldi::device_allocate_complex(N, k, 1);
eigenvectorsA_sorted_d=Arnoldi::device_allocate_complex(N, k, 1);
// cublasHandle_t handle; //init cublas
// cublasStatus_t ret;
// ret = cublasCreate(&handle);
// Arnoldi::checkError(ret, " cublasCreate(). ");
int k0=1;
int iterations=0;
real ritz_norm=1.0;
timer_start();
while(((iterations++)<max_iter)&&(ritz_norm>tol)){
Arnoldi_driver(handle, N, Axb_exponent_invert, user_struct_exponent_invert, V_d, H, vec_f_d, k0-1, m, vec_v_d, vec_w_d, vec_c_d, vec_h_d, vec_h); //Build orthogonal Krylov subspace
select_shifts(m, H, which_exponent, eigenvectorsH, eigenvaluesH, ritz_vector); //select basis shifts depending on 'which'
QR_shifts(k, m, Q, H, eigenvaluesH, &k0); //Do QR shifts of basis. Returns active eigenvalue indexes and Q-matrix for basis shift
real vec_f_norm=Arnoldi::vector_norm2_GPU(handle, N, vec_f_d);
for(int i=0;i<k0;i++){
ritz_vector[i]=ritz_vector[i]*vec_f_norm;
}
get_matrix_colomn(m, m, Q, vec_q, k0);
real hl=H[I2(k0,k0-1,m)];
real ql=Q[I2(m-1,k0-1,m)];
//f = V*vec_q*hl + f*ql;
Arnoldi::to_device_from_host_real_cpy(vec_q_d, vec_q, m, 1,1); //vec_q -> vec_q_d
Arnoldi::matrixMultVector_GPU(handle, N, V_d, m, hl, vec_q_d, ql, vec_f_d);
//matrixMultVector(N, V, m, hl, vec_q, ql, vec_f1, vec_f); //GG
//FIXME: Q should be applied only as an m x k0 matrix to the N x m submatrix of V;
//as a workaround, the trailing columns of Q are reset to identity columns below.
for(int i=0;i<m;i++){
for(int j=k0;j<m;j++){
Q[I2(i,j,m)]=1.0*delta(i,j);
}
}
//Copy matrix Q to GPU memory here.
Arnoldi::to_device_from_host_real_cpy(Q_d, Q, m, m, 1); //Q -> Q_d
Arnoldi::matrixMultMatrix_GPU(handle, N, m, m, V_d, 1.0, Q_d, 0.0, V1_d); //OK
//matrix_copy(N, m, V1, V); //GG
Arnoldi::vector_copy_GPU(handle, N*m, V1_d, V_d);
ritz_norm=vector_normC(k0,ritz_vector);
if(verbose){
printf("it=%i, ritz norms=", iterations);
for(int ll=0;ll<k0;ll++){
printf("%0.3le ",(double)ritz_vector[ll]);
}
printf("\n");
}
else{
// if(iterations%50==0)
// printf("it=%i, ritz norm_C=%.05e \n", iterations, ritz_norm);
}
}
timer_stop();
timer_print();
if(verbose)
printf("\ncomputing original map eigenvectors and eigenvalues...\n");
//test Schur!
real *Q_Schur=new real[k*k];
real *H_Schur=new real[k*k];
//get_upper_matrix_part_host(m, H, H_Schur, k);
for(int i=0;i<k;i++){
for(int j=0;j<k;j++){
H_Schur[I2(i,j,k)]=H[I2(i,j,m)];
}
}
print_matrix("H_pre.dat", k, k, H_Schur);
//check pre-Galerkin eigenvalues of H matrix
real complex *HC1=new real complex[k*k];
for(int i=0;i<k;i++){
for(int j=0;j<k;j++){
HC1[I2(i,j,k)]=H_Schur[I2(i,j,k)]+0.0*I;
//HC[I2(i,j,k)]=H[I2(i,j,m)]+0.0*I;
}
}
MatrixComplexEigensystem(eigenvectorsH_kk, eigenvaluesH_kk, HC1, k);
delete [] HC1;
printf("\n Eigenvalues of H matrix before Galerkin projection:\n");
for(int i=0;i<k;i++){
real ritz_val=ritz_vector[i];
printf("\n%.08le,%.08le,ritz:,%.04le", (double) creal(eigenvaluesH_kk[i]), (double) cimag(eigenvaluesH_kk[i]), (double)ritz_val );
}
//check ends
Schur_Hessinberg_matrix(H_Schur, k, Q_Schur); //returns Q as an orthogonal matrix whose columns are the Schur vectors; the input matrix is overwritten by an upper quasi-triangular matrix (its Schur form)
print_matrix("H_Schur.dat", k, k, H_Schur);
print_matrix("Q_Schur.dat", k, k, Q_Schur);
//compute eigenvectors
//[Q,R] = schur(H(1:ko,1:ko));
//V = V(:,1:ko)*Q; <--- eigenvectors
//R= V'*(A*V);
//eigens=eig(R); <--- eigenvalues
//residual: resid = norm(A*V - V*R);
real *Q_Schur_d;
real *Vcolres_d, *VRres_d;
//real *V1_temp=new real[N*k];
Arnoldi::device_allocate_all_real(k, k, 1, 1, &Q_Schur_d);
Arnoldi::device_allocate_all_real(N, k, 1, 2, &Vcolres_d, &VRres_d);
Arnoldi::to_device_from_host_real_cpy(Q_Schur_d, Q_Schur, k, k,1);
Arnoldi::matrixMultMatrix_GPU(handle, N, k, k, V_d, 1.0, Q_Schur_d, 0.0, V1_d); //Vectors are in V1_d!!!
//Arnoldi::to_host_from_device_real_cpy(V1_temp, V1_d, N, k, 1);
//print_matrix("V1_d.dat", N, k, V1_temp);
//form Vcolres_d=A*V1_d
for(int i=0;i<k;i++){
Axb(user_struct, &V1_d[i*N], &Vcolres_d[i*N]);
Arnoldi::check_for_nans("IRA: Schur basis projeciton out", N, &Vcolres_d[i*N]);
}
Arnoldi::matrixTMultMatrix_GPU(handle, k, k, N, V1_d, 1.0, Vcolres_d, 0.0, Q_Schur_d); //Vectors are in V1_d!!! Q_Schur_d := R in matlab
// Arnoldi::to_host_from_device_real_cpy(V1_temp, Vcolres_d, N, k, 1);
// print_matrix("Vcol_d.dat", N, k, V1_temp);
//delete [] V1_temp;
//check residual!
real *residualAV=new real[k];
for(int i=0;i<k;i++){
Axb(user_struct, &V1_d[i*N], &Vcolres_d[i*N]);
Arnoldi::check_for_nans("IRA: Schur basis projeciton out in residual", N, &Vcolres_d[i*N]);
}
Arnoldi::matrixMultMatrix_GPU(handle, N, k, k, V1_d, 1.0, Q_Schur_d, 0.0, VRres_d);
for(int i=0;i<k;i++){
Arnoldi::vectors_add_GPU(handle, N, -1.0, &Vcolres_d[i*N], &VRres_d[i*N]);
residualAV[i]=Arnoldi::vector_norm2_GPU(handle, N, &VRres_d[i*N]);
}
//done
Arnoldi::to_host_from_device_real_cpy(H_Schur, Q_Schur_d, k, k, 1);
//print_matrix("RRR.dat", k, k, H_Schur);
Arnoldi::device_deallocate_all_real(3, Q_Schur_d,Vcolres_d, VRres_d);
//170820 stopped here!!!
real complex *HC=new real complex[k*k];
for(int i=0;i<k;i++){
for(int j=0;j<k;j++){
HC[I2(i,j,k)]=H_Schur[I2(i,j,k)]+0.0*I;
//HC[I2(i,j,k)]=H[I2(i,j,m)]+0.0*I;
}
}
MatrixComplexEigensystem(eigenvectorsH_kk, eigenvaluesH_kk, HC, k);
delete [] HC;
delete [] Q_Schur;
delete [] H_Schur;
int *sorted_list=new int[k];
int *sorted_list_d=Arnoldi::device_allocate_int(k, 1, 1);
get_sorted_index(k, which, eigenvaluesH_kk, sorted_list);
//sort eigenvectors of the Schur form of the Hessenberg matrix
permute_matrix_colums(k, k, sorted_list, eigenvectorsH_kk, eigenvectorsH_kk_sorted);
// Now store EigenvectorsH to GPU as cublasComplex.
real_complex_to_cublas_complex(k*k, eigenvectorsH_kk_sorted, eigenvectorsH_d);
real_device_to_cublas_complex(N*k, V_d, eigenvectorsA_unsorted_d);
Arnoldi::to_device_from_host_int_cpy(sorted_list_d, sorted_list, k, 1, 1);
permute_matrix_colums(N, k, sorted_list_d, eigenvectorsA_unsorted_d, eigenvectorsA_sorted_d);
Arnoldi::matrixMultComplexMatrix_GPU(handle, N, k, k, eigenvectorsA_sorted_d, eigenvectorsH_d, eigenvectorsA_d); //here eigenvectorsA_d contain sorted eigenvectors of original problem
cudaFree(sorted_list_d);
delete [] sorted_list;
cudaFree(eigenvectorsH_d);
cudaFree(eigenvectorsA_unsorted_d);
cudaFree(eigenvectorsA_sorted_d);
if(verbose)
printf("\ndone\n");
printf("\nNumber of correct eigenvalues=%i Eigenvalues: \n", k);
for(int i=0;i<k;i++){
real ritz_val=ritz_vector[i];
printf("\n%.08le,%.08le,residual:,%.04le", (double) creal(eigenvaluesH_kk[i]), (double) cimag(eigenvaluesH_kk[i]), (double)residualAV[i] );
}
printf("\n");
delete [] residualAV;
//get Real and Imag parts of eigenvectors
cublas_complex_to_device_real(N*k, eigenvectorsA_d, eigenvectors_real_d, eigenvectors_imag_d);
bool do_plot=true;
if((verbose)&&(do_plot)){
printf("plotting output matrixes and vectors...\n");
real *vec_f_local=new real[N];
real *V_local=new real[N*m];
real *V1_local=new real[N*m];
Arnoldi::to_host_from_device_real_cpy(vec_f_local, vec_f_d, N, 1, 1); //vec_f_d -> vec_f
Arnoldi::to_host_from_device_real_cpy(V_local, V_d, N, m, 1); //vec_V_d -> vec_V
Arnoldi::to_host_from_device_real_cpy(V1_local, V1_d, N, m, 1);
real complex *eigenvectorsA=new real complex[N*k];
cublas_complex_to_complex_real(N*k, eigenvectorsA_d, eigenvectorsA);
real *V_real_local=new real[N*k];
real *V_imag_local=new real[N*k];
Arnoldi::to_host_from_device_real_cpy(V_real_local, eigenvectors_real_d, N, k, 1);
Arnoldi::to_host_from_device_real_cpy(V_imag_local, eigenvectors_imag_d, N, k, 1);
print_matrix("EigVecA.dat", N, k, eigenvectorsA);
print_matrix("V1.dat", N, k, V1_local);
print_matrix("V_real.dat", N, k, V_real_local);//eigenvectors_real_d
print_matrix("V_imag.dat", N, k, V_imag_local);//eigenvectors_imag_d
print_matrix("V.dat", N, k, V_local);
print_matrix("H.dat", m, m, H);
print_matrix("H1.dat", m, m, H1);
print_matrix("H2.dat", m, m, H2);
print_matrix("R.dat", m, m, R);
print_matrix("Q.dat", m, m, Q);
print_matrix("EigVecH.dat", k, k, eigenvectorsH_kk_sorted);
print_vector("EigH.dat", k, eigenvaluesH_kk);
print_vector("f.dat", N, vec_f_local);
delete [] eigenvectorsA;
delete [] vec_f_local;
delete [] V_local;
delete [] V1_local;
delete [] V_real_local;
delete [] V_imag_local;
printf("done\n");
}
cudaFree(eigenvectorsA_d);
if(!external_eigenvectors){
cudaFree(eigenvectors_real_d);
cudaFree(eigenvectors_imag_d);
}
Arnoldi::device_deallocate_all_real(9, V_d, V1_d, vec_f1_d, vec_w_d, vec_v_d, vec_c_d, vec_h_d, vec_q_d, Q_d);
//Arnoldi::device_deallocate_all_real(6, Vl_r_d, Vl_i_d, Vr_r_d, Vr_i_d, Vre_d, Vim_d);
//free cublas
//cublasDestroy(handle);
delete [] vec_c; delete [] vec_h; delete [] vec_q;
delete [] H; delete [] R; delete [] Q; delete [] H1; delete [] H2;
delete [] eigenvectorsH; delete [] eigenvaluesH; delete [] eigenvectorsH_kk;
delete [] eigenvectorsH_kk_sorted; delete [] eigenvaluesH_kk; delete [] ritz_vector;
return ritz_norm;
}
|
ee1644e140419fd50ae5bc500f70da87e51dfbca.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <GL/glew.h>
#include <GL/freeglut.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <hiprand/hiprand_kernel.h>
#include <cuda_gl_interop.h>
#define CSC(call) do { \
hipError_t err = call; \
if(err != hipSuccess) { \
fprintf(stderr, "CUDA error in file '%s' in line %i: %s.\n", \
__FILE__, __LINE__, hipGetErrorString(err)); \
exit(1); \
} \
} while (0)
#define square(x) ((x)*(x))
#define THREADS 128
struct particle
{
double2 coord;
double2 velocity;
double2 best_coord;
double2 repultion_force;
};
const int width = 1280;
const int height = 720;
double scale_x = 500;
double scale_y = scale_x * height / width;
const int particle_cnt = 7000;
const double inertia = 0.96;
const double coef_local = 0.4;
const double coef_global = 0.15;
const double coef_repultion = 0.5;
const double dt = 0.07;
const dim3 blocks2D(128, 128);
const dim3 threads2D(32, 32);
const int threads_reduce = 1024;
const int blocks_reduce = width * height / threads_reduce + 1;
const int threads1D = THREADS;
const int blocks1D = (int)ceil((double)particle_cnt / THREADS);
__constant__ double pi = 3.1415;
__constant__ int seed = 1234;
__device__ double dev_center_x = 0;
__device__ double dev_center_y = 0;
__device__ double dev_func_min;
__device__ double dev_func_max;
__device__ double image[height * width];
__device__ double2 g_best;
hiprandState_t* dev_states;
particle *dev_swarm;
struct cudaGraphicsResource *res;
double *arr_max_after_reduce_dev;
double *arr_min_after_reduce_dev;
double2 *global_best_after_reduce;
GLuint vbo;
__device__ double rosenbrock(double2 arg) {
return square((1 - arg.x)) + 100 * square((arg.y - square(arg.x)));
}
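// Editor's note (not part of the original source): this is the Rosenbrock function
// f(x, y) = (1 - x)^2 + 100*(y - x^2)^2, whose global minimum f = 0 lies at (x, y) = (1, 1),
// so the swarm is expected to concentrate around that point.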
__global__ void rosenbrock_image(double scale_x, double scale_y) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int idy = blockIdx.y * blockDim.y + threadIdx.y;
int offsetx = blockDim.x * gridDim.x;
int offsety = blockDim.y * gridDim.y;
double x, y;
for (int j = idy; j < height; j += offsety)
{
for (int i = idx; i < width; i += offsetx)
{
x = (2.0f * i / (double)(width - 1) - 1.0f) * scale_x + dev_center_x;
y = -(2.0f * j / (double)(height - 1) - 1.0f) * scale_y + dev_center_y;
image[j * width + i] = rosenbrock(make_double2(x, y));
}
}
}
__global__ void minmax_reduce(double *arr_min_after_reduce, double *arr_max_after_reduce)
{
__shared__ double shared_min[threads_reduce];
__shared__ double shared_max[threads_reduce];
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < width * height)
{
shared_min[threadIdx.x] = image[idx];
shared_max[threadIdx.x] = image[idx];
}
else
{
shared_min[threadIdx.x] = INFINITY;
shared_max[threadIdx.x] = -INFINITY;
}
__syncthreads();
for (int step = 2; step <= threads_reduce; step *= 2)
{
if (threadIdx.x * (step + 1) - 1 < threads_reduce)
{
shared_min[threadIdx.x * (step + 1) - 1] = (shared_min[threadIdx.x * (step + 1) - 1] < shared_min[threadIdx.x * (step + 1) - step / 2 - 1]) ? shared_min[threadIdx.x * (step + 1) - 1] : shared_min[threadIdx.x * (step + 1) - step / 2 - 1];
shared_max[threadIdx.x * (step + 1) - 1] = (shared_max[threadIdx.x * (step + 1) - 1] > shared_max[threadIdx.x * (step + 1) - step / 2 - 1]) ? shared_max[threadIdx.x * (step + 1) - 1] : shared_max[threadIdx.x * (step + 1) - step / 2 - 1];
}
__syncthreads();
}
if (threadIdx.x == 0)
{
arr_min_after_reduce[blockIdx.x] = shared_min[threads_reduce - 1];
arr_max_after_reduce[blockIdx.x] = shared_max[threads_reduce - 1];
}
}
__global__ void minmax(double *arr_min_after_reduce, double *arr_max_after_reduce, int size)
{
double min = INFINITY;
double max = -INFINITY;
for (int i = 0; i < size; i++)
{
if (arr_min_after_reduce[i] < min)
min = arr_min_after_reduce[i];
if (arr_max_after_reduce[i] > max)
max = arr_max_after_reduce[i];
}
dev_func_min = min;
dev_func_max = max;
}
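// Editor's note (not part of the original source): minmax_reduce writes one partial
// minimum/maximum per block; this kernel then combines those partials serially and is
// launched with a single thread (<<<1, 1>>> in update()), which is cheap compared with
// the image-sized reduction that precedes it.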
__device__ uchar4 get_color(double f) {
float k = 1.0 / 6.0;
if (f <= 0)
return make_uchar4(0, 0, 0, 0);
if (f < k)
return make_uchar4((int)(f * 255 / k), 0, 0, 0);
if (f < 2 * k)
return make_uchar4(255, (int)((f - k) * 255 / k), 0, 0);
if (f < 3 * k)
return make_uchar4(255, 255, (int)((f - 2 * k) * 255 / k), 0);
if (f < 4 * k)
return make_uchar4(255 - (int)((f - 3 * k) * 255 / k), 255, 255, 0);
if (f < 5 * k)
return make_uchar4(0, 255 - (int)((f - 4 * k) * 255 / k), 255, 0);
if (f <= 6 * k)
return make_uchar4(0, 0, 255 - (int)((f - 5 * k) * 255 / k), 0);
return make_uchar4(0, 0, 0, 0);
}
__global__ void heatmap(uchar4* data)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int idy = blockIdx.y * blockDim.y + threadIdx.y;
int offsetx = blockDim.x * gridDim.x;
int offsety = blockDim.y * gridDim.y;
for (int j = idy; j < height; j += offsety)
{
for (int i = idx; i < width; i += offsetx)
{
data[j * width + i] = get_color((image[j * width + i] - dev_func_min) / (dev_func_max - dev_func_min));
}
}
}
__global__ void update_coords_and_velocities(double inertia, double coef_local, double coef_global, double dt, double coef_repultion,
particle *swarm, int particle_cnt, uchar4* data, double scale_x, double scale_y, hiprandState_t * state)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int offsetx = blockDim.x * gridDim.x;
for (int i = idx; i < particle_cnt; i += offsetx)
{
swarm[idx].velocity.x = inertia * swarm[idx].velocity.x + (coef_local * hiprand_uniform(&state[idx]) * (swarm[idx].best_coord.x - swarm[idx].coord.x) +
coef_global * hiprand_uniform(state) * (g_best.x - swarm[idx].coord.x) + coef_repultion * swarm[idx].repultion_force.x) * dt;
swarm[idx].velocity.y = inertia * swarm[idx].velocity.y + (coef_local * hiprand_uniform(&state[idx]) * (swarm[idx].best_coord.y - swarm[idx].coord.y) +
coef_global * hiprand_uniform(state) * (g_best.y - swarm[idx].coord.y) + coef_repultion * swarm[idx].repultion_force.y) * dt;
swarm[idx].coord.x += swarm[idx].velocity.x * dt;
swarm[idx].coord.y += swarm[idx].velocity.y * dt;
if (rosenbrock(make_double2(swarm[idx].coord.x, swarm[idx].coord.y)) < rosenbrock(make_double2(swarm[idx].best_coord.x, swarm[idx].best_coord.y)))
{
swarm[idx].best_coord.x = swarm[idx].coord.x;
swarm[idx].best_coord.y = swarm[idx].coord.y;
}
double2 particle_draw_coord;
particle_draw_coord.x = (((swarm[idx].coord.x - dev_center_x) / scale_x) + 1) * (width - 1) / 2;
particle_draw_coord.y = (1 - ((swarm[idx].coord.y - dev_center_y) / scale_y)) * (height - 1) / 2;
if (particle_draw_coord.x > 0 && particle_draw_coord.x < width && particle_draw_coord.y > 0 && particle_draw_coord.y < height)
{
data[(int)particle_draw_coord.y * width + (int)particle_draw_coord.x] = make_uchar4(255, 255, 255, 255);
}
}
}
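// Editor's note (not part of the original source): the kernel above is the standard
// particle-swarm update with an extra repulsion term,
//   v <- inertia*v + (c_local*r1*(p_best - x) + c_global*r2*(g_best - x) + c_rep*F_rep)*dt
//   x <- x + v*dt
// where r1, r2 are uniform random numbers from hiprand, p_best is the particle's best
// visited point, g_best the swarm-wide best, and F_rep the pairwise repulsion computed in
// repulsive_force below; particles that land inside the window are drawn as white pixels.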
__global__ void repulsive_force(particle *swarm, int particle_cnt)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int offsetx = blockDim.x * gridDim.x;
double square_dist;
for (int i = idx; i < particle_cnt; i += offsetx)
{
for (int j = 0; j < particle_cnt; j += 1)
{
square_dist = square(swarm[j].coord.x - swarm[i].coord.x) + square(swarm[j].coord.y - swarm[i].coord.y);
swarm[i].repultion_force.x -= (swarm[j].coord.x - swarm[i].coord.x) / (square(square_dist) + 1e-3);
swarm[i].repultion_force.y -= (swarm[j].coord.y - swarm[i].coord.y) / (square(square_dist) + 1e-3);
}
}
}
__global__ void update_window_center(particle *swarm, int particle_cnt)
{
double2 sum = make_double2(0, 0);
for (int i = 0; i < particle_cnt; i++)
{
sum.x += swarm[i].coord.x;
sum.y += swarm[i].coord.y;
}
sum.x /= particle_cnt;
sum.y /= particle_cnt;
dev_center_x = sum.x;
dev_center_y = sum.y;
}
__global__ void global_best_reduce(particle *swarm, double2 *global_best_after_reduce, int particle_cnt)
{
__shared__ double2 shared_min[threads_reduce];
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < particle_cnt) {
shared_min[threadIdx.x] = swarm[idx].coord;
}
else {
shared_min[threadIdx.x] = make_double2(INFINITY, INFINITY);
}
__syncthreads();
for (int step = 2; step <= threads_reduce; step *= 2)
{
if (threadIdx.x * (step + 1) - 1 < threads_reduce)
{
shared_min[threadIdx.x * (step + 1) - 1] = (rosenbrock(shared_min[threadIdx.x * (step + 1) - 1]) < rosenbrock(shared_min[threadIdx.x * (step + 1) - step / 2 - 1])) ?
shared_min[threadIdx.x * (step + 1) - 1] : shared_min[threadIdx.x * (step + 1) - step / 2 - 1];
}
__syncthreads();
}
if (threadIdx.x == 0)
{
global_best_after_reduce[blockIdx.x] = shared_min[threads_reduce - 1];
}
}
__global__ void global_best_final(particle *swarm, double2 *global_best_after_reduce, int size, int particle_cnt)
{
for (int i = 0; i < size; i++)
{
if (rosenbrock(global_best_after_reduce[i]) < rosenbrock(g_best))
g_best = global_best_after_reduce[i];
}
}
__global__ void swarm_start(particle *swarm, int particle_cnt, hiprandState_t * state)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int offsetx = blockDim.x * gridDim.x;
for (int i = idx; i < particle_cnt; i += offsetx)
{
hiprand_init(seed, idx, 0, &state[idx]);
swarm[idx].best_coord.x = swarm[idx].coord.x = hiprand_uniform(&state[idx]) * width * cos((double)idx / THREADS * 2 * pi);
swarm[idx].best_coord.y = swarm[idx].coord.y = hiprand_uniform(&state[idx]) * height * sin((double)idx / THREADS * 2 * pi);
swarm[idx].velocity = make_double2(0, 0);
swarm[idx].repultion_force = make_double2(0, 0);
}
}
void update() {
float time;
hipEvent_t start, stop;
CSC(hipEventCreate(&start));
CSC(hipEventCreate(&stop));
CSC(hipEventRecord(start, 0));
size_t size;
uchar4* image_heatmap;
CSC(hipGraphicsMapResources(1, &res, 0));
CSC(hipGraphicsResourceGetMappedPointer((void**)&image_heatmap, &size, res));
update_window_center << <1, 32 >> > (dev_swarm, particle_cnt);
CSC(hipGetLastError());
rosenbrock_image << <blocks2D, threads2D >> > (scale_x, scale_y);
CSC(hipGetLastError());
minmax_reduce << <blocks_reduce, threads_reduce >> > (arr_min_after_reduce_dev, arr_max_after_reduce_dev);
CSC(hipGetLastError());
minmax << <1, 1 >> > (arr_min_after_reduce_dev, arr_max_after_reduce_dev, blocks_reduce);
CSC(hipGetLastError());
heatmap << <blocks2D, threads2D >> > (image_heatmap);
CSC(hipGetLastError());
repulsive_force << <blocks1D, threads1D >> > (dev_swarm, particle_cnt);
CSC(hipGetLastError());
update_coords_and_velocities << <blocks1D, threads1D >> > (inertia, coef_local, coef_global, dt, coef_repultion, dev_swarm, particle_cnt, image_heatmap, scale_x, scale_y, dev_states);
CSC(hipGetLastError());
global_best_reduce << <ceil((double)particle_cnt / threads_reduce), threads_reduce >> > (dev_swarm, global_best_after_reduce, particle_cnt);
CSC(hipGetLastError());
global_best_final << <1, 32 >> > (dev_swarm, global_best_after_reduce, blocks_reduce, particle_cnt);
CSC(hipGetLastError());
CSC(hipDeviceSynchronize());
CSC(hipGraphicsUnmapResources(1, &res, 0));
CSC(hipEventRecord(stop, 0));
CSC(hipEventSynchronize(stop));
CSC(hipEventElapsedTime(&time, start, stop));
CSC(hipEventDestroy(start));
CSC(hipEventDestroy(stop));
printf("%.4f\n", time);
glutPostRedisplay();
}
void display() {
glClearColor(0.0, 0.0, 0.0, 1.0);
glClear(GL_COLOR_BUFFER_BIT);
glDrawPixels(width, height, GL_RGBA, GL_UNSIGNED_BYTE, 0);
glutSwapBuffers();
}
void keys(unsigned char Key, int x, int y)
{
switch (Key)
{
case 27:
CSC(hipGraphicsUnregisterResource(res));
glBindBuffer(1, vbo);
glDeleteBuffers(1, &vbo);
exit(0);
break;
case 'q':
scale_x *= 1.05;//20;
scale_y = scale_x * height / width;
break;
case 'e':
if (scale_x > 30)
{
scale_x *= 0.95;//20;
scale_y = scale_x * height / width;
}
break;
};
}
int main(int argc, char** argv)
{
CSC(hipMalloc(&dev_swarm, sizeof(particle) * (int)(ceil(particle_cnt / (double)THREADS)) * THREADS));
CSC(hipMalloc(&dev_states, sizeof(hiprandState_t) * (int)(ceil(particle_cnt / (double)THREADS)) * THREADS));
CSC(hipMalloc(&global_best_after_reduce, sizeof(double2) * ceil(particle_cnt / (double)THREADS)));
CSC(hipMalloc(&arr_max_after_reduce_dev, sizeof(double) * blocks_reduce));
CSC(hipMalloc(&arr_min_after_reduce_dev, sizeof(double) * blocks_reduce));
swarm_start << <blocks1D, threads1D >> > (dev_swarm, particle_cnt, dev_states);
CSC(hipGetLastError());
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGBA);
glutInitWindowSize(width, height);
glutCreateWindow("YakimovichCP");
glutIdleFunc(update);
glutDisplayFunc(display);
glutKeyboardFunc(keys);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
gluOrtho2D(0.0, (GLdouble)width, 0.0, (GLdouble)height);
glewInit();
glGenBuffers(1, &vbo);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, vbo);
glBufferData(GL_PIXEL_UNPACK_BUFFER_ARB, width * height * sizeof(uchar4), NULL, GL_DYNAMIC_DRAW);
CSC(hipGraphicsGLRegisterBuffer(&res, vbo, hipGraphicsMapFlagsWriteDiscard));
glutMainLoop();
return 0;
}
|
ee1644e140419fd50ae5bc500f70da87e51dfbca.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <GL/glew.h>
#include <GL/freeglut.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <curand_kernel.h>
#include <cuda_gl_interop.h>
#define CSC(call) do { \
cudaError err = call; \
if(err != cudaSuccess) { \
fprintf(stderr, "CUDA error in file '%s' in line %i: %s.\n", \
__FILE__, __LINE__, cudaGetErrorString(err)); \
exit(1); \
} \
} while (0)
#define square(x) ((x)*(x))
#define THREADS 128
struct particle
{
double2 coord;
double2 velocity;
double2 best_coord;
double2 repultion_force;
};
const int width = 1280;
const int height = 720;
double scale_x = 500;
double scale_y = scale_x * height / width;
const int particle_cnt = 7000;
const double inertia = 0.96;
const double coef_local = 0.4;
const double coef_global = 0.15;
const double coef_repultion = 0.5;
const double dt = 0.07;
const dim3 blocks2D(128, 128);
const dim3 threads2D(32, 32);
const int threads_reduce = 1024;
const int blocks_reduce = width * height / threads_reduce + 1;
const int threads1D = THREADS;
const int blocks1D = (int)ceil((double)particle_cnt / THREADS);
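// Editor's note (worked numbers, not part of the original source): with the constants above,
// blocks_reduce = 1280*720/1024 + 1 = 901 blocks of 1024 threads for the image reduction,
// and blocks1D = ceil(7000/128) = 55 blocks of 128 threads (7040 threads) for the
// per-particle kernels, slightly more than particle_cnt.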
__constant__ double pi = 3.1415;
__constant__ int seed = 1234;
__device__ double dev_center_x = 0;
__device__ double dev_center_y = 0;
__device__ double dev_func_min;
__device__ double dev_func_max;
__device__ double image[height * width];
__device__ double2 g_best;
curandState* dev_states;
particle *dev_swarm;
struct cudaGraphicsResource *res;
double *arr_max_after_reduce_dev;
double *arr_min_after_reduce_dev;
double2 *global_best_after_reduce;
GLuint vbo;
__device__ double rosenbrock(double2 arg) {
return square((1 - arg.x)) + 100 * square((arg.y - square(arg.x)));
}
__global__ void rosenbrock_image(double scale_x, double scale_y) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int idy = blockIdx.y * blockDim.y + threadIdx.y;
int offsetx = blockDim.x * gridDim.x;
int offsety = blockDim.y * gridDim.y;
double x, y;
for (int j = idy; j < height; j += offsety)
{
for (int i = idx; i < width; i += offsetx)
{
x = (2.0f * i / (double)(width - 1) - 1.0f) * scale_x + dev_center_x;
y = -(2.0f * j / (double)(height - 1) - 1.0f) * scale_y + dev_center_y;
image[j * width + i] = rosenbrock(make_double2(x, y));
}
}
}
__global__ void minmax_reduce(double *arr_min_after_reduce, double *arr_max_after_reduce)
{
__shared__ double shared_min[threads_reduce];
__shared__ double shared_max[threads_reduce];
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < width * height)
{
shared_min[threadIdx.x] = image[idx];
shared_max[threadIdx.x] = image[idx];
}
else
{
shared_min[threadIdx.x] = INFINITY;
shared_max[threadIdx.x] = -INFINITY;
}
__syncthreads();
for (int step = 2; step <= threads_reduce; step *= 2)
{
if (threadIdx.x * (step + 1) - 1 < threads_reduce)
{
shared_min[threadIdx.x * (step + 1) - 1] = (shared_min[threadIdx.x * (step + 1) - 1] < shared_min[threadIdx.x * (step + 1) - step / 2 - 1]) ? shared_min[threadIdx.x * (step + 1) - 1] : shared_min[threadIdx.x * (step + 1) - step / 2 - 1];
shared_max[threadIdx.x * (step + 1) - 1] = (shared_max[threadIdx.x * (step + 1) - 1] > shared_max[threadIdx.x * (step + 1) - step / 2 - 1]) ? shared_max[threadIdx.x * (step + 1) - 1] : shared_max[threadIdx.x * (step + 1) - step / 2 - 1];
}
__syncthreads();
}
if (threadIdx.x == 0)
{
arr_min_after_reduce[blockIdx.x] = shared_min[threads_reduce - 1];
arr_max_after_reduce[blockIdx.x] = shared_max[threads_reduce - 1];
}
}
__global__ void minmax(double *arr_min_after_reduce, double *arr_max_after_reduce, int size)
{
double min = INFINITY;
double max = -INFINITY;
for (int i = 0; i < size; i++)
{
if (arr_min_after_reduce[i] < min)
min = arr_min_after_reduce[i];
if (arr_max_after_reduce[i] > max)
max = arr_max_after_reduce[i];
}
dev_func_min = min;
dev_func_max = max;
}
__device__ uchar4 get_color(double f) {
float k = 1.0 / 6.0;
if (f <= 0)
return make_uchar4(0, 0, 0, 0);
if (f < k)
return make_uchar4((int)(f * 255 / k), 0, 0, 0);
if (f < 2 * k)
return make_uchar4(255, (int)((f - k) * 255 / k), 0, 0);
if (f < 3 * k)
return make_uchar4(255, 255, (int)((f - 2 * k) * 255 / k), 0);
if (f < 4 * k)
return make_uchar4(255 - (int)((f - 3 * k) * 255 / k), 255, 255, 0);
if (f < 5 * k)
return make_uchar4(0, 255 - (int)((f - 4 * k) * 255 / k), 255, 0);
if (f <= 6 * k)
return make_uchar4(0, 0, 255 - (int)((f - 5 * k) * 255 / k), 0);
return make_uchar4(0, 0, 0, 0);
}
__global__ void heatmap(uchar4* data)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int idy = blockIdx.y * blockDim.y + threadIdx.y;
int offsetx = blockDim.x * gridDim.x;
int offsety = blockDim.y * gridDim.y;
for (int j = idy; j < height; j += offsety)
{
for (int i = idx; i < width; i += offsetx)
{
data[j * width + i] = get_color((image[j * width + i] - dev_func_min) / (dev_func_max - dev_func_min));
}
}
}
__global__ void update_coords_and_velocities(double inertia, double coef_local, double coef_global, double dt, double coef_repultion,
particle *swarm, int particle_cnt, uchar4* data, double scale_x, double scale_y, curandState * state)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int offsetx = blockDim.x * gridDim.x;
for (int i = idx; i < particle_cnt; i += offsetx)
{
swarm[idx].velocity.x = inertia * swarm[idx].velocity.x + (coef_local * curand_uniform(&state[idx]) * (swarm[idx].best_coord.x - swarm[idx].coord.x) +
coef_global * curand_uniform(state) * (g_best.x - swarm[idx].coord.x) + coef_repultion * swarm[idx].repultion_force.x) * dt;
swarm[idx].velocity.y = inertia * swarm[idx].velocity.y + (coef_local * curand_uniform(&state[idx]) * (swarm[idx].best_coord.y - swarm[idx].coord.y) +
coef_global * curand_uniform(state) * (g_best.y - swarm[idx].coord.y) + coef_repultion * swarm[idx].repultion_force.y) * dt;
swarm[idx].coord.x += swarm[idx].velocity.x * dt;
swarm[idx].coord.y += swarm[idx].velocity.y * dt;
if (rosenbrock(make_double2(swarm[idx].coord.x, swarm[idx].coord.y)) < rosenbrock(make_double2(swarm[idx].best_coord.x, swarm[idx].best_coord.y)))
{
swarm[idx].best_coord.x = swarm[idx].coord.x;
swarm[idx].best_coord.y = swarm[idx].coord.y;
}
double2 particle_draw_coord;
particle_draw_coord.x = (((swarm[idx].coord.x - dev_center_x) / scale_x) + 1) * (width - 1) / 2;
particle_draw_coord.y = (1 - ((swarm[idx].coord.y - dev_center_y) / scale_y)) * (height - 1) / 2;
if (particle_draw_coord.x > 0 && particle_draw_coord.x < width && particle_draw_coord.y > 0 && particle_draw_coord.y < height)
{
data[(int)particle_draw_coord.y * width + (int)particle_draw_coord.x] = make_uchar4(255, 255, 255, 255);
}
}
}
__global__ void repulsive_force(particle *swarm, int particle_cnt)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int offsetx = blockDim.x * gridDim.x;
double square_dist;
for (int i = idx; i < particle_cnt; i += offsetx)
{
for (int j = 0; j < particle_cnt; j += 1)
{
square_dist = square(swarm[j].coord.x - swarm[i].coord.x) + square(swarm[j].coord.y - swarm[i].coord.y);
swarm[i].repultion_force.x -= (swarm[j].coord.x - swarm[i].coord.x) / (square(square_dist) + 1e-3);
swarm[i].repultion_force.y -= (swarm[j].coord.y - swarm[i].coord.y) / (square(square_dist) + 1e-3);
}
}
}
__global__ void update_window_center(particle *swarm, int particle_cnt)
{
double2 sum = make_double2(0, 0);
for (int i = 0; i < particle_cnt; i++)
{
sum.x += swarm[i].coord.x;
sum.y += swarm[i].coord.y;
}
sum.x /= particle_cnt;
sum.y /= particle_cnt;
dev_center_x = sum.x;
dev_center_y = sum.y;
}
__global__ void global_best_reduce(particle *swarm, double2 *global_best_after_reduce, int particle_cnt)
{
__shared__ double2 shared_min[threads_reduce];
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < particle_cnt) {
shared_min[threadIdx.x] = swarm[idx].coord;
}
else {
shared_min[threadIdx.x] = make_double2(INFINITY, INFINITY);
}
__syncthreads();
for (int step = 2; step <= threads_reduce; step *= 2)
{
if (threadIdx.x * (step + 1) - 1 < threads_reduce)
{
shared_min[threadIdx.x * (step + 1) - 1] = (rosenbrock(shared_min[threadIdx.x * (step + 1) - 1]) < rosenbrock(shared_min[threadIdx.x * (step + 1) - step / 2 - 1])) ?
shared_min[threadIdx.x * (step + 1) - 1] : shared_min[threadIdx.x * (step + 1) - step / 2 - 1];
}
__syncthreads();
}
if (threadIdx.x == 0)
{
global_best_after_reduce[blockIdx.x] = shared_min[threads_reduce - 1];
}
}
__global__ void global_best_final(particle *swarm, double2 *global_best_after_reduce, int size, int particle_cnt)
{
for (int i = 0; i < size; i++)
{
if (rosenbrock(global_best_after_reduce[i]) < rosenbrock(g_best))
g_best = global_best_after_reduce[i];
}
}
__global__ void swarm_start(particle *swarm, int particle_cnt, curandState * state)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int offsetx = blockDim.x * gridDim.x;
for (int i = idx; i < particle_cnt; i += offsetx)
{
curand_init(seed, idx, 0, &state[idx]);
swarm[idx].best_coord.x = swarm[idx].coord.x = curand_uniform(&state[idx]) * width * cos((double)idx / THREADS * 2 * pi);
swarm[idx].best_coord.y = swarm[idx].coord.y = curand_uniform(&state[idx]) * height * sin((double)idx / THREADS * 2 * pi);
swarm[idx].velocity = make_double2(0, 0);
swarm[idx].repultion_force = make_double2(0, 0);
}
}
void update() {
float time;
cudaEvent_t start, stop;
CSC(cudaEventCreate(&start));
CSC(cudaEventCreate(&stop));
CSC(cudaEventRecord(start, 0));
size_t size;
uchar4* image_heatmap;
CSC(cudaGraphicsMapResources(1, &res, 0));
CSC(cudaGraphicsResourceGetMappedPointer((void**)&image_heatmap, &size, res));
update_window_center << <1, 32 >> > (dev_swarm, particle_cnt);
CSC(cudaGetLastError());
rosenbrock_image << <blocks2D, threads2D >> > (scale_x, scale_y);
CSC(cudaGetLastError());
minmax_reduce << <blocks_reduce, threads_reduce >> > (arr_min_after_reduce_dev, arr_max_after_reduce_dev);
CSC(cudaGetLastError());
minmax << <1, 1 >> > (arr_min_after_reduce_dev, arr_max_after_reduce_dev, blocks_reduce);
CSC(cudaGetLastError());
heatmap << <blocks2D, threads2D >> > (image_heatmap);
CSC(cudaGetLastError());
repulsive_force << <blocks1D, threads1D >> > (dev_swarm, particle_cnt);
CSC(cudaGetLastError());
update_coords_and_velocities << <blocks1D, threads1D >> > (inertia, coef_local, coef_global, dt, coef_repultion, dev_swarm, particle_cnt, image_heatmap, scale_x, scale_y, dev_states);
CSC(cudaGetLastError());
global_best_reduce << <ceil((double)particle_cnt / threads_reduce), threads_reduce >> > (dev_swarm, global_best_after_reduce, particle_cnt);
CSC(cudaGetLastError());
global_best_final << <1, 32 >> > (dev_swarm, global_best_after_reduce, blocks_reduce, particle_cnt);
CSC(cudaGetLastError());
CSC(cudaDeviceSynchronize());
CSC(cudaGraphicsUnmapResources(1, &res, 0));
CSC(cudaEventRecord(stop, 0));
CSC(cudaEventSynchronize(stop));
CSC(cudaEventElapsedTime(&time, start, stop));
CSC(cudaEventDestroy(start));
CSC(cudaEventDestroy(stop));
printf("%.4f\n", time);
glutPostRedisplay();
}
void display() {
glClearColor(0.0, 0.0, 0.0, 1.0);
glClear(GL_COLOR_BUFFER_BIT);
glDrawPixels(width, height, GL_RGBA, GL_UNSIGNED_BYTE, 0);
glutSwapBuffers();
}
void keys(unsigned char Key, int x, int y)
{
switch (Key)
{
case 27:
CSC(cudaGraphicsUnregisterResource(res));
glBindBuffer(1, vbo);
glDeleteBuffers(1, &vbo);
exit(0);
break;
case 'q':
scale_x *= 1.05;//20;
scale_y = scale_x * height / width;
break;
case 'e':
if (scale_x > 30)
{
scale_x *= 0.95;//20;
scale_y = scale_x * height / width;
}
break;
};
}
int main(int argc, char** argv)
{
CSC(cudaMalloc(&dev_swarm, sizeof(particle) * (int)(ceil(particle_cnt / (double)THREADS)) * THREADS));
CSC(cudaMalloc(&dev_states, sizeof(curandState) * (int)(ceil(particle_cnt / (double)THREADS)) * THREADS));
CSC(cudaMalloc(&global_best_after_reduce, sizeof(double2) * ceil(particle_cnt / (double)THREADS)));
CSC(cudaMalloc(&arr_max_after_reduce_dev, sizeof(double) * blocks_reduce));
CSC(cudaMalloc(&arr_min_after_reduce_dev, sizeof(double) * blocks_reduce));
swarm_start << <blocks1D, threads1D >> > (dev_swarm, particle_cnt, dev_states);
CSC(cudaGetLastError());
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGBA);
glutInitWindowSize(width, height);
glutCreateWindow("YakimovichCP");
glutIdleFunc(update);
glutDisplayFunc(display);
glutKeyboardFunc(keys);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
gluOrtho2D(0.0, (GLdouble)width, 0.0, (GLdouble)height);
glewInit();
glGenBuffers(1, &vbo);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, vbo);
glBufferData(GL_PIXEL_UNPACK_BUFFER_ARB, width * height * sizeof(uchar4), NULL, GL_DYNAMIC_DRAW);
CSC(cudaGraphicsGLRegisterBuffer(&res, vbo, cudaGraphicsMapFlagsWriteDiscard));
glutMainLoop();
return 0;
}
|
95613da81fbf8309865e5b1813c890e4abe31450.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* from: https://devblogs.nvidia.com/parallelforall/even-easier-introduction-cuda/
* Jialin Liu
* Simple starting cpp cuda program
* Jun 24 2017, Saturday, 2:09pm
* Compile and test on Maeve, a 3GPU single node at NERSC, LBNL, CA.
*/
#include<iostream>
#include<math.h>
using namespace std;
//CUDA kernel functions to add the elements of two arrays
__global__
void add (int n, float *x, float * y){
for (int i=0;i<n;i++){
y[i] = x[i] + y[i];
}
}
int main(void)
{
int N= 1<<20; //1 million elements
//float * x= new float[N];
//float * y= new float[N];
float *x, *y;
hipMallocManaged(&x, N*sizeof(float));
hipMallocManaged(&y, N*sizeof(float));
clock_t t;
//Initialize x and y arrays on the host
for (int i=0; i<N; i++){
x[i] =1.5f;
y[i] =2.3f;
}
//run kernel on 1M elements on the GPU
t = clock();
//add(N, x, y);
hipLaunchKernelGGL(( add), dim3(1), dim3(1), 0, 0, N, x, y);
t = clock() -t;
//cout<<format("%f seconds")%((float)t/CLOCKS_PER_SEC)<<endl;
cout <<(float)t/CLOCKS_PER_SEC<<" seconds"<<endl;
//Wait for GPU to finish before accessing on host
hipDeviceSynchronize();
float maxError = 0.0f;
for (int i =0;i <N;i ++)
maxError =fmax(maxError, fabs(y[i]-3.8f));
cout <<"Max error: "<<maxError <<endl;
//delete [] x;
//delete [] y;
hipFree(x);
hipFree(y);
return 0;
}
|
95613da81fbf8309865e5b1813c890e4abe31450.cu
|
/* from: https://devblogs.nvidia.com/parallelforall/even-easier-introduction-cuda/
* Jialin Liu
* Simple starting cpp cuda program
* Jun 24 2017, Saturday, 2:09pm
* Compile and test on Maeve, a 3GPU single node at NERSC, LBNL, CA.
*/
#include<iostream>
#include<math.h>
using namespace std;
//CUDA kernel functions to add the elements of two arrays
__global__
void add (int n, float *x, float * y){
for (int i=0;i<n;i++){
y[i] = x[i] + y[i];
}
}
int main(void)
{
int N= 1<<20; //1 million elements
//float * x= new float[N];
//float * y= new float[N];
float *x, *y;
cudaMallocManaged(&x, N*sizeof(float));
cudaMallocManaged(&y, N*sizeof(float));
clock_t t;
//Initialize x and y arrays on the host
for (int i=0; i<N; i++){
x[i] =1.5f;
y[i] =2.3f;
}
//run the kernel on 1M elements on the GPU (the commented-out line below is the serial CPU call)
t = clock();
//add(N, x, y);
add<<<1, 1>>>(N, x, y);
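// the launch is asynchronous, so this interval mainly measures launch overhead; cudaDeviceSynchronize() below waits for the kernel to finish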
t = clock() -t;
//cout<<format("%f seconds")%((float)t/CLOCKS_PER_SEC)<<endl;
cout <<(float)t/CLOCKS_PER_SEC<<" seconds"<<endl;
//Wait for GPU to finish before accessing on host
cudaDeviceSynchronize();
float maxError = 0.0f;
for (int i =0;i <N;i ++)
maxError =fmax(maxError, fabs(y[i]-3.8f));
cout <<"Max error: "<<maxError <<endl;
//delete [] x;
//delete [] y;
cudaFree(x);
cudaFree(y);
return 0;
}
|
dc373fc5b42612a8c6fd255682a14ed77e8cf091.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@precisions mixed zc -> ds
*/
#include "common_magma.h"
#define NB 64
// adds x += r (including conversion to double) --and--
// copies w = b
// each thread does one index, x[i] and w[i]
__global__ void
zcaxpycp_kernel(
int m, magmaFloatComplex *r, magmaDoubleComplex *x,
const magmaDoubleComplex *b, magmaDoubleComplex *w )
{
const int i = threadIdx.x + blockIdx.x*NB;
if ( i < m ) {
x[i] = MAGMA_Z_ADD( x[i], cuComplexFloatToDouble( r[i] ) );
w[i] = b[i];
}
}
// adds x += r --and--
// copies r = b
// each thread does one index, x[i] and r[i]
__global__ void
zaxpycp_kernel(
int m, magmaDoubleComplex *r, magmaDoubleComplex *x,
const magmaDoubleComplex *b)
{
const int i = threadIdx.x + blockIdx.x*NB;
if ( i < m ) {
x[i] = MAGMA_Z_ADD( x[i], r[i] );
r[i] = b[i];
}
}
// ----------------------------------------------------------------------
// adds x += r (including conversion to double) --and--
// copies w = b
extern "C" void
magmablas_zcaxpycp_q(
magma_int_t m, magmaFloatComplex *r, magmaDoubleComplex *x,
const magmaDoubleComplex *b, magmaDoubleComplex *w,
magma_queue_t queue )
{
dim3 threads( NB );
dim3 grid( (m + NB - 1)/NB );
hipLaunchKernelGGL(( zcaxpycp_kernel) , dim3(grid), dim3(threads), 0, queue , m, r, x, b, w );
}
extern "C" void
magmablas_zcaxpycp(
magma_int_t m, magmaFloatComplex *r, magmaDoubleComplex *x,
const magmaDoubleComplex *b, magmaDoubleComplex *w)
{
magmablas_zcaxpycp_q( m, r, x, b, w, magma_stream );
}
// ----------------------------------------------------------------------
// adds x += r --and--
// copies r = b
extern "C" void
magmablas_zaxpycp_q(
magma_int_t m, magmaDoubleComplex *r, magmaDoubleComplex *x,
const magmaDoubleComplex *b,
magma_queue_t queue )
{
dim3 threads( NB );
dim3 grid( (m + NB - 1)/NB );
hipLaunchKernelGGL(( zaxpycp_kernel) , dim3(grid), dim3(threads), 0, queue , m, r, x, b );
}
extern "C" void
magmablas_zaxpycp(
magma_int_t m, magmaDoubleComplex *r, magmaDoubleComplex *x,
const magmaDoubleComplex *b)
{
magmablas_zaxpycp_q( m, r, x, b, magma_stream );
}
|
dc373fc5b42612a8c6fd255682a14ed77e8cf091.cu
|
/*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@precisions mixed zc -> ds
*/
#include "common_magma.h"
#define NB 64
// adds x += r (including conversion to double) --and--
// copies w = b
// each thread does one index, x[i] and w[i]
__global__ void
zcaxpycp_kernel(
int m, magmaFloatComplex *r, magmaDoubleComplex *x,
const magmaDoubleComplex *b, magmaDoubleComplex *w )
{
const int i = threadIdx.x + blockIdx.x*NB;
if ( i < m ) {
x[i] = MAGMA_Z_ADD( x[i], cuComplexFloatToDouble( r[i] ) );
w[i] = b[i];
}
}
// adds x += r --and--
// copies r = b
// each thread does one index, x[i] and r[i]
__global__ void
zaxpycp_kernel(
int m, magmaDoubleComplex *r, magmaDoubleComplex *x,
const magmaDoubleComplex *b)
{
const int i = threadIdx.x + blockIdx.x*NB;
if ( i < m ) {
x[i] = MAGMA_Z_ADD( x[i], r[i] );
r[i] = b[i];
}
}
// ----------------------------------------------------------------------
// adds x += r (including conversion to double) --and--
// copies w = b
extern "C" void
magmablas_zcaxpycp_q(
magma_int_t m, magmaFloatComplex *r, magmaDoubleComplex *x,
const magmaDoubleComplex *b, magmaDoubleComplex *w,
magma_queue_t queue )
{
dim3 threads( NB );
dim3 grid( (m + NB - 1)/NB );
zcaxpycp_kernel <<< grid, threads, 0, queue >>> ( m, r, x, b, w );
}
extern "C" void
magmablas_zcaxpycp(
magma_int_t m, magmaFloatComplex *r, magmaDoubleComplex *x,
const magmaDoubleComplex *b, magmaDoubleComplex *w)
{
magmablas_zcaxpycp_q( m, r, x, b, w, magma_stream );
}
// ----------------------------------------------------------------------
// adds x += r --and--
// copies r = b
extern "C" void
magmablas_zaxpycp_q(
magma_int_t m, magmaDoubleComplex *r, magmaDoubleComplex *x,
const magmaDoubleComplex *b,
magma_queue_t queue )
{
dim3 threads( NB );
dim3 grid( (m + NB - 1)/NB );
zaxpycp_kernel <<< grid, threads, 0, queue >>> ( m, r, x, b );
}
extern "C" void
magmablas_zaxpycp(
magma_int_t m, magmaDoubleComplex *r, magmaDoubleComplex *x,
const magmaDoubleComplex *b)
{
magmablas_zaxpycp_q( m, r, x, b, magma_stream );
}
|
7bc778771d277c122dd78792d23220e9390e62fa.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "../include/Particles.cuh"
#include "../include/Options.cuh"
#include <hip/hip_runtime.h>
#include <hiprand/hiprand_kernel.h>
#include <time.h>
#include <stdio.h>
extern __constant__ int d_particlesNumber;
extern __constant__ int d_dimensions;
extern __constant__ boxConstraints d_initializationBoxConstraints;
extern __constant__ boxConstraints d_solutionBoxConstraints;
__global__ void _Particles_Particles_initPrng(int seed, hiprandState_t* d_prngStates)
{
int particleId = threadIdx.x + blockIdx.x * blockDim.x;
if (particleId >= d_particlesNumber)
return;
hiprand_init(seed, particleId, 0, &d_prngStates[particleId]);
}
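// Positions use a coordinate-major layout: coordinate j of particle p is stored at j * d_particlesNumber + p,
// so consecutive threads (particles) read consecutive addresses. The cost below is a Griewank-style
// benchmark: sum(x_j^2) / 40 + 1 - prod(cos(x_j / (j + 1))).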
__global__ void _PsoParticles_computeCosts_Task1(float* d_positions, float* d_costs)
{
int particleId = threadIdx.x + blockIdx.x * blockDim.x;
if (particleId >= d_particlesNumber)
return;
float subSum = 0, subProduct = 1;
for (int coordIdx = particleId, i = 1; coordIdx < d_particlesNumber * d_dimensions;
coordIdx += d_particlesNumber, i++)
{
float x_i = d_positions[coordIdx];
subSum += x_i * x_i;
subProduct *= cosf(x_i / i);
}
d_costs[particleId] = subSum / 40.0 + 1 - subProduct;
}
__device__ float _PsoParticles_computeCosts_Task1(float* position)
{
float subSum = 0, subProduct = 1;
for (int i = 0; i < d_dimensions; i++)
{
float x_i = position[i];
subSum += x_i * x_i;
subProduct *= cosf(x_i / (i + 1));
}
return subSum / 40.0 + 1 - subProduct;
}
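// Task 2 is the Rosenbrock function: sum over consecutive coordinate pairs of 100 * (x_{j+1} - x_j^2)^2 + (1 - x_j)^2,
// evaluated over the same coordinate-major layout.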
__global__ void _PsoParticles_computeCosts_Task2(float* d_positions, float* d_costs)
{
int particleId = threadIdx.x + blockIdx.x * blockDim.x;
if (particleId >= d_particlesNumber)
return;
float subSum = 0;
int coordIdx = particleId;
float x_i = 0, x_i_1 = d_positions[coordIdx];
for (coordIdx += d_particlesNumber; coordIdx < d_particlesNumber * d_dimensions;
coordIdx += d_particlesNumber)
{
x_i = x_i_1;
x_i_1 = d_positions[coordIdx];
subSum += 100 * (x_i_1 - x_i * x_i) * (x_i_1 - x_i * x_i) +
(1 - x_i) * (1 - x_i);
}
d_costs[particleId] = subSum;
}
__device__ float _PsoParticles_computeCosts_Task2(float* position)
{
float subSum = 0;
float x_i = 0, x_i_1 = position[0];
for (int i = 1; i < d_dimensions; i++)
{
x_i = x_i_1;
x_i_1 = position[i];
subSum += 100 * (x_i_1 - x_i * x_i) * (x_i_1 - x_i * x_i) +
(1 - x_i) * (1 - x_i);
}
return subSum;
}
Particles::Particles(Options* options)
: options(options)
{
hipMalloc(&d_positions, options->particlesNumber * options->dimensions * sizeof(float));
hipMalloc(&d_costs, options->particlesNumber * sizeof(float));
hipMalloc(&d_prngStates, options->particlesNumber * sizeof(hiprandState_t));
hipLaunchKernelGGL(( _Particles_Particles_initPrng), dim3(options->gridSize), dim3(options->blockSize), 0, 0, time(NULL), d_prngStates);
}
Particles::~Particles()
{
hipFree(d_positions);
hipFree(d_costs);
hipFree(d_prngStates);
}
void Particles::print()
{
float* positions = new float[options->particlesNumber * options->dimensions];
hipMemcpy(positions, d_positions, options->particlesNumber * options->dimensions * sizeof(float),
hipMemcpyDeviceToHost);
float* costs = new float[options->particlesNumber];
hipMemcpy(costs, d_costs, options->particlesNumber * sizeof(float), hipMemcpyDeviceToHost);
for (int particleId = 0; particleId < options->particlesNumber; particleId++)
{
printf("[%d] = (", particleId);
int coordIdx;
for (coordIdx = particleId; coordIdx < options->particlesNumber * (options->dimensions - 1);
coordIdx += options->particlesNumber)
{
printf("% .2f,\t", positions[coordIdx]);
}
printf("% .2f)", positions[coordIdx]);
printf("\t f(x) =\t% .2f\n", costs[particleId]);
}
delete[] positions;
delete[] costs;
}
void Particles::updateCosts()
{
if (options->task == options->taskType::TASK_1)
_PsoParticles_computeCosts_Task1 << <options->gridSize, options->blockSize >> > (d_positions, d_costs);
else if (options->task == options->taskType::TASK_2)
_PsoParticles_computeCosts_Task2 << <options->gridSize, options->blockSize >> > (d_positions, d_costs);
}
|
7bc778771d277c122dd78792d23220e9390e62fa.cu
|
#include "../include/Particles.cuh"
#include "../include/Options.cuh"
#include <cuda_runtime.h>
#include <curand_kernel.h>
#include <time.h>
#include <stdio.h>
extern __constant__ int d_particlesNumber;
extern __constant__ int d_dimensions;
extern __constant__ boxConstraints d_initializationBoxConstraints;
extern __constant__ boxConstraints d_solutionBoxConstraints;
__global__ void _Particles_Particles_initPrng(int seed, curandState* d_prngStates)
{
int particleId = threadIdx.x + blockIdx.x * blockDim.x;
if (particleId >= d_particlesNumber)
return;
curand_init(seed, particleId, 0, &d_prngStates[particleId]);
}
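// Positions use a coordinate-major layout: coordinate j of particle p is stored at j * d_particlesNumber + p,
// so consecutive threads (particles) read consecutive addresses. The cost below is a Griewank-style
// benchmark: sum(x_j^2) / 40 + 1 - prod(cos(x_j / (j + 1))).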
__global__ void _PsoParticles_computeCosts_Task1(float* d_positions, float* d_costs)
{
int particleId = threadIdx.x + blockIdx.x * blockDim.x;
if (particleId >= d_particlesNumber)
return;
float subSum = 0, subProduct = 1;
for (int coordIdx = particleId, i = 1; coordIdx < d_particlesNumber * d_dimensions;
coordIdx += d_particlesNumber, i++)
{
float x_i = d_positions[coordIdx];
subSum += x_i * x_i;
subProduct *= cosf(x_i / i);
}
d_costs[particleId] = subSum / 40.0 + 1 - subProduct;
}
__device__ float _PsoParticles_computeCosts_Task1(float* position)
{
float subSum = 0, subProduct = 1;
for (int i = 0; i < d_dimensions; i++)
{
float x_i = position[i];
subSum += x_i * x_i;
subProduct *= cosf(x_i / (i + 1));
}
return subSum / 40.0 + 1 - subProduct;
}
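// Task 2 is the Rosenbrock function: sum over consecutive coordinate pairs of 100 * (x_{j+1} - x_j^2)^2 + (1 - x_j)^2,
// evaluated over the same coordinate-major layout.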
__global__ void _PsoParticles_computeCosts_Task2(float* d_positions, float* d_costs)
{
int particleId = threadIdx.x + blockIdx.x * blockDim.x;
if (particleId >= d_particlesNumber)
return;
float subSum = 0;
int coordIdx = particleId;
float x_i = 0, x_i_1 = d_positions[coordIdx];
for (coordIdx += d_particlesNumber; coordIdx < d_particlesNumber * d_dimensions;
coordIdx += d_particlesNumber)
{
x_i = x_i_1;
x_i_1 = d_positions[coordIdx];
subSum += 100 * (x_i_1 - x_i * x_i) * (x_i_1 - x_i * x_i) +
(1 - x_i) * (1 - x_i);
}
d_costs[particleId] = subSum;
}
__device__ float _PsoParticles_computeCosts_Task2(float* position)
{
float subSum = 0;
float x_i = 0, x_i_1 = position[0];
for (int i = 1; i < d_dimensions; i++)
{
x_i = x_i_1;
x_i_1 = position[i];
subSum += 100 * (x_i_1 - x_i * x_i) * (x_i_1 - x_i * x_i) +
(1 - x_i) * (1 - x_i);
}
return subSum;
}
Particles::Particles(Options* options)
: options(options)
{
cudaMalloc(&d_positions, options->particlesNumber * options->dimensions * sizeof(float));
cudaMalloc(&d_costs, options->particlesNumber * sizeof(float));
cudaMalloc(&d_prngStates, options->particlesNumber * sizeof(curandState));
_Particles_Particles_initPrng<<<options->gridSize, options->blockSize>>>(time(NULL), d_prngStates);
}
Particles::~Particles()
{
cudaFree(d_positions);
cudaFree(d_costs);
cudaFree(d_prngStates);
}
void Particles::print()
{
float* positions = new float[options->particlesNumber * options->dimensions];
cudaMemcpy(positions, d_positions, options->particlesNumber * options->dimensions * sizeof(float),
cudaMemcpyDeviceToHost);
float* costs = new float[options->particlesNumber];
cudaMemcpy(costs, d_costs, options->particlesNumber * sizeof(float), cudaMemcpyDeviceToHost);
for (int particleId = 0; particleId < options->particlesNumber; particleId++)
{
printf("[%d] = (", particleId);
int coordIdx;
for (coordIdx = particleId; coordIdx < options->particlesNumber * (options->dimensions - 1);
coordIdx += options->particlesNumber)
{
printf("% .2f,\t", positions[coordIdx]);
}
printf("% .2f)", positions[coordIdx]);
printf("\t f(x) =\t% .2f\n", costs[particleId]);
}
delete[] positions;
delete[] costs;
}
void Particles::updateCosts()
{
if (options->task == options->taskType::TASK_1)
_PsoParticles_computeCosts_Task1 << <options->gridSize, options->blockSize >> > (d_positions, d_costs);
else if (options->task == options->taskType::TASK_2)
_PsoParticles_computeCosts_Task2 << <options->gridSize, options->blockSize >> > (d_positions, d_costs);
}
|
56c439d6a6eab54c55e62a7b20ded7b3a55536bb.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void kern_FindLeafSinkPotential(float* sinkBuffer, float* incBuffer, float* divBuffer, float* labelBuffer, float iCC, int size)
{
int idx = CUDASTDOFFSET;
if( idx < size )
{
// read the inputs only when idx is in range to avoid out-of-bounds accesses
float value = incBuffer[idx] - divBuffer[idx] + labelBuffer[idx] * iCC;
sinkBuffer[idx] = value;
}
}
|
56c439d6a6eab54c55e62a7b20ded7b3a55536bb.cu
|
#include "includes.h"
__global__ void kern_FindLeafSinkPotential(float* sinkBuffer, float* incBuffer, float* divBuffer, float* labelBuffer, float iCC, int size)
{
int idx = CUDASTDOFFSET;
if( idx < size )
{
// read the inputs only when idx is in range to avoid out-of-bounds accesses
float value = incBuffer[idx] - divBuffer[idx] + labelBuffer[idx] * iCC;
sinkBuffer[idx] = value;
}
}
|
620f2822b7fb6536399522a9f744b9c3cd004162.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "THHTensorRandom.h"
#include "THHDeviceUtils.cuh"
#include "THHGeneral.h"
#include "THHTensorCopy.h"
#include "THHTensorMath.h"
#include "THHReduceApplyUtils.cuh"
#include "THHTensorRandom.cuh"
#include "THHGenerator.hpp"
#include "ATen/Config.h"
#include "ATen/hip/_curand_mtgp32_host.h"
#include <thrust/functional.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#define MAX_NUM_BLOCKS 64
#define BLOCK_SIZE 256
THCGenerator* THCRandom_getGenerator(THCState* state);
/* Sets up generator. Allocates but does not create the generator states. Not thread-safe. */
__host__ void initializeGenerator(THCState *state, THCGenerator* gen)
{
gen->state.gen_states = static_cast<struct hiprandStateMtgp32_t*>(THCudaMalloc(state, MAX_NUM_BLOCKS * sizeof(hiprandStateMtgp32_t)));
gen->state.kernel_params = static_cast<mtgp32_kernel_params_t*>(THCudaMalloc(state, sizeof(mtgp32_kernel_params_t)));
}
/* Creates a new generator state given the seed. Not thread-safe. */
__host__ void createGeneratorState(THCGenerator* gen, uint64_t seed)
{
if (hiprandMakeMTGP32Constants(mtgp32dc_params_fast_11213, gen->state.kernel_params) != HIPRAND_STATUS_SUCCESS)
{
THError("Creating MTGP constants failed.");
}
if (hiprandMakeMTGP32KernelState(gen->state.gen_states, mtgp32dc_params_fast_11213,
gen->state.kernel_params, MAX_NUM_BLOCKS, seed) != HIPRAND_STATUS_SUCCESS)
{
THError("Creating MTGP kernel state failed.");
}
// seed and offset for philox
gen->state.initial_seed = seed;
gen->state.philox_seed_offset = 0;
}
__host__ void THCRandom_getRNGState(THCState* state, THByteTensor *rng_state)
{
THCGenerator* gen = THCRandom_getGenerator(state);
std::lock_guard<std::mutex> lock(gen->mutex);
// The RNG state comprises the MTPG32 states, the seed, and an offset used for Philox
static const size_t states_size = MAX_NUM_BLOCKS * sizeof(hiprandStateMtgp32_t);
static const size_t seed_size = sizeof(gen->state.initial_seed);
static const size_t offset_size = sizeof(gen->state.philox_seed_offset);
static const size_t total_size = states_size + seed_size + offset_size;
THByteTensor_resize1d(rng_state, total_size);
THArgCheck(THByteTensor_nElement(rng_state) == total_size, 1, "RNG state is wrong size");
THArgCheck(THByteTensor_isContiguous(rng_state), 1, "RNG state must be contiguous");
THCudaCheck(hipMemcpy(THByteTensor_data(rng_state), gen->state.gen_states,
states_size, hipMemcpyDeviceToHost));
memcpy(THByteTensor_data(rng_state) + states_size, &gen->state.initial_seed, seed_size);
memcpy(THByteTensor_data(rng_state) + states_size + seed_size, &gen->state.philox_seed_offset, offset_size);
}
__global__ void set_rngstate_kernel(hiprandStateMtgp32_t *state, mtgp32_kernel_params_t *kernel)
{
state[threadIdx.x].k = kernel;
}
__host__ void THCRandom_setRNGState(THCState* state, THByteTensor *rng_state)
{
THCGenerator* gen = THCRandom_getGenerator(state);
std::lock_guard<std::mutex> lock(gen->mutex);
static const size_t states_size = MAX_NUM_BLOCKS * sizeof(hiprandStateMtgp32_t);
static const size_t seed_size = sizeof(gen->state.initial_seed);
static const size_t offset_size = sizeof(gen->state.philox_seed_offset);
static const size_t total_size = states_size + seed_size + offset_size;
bool no_philox_seed = false;
if (THByteTensor_nElement(rng_state) == total_size - offset_size) {
no_philox_seed = true;
}
else {
THArgCheck(THByteTensor_nElement(rng_state) == total_size, 1, "RNG state is wrong size");
}
THArgCheck(THByteTensor_isContiguous(rng_state), 1, "RNG state must be contiguous");
THCudaCheck(hipMemcpy(gen->state.gen_states, THByteTensor_data(rng_state),
states_size, hipMemcpyHostToDevice));
hipLaunchKernelGGL(( set_rngstate_kernel), dim3(1), dim3(MAX_NUM_BLOCKS), 0, THCState_getCurrentStream(state),
gen->state.gen_states, gen->state.kernel_params);
memcpy(&gen->state.initial_seed, THByteTensor_data(rng_state) + states_size, seed_size);
if (!no_philox_seed) {
memcpy(&gen->state.philox_seed_offset, THByteTensor_data(rng_state) + states_size + seed_size, offset_size);
}
else {
gen->state.philox_seed_offset = 0;
}
}
// Goes from (0, 1] to [0, 1). Note 1-x is not sufficient since for some floats
// eps near 0, 1-eps will round to 1.
template <typename T>
__device__ inline T reverse_bounds(T value) {
if (THCNumerics<T>::eq(value, ScalarConvert<int, T>::to(1))) {
return ScalarConvert<int, T>::to(0);
}
return value;
}
__device__ inline at::Half half_uniform_scale_and_shift(float x, double a, double b) {
at::Half width = ScalarConvert<double, at::Half>::to(b - a);
at::Half start = ScalarConvert<double, at::Half>::to(a);
at::Half scaled = THCNumerics<at::Half>::mul(reverse_bounds(ScalarConvert<float, at::Half>::to(x)), width);
return THCNumerics<at::Half>::add(scaled, start);
}
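// The generator kernels below loop up to rounded_size so that every thread in the block keeps calling the
// block-wide MTGP32 generator together; only threads with i < size actually store a result.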
#define GENERATE_KERNEL1(NAME, T, ARG1, CURAND_T, CURAND_FUNC, TRANSFORM) \
__global__ void NAME(hiprandStateMtgp32_t *state, int size, T *result, ARG1) \
{ \
int idx = blockIdx.x * BLOCK_SIZE + threadIdx.x; \
int rounded_size = THCCeilDiv(size, BLOCK_SIZE) * BLOCK_SIZE; \
for (int i = idx; i < rounded_size; i += BLOCK_SIZE * MAX_NUM_BLOCKS) { \
CURAND_T x = CURAND_FUNC(&state[blockIdx.x]); \
if (i < size) { \
T y = TRANSFORM; \
result[i] = y; \
} \
} \
}
#define GENERATE_KERNEL2(NAME, T, ARG1, ARG2, CURAND_T, CURAND_FUNC, TRANSFORM) \
__global__ void NAME(hiprandStateMtgp32_t *state, int size, T *result, ARG1, ARG2) \
{ \
int idx = blockIdx.x * BLOCK_SIZE + threadIdx.x; \
int rounded_size = THCCeilDiv(size, BLOCK_SIZE) * BLOCK_SIZE; \
for (int i = idx; i < rounded_size; i += BLOCK_SIZE * MAX_NUM_BLOCKS) { \
CURAND_T x = CURAND_FUNC(&state[blockIdx.x]); \
if (i < size) { \
T y = TRANSFORM; \
result[i] = y; \
} \
} \
}
// NOTE: hiprand_uniform is (0, 1] and we want [a, b)
GENERATE_KERNEL2(generate_uniform, float, float a, float b, float, hiprand_uniform, reverse_bounds(x) * (b-a) + a)
GENERATE_KERNEL2(generate_uniform, float, double a, double b, float, hiprand_uniform, reverse_bounds(x) * (b-a) + a)
GENERATE_KERNEL2(generate_uniform, double, double a, double b, double, hiprand_uniform_double, reverse_bounds(x) * (b-a) + a)
GENERATE_KERNEL2(generate_normal, float, double mean, double stdv, float, hiprand_normal, (x * stdv) + mean)
GENERATE_KERNEL2(generate_normal, double, double mean, double stdv, double, hiprand_normal_double, (x * stdv) + mean)
GENERATE_KERNEL1(generate_exponential, float, double lambda, float, hiprand_uniform, (float)(-1. / lambda * log(x)))
GENERATE_KERNEL1(generate_exponential, double, double lambda, double, hiprand_uniform_double, (double)(-1. / lambda * log(x)))
GENERATE_KERNEL2(generate_cauchy, float, double median, double sigma, float, hiprand_uniform, (float)(median + sigma * tan(M_PI*(x-0.5))))
GENERATE_KERNEL2(generate_cauchy, double, double median, double sigma, double, hiprand_uniform_double, (double)(median + sigma * tan(M_PI*(x-0.5))))
GENERATE_KERNEL2(generate_uniform, at::Half, double a, double b, float, hiprand_uniform, (half_uniform_scale_and_shift(x, a, b)))
GENERATE_KERNEL2(generate_normal, at::Half, double mean, double stdv, float, hiprand_normal, (ScalarConvert<float, at::Half>::to((x * stdv) + mean)))
GENERATE_KERNEL1(generate_exponential, at::Half, double lambda, float, hiprand_uniform, (ScalarConvert<float, at::Half>::to((float)(-1. / lambda * log(x)))))
GENERATE_KERNEL2(generate_cauchy, at::Half, double median, double sigma, float, hiprand_uniform, (ScalarConvert<float, at::Half>::to((float)(median + sigma * tan(M_PI*(x-0.5))))))
#include "generic/THCTensorRandom.cu"
#include "THHGenerateAllTypes.h"
#undef GENERATE_KERNEL1
#undef GENERATE_KERNEL2
|
620f2822b7fb6536399522a9f744b9c3cd004162.cu
|
#include "THCTensorRandom.h"
#include "THCDeviceUtils.cuh"
#include "THCGeneral.h"
#include "THCTensorCopy.h"
#include "THCTensorMath.h"
#include "THCReduceApplyUtils.cuh"
#include "THCTensorRandom.cuh"
#include "THCGenerator.hpp"
#include "ATen/Config.h"
#include "ATen/cuda/_curand_mtgp32_host.h"
#include <thrust/functional.h>
#include <curand.h>
#include <curand_kernel.h>
#define MAX_NUM_BLOCKS 200
#define BLOCK_SIZE 256
THCGenerator* THCRandom_getGenerator(THCState* state);
/* Sets up generator. Allocates but does not create the generator states. Not thread-safe. */
__host__ void initializeGenerator(THCState *state, THCGenerator* gen)
{
gen->state.gen_states = static_cast<struct curandStateMtgp32*>(THCudaMalloc(state, MAX_NUM_BLOCKS * sizeof(curandStateMtgp32)));
gen->state.kernel_params = static_cast<mtgp32_kernel_params*>(THCudaMalloc(state, sizeof(mtgp32_kernel_params)));
}
/* Creates a new generator state given the seed. Not thread-safe. */
__host__ void createGeneratorState(THCGenerator* gen, uint64_t seed)
{
if (curandMakeMTGP32Constants(mtgp32dc_params_fast_11213, gen->state.kernel_params) != CURAND_STATUS_SUCCESS)
{
THError("Creating MTGP constants failed.");
}
if (curandMakeMTGP32KernelState(gen->state.gen_states, mtgp32dc_params_fast_11213,
gen->state.kernel_params, MAX_NUM_BLOCKS, seed) != CURAND_STATUS_SUCCESS)
{
THError("Creating MTGP kernel state failed.");
}
// seed and offset for philox
gen->state.initial_seed = seed;
gen->state.philox_seed_offset = 0;
}
__host__ void THCRandom_getRNGState(THCState* state, THByteTensor *rng_state)
{
THCGenerator* gen = THCRandom_getGenerator(state);
std::lock_guard<std::mutex> lock(gen->mutex);
// The RNG state comprises the MTPG32 states, the seed, and an offset used for Philox
static const size_t states_size = MAX_NUM_BLOCKS * sizeof(curandStateMtgp32);
static const size_t seed_size = sizeof(gen->state.initial_seed);
static const size_t offset_size = sizeof(gen->state.philox_seed_offset);
static const size_t total_size = states_size + seed_size + offset_size;
THByteTensor_resize1d(rng_state, total_size);
THArgCheck(THByteTensor_nElement(rng_state) == total_size, 1, "RNG state is wrong size");
THArgCheck(THByteTensor_isContiguous(rng_state), 1, "RNG state must be contiguous");
THCudaCheck(cudaMemcpy(THByteTensor_data(rng_state), gen->state.gen_states,
states_size, cudaMemcpyDeviceToHost));
memcpy(THByteTensor_data(rng_state) + states_size, &gen->state.initial_seed, seed_size);
memcpy(THByteTensor_data(rng_state) + states_size + seed_size, &gen->state.philox_seed_offset, offset_size);
}
__global__ void set_rngstate_kernel(curandStateMtgp32 *state, mtgp32_kernel_params *kernel)
{
state[threadIdx.x].k = kernel;
}
__host__ void THCRandom_setRNGState(THCState* state, THByteTensor *rng_state)
{
THCGenerator* gen = THCRandom_getGenerator(state);
std::lock_guard<std::mutex> lock(gen->mutex);
static const size_t states_size = MAX_NUM_BLOCKS * sizeof(curandStateMtgp32);
static const size_t seed_size = sizeof(gen->state.initial_seed);
static const size_t offset_size = sizeof(gen->state.philox_seed_offset);
static const size_t total_size = states_size + seed_size + offset_size;
bool no_philox_seed = false;
if (THByteTensor_nElement(rng_state) == total_size - offset_size) {
no_philox_seed = true;
}
else {
THArgCheck(THByteTensor_nElement(rng_state) == total_size, 1, "RNG state is wrong size");
}
THArgCheck(THByteTensor_isContiguous(rng_state), 1, "RNG state must be contiguous");
THCudaCheck(cudaMemcpy(gen->state.gen_states, THByteTensor_data(rng_state),
states_size, cudaMemcpyHostToDevice));
set_rngstate_kernel<<<1, MAX_NUM_BLOCKS, 0, THCState_getCurrentStream(state)>>>(
gen->state.gen_states, gen->state.kernel_params);
memcpy(&gen->state.initial_seed, THByteTensor_data(rng_state) + states_size, seed_size);
if (!no_philox_seed) {
memcpy(&gen->state.philox_seed_offset, THByteTensor_data(rng_state) + states_size + seed_size, offset_size);
}
else {
gen->state.philox_seed_offset = 0;
}
}
// Goes from (0, 1] to [0, 1). Note 1-x is not sufficient since for some floats
// eps near 0, 1-eps will round to 1.
template <typename T>
__device__ inline T reverse_bounds(T value) {
if (THCNumerics<T>::eq(value, ScalarConvert<int, T>::to(1))) {
return ScalarConvert<int, T>::to(0);
}
return value;
}
__device__ inline at::Half half_uniform_scale_and_shift(float x, double a, double b) {
at::Half width = ScalarConvert<double, at::Half>::to(b - a);
at::Half start = ScalarConvert<double, at::Half>::to(a);
at::Half scaled = THCNumerics<at::Half>::mul(reverse_bounds(ScalarConvert<float, at::Half>::to(x)), width);
return THCNumerics<at::Half>::add(scaled, start);
}
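// The generator kernels below loop up to rounded_size so that every thread in the block keeps calling the
// block-wide MTGP32 generator together; only threads with i < size actually store a result.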
#define GENERATE_KERNEL1(NAME, T, ARG1, CURAND_T, CURAND_FUNC, TRANSFORM) \
__global__ void NAME(curandStateMtgp32 *state, int size, T *result, ARG1) \
{ \
int idx = blockIdx.x * BLOCK_SIZE + threadIdx.x; \
int rounded_size = THCCeilDiv(size, BLOCK_SIZE) * BLOCK_SIZE; \
for (int i = idx; i < rounded_size; i += BLOCK_SIZE * MAX_NUM_BLOCKS) { \
CURAND_T x = CURAND_FUNC(&state[blockIdx.x]); \
if (i < size) { \
T y = TRANSFORM; \
result[i] = y; \
} \
} \
}
#define GENERATE_KERNEL2(NAME, T, ARG1, ARG2, CURAND_T, CURAND_FUNC, TRANSFORM) \
__global__ void NAME(curandStateMtgp32 *state, int size, T *result, ARG1, ARG2) \
{ \
int idx = blockIdx.x * BLOCK_SIZE + threadIdx.x; \
int rounded_size = THCCeilDiv(size, BLOCK_SIZE) * BLOCK_SIZE; \
for (int i = idx; i < rounded_size; i += BLOCK_SIZE * MAX_NUM_BLOCKS) { \
CURAND_T x = CURAND_FUNC(&state[blockIdx.x]); \
if (i < size) { \
T y = TRANSFORM; \
result[i] = y; \
} \
} \
}
// NOTE: curand_uniform is (0, 1] and we want [a, b)
GENERATE_KERNEL2(generate_uniform, float, float a, float b, float, curand_uniform, reverse_bounds(x) * (b-a) + a)
GENERATE_KERNEL2(generate_uniform, float, double a, double b, float, curand_uniform, reverse_bounds(x) * (b-a) + a)
GENERATE_KERNEL2(generate_uniform, double, double a, double b, double, curand_uniform_double, reverse_bounds(x) * (b-a) + a)
GENERATE_KERNEL2(generate_normal, float, double mean, double stdv, float, curand_normal, (x * stdv) + mean)
GENERATE_KERNEL2(generate_normal, double, double mean, double stdv, double, curand_normal_double, (x * stdv) + mean)
GENERATE_KERNEL1(generate_exponential, float, double lambda, float, curand_uniform, (float)(-1. / lambda * log(x)))
GENERATE_KERNEL1(generate_exponential, double, double lambda, double, curand_uniform_double, (double)(-1. / lambda * log(x)))
GENERATE_KERNEL2(generate_cauchy, float, double median, double sigma, float, curand_uniform, (float)(median + sigma * tan(M_PI*(x-0.5))))
GENERATE_KERNEL2(generate_cauchy, double, double median, double sigma, double, curand_uniform_double, (double)(median + sigma * tan(M_PI*(x-0.5))))
GENERATE_KERNEL2(generate_uniform, at::Half, double a, double b, float, curand_uniform, (half_uniform_scale_and_shift(x, a, b)))
GENERATE_KERNEL2(generate_normal, at::Half, double mean, double stdv, float, curand_normal, (ScalarConvert<float, at::Half>::to((x * stdv) + mean)))
GENERATE_KERNEL1(generate_exponential, at::Half, double lambda, float, curand_uniform, (ScalarConvert<float, at::Half>::to((float)(-1. / lambda * log(x)))))
GENERATE_KERNEL2(generate_cauchy, at::Half, double median, double sigma, float, curand_uniform, (ScalarConvert<float, at::Half>::to((float)(median + sigma * tan(M_PI*(x-0.5))))))
#include "generic/THCTensorRandom.cu"
#include "THCGenerateAllTypes.h"
#undef GENERATE_KERNEL1
#undef GENERATE_KERNEL2
|
7bc4ecca183672b22c1622fb3f472920caf5698c.hip
|
// !!! This is a file automatically generated by hipify!!!
/*******************************************************************************
* Copyright (c) 2019 Konduit K.K.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Yurii Shyrma ([email protected])
//
#include "cudnnUtils.h"
#include <ops/declarable/helpers/convolutions.h>
namespace sd {
namespace ops {
namespace platforms {
//////////////////////////////////////////////////////////////////////////
static void batchnormCUDNN(const LaunchContext* context,
const NDArray* input, const NDArray* mean, const NDArray* variance,
const NDArray* gamma, const NDArray* beta,
NDArray* output,
const double epsilon, const bool isSpatialMode) {
// input, output -> 4D:nchw, 5D:ncdhw
// mean, variance, gamma, beta -> 1xCx1x1 for 4D and 1xCx1x1x1 for 5D for BATCHNORM_MODE_SPATIAL mode
// -> 1xCxHxW for 4D and 1xCxDxHxW for 5D for BATCHNORM_MODE_PER_ACTIVATION mode
const cudnnDataType_t dataType = cudnnDataType(input->dataType());
const int xRank = input->rankOf();
auto handle = reinterpret_cast<cudnnHandle_t *>(context->getCuDnnHandle());
cudnnStatus_t err = cudnnSetStream(*handle, *context->getCudaStream());
if (err != 0) throw sd::cuda_exception::build("conv2dCUDNN: can't set stream for cuDNN", err);
const std::vector<int> xShape = input->getShapeAsVectorInt(); // input and output have same shapes
std::vector<int> paramsShape, paramsStrides; // mean, variance, gamma and beta have same shapes
if(isSpatialMode) { // 1xCx1x1
const int iC = mean->lengthOf();
const int stride0 = mean->strideAt(0);
paramsShape = xRank == 4 ? std::vector<int>({1, iC, 1, 1}) : std::vector<int>({1, iC, 1, 1, 1});
paramsStrides = xRank == 4 ? std::vector<int>({iC*stride0, stride0, 1, 1}) : std::vector<int>({iC*stride0, stride0, 1, 1, 1});
}
else {
paramsShape = mean->getShapeAsVectorInt();
paramsStrides = xRank == 4 ? std::vector<int>({(int)mean->strideAt(0), (int)mean->strideAt(1), (int)mean->strideAt(2), (int)mean->strideAt(3)}) : std::vector<int>({(int)mean->strideAt(0), (int)mean->strideAt(1), (int)mean->strideAt(2), (int)mean->strideAt(3), (int)mean->strideAt(4)});
}
std::vector<int> xStrides = {(int)input->strideAt(0), (int)input->strideAt(1), (int)input->strideAt(2), (int)input->strideAt(3)};
std::vector<int> zStrides = {(int)output->strideAt(0), (int)output->strideAt(1), (int)output->strideAt(2), (int)output->strideAt(3)};
if(xRank > 4) { // 5D
xStrides.push_back((int)input->strideAt(4));
zStrides.push_back((int)output->strideAt(4));
}
cudnnTensorFormat_t format = CUDNN_TENSOR_NCHW;
// input descriptor
cudnnTensorDescriptor_t x;
cudnnCreateTensorDescriptor(&x);
if(input->ews() == 1)
err = cudnnSetTensorNdDescriptorEx(x, format, dataType, xRank, xShape.data());
else
err = cudnnSetTensorNdDescriptor(x, dataType, xRank, xShape.data(), xStrides.data());
if (err != 0) throw sd::cuda_exception::build("batchnormCUDNN: cudnnSetTensorNdDescriptor/cudnnSetTensorNdDescriptorEx for input failed", err);
// output descriptor
cudnnTensorDescriptor_t z;
cudnnCreateTensorDescriptor(&z);
if(output->ews() == 1)
err = cudnnSetTensorNdDescriptorEx(z, format, dataType, xRank, xShape.data());
else
err = cudnnSetTensorNdDescriptor(z, dataType, xRank, xShape.data(), zStrides.data());
if (err != 0) throw sd::cuda_exception::build("batchnormCUDNN: cudnnSetTensorNdDescriptor/cudnnSetTensorNdDescriptorEx for output failed", err);
// mean, variance, gamma and beta descriptor, the same descriptor for all of them
cudnnTensorDescriptor_t params;
cudnnCreateTensorDescriptor(&params);
if(mean->ews() == 1)
err = cudnnSetTensorNdDescriptorEx(params, format, dataType, xRank, paramsShape.data());
else
err = cudnnSetTensorNdDescriptor(params, dataType, xRank, paramsShape.data(), paramsStrides.data());
if (err != 0) throw sd::cuda_exception::build("batchnormCUDNN: cudnnSetTensorNdDescriptor/cudnnSetTensorNdDescriptorEx for mean/variance/gamma/beta failed", err);
// provide scaling parameters
const float alpha32(1), beta32(0);
const double alpha64(1), beta64(0);
const void* ptrAlpha = output->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&alpha32) : reinterpret_cast<const void*>(&alpha64);
const void* ptrBeta = output->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&beta32) : reinterpret_cast<const void*>(&beta64);
NDArray::prepareSpecialUse({output}, {input, mean, variance, gamma, beta});
// calculations
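// inference-mode batch norm: y = gamma * (x - mean) / sqrt(variance + epsilon) + beta, using the supplied
// running statistics (no statistics are updated)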
err = cudnnBatchNormalizationForwardInference(*handle, isSpatialMode ? CUDNN_BATCHNORM_SPATIAL : CUDNN_BATCHNORM_PER_ACTIVATION,
ptrAlpha, ptrBeta,
x, input->getSpecialBuffer(),
z, output->getSpecialBuffer(),
params,
gamma->getSpecialBuffer(), beta->getSpecialBuffer(),
mean->getSpecialBuffer(), variance->getSpecialBuffer(), epsilon);
if (err != 0) throw sd::cuda_exception::build("batchnormCUDNN: cudnnBatchNormalizationForwardInference failed", err);
auto cudaErr = hipStreamSynchronize(*context->getCudaStream());
if (cudaErr != 0)
throw cuda_exception::build("batchnormCUDNN: hipStreamSynchronize failed !", cudaErr);
NDArray::registerSpecialUse({output}, {input, mean, variance, gamma, beta});
}
//////////////////////////////////////////////////////////////////////////
static void batchnormBpCUDNN(const LaunchContext* context,
const NDArray* input, const NDArray* mean, const NDArray* variance, const NDArray* gamma, const NDArray* gradO,
NDArray* gradI, NDArray* gradG, NDArray* gradB,
const double epsilon, const bool isSpatialMode) {
// input, gradO, gradI -> 4D:nchw, 5D:ncdhw
// mean, variance, gamma, beta, gradM, gradV, gradG, gradB -> 1xCx1x1 for 4D and 1xCx1x1x1 for 5D for BATCHNORM_MODE_SPATIAL mode
// -> 1xCxHxW for 4D and 1xCxDxHxW for 5D for BATCHNORM_MODE_PER_ACTIVATION mode
const cudnnDataType_t dataType = cudnnDataType(input->dataType());
const int xRank = input->rankOf();
auto handle = reinterpret_cast<cudnnHandle_t *>(context->getCuDnnHandle());
cudnnStatus_t err = cudnnSetStream(*handle, *context->getCudaStream());
if (err != 0) throw sd::cuda_exception::build("batchnormBpCUDNN: can't set stream for cuDNN", err);
const std::vector<int> xShape = input->getShapeAsVectorInt(); // input and output have same shapes
std::vector<int> paramsShape, paramsStrides; // mean, variance, gamma and beta have same shapes
if(isSpatialMode) { // 1xCx1x1
const int iC = mean->lengthOf();
const int stride0 = mean->strideAt(0);
paramsShape = xRank == 4 ? std::vector<int>({1, iC, 1, 1}) : std::vector<int>({1, iC, 1, 1, 1});
paramsStrides = xRank == 4 ? std::vector<int>({iC*stride0, stride0, 1, 1}) : std::vector<int>({iC*stride0, stride0, 1, 1, 1});
}
else {
paramsShape = mean->getShapeAsVectorInt();
paramsStrides = xRank == 4 ? std::vector<int>({(int)mean->strideAt(0), (int)mean->strideAt(1), (int)mean->strideAt(2), (int)mean->strideAt(3)}) : std::vector<int>({(int)mean->strideAt(0), (int)mean->strideAt(1), (int)mean->strideAt(2), (int)mean->strideAt(3), (int)mean->strideAt(4)});
}
std::vector<int> xStrides = {(int)input->strideAt(0), (int)input->strideAt(1), (int)input->strideAt(2), (int)input->strideAt(3)};
std::vector<int> dxStrides = {(int)gradI->strideAt(0), (int)gradI->strideAt(1), (int)gradI->strideAt(2), (int)gradI->strideAt(3)};
std::vector<int> dzStrides = {(int)gradO->strideAt(0), (int)gradO->strideAt(1), (int)gradO->strideAt(2), (int)gradO->strideAt(3)};
if(xRank > 4) { // 5D
xStrides.push_back((int)input->strideAt(4));
dxStrides.push_back((int)gradI->strideAt(4));
dzStrides.push_back((int)gradO->strideAt(4));
}
cudnnTensorFormat_t format = CUDNN_TENSOR_NCHW;
// input descriptor
cudnnTensorDescriptor_t x;
cudnnCreateTensorDescriptor(&x);
if(input->ews() == 1)
err = cudnnSetTensorNdDescriptorEx(x, format, dataType, xRank, xShape.data());
else
err = cudnnSetTensorNdDescriptor(x, dataType, xRank, xShape.data(), xStrides.data());
if (err != 0) throw sd::cuda_exception::build("batchnormBpCUDNN: cudnnSetTensorNdDescriptor/cudnnSetTensorNdDescriptorEx for input failed", err);
// gradO descriptor
cudnnTensorDescriptor_t dz;
cudnnCreateTensorDescriptor(&dz);
if(gradO->ews() == 1)
err = cudnnSetTensorNdDescriptorEx(dz, format, dataType, xRank, xShape.data());
else
err = cudnnSetTensorNdDescriptor(dz, dataType, xRank, xShape.data(), dzStrides.data());
if (err != 0) throw sd::cuda_exception::build("batchnormBpCUDNN: cudnnSetTensorNdDescriptor/cudnnSetTensorNdDescriptorEx for gradO failed", err);
// gradI descriptor
cudnnTensorDescriptor_t dx;
cudnnCreateTensorDescriptor(&dx);
if(input->ews() == 1)
err = cudnnSetTensorNdDescriptorEx(dx, format, dataType, xRank, xShape.data());
else
err = cudnnSetTensorNdDescriptor(dx, dataType, xRank, xShape.data(), dxStrides.data());
if (err != 0) throw sd::cuda_exception::build("batchnormBpCUDNN: cudnnSetTensorNdDescriptor/cudnnSetTensorNdDescriptorEx for gradI failed", err);
// mean, variance, gamma, gradG and gradB descriptor, the same descriptor for all of them
cudnnTensorDescriptor_t params;
cudnnCreateTensorDescriptor(&params);
if(mean->ews() == 1)
err = cudnnSetTensorNdDescriptorEx(params, format, dataType, xRank, paramsShape.data());
else
err = cudnnSetTensorNdDescriptor(params, dataType, xRank, paramsShape.data(), paramsStrides.data());
if (err != 0) throw sd::cuda_exception::build("batchnormBpCUDNN: cudnnSetTensorNdDescriptor/cudnnSetTensorNdDescriptorEx for mean/variance/gamma/gradG/gradB failed", err);
// provide scaling parameters
const float alpha32(1), beta32(0);
double alpha64(1), beta64(0);
const void* ptrAlpha = input->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&alpha32) : reinterpret_cast<const void*>(&alpha64);
const void* ptrBeta = input->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&beta32) : reinterpret_cast<const void*>(&beta64);
NDArray::prepareSpecialUse({gradI, gradG, gradB}, {input, mean, variance, gamma, gradO});
// calculations
// TODO: we can use cache here
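// savedMean/savedInvVariance are passed as nullptr, so cuDNN recomputes the batch statistics from the input
// while producing gradI, gradG and gradB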
err = cudnnBatchNormalizationBackward(*handle, isSpatialMode ? CUDNN_BATCHNORM_SPATIAL : CUDNN_BATCHNORM_PER_ACTIVATION,
ptrAlpha, ptrBeta, ptrAlpha, ptrBeta,
x, input->getSpecialBuffer(),
dz, gradO->getSpecialBuffer(),
dx, gradI->getSpecialBuffer(),
params,
gamma->getSpecialBuffer(), gradG->getSpecialBuffer(), gradB->getSpecialBuffer(),
epsilon,
nullptr/*mean->getSpecialBuffer()*/, nullptr/*variance->getSpecialBuffer()*/);
if (err != 0) throw sd::cuda_exception::build("batchnormBpCUDNN: cudnnBatchNormalizationBackward failed", err);
auto cudaErr = hipStreamSynchronize(*context->getCudaStream());
if (cudaErr != 0)
throw cuda_exception::build("batchnormBpCUDNN: hipStreamSynchronize failed !", cudaErr);
NDArray::registerSpecialUse({gradI, gradG, gradB}, {input, mean, variance, gamma, gradO});
}
//////////////////////////////////////////////////////////////////////////
PLATFORM_IMPL(batchnorm, ENGINE_CUDA) {
auto input = INPUT_VARIABLE(0);
auto mean = INPUT_VARIABLE(1);
auto variance = INPUT_VARIABLE(2);
NDArray* gamma = nullptr;
NDArray* beta = nullptr;
auto output = OUTPUT_VARIABLE(0);
const bool applyScale = (bool)INT_ARG(0);
const bool applyOffset = (bool)INT_ARG(1);
const double epsilon = T_ARG(0);
if(applyScale)
gamma = INPUT_VARIABLE(3);
if(applyOffset)
beta = INPUT_VARIABLE(3 + (int)applyScale);
const int numOfIntArgs = block.getIArguments()->size();
const int inRank = input->rankOf();
// get axes args to normalize input array over
std::vector<int> axes;
if(numOfIntArgs > 2)
for(int i = 2; i < numOfIntArgs; ++i)
axes.push_back(INT_ARG(i));
else
axes.push_back(inRank-1); // default dimension to reduce along is last dimension
const int numOfAxes = axes.size();
REQUIRE_TRUE(numOfAxes <= inRank, 0, "BATCHNORM CUDNN op: too big number of input axes to normalize over, expected number should be less or equal to rank of input array, but got %i and %i correspondingly !", numOfAxes, inRank);
// evaluate expected shape for mean, variance and gamma. These 3 arrays should have identical shapes
// for example if input shape is {2,3,4,5,6} and axes = {1,3}, then expected shape would be {1,3,1,5,1}, and if axes = {3}, then expected shape would be {5}
std::vector<Nd4jLong> expShape;
if(numOfAxes == 1)
expShape.push_back(input->sizeAt(axes[0]));
else { // get, for example, something like {1, inputDim1, 1, inputDim3, 1} if axes = {1, 3}
expShape = std::vector<Nd4jLong>(inRank, 1);
for(uint i = 0; i < numOfAxes; ++i)
expShape[axes[i]] = input->sizeAt(axes[i]);
}
REQUIRE_TRUE(mean->isSameShape(expShape) , 0, "BATCHNORM CUDNN op: wrong shape of mean array, expected is %s, but got %s instead !", ShapeUtils::shapeAsString(expShape).c_str(), ShapeUtils::shapeAsString(mean).c_str());
REQUIRE_TRUE(variance->isSameShape(expShape), 0, "BATCHNORM CUDNN op: wrong shape of variance array, expected is %s, but got %s instead !", ShapeUtils::shapeAsString(expShape).c_str(), ShapeUtils::shapeAsString(variance).c_str());
if(gamma)
REQUIRE_TRUE(gamma->isSameShape(expShape), 0, "BATCHNORM CUDNN op: wrong shape of gamma array, expected is %s, but got %s instead !", ShapeUtils::shapeAsString(expShape).c_str(), ShapeUtils::shapeAsString(gamma).c_str());
if(beta)
REQUIRE_TRUE(beta->isSameShape(expShape), 0, "BATCHNORM CUDNN op: wrong shape of beta array, expected is %s, but got %s instead !", ShapeUtils::shapeAsString(expShape).c_str(), ShapeUtils::shapeAsString(beta).c_str());
// types of all input arrays should be the same
for(int i = 1; i < block.width(); ++i)
REQUIRE_TRUE(INPUT_VARIABLE(0)->dataType() == INPUT_VARIABLE(i)->dataType(), 0, "BATCHNORM CUDNN op: types of all input arrays should be the same !");
// cudnn supports NCHW format only
const bool needPermut = axes.size() == 1 && mean->lengthOf() == input->sizeAt(-1);
if(needPermut) { // if NHWC
std::vector<int> perm = inRank == 4 ? std::vector<int>({0, 3, 1, 2}) : std::vector<int>({0, 4, 1, 2, 3}); // NHWC -> NCHW
input = new NDArray(input->permute(perm));
output = new NDArray(output->permute(perm));
}
// cudnn requires gamma and beta to be non-nullptr
if(!applyScale) {
gamma = new NDArray(mean);
*gamma = 1;
}
if(!applyOffset) {
beta = new NDArray(mean);
*beta = 0;
}
// calculations
batchnormCUDNN(block.launchContext(), input, mean, variance, gamma, beta, output, epsilon, axes.size() == 1);
if(needPermut) {
delete input;
delete output;
}
if(!applyScale)
delete gamma;
if(!applyOffset)
delete beta;
return Status::OK();
}
//////////////////////////////////////////////////////////////////////////
PLATFORM_CHECK(batchnorm, ENGINE_CUDA) {
const bool applyScale = (bool)INT_ARG(0);
const bool applyOffset = (bool)INT_ARG(1);
NDArray* input = INPUT_VARIABLE(0);
NDArray* mean = INPUT_VARIABLE(1);
NDArray* variance = INPUT_VARIABLE(2);
NDArray* gamma = applyScale ? INPUT_VARIABLE(3) : nullptr;
NDArray* beta = applyOffset ? INPUT_VARIABLE(3 + (int)applyScale) : nullptr;
const int numOfIntArgs = block.getIArguments()->size();
const int xRank = input->rankOf();
// *********************************** //
if(xRank != 4 && xRank != 5)
return false;
// *********************************** //
const bool badType = input->dataType() != DataType::DOUBLE && input->dataType() != DataType::FLOAT32 && input->dataType() != DataType::HALF;
if(badType)
return false;
// *********************************** //
// get axes args to normalize input array over
std::vector<int> axes;
if(numOfIntArgs > 2)
for(int i = 2; i < numOfIntArgs; ++i)
axes.push_back(INT_ARG(i));
else
axes.push_back(xRank-1); // default dimension to reduce along is last dimension
if(axes.size() != 1 && axes.size() != 3 && axes.size() != 4)
return false;
// *********************************** //
bool allParamsHaveSameShapeAndStrides = shape::haveSameShapeAndStrides(mean->getShapeInfo(), variance->getShapeInfo());
if(gamma)
allParamsHaveSameShapeAndStrides &= shape::haveSameShapeAndStrides(mean->getShapeInfo(), gamma->getShapeInfo());
if(beta)
allParamsHaveSameShapeAndStrides &= shape::haveSameShapeAndStrides(mean->getShapeInfo(), beta->getShapeInfo());
if(!allParamsHaveSameShapeAndStrides)
return false;
// *********************************** //
bool isFormatGood = false;
if(axes.size() == 1)
isFormatGood = mean->lengthOf() == input->sizeAt(1) || mean->lengthOf() == input->sizeAt(-1); // mean [C]
else {
auto inputShapeModif = input->getShapeAsVector(); // [dim0,dim1,dim2,dim3] 4D or [dim0,dim1,dim2,dim3,dim4]
inputShapeModif[0] = 1;
isFormatGood = mean->isSameShape(inputShapeModif); // mean [1,dim1,dim2,dim3] 4D or [1,dim1,dim2,dim3,dim4]
}
if(!isFormatGood)
return false;
return true;
}
//////////////////////////////////////////////////////////////////////////
PLATFORM_IMPL(batchnorm_bp, ENGINE_CUDA) {
NDArray* input = INPUT_VARIABLE(0);
NDArray* mean = INPUT_VARIABLE(1);
NDArray* variance = INPUT_VARIABLE(2);
NDArray* gamma = nullptr;
NDArray* beta = nullptr;
NDArray* gradO = INPUT_VARIABLE(block.width() - 1); // next epsilon
NDArray* gradI = OUTPUT_VARIABLE(0);
NDArray* gradM = OUTPUT_VARIABLE(1);
NDArray* gradV = OUTPUT_VARIABLE(2);
NDArray* gradG = nullptr;
NDArray* gradB = nullptr;
const bool applyScale = (bool)INT_ARG(0);
const bool applyOffset = (bool)INT_ARG(1);
const float epsilon = T_ARG(0);
if(applyScale) {
gamma = INPUT_VARIABLE(3);
gradG = OUTPUT_VARIABLE(3);
}
if(applyOffset) {
beta = INPUT_VARIABLE(3 + (int)applyScale);
gradB = OUTPUT_VARIABLE(3 + (int)applyScale);
}
const int numOfIntArgs = block.getIArguments()->size();
const int inRank = input->rankOf();
// get axes args to normalize input array over
std::vector<int> axes;
if(numOfIntArgs > 2)
for(int i = 2; i < numOfIntArgs; ++i)
axes.push_back(INT_ARG(i));
else
axes.push_back(inRank-1); // default dimension to reduce along is last dimension
const int numOfAxes = axes.size();
REQUIRE_TRUE(numOfAxes <= inRank, 0, "BATCHNORM_BP CUDNN op: too big number of input axes to normalize over, expected number should be less or equal to rank of input array, but got %i and %i correspondingly !", numOfAxes, inRank);
// evaluate expected shape for mean, variance and gamma. These 3 arrays should have identical shapes
// for example if input shape is {2,3,4,5,6} and axes = {1,3}, then expected shape would be {1,3,1,5,1}, and if axes = {3}, then expected shape would be {5}
std::vector<Nd4jLong> expShape;
if(numOfAxes == 1)
expShape.push_back(input->sizeAt(axes[0]));
else { // get, for example, something like {1, inputDim1, 1, inputDim3, 1} if axes = {1, 3}
expShape = std::vector<Nd4jLong>(inRank, 1);
for(uint i = 0; i < numOfAxes; ++i)
expShape[axes[i]] = input->sizeAt(axes[i]);
}
REQUIRE_TRUE(mean->isSameShape(expShape), 0, "BATCHNORM_BP CUDNN op: wrong shape of mean array, expected is %s, but got %s instead !", ShapeUtils::shapeAsString(expShape).c_str(), ShapeUtils::shapeAsString(mean).c_str());
REQUIRE_TRUE(variance->isSameShape(expShape), 0, "BATCHNORM_BP CUDNN op: wrong shape of variance array, expected is %s, but got %s instead !", ShapeUtils::shapeAsString(expShape).c_str(), ShapeUtils::shapeAsString(variance).c_str());
if(gamma)
REQUIRE_TRUE(gamma->isSameShape(expShape), 0, "BATCHNORM_BP CUDNN op: wrong shape of gamma array, expected is %s, but got %s instead !", ShapeUtils::shapeAsString(expShape).c_str(), ShapeUtils::shapeAsString(gamma).c_str());
if(beta)
REQUIRE_TRUE(beta->isSameShape(expShape), 0, "BATCHNORM_BP CUDNN op: wrong shape of beta array, expected is %s, but got %s instead !", ShapeUtils::shapeAsString(expShape).c_str(), ShapeUtils::shapeAsString(beta).c_str());
REQUIRE_TRUE(input->isSameShape(gradO), 0, "BATCHNORM_BP CUDNN op: wrong shape of output gradients array, expected is %s, but got %s instead !", ShapeUtils::shapeAsString(input).c_str(), ShapeUtils::shapeAsString(gradO).c_str());
// types of all input arrays should be the same (except gradO)
for(int i = 1; i < block.width() - 2; ++i)
REQUIRE_TRUE(INPUT_VARIABLE(0)->dataType() == INPUT_VARIABLE(i)->dataType(), 0, "BATCHNORM_BP CUDNN op: types of arrays (input, mean, variance, gamma, beta) should be the same !");
// cudnn supports NCHW format only
const bool needPermut = axes.size() == 1 && mean->lengthOf() != input->sizeAt(1);
if(needPermut) { // if NHWC
std::vector<int> perm = inRank == 4 ? std::vector<int>({0, 3, 1, 2}) : std::vector<int>({0, 4, 1, 2, 3}); // NHWC -> NCHW
input = new NDArray(input->permute(perm));
gradO = new NDArray(gradO->permute(perm));
gradI = new NDArray(gradI->permute(perm));
}
// cudnn requires gamma, gradG, gradB to be non-nullptr
if(!applyScale) {
gamma = new NDArray(mean);
gradG = new NDArray(mean);
*gamma = 1;
}
if(!applyOffset)
gradB = new NDArray(mean);
// calculations
batchnormBpCUDNN(block.launchContext(), input, mean, variance, gamma, gradO, gradI, gradG, gradB, epsilon, axes.size() == 1);
*gradM = 0; // put zeros so far
*gradV = 0; // put zeros so far
if(needPermut) {
delete input;
delete gradO;
delete gradI;
}
if(!applyScale) {
delete gamma;
delete gradG;
}
if(!applyOffset)
delete gradB;
return Status::OK();
}
PLATFORM_CHECK(batchnorm_bp, ENGINE_CUDA) {
NDArray* input = INPUT_VARIABLE(0);
NDArray* mean = INPUT_VARIABLE(1);
NDArray* variance = INPUT_VARIABLE(2);
NDArray* gamma = nullptr;
NDArray* beta = nullptr;
NDArray* gradO = INPUT_VARIABLE(block.width() - 1); // next epsilon
NDArray* gradI = OUTPUT_VARIABLE(0);
NDArray* gradM = OUTPUT_VARIABLE(1);
NDArray* gradV = OUTPUT_VARIABLE(2);
NDArray* gradG = nullptr;
NDArray* gradB = nullptr;
const int numOfIntArgs = block.getIArguments()->size();
const int xRank = input->rankOf();
// *********************************** //
if(xRank != 4 && xRank != 5)
return false;
// *********************************** //
const bool badType = input->dataType() != DataType::DOUBLE && input->dataType() != DataType::FLOAT32 && input->dataType() != DataType::HALF;
if(badType)
return false;
// *********************************** //
// get axes args to normalize input array over
std::vector<int> axes;
if(numOfIntArgs > 2)
for(int i = 2; i < numOfIntArgs; ++i)
axes.push_back(INT_ARG(i));
else
axes.push_back(xRank-1); // default dimension to reduce along is last dimension
if(axes.size() != 1 && axes.size() != 3 && axes.size() != 4)
return false;
// *********************************** //
bool allParamsHaveSameShapeAndStrides = shape::haveSameShapeAndStrides(mean->getShapeInfo(), variance->getShapeInfo());
if(gamma)
allParamsHaveSameShapeAndStrides &= shape::haveSameShapeAndStrides(mean->getShapeInfo(), gamma->getShapeInfo());
if(gradG)
allParamsHaveSameShapeAndStrides &= shape::haveSameShapeAndStrides(mean->getShapeInfo(), gradG->getShapeInfo());
if(gradB)
allParamsHaveSameShapeAndStrides &= shape::haveSameShapeAndStrides(mean->getShapeInfo(), gradB->getShapeInfo());
if(!allParamsHaveSameShapeAndStrides)
return false;
// *********************************** //
bool isFormatGood = false;
if(axes.size() == 1)
isFormatGood = mean->lengthOf() == input->sizeAt(1) || mean->lengthOf() == input->sizeAt(-1); // mean [C]
else {
auto inputShapeModif = input->getShapeAsVector(); // [dim0,dim1,dim2,dim3] 4D or [dim0,dim1,dim2,dim3,dim4]
inputShapeModif[0] = 1;
isFormatGood = mean->isSameShape(inputShapeModif); // mean [1,dim1,dim2,dim3] 4D or [1,dim1,dim2,dim3,dim4]
}
if(!isFormatGood)
return false;
return true;
}
}
}
}
|
7bc4ecca183672b22c1622fb3f472920caf5698c.cu
|
/*******************************************************************************
* Copyright (c) 2019 Konduit K.K.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Yurii Shyrma ([email protected])
//
#include "cudnnUtils.h"
#include <ops/declarable/helpers/convolutions.h>
namespace sd {
namespace ops {
namespace platforms {
//////////////////////////////////////////////////////////////////////////
static void batchnormCUDNN(const LaunchContext* context,
const NDArray* input, const NDArray* mean, const NDArray* variance,
const NDArray* gamma, const NDArray* beta,
NDArray* output,
const double epsilon, const bool isSpatialMode) {
// input, output -> 4D:nchw, 5D:ncdhw
// mean, variance, gamma, beta -> 1xCx1x1 for 4D and 1xCx1x1x1 for 5D for BATCHNORM_MODE_SPATIAL mode
// -> 1xCxHxW for 4D and 1xCxDxHxW for 5D for BATCHNORM_MODE_PER_ACTIVATION mode
const cudnnDataType_t dataType = cudnnDataType(input->dataType());
const int xRank = input->rankOf();
auto handle = reinterpret_cast<cudnnHandle_t *>(context->getCuDnnHandle());
cudnnStatus_t err = cudnnSetStream(*handle, *context->getCudaStream());
if (err != 0) throw sd::cuda_exception::build("conv2dCUDNN: can't set stream for cuDNN", err);
const std::vector<int> xShape = input->getShapeAsVectorInt(); // input and output have same shapes
std::vector<int> paramsShape, paramsStrides; // mean, variance, gamma and beta have same shapes
if(isSpatialMode) { // 1xCx1x1
const int iC = mean->lengthOf();
const int stride0 = mean->strideAt(0);
paramsShape = xRank == 4 ? std::vector<int>({1, iC, 1, 1}) : std::vector<int>({1, iC, 1, 1, 1});
paramsStrides = xRank == 4 ? std::vector<int>({iC*stride0, stride0, 1, 1}) : std::vector<int>({iC*stride0, stride0, 1, 1, 1});
}
else {
paramsShape = mean->getShapeAsVectorInt();
paramsStrides = xRank == 4 ? std::vector<int>({(int)mean->strideAt(0), (int)mean->strideAt(1), (int)mean->strideAt(2), (int)mean->strideAt(3)}) : std::vector<int>({(int)mean->strideAt(0), (int)mean->strideAt(1), (int)mean->strideAt(2), (int)mean->strideAt(3), (int)mean->strideAt(4)});
}
std::vector<int> xStrides = {(int)input->strideAt(0), (int)input->strideAt(1), (int)input->strideAt(2), (int)input->strideAt(3)};
std::vector<int> zStrides = {(int)output->strideAt(0), (int)output->strideAt(1), (int)output->strideAt(2), (int)output->strideAt(3)};
if(xRank > 4) { // 5D
xStrides.push_back((int)input->strideAt(4));
zStrides.push_back((int)output->strideAt(4));
}
cudnnTensorFormat_t format = CUDNN_TENSOR_NCHW;
// input descriptor
cudnnTensorDescriptor_t x;
cudnnCreateTensorDescriptor(&x);
if(input->ews() == 1)
err = cudnnSetTensorNdDescriptorEx(x, format, dataType, xRank, xShape.data());
else
err = cudnnSetTensorNdDescriptor(x, dataType, xRank, xShape.data(), xStrides.data());
if (err != 0) throw sd::cuda_exception::build("batchnormCUDNN: cudnnSetTensorNdDescriptor/cudnnSetTensorNdDescriptorEx for input failed", err);
// output descriptor
cudnnTensorDescriptor_t z;
cudnnCreateTensorDescriptor(&z);
if(output->ews() == 1)
err = cudnnSetTensorNdDescriptorEx(z, format, dataType, xRank, xShape.data());
else
err = cudnnSetTensorNdDescriptor(z, dataType, xRank, xShape.data(), zStrides.data());
if (err != 0) throw sd::cuda_exception::build("batchnormCUDNN: cudnnSetTensorNdDescriptor/cudnnSetTensorNdDescriptorEx for output failed", err);
// mean, variance, gamma and beta descriptor, the same descriptor for all of them
cudnnTensorDescriptor_t params;
cudnnCreateTensorDescriptor(¶ms);
if(mean->ews() == 1)
err = cudnnSetTensorNdDescriptorEx(params, format, dataType, xRank, paramsShape.data());
else
err = cudnnSetTensorNdDescriptor(params, dataType, xRank, paramsShape.data(), paramsStrides.data());
if (err != 0) throw sd::cuda_exception::build("batchnormCUDNN: cudnnSetTensorNdDescriptor/cudnnSetTensorNdDescriptorEx for mean/variance/gamma/beta failed", err);
// provide scaling parameters
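// cuDNN expects these host-side alpha/beta factors to match the computation type (float for half/float tensors,
// double for double tensors), which is why the pointer is selected from the element size of the output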
const float alpha32(1), beta32(0);
const double alpha64(1), beta64(0);
const void* ptrAlpha = output->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&alpha32) : reinterpret_cast<const void*>(&alpha64);
const void* ptrBeta = output->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&beta32) : reinterpret_cast<const void*>(&beta64);
NDArray::prepareSpecialUse({output}, {input, mean, variance, gamma, beta});
// calculations
err = cudnnBatchNormalizationForwardInference(*handle, isSpatialMode ? CUDNN_BATCHNORM_SPATIAL : CUDNN_BATCHNORM_PER_ACTIVATION,
ptrAlpha, ptrBeta,
x, input->getSpecialBuffer(),
z, output->getSpecialBuffer(),
params,
gamma->getSpecialBuffer(), beta->getSpecialBuffer(),
mean->getSpecialBuffer(), variance->getSpecialBuffer(), epsilon);
if (err != 0) throw sd::cuda_exception::build("batchnormCUDNN: cudnnBatchNormalizationForwardInference failed", err);
auto cudaErr = cudaStreamSynchronize(*context->getCudaStream());
if (cudaErr != 0)
throw cuda_exception::build("batchnormCUDNN: cudaStreamSynchronize failed !", cudaErr);
NDArray::registerSpecialUse({output}, {input, mean, variance, gamma, beta});
}
//////////////////////////////////////////////////////////////////////////
static void batchnormBpCUDNN(const LaunchContext* context,
const NDArray* input, const NDArray* mean, const NDArray* variance, const NDArray* gamma, const NDArray* gradO,
NDArray* gradI, NDArray* gradG, NDArray* gradB,
const double epsilon, const bool isSpatialMode) {
// input, gradO, gradI -> 4D:nchw, 5D:ncdhw
// mean, variance, gamma, beta, gradM, gradV, gradG, gradB -> 1xCx1x1 for 4D and 1xCx1x1x1 for 5D for BATCHNORM_MODE_SPATIAL mode
// -> 1xCxHxW for 4D and 1xCxDxHxW for 5D for BATCHNORM_MODE_PER_ACTIVATION mode
const cudnnDataType_t dataType = cudnnDataType(input->dataType());
const int xRank = input->rankOf();
auto handle = reinterpret_cast<cudnnHandle_t *>(context->getCuDnnHandle());
cudnnStatus_t err = cudnnSetStream(*handle, *context->getCudaStream());
if (err != 0) throw sd::cuda_exception::build("batchnormBpCUDNN: can't set stream for cuDNN", err);
const std::vector<int> xShape = input->getShapeAsVectorInt(); // input and output have same shapes
std::vector<int> paramsShape, paramsStrides; // mean, variance, gamma and beta have same shapes
if(isSpatialMode) { // 1xCx1x1
const int iC = mean->lengthOf();
const int stride0 = mean->strideAt(0);
paramsShape = xRank == 4 ? std::vector<int>({1, iC, 1, 1}) : std::vector<int>({1, iC, 1, 1, 1});
paramsStrides = xRank == 4 ? std::vector<int>({iC*stride0, stride0, 1, 1}) : std::vector<int>({iC*stride0, stride0, 1, 1, 1});
}
else {
paramsShape = mean->getShapeAsVectorInt();
paramsStrides = xRank == 4 ? std::vector<int>({(int)mean->strideAt(0), (int)mean->strideAt(1), (int)mean->strideAt(2), (int)mean->strideAt(3)}) : std::vector<int>({(int)mean->strideAt(0), (int)mean->strideAt(1), (int)mean->strideAt(2), (int)mean->strideAt(3), (int)mean->strideAt(4)});
}
std::vector<int> xStrides = {(int)input->strideAt(0), (int)input->strideAt(1), (int)input->strideAt(2), (int)input->strideAt(3)};
std::vector<int> dxStrides = {(int)gradI->strideAt(0), (int)gradI->strideAt(1), (int)gradI->strideAt(2), (int)gradI->strideAt(3)};
std::vector<int> dzStrides = {(int)gradO->strideAt(0), (int)gradO->strideAt(1), (int)gradO->strideAt(2), (int)gradO->strideAt(3)};
if(xRank > 4) { // 5D
xStrides.push_back((int)input->strideAt(4));
dxStrides.push_back((int)gradI->strideAt(4));
dzStrides.push_back((int)gradO->strideAt(4));
}
cudnnTensorFormat_t format = CUDNN_TENSOR_NCHW;
// input descriptor
cudnnTensorDescriptor_t x;
cudnnCreateTensorDescriptor(&x);
if(input->ews() == 1)
err = cudnnSetTensorNdDescriptorEx(x, format, dataType, xRank, xShape.data());
else
err = cudnnSetTensorNdDescriptor(x, dataType, xRank, xShape.data(), xStrides.data());
if (err != 0) throw sd::cuda_exception::build("batchnormBpCUDNN: cudnnSetTensorNdDescriptor/cudnnSetTensorNdDescriptorEx for input failed", err);
// gradO descriptor
cudnnTensorDescriptor_t dz;
cudnnCreateTensorDescriptor(&dz);
if(gradO->ews() == 1)
err = cudnnSetTensorNdDescriptorEx(dz, format, dataType, xRank, xShape.data());
else
err = cudnnSetTensorNdDescriptor(dz, dataType, xRank, xShape.data(), dzStrides.data());
if (err != 0) throw sd::cuda_exception::build("batchnormBpCUDNN: cudnnSetTensorNdDescriptor/cudnnSetTensorNdDescriptorEx for gradO failed", err);
// gradI descriptor
cudnnTensorDescriptor_t dx;
cudnnCreateTensorDescriptor(&dx);
if(input->ews() == 1)
err = cudnnSetTensorNdDescriptorEx(dx, format, dataType, xRank, xShape.data());
else
err = cudnnSetTensorNdDescriptor(dx, dataType, xRank, xShape.data(), dxStrides.data());
if (err != 0) throw sd::cuda_exception::build("batchnormBpCUDNN: cudnnSetTensorNdDescriptor/cudnnSetTensorNdDescriptorEx for gradI failed", err);
// mean, variance, gamma, gradG and gradB descriptor, the same descriptor for all of them
cudnnTensorDescriptor_t params;
cudnnCreateTensorDescriptor(¶ms);
if(mean->ews() == 1)
err = cudnnSetTensorNdDescriptorEx(params, format, dataType, xRank, paramsShape.data());
else
err = cudnnSetTensorNdDescriptor(params, dataType, xRank, paramsShape.data(), paramsStrides.data());
if (err != 0) throw sd::cuda_exception::build("batchnormBpCUDNN: cudnnSetTensorNdDescriptor/cudnnSetTensorNdDescriptorEx for mean/variance/gamma/gradG/gradB failed", err);
// provide scaling parameters
const float alpha32(1), beta32(0);
double alpha64(1), beta64(0);
const void* ptrAlpha = input->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&alpha32) : reinterpret_cast<const void*>(&alpha64);
const void* ptrBeta = input->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&beta32) : reinterpret_cast<const void*>(&beta64);
NDArray::prepareSpecialUse({gradI, gradG, gradB}, {input, mean, variance, gamma, gradO});
// calculations
// TODO: we can use cache here
err = cudnnBatchNormalizationBackward(*handle, isSpatialMode ? CUDNN_BATCHNORM_SPATIAL : CUDNN_BATCHNORM_PER_ACTIVATION,
ptrAlpha, ptrBeta, ptrAlpha, ptrBeta,
x, input->getSpecialBuffer(),
dz, gradO->getSpecialBuffer(),
dx, gradI->getSpecialBuffer(),
params,
gamma->getSpecialBuffer(), gradG->getSpecialBuffer(), gradB->getSpecialBuffer(),
epsilon,
nullptr/*mean->getSpecialBuffer()*/, nullptr/*variance->getSpecialBuffer()*/);
if (err != 0) throw sd::cuda_exception::build("batchnormBpCUDNN: cudnnBatchNormalizationBackward failed", err);
auto cudaErr = cudaStreamSynchronize(*context->getCudaStream());
if (cudaErr != 0)
throw cuda_exception::build("batchnormBpCUDNN: cudaStreamSynchronize failed !", cudaErr);
NDArray::registerSpecialUse({gradI, gradG, gradB}, {input, mean, variance, gamma, gradO});
}
//////////////////////////////////////////////////////////////////////////
PLATFORM_IMPL(batchnorm, ENGINE_CUDA) {
auto input = INPUT_VARIABLE(0);
auto mean = INPUT_VARIABLE(1);
auto variance = INPUT_VARIABLE(2);
NDArray* gamma = nullptr;
NDArray* beta = nullptr;
auto output = OUTPUT_VARIABLE(0);
const bool applyScale = (bool)INT_ARG(0);
const bool applyOffset = (bool)INT_ARG(1);
const double epsilon = T_ARG(0);
if(applyScale)
gamma = INPUT_VARIABLE(3);
if(applyOffset)
beta = INPUT_VARIABLE(3 + (int)applyScale);
const int numOfIntArgs = block.getIArguments()->size();
const int inRank = input->rankOf();
// get axes args to normalize input array over
std::vector<int> axes;
if(numOfIntArgs > 2)
for(int i = 2; i < numOfIntArgs; ++i)
axes.push_back(INT_ARG(i));
else
axes.push_back(inRank-1); // default dimension to reduce along is last dimension
const int numOfAxes = axes.size();
REQUIRE_TRUE(numOfAxes <= inRank, 0, "BATCHNORM CUDNN op: too big number of input axes to normalize over, expected number should be less or equal to rank of input array, but got %i and %i correspondingly !", numOfAxes, inRank);
// evaluate expected shape for mean, variance and gamma. These 3 arrays should have identical shapes
// for example if input shape is {2,3,4,5,6} and axes = {1,3}, then expected shape would be {1,3,1,5,1}, and if axes = {3}, then expected shape would be {5}
std::vector<Nd4jLong> expShape;
if(numOfAxes == 1)
expShape.push_back(input->sizeAt(axes[0]));
else { // get, for example, something like {1, inputDim1, 1, inputDim3, 1} if axes = {1, 3}
expShape = std::vector<Nd4jLong>(inRank, 1);
for(uint i = 0; i < numOfAxes; ++i)
expShape[axes[i]] = input->sizeAt(axes[i]);
}
REQUIRE_TRUE(mean->isSameShape(expShape) , 0, "BATCHNORM CUDNN op: wrong shape of mean array, expected is %s, but got %s instead !", ShapeUtils::shapeAsString(expShape).c_str(), ShapeUtils::shapeAsString(mean).c_str());
REQUIRE_TRUE(variance->isSameShape(expShape), 0, "BATCHNORM CUDNN op: wrong shape of variance array, expected is %s, but got %s instead !", ShapeUtils::shapeAsString(expShape).c_str(), ShapeUtils::shapeAsString(variance).c_str());
if(gamma)
REQUIRE_TRUE(gamma->isSameShape(expShape), 0, "BATCHNORM CUDNN op: wrong shape of gamma array, expected is %s, but got %s instead !", ShapeUtils::shapeAsString(expShape).c_str(), ShapeUtils::shapeAsString(gamma).c_str());
if(beta)
REQUIRE_TRUE(beta->isSameShape(expShape), 0, "BATCHNORM CUDNN op: wrong shape of beta array, expected is %s, but got %s instead !", ShapeUtils::shapeAsString(expShape).c_str(), ShapeUtils::shapeAsString(beta).c_str());
// types of all input arrays should be the same
for(int i = 1; i < block.width(); ++i)
REQUIRE_TRUE(INPUT_VARIABLE(0)->dataType() == INPUT_VARIABLE(i)->dataType(), 0, "BATCHNORM CUDNN op: types of all input arrays should be the same !");
// cudnn supports NCHW format only
const bool needPermut = axes.size() == 1 && mean->lengthOf() == input->sizeAt(-1);
if(needPermut) { // if NHWC
std::vector<int> perm = inRank == 4 ? std::vector<int>({0, 3, 1, 2}) : std::vector<int>({0, 4, 1, 2, 3}); // NHWC -> NCHW
input = new NDArray(input->permute(perm));
output = new NDArray(output->permute(perm));
}
// cudnn requires gamma and beta to be non-nullptr
if(!applyScale) {
gamma = new NDArray(mean);
*gamma = 1;
}
if(!applyOffset) {
beta = new NDArray(mean);
*beta = 0;
}
// calculations
batchnormCUDNN(block.launchContext(), input, mean, variance, gamma, beta, output, epsilon, axes.size() == 1);
if(needPermut) {
delete input;
delete output;
}
if(!applyScale)
delete gamma;
if(!applyOffset)
delete beta;
return Status::OK();
}
//////////////////////////////////////////////////////////////////////////
PLATFORM_CHECK(batchnorm, ENGINE_CUDA) {
const bool applyScale = (bool)INT_ARG(0);
const bool applyOffset = (bool)INT_ARG(1);
NDArray* input = INPUT_VARIABLE(0);
NDArray* mean = INPUT_VARIABLE(1);
NDArray* variance = INPUT_VARIABLE(2);
NDArray* gamma = applyScale ? INPUT_VARIABLE(3) : nullptr;
NDArray* beta = applyOffset ? INPUT_VARIABLE(3 + (int)applyScale) : nullptr;
const int numOfIntArgs = block.getIArguments()->size();
const int xRank = input->rankOf();
// *********************************** //
if(xRank != 4 && xRank != 5)
return false;
// *********************************** //
const bool badType = input->dataType() != DataType::DOUBLE && input->dataType() != DataType::FLOAT32 && input->dataType() != DataType::HALF;
if(badType)
return false;
// *********************************** //
// get axes args to normalize input array over
std::vector<int> axes;
if(numOfIntArgs > 2)
for(int i = 2; i < numOfIntArgs; ++i)
axes.push_back(INT_ARG(i));
else
axes.push_back(xRank-1); // default dimension to reduce along is last dimension
if(axes.size() != 1 && axes.size() != 3 && axes.size() != 4)
return false;
// *********************************** //
bool allParamsHaveSameShapeAndStrides = shape::haveSameShapeAndStrides(mean->getShapeInfo(), variance->getShapeInfo());
if(gamma)
allParamsHaveSameShapeAndStrides &= shape::haveSameShapeAndStrides(mean->getShapeInfo(), gamma->getShapeInfo());
if(beta)
allParamsHaveSameShapeAndStrides &= shape::haveSameShapeAndStrides(mean->getShapeInfo(), beta->getShapeInfo());
if(!allParamsHaveSameShapeAndStrides)
return false;
// *********************************** //
bool isFormatGood = false;
if(axes.size() == 1)
isFormatGood = mean->lengthOf() == input->sizeAt(1) || mean->lengthOf() == input->sizeAt(-1); // mean [C]
else {
auto inputShapeModif = input->getShapeAsVector(); // [dim0,dim1,dim2,dim3] 4D or [dim0,dim1,dim2,dim3,dim4]
inputShapeModif[0] = 1;
isFormatGood = mean->isSameShape(inputShapeModif); // mean [1,dim1,dim2,dim3] 4D or [1,dim1,dim2,dim3,dim4]
}
if(!isFormatGood)
return false;
return true;
}
//////////////////////////////////////////////////////////////////////////
PLATFORM_IMPL(batchnorm_bp, ENGINE_CUDA) {
NDArray* input = INPUT_VARIABLE(0);
NDArray* mean = INPUT_VARIABLE(1);
NDArray* variance = INPUT_VARIABLE(2);
NDArray* gamma = nullptr;
NDArray* beta = nullptr;
NDArray* gradO = INPUT_VARIABLE(block.width() - 1); // next epsilon
NDArray* gradI = OUTPUT_VARIABLE(0);
NDArray* gradM = OUTPUT_VARIABLE(1);
NDArray* gradV = OUTPUT_VARIABLE(2);
NDArray* gradG = nullptr;
NDArray* gradB = nullptr;
const bool applyScale = (bool)INT_ARG(0);
const bool applyOffset = (bool)INT_ARG(1);
const float epsilon = T_ARG(0);
if(applyScale) {
gamma = INPUT_VARIABLE(3);
gradG = OUTPUT_VARIABLE(3);
}
if(applyOffset) {
beta = INPUT_VARIABLE(3 + (int)applyScale);
gradB = OUTPUT_VARIABLE(3 + (int)applyScale);
}
const int numOfIntArgs = block.getIArguments()->size();
const int inRank = input->rankOf();
// get axes args to normalize input array over
std::vector<int> axes;
if(numOfIntArgs > 2)
for(int i = 2; i < numOfIntArgs; ++i)
axes.push_back(INT_ARG(i));
else
axes.push_back(inRank-1); // default dimension to reduce along is last dimension
const int numOfAxes = axes.size();
REQUIRE_TRUE(numOfAxes <= inRank, 0, "BATCHNORM_BP CUDNN op: too big number of input axes to normalize over, expected number should be less or equal to rank of input array, but got %i and %i correspondingly !", numOfAxes, inRank);
// evaluate expected shape for mean, variance and gamma. These 3 arrays should have identical shapes
// for example if input shape is {2,3,4,5,6} and axes = {1,3}, then expected shape would be {1,3,1,5,1}, and if axes = {3}, then expected shape would be {5}
std::vector<Nd4jLong> expShape;
if(numOfAxes == 1)
expShape.push_back(input->sizeAt(axes[0]));
else { // get, for example, something like {1, inputDim1, 1, inputDim3, 1} if axes = {1, 3}
expShape = std::vector<Nd4jLong>(inRank, 1);
for(uint i = 0; i < numOfAxes; ++i)
expShape[axes[i]] = input->sizeAt(axes[i]);
}
REQUIRE_TRUE(mean->isSameShape(expShape), 0, "BATCHNORM_BP CUDNN op: wrong shape of mean array, expected is %s, but got %s instead !", ShapeUtils::shapeAsString(expShape).c_str(), ShapeUtils::shapeAsString(mean).c_str());
REQUIRE_TRUE(variance->isSameShape(expShape), 0, "BATCHNORM_BP CUDNN op: wrong shape of variance array, expected is %s, but got %s instead !", ShapeUtils::shapeAsString(expShape).c_str(), ShapeUtils::shapeAsString(variance).c_str());
if(gamma)
REQUIRE_TRUE(gamma->isSameShape(expShape), 0, "BATCHNORM_BP CUDNN op: wrong shape of gamma array, expected is %s, but got %s instead !", ShapeUtils::shapeAsString(expShape).c_str(), ShapeUtils::shapeAsString(gamma).c_str());
if(beta)
REQUIRE_TRUE(beta->isSameShape(expShape), 0, "BATCHNORM_BP CUDNN op: wrong shape of beta array, expected is %s, but got %s instead !", ShapeUtils::shapeAsString(expShape).c_str(), ShapeUtils::shapeAsString(beta).c_str());
REQUIRE_TRUE(input->isSameShape(gradO), 0, "BATCHNORM_BP CUDNN op: wrong shape of output gradients array, expected is %s, but got %s instead !", ShapeUtils::shapeAsString(input).c_str(), ShapeUtils::shapeAsString(gradO).c_str());
// types of all input arrays should be the same (except gradO)
for(int i = 1; i < block.width() - 2; ++i)
REQUIRE_TRUE(INPUT_VARIABLE(0)->dataType() == INPUT_VARIABLE(i)->dataType(), 0, "BATCHNORM_BP CUDNN op: types of arrays (input, mean, variance, gamma, beta) should be the same !");
// cudnn supports NCHW format only
const bool needPermut = axes.size() == 1 && mean->lengthOf() != input->sizeAt(1);
if(needPermut) { // if NHWC
std::vector<int> perm = inRank == 4 ? std::vector<int>({0, 3, 1, 2}) : std::vector<int>({0, 4, 1, 2, 3}); // NHWC -> NCHW
input = new NDArray(input->permute(perm));
gradO = new NDArray(gradO->permute(perm));
gradI = new NDArray(gradI->permute(perm));
}
// cudnn requires gamma, gradG, gradB to be non-nullptr
if(!applyScale) {
gamma = new NDArray(mean);
gradG = new NDArray(mean);
*gamma = 1;
}
if(!applyOffset)
gradB = new NDArray(mean);
// calculations
batchnormBpCUDNN(block.launchContext(), input, mean, variance, gamma, gradO, gradI, gradG, gradB, epsilon, axes.size() == 1);
*gradM = 0; // put zeros so far
*gradV = 0; // put zeros so far
if(needPermut) {
delete input;
delete gradO;
delete gradI;
}
if(!applyScale) {
delete gamma;
delete gradG;
}
if(!applyOffset)
delete gradB;
return Status::OK();
}
PLATFORM_CHECK(batchnorm_bp, ENGINE_CUDA) {
NDArray* input = INPUT_VARIABLE(0);
NDArray* mean = INPUT_VARIABLE(1);
NDArray* variance = INPUT_VARIABLE(2);
NDArray* gamma = nullptr;
NDArray* beta = nullptr;
NDArray* gradO = INPUT_VARIABLE(block.width() - 1); // next epsilon
NDArray* gradI = OUTPUT_VARIABLE(0);
NDArray* gradM = OUTPUT_VARIABLE(1);
NDArray* gradV = OUTPUT_VARIABLE(2);
NDArray* gradG = nullptr;
NDArray* gradB = nullptr;
const int numOfIntArgs = block.getIArguments()->size();
const int xRank = input->rankOf();
// *********************************** //
if(xRank != 4 && xRank != 5)
return false;
// *********************************** //
const bool badType = input->dataType() != DataType::DOUBLE && input->dataType() != DataType::FLOAT32 && input->dataType() != DataType::HALF;
if(badType)
return false;
// *********************************** //
// get axes args to normalize input array over
std::vector<int> axes;
if(numOfIntArgs > 2)
for(int i = 2; i < numOfIntArgs; ++i)
axes.push_back(INT_ARG(i));
else
axes.push_back(xRank-1); // default dimension to reduce along is last dimension
if(axes.size() != 1 && axes.size() != 3 && axes.size() != 4)
return false;
// *********************************** //
bool allParamsHaveSameShapeAndStrides = shape::haveSameShapeAndStrides(mean->getShapeInfo(), variance->getShapeInfo());
if(gamma)
allParamsHaveSameShapeAndStrides &= shape::haveSameShapeAndStrides(mean->getShapeInfo(), gamma->getShapeInfo());
if(gradG)
allParamsHaveSameShapeAndStrides &= shape::haveSameShapeAndStrides(mean->getShapeInfo(), gradG->getShapeInfo());
if(gradB)
allParamsHaveSameShapeAndStrides &= shape::haveSameShapeAndStrides(mean->getShapeInfo(), gradB->getShapeInfo());
if(!allParamsHaveSameShapeAndStrides)
return false;
// *********************************** //
bool isFormatGood = false;
if(axes.size() == 1)
isFormatGood = mean->lengthOf() == input->sizeAt(1) || mean->lengthOf() == input->sizeAt(-1); // mean [C]
else {
auto inputShapeModif = input->getShapeAsVector(); // [dim0,dim1,dim2,dim3] 4D or [dim0,dim1,dim2,dim3,dim4]
inputShapeModif[0] = 1;
isFormatGood = mean->isSameShape(inputShapeModif); // mean [1,dim1,dim2,dim3] 4D or [1,dim1,dim2,dim3,dim4]
}
if(!isFormatGood)
return false;
return true;
}
}
}
}
|
a3a50326ae3edc3cfddfabd2018fbf91d5bf48c9.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2019-2020 NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../src/cudapoa_generate_consensus.cuh" //generateConsensusHost, CUDAPOA_MAX_NODE_EDGES, CUDAPOA_MAX_NODE_ALIGNMENTS
#include "sorted_graph.hpp" //SortedGraph
#include <claraparabricks/genomeworks/utils/cudautils.hpp> //GW_CU_CHECK_ERR
#include <claraparabricks/genomeworks/utils/signed_integer_utils.hpp> //get_size
#include "gtest/gtest.h"
namespace claraparabricks
{
namespace genomeworks
{
namespace cudapoa
{
class BasicGenerateConsensus
{
public:
BasicGenerateConsensus(std::vector<uint8_t> nodes, std::vector<SizeT> sorted_graph, SizeTVec2D node_alignments,
SizeTVec2D outgoing_edges, std::vector<uint16_t> node_coverage_counts, Uint16Vec2D outgoing_edge_w)
: graph_(nodes, sorted_graph, node_alignments, node_coverage_counts, outgoing_edges)
, outgoing_edge_w_(outgoing_edge_w)
, outgoing_edges_(outgoing_edges)
{
}
void get_graph_buffers(uint8_t* nodes, SizeT* node_count,
SizeT* sorted_poa, SizeT* node_id_to_pos,
SizeT* incoming_edges, uint16_t* incoming_edge_count,
SizeT* outgoing_edges, uint16_t* outgoing_edge_count,
uint16_t* incoming_edge_w, uint16_t* node_coverage_counts,
SizeT* node_alignments, uint16_t* node_alignment_count) const
{
graph_.get_nodes(nodes, node_count);
graph_.get_sorted_graph(sorted_poa);
graph_.get_node_id_to_pos(node_id_to_pos);
graph_.get_node_coverage_counts(node_coverage_counts);
graph_.get_edges(incoming_edges, incoming_edge_count, outgoing_edges, outgoing_edge_count);
graph_.get_node_alignments(node_alignments, node_alignment_count);
get_incoming_edge_w(incoming_edge_w);
}
void get_incoming_edge_w(uint16_t* incoming_edge_w) const
{
auto outgoing_edges = graph_.get_outgoing_edges();
for (int i = 0; i < get_size(outgoing_edges); i++)
{
for (int j = 0; j < get_size(outgoing_edges[i]); j++)
{
SizeT to_node = outgoing_edges[i][j];
incoming_edge_w[to_node * CUDAPOA_MAX_NODE_EDGES + i] = outgoing_edge_w_[i][j];
}
}
}
protected:
SortedGraph graph_;
SizeTVec2D outgoing_edges_;
Uint16Vec2D outgoing_edge_w_;
};
typedef std::pair<std::string, BasicGenerateConsensus> GenerateConsensusTestPair;
// create a vector of test cases
std::vector<GenerateConsensusTestPair> getGenerateConsensusTestCases()
{
std::vector<GenerateConsensusTestPair> test_cases;
/*
* T
* / \
 * graph A — A A
* \ /
* A
*/
std::string ans_1 = "ATAA";
BasicGenerateConsensus gc_1({'A', 'A', 'A', 'A', 'T'}, //nodes
{0, 1, 2, 4, 3}, //sorted_graph
{{}, {}, {4}, {}, {2}}, //node_alignments
{{1}, {2, 4}, {3}, {}, {3}}, //outgoing_edges
{2, 2, 1, 2, 1}, //node_coverage_counts
{{5}, {4, 3}, {2}, {}, {1}}); //outgoing_edge_w
test_cases.emplace_back(std::move(ans_1), std::move(gc_1));
/*
 * graph A — T — C — G — A
*/
std::string ans_2 = "AGCTA";
BasicGenerateConsensus gc_2({'A', 'T', 'C', 'G', 'A'}, //nodes
{0, 1, 2, 3, 4}, //sorted_graph
{{}, {}, {}, {}, {}}, //node_alignments
{{1}, {2}, {3}, {4}, {}}, //outgoing_edges
{1, 1, 1, 1, 1}, //node_coverage_counts
{{4}, {3}, {2}, {1}, {}});
test_cases.emplace_back(std::move(ans_2), std::move(gc_2));
/*
* T
* / \
 * graph A — C — C — G
* \ /
* A
*/
std::string ans_3 = "GCCA";
BasicGenerateConsensus gc_3({'A', 'A', 'C', 'G', 'C', 'T'}, //nodes
{0, 1, 4, 5, 2, 3}, //sorted_graph
{{}, {4, 5}, {}, {}, {1, 5}, {1, 4}}, //node_alignments
{{1, 4, 5}, {2}, {3}, {}, {2}, {2}}, //outgoing_edges
{3, 1, 3, 3, 1, 1}, //node_coverage_counts
{{7, 6, 5}, {4}, {3}, {}, {2}, {1}});
test_cases.emplace_back(std::move(ans_3), std::move(gc_3));
/*
 * graph A — T — T — G — A
* \_____________/
*/
std::string ans_4 = "AGTTA";
BasicGenerateConsensus gc_4({'A', 'T', 'T', 'G', 'A'}, //nodes
{0, 1, 2, 3, 4}, //sorted_graph
{{}, {}, {}, {}, {}}, //node_alignments
{{1, 4}, {2}, {3}, {4}, {}}, //outgoing_edges
{2, 1, 1, 1, 2}, //node_coverage_counts
{{5, 4}, {3}, {2}, {1}, {}});
test_cases.emplace_back(std::move(ans_4), std::move(gc_4));
/*
 * T — G
* / \
 * graph A — C — A — T — A
* \ /
* T
*/
std::string ans_5 = "ATTCA";
BasicGenerateConsensus gc_5({'A', 'T', 'G', 'T', 'A', 'C', 'A', 'T'}, //nodes
{0, 1, 5, 2, 6, 7, 3, 4}, //sorted_graph
{{}, {5}, {6, 7}, {}, {}, {1}, {2, 7}, {2, 6}}, //node_alignments
{{1, 5}, {2}, {3}, {4}, {}, {6, 7}, {3}, {3}}, //outgoing_edges
{3, 1, 1, 3, 3, 2, 1, 1}, //node_coverage_counts
{{9, 8}, {7}, {6}, {5}, {}, {4, 3}, {2}, {1}});
test_cases.emplace_back(std::move(ans_5), std::move(gc_5));
//add more test cases below
return test_cases;
}
// host function for calling the kernel to test the generateConsensus device function.
std::string testGenerateConsensus(const BasicGenerateConsensus& obj)
{
//declare device buffer
uint8_t* nodes;
SizeT* node_count;
SizeT* graph;
SizeT* node_id_to_pos;
SizeT* incoming_edges;
uint16_t* incoming_edge_count;
SizeT* outgoing_edges;
uint16_t* outgoing_edge_count;
uint16_t* incoming_edge_w;
uint16_t* node_coverage_counts;
SizeT* node_alignments;
uint16_t* node_alignment_count;
//buffers that don't need initialization
SizeT* predecessors;
int32_t* scores;
uint8_t* consensus;
uint16_t* coverage;
//default data size limits
BatchConfig batch_size;
//allocate unified memory so they can be accessed by both host and device.
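//(managed allocations let the host-side test fixture fill these buffers directly and the kernel read them,
//without explicit host<->device copies)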
GW_CU_CHECK_ERR(hipMallocManaged((void**)&nodes, batch_size.max_nodes_per_graph * sizeof(uint8_t)));
GW_CU_CHECK_ERR(hipMallocManaged((void**)&node_count, sizeof(SizeT)));
GW_CU_CHECK_ERR(hipMallocManaged((void**)&graph, batch_size.max_nodes_per_graph * sizeof(SizeT)));
GW_CU_CHECK_ERR(hipMallocManaged((void**)&node_id_to_pos, batch_size.max_nodes_per_graph * sizeof(SizeT)));
GW_CU_CHECK_ERR(hipMallocManaged((void**)&incoming_edges, batch_size.max_nodes_per_graph * CUDAPOA_MAX_NODE_EDGES * sizeof(SizeT)));
GW_CU_CHECK_ERR(hipMallocManaged((void**)&incoming_edge_count, batch_size.max_nodes_per_graph * sizeof(uint16_t)));
GW_CU_CHECK_ERR(hipMallocManaged((void**)&outgoing_edges, batch_size.max_nodes_per_graph * CUDAPOA_MAX_NODE_EDGES * sizeof(SizeT)));
GW_CU_CHECK_ERR(hipMallocManaged((void**)&outgoing_edge_count, batch_size.max_nodes_per_graph * sizeof(uint16_t)));
GW_CU_CHECK_ERR(hipMallocManaged((void**)&incoming_edge_w, batch_size.max_nodes_per_graph * CUDAPOA_MAX_NODE_EDGES * sizeof(uint16_t)));
GW_CU_CHECK_ERR(hipMallocManaged((void**)&node_coverage_counts, batch_size.max_nodes_per_graph * sizeof(uint16_t)));
GW_CU_CHECK_ERR(hipMallocManaged((void**)&node_alignments, batch_size.max_nodes_per_graph * CUDAPOA_MAX_NODE_ALIGNMENTS * sizeof(SizeT)));
GW_CU_CHECK_ERR(hipMallocManaged((void**)&node_alignment_count, batch_size.max_nodes_per_graph * sizeof(uint16_t)));
GW_CU_CHECK_ERR(hipMallocManaged((void**)&predecessors, batch_size.max_nodes_per_graph * sizeof(SizeT)));
GW_CU_CHECK_ERR(hipMallocManaged((void**)&scores, batch_size.max_nodes_per_graph * sizeof(int32_t)));
GW_CU_CHECK_ERR(hipMallocManaged((void**)&consensus, batch_size.max_consensus_size * sizeof(uint8_t)));
GW_CU_CHECK_ERR(hipMallocManaged((void**)&coverage, batch_size.max_consensus_size * sizeof(uint16_t)));
//initialize all 'count' buffers
memset((void**)incoming_edge_count, 0, batch_size.max_nodes_per_graph * sizeof(uint16_t));
memset((void**)outgoing_edge_count, 0, batch_size.max_nodes_per_graph * sizeof(uint16_t));
memset((void**)node_coverage_counts, 0, batch_size.max_nodes_per_graph * sizeof(uint16_t));
memset((void**)node_alignment_count, 0, batch_size.max_nodes_per_graph * sizeof(uint16_t));
//calculate edge counts on host
obj.get_graph_buffers(nodes, node_count,
graph, node_id_to_pos,
incoming_edges, incoming_edge_count,
outgoing_edges, outgoing_edge_count,
incoming_edge_w, node_coverage_counts,
node_alignments, node_alignment_count);
// call the host wrapper of the generateConsensus kernel
generateConsensusTestHost<SizeT>(nodes,
*node_count,
graph,
node_id_to_pos,
incoming_edges,
incoming_edge_count,
outgoing_edges,
outgoing_edge_count,
incoming_edge_w,
predecessors,
scores,
consensus,
coverage,
node_coverage_counts,
node_alignments,
node_alignment_count,
batch_size.max_consensus_size);
GW_CU_CHECK_ERR(hipDeviceSynchronize());
//input and output buffers are the same ones in unified memory, so the results are updated in place
//copy the consensus sequence out of unified memory and return it as a string
std::string res((char*)consensus);
GW_CU_CHECK_ERR(hipFree(nodes));
GW_CU_CHECK_ERR(hipFree(node_count));
GW_CU_CHECK_ERR(hipFree(graph));
GW_CU_CHECK_ERR(hipFree(node_id_to_pos));
GW_CU_CHECK_ERR(hipFree(incoming_edges));
GW_CU_CHECK_ERR(hipFree(incoming_edge_count));
GW_CU_CHECK_ERR(hipFree(outgoing_edges));
GW_CU_CHECK_ERR(hipFree(outgoing_edge_count));
GW_CU_CHECK_ERR(hipFree(incoming_edge_w));
GW_CU_CHECK_ERR(hipFree(node_coverage_counts));
GW_CU_CHECK_ERR(hipFree(node_alignments));
GW_CU_CHECK_ERR(hipFree(node_alignment_count));
GW_CU_CHECK_ERR(hipFree(predecessors));
GW_CU_CHECK_ERR(hipFree(scores));
GW_CU_CHECK_ERR(hipFree(consensus));
GW_CU_CHECK_ERR(hipFree(coverage));
return res;
}
using ::testing::TestWithParam;
using ::testing::ValuesIn;
class GenerateConsensusTest : public TestWithParam<GenerateConsensusTestPair>
{
public:
void SetUp() {}
std::string runGenerateConsensus(const BasicGenerateConsensus& obj)
{
return testGenerateConsensus(obj);
}
};
TEST_P(GenerateConsensusTest, TestGenerateConsensuesCorrectness)
{
const auto test_case = GetParam();
EXPECT_EQ(test_case.first, runGenerateConsensus(test_case.second));
}
INSTANTIATE_TEST_SUITE_P(TestGenerateConsensus, GenerateConsensusTest, ValuesIn(getGenerateConsensusTestCases()));
} // namespace cudapoa
} // namespace genomeworks
} // namespace claraparabricks
|
a3a50326ae3edc3cfddfabd2018fbf91d5bf48c9.cu
|
/*
* Copyright 2019-2020 NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../src/cudapoa_generate_consensus.cuh" //generateConsensusHost, CUDAPOA_MAX_NODE_EDGES, CUDAPOA_MAX_NODE_ALIGNMENTS
#include "sorted_graph.hpp" //SortedGraph
#include <claraparabricks/genomeworks/utils/cudautils.hpp> //GW_CU_CHECK_ERR
#include <claraparabricks/genomeworks/utils/signed_integer_utils.hpp> //get_size
#include "gtest/gtest.h"
namespace claraparabricks
{
namespace genomeworks
{
namespace cudapoa
{
class BasicGenerateConsensus
{
public:
BasicGenerateConsensus(std::vector<uint8_t> nodes, std::vector<SizeT> sorted_graph, SizeTVec2D node_alignments,
SizeTVec2D outgoing_edges, std::vector<uint16_t> node_coverage_counts, Uint16Vec2D outgoing_edge_w)
: graph_(nodes, sorted_graph, node_alignments, node_coverage_counts, outgoing_edges)
, outgoing_edge_w_(outgoing_edge_w)
, outgoing_edges_(outgoing_edges)
{
}
void get_graph_buffers(uint8_t* nodes, SizeT* node_count,
SizeT* sorted_poa, SizeT* node_id_to_pos,
SizeT* incoming_edges, uint16_t* incoming_edge_count,
SizeT* outgoing_edges, uint16_t* outgoing_edge_count,
uint16_t* incoming_edge_w, uint16_t* node_coverage_counts,
SizeT* node_alignments, uint16_t* node_alignment_count) const
{
graph_.get_nodes(nodes, node_count);
graph_.get_sorted_graph(sorted_poa);
graph_.get_node_id_to_pos(node_id_to_pos);
graph_.get_node_coverage_counts(node_coverage_counts);
graph_.get_edges(incoming_edges, incoming_edge_count, outgoing_edges, outgoing_edge_count);
graph_.get_node_alignments(node_alignments, node_alignment_count);
get_incoming_edge_w(incoming_edge_w);
}
void get_incoming_edge_w(uint16_t* incoming_edge_w) const
{
auto outgoing_edges = graph_.get_outgoing_edges();
for (int i = 0; i < get_size(outgoing_edges); i++)
{
for (int j = 0; j < get_size(outgoing_edges[i]); j++)
{
SizeT to_node = outgoing_edges[i][j];
incoming_edge_w[to_node * CUDAPOA_MAX_NODE_EDGES + i] = outgoing_edge_w_[i][j];
}
}
}
protected:
SortedGraph graph_;
SizeTVec2D outgoing_edges_;
Uint16Vec2D outgoing_edge_w_;
};
typedef std::pair<std::string, BasicGenerateConsensus> GenerateConsensusTestPair;
// create a vector of test cases
std::vector<GenerateConsensusTestPair> getGenerateConsensusTestCases()
{
std::vector<GenerateConsensusTestPair> test_cases;
/*
* T
* / \
* graph A — A A
* \ /
* A
*/
std::string ans_1 = "ATAA";
BasicGenerateConsensus gc_1({'A', 'A', 'A', 'A', 'T'}, //nodes
{0, 1, 2, 4, 3}, //sorted_graph
{{}, {}, {4}, {}, {2}}, //node_alignments
{{1}, {2, 4}, {3}, {}, {3}}, //outgoing_edges
{2, 2, 1, 2, 1}, //node_coverage_counts
{{5}, {4, 3}, {2}, {}, {1}}); //outgoing_edge_w
test_cases.emplace_back(std::move(ans_1), std::move(gc_1));
/*
* graph A — T — C — G — A
*/
std::string ans_2 = "AGCTA";
BasicGenerateConsensus gc_2({'A', 'T', 'C', 'G', 'A'}, //nodes
{0, 1, 2, 3, 4}, //sorted_graph
{{}, {}, {}, {}, {}}, //node_alignments
{{1}, {2}, {3}, {4}, {}}, //outgoing_edges
{1, 1, 1, 1, 1}, //node_coverage_counts
{{4}, {3}, {2}, {1}, {}});
test_cases.emplace_back(std::move(ans_2), std::move(gc_2));
/*
* T
* / \
* graph A — C — C — G
* \ /
* A
*/
std::string ans_3 = "GCCA";
BasicGenerateConsensus gc_3({'A', 'A', 'C', 'G', 'C', 'T'}, //nodes
{0, 1, 4, 5, 2, 3}, //sorted_graph
{{}, {4, 5}, {}, {}, {1, 5}, {1, 4}}, //node_alignments
{{1, 4, 5}, {2}, {3}, {}, {2}, {2}}, //outgoing_edges
{3, 1, 3, 3, 1, 1}, //node_coverage_counts
{{7, 6, 5}, {4}, {3}, {}, {2}, {1}});
test_cases.emplace_back(std::move(ans_3), std::move(gc_3));
/*
* graph A — T — T — G — A
* \_____________/
*/
std::string ans_4 = "AGTTA";
BasicGenerateConsensus gc_4({'A', 'T', 'T', 'G', 'A'}, //nodes
{0, 1, 2, 3, 4}, //sorted_graph
{{}, {}, {}, {}, {}}, //node_alignments
{{1, 4}, {2}, {3}, {4}, {}}, //outgoing_edges
{2, 1, 1, 1, 2}, //node_coverage_counts
{{5, 4}, {3}, {2}, {1}, {}});
test_cases.emplace_back(std::move(ans_4), std::move(gc_4));
/*
* T — G
* / \
* graph A — C — A — T — A
* \ /
* T
*/
std::string ans_5 = "ATTCA";
BasicGenerateConsensus gc_5({'A', 'T', 'G', 'T', 'A', 'C', 'A', 'T'}, //nodes
{0, 1, 5, 2, 6, 7, 3, 4}, //sorted_graph
{{}, {5}, {6, 7}, {}, {}, {1}, {2, 7}, {2, 6}}, //node_alignments
{{1, 5}, {2}, {3}, {4}, {}, {6, 7}, {3}, {3}}, //outgoing_edges
{3, 1, 1, 3, 3, 2, 1, 1}, //node_coverage_counts
{{9, 8}, {7}, {6}, {5}, {}, {4, 3}, {2}, {1}});
test_cases.emplace_back(std::move(ans_5), std::move(gc_5));
//add more test cases below
return test_cases;
}
// host function for calling the kernel to test the generateConsensus device function.
std::string testGenerateConsensus(const BasicGenerateConsensus& obj)
{
//declare device buffer
uint8_t* nodes;
SizeT* node_count;
SizeT* graph;
SizeT* node_id_to_pos;
SizeT* incoming_edges;
uint16_t* incoming_edge_count;
SizeT* outgoing_edges;
uint16_t* outgoing_edge_count;
uint16_t* incoming_edge_w;
uint16_t* node_coverage_counts;
SizeT* node_alignments;
uint16_t* node_alignment_count;
//buffers that don't need initialization
SizeT* predecessors;
int32_t* scores;
uint8_t* consensus;
uint16_t* coverage;
//default data size limits
BatchConfig batch_size;
//allocate unified memory so they can be accessed by both host and device.
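//(managed allocations let the host-side test fixture fill these buffers directly and the kernel read them,
//without explicit host<->device copies)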
GW_CU_CHECK_ERR(cudaMallocManaged((void**)&nodes, batch_size.max_nodes_per_graph * sizeof(uint8_t)));
GW_CU_CHECK_ERR(cudaMallocManaged((void**)&node_count, sizeof(SizeT)));
GW_CU_CHECK_ERR(cudaMallocManaged((void**)&graph, batch_size.max_nodes_per_graph * sizeof(SizeT)));
GW_CU_CHECK_ERR(cudaMallocManaged((void**)&node_id_to_pos, batch_size.max_nodes_per_graph * sizeof(SizeT)));
GW_CU_CHECK_ERR(cudaMallocManaged((void**)&incoming_edges, batch_size.max_nodes_per_graph * CUDAPOA_MAX_NODE_EDGES * sizeof(SizeT)));
GW_CU_CHECK_ERR(cudaMallocManaged((void**)&incoming_edge_count, batch_size.max_nodes_per_graph * sizeof(uint16_t)));
GW_CU_CHECK_ERR(cudaMallocManaged((void**)&outgoing_edges, batch_size.max_nodes_per_graph * CUDAPOA_MAX_NODE_EDGES * sizeof(SizeT)));
GW_CU_CHECK_ERR(cudaMallocManaged((void**)&outgoing_edge_count, batch_size.max_nodes_per_graph * sizeof(uint16_t)));
GW_CU_CHECK_ERR(cudaMallocManaged((void**)&incoming_edge_w, batch_size.max_nodes_per_graph * CUDAPOA_MAX_NODE_EDGES * sizeof(uint16_t)));
GW_CU_CHECK_ERR(cudaMallocManaged((void**)&node_coverage_counts, batch_size.max_nodes_per_graph * sizeof(uint16_t)));
GW_CU_CHECK_ERR(cudaMallocManaged((void**)&node_alignments, batch_size.max_nodes_per_graph * CUDAPOA_MAX_NODE_ALIGNMENTS * sizeof(SizeT)));
GW_CU_CHECK_ERR(cudaMallocManaged((void**)&node_alignment_count, batch_size.max_nodes_per_graph * sizeof(uint16_t)));
GW_CU_CHECK_ERR(cudaMallocManaged((void**)&predecessors, batch_size.max_nodes_per_graph * sizeof(SizeT)));
GW_CU_CHECK_ERR(cudaMallocManaged((void**)&scores, batch_size.max_nodes_per_graph * sizeof(int32_t)));
GW_CU_CHECK_ERR(cudaMallocManaged((void**)&consensus, batch_size.max_consensus_size * sizeof(uint8_t)));
GW_CU_CHECK_ERR(cudaMallocManaged((void**)&coverage, batch_size.max_consensus_size * sizeof(uint16_t)));
//initialize all 'count' buffers
memset((void**)incoming_edge_count, 0, batch_size.max_nodes_per_graph * sizeof(uint16_t));
memset((void**)outgoing_edge_count, 0, batch_size.max_nodes_per_graph * sizeof(uint16_t));
memset((void**)node_coverage_counts, 0, batch_size.max_nodes_per_graph * sizeof(uint16_t));
memset((void**)node_alignment_count, 0, batch_size.max_nodes_per_graph * sizeof(uint16_t));
//calculate edge counts on host
obj.get_graph_buffers(nodes, node_count,
graph, node_id_to_pos,
incoming_edges, incoming_edge_count,
outgoing_edges, outgoing_edge_count,
incoming_edge_w, node_coverage_counts,
node_alignments, node_alignment_count);
// call the host wrapper of the generateConsensus kernel
generateConsensusTestHost<SizeT>(nodes,
*node_count,
graph,
node_id_to_pos,
incoming_edges,
incoming_edge_count,
outgoing_edges,
outgoing_edge_count,
incoming_edge_w,
predecessors,
scores,
consensus,
coverage,
node_coverage_counts,
node_alignments,
node_alignment_count,
batch_size.max_consensus_size);
GW_CU_CHECK_ERR(cudaDeviceSynchronize());
//input and output buffers are the same ones in unified memory, so the results are updated in place
//copy the consensus sequence out of unified memory and return it as a string
std::string res((char*)consensus);
GW_CU_CHECK_ERR(cudaFree(nodes));
GW_CU_CHECK_ERR(cudaFree(node_count));
GW_CU_CHECK_ERR(cudaFree(graph));
GW_CU_CHECK_ERR(cudaFree(node_id_to_pos));
GW_CU_CHECK_ERR(cudaFree(incoming_edges));
GW_CU_CHECK_ERR(cudaFree(incoming_edge_count));
GW_CU_CHECK_ERR(cudaFree(outgoing_edges));
GW_CU_CHECK_ERR(cudaFree(outgoing_edge_count));
GW_CU_CHECK_ERR(cudaFree(incoming_edge_w));
GW_CU_CHECK_ERR(cudaFree(node_coverage_counts));
GW_CU_CHECK_ERR(cudaFree(node_alignments));
GW_CU_CHECK_ERR(cudaFree(node_alignment_count));
GW_CU_CHECK_ERR(cudaFree(predecessors));
GW_CU_CHECK_ERR(cudaFree(scores));
GW_CU_CHECK_ERR(cudaFree(consensus));
GW_CU_CHECK_ERR(cudaFree(coverage));
return res;
}
using ::testing::TestWithParam;
using ::testing::ValuesIn;
class GenerateConsensusTest : public TestWithParam<GenerateConsensusTestPair>
{
public:
void SetUp() {}
std::string runGenerateConsensus(const BasicGenerateConsensus& obj)
{
return testGenerateConsensus(obj);
}
};
TEST_P(GenerateConsensusTest, TestGenerateConsensuesCorrectness)
{
const auto test_case = GetParam();
EXPECT_EQ(test_case.first, runGenerateConsensus(test_case.second));
}
INSTANTIATE_TEST_SUITE_P(TestGenerateConsensus, GenerateConsensusTest, ValuesIn(getGenerateConsensusTestCases()));
} // namespace cudapoa
} // namespace genomeworks
} // namespace claraparabricks
|
6523dbda7942937db1e45c7ec21baf9700f29ff1.hip
|
// !!! This is a file automatically generated by hipify!!!
// ---------------------------------------------------------------------------
// NWQsim: Northwest Quantum Circuit Simulation Environment
// ---------------------------------------------------------------------------
// Ang Li, Senior Computer Scientist
// Pacific Northwest National Laboratory(PNNL), U.S.
// Homepage: http://www.angliphd.com
// GitHub repo: http://www.github.com/pnnl/dm-sim
// PNNL-IPID: 32166, ECCN: EAR99, IR: PNNL-SA-161181
// BSD License.
// ---------------------------------------------------------------------------
// File: dmsim_python.cpp
// Python wrapper via Pybind11 for DMSim
// ---------------------------------------------------------------------------
#include <stdio.h>
#include <string>
#include <bitset>
#include <pybind11/pybind11.h>
#include <pybind11/numpy.h>
#include <pybind11/complex.h>
#include "config.h"
#include "util.h"
#include "svsim_nvgpu_mpi_hip.cuh"
namespace py = pybind11;
using namespace NWQSim;
PYBIND11_MODULE(libsvsim, m)
{
py::class_<Simulation>(m, "Simulation")
.def(py::init<>())
//Basis Gate definition
.def("X", &Simulation::X)
.def("ID", &Simulation::ID)
.def("RZ", &Simulation::RZ)
.def("SX", &Simulation::SX)
.def("CX", &Simulation::CX)
.def("M", &Simulation::M)
.def("MA", &Simulation::MA)
.def("RESET",&Simulation::RESET)
//Simulator operations
.def("set_seed", &Simulation::set_seed)
.def("reset_sim", &Simulation::reset_sim)
.def("reset_circuit", &Simulation::reset_circuit)
.def("get_qubits", &Simulation::get_n_qubits)
.def("get_gates", &Simulation::get_n_gates)
.def("run", &Simulation::sim)
.def("measure", &Simulation::measure)
.def("measure_all",[](Simulation &s, unsigned repetition) -> py::list{
IdxType* m_rtn = s.measure_all(repetition);
py::list rtn;
for (unsigned i=0; i<repetition; i++)
{
std::string s = std::bitset<32>(m_rtn[i]).to_string();
rtn.append(s);
//rtn.append(m_rtn[i]);
}
return rtn;})
//Get result density matrix in numpy format
.def("get_sv_np", [](Simulation &s) -> py::array_t<complex<ValType>, py::array::c_style | py::array::forcecast> {
py::array_t<complex<ValType>> result = py::array_t<complex<ValType>>(s.dim);
py::buffer_info buf = result.request();
complex<double>* val = (complex<double>*)buf.ptr;
for (IdxType i=0; i<s.dim; i++)
{
val[i].real(s.sv_real_cpu[i]);
val[i].imag(s.sv_imag_cpu[i]);
}
IdxType size = (IdxType)1<<(s.n_qubits);
result.resize({size,size});
return result;
})
//Get result density matrix in nested lists with respect to Qiskit definition: https://qiskit.org/documentation/_modules/qiskit/result/result.html#Result
.def("get_sv_list", [](Simulation &s) -> py::list {
py::list result;
for (IdxType i=0; i<s.dim; i++)
{
py::list element;
element.append(s.sv_real_cpu[i]);
element.append(s.sv_imag_cpu[i]);
result.append(element);
}
return result;
})
;
}
|
6523dbda7942937db1e45c7ec21baf9700f29ff1.cu
|
// ---------------------------------------------------------------------------
// NWQsim: Northwest Quantum Circuit Simulation Environment
// ---------------------------------------------------------------------------
// Ang Li, Senior Computer Scientist
// Pacific Northwest National Laboratory(PNNL), U.S.
// Homepage: http://www.angliphd.com
// GitHub repo: http://www.github.com/pnnl/dm-sim
// PNNL-IPID: 32166, ECCN: EAR99, IR: PNNL-SA-161181
// BSD License.
// ---------------------------------------------------------------------------
// File: dmsim_python.cpp
// Python wrapper via Pybind11 for DMSim
// ---------------------------------------------------------------------------
#include <stdio.h>
#include <string>
#include <bitset>
#include <pybind11/pybind11.h>
#include <pybind11/numpy.h>
#include <pybind11/complex.h>
#include "config.h"
#include "util.h"
#include "svsim_nvgpu_mpi.cuh"
namespace py = pybind11;
using namespace NWQSim;
PYBIND11_MODULE(libsvsim, m)
{
py::class_<Simulation>(m, "Simulation")
.def(py::init<>())
//Basis Gate definition
.def("X", &Simulation::X)
.def("ID", &Simulation::ID)
.def("RZ", &Simulation::RZ)
.def("SX", &Simulation::SX)
.def("CX", &Simulation::CX)
.def("M", &Simulation::M)
.def("MA", &Simulation::MA)
.def("RESET",&Simulation::RESET)
//Simulator operations
.def("set_seed", &Simulation::set_seed)
.def("reset_sim", &Simulation::reset_sim)
.def("reset_circuit", &Simulation::reset_circuit)
.def("get_qubits", &Simulation::get_n_qubits)
.def("get_gates", &Simulation::get_n_gates)
.def("run", &Simulation::sim)
.def("measure", &Simulation::measure)
.def("measure_all",[](Simulation &s, unsigned repetition) -> py::list{
IdxType* m_rtn = s.measure_all(repetition);
py::list rtn;
for (unsigned i=0; i<repetition; i++)
{
std::string s = std::bitset<32>(m_rtn[i]).to_string();
rtn.append(s);
//rtn.append(m_rtn[i]);
}
return rtn;})
//Get result density matrix in numpy format
.def("get_sv_np", [](Simulation &s) -> py::array_t<complex<ValType>, py::array::c_style | py::array::forcecast> {
py::array_t<complex<ValType>> result = py::array_t<complex<ValType>>(s.dim);
py::buffer_info buf = result.request();
complex<double>* val = (complex<double>*)buf.ptr;
for (IdxType i=0; i<s.dim; i++)
{
val[i].real(s.sv_real_cpu[i]);
val[i].imag(s.sv_imag_cpu[i]);
}
IdxType size = (IdxType)1<<(s.n_qubits);
result.resize({size,size});
return result;
})
//Get result density matrix in nested lists with respect to Qiskit definition: https://qiskit.org/documentation/_modules/qiskit/result/result.html#Result
.def("get_sv_list", [](Simulation &s) -> py::list {
py::list result;
for (IdxType i=0; i<s.dim; i++)
{
py::list element;
element.append(s.sv_real_cpu[i]);
element.append(s.sv_imag_cpu[i]);
result.append(element);
}
return result;
})
;
}
|
05d34c2864446d7e06011eada1946f63f2edcb53.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "tensors/device_gpu.h"
#include <hip/hip_runtime.h>
#include <iostream>
#include "kernels/cuda_helpers.h"
namespace marian {
DeviceGPU::~DeviceGPU() {
hipSetDevice(device_);
if(data_) {
CUDA_CHECK(hipFree(data_));
}
hipDeviceSynchronize();
}
void DeviceGPU::reserve(size_t size) {
size = align(size);
hipSetDevice(device_);
UTIL_THROW_IF2(size < size_, "New size must be larger than old size");
if(data_) {
// Allocate memory by going through host memory
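// (there is no realloc for device buffers, so the old contents are staged to a temporary host buffer,
// the device buffer is re-allocated at the new size, and the data is copied back)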
uint8_t *temp = new uint8_t[size_];
CUDA_CHECK(hipMemcpy(temp, data_, size_, hipMemcpyDeviceToHost));
CUDA_CHECK(hipFree(data_));
CUDA_CHECK(hipMalloc(&data_, size));
CUDA_CHECK(hipMemcpy(data_, temp, size_, hipMemcpyHostToDevice));
delete[] temp;
} else {
CUDA_CHECK(hipMalloc(&data_, size));
}
size_ = size;
}
}
|
05d34c2864446d7e06011eada1946f63f2edcb53.cu
|
#include "tensors/device_gpu.h"
#include <cuda.h>
#include <iostream>
#include "kernels/cuda_helpers.h"
namespace marian {
DeviceGPU::~DeviceGPU() {
cudaSetDevice(device_);
if(data_) {
CUDA_CHECK(cudaFree(data_));
}
cudaDeviceSynchronize();
}
void DeviceGPU::reserve(size_t size) {
size = align(size);
cudaSetDevice(device_);
UTIL_THROW_IF2(size < size_, "New size must be larger than old size");
if(data_) {
// Allocate memory by going through host memory
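// (there is no realloc for device buffers, so the old contents are staged to a temporary host buffer,
// the device buffer is re-allocated at the new size, and the data is copied back)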
uint8_t *temp = new uint8_t[size_];
CUDA_CHECK(cudaMemcpy(temp, data_, size_, cudaMemcpyDeviceToHost));
CUDA_CHECK(cudaFree(data_));
CUDA_CHECK(cudaMalloc(&data_, size));
CUDA_CHECK(cudaMemcpy(data_, temp, size_, cudaMemcpyHostToDevice));
delete[] temp;
} else {
CUDA_CHECK(cudaMalloc(&data_, size));
}
size_ = size;
}
}
|
b69aa085e11af5058ad0149a004a2f6f5817c590.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <thrust/host_vector.h>
#include <thrust/binary_search.h>
#include <thrust/device_vector.h>
#include <thrust/for_each.h>
#include <thrust/execution_policy.h>
#include <thrust/pair.h>
#include <hip/hip_runtime.h>
#include <vector>
#include "mypair.h"
#include "sprp_search.h"
#define MAX_COMPOSITE 1UL << 32
#define NUM_PAIRS 500000
#define NUM_THREADS 512
using namespace thrust;
// Helper function for modular exponentiation.
// Returns a^e (mod n)
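// e.g. modexp(2, 10, 1000) == 24, since 2^10 = 1024 and 1024 mod 1000 = 24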
__host__ __device__ unsigned long long modexp(unsigned long long a, unsigned long long e, unsigned long long n) {
unsigned long long res = 1;
a = a % n; // Compute a mod n first (if a > n)
while (e > 0)
{
// exponent is odd
if (e & 1)
res = (res * a) % n;
// exponent is even
e = e >> 1; // Shift right one (divide by 2)
a = (a * a) % n; // Set a = a^2 mod n
}
return res;
}
// Called each iteration of witness loop.
// Returns false if composite or true if probably prime
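// The caller passes d as the odd part of n-1 (n - 1 == 2^r * d); the loop below repeatedly squares x
// and doubles d until d reaches n-1, checking the usual Miller-Rabin conditions along the way.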
__host__ __device__ bool witnessTest(unsigned long long a, unsigned long long d, unsigned long long n) {
unsigned long long x = modexp(a, d, n);
if (x == 1ULL || x == n-1)
return true;
// Iterate r times (2^r * d = n - 1)
while (d != n-1) {
x = (x * x) % n;
d *= 2;
if (x == 1) {
return false;
}
if (x == n-1) {
return true;
}
}
return false;
}
// Calls witnessTest for integers "a" and "b" for a known composite number "n"
// Returns true if both integers identify "n" as prime, i.e. "n" is a pseudoprime for a-SPRP and b-SPRP
// Returns false otherwise
__host__ __device__ bool pairSPRPTest(unsigned long long a, unsigned long long b, unsigned long long d, unsigned long long n) {
if (witnessTest(a, d, n) && witnessTest(b, d, n)) {
return true;
}
return false;
}
__host__ __device__ bool isComposite(unsigned *primes, unsigned num, MyPair &mypair) {
if (num == primes[mypair.first_prime_pos]) {
mypair.first_prime_pos += 1;
return false;
} else {
return true;
}
}
class findFirstCompositesPairFunctor
{
private:
unsigned *primes;
unsigned composite_end;
public:
__host__ __device__ findFirstCompositesPairFunctor(unsigned *d_primes, unsigned c_end) {
primes = d_primes;
composite_end = c_end;
}
__host__ __device__
void operator()(MyPair &mypair) {
bool foundComposite = false;
for (unsigned j = mypair.first_composite; j <= composite_end; j += 2) {
if (isComposite(primes, j, mypair)) {
unsigned d = j - 1;
while (d % 2 == 0) {
d /= 2;
}
if (pairSPRPTest(mypair.a, mypair.b, d, j)) {
mypair.first_composite = j;
foundComposite = true;
break;
}
}
}
if (!foundComposite) {
mypair.first_composite = composite_end;
}
}
};
void findFirstComposites(std::vector<MyPair> &pairs, std::vector<unsigned> &primes, unsigned composite_end) {
if (pairs.empty()) {
return;
}
device_vector<MyPair> d_pairs(pairs);
device_vector<unsigned> d_primes(primes);
unsigned *d_primes_ptr = thrust::raw_pointer_cast(d_primes.data());
for_each(thrust::device, d_pairs.begin(), d_pairs.end(), findFirstCompositesPairFunctor(d_primes_ptr, composite_end));
thrust::copy(d_pairs.begin(), d_pairs.end(), pairs.begin());
}
|
b69aa085e11af5058ad0149a004a2f6f5817c590.cu
|
#include <thrust/host_vector.h>
#include <thrust/binary_search.h>
#include <thrust/device_vector.h>
#include <thrust/for_each.h>
#include <thrust/execution_policy.h>
#include <thrust/pair.h>
#include <cuda.h>
#include <vector>
#include "mypair.h"
#include "sprp_search.h"
#define MAX_COMPOSITE 1UL << 32
#define NUM_PAIRS 500000
#define NUM_THREADS 512
using namespace thrust;
// Helper function for modular exponentiation.
// Returns a^e (mod n)
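// e.g. modexp(2, 10, 1000) == 24, since 2^10 = 1024 and 1024 mod 1000 = 24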
__host__ __device__ unsigned long long modexp(unsigned long long a, unsigned long long e, unsigned long long n) {
unsigned long long res = 1;
a = a % n; // Compute a mod n first (if a > n)
while (e > 0)
{
// exponent is odd
if (e & 1)
res = (res * a) % n;
// exponent is even
e = e >> 1; // Shift right one (divide by 2)
a = (a * a) % n; // Set a = a^2 mod n
}
return res;
}
// Called each iteration of witness loop.
// Returns false if composite or true if probably prime
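// The caller passes d as the odd part of n-1 (n - 1 == 2^r * d); the loop below repeatedly squares x
// and doubles d until d reaches n-1, checking the usual Miller-Rabin conditions along the way.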
__host__ __device__ bool witnessTest(unsigned long long a, unsigned long long d, unsigned long long n) {
unsigned long long x = modexp(a, d, n);
if (x == 1ULL || x == n-1)
return true;
// Iterate r times (2^r * d = n - 1)
while (d != n-1) {
x = (x * x) % n;
d *= 2;
if (x == 1) {
return false;
}
if (x == n-1) {
return true;
}
}
return false;
}
// Calls witnessTest for integers "a" and "b" for a known composite number "n"
// Returns true if both integers identify "n" as prime, i.e. "n" is a pseudoprime for a-SPRP and b-SPRP
// Returns false otherwise
__host__ __device__ bool pairSPRPTest(unsigned long long a, unsigned long long b, unsigned long long d, unsigned long long n) {
if (witnessTest(a, d, n) && witnessTest(b, d, n)) {
return true;
}
return false;
}
__host__ __device__ bool isComposite(unsigned *primes, unsigned num, MyPair &mypair) {
if (num == primes[mypair.first_prime_pos]) {
mypair.first_prime_pos += 1;
return false;
} else {
return true;
}
}
class findFirstCompositesPairFunctor
{
private:
unsigned *primes;
unsigned composite_end;
public:
__host__ __device__ findFirstCompositesPairFunctor(unsigned *d_primes, unsigned c_end) {
primes = d_primes;
composite_end = c_end;
}
__host__ __device__
void operator()(MyPair &mypair) {
bool foundComposite = false;
for (unsigned j = mypair.first_composite; j <= composite_end; j += 2) {
if (isComposite(primes, j, mypair)) {
unsigned d = j - 1;
while (d % 2 == 0) {
d /= 2;
}
if (pairSPRPTest(mypair.a, mypair.b, d, j)) {
mypair.first_composite = j;
foundComposite = true;
break;
}
}
}
if (!foundComposite) {
mypair.first_composite = composite_end;
}
}
};
void findFirstComposites(std::vector<MyPair> &pairs, std::vector<unsigned> &primes, unsigned composite_end) {
if (pairs.empty()) {
return;
}
device_vector<MyPair> d_pairs(pairs);
device_vector<unsigned> d_primes(primes);
unsigned *d_primes_ptr = thrust::raw_pointer_cast(d_primes.data());
for_each(thrust::device, d_pairs.begin(), d_pairs.end(), findFirstCompositesPairFunctor(d_primes_ptr, composite_end));
thrust::copy(d_pairs.begin(), d_pairs.end(), pairs.begin());
}
|
da664394a439c9973ca2a910c2250987be94e8a7.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#if GOOGLE_CUDA
#include "ew_op_gpu.h"
#include <stdio.h>
// xmean = x - mean(x, axis=1)
// xvar = var(x, axis=1)
// xstdr = rcp(sqrt(xvar + epsilon))
// xhat = xmean * xstdr
// y = xhat*g + b
template <typename T, typename V, int THREADS>
__global__ void __launch_bounds__(THREADS) layer_norm_NC(
T* Y,
float* Mean,
float* Rstd,
const T* __restrict__ X,
const V* __restrict__ G,
const V* __restrict__ B,
float epsilon, uint K, float rcpK, int relu)
{
uint tid = threadIdx.x;
uint n = blockIdx.x;
uint offset = n*K + tid;
// Mean
V v_mean1, v_mean2;
ew_zero(v_mean1);
ew_zero(v_mean2);
#pragma unroll 4
for (uint k = tid, offsetX = offset; k < K; k += THREADS, offsetX += THREADS)
{
// Single pass over X to compute mean and variance
// var(x) == mean(x**2) - mean(x)**2
V x = load(add_ptr_u(X, offsetX));
v_mean1 = ew_add(v_mean1, x);
v_mean2 = ew_add(v_mean2, ew_sqr(x));
}
float2 stats;
stats.x = ew_sum(v_mean1) * rcpK;
stats.y = ew_sum(v_mean2) * rcpK;
// reduce within warp
for (int i = 16; i > 0; i >>= 1)
stats = ew_warp_sum(stats, i);
// if using more than 1 warp, further reduced with shared memory
if (THREADS > 32)
{
__shared__ float2 Share[32];
// first thread of each warp store to shared
if ((tid & 31) == 0)
Share[tid/32] = stats;
__syncthreads();
if (tid < 32)
{
// first warp loads all prior reductions
stats = Share[tid];
// reduce within this first warp
for (int i = THREADS/64; i > 0; i >>= 1)
stats = ew_warp_sum(stats, i);
// final reduction to shared
Share[tid] = stats;
}
__syncthreads();
// broadcast result to all threads
stats = Share[0];
}
// var = avg(x**2) - avg(x)**2
// rstd = 1/sqrt(var)
float mean = stats.x;
float rstd = rsqrtf(precise_sub(stats.y, ew_sqr(mean)) + epsilon);
if (tid == 0)
{
Mean[n] = mean;
Rstd[n] = rstd;
}
// Norm/Gain/Bias
#pragma unroll 4
for (uint k = tid; k < K; k += THREADS, offset += THREADS)
{
V x = load(add_ptr_u(X, offset));
V g = load(G, k);
V b = load(B, k);
V xhat = ew_mul(ew_sub(x, mean), rstd);
V y = ew_add(ew_mul(xhat, g), b);
if (relu)
y = ew_relu(y);
store(add_ptr_u(Y, offset), y);
}
}
template <typename T, typename V>
bool LayerNormForward_NC(hipStream_t stream, int SMs,
T* y,
float* mean,
float* rstd,
const T* x,
const float* g,
const float* b,
float epsilon, int K, int N, float rcpK, int relu)
{
dim3 grid(N, 1, 1);
if ((K & 3) == 0)
{
K >>= 2; // use vector loads
V* Y = (V*)y;
const V* X = (const V*)x;
const float4* G = (const float4*)g;
const float4* B = (const float4*)b;
if (K >= 256)
hipLaunchKernelGGL(( layer_norm_NC<V,float4,256>), dim3(grid), dim3(256),0,stream, Y, mean, rstd, X, G, B, epsilon, K, rcpK, relu);
else
hipLaunchKernelGGL(( layer_norm_NC<V,float4, 32>), dim3(grid), dim3(32),0,stream, Y, mean, rstd, X, G, B, epsilon, K, rcpK, relu);
}
else
{
if (K >= 256)
hipLaunchKernelGGL(( layer_norm_NC<T,float ,256>), dim3(grid), dim3(256),0,stream, y, mean, rstd, x, g, b, epsilon, K, rcpK, relu);
else
hipLaunchKernelGGL(( layer_norm_NC<T,float , 32>), dim3(grid), dim3(32),0,stream, y, mean, rstd, x, g, b, epsilon, K, rcpK, relu);
}
return true; // TODO
}
template bool LayerNormForward_NC<float,float4>(hipStream_t stream, int SMs, float* y, float* mean, float* rstd, const float* x, const float* g, const float* b, float epsilon, int K, int N, float rcpK, int relu);
template bool LayerNormForward_NC<ehalf,ehalf4>(hipStream_t stream, int SMs, ehalf* y, float* mean, float* rstd, const ehalf* x, const float* g, const float* b, float epsilon, int K, int N, float rcpK, int relu);
template bool LayerNormForward_NC<bhalf,bhalf4>(hipStream_t stream, int SMs, bhalf* y, float* mean, float* rstd, const bhalf* x, const float* g, const float* b, float epsilon, int K, int N, float rcpK, int relu);
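// Hedged usage sketch, not from the original source: the wrapper above is
// normally driven by a framework op, but a direct call could look like the
// function below. The d_* buffer names are hypothetical and are assumed to be
// device allocations of size N*K (x, y), N (mean, rstd) and K (g, b);
// rcpK must equal 1.0f/K.
static bool launch_layer_norm_forward_example(hipStream_t stream, int SMs,
    float* d_y, float* d_mean, float* d_rstd,
    const float* d_x, const float* d_g, const float* d_b,
    int K, int N)
{
    return LayerNormForward_NC<float,float4>(stream, SMs,
        d_y, d_mean, d_rstd, d_x, d_g, d_b,
        1e-5f, K, N, 1.0f / (float)K, /*relu=*/0);
}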
// Sum across the N axis requires a separate kernel.
// dg = sum(dy * xhat(x), axis=0)
// db = sum(dy, axis=0)
// Don't use vector loads here as we want to maximize the number of blocks
template <typename T, int U>
__global__ void __launch_bounds__(32) layer_norm_dg_db_NC(
float* DG,
float* DB,
const T* __restrict__ DY,
const T* __restrict__ X,
const float* __restrict__ Gain,
const float* __restrict__ Bias,
const float* __restrict__ Mean,
const float* __restrict__ Rstd,
float epsilon, int K, int N, int relu)
{
int tid = threadIdx.x;
int bid = blockIdx.x;
int shift = 5 - U; // 4
int mask = (1 << shift) - 1; // 15
int k = (bid << shift) + (tid & mask); // b*16 + 0-15
int n0 = (tid >> shift) << 2; // 0,4
int nk = n0*K + k;
bool b = k < K;
int strideK = K << (2 + U);
float gain = 1.0f, bias = 0.0f, dg = 0.0f, db = 0.0f;
if (b && relu)
{
gain = Gain[k];
bias = Bias[k];
}
for (int n = n0; n < N; n += (4 << U))
{
int n1 = n + 1;
int n2 = n + 2;
int n3 = n + 3;
int nk1 = nk + K;
int nk2 = nk1 + K;
int nk3 = nk2 + K;
float x0 = load( X, nk, b);
float x1 = load( X, nk1, b && (n1 < N));
float x2 = load( X, nk2, b && (n2 < N));
float x3 = load( X, nk3, b && (n3 < N));
float dy0 = load(DY, nk, b);
float dy1 = load(DY, nk1, b && (n1 < N));
float dy2 = load(DY, nk2, b && (n2 < N));
float dy3 = load(DY, nk3, b && (n3 < N));
float mean0 = Mean[n];
float rstd0 = Rstd[n];
float mean1 = 0.0f, rstd1 = 0.0f;
float mean2 = 0.0f, rstd2 = 0.0f;
float mean3 = 0.0f, rstd3 = 0.0f;
if (n1 < N)
{
mean1 = Mean[n1];
rstd1 = Rstd[n1];
}
if (n2 < N)
{
mean2 = Mean[n2];
rstd2 = Rstd[n2];
}
if (n3 < N)
{
mean3 = Mean[n3];
rstd3 = Rstd[n3];
}
float xhat0 = (x0 - mean0) * rstd0;
float xhat1 = (x1 - mean1) * rstd1;
float xhat2 = (x2 - mean2) * rstd2;
float xhat3 = (x3 - mean3) * rstd3;
if (relu)
{
dy0 = ew_relu_grad(dy0, xhat0 * gain + bias);
dy1 = ew_relu_grad(dy1, xhat1 * gain + bias);
dy2 = ew_relu_grad(dy2, xhat2 * gain + bias);
dy3 = ew_relu_grad(dy3, xhat3 * gain + bias);
}
dg += dy0 * xhat0;
dg += dy1 * xhat1;
dg += dy2 * xhat2;
dg += dy3 * xhat3;
db += dy0;
db += dy1;
db += dy2;
db += dy3;
nk += strideK;
}
#pragma unroll
for (int i = 16; i > (1 << (4-U)); i >>= 1)
{
dg += shfl_xor(dg, i);
db += shfl_xor(db, i);
}
store(DG, dg, k, b);
store(DB, db, k, b);
}
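// Illustrative sketch, not part of the original kernel: the loop above is a
// butterfly reduction over warp shuffles that deliberately stops at
// 1 << (4 - U) so lanes holding different 'k' columns are never mixed. The
// full 32-lane version of the same pattern, using the shfl_xor helper from
// ew_op_gpu.h that the kernel already relies on, would be:
static __device__ __forceinline__ float warp_sum_ref(float val)
{
    #pragma unroll
    for (int i = 16; i > 0; i >>= 1)
        val += shfl_xor(val, i); // after the loop every lane holds the warp total
    return val;
}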
// xmean = x - mean(x, axis=1)
// xvar = var(x, axis=1)
// xstdr = rcp(sqrt(xvar + epsilon))
// xhat = xmean * xstdr
// dy = dy * g
// sum1 = sum(xhat * dy, axis=1)
// sum2 = sum(dy, axis=1)
// dx = (dy - ((xhat * sum1 + sum2) * rcpK)) * xstdr
template <typename T, typename V, int THREADS>
__global__ void __launch_bounds__(THREADS) layer_norm_dx_NC(
T* DX,
const T* __restrict__ DY,
const T* __restrict__ X,
const V* __restrict__ Gain,
const V* __restrict__ Bias,
const float* __restrict__ Mean,
const float* __restrict__ Rstd,
float epsilon, int K, float rcpK, int relu)
{
__shared__ float Share1[32];
__shared__ float Share2[32];
int tid = threadIdx.x;
int n = blockIdx.x;
int offset = n*K + tid;
float mean = Mean[n];
float rstd = Rstd[n];
const T* X1 = X + offset;
const T* Y1 = DY + offset;
V v_sum1, v_sum2;
ew_zero(v_sum1);
ew_zero(v_sum2);
for (int k = tid; k < K; k += THREADS)
{
V x = load(X1);
V dy = load(Y1);
V g = load(Gain, k);
V b = load(Bias, k, relu != 0);
V xhat = ew_mul(ew_sub(x, mean), rstd);
if (relu)
dy = ew_relu_grad(dy, ew_add(ew_mul(xhat, g), b));
dy = ew_mul(dy, g);
v_sum1 = ew_add(v_sum1, ew_mul(dy, xhat));
v_sum2 = ew_add(v_sum2, dy);
X1 += THREADS;
Y1 += THREADS;
}
float sum1 = ew_sum(v_sum1);
float sum2 = ew_sum(v_sum2);
// reduce within warp
#pragma unroll
for (int i = 16; i > 0; i >>= 1)
{
sum1 += shfl_xor(sum1, i);
sum2 += shfl_xor(sum2, i);
}
// first thread of each warp store to shared
if ((tid & 31) == 0)
{
Share1[tid >> 5] = sum1;
Share2[tid >> 5] = sum2;
}
__syncthreads();
if (tid < 32)
{
// first warp loads all prior reductions
sum1 = Share1[tid];
sum2 = Share2[tid];
// reduce within this last warp
#pragma unroll
for (int i = THREADS/64; i > 0; i >>= 1)
{
sum1 += shfl_xor(sum1, i);
sum2 += shfl_xor(sum2, i);
}
// outputs final reduction to shared
Share1[tid] = sum1;
Share2[tid] = sum2;
}
__syncthreads();
// broadcast result to all threads
sum1 = Share1[0];
sum2 = Share2[0];
X += offset;
DY += offset;
DX += offset;
for (int k = tid; k < K; k += THREADS)
{
V x = load(X);
V dy = load(DY);
V g = load(Gain, k);
V b = load(Bias, k, relu != 0);
V xhat = ew_mul(ew_sub(x, mean), rstd);
if (relu)
dy = ew_relu_grad(dy, ew_add(ew_mul(xhat, g), b));
dy = ew_mul(dy, g);
// dx = (dy - ((xhat * sum1 + sum2) * rcpK)) * rstd;
V dx = ew_mul(ew_sub(dy, ew_mul(ew_add(ew_mul(xhat, sum1), sum2), rcpK)), rstd);
store(DX, dx);
X += THREADS;
DY += THREADS;
DX += THREADS;
}
}
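// Scalar reference sketch, not part of the original kernel: per row,
// layer_norm_dx_NC computes sum1 = sum(dy*g * xhat) and sum2 = sum(dy*g),
// then dx = (dy*g - (xhat*sum1 + sum2) * rcpK) * rstd. The final step for a
// single row in plain C++ (names hypothetical):
static void layer_norm_dx_ref(float* dx, const float* dy_g, const float* xhat,
                              float sum1, float sum2, float rstd, float rcpK, int K)
{
    for (int k = 0; k < K; ++k)
        dx[k] = (dy_g[k] - (xhat[k] * sum1 + sum2) * rcpK) * rstd;
}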
template <typename T, typename V>
bool LayerNormBackward_NC(hipStream_t stream, int SMs,
T* dx,
float* dg,
float* db,
const T* dy,
const T* x,
const float* g,
const float* b,
const float* mean,
const float* rstd,
float epsilon, int K, int N, float rcpK, int relu)
{
int K32 = K >> 5;
// optimize layer_norm_backward1 for highest occupancy
if (K32 >= 28*16)
{
int gridK = K32 + ((K & 31) != 0);
hipLaunchKernelGGL(( layer_norm_dg_db_NC<T,0>), dim3(gridK), dim3(32), 0, stream, dg, db, dy, x, g, b, mean, rstd, epsilon, K, N, relu);
}
else if (K32 >= 28*8)
{
int gridK = (K >> 4) + ((K & 15) != 0);
hipLaunchKernelGGL(( layer_norm_dg_db_NC<T,1>), dim3(gridK), dim3(32), 0, stream, dg, db, dy, x, g, b, mean, rstd, epsilon, K, N, relu);
}
else if (K32 >= 28*4)
{
int gridK = (K >> 3) + ((K & 7) != 0);
hipLaunchKernelGGL(( layer_norm_dg_db_NC<T,2>), dim3(gridK), dim3(32), 0, stream, dg, db, dy, x, g, b, mean, rstd, epsilon, K, N, relu);
}
else
{
int gridK = (K >> 2) + ((K & 3) != 0);
hipLaunchKernelGGL(( layer_norm_dg_db_NC<T,3>), dim3(gridK), dim3(32), 0, stream, dg, db, dy, x, g, b, mean, rstd, epsilon, K, N, relu);
}
if ((K & 3) == 0)
{
V* DX = ( V*)dx;
const V* DY = (const V*)dy; // in place op
const V* X = (const V*)x;
const float4* Gain = (const float4*)g;
const float4* Bias = (const float4*)b;
K >>= 2;
//if (K >= 1024)
// layer_norm_dx_NC<VB,VF,float4,1024><<<N,1024,0,stream>>>(DX, DY, X, mean, rstd, epsilon, K, rcpK);
if (K >= 256)
hipLaunchKernelGGL(( layer_norm_dx_NC<V,float4, 256>), dim3(N), dim3(256),0,stream, DX, DY, X, Gain, Bias, mean, rstd, epsilon, K, rcpK, relu);
else
hipLaunchKernelGGL(( layer_norm_dx_NC<V,float4, 64>), dim3(N), dim3(64),0,stream, DX, DY, X, Gain, Bias, mean, rstd, epsilon, K, rcpK, relu);
}
else
{
//if (K >= 1024)
// layer_norm_dx_NC<B,F,float,1024><<<N,1024,0,stream>>>(dx, (const B*)dx, x, mean, rstd, epsilon, K, rcpK);
if (K >= 256)
hipLaunchKernelGGL(( layer_norm_dx_NC<T,float, 256>), dim3(N), dim3(256),0,stream, dx, dy, x, g, b, mean, rstd, epsilon, K, rcpK, relu);
else
hipLaunchKernelGGL(( layer_norm_dx_NC<T,float, 64>), dim3(N), dim3(64),0,stream, dx, dy, x, g, b, mean, rstd, epsilon, K, rcpK, relu);
}
return true; // TODO
}
template bool LayerNormBackward_NC<float,float4>(hipStream_t stream, int SMs, float* dx, float* dg, float* db, const float* dy, const float* x, const float* g, const float* b, const float* mean, const float* rstd, float epsilon, int K, int N, float rcpK, int relu);
template bool LayerNormBackward_NC<ehalf,ehalf4>(hipStream_t stream, int SMs, ehalf* dx, float* dg, float* db, const ehalf* dy, const ehalf* x, const float* g, const float* b, const float* mean, const float* rstd, float epsilon, int K, int N, float rcpK, int relu);
template bool LayerNormBackward_NC<bhalf,bhalf4>(hipStream_t stream, int SMs, bhalf* dx, float* dg, float* db, const bhalf* dy, const bhalf* x, const float* g, const float* b, const float* mean, const float* rstd, float epsilon, int K, int N, float rcpK, int relu);
// xmean = x - mean(x, axis=1)
// xvar = var(x, axis=1)
// xstdr = rcp(sqrt(xvar + epsilon))
// xhat = xmean * xstdr
// y = xhat*g + b
template <typename T, typename V, int U>
__global__ void layer_norm_segmented_nc(
T* Y,
float* Mean,
float* Rstd,
const T* __restrict__ X,
const V* __restrict__ G,
const V* __restrict__ B,
float epsilon, uint N, uint SK, uint K, float rcpK, int relu, int thread2)
{
__shared__ float2 Share[32];
uint tid = threadIdx.x;
if (blockDim.x > 32)
{
// Allows non-power of 2 threads to work
float2 zero = {0.0f, 0.0f};
if (tid < 32)
Share[tid] = zero;
__syncthreads();
}
uint n = blockIdx.x;
uint s = blockIdx.y;
uint t = (tid & 0x3e0)*U + (tid & 31); // 0x3e0 = -32 & 1023
uint k = s*K + t;
uint m = s*N + n;
uint offset = n*SK + k;
// Load X
V xval[U];
X = add_ptr_u(X, offset);
for (int i = 0; i < U; i++)
xval[i] = load(X, i*32, t + i*32 < K);
// Begin mean/variance reductions
V mean1[U], mean2[U];
for (int i = 0; i < U; i++)
{
mean1[i] = xval[i];
mean2[i] = ew_sqr(xval[i]);
}
// reduce within thread
for (int j = U >> 1; j > 0; j >>= 1)
for (int i = 0; i < j; i++)
{
mean1[i] = ew_add(mean1[i], mean1[i+j]);
mean2[i] = ew_add(mean2[i], mean2[i+j]);
}
float2 stats;
stats.x = ew_sum(mean1[0]) * rcpK;
stats.y = ew_sum(mean2[0]) * rcpK;
// reduce within warp
for (int i = 16; i > 0; i >>= 1)
stats = ew_warp_sum(stats, i);
// reduce across warps
if (blockDim.x > 32)
{
// first thread of each warp store to shared
if ((tid & 31) == 0)
Share[tid/32] = stats;
__syncthreads();
if (tid < 32)
{
// first warp loads all prior reductions
stats = Share[tid];
// reduce within this last warp
#pragma unroll 1
for (int i = thread2/64; i > 0; i >>= 1)
stats = ew_warp_sum(stats, i);
// final reduction to shared
Share[tid] = stats;
}
__syncthreads();
stats = Share[0];
}
// var = avg(x**2) - avg(x)**2
// rstd = 1/sqrt(var)
float mean = stats.x;
float rstd = rsqrtf(precise_sub(stats.y, ew_sqr(mean)) + epsilon);
if (tid == 0)
{
__stg(add_ptr_u(Mean, m), mean);
__stg(add_ptr_u(Rstd, m), rstd);
}
// Load Gain/Bias
G = add_ptr_u(G, k);
B = add_ptr_u(B, k);
V gain[U], bias[U];
for (int i = 0; i < U; i++)
{
bool b = t + i*32 < K;
gain[i] = load(G, i*32, b);
bias[i] = load(B, i*32, b);
}
// Compute and output norm
Y = add_ptr_u(Y, offset);
for (int i = 0; i < U; i++)
{
V xhat = ew_mul(ew_sub(xval[i], mean), rstd);
V y = ew_add(ew_mul(xhat, gain[i]), bias[i]);
if (relu)
y = ew_relu(y);
store(Y, y, i*32, t + i*32 < K);
}
}
template <typename T, typename V>
bool LayerNormSegmentedForward_NC(hipStream_t stream, int SMs,
T* y,
float* mean,
float* rstd,
const T* x,
const float* g,
const float* b,
float epsilon, uint N, uint S, uint K, float rcpK, int relu)
{
dim3 grid(N, S, 1);
if ((K & 3) == 0)
{
V* Y = (V*)y;
const V* X = (const V*)x;
const float4* G = (const float4*)g;
const float4* B = (const float4*)b;
if (K >= 256)
{
uint threads = CEIL_DIV(K, 32*8) * 32;
int thread2 = THREAD_POW2(threads);
K >>= 2;
hipLaunchKernelGGL(( layer_norm_segmented_nc<V,float4,2>), dim3(grid),dim3(threads),0,stream, Y, mean, rstd, X, G, B, epsilon, N, S*K, K, rcpK, relu, thread2);
}
else
{
uint threads = CEIL_DIV(K, 32*4) * 32;
int thread2 = THREAD_POW2(threads);
K >>= 2;
hipLaunchKernelGGL(( layer_norm_segmented_nc<V,float4,1>), dim3(grid),dim3(threads),0,stream, Y, mean, rstd, X, G, B, epsilon, N, S*K, K, rcpK, relu, thread2);
}
}
else
{
if (K >= 256)
{
uint threads = CEIL_DIV(K, 32*8) * 32;
int thread2 = THREAD_POW2(threads);
hipLaunchKernelGGL(( layer_norm_segmented_nc<T,float,8>), dim3(grid),dim3(threads),0,stream, y, mean, rstd, x, g, b, epsilon, N, S*K, K, rcpK, relu, thread2);
}
else
{
uint threads = CEIL_DIV(K, 32*4) * 32;
int thread2 = THREAD_POW2(threads);
hipLaunchKernelGGL(( layer_norm_segmented_nc<T,float,4>), dim3(grid),dim3(threads),0,stream, y, mean, rstd, x, g, b, epsilon, N, S*K, K, rcpK, relu, thread2);
}
}
return true; // TODO
}
template bool LayerNormSegmentedForward_NC<float,float4>(hipStream_t stream, int SMs, float* y, float* mean, float* rstd, const float* x, const float* g, const float* b, float epsilon, uint N, uint S, uint K, float rcpK, int relu);
template bool LayerNormSegmentedForward_NC<ehalf,ehalf4>(hipStream_t stream, int SMs, ehalf* y, float* mean, float* rstd, const ehalf* x, const float* g, const float* b, float epsilon, uint N, uint S, uint K, float rcpK, int relu);
template bool LayerNormSegmentedForward_NC<bhalf,bhalf4>(hipStream_t stream, int SMs, bhalf* y, float* mean, float* rstd, const bhalf* x, const float* g, const float* b, float epsilon, uint N, uint S, uint K, float rcpK, int relu);
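// Hedged note: CEIL_DIV and THREAD_POW2 are assumed to come from ew_op_gpu.h.
// As used in the launcher above they behave like the helpers below; these are
// illustrative stand-ins (ceil_div_ref / next_pow2_ref are hypothetical names),
// not the library's actual definitions.
static inline unsigned int ceil_div_ref(unsigned int x, unsigned int y)
{
    return (x + y - 1) / y;              // round-up integer division
}
static inline int next_pow2_ref(int threads)
{
    int p = 1;
    while (p < threads) p <<= 1;         // smallest power of two >= threads
    return p;                            // passed as 'thread2' so the cross-warp
}                                        // butterfly covers a non-power-of-two block size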
// Sum across the N axis requires a separate kernel.
// dg = sum(dy * xhat(x), axis=0)
// db = sum(dy, axis=0)
// Don't use vector loads here as we want to maximize the number of blocks
template <typename T>
__global__ void __launch_bounds__(32) layer_norm_segmented_dg_db_nc(
float* DG,
float* DB,
const T* __restrict__ DY,
const T* __restrict__ X,
const float* __restrict__ Gain,
const float* __restrict__ Bias,
const float* __restrict__ Mean,
const float* __restrict__ Rstd,
uint N, uint SK, uint SKz, uint K, int relu)
{
uint tid = threadIdx.x;
uint bn = blockIdx.x;
uint bk = blockIdx.y;
uint bs = blockIdx.z;
uint t = bk*32 + tid;
uint k = bs*K + t;
bool b = t < K;
float gain = 1.0f, bias = 0.0f, dg = 0.0f, db = 0.0f;
if (b && relu)
{
gain = ldg(add_ptr_u(Gain, k));
bias = ldg(add_ptr_u(Bias, k));
}
#pragma unroll 1
for (uint n = bn, m = bs*N + bn, nk = bn*SK + k; n < N; n += gridDim.x, m += gridDim.x, nk += SKz)
{
float x = load(add_ptr_u(X, nk), 0, b);
float dy = load(add_ptr_u(DY, nk), 0, b);
float mean = load(add_ptr_u(Mean, m));
float rstd = load(add_ptr_u(Rstd, m));
float xhat = (x - mean) * rstd;
if (relu)
dy = ew_relu_grad(dy, xhat * gain + bias);
dg += dy * xhat;
db += dy;
}
if (b)
{
DG = add_ptr_u(DG, k);
DB = add_ptr_u(DB, k);
if (gridDim.x == 1)
{
__stg(DG, dg);
__stg(DB, db);
}
else
{
atomicRed(DG, dg);
atomicRed(DB, db);
}
}
}
// xmean = x - mean(x, axis=1)
// xvar = var(x, axis=1)
// xstdr = rcp(sqrt(xvar + epsilon))
// xhat = xmean * xstdr
// dy = dy * g
// sum1 = sum(xhat * dy, axis=1)
// sum2 = sum(dy, axis=1)
// dx = (dy - ((xhat * sum1 + sum2) * rcpK)) * xstdr
template <typename T, typename V, int U>
__global__ void layer_norm_segmented_dx_nc(
T* DX,
const T* __restrict__ DY,
const T* __restrict__ X,
const V* __restrict__ Gain,
const V* __restrict__ Bias,
const float* __restrict__ Mean,
const float* __restrict__ Rstd,
uint N, uint SK, uint K, float rcpK, int relu, int thread2)
{
__shared__ float2 Share[32];
uint tid = threadIdx.x;
if (blockDim.x > 32)
{
// Allows non-power of 2 threads to work
float2 zero = {0.0f, 0.0f};
if (tid < 32)
Share[tid] = zero;
__syncthreads();
}
uint n = blockIdx.x;
uint s = blockIdx.y;
uint t = (tid & 0x3e0)*U + (tid & 31); // 0x3e0 = -32 & 1023
uint k = s*K + t;
uint m = s*N + n;
uint offset = n*SK + k;
float mean = ldg(add_ptr_u(Mean, m));
float rstd = ldg(add_ptr_u(Rstd, m));
X = add_ptr_u(X, offset);
DY = add_ptr_u(DY, offset);
Gain = add_ptr_u(Gain, k);
V x[U], dy[U], gain[U];
for (int i = 0; i < U; i++)
{
bool b = t + i*32 < K;
x[i] = load(X, i*32, b);
dy[i] = load(DY, i*32, b);
gain[i] = load(Gain, i*32, b);
}
V xhat[U];
if (relu)
{
Bias = add_ptr_u(Bias, k);
for (int i = 0; i < U; i++)
{
V bias = load(Bias, i*32, t + i*32 < K);
xhat[i] = ew_mul(ew_sub(x[i], mean), rstd);
dy[i] = ew_relu_grad(dy[i], ew_add(ew_mul(xhat[i], gain[i]), bias));
}
}
else
{
for (int i = 0; i < U; i++)
xhat[i] = ew_mul(ew_sub(x[i], mean), rstd);
}
V sum1[U], sum2[U];
for (int i = 0; i < U; i++)
{
dy[i] = ew_mul(dy[i], gain[i]);
sum1[i] = ew_mul(dy[i], xhat[i]);
sum2[i] = dy[i];
}
// reduce within thread
for (int j = U >> 1; j > 0; j >>= 1)
for (int i = 0; i < j; i++)
{
sum1[i] = ew_add(sum1[i], sum1[i+j]);
sum2[i] = ew_add(sum2[i], sum2[i+j]);
}
float2 sums;
sums.x = ew_sum(sum1[0]);
sums.y = ew_sum(sum2[0]);
// reduce within warp
for (int i = 16; i > 0; i >>= 1)
sums = ew_warp_sum(sums, i);
// reduce across warps
if (blockDim.x > 32)
{
// first thread of each warp store to shared
if ((tid & 31) == 0)
Share[tid/32] = sums;
__syncthreads();
if (tid < 32)
{
// first warp loads all prior reductions
sums = Share[tid];
// reduce within this last warp
#pragma unroll 1
for (int i = thread2/64; i > 0; i >>= 1)
sums = ew_warp_sum(sums, i);
// final reduction to shared
Share[tid] = sums;
}
__syncthreads();
sums = Share[0];
}
// Compute and store dx
DX = add_ptr_u(DX, offset);
for (int i = 0; i < U; i++)
{
// dx = (dy - ((xhat * sum1 + sum2) * rcpK)) * rstd;
V dx = ew_mul(ew_sub(dy[i], ew_mul(ew_add(ew_mul(xhat[i], sums.x), sums.y), rcpK)), rstd);
store(DX, dx, i*32, t + i*32 < K);
}
}
template <typename T, typename V>
bool LayerNormSegmentedBackward_NC(hipStream_t stream, int SMs,
T* dx,
float* dg,
float* db,
const T* dy,
const T* x,
const float* g,
const float* b,
const float* mean,
const float* rstd,
float epsilon, uint N, uint S, uint K, float rcpK, int relu, int atomics)
{
uint gridK = CEIL_DIV(K, 32);
uint gridN = 1;
if (atomics)
{
uint blocksK = gridK * S;
while (gridN < (N>>3) && gridN * blocksK < 32*SMs) gridN += 1;
if (gridN * blocksK > 32*SMs && gridN > 1) gridN -= 1;
if (gridN > 1)
{
            // cuMemsetD32Async counts 32-bit words, so use the D32 variant here
            // instead of the byte-count hipMemsetAsync the translator emitted.
            hipMemsetD32Async((hipDeviceptr_t)dg, 0, S*K, stream);
            hipMemsetD32Async((hipDeviceptr_t)db, 0, S*K, stream);
}
}
hipLaunchKernelGGL(( layer_norm_segmented_dg_db_nc<T>), dim3(dim3(gridN,gridK,S)),dim3(32),0,stream, dg, db, dy, x, g, b, mean, rstd, N, S*K, S*K*gridN, K, relu);
dim3 grid(N, S, 1);
if ((K & 3) == 0 && K >= 512)
{
V* DX = ( V*)dx;
const V* DY = (const V*)dy; // in place op
const V* X = (const V*)x;
const float4* G = (const float4*)g;
const float4* B = (const float4*)b;
if (K > 4096)
{
uint threads = CEIL_DIV(K, 32*8) * 32;
int thread2 = THREAD_POW2(threads);
K >>= 2;
hipLaunchKernelGGL(( layer_norm_segmented_dx_nc<V,float4,2>), dim3(grid),dim3(threads),0,stream, DX, DY, X, G, B, mean, rstd, N, S*K, K, rcpK, relu, thread2);
}
else
{
uint threads = CEIL_DIV(K, 32*4) * 32;
int thread2 = THREAD_POW2(threads);
K >>= 2;
hipLaunchKernelGGL(( layer_norm_segmented_dx_nc<V,float4,1>), dim3(grid),dim3(threads),0,stream, DX, DY, X, G, B, mean, rstd, N, S*K, K, rcpK, relu, thread2);
}
}
else
{
if (K > 4096)
{
uint threads = CEIL_DIV(K, 32*8) * 32;
int thread2 = THREAD_POW2(threads);
hipLaunchKernelGGL(( layer_norm_segmented_dx_nc<T,float ,8>), dim3(grid),dim3(threads),0,stream, dx, dy, x, g, b, mean, rstd, N, S*K, K, rcpK, relu, thread2);
}
else if (K >= 512)
{
uint threads = CEIL_DIV(K, 32*4) * 32;
int thread2 = THREAD_POW2(threads);
hipLaunchKernelGGL(( layer_norm_segmented_dx_nc<T,float ,4>), dim3(grid),dim3(threads),0,stream, dx, dy, x, g, b, mean, rstd, N, S*K, K, rcpK, relu, thread2);
}
else
{
uint threads = CEIL_DIV(K, 32*1) * 32;
int thread2 = THREAD_POW2(threads);
hipLaunchKernelGGL(( layer_norm_segmented_dx_nc<T,float ,1>), dim3(grid),dim3(threads),0,stream, dx, dy, x, g, b, mean, rstd, N, S*K, K, rcpK, relu, thread2);
}
}
return true; // TODO
}
template bool LayerNormSegmentedBackward_NC<float,float4>(hipStream_t stream, int SMs, float* dx, float* dg, float* db, const float* dy, const float* x, const float* g, const float* b, const float* mean, const float* rstd, float epsilon, uint N, uint S, uint K, float rcpK, int relu, int atomics);
template bool LayerNormSegmentedBackward_NC<ehalf,ehalf4>(hipStream_t stream, int SMs, ehalf* dx, float* dg, float* db, const ehalf* dy, const ehalf* x, const float* g, const float* b, const float* mean, const float* rstd, float epsilon, uint N, uint S, uint K, float rcpK, int relu, int atomics);
template bool LayerNormSegmentedBackward_NC<bhalf,bhalf4>(hipStream_t stream, int SMs, bhalf* dx, float* dg, float* db, const bhalf* dy, const bhalf* x, const float* g, const float* b, const float* mean, const float* rstd, float epsilon, uint N, uint S, uint K, float rcpK, int relu, int atomics);
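// Hedged summary of the launch heuristic in the segmented backward wrapper
// above (no new logic): when 'atomics' is enabled, the dg/db reduction is
// split across gridN blocks along the N axis. gridN is grown toward N/8 while
// the total block count (gridN * gridK * S) stays under roughly 32 blocks per
// SM, then backed off by one if it overshoots. With gridN > 1 each block
// accumulates partial sums via atomicRed, which is why dg and db must be
// zeroed first.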
#endif // GOOGLE_CUDA
|
da664394a439c9973ca2a910c2250987be94e8a7.cu
|
#if GOOGLE_CUDA
#include "ew_op_gpu.h"
#include <stdio.h>
// xmean = x - mean(x, axis=1)
// xvar = var(x, axis=1)
// xstdr = rcp(sqrt(xvar + epsilon))
// xhat = xmean * xstdr
// y = xhat*g + b
template <typename T, typename V, int THREADS>
__global__ void __launch_bounds__(THREADS) layer_norm_NC(
T* Y,
float* Mean,
float* Rstd,
const T* __restrict__ X,
const V* __restrict__ G,
const V* __restrict__ B,
float epsilon, uint K, float rcpK, int relu)
{
uint tid = threadIdx.x;
uint n = blockIdx.x;
uint offset = n*K + tid;
// Mean
V v_mean1, v_mean2;
ew_zero(v_mean1);
ew_zero(v_mean2);
#pragma unroll 4
for (uint k = tid, offsetX = offset; k < K; k += THREADS, offsetX += THREADS)
{
// Single pass over X to compute mean and variance
// var(x) == mean(x**2) - mean(x)**2
V x = load(add_ptr_u(X, offsetX));
v_mean1 = ew_add(v_mean1, x);
v_mean2 = ew_add(v_mean2, ew_sqr(x));
}
float2 stats;
stats.x = ew_sum(v_mean1) * rcpK;
stats.y = ew_sum(v_mean2) * rcpK;
// reduce within warp
for (int i = 16; i > 0; i >>= 1)
stats = ew_warp_sum(stats, i);
    // if using more than 1 warp, further reduce with shared memory
if (THREADS > 32)
{
__shared__ float2 Share[32];
// first thread of each warp store to shared
if ((tid & 31) == 0)
Share[tid/32] = stats;
__syncthreads();
if (tid < 32)
{
// first warp loads all prior reductions
stats = Share[tid];
// reduce within this first warp
for (int i = THREADS/64; i > 0; i >>= 1)
stats = ew_warp_sum(stats, i);
// final reduction to shared
Share[tid] = stats;
}
__syncthreads();
// broadcast result to all threads
stats = Share[0];
}
// var = avg(x**2) - avg(x)**2
// rstd = 1/sqrt(var)
float mean = stats.x;
float rstd = rsqrtf(precise_sub(stats.y, ew_sqr(mean)) + epsilon);
if (tid == 0)
{
Mean[n] = mean;
Rstd[n] = rstd;
}
// Norm/Gain/Bias
#pragma unroll 4
for (uint k = tid; k < K; k += THREADS, offset += THREADS)
{
V x = load(add_ptr_u(X, offset));
V g = load(G, k);
V b = load(B, k);
V xhat = ew_mul(ew_sub(x, mean), rstd);
V y = ew_add(ew_mul(xhat, g), b);
if (relu)
y = ew_relu(y);
store(add_ptr_u(Y, offset), y);
}
}
template <typename T, typename V>
bool LayerNormForward_NC(CUstream stream, int SMs,
T* y,
float* mean,
float* rstd,
const T* x,
const float* g,
const float* b,
float epsilon, int K, int N, float rcpK, int relu)
{
dim3 grid(N, 1, 1);
if ((K & 3) == 0)
{
K >>= 2; // use vector loads
V* Y = (V*)y;
const V* X = (const V*)x;
const float4* G = (const float4*)g;
const float4* B = (const float4*)b;
if (K >= 256)
layer_norm_NC<V,float4,256><<<grid, 256,0,stream>>>(Y, mean, rstd, X, G, B, epsilon, K, rcpK, relu);
else
layer_norm_NC<V,float4, 32><<<grid, 32,0,stream>>>(Y, mean, rstd, X, G, B, epsilon, K, rcpK, relu);
}
else
{
if (K >= 256)
layer_norm_NC<T,float ,256><<<grid, 256,0,stream>>>(y, mean, rstd, x, g, b, epsilon, K, rcpK, relu);
else
layer_norm_NC<T,float , 32><<<grid, 32,0,stream>>>(y, mean, rstd, x, g, b, epsilon, K, rcpK, relu);
}
return true; // TODO
}
template bool LayerNormForward_NC<float,float4>(CUstream stream, int SMs, float* y, float* mean, float* rstd, const float* x, const float* g, const float* b, float epsilon, int K, int N, float rcpK, int relu);
template bool LayerNormForward_NC<ehalf,ehalf4>(CUstream stream, int SMs, ehalf* y, float* mean, float* rstd, const ehalf* x, const float* g, const float* b, float epsilon, int K, int N, float rcpK, int relu);
template bool LayerNormForward_NC<bhalf,bhalf4>(CUstream stream, int SMs, bhalf* y, float* mean, float* rstd, const bhalf* x, const float* g, const float* b, float epsilon, int K, int N, float rcpK, int relu);
// Sum across the N axis requires a separate kernel.
// dg = sum(dy * xhat(x), axis=0)
// db = sum(dy, axis=0)
// Don't use vector loads here as we want to maximize the number of blocks
template <typename T, int U>
__global__ void __launch_bounds__(32) layer_norm_dg_db_NC(
float* DG,
float* DB,
const T* __restrict__ DY,
const T* __restrict__ X,
const float* __restrict__ Gain,
const float* __restrict__ Bias,
const float* __restrict__ Mean,
const float* __restrict__ Rstd,
float epsilon, int K, int N, int relu)
{
int tid = threadIdx.x;
int bid = blockIdx.x;
int shift = 5 - U; // 4
int mask = (1 << shift) - 1; // 15
int k = (bid << shift) + (tid & mask); // b*16 + 0-15
int n0 = (tid >> shift) << 2; // 0,4
int nk = n0*K + k;
bool b = k < K;
int strideK = K << (2 + U);
float gain = 1.0f, bias = 0.0f, dg = 0.0f, db = 0.0f;
if (b && relu)
{
gain = Gain[k];
bias = Bias[k];
}
for (int n = n0; n < N; n += (4 << U))
{
int n1 = n + 1;
int n2 = n + 2;
int n3 = n + 3;
int nk1 = nk + K;
int nk2 = nk1 + K;
int nk3 = nk2 + K;
float x0 = load( X, nk, b);
float x1 = load( X, nk1, b && (n1 < N));
float x2 = load( X, nk2, b && (n2 < N));
float x3 = load( X, nk3, b && (n3 < N));
float dy0 = load(DY, nk, b);
float dy1 = load(DY, nk1, b && (n1 < N));
float dy2 = load(DY, nk2, b && (n2 < N));
float dy3 = load(DY, nk3, b && (n3 < N));
float mean0 = Mean[n];
float rstd0 = Rstd[n];
float mean1 = 0.0f, rstd1 = 0.0f;
float mean2 = 0.0f, rstd2 = 0.0f;
float mean3 = 0.0f, rstd3 = 0.0f;
if (n1 < N)
{
mean1 = Mean[n1];
rstd1 = Rstd[n1];
}
if (n2 < N)
{
mean2 = Mean[n2];
rstd2 = Rstd[n2];
}
if (n3 < N)
{
mean3 = Mean[n3];
rstd3 = Rstd[n3];
}
float xhat0 = (x0 - mean0) * rstd0;
float xhat1 = (x1 - mean1) * rstd1;
float xhat2 = (x2 - mean2) * rstd2;
float xhat3 = (x3 - mean3) * rstd3;
if (relu)
{
dy0 = ew_relu_grad(dy0, xhat0 * gain + bias);
dy1 = ew_relu_grad(dy1, xhat1 * gain + bias);
dy2 = ew_relu_grad(dy2, xhat2 * gain + bias);
dy3 = ew_relu_grad(dy3, xhat3 * gain + bias);
}
dg += dy0 * xhat0;
dg += dy1 * xhat1;
dg += dy2 * xhat2;
dg += dy3 * xhat3;
db += dy0;
db += dy1;
db += dy2;
db += dy3;
nk += strideK;
}
#pragma unroll
for (int i = 16; i > (1 << (4-U)); i >>= 1)
{
dg += shfl_xor(dg, i);
db += shfl_xor(db, i);
}
store(DG, dg, k, b);
store(DB, db, k, b);
}
// xmean = x - mean(x, axis=1)
// xvar = var(x, axis=1)
// xstdr = rcp(sqrt(xvar + epsilon))
// xhat = xmean * xstdr
// dy = dy * g
// sum1 = sum(xhat * dy, axis=1)
// sum2 = sum(dy, axis=1)
// dx = (dy - ((xhat * sum1 + sum2) * rcpK)) * xstdr
template <typename T, typename V, int THREADS>
__global__ void __launch_bounds__(THREADS) layer_norm_dx_NC(
T* DX,
const T* __restrict__ DY,
const T* __restrict__ X,
const V* __restrict__ Gain,
const V* __restrict__ Bias,
const float* __restrict__ Mean,
const float* __restrict__ Rstd,
float epsilon, int K, float rcpK, int relu)
{
__shared__ float Share1[32];
__shared__ float Share2[32];
int tid = threadIdx.x;
int n = blockIdx.x;
int offset = n*K + tid;
float mean = Mean[n];
float rstd = Rstd[n];
const T* X1 = X + offset;
const T* Y1 = DY + offset;
V v_sum1, v_sum2;
ew_zero(v_sum1);
ew_zero(v_sum2);
for (int k = tid; k < K; k += THREADS)
{
V x = load(X1);
V dy = load(Y1);
V g = load(Gain, k);
V b = load(Bias, k, relu != 0);
V xhat = ew_mul(ew_sub(x, mean), rstd);
if (relu)
dy = ew_relu_grad(dy, ew_add(ew_mul(xhat, g), b));
dy = ew_mul(dy, g);
v_sum1 = ew_add(v_sum1, ew_mul(dy, xhat));
v_sum2 = ew_add(v_sum2, dy);
X1 += THREADS;
Y1 += THREADS;
}
float sum1 = ew_sum(v_sum1);
float sum2 = ew_sum(v_sum2);
// reduce within warp
#pragma unroll
for (int i = 16; i > 0; i >>= 1)
{
sum1 += shfl_xor(sum1, i);
sum2 += shfl_xor(sum2, i);
}
// first thread of each warp store to shared
if ((tid & 31) == 0)
{
Share1[tid >> 5] = sum1;
Share2[tid >> 5] = sum2;
}
__syncthreads();
if (tid < 32)
{
// first warp loads all prior reductions
sum1 = Share1[tid];
sum2 = Share2[tid];
// reduce within this last warp
#pragma unroll
for (int i = THREADS/64; i > 0; i >>= 1)
{
sum1 += shfl_xor(sum1, i);
sum2 += shfl_xor(sum2, i);
}
// outputs final reduction to shared
Share1[tid] = sum1;
Share2[tid] = sum2;
}
__syncthreads();
// broadcast result to all threads
sum1 = Share1[0];
sum2 = Share2[0];
X += offset;
DY += offset;
DX += offset;
for (int k = tid; k < K; k += THREADS)
{
V x = load(X);
V dy = load(DY);
V g = load(Gain, k);
V b = load(Bias, k, relu != 0);
V xhat = ew_mul(ew_sub(x, mean), rstd);
if (relu)
dy = ew_relu_grad(dy, ew_add(ew_mul(xhat, g), b));
dy = ew_mul(dy, g);
// dx = (dy - ((xhat * sum1 + sum2) * rcpK)) * rstd;
V dx = ew_mul(ew_sub(dy, ew_mul(ew_add(ew_mul(xhat, sum1), sum2), rcpK)), rstd);
store(DX, dx);
X += THREADS;
DY += THREADS;
DX += THREADS;
}
}
template <typename T, typename V>
bool LayerNormBackward_NC(CUstream stream, int SMs,
T* dx,
float* dg,
float* db,
const T* dy,
const T* x,
const float* g,
const float* b,
const float* mean,
const float* rstd,
float epsilon, int K, int N, float rcpK, int relu)
{
int K32 = K >> 5;
// optimize layer_norm_backward1 for highest occupancy
if (K32 >= 28*16)
{
int gridK = K32 + ((K & 31) != 0);
layer_norm_dg_db_NC<T,0><<<gridK, 32, 0, stream>>>(dg, db, dy, x, g, b, mean, rstd, epsilon, K, N, relu);
}
else if (K32 >= 28*8)
{
int gridK = (K >> 4) + ((K & 15) != 0);
layer_norm_dg_db_NC<T,1><<<gridK, 32, 0, stream>>>(dg, db, dy, x, g, b, mean, rstd, epsilon, K, N, relu);
}
else if (K32 >= 28*4)
{
int gridK = (K >> 3) + ((K & 7) != 0);
layer_norm_dg_db_NC<T,2><<<gridK, 32, 0, stream>>>(dg, db, dy, x, g, b, mean, rstd, epsilon, K, N, relu);
}
else
{
int gridK = (K >> 2) + ((K & 3) != 0);
layer_norm_dg_db_NC<T,3><<<gridK, 32, 0, stream>>>(dg, db, dy, x, g, b, mean, rstd, epsilon, K, N, relu);
}
if ((K & 3) == 0)
{
V* DX = ( V*)dx;
const V* DY = (const V*)dy; // in place op
const V* X = (const V*)x;
const float4* Gain = (const float4*)g;
const float4* Bias = (const float4*)b;
K >>= 2;
//if (K >= 1024)
// layer_norm_dx_NC<VB,VF,float4,1024><<<N,1024,0,stream>>>(DX, DY, X, mean, rstd, epsilon, K, rcpK);
if (K >= 256)
layer_norm_dx_NC<V,float4, 256><<<N, 256,0,stream>>>(DX, DY, X, Gain, Bias, mean, rstd, epsilon, K, rcpK, relu);
else
layer_norm_dx_NC<V,float4, 64><<<N, 64,0,stream>>>(DX, DY, X, Gain, Bias, mean, rstd, epsilon, K, rcpK, relu);
}
else
{
//if (K >= 1024)
// layer_norm_dx_NC<B,F,float,1024><<<N,1024,0,stream>>>(dx, (const B*)dx, x, mean, rstd, epsilon, K, rcpK);
if (K >= 256)
layer_norm_dx_NC<T,float, 256><<<N, 256,0,stream>>>(dx, dy, x, g, b, mean, rstd, epsilon, K, rcpK, relu);
else
layer_norm_dx_NC<T,float, 64><<<N, 64,0,stream>>>(dx, dy, x, g, b, mean, rstd, epsilon, K, rcpK, relu);
}
return true; // TODO
}
template bool LayerNormBackward_NC<float,float4>(CUstream stream, int SMs, float* dx, float* dg, float* db, const float* dy, const float* x, const float* g, const float* b, const float* mean, const float* rstd, float epsilon, int K, int N, float rcpK, int relu);
template bool LayerNormBackward_NC<ehalf,ehalf4>(CUstream stream, int SMs, ehalf* dx, float* dg, float* db, const ehalf* dy, const ehalf* x, const float* g, const float* b, const float* mean, const float* rstd, float epsilon, int K, int N, float rcpK, int relu);
template bool LayerNormBackward_NC<bhalf,bhalf4>(CUstream stream, int SMs, bhalf* dx, float* dg, float* db, const bhalf* dy, const bhalf* x, const float* g, const float* b, const float* mean, const float* rstd, float epsilon, int K, int N, float rcpK, int relu);
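// Hedged usage sketch, not from the original source: a direct host-side call
// to the backward pass could look like the function below. The d_* names are
// hypothetical device buffers holding the forward inputs/outputs (x, g, b,
// mean, rstd) and the incoming gradient dy; rcpK must equal 1.0f/K.
static bool launch_layer_norm_backward_example(CUstream stream, int SMs,
    float* d_dx, float* d_dg, float* d_db,
    const float* d_dy, const float* d_x, const float* d_g, const float* d_b,
    const float* d_mean, const float* d_rstd,
    int K, int N)
{
    return LayerNormBackward_NC<float,float4>(stream, SMs,
        d_dx, d_dg, d_db, d_dy, d_x, d_g, d_b, d_mean, d_rstd,
        1e-5f, K, N, 1.0f / (float)K, /*relu=*/0);
}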
// xmean = x - mean(x, axis=1)
// xvar = var(x, axis=1)
// xstdr = rcp(sqrt(xvar + epsilon))
// xhat = xmean * xstdr
// y = xhat*g + b
template <typename T, typename V, int U>
__global__ void layer_norm_segmented_nc(
T* Y,
float* Mean,
float* Rstd,
const T* __restrict__ X,
const V* __restrict__ G,
const V* __restrict__ B,
float epsilon, uint N, uint SK, uint K, float rcpK, int relu, int thread2)
{
__shared__ float2 Share[32];
uint tid = threadIdx.x;
if (blockDim.x > 32)
{
// Allows non-power of 2 threads to work
float2 zero = {0.0f, 0.0f};
if (tid < 32)
Share[tid] = zero;
__syncthreads();
}
uint n = blockIdx.x;
uint s = blockIdx.y;
uint t = (tid & 0x3e0)*U + (tid & 31); // 0x3e0 = -32 & 1023
uint k = s*K + t;
uint m = s*N + n;
uint offset = n*SK + k;
// Load X
V xval[U];
X = add_ptr_u(X, offset);
for (int i = 0; i < U; i++)
xval[i] = load(X, i*32, t + i*32 < K);
// Begin mean/variance reductions
V mean1[U], mean2[U];
for (int i = 0; i < U; i++)
{
mean1[i] = xval[i];
mean2[i] = ew_sqr(xval[i]);
}
// reduce within thread
for (int j = U >> 1; j > 0; j >>= 1)
for (int i = 0; i < j; i++)
{
mean1[i] = ew_add(mean1[i], mean1[i+j]);
mean2[i] = ew_add(mean2[i], mean2[i+j]);
}
float2 stats;
stats.x = ew_sum(mean1[0]) * rcpK;
stats.y = ew_sum(mean2[0]) * rcpK;
// reduce within warp
for (int i = 16; i > 0; i >>= 1)
stats = ew_warp_sum(stats, i);
// reduce across warps
if (blockDim.x > 32)
{
// first thread of each warp store to shared
if ((tid & 31) == 0)
Share[tid/32] = stats;
__syncthreads();
if (tid < 32)
{
// first warp loads all prior reductions
stats = Share[tid];
// reduce within this last warp
#pragma unroll 1
for (int i = thread2/64; i > 0; i >>= 1)
stats = ew_warp_sum(stats, i);
// final reduction to shared
Share[tid] = stats;
}
__syncthreads();
stats = Share[0];
}
// var = avg(x**2) - avg(x)**2
// rstd = 1/sqrt(var)
float mean = stats.x;
float rstd = rsqrtf(precise_sub(stats.y, ew_sqr(mean)) + epsilon);
if (tid == 0)
{
__stg(add_ptr_u(Mean, m), mean);
__stg(add_ptr_u(Rstd, m), rstd);
}
// Load Gain/Bias
G = add_ptr_u(G, k);
B = add_ptr_u(B, k);
V gain[U], bias[U];
for (int i = 0; i < U; i++)
{
bool b = t + i*32 < K;
gain[i] = load(G, i*32, b);
bias[i] = load(B, i*32, b);
}
// Compute and output norm
Y = add_ptr_u(Y, offset);
for (int i = 0; i < U; i++)
{
V xhat = ew_mul(ew_sub(xval[i], mean), rstd);
V y = ew_add(ew_mul(xhat, gain[i]), bias[i]);
if (relu)
y = ew_relu(y);
store(Y, y, i*32, t + i*32 < K);
}
}
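// Illustrative sketch, not part of the original source: in the kernel above,
// t = (tid & 0x3e0)*U + (tid & 31) assigns each warp U consecutive 32-wide
// chunks of the K dimension (tid & 0x3e0 is warp*32, tid & 31 is the lane).
// The element touched at unroll step i is therefore warp*32*U + i*32 + lane,
// as the hypothetical helper below spells out.
static inline unsigned int segmented_index_ref(unsigned int tid, unsigned int U, unsigned int i)
{
    unsigned int warp = tid >> 5;        // (tid & 0x3e0) == warp * 32
    unsigned int lane = tid & 31u;
    return warp * 32u * U + i * 32u + lane;
}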
template <typename T, typename V>
bool LayerNormSegmentedForward_NC(CUstream stream, int SMs,
T* y,
float* mean,
float* rstd,
const T* x,
const float* g,
const float* b,
float epsilon, uint N, uint S, uint K, float rcpK, int relu)
{
dim3 grid(N, S, 1);
if ((K & 3) == 0)
{
V* Y = (V*)y;
const V* X = (const V*)x;
const float4* G = (const float4*)g;
const float4* B = (const float4*)b;
if (K >= 256)
{
uint threads = CEIL_DIV(K, 32*8) * 32;
int thread2 = THREAD_POW2(threads);
K >>= 2;
layer_norm_segmented_nc<V,float4,2><<<grid,threads,0,stream>>>(Y, mean, rstd, X, G, B, epsilon, N, S*K, K, rcpK, relu, thread2);
}
else
{
uint threads = CEIL_DIV(K, 32*4) * 32;
int thread2 = THREAD_POW2(threads);
K >>= 2;
layer_norm_segmented_nc<V,float4,1><<<grid,threads,0,stream>>>(Y, mean, rstd, X, G, B, epsilon, N, S*K, K, rcpK, relu, thread2);
}
}
else
{
if (K >= 256)
{
uint threads = CEIL_DIV(K, 32*8) * 32;
int thread2 = THREAD_POW2(threads);
layer_norm_segmented_nc<T,float,8><<<grid,threads,0,stream>>>(y, mean, rstd, x, g, b, epsilon, N, S*K, K, rcpK, relu, thread2);
}
else
{
uint threads = CEIL_DIV(K, 32*4) * 32;
int thread2 = THREAD_POW2(threads);
layer_norm_segmented_nc<T,float,4><<<grid,threads,0,stream>>>(y, mean, rstd, x, g, b, epsilon, N, S*K, K, rcpK, relu, thread2);
}
}
return true; // TODO
}
template bool LayerNormSegmentedForward_NC<float,float4>(CUstream stream, int SMs, float* y, float* mean, float* rstd, const float* x, const float* g, const float* b, float epsilon, uint N, uint S, uint K, float rcpK, int relu);
template bool LayerNormSegmentedForward_NC<ehalf,ehalf4>(CUstream stream, int SMs, ehalf* y, float* mean, float* rstd, const ehalf* x, const float* g, const float* b, float epsilon, uint N, uint S, uint K, float rcpK, int relu);
template bool LayerNormSegmentedForward_NC<bhalf,bhalf4>(CUstream stream, int SMs, bhalf* y, float* mean, float* rstd, const bhalf* x, const float* g, const float* b, float epsilon, uint N, uint S, uint K, float rcpK, int relu);
// Sum across the N axis requires a separate kernel.
// dg = sum(dy * xhat(x), axis=0)
// db = sum(dy, axis=0)
// Don't use vector loads here as we want to maximize the number of blocks
template <typename T>
__global__ void __launch_bounds__(32) layer_norm_segmented_dg_db_nc(
float* DG,
float* DB,
const T* __restrict__ DY,
const T* __restrict__ X,
const float* __restrict__ Gain,
const float* __restrict__ Bias,
const float* __restrict__ Mean,
const float* __restrict__ Rstd,
uint N, uint SK, uint SKz, uint K, int relu)
{
uint tid = threadIdx.x;
uint bn = blockIdx.x;
uint bk = blockIdx.y;
uint bs = blockIdx.z;
uint t = bk*32 + tid;
uint k = bs*K + t;
bool b = t < K;
float gain = 1.0f, bias = 0.0f, dg = 0.0f, db = 0.0f;
if (b && relu)
{
gain = ldg(add_ptr_u(Gain, k));
bias = ldg(add_ptr_u(Bias, k));
}
#pragma unroll 1
for (uint n = bn, m = bs*N + bn, nk = bn*SK + k; n < N; n += gridDim.x, m += gridDim.x, nk += SKz)
{
float x = load(add_ptr_u(X, nk), 0, b);
float dy = load(add_ptr_u(DY, nk), 0, b);
float mean = load(add_ptr_u(Mean, m));
float rstd = load(add_ptr_u(Rstd, m));
float xhat = (x - mean) * rstd;
if (relu)
dy = ew_relu_grad(dy, xhat * gain + bias);
dg += dy * xhat;
db += dy;
}
if (b)
{
DG = add_ptr_u(DG, k);
DB = add_ptr_u(DB, k);
if (gridDim.x == 1)
{
__stg(DG, dg);
__stg(DB, db);
}
else
{
atomicRed(DG, dg);
atomicRed(DB, db);
}
}
}
// xmean = x - mean(x, axis=1)
// xvar = var(x, axis=1)
// xstdr = rcp(sqrt(xvar + epsilon))
// xhat = xmean * xstdr
// dy = dy * g
// sum1 = sum(xhat * dy, axis=1)
// sum2 = sum(dy, axis=1)
// dx = (dy - ((xhat * sum1 + sum2) * rcpK)) * xstdr
template <typename T, typename V, int U>
__global__ void layer_norm_segmented_dx_nc(
T* DX,
const T* __restrict__ DY,
const T* __restrict__ X,
const V* __restrict__ Gain,
const V* __restrict__ Bias,
const float* __restrict__ Mean,
const float* __restrict__ Rstd,
uint N, uint SK, uint K, float rcpK, int relu, int thread2)
{
__shared__ float2 Share[32];
uint tid = threadIdx.x;
if (blockDim.x > 32)
{
// Allows non-power of 2 threads to work
float2 zero = {0.0f, 0.0f};
if (tid < 32)
Share[tid] = zero;
__syncthreads();
}
uint n = blockIdx.x;
uint s = blockIdx.y;
uint t = (tid & 0x3e0)*U + (tid & 31); // 0x3e0 = -32 & 1023
uint k = s*K + t;
uint m = s*N + n;
uint offset = n*SK + k;
float mean = ldg(add_ptr_u(Mean, m));
float rstd = ldg(add_ptr_u(Rstd, m));
X = add_ptr_u(X, offset);
DY = add_ptr_u(DY, offset);
Gain = add_ptr_u(Gain, k);
V x[U], dy[U], gain[U];
for (int i = 0; i < U; i++)
{
bool b = t + i*32 < K;
x[i] = load(X, i*32, b);
dy[i] = load(DY, i*32, b);
gain[i] = load(Gain, i*32, b);
}
V xhat[U];
if (relu)
{
Bias = add_ptr_u(Bias, k);
for (int i = 0; i < U; i++)
{
V bias = load(Bias, i*32, t + i*32 < K);
xhat[i] = ew_mul(ew_sub(x[i], mean), rstd);
dy[i] = ew_relu_grad(dy[i], ew_add(ew_mul(xhat[i], gain[i]), bias));
}
}
else
{
for (int i = 0; i < U; i++)
xhat[i] = ew_mul(ew_sub(x[i], mean), rstd);
}
V sum1[U], sum2[U];
for (int i = 0; i < U; i++)
{
dy[i] = ew_mul(dy[i], gain[i]);
sum1[i] = ew_mul(dy[i], xhat[i]);
sum2[i] = dy[i];
}
// reduce within thread
for (int j = U >> 1; j > 0; j >>= 1)
for (int i = 0; i < j; i++)
{
sum1[i] = ew_add(sum1[i], sum1[i+j]);
sum2[i] = ew_add(sum2[i], sum2[i+j]);
}
float2 sums;
sums.x = ew_sum(sum1[0]);
sums.y = ew_sum(sum2[0]);
// reduce within warp
for (int i = 16; i > 0; i >>= 1)
sums = ew_warp_sum(sums, i);
// reduce across warps
if (blockDim.x > 32)
{
// first thread of each warp store to shared
if ((tid & 31) == 0)
Share[tid/32] = sums;
__syncthreads();
if (tid < 32)
{
// first warp loads all prior reductions
sums = Share[tid];
// reduce within this last warp
#pragma unroll 1
for (int i = thread2/64; i > 0; i >>= 1)
sums = ew_warp_sum(sums, i);
// final reduction to shared
Share[tid] = sums;
}
__syncthreads();
sums = Share[0];
}
// Compute and store dx
DX = add_ptr_u(DX, offset);
for (int i = 0; i < U; i++)
{
// dx = (dy - ((xhat * sum1 + sum2) * rcpK)) * rstd;
V dx = ew_mul(ew_sub(dy[i], ew_mul(ew_add(ew_mul(xhat[i], sums.x), sums.y), rcpK)), rstd);
store(DX, dx, i*32, t + i*32 < K);
}
}
template <typename T, typename V>
bool LayerNormSegmentedBackward_NC(CUstream stream, int SMs,
T* dx,
float* dg,
float* db,
const T* dy,
const T* x,
const float* g,
const float* b,
const float* mean,
const float* rstd,
float epsilon, uint N, uint S, uint K, float rcpK, int relu, int atomics)
{
uint gridK = CEIL_DIV(K, 32);
uint gridN = 1;
if (atomics)
{
uint blocksK = gridK * S;
while (gridN < (N>>3) && gridN * blocksK < 32*SMs) gridN += 1;
if (gridN * blocksK > 32*SMs && gridN > 1) gridN -= 1;
if (gridN > 1)
{
cuMemsetD32Async((CUdeviceptr)dg, 0, S*K, stream);
cuMemsetD32Async((CUdeviceptr)db, 0, S*K, stream);
}
}
layer_norm_segmented_dg_db_nc<T><<<dim3(gridN,gridK,S),32,0,stream>>>(dg, db, dy, x, g, b, mean, rstd, N, S*K, S*K*gridN, K, relu);
dim3 grid(N, S, 1);
if ((K & 3) == 0 && K >= 512)
{
V* DX = ( V*)dx;
const V* DY = (const V*)dy; // in place op
const V* X = (const V*)x;
const float4* G = (const float4*)g;
const float4* B = (const float4*)b;
if (K > 4096)
{
uint threads = CEIL_DIV(K, 32*8) * 32;
int thread2 = THREAD_POW2(threads);
K >>= 2;
layer_norm_segmented_dx_nc<V,float4,2><<<grid,threads,0,stream>>>(DX, DY, X, G, B, mean, rstd, N, S*K, K, rcpK, relu, thread2);
}
else
{
uint threads = CEIL_DIV(K, 32*4) * 32;
int thread2 = THREAD_POW2(threads);
K >>= 2;
layer_norm_segmented_dx_nc<V,float4,1><<<grid,threads,0,stream>>>(DX, DY, X, G, B, mean, rstd, N, S*K, K, rcpK, relu, thread2);
}
}
else
{
if (K > 4096)
{
uint threads = CEIL_DIV(K, 32*8) * 32;
int thread2 = THREAD_POW2(threads);
layer_norm_segmented_dx_nc<T,float ,8><<<grid,threads,0,stream>>>(dx, dy, x, g, b, mean, rstd, N, S*K, K, rcpK, relu, thread2);
}
else if (K >= 512)
{
uint threads = CEIL_DIV(K, 32*4) * 32;
int thread2 = THREAD_POW2(threads);
layer_norm_segmented_dx_nc<T,float ,4><<<grid,threads,0,stream>>>(dx, dy, x, g, b, mean, rstd, N, S*K, K, rcpK, relu, thread2);
}
else
{
uint threads = CEIL_DIV(K, 32*1) * 32;
int thread2 = THREAD_POW2(threads);
layer_norm_segmented_dx_nc<T,float ,1><<<grid,threads,0,stream>>>(dx, dy, x, g, b, mean, rstd, N, S*K, K, rcpK, relu, thread2);
}
}
return true; // TODO
}
template bool LayerNormSegmentedBackward_NC<float,float4>(CUstream stream, int SMs, float* dx, float* dg, float* db, const float* dy, const float* x, const float* g, const float* b, const float* mean, const float* rstd, float epsilon, uint N, uint S, uint K, float rcpK, int relu, int atomics);
template bool LayerNormSegmentedBackward_NC<ehalf,ehalf4>(CUstream stream, int SMs, ehalf* dx, float* dg, float* db, const ehalf* dy, const ehalf* x, const float* g, const float* b, const float* mean, const float* rstd, float epsilon, uint N, uint S, uint K, float rcpK, int relu, int atomics);
template bool LayerNormSegmentedBackward_NC<bhalf,bhalf4>(CUstream stream, int SMs, bhalf* dx, float* dg, float* db, const bhalf* dy, const bhalf* x, const float* g, const float* b, const float* mean, const float* rstd, float epsilon, uint N, uint S, uint K, float rcpK, int relu, int atomics);
#endif // GOOGLE_CUDA
|
078402513ddcc5ee9ea2d17b00ceb1cc07f35c5d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*-----------------------------------------------------------------------------
Name: GPU_KNN_LBT.cu
Desc: This file contains the KNN kd-tree GPU kernel
for a left-balanced array layout
by Shawn Brown ([email protected])
-----------------------------------------------------------------------------*/
#ifndef _GPU_KNN_LBT_CU_
#define _GPU_KNN_LBT_CU_
/*---------------------------------------------------------
Includes
---------------------------------------------------------*/
#include <stdio.h>
//#include <float.h>
#include "GPUTree_API.h"
/*---------------------------------------------------------
Function Definitions
---------------------------------------------------------*/
/*---------------------------------------------------------
Name: GPU_KNN_2D_LBT
Desc: Finds the 'k' Nearest Neighbors in
a search set 'S' for each query point in set 'Q'
Notes: 'S' is stored and implemented via a
static balanced cyclical kd-tree.
---------------------------------------------------------*/
__global__ void
GPU_KNN_2D_LBT
(
GPU_NN_Result * qrs, // OUT: Results of KD Nearest Neighbor Algorithm
float2 * qps, // IN: query points to compute k nearest neighbors for...
GPUNode_2D_LBT * kdTree, // IN: KD Tree (Nodes)
unsigned int * ids, // IN: IDs (from Indices)
unsigned int cNodes, // IN: 'n' number of nodes in kd-tree
unsigned int k // IN: number of nearest neighbors to find
)
{
// Per Thread Local Parameters (shared memory)
__shared__ GPUNode_2D_LBT currNodes[KNN_THREADS_PER_BLOCK]; // Current kd-tree node
__shared__ GPU_Search searchStack[KNN_STACK_SIZE][KNN_THREADS_PER_BLOCK]; // Search Stack
__shared__ GPU_NN_Result knnHeap[KD_KNN_SIZE][KNN_THREADS_PER_BLOCK]; // 'k' NN Closest Heap
__shared__ float2 queryPoints[KNN_THREADS_PER_BLOCK]; // Query Point
// Per Thread Local Parameters (registers)
unsigned int currIdx, currInOut;
unsigned int leftIdx, rightIdx;
unsigned int prevAxis, currAxis, nextAxis;
unsigned int stackTop, maxHeap, countHeap;
float dx, dy;
float queryValue, splitValue;
float dist2Heap, bestDist2;
float diff, diff2, diffDist2;
float * queryVals;
int tidx, currRow, currCol, qidx;
int width, height, nElems;
// Compute Thread index
tidx = (threadIdx.y*blockDim.x) + threadIdx.x;
// Compute Query Index
width = gridDim.x * blockDim.x;
height = gridDim.y * blockDim.y;
nElems = height * width;
currRow = (blockIdx.y * blockDim.y) + threadIdx.y;
currCol = (blockIdx.x * blockDim.x) + threadIdx.x;
qidx = (currRow * width) + currCol;
// Load current Query Point
// read from slower RAM into faster shared memory
queryPoints[tidx] = qps[qidx];
queryVals = (float *)(&queryPoints[tidx]);
// Search Stack Variables
stackTop = 0;
// 'k' NN Heap variables
maxHeap = k; // Maximum # elements on knnHeap
countHeap = 0; // Current # elements on knnHeap
dist2Heap = 0.0f; // Max Dist of any element on heap
bestDist2 = 3.0e38f;
// Put root search info on stack
searchStack[stackTop][tidx].nodeFlags = FLAGS_ROOT_START;
searchStack[stackTop][tidx].splitVal = 3.0e+38F;
stackTop++;
while (stackTop != 0)
{
// Statistics
//best.cNodes++;
// Get Current Node from top of stack
stackTop--;
// Get Node Info
currIdx = (searchStack[stackTop][tidx].nodeFlags & NODE_INDEX_MASK);
currAxis = (searchStack[stackTop][tidx].nodeFlags & SPLIT_AXIS_MASK) >> SPLIT_AXIS_SHIFT;
currInOut = (searchStack[stackTop][tidx].nodeFlags & ON_OFF_MASK) >> ON_OFF_SHIFT;
// Get left and right child indices from binary array layout
leftIdx = currIdx << 1;
rightIdx = leftIdx + 1;
nextAxis = ((currAxis == 1u) ? 0u : 1u);
prevAxis = ((currAxis == 0u) ? 1u : 0u);
// Early Exit Check
if (currInOut == 1) // KD_OUT
{
if (countHeap == maxHeap) // Is heap full yet ?!?
{
// Next Line is effectively queryValue = queryPoints[prevAxis];
queryValue = queryVals[prevAxis];
splitValue = searchStack[stackTop][tidx].splitVal; // Split Value of Parent Node
diff = splitValue - queryValue;
diff2 = diff*diff;
if (diff2 >= dist2Heap)
{
// We can do an early exit for this node
continue;
}
}
}
// WARNING - It's much faster to load this node from global memory after the "Early Exit check" !!!
// Load current node
// read from slower RAM into faster shared memory
currNodes[tidx] = kdTree[currIdx];
// Get Best Fit Dist for checking child ranges
queryValue = queryVals[currAxis];
splitValue = currNodes[tidx].pos[currAxis];
diff = splitValue - queryValue;
diff2 = diff*diff;
// Calc Dist from Median Node to queryLocation
dx = currNodes[tidx].pos[0] - queryPoints[tidx].x;
dy = currNodes[tidx].pos[1] - queryPoints[tidx].y;
diffDist2 = (dx*dx) + (dy*dy);
// See if we should add this point to the 'k' NN Heap
if (countHeap < maxHeap)
{
//-------------------------------
// < 'k' elements on heap
// Do Simple Array append
//-------------------------------
countHeap++;
knnHeap[countHeap][tidx].Id = currIdx;
knnHeap[countHeap][tidx].Dist = diffDist2;
// Do we need to convert the array into a max distance heap ?!?
if (countHeap == maxHeap)
{
// Yes, turn array into a heap, takes O(k) time
for (unsigned int z = countHeap/2; z >= 1; z--)
{
//
// Demote each element in turn (to correct position in heap)
//
unsigned int parentHIdx = z; // Start at specified element
unsigned int childHIdx = z << 1; // left child of parent
// Compare Parent to its children
while (childHIdx <= maxHeap)
{
// Update Distances
float parentD2 = knnHeap[parentHIdx][tidx].Dist;
float childD2 = knnHeap[childHIdx][tidx].Dist;
// Find largest child
if (childHIdx < maxHeap)
{
float rightD2 = knnHeap[childHIdx+1][tidx].Dist;
if (childD2 < rightD2)
{
// Use right child
childHIdx++;
childD2 = rightD2;
}
}
// Compare largest child to parent
if (parentD2 >= childD2)
{
// Parent is larger than both children, exit loop
break;
}
// Demote parent by swapping with its largest child
GPU_NN_Result closeTemp = knnHeap[parentHIdx][tidx];
knnHeap[parentHIdx][tidx] = knnHeap[childHIdx][tidx];
knnHeap[childHIdx][tidx] = closeTemp;
// Update indices
parentHIdx = childHIdx;
childHIdx = parentHIdx<<1; // left child of parent
}
}
// Update trim distances
dist2Heap = knnHeap[1][tidx].Dist;
bestDist2 = dist2Heap;
}
}
else if (diffDist2 < dist2Heap)
{
//-------------------------------
// >= k elements on heap
// Do Heap Replacement
//-------------------------------
// Replace Root Element with new element
knnHeap[1][tidx].Id = currIdx;
knnHeap[1][tidx].Dist = diffDist2;
//
// Demote new element (to correct position in heap)
//
unsigned int parentHIdx = 1; // Start at Root
unsigned int childHIdx = 2; // left child of parent
// Compare current index to its children
while (childHIdx <= maxHeap)
{
// Update Distances
float parentD2 = knnHeap[parentHIdx][tidx].Dist;
float childD2 = knnHeap[childHIdx][tidx].Dist;
// Find largest child
if (childHIdx < maxHeap)
{
float rightD2 = knnHeap[childHIdx+1][tidx].Dist;
if (childD2 < rightD2)
{
// Use right child
childHIdx++;
childD2 = rightD2;
}
}
// Compare largest child to parent
if (parentD2 >= childD2)
{
// Parent node is larger than both children, exit
break;
}
// Demote parent by swapping with its largest child
GPU_NN_Result closeTemp = knnHeap[parentHIdx][tidx];
knnHeap[parentHIdx][tidx] = knnHeap[childHIdx][tidx];
knnHeap[childHIdx][tidx] = closeTemp;
// Update indices
parentHIdx = childHIdx;
childHIdx = parentHIdx<<1; // left child of parent
}
// Update Trim distances
dist2Heap = knnHeap[1][tidx].Dist;
bestDist2 = dist2Heap;
}
// update bestDist2
if (queryValue <= splitValue)
{
// [...QL...BD]...SV -> Include Left range only
// or
// [...QL...SV...BD] -> Include Both Left and Right Sub Ranges
// Check if we should add Right Sub-range to stack
if (diff2 < bestDist2)
{
if (rightIdx <= cNodes)
{
// Push Onto top of stack
searchStack[stackTop][tidx].nodeFlags = (rightIdx & NODE_INDEX_MASK)
| ((nextAxis << SPLIT_AXIS_SHIFT) & SPLIT_AXIS_MASK)
| OFFSIDE_VALUE;
searchStack[stackTop][tidx].splitVal = splitValue;
stackTop++;
}
}
// Always Add Left Sub-range to search path
//nextIdx = currNodes[tidx].Left;
if (leftIdx <= cNodes)
{
// Push Onto top of stack
searchStack[stackTop][tidx].nodeFlags = (leftIdx & NODE_INDEX_MASK)
| ((nextAxis << SPLIT_AXIS_SHIFT) & SPLIT_AXIS_MASK);
// | ONSIDE_VALUE;
searchStack[stackTop][tidx].splitVal = splitValue;
stackTop++;
}
}
else
{
// SV...[BD...QL...] -> Include Right sub range only
// or
// [BD...SV...QL...] -> Include Both Left and Right Sub Ranges
// Check if we should add left sub-range to search path
if (diff2 < bestDist2)
{
// Add to search stack
//nextIdx = currNodes[tidx].Left;
if (leftIdx <= cNodes)
{
// Push Onto top of stack
searchStack[stackTop][tidx].nodeFlags = (leftIdx & NODE_INDEX_MASK)
| ((nextAxis << SPLIT_AXIS_SHIFT) & SPLIT_AXIS_MASK)
| OFFSIDE_VALUE;
searchStack[stackTop][tidx].splitVal = splitValue;
stackTop++;
}
}
// Always Add Right Sub-range
//nextIdx = currNodes[tidx].Right;
if (rightIdx <= cNodes)
{
// Push Onto top of stack
searchStack[stackTop][tidx].nodeFlags = (rightIdx & NODE_INDEX_MASK)
| ((nextAxis << SPLIT_AXIS_SHIFT) & SPLIT_AXIS_MASK);
//| ONSIDE_VALUE;
searchStack[stackTop][tidx].splitVal = splitValue;
stackTop++;
}
}
}
//
// Output Results
//
// We now have a heap of the 'k' nearest neighbors
// Write heap elements to the results array row by row
for (unsigned int i = 1; i <= countHeap; i++)
{
unsigned int offset = (i-1) * nElems;
// Convert Nearest Neighbor Info to final format
// read from slower RAM into faster shared memory
knnHeap[i][tidx].Id = ids[knnHeap[i][tidx].Id]; // Get point IDs by remapping from node indices
knnHeap[i][tidx].Dist = sqrtf( knnHeap[i][tidx].Dist ); // Get True distance (not distance squared)
// Store Result
// write from faster shared memory into slower RAM
qrs[qidx+offset].Id = knnHeap[i][tidx].Id;
qrs[qidx+offset].Dist = knnHeap[i][tidx].Dist;
}
}
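/*---------------------------------------------------------
  Illustrative sketch (not part of the original kernel):
  the traversal above stores the kd-tree as a 1-based
  left-balanced array, so child/parent indices are pure
  arithmetic, and it keeps the k nearest candidates in a
  max-heap on squared distance using the same indexing,
  which is what the "demote" loops implement. The helpers
  below restate both ideas in plain C++; the names are
  hypothetical.
---------------------------------------------------------*/
static inline unsigned int lbt_left  (unsigned int i) { return i << 1; }       // 2i
static inline unsigned int lbt_right (unsigned int i) { return (i << 1) + 1; } // 2i+1
static inline unsigned int lbt_parent(unsigned int i) { return i >> 1; }       // i/2
// Sift-down for a 1-based max-heap of distances, mirroring the demotion loops above.
static inline void heap_sift_down_ref(float* dist, unsigned int start, unsigned int count)
{
	unsigned int parent = start;
	unsigned int child  = parent << 1;           // left child
	while (child <= count)
	{
		if (child < count && dist[child] < dist[child + 1])
			child++;                             // pick the larger child
		if (dist[parent] >= dist[child])
			break;                               // heap property restored
		float tmp = dist[parent];                // swap parent with larger child
		dist[parent] = dist[child];
		dist[child]  = tmp;
		parent = child;
		child  = parent << 1;
	}
}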
/*---------------------------------------------------------
Name: GPU_KNN_3D_LBT
Desc: Finds the 'k' Nearest Neighbors in
a search set 'S' for each query point in set 'Q'
Notes: 'S' is stored and implemented via a
static balanced cyclical kd-tree.
---------------------------------------------------------*/
__global__ void
GPU_KNN_3D_LBT
(
GPU_NN_Result * qrs, // OUT: Results of KD Nearest Neighbor Algorithm
float4 * qps, // IN: query points to compute k nearest neighbors for...
GPUNode_3D_LBT * kdTree, // IN: KD Tree (Nodes)
unsigned int * ids, // IN: IDs (from Indices)
unsigned int cNodes, // IN: 'n' number of nodes in kd-tree
unsigned int k // IN: number of nearest neighbors to find
)
{
// Per Thread Local Parameters (shared memory)
__shared__ GPUNode_3D_LBT currNodes[KNN_THREADS_PER_BLOCK]; // Current kd-tree node
__shared__ GPU_Search searchStack[KNN_STACK_SIZE][KNN_THREADS_PER_BLOCK]; // Search Stack
__shared__ GPU_NN_Result knnHeap[KD_KNN_SIZE][KNN_THREADS_PER_BLOCK]; // 'k' NN Closest Heap
__shared__ float4 queryPoints[KNN_THREADS_PER_BLOCK]; // Query Point
// Per Thread Local Parameters (registers)
unsigned int currIdx, currInOut;
unsigned int currAxis, nextAxis, prevAxis;
unsigned int leftIdx, rightIdx;
unsigned int stackTop, maxHeap, countHeap;
float dx, dy, dz, diff, diff2, diffDist2;
float queryValue, splitValue;
float dist2Heap, bestDist2;
float * queryVals;
int tidx, width, currRow, currCol, qidx;
// Compute Thread index
tidx = (threadIdx.y*blockDim.x) + threadIdx.x;
// Compute Query Index
width = gridDim.x * blockDim.x;
currRow = (blockIdx.y * blockDim.y) + threadIdx.y;
currCol = (blockIdx.x * blockDim.x) + threadIdx.x;
qidx = (currRow * width) + currCol;
// Load current Query Point
// read from slower RAM into faster shared memory
queryPoints[tidx] = qps[qidx];
queryVals = (float *)(&queryPoints[tidx]);
// Compute number of elements (in grid)
int height = gridDim.y * blockDim.y;
int nElems = height * width;
// Search Stack Variables
stackTop = 0;
// 'k' NN Heap variables
maxHeap = k; // Maximum # elements on knnHeap
countHeap = 0; // Current # elements on knnHeap
dist2Heap = 0.0f; // Max Dist of any element on heap
bestDist2 = 3.0e38f;
// Put root search info on stack
searchStack[stackTop][tidx].nodeFlags = FLAGS_ROOT_START;
searchStack[stackTop][tidx].splitVal = 3.0e+38F;
stackTop++;
while (stackTop != 0)
{
// Statistics
//best.cNodes++;
// Get Current Node from top of stack
stackTop--;
// Get Node Info
currIdx = (searchStack[stackTop][tidx].nodeFlags & NODE_INDEX_MASK);
currAxis = (searchStack[stackTop][tidx].nodeFlags & SPLIT_AXIS_MASK) >> SPLIT_AXIS_SHIFT;
currInOut = (searchStack[stackTop][tidx].nodeFlags & ON_OFF_MASK) >> ON_OFF_SHIFT;
// Get left and right child indices from binary array layout
leftIdx = currIdx << 1u;
rightIdx = leftIdx + 1u;
nextAxis = ((currAxis == 2u) ? 0u : currAxis+1u);
prevAxis = ((currAxis == 0u) ? 2u : currAxis-1u);
// Early Exit Check
if (currInOut == 1u) // KD_OUT
{
if (countHeap == maxHeap) // Is heap full yet ?!?
{
// Next Line is effectively queryValue = queryPoints[prevAxis];
queryValue = queryVals[prevAxis];
splitValue = searchStack[stackTop][tidx].splitVal; // Split Value of Parent Node
diff = splitValue - queryValue;
diff2 = diff*diff;
if (diff2 >= dist2Heap)
{
// We can do an early exit for this node
continue;
}
}
}
// WARNING - It's much faster to load this node from global memory after the "Early Exit check" !!!
// Load current node
// read from slower RAM into faster shared memory
currNodes[tidx] = kdTree[currIdx];
// Get Best Fit Dist for checking child ranges
queryValue = queryVals[currAxis];
splitValue = currNodes[tidx].pos[currAxis];
diff = splitValue - queryValue;
diff2 = diff*diff;
// Calc Dist from Median Node to queryLocation
dx = currNodes[tidx].pos[0] - queryVals[0];
dy = currNodes[tidx].pos[1] - queryVals[1];
dz = currNodes[tidx].pos[2] - queryVals[2];
diffDist2 = (dx*dx) + (dy*dy) + (dz*dz);
// See if we should add this point to the 'k' NN Heap
if (countHeap < maxHeap)
{
//-------------------------------
// < 'k' elements on heap
// Do Simple Array append
//-------------------------------
countHeap++;
knnHeap[countHeap][tidx].Id = currIdx;
knnHeap[countHeap][tidx].Dist = diffDist2;
// Do we need to convert the array into a max distance heap ?!?
if (countHeap == maxHeap)
{
// Yes, turn array into a heap, takes O(k) time
for (unsigned int z = countHeap/2; z >= 1; z--)
{
//
// Demote each element in turn (to correct position in heap)
//
unsigned int parentHIdx = z; // Start at specified element
unsigned int childHIdx = z << 1; // left child of parent
// Compare Parent to its children
while (childHIdx <= maxHeap)
{
// Update Distances
float parentD2 = knnHeap[parentHIdx][tidx].Dist;
float childD2 = knnHeap[childHIdx][tidx].Dist;
// Find largest child
if (childHIdx < maxHeap)
{
float rightD2 = knnHeap[childHIdx+1][tidx].Dist;
if (childD2 < rightD2)
{
// Use right child
childHIdx++;
childD2 = rightD2;
}
}
// Compare largest child to parent
if (parentD2 >= childD2)
{
// Parent is larger than both children, exit loop
break;
}
// Demote parent by swapping with its largest child
GPU_NN_Result closeTemp = knnHeap[parentHIdx][tidx];
knnHeap[parentHIdx][tidx] = knnHeap[childHIdx][tidx];
knnHeap[childHIdx][tidx] = closeTemp;
// Update indices
parentHIdx = childHIdx;
childHIdx = parentHIdx<<1; // left child of parent
}
}
// Update trim distances
dist2Heap = knnHeap[1][tidx].Dist;
bestDist2 = dist2Heap;
}
}
else if (diffDist2 < dist2Heap)
{
//-------------------------------
// >= k elements on heap
// Do Heap Replacement
//-------------------------------
// Replace Root Element with new element
knnHeap[1][tidx].Id = currIdx;
knnHeap[1][tidx].Dist = diffDist2;
//
// Demote new element (to correct position in heap)
//
unsigned int parentHIdx = 1; // Start at Root
unsigned int childHIdx = 2; // left child of parent
// Compare current index to its children
while (childHIdx <= maxHeap)
{
// Update Distances
float parentD2 = knnHeap[parentHIdx][tidx].Dist;
float childD2 = knnHeap[childHIdx][tidx].Dist;
// Find largest child
if (childHIdx < maxHeap)
{
float rightD2 = knnHeap[childHIdx+1][tidx].Dist;
if (childD2 < rightD2)
{
// Use right child
childHIdx++;
childD2 = rightD2;
}
}
// Compare largest child to parent
if (parentD2 >= childD2)
{
// Parent node is larger than both children, exit
break;
}
// Demote parent by swapping with its largest child
GPU_NN_Result closeTemp = knnHeap[parentHIdx][tidx];
knnHeap[parentHIdx][tidx] = knnHeap[childHIdx][tidx];
knnHeap[childHIdx][tidx] = closeTemp;
// Update indices
parentHIdx = childHIdx;
childHIdx = parentHIdx<<1; // left child of parent
}
// Update Trim distances
dist2Heap = knnHeap[1][tidx].Dist;
bestDist2 = dist2Heap;
}
// Use the updated bestDist2 to decide which child sub-ranges to search next
if (queryValue <= splitValue)
{
// [...QL...BD]...SV -> Include Left range only
// or
// [...QL...SV...BD] -> Include Both Left and Right Sub Ranges
// Check if we should add Right Sub-range to stack
if (diff2 < bestDist2)
{
if (rightIdx <= cNodes)
{
// Push Onto top of stack
searchStack[stackTop][tidx].nodeFlags = (rightIdx & NODE_INDEX_MASK)
| ((nextAxis << SPLIT_AXIS_SHIFT) & SPLIT_AXIS_MASK)
| OFFSIDE_VALUE;
searchStack[stackTop][tidx].splitVal = splitValue;
stackTop++;
}
}
// Always Add Left Sub-range to search path
//nextIdx = currNodes[tidx].Left;
if (leftIdx <= cNodes)
{
// Push Onto top of stack
searchStack[stackTop][tidx].nodeFlags = (leftIdx & NODE_INDEX_MASK)
| ((nextAxis << SPLIT_AXIS_SHIFT) & SPLIT_AXIS_MASK);
//| ONSIDE_VALUE;
searchStack[stackTop][tidx].splitVal = splitValue;
stackTop++;
}
}
else
{
// SV...[BD...QL...] -> Include Right sub range only
// or
// [BD...SV...QL...] -> Include Both Left and Right Sub Ranges
// Check if we should add left sub-range to search path
if (diff2 < bestDist2)
{
// Add to search stack
//nextIdx = currNodes[tidx].Left;
if (leftIdx <= cNodes)
{
// Push Onto top of stack
searchStack[stackTop][tidx].nodeFlags = (leftIdx & NODE_INDEX_MASK)
| ((nextAxis << SPLIT_AXIS_SHIFT) & SPLIT_AXIS_MASK)
| OFFSIDE_VALUE;
searchStack[stackTop][tidx].splitVal = splitValue;
stackTop++;
}
}
// Always Add Right Sub-range
//nextIdx = currNodes[tidx].Right;
if (rightIdx <= cNodes)
{
// Push Onto top of stack
searchStack[stackTop][tidx].nodeFlags = (rightIdx & NODE_INDEX_MASK)
| ((nextAxis << SPLIT_AXIS_SHIFT) & SPLIT_AXIS_MASK);
//| ONSIDE_VALUE;
searchStack[stackTop][tidx].splitVal = splitValue;
stackTop++;
}
}
}
//
// Output Results
//
// We now have a heap of the 'k' nearest neighbors
// Write heap elements to the results array row by row
for (unsigned int i = 1; i <= countHeap; i++)
{
unsigned int offset = (i-1) * nElems;
// Convert Nearest Neighbor Info to final format
// read from slower RAM memory into faster shared memory
knnHeap[i][tidx].Id = ids[knnHeap[i][tidx].Id]; // Get point IDs by remapping from node indices
knnHeap[i][tidx].Dist = sqrtf( knnHeap[i][tidx].Dist ); // Get True distance (not distance squared)
// Store Result
// write from faster shared memory into slower RAM memory
qrs[qidx+offset].Id = knnHeap[i][tidx].Id;
qrs[qidx+offset].Dist = knnHeap[i][tidx].Dist;
}
}
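/*---------------------------------------------------------
  Illustrative host-side launch sketch -- added by the
  editor, NOT part of the original kernel file.  It shows
  one way the thread grid and the strided result layout that
  GPU_KNN_3D_LBT expects could be set up.  The buffer names
  (d_results, d_queries, d_nodes, d_ids), the one-row grid
  shape, and the multiple-of-block-size assumption on
  nQueries are assumptions made for this sketch only.
---------------------------------------------------------*/
static void Example_Launch_KNN_3D_LBT
(
GPU_NN_Result * d_results, // device buffer, k * nQueries entries
float4 * d_queries, // device buffer, nQueries query points
GPUNode_3D_LBT * d_nodes, // device kd-tree nodes (left-balanced, 1-based)
unsigned int * d_ids, // device node-index -> point-id remap table
unsigned int cNodes, // number of nodes in the kd-tree
unsigned int k, // neighbors per query (heap is 1-based, so k < KD_KNN_SIZE)
unsigned int nQueries // assumed to be a multiple of KNN_THREADS_PER_BLOCK
)
{
// One thread per query point; the block size must match the
// KNN_THREADS_PER_BLOCK shared-memory arrays inside the kernel.
dim3 block( KNN_THREADS_PER_BLOCK, 1 );
dim3 grid ( nQueries / KNN_THREADS_PER_BLOCK, 1 );
GPU_KNN_3D_LBT<<< grid, block >>>( d_results, d_queries, d_nodes, d_ids, cNodes, k );
// The i-th neighbor (1-based) of query q is written to
//   d_results[ q + (i-1) * nQueries ]
// i.e. neighbors are stored plane-by-plane rather than interleaved per query.
}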
/*---------------------------------------------------------
Name: GPU_KNN_4D_LBT
Desc: Finds the 'k' Nearest Neighbors in
a search set 'S' for each query point in set 'Q'
Notes: 'S' is stored and implemented via a
static balanced cyclical kd-tree.
---------------------------------------------------------*/
__global__ void
GPU_KNN_4D_LBT
(
GPU_NN_Result * qrs, // OUT: Results of KD Nearest Neighbor Algorithm
float4 * qps, // IN: query points to compute k nearest neighbors for...
GPUNode_4D_LBT * kdTree, // IN: KD Tree (Nodes)
unsigned int * ids, // IN: IDs (from Indices)
unsigned int cNodes, // IN: 'n' number of nodes in kd-tree
unsigned int k // IN: number of nearest neighbors to find
)
{
// Per Thread Local Parameters (shared memory)
__shared__ GPUNode_4D_LBT currNodes[KNN_THREADS_PER_BLOCK]; // Current kd-tree node
__shared__ GPU_Search searchStack[KNN_STACK_SIZE][KNN_THREADS_PER_BLOCK]; // Search Stack
__shared__ GPU_NN_Result knnHeap[KD_KNN_SIZE][KNN_THREADS_PER_BLOCK]; // 'k' NN Closest Heap
__shared__ float4 queryPoints[KNN_THREADS_PER_BLOCK]; // Query Point
// Per Thread Local Parameters (registers)
unsigned int currIdx, currInOut;
unsigned int leftIdx, rightIdx;
unsigned int currAxis, nextAxis, prevAxis;
unsigned int stackTop, maxHeap, countHeap;
float queryValue, splitValue;
float dist2Heap, bestDist2;
float dx, dy, dz, dw;
float diff, diff2, diffDist2;
int tidx, width, currRow, currCol, qidx;
float * queryVals;
// Compute Thread index
tidx = (threadIdx.y*blockDim.x) + threadIdx.x;
// Compute Query Index
width = gridDim.x * blockDim.x;
currRow = (blockIdx.y * blockDim.y) + threadIdx.y;
currCol = (blockIdx.x * blockDim.x) + threadIdx.x;
qidx = (currRow * width) + currCol;
// Load current Query Point
// read from slower RAM into faster shared memory
queryPoints[tidx] = qps[qidx];
queryVals = (float *)(&queryPoints[tidx]);
// Compute number of elements (in grid)
int height = gridDim.y * blockDim.y;
int nElems = height * width;
// Search Stack Variables
stackTop = 0;
// 'k' NN Heap variables
maxHeap = k; // Maximum # elements on knnHeap
countHeap = 0; // Current # elements on knnHeap
dist2Heap = 0.0f; // Max Dist of any element on heap
bestDist2 = 3.0e38f;
// Put root search info on stack
searchStack[stackTop][tidx].nodeFlags = FLAGS_ROOT_START;
searchStack[stackTop][tidx].splitVal = 3.0e+38F;
stackTop++;
while (stackTop != 0)
{
// Statistics
//best.cNodes++;
// Get Current Node from top of stack
stackTop--;
// Get Node Info
currIdx = (searchStack[stackTop][tidx].nodeFlags & NODE_INDEX_MASK);
currAxis = (searchStack[stackTop][tidx].nodeFlags & SPLIT_AXIS_MASK) >> SPLIT_AXIS_SHIFT;
currInOut = (searchStack[stackTop][tidx].nodeFlags & ON_OFF_MASK) >> ON_OFF_SHIFT;
// Get left and right child indices from binary array layout
leftIdx = currIdx << 1u;
rightIdx = leftIdx + 1u;
nextAxis = ((currAxis == 3u) ? 0u : currAxis+1u);
prevAxis = ((currAxis == 0u) ? 3u : currAxis-1u);
// Early Exit Check
if (currInOut == 1u) // KD_OUT
{
if (countHeap == maxHeap) // Is heap full yet ?!?
{
// Next Line is effectively queryValue = queryPoints[prevAxis];
queryValue = queryVals[prevAxis];
splitValue = searchStack[stackTop][tidx].splitVal; // Split Value of Parent Node
diff = splitValue - queryValue;
diff2 = diff*diff;
if (diff2 >= dist2Heap)
{
// We can do an early exit for this node
continue;
}
}
}
// WARNING - It's Much faster to load this node from global memory after the "Early Exit check" !!!
// Load current node
// read from slower RAM into faster shared memory
currNodes[tidx] = kdTree[currIdx];
// Get Best Fit Dist for checking child ranges
queryValue = queryVals[currAxis];
splitValue = currNodes[tidx].pos[currAxis];
diff = splitValue - queryValue;
diff2 = diff*diff;
// Calc Dist from Median Node to queryLocation
dx = currNodes[tidx].pos[0] - queryVals[0];
dy = currNodes[tidx].pos[1] - queryVals[1];
dz = currNodes[tidx].pos[2] - queryVals[2];
dw = currNodes[tidx].pos[3] - queryVals[3];
diffDist2 = (dx*dx) + (dy*dy) + (dz*dz) + (dw*dw);
// See if we should add this point to the 'k' NN Heap
if (countHeap < maxHeap)
{
//-------------------------------
// < 'k' elements on heap
// Do Simple Array append
//-------------------------------
countHeap++;
knnHeap[countHeap][tidx].Id = currIdx;
knnHeap[countHeap][tidx].Dist = diffDist2;
// Do we need to convert the array into a max distance heap ?!?
if (countHeap == maxHeap)
{
// Yes, turn array into a heap, takes O(k) time
for (unsigned int z = countHeap/2; z >= 1; z--)
{
//
// Demote each element in turn (to correct position in heap)
//
unsigned int parentHIdx = z; // Start at specified element
unsigned int childHIdx = z << 1; // left child of parent
// Compare Parent to its children
while (childHIdx <= maxHeap)
{
// Update Distances
float parentD2 = knnHeap[parentHIdx][tidx].Dist;
float childD2 = knnHeap[childHIdx][tidx].Dist;
// Find largest child
if (childHIdx < maxHeap)
{
float rightD2 = knnHeap[childHIdx+1][tidx].Dist;
if (childD2 < rightD2)
{
// Use right child
childHIdx++;
childD2 = rightD2;
}
}
// Compare largest child to parent
if (parentD2 >= childD2)
{
// Parent is larger than both children, exit loop
break;
}
// Demote parent by swapping with its largest child
GPU_NN_Result closeTemp = knnHeap[parentHIdx][tidx];
knnHeap[parentHIdx][tidx] = knnHeap[childHIdx][tidx];
knnHeap[childHIdx][tidx] = closeTemp;
// Update indices
parentHIdx = childHIdx;
childHIdx = parentHIdx<<1; // left child of parent
}
}
// Update trim distances
dist2Heap = knnHeap[1][tidx].Dist;
bestDist2 = dist2Heap;
}
}
else if (diffDist2 < dist2Heap)
{
//-------------------------------
// >= k elements on heap
// Do Heap Replacement
//-------------------------------
// Replace Root Element with new element
knnHeap[1][tidx].Id = currIdx;
knnHeap[1][tidx].Dist = diffDist2;
//
// Demote new element (to correct position in heap)
//
unsigned int parentHIdx = 1; // Start at Root
unsigned int childHIdx = 2; // left child of parent
// Compare current index to its children
while (childHIdx <= maxHeap)
{
// Update Distances
float parentD2 = knnHeap[parentHIdx][tidx].Dist;
float childD2 = knnHeap[childHIdx][tidx].Dist;
// Find largest child
if (childHIdx < maxHeap)
{
float rightD2 = knnHeap[childHIdx+1][tidx].Dist;
if (childD2 < rightD2)
{
// Use right child
childHIdx++;
childD2 = rightD2;
}
}
// Compare largest child to parent
if (parentD2 >= childD2)
{
// Parent node is larger than both children, exit
break;
}
// Demote parent by swapping with its largest child
GPU_NN_Result closeTemp = knnHeap[parentHIdx][tidx];
knnHeap[parentHIdx][tidx] = knnHeap[childHIdx][tidx];
knnHeap[childHIdx][tidx] = closeTemp;
// Update indices
parentHIdx = childHIdx;
childHIdx = parentHIdx<<1; // left child of parent
}
// Update Trim distances
dist2Heap = knnHeap[1][tidx].Dist;
bestDist2 = dist2Heap;
}
// Use the updated bestDist2 to decide which child sub-ranges to search next
if (queryValue <= splitValue)
{
// [...QL...BD]...SV -> Include Left range only
// or
// [...QL...SV...BD] -> Include Both Left and Right Sub Ranges
// Check if we should add Right Sub-range to stack
if (diff2 < bestDist2)
{
if (rightIdx <= cNodes)
{
// Push Onto top of stack
searchStack[stackTop][tidx].nodeFlags = (rightIdx & NODE_INDEX_MASK)
| ((nextAxis << SPLIT_AXIS_SHIFT) & SPLIT_AXIS_MASK)
| OFFSIDE_VALUE;
searchStack[stackTop][tidx].splitVal = splitValue;
stackTop++;
}
}
// Always Add Left Sub-range to search path
//nextIdx = currNodes[tidx].Left;
if (leftIdx <= cNodes)
{
// Push Onto top of stack
searchStack[stackTop][tidx].nodeFlags = (leftIdx & NODE_INDEX_MASK)
| ((nextAxis << SPLIT_AXIS_SHIFT) & SPLIT_AXIS_MASK);
//| ONSIDE_VALUE;
searchStack[stackTop][tidx].splitVal = splitValue;
stackTop++;
}
}
else
{
// SV...[BD...QL...] -> Include Right sub range only
// or
// [BD...SV...QL...] -> Include Both Left and Right Sub Ranges
// Check if we should add left sub-range to search path
if (diff2 < bestDist2)
{
// Add to search stack
//nextIdx = currNodes[tidx].Left;
if (leftIdx <= cNodes)
{
// Push Onto top of stack
searchStack[stackTop][tidx].nodeFlags = (leftIdx & NODE_INDEX_MASK)
| ((nextAxis << SPLIT_AXIS_SHIFT) & SPLIT_AXIS_MASK)
| OFFSIDE_VALUE;
searchStack[stackTop][tidx].splitVal = splitValue;
stackTop++;
}
}
// Always Add Right Sub-range
//nextIdx = currNodes[tidx].Right;
if (rightIdx <= cNodes)
{
// Push Onto top of stack
searchStack[stackTop][tidx].nodeFlags = (rightIdx & NODE_INDEX_MASK)
| ((nextAxis << SPLIT_AXIS_SHIFT) & SPLIT_AXIS_MASK);
//| ONSIDE_VALUE;
searchStack[stackTop][tidx].splitVal = splitValue;
stackTop++;
}
}
}
//
// Output Results
//
// We now have a heap of the 'k' nearest neighbors
// Write heap elements to the results array row by row
for (unsigned int i = 1; i <= countHeap; i++)
{
unsigned int offset = (i-1) * nElems;
// Convert Nearest Neighbor Info to final format
// read from slower RAM memory into faster shared memory
knnHeap[i][tidx].Id = ids[knnHeap[i][tidx].Id]; // Get point IDs by remapping from node indices
knnHeap[i][tidx].Dist = sqrtf( knnHeap[i][tidx].Dist ); // Get True distance (not distance squared)
// Store Result
// write from faster shared memory into slower RAM memory
qrs[qidx+offset].Id = knnHeap[i][tidx].Id;
qrs[qidx+offset].Dist = knnHeap[i][tidx].Dist;
}
}
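/*---------------------------------------------------------
  Reference sketch added for illustration (not in the
  original file): the index arithmetic the left-balanced
  array layout relies on.  Node 1 is the root; a child index
  is valid while it is <= cNodes, which is exactly what the
  "leftIdx <= cNodes" / "rightIdx <= cNodes" tests above
  check before pushing a child onto the search stack.
---------------------------------------------------------*/
static __host__ __device__ inline unsigned int LBT_LeftChild ( unsigned int i ) { return i << 1u; }
static __host__ __device__ inline unsigned int LBT_RightChild( unsigned int i ) { return (i << 1u) + 1u; }
static __host__ __device__ inline unsigned int LBT_Parent    ( unsigned int i ) { return i >> 1u; }
// The split axis simply cycles through the dimensions, e.g. x -> y -> z -> w -> x for the 4-D tree:
static __host__ __device__ inline unsigned int LBT_NextAxis4D( unsigned int axis ) { return (axis == 3u) ? 0u : axis + 1u; }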
/*---------------------------------------------------------
Name: GPU_KNN_6D_LBT
Desc: Finds the 'k' Nearest Neighbors in
a search set 'S' for each query point in set 'Q'
Notes: 'S' is stored and implemented via a
static balanced cyclical kd-tree.
---------------------------------------------------------*/
__global__ void
GPU_KNN_6D_LBT
(
GPU_NN_Result * qrs, // OUT: Results of KD Nearest Neighbor Algorithm
GPU_Point6D * qps, // IN: query points to compute k nearest neighbors for...
GPUNode_6D_LBT * kdTree, // IN: KD Tree (Nodes)
unsigned int * ids, // IN: IDs (from Indices)
unsigned int cNodes, // IN: 'n' number of nodes in kd-tree
unsigned int k // IN: number of nearest neighbors to find
)
{
// Per Thread Local Parameters (shared memory)
__shared__ GPUNode_6D_LBT currNodes[KNN_THREADS_PER_BLOCK]; // Current kd-tree node
__shared__ GPU_Search searchStack[KNN_STACK_SIZE][KNN_THREADS_PER_BLOCK]; // Search Stack
__shared__ GPU_NN_Result knnHeap[KD_KNN_SIZE][KNN_THREADS_PER_BLOCK]; // 'k' NN Closest Heap
__shared__ GPU_Point6D queryPoints[KNN_THREADS_PER_BLOCK]; // Query Point
// Per Thread Local Parameters (registers)
unsigned int currIdx, currInOut;
unsigned int leftIdx, rightIdx;
unsigned int currAxis, nextAxis, prevAxis;
unsigned int stackTop, maxHeap, countHeap;
float queryValue, splitValue;
float dist2Heap, bestDist2;
float dx, dy, dz, dw, ds, dt;
float diff, diff2, diffDist2;
int tidx, width, currRow, currCol, qidx;
//float * queryVals;
// Compute Thread index
tidx = (threadIdx.y*blockDim.x) + threadIdx.x;
// Compute Query Index
width = gridDim.x * blockDim.x;
currRow = (blockIdx.y * blockDim.y) + threadIdx.y;
currCol = (blockIdx.x * blockDim.x) + threadIdx.x;
qidx = (currRow * width) + currCol;
// Load current Query Point into local (fast) memory
// Read from slow RAM memory into faster shared memory
queryPoints[tidx] = qps[qidx];
// Compute number of elements (in grid)
int height = gridDim.y * blockDim.y;
int nElems = height * width;
// Search Stack Variables
stackTop = 0;
// 'k' NN Heap variables
maxHeap = k; // Maximum # elements on knnHeap
countHeap = 0; // Current # elements on knnHeap
dist2Heap = 0.0f; // Max Dist of any element on heap
bestDist2 = 3.0e38f;
// Put root search info on stack
searchStack[stackTop][tidx].nodeFlags = FLAGS_ROOT_START;
searchStack[stackTop][tidx].splitVal = 3.0e+38F;
stackTop++;
while (stackTop != 0)
{
// Statistics
//best.cNodes++;
// Get Current Node from top of stack
stackTop--;
// Get Node Info
currIdx = (searchStack[stackTop][tidx].nodeFlags & NODE_INDEX_MASK);
currAxis = (searchStack[stackTop][tidx].nodeFlags & SPLIT_AXIS_MASK) >> SPLIT_AXIS_SHIFT;
currInOut = (searchStack[stackTop][tidx].nodeFlags & ON_OFF_MASK) >> ON_OFF_SHIFT;
// Get left and right child indices from binary array layout
leftIdx = currIdx << 1u;
rightIdx = leftIdx + 1u;
nextAxis = ((currAxis == 5u) ? 0u : currAxis+1u);
prevAxis = ((currAxis == 0u) ? 5u : currAxis-1u);
// Early Exit Check
if (currInOut == 1u) // KD_OUT
{
if (countHeap == maxHeap) // Is heap full yet ?!?
{
// Next Line is effectively queryValue = queryPoints[prevAxis];
queryValue = queryPoints[tidx].pos[prevAxis];
splitValue = searchStack[stackTop][tidx].splitVal; // Split Value of Parent Node
diff = splitValue - queryValue;
diff2 = diff*diff;
if (diff2 >= dist2Heap)
{
// We can do an early exit for this node
continue;
}
}
}
// WARNING - It's Much faster to load this node from global memory after the "Early Exit check" !!!
// Load current node
// read from slower RAM into faster shared memory
currNodes[tidx] = kdTree[currIdx];
// Get Best Fit Dist for checking child ranges
queryValue = queryPoints[tidx].pos[currAxis];
splitValue = currNodes[tidx].pos[currAxis];
diff = splitValue - queryValue;
diff2 = diff*diff;
// Calc Dist from Median Node to queryLocation
dx = currNodes[tidx].pos[0] - queryPoints[tidx].pos[0];
dy = currNodes[tidx].pos[1] - queryPoints[tidx].pos[1];
dz = currNodes[tidx].pos[2] - queryPoints[tidx].pos[2];
dw = currNodes[tidx].pos[3] - queryPoints[tidx].pos[3];
ds = currNodes[tidx].pos[4] - queryPoints[tidx].pos[4];
dt = currNodes[tidx].pos[5] - queryPoints[tidx].pos[5];
diffDist2 = (dx*dx) + (dy*dy) + (dz*dz) + (dw*dw) + (ds*ds) + (dt*dt);
// See if we should add this point to the 'k' NN Heap
if (countHeap < maxHeap)
{
//-------------------------------
// < 'k' elements on heap
// Do Simple Array append
//-------------------------------
countHeap++;
knnHeap[countHeap][tidx].Id = currIdx;
knnHeap[countHeap][tidx].Dist = diffDist2;
// Do we need to convert the array into a max distance heap ?!?
if (countHeap == maxHeap)
{
// Yes, turn array into a heap, takes O(k) time
for (unsigned int z = countHeap/2; z >= 1; z--)
{
//
// Demote each element in turn (to correct position in heap)
//
unsigned int parentHIdx = z; // Start at specified element
unsigned int childHIdx = z << 1; // left child of parent
// Compare Parent to its children
while (childHIdx <= maxHeap)
{
// Update Distances
float parentD2 = knnHeap[parentHIdx][tidx].Dist;
float childD2 = knnHeap[childHIdx][tidx].Dist;
// Find largest child
if (childHIdx < maxHeap)
{
float rightD2 = knnHeap[childHIdx+1][tidx].Dist;
if (childD2 < rightD2)
{
// Use right child
childHIdx++;
childD2 = rightD2;
}
}
// Compare largest child to parent
if (parentD2 >= childD2)
{
// Parent is larger than both children, exit loop
break;
}
// Demote parent by swapping with its largest child
GPU_NN_Result closeTemp = knnHeap[parentHIdx][tidx];
knnHeap[parentHIdx][tidx] = knnHeap[childHIdx][tidx];
knnHeap[childHIdx][tidx] = closeTemp;
// Update indices
parentHIdx = childHIdx;
childHIdx = parentHIdx<<1; // left child of parent
}
}
// Update trim distances
dist2Heap = knnHeap[1][tidx].Dist;
bestDist2 = dist2Heap;
}
}
else if (diffDist2 < dist2Heap)
{
//-------------------------------
// >= k elements on heap
// Do Heap Replacement
//-------------------------------
// Replace Root Element with new element
knnHeap[1][tidx].Id = currIdx;
knnHeap[1][tidx].Dist = diffDist2;
//
// Demote new element (to correct position in heap)
//
unsigned int parentHIdx = 1; // Start at Root
unsigned int childHIdx = 2; // left child of parent
// Compare current index to its children
while (childHIdx <= maxHeap)
{
// Update Distances
float parentD2 = knnHeap[parentHIdx][tidx].Dist;
float childD2 = knnHeap[childHIdx][tidx].Dist;
// Find largest child
if (childHIdx < maxHeap)
{
float rightD2 = knnHeap[childHIdx+1][tidx].Dist;
if (childD2 < rightD2)
{
// Use right child
childHIdx++;
childD2 = rightD2;
}
}
// Compare largest child to parent
if (parentD2 >= childD2)
{
// Parent node is larger than both children, exit
break;
}
// Demote parent by swapping with its largest child
GPU_NN_Result closeTemp = knnHeap[parentHIdx][tidx];
knnHeap[parentHIdx][tidx] = knnHeap[childHIdx][tidx];
knnHeap[childHIdx][tidx] = closeTemp;
// Update indices
parentHIdx = childHIdx;
childHIdx = parentHIdx<<1; // left child of parent
}
// Update Trim distances
dist2Heap = knnHeap[1][tidx].Dist;
bestDist2 = dist2Heap;
}
// Use the updated bestDist2 to decide which child sub-ranges to search next
if (queryValue <= splitValue)
{
// [...QL...BD]...SV -> Include Left range only
// or
// [...QL...SV...BD] -> Include Both Left and Right Sub Ranges
// Check if we should add Right Sub-range to stack
if (diff2 < bestDist2)
{
if (rightIdx <= cNodes)
{
// Push Onto top of stack
searchStack[stackTop][tidx].nodeFlags = (rightIdx & NODE_INDEX_MASK)
| ((nextAxis << SPLIT_AXIS_SHIFT) & SPLIT_AXIS_MASK)
| OFFSIDE_VALUE;
searchStack[stackTop][tidx].splitVal = splitValue;
stackTop++;
}
}
// Always Add Left Sub-range to search path
//nextIdx = currNodes[tidx].Left;
if (leftIdx <= cNodes)
{
// Push Onto top of stack
searchStack[stackTop][tidx].nodeFlags = (leftIdx & NODE_INDEX_MASK)
| ((nextAxis << SPLIT_AXIS_SHIFT) & SPLIT_AXIS_MASK);
//| ONSIDE_VALUE;
searchStack[stackTop][tidx].splitVal = splitValue;
stackTop++;
}
}
else
{
// SV...[BD...QL...] -> Include Right sub range only
// or
// [BD...SV...QL...] -> Include Both Left and Right Sub Ranges
// Check if we should add left sub-range to search path
if (diff2 < bestDist2)
{
// Add to search stack
//nextIdx = currNodes[tidx].Left;
if (leftIdx <= cNodes)
{
// Push Onto top of stack
searchStack[stackTop][tidx].nodeFlags = (leftIdx & NODE_INDEX_MASK)
| ((nextAxis << SPLIT_AXIS_SHIFT) & SPLIT_AXIS_MASK)
| OFFSIDE_VALUE;
searchStack[stackTop][tidx].splitVal = splitValue;
stackTop++;
}
}
// Always Add Right Sub-range
//nextIdx = currNodes[tidx].Right;
if (rightIdx <= cNodes)
{
// Push Onto top of stack
searchStack[stackTop][tidx].nodeFlags = (rightIdx & NODE_INDEX_MASK)
| ((nextAxis << SPLIT_AXIS_SHIFT) & SPLIT_AXIS_MASK);
//| ONSIDE_VALUE;
searchStack[stackTop][tidx].splitVal = splitValue;
stackTop++;
}
}
}
//
// Output Results
//
// We now have a heap of the 'k' nearest neighbors
// Write heap elements to the results array row by row
for (unsigned int i = 1; i <= countHeap; i++)
{
unsigned int offset = (i-1) * nElems;
// Convert Nearest Neighbor Info to final format
// read from slower RAM memory into faster shared memory
knnHeap[i][tidx].Id = ids[knnHeap[i][tidx].Id]; // Get point IDs by remapping from node indices
knnHeap[i][tidx].Dist = sqrtf( knnHeap[i][tidx].Dist ); // Get True distance (not distance squared)
// Store Result
// write from faster shared memory into slower RAM memory
qrs[qidx+offset].Id = knnHeap[i][tidx].Id;
qrs[qidx+offset].Dist = knnHeap[i][tidx].Dist;
}
}
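/*---------------------------------------------------------
  Illustrative sketch, added by the editor (not original
  code): the 1-based max-heap "sift-down" that every kernel
  above repeats inline, written once over a plain array of
  squared distances.  Slot 1 always holds the largest of the
  k best distances, which is why dist2Heap is re-read from
  knnHeap[1] after every heap rebuild or root replacement.
---------------------------------------------------------*/
static __host__ __device__ inline void Example_SiftDownMaxHeap( float * dist, unsigned int maxHeap )
{
unsigned int parent = 1u; // start at the root (1-based indexing)
unsigned int child = 2u; // left child of the root
while (child <= maxHeap)
{
// Pick the larger of the two children
if ((child < maxHeap) && (dist[child] < dist[child+1]))
{
child++;
}
// Heap property restored -- parent is not smaller than its largest child
if (dist[parent] >= dist[child])
{
break;
}
// Demote the parent by swapping it with its largest child
float tmp = dist[parent];
dist[parent] = dist[child];
dist[child] = tmp;
parent = child;
child = parent << 1u;
}
}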
#endif // _GPU_KNN_LBT_CU_
|
078402513ddcc5ee9ea2d17b00ceb1cc07f35c5d.cu
|
/*-----------------------------------------------------------------------------
Name: GPU_KNN_LBT.cu
Desc: This file contains the KNN kd-tree GPU kernel
for a left-balanced array layout
by Shawn Brown ([email protected])
-----------------------------------------------------------------------------*/
#ifndef _GPU_KNN_LBT_CU_
#define _GPU_KNN_LBT_CU_
/*---------------------------------------------------------
Includes
---------------------------------------------------------*/
#include <stdio.h>
//#include <float.h>
#include "GPUTree_API.h"
/*---------------------------------------------------------
Function Definitions
---------------------------------------------------------*/
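/*---------------------------------------------------------
  Reader's note, added for clarity (the mask/shift constants
  referenced below come from GPUTree_API.h; their numeric
  values are not shown in this file): each search-stack
  entry packs three fields into a single 'nodeFlags' word --
      node index : nodeFlags & NODE_INDEX_MASK
      split axis : (nodeFlags & SPLIT_AXIS_MASK) >> SPLIT_AXIS_SHIFT
      on/off side: (nodeFlags & ON_OFF_MASK) >> ON_OFF_SHIFT
  OFFSIDE_VALUE marks a child pushed from the far side of
  its parent's split plane; only those entries go through
  the early-exit distance test at the top of the search loop.
---------------------------------------------------------*/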
/*---------------------------------------------------------
Name: GPU_KNN_2D_LBT
Desc: Finds the 'k' Nearest Neighbors in
a search set 'S' for each query point in set 'Q'
Notes: 'S' is stored and implemented via a
static balanced cyclical kd-tree.
---------------------------------------------------------*/
__global__ void
GPU_KNN_2D_LBT
(
GPU_NN_Result * qrs, // OUT: Results of KD Nearest Neighbor Algorithm
float2 * qps, // IN: query points to compute k nearest neighbors for...
GPUNode_2D_LBT * kdTree, // IN: KD Tree (Nodes)
unsigned int * ids, // IN: IDs (from Indices)
unsigned int cNodes, // IN: 'n' number of nodes in kd-tree
unsigned int k // IN: number of nearest neighbors to find
)
{
// Per Thread Local Parameters (shared memory)
__shared__ GPUNode_2D_LBT currNodes[KNN_THREADS_PER_BLOCK]; // Current kd-tree node
__shared__ GPU_Search searchStack[KNN_STACK_SIZE][KNN_THREADS_PER_BLOCK]; // Search Stack
__shared__ GPU_NN_Result knnHeap[KD_KNN_SIZE][KNN_THREADS_PER_BLOCK]; // 'k' NN Closest Heap
__shared__ float2 queryPoints[KNN_THREADS_PER_BLOCK]; // Query Point
// Per Thread Local Parameters (registers)
unsigned int currIdx, currInOut;
unsigned int leftIdx, rightIdx;
unsigned int prevAxis, currAxis, nextAxis;
unsigned int stackTop, maxHeap, countHeap;
float dx, dy;
float queryValue, splitValue;
float dist2Heap, bestDist2;
float diff, diff2, diffDist2;
float * queryVals;
int tidx, currRow, currCol, qidx;
int width, height, nElems;
// Compute Thread index
tidx = (threadIdx.y*blockDim.x) + threadIdx.x;
// Compute Query Index
width = gridDim.x * blockDim.x;
height = gridDim.y * blockDim.y;
nElems = height * width;
currRow = (blockIdx.y * blockDim.y) + threadIdx.y;
currCol = (blockIdx.x * blockDim.x) + threadIdx.x;
qidx = (currRow * width) + currCol;
// Load current Query Point
// read from slower RAM into faster shared memory
queryPoints[tidx] = qps[qidx];
queryVals = (float *)(&queryPoints[tidx]);
// Search Stack Variables
stackTop = 0;
// 'k' NN Heap variables
maxHeap = k; // Maximum # elements on knnHeap
countHeap = 0; // Current # elements on knnHeap
dist2Heap = 0.0f; // Max Dist of any element on heap
bestDist2 = 3.0e38f;
// Put root search info on stack
searchStack[stackTop][tidx].nodeFlags = FLAGS_ROOT_START;
searchStack[stackTop][tidx].splitVal = 3.0e+38F;
stackTop++;
while (stackTop != 0)
{
// Statistics
//best.cNodes++;
// Get Current Node from top of stack
stackTop--;
// Get Node Info
currIdx = (searchStack[stackTop][tidx].nodeFlags & NODE_INDEX_MASK);
currAxis = (searchStack[stackTop][tidx].nodeFlags & SPLIT_AXIS_MASK) >> SPLIT_AXIS_SHIFT;
currInOut = (searchStack[stackTop][tidx].nodeFlags & ON_OFF_MASK) >> ON_OFF_SHIFT;
// Get left and right child indices from binary array layout
leftIdx = currIdx << 1;
rightIdx = leftIdx + 1;
nextAxis = ((currAxis == 1u) ? 0u : 1u);
prevAxis = ((currAxis == 0u) ? 1u : 0u);
// Early Exit Check
if (currInOut == 1) // KD_OUT
{
if (countHeap == maxHeap) // Is heap full yet ?!?
{
// Next Line is effectively queryValue = queryPoints[prevAxis];
queryValue = queryVals[prevAxis];
splitValue = searchStack[stackTop][tidx].splitVal; // Split Value of Parent Node
diff = splitValue - queryValue;
diff2 = diff*diff;
if (diff2 >= dist2Heap)
{
// We can do an early exit for this node
continue;
}
}
}
// WARNING - It's Much faster to load this node from global memory after the "Early Exit check" !!!
// Load current node
// read from slower RAM into faster shared memory
currNodes[tidx] = kdTree[currIdx];
// Get Best Fit Dist for checking child ranges
queryValue = queryVals[currAxis];
splitValue = currNodes[tidx].pos[currAxis];
diff = splitValue - queryValue;
diff2 = diff*diff;
// Calc Dist from Median Node to queryLocation
dx = currNodes[tidx].pos[0] - queryPoints[tidx].x;
dy = currNodes[tidx].pos[1] - queryPoints[tidx].y;
diffDist2 = (dx*dx) + (dy*dy);
// See if we should add this point to the 'k' NN Heap
if (countHeap < maxHeap)
{
//-------------------------------
// < 'k' elements on heap
// Do Simple Array append
//-------------------------------
countHeap++;
knnHeap[countHeap][tidx].Id = currIdx;
knnHeap[countHeap][tidx].Dist = diffDist2;
// Do we need to convert the array into a max distance heap ?!?
if (countHeap == maxHeap)
{
// Yes, turn array into a heap, takes O(k) time
for (unsigned int z = countHeap/2; z >= 1; z--)
{
//
// Demote each element in turn (to correct position in heap)
//
unsigned int parentHIdx = z; // Start at specified element
unsigned int childHIdx = z << 1; // left child of parent
// Compare Parent to its children
while (childHIdx <= maxHeap)
{
// Update Distances
float parentD2 = knnHeap[parentHIdx][tidx].Dist;
float childD2 = knnHeap[childHIdx][tidx].Dist;
// Find largest child
if (childHIdx < maxHeap)
{
float rightD2 = knnHeap[childHIdx+1][tidx].Dist;
if (childD2 < rightD2)
{
// Use right child
childHIdx++;
childD2 = rightD2;
}
}
// Compare largest child to parent
if (parentD2 >= childD2)
{
// Parent is larger than both children, exit loop
break;
}
// Demote parent by swapping with its largest child
GPU_NN_Result closeTemp = knnHeap[parentHIdx][tidx];
knnHeap[parentHIdx][tidx] = knnHeap[childHIdx][tidx];
knnHeap[childHIdx][tidx] = closeTemp;
// Update indices
parentHIdx = childHIdx;
childHIdx = parentHIdx<<1; // left child of parent
}
}
// Update trim distances
dist2Heap = knnHeap[1][tidx].Dist;
bestDist2 = dist2Heap;
}
}
else if (diffDist2 < dist2Heap)
{
//-------------------------------
// >= k elements on heap
// Do Heap Replacement
//-------------------------------
// Replace Root Element with new element
knnHeap[1][tidx].Id = currIdx;
knnHeap[1][tidx].Dist = diffDist2;
//
// Demote new element (to correct position in heap)
//
unsigned int parentHIdx = 1; // Start at Root
unsigned int childHIdx = 2; // left child of parent
// Compare current index to its children
while (childHIdx <= maxHeap)
{
// Update Distances
float parentD2 = knnHeap[parentHIdx][tidx].Dist;
float childD2 = knnHeap[childHIdx][tidx].Dist;
// Find largest child
if (childHIdx < maxHeap)
{
float rightD2 = knnHeap[childHIdx+1][tidx].Dist;
if (childD2 < rightD2)
{
// Use right child
childHIdx++;
childD2 = rightD2;
}
}
// Compare largest child to parent
if (parentD2 >= childD2)
{
// Parent node is larger than both children, exit
break;
}
// Demote parent by swapping with its largest child
GPU_NN_Result closeTemp = knnHeap[parentHIdx][tidx];
knnHeap[parentHIdx][tidx] = knnHeap[childHIdx][tidx];
knnHeap[childHIdx][tidx] = closeTemp;
// Update indices
parentHIdx = childHIdx;
childHIdx = parentHIdx<<1; // left child of parent
}
// Update Trim distances
dist2Heap = knnHeap[1][tidx].Dist;
bestDist2 = dist2Heap;
}
// Use the updated bestDist2 to decide which child sub-ranges to search next
if (queryValue <= splitValue)
{
// [...QL...BD]...SV -> Include Left range only
// or
// [...QL...SV...BD] -> Include Both Left and Right Sub Ranges
// Check if we should add Right Sub-range to stack
if (diff2 < bestDist2)
{
if (rightIdx <= cNodes)
{
// Push Onto top of stack
searchStack[stackTop][tidx].nodeFlags = (rightIdx & NODE_INDEX_MASK)
| ((nextAxis << SPLIT_AXIS_SHIFT) & SPLIT_AXIS_MASK)
| OFFSIDE_VALUE;
searchStack[stackTop][tidx].splitVal = splitValue;
stackTop++;
}
}
// Always Add Left Sub-range to search path
//nextIdx = currNodes[tidx].Left;
if (leftIdx <= cNodes)
{
// Push Onto top of stack
searchStack[stackTop][tidx].nodeFlags = (leftIdx & NODE_INDEX_MASK)
| ((nextAxis << SPLIT_AXIS_SHIFT) & SPLIT_AXIS_MASK);
// | ONSIDE_VALUE;
searchStack[stackTop][tidx].splitVal = splitValue;
stackTop++;
}
}
else
{
// SV...[BD...QL...] -> Include Right sub range only
// or
// [BD...SV...QL...] -> Include Both Left and Right Sub Ranges
// Check if we should add left sub-range to search path
if (diff2 < bestDist2)
{
// Add to search stack
//nextIdx = currNodes[tidx].Left;
if (leftIdx <= cNodes)
{
// Push Onto top of stack
searchStack[stackTop][tidx].nodeFlags = (leftIdx & NODE_INDEX_MASK)
| ((nextAxis << SPLIT_AXIS_SHIFT) & SPLIT_AXIS_MASK)
| OFFSIDE_VALUE;
searchStack[stackTop][tidx].splitVal = splitValue;
stackTop++;
}
}
// Always Add Right Sub-range
//nextIdx = currNodes[tidx].Right;
if (rightIdx <= cNodes)
{
// Push Onto top of stack
searchStack[stackTop][tidx].nodeFlags = (rightIdx & NODE_INDEX_MASK)
| ((nextAxis << SPLIT_AXIS_SHIFT) & SPLIT_AXIS_MASK);
//| ONSIDE_VALUE;
searchStack[stackTop][tidx].splitVal = splitValue;
stackTop++;
}
}
}
//
// Output Results
//
// We now have a heap of the 'k' nearest neighbors
// Write heap elements to the results array row by row
for (unsigned int i = 1; i <= countHeap; i++)
{
unsigned int offset = (i-1) * nElems;
// Convert Nearest Neighbor Info to final format
// read from slower RAM memory into faster shared memory
knnHeap[i][tidx].Id = ids[knnHeap[i][tidx].Id]; // Get point IDs by remapping from node indices
knnHeap[i][tidx].Dist = sqrtf( knnHeap[i][tidx].Dist ); // Get True distance (not distance squared)
// Store Result
// write from faster shared memory into slower RAM memory
qrs[qidx+offset].Id = knnHeap[i][tidx].Id;
qrs[qidx+offset].Dist = knnHeap[i][tidx].Dist;
}
}
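/*---------------------------------------------------------
  Illustrative sizing sketch, added by the editor (the
  helper name and its use are assumptions, not original
  code).  For any launch configuration the kernel derives
      width  = gridDim.x * blockDim.x
      height = gridDim.y * blockDim.y
      nElems = width * height
  so the result buffer must hold k * nElems entries and the
  query buffer nElems points, even when some of those points
  are only padding.
---------------------------------------------------------*/
static size_t Example_ResultBufferBytes( dim3 grid, dim3 block, unsigned int k )
{
size_t width = (size_t)grid.x * (size_t)block.x;
size_t height = (size_t)grid.y * (size_t)block.y;
return width * height * (size_t)k * sizeof( GPU_NN_Result );
}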
/*---------------------------------------------------------
Name: GPU_KNN_3D_LBT
Desc: Finds the 'k' Nearest Neighbors in
a search set 'S' for each query point in set 'Q'
Notes: 'S' is stored and implemented via a
static balanced cyclical kd-tree.
---------------------------------------------------------*/
__global__ void
GPU_KNN_3D_LBT
(
GPU_NN_Result * qrs, // OUT: Results of KD Nearest Neighbor Algorithm
float4 * qps, // IN: query points to compute k nearest neighbors for...
GPUNode_3D_LBT * kdTree, // IN: KD Tree (Nodes)
unsigned int * ids, // IN: IDs (from Indices)
unsigned int cNodes, // IN: 'n' number of nodes in kd-tree
unsigned int k // IN: number of nearest neighbors to find
)
{
// Per Thread Local Parameters (shared memory)
__shared__ GPUNode_3D_LBT currNodes[KNN_THREADS_PER_BLOCK]; // Current kd-tree node
__shared__ GPU_Search searchStack[KNN_STACK_SIZE][KNN_THREADS_PER_BLOCK]; // Search Stack
__shared__ GPU_NN_Result knnHeap[KD_KNN_SIZE][KNN_THREADS_PER_BLOCK]; // 'k' NN Closest Heap
__shared__ float4 queryPoints[KNN_THREADS_PER_BLOCK]; // Query Point
// Per Thread Local Parameters (registers)
unsigned int currIdx, currInOut;
unsigned int currAxis, nextAxis, prevAxis;
unsigned int leftIdx, rightIdx;
unsigned int stackTop, maxHeap, countHeap;
float dx, dy, dz, diff, diff2, diffDist2;
float queryValue, splitValue;
float dist2Heap, bestDist2;
float * queryVals;
int tidx, width, currRow, currCol, qidx;
// Compute Thread index
tidx = (threadIdx.y*blockDim.x) + threadIdx.x;
// Compute Query Index
width = gridDim.x * blockDim.x;
currRow = (blockIdx.y * blockDim.y) + threadIdx.y;
currCol = (blockIdx.x * blockDim.x) + threadIdx.x;
qidx = (currRow * width) + currCol;
// Load current Query Point
// read from slower RAM into faster shared memory
queryPoints[tidx] = qps[qidx];
queryVals = (float *)(&queryPoints[tidx]);
// Compute number of elements (in grid)
int height = gridDim.y * blockDim.y;
int nElems = height * width;
// Search Stack Variables
stackTop = 0;
// 'k' NN Heap variables
maxHeap = k; // Maximum # elements on knnHeap
countHeap = 0; // Current # elements on knnHeap
dist2Heap = 0.0f; // Max Dist of any element on heap
bestDist2 = 3.0e38f;
// Put root search info on stack
searchStack[stackTop][tidx].nodeFlags = FLAGS_ROOT_START;
searchStack[stackTop][tidx].splitVal = 3.0e+38F;
stackTop++;
while (stackTop != 0)
{
// Statistics
//best.cNodes++;
// Get Current Node from top of stack
stackTop--;
// Get Node Info
currIdx = (searchStack[stackTop][tidx].nodeFlags & NODE_INDEX_MASK);
currAxis = (searchStack[stackTop][tidx].nodeFlags & SPLIT_AXIS_MASK) >> SPLIT_AXIS_SHIFT;
currInOut = (searchStack[stackTop][tidx].nodeFlags & ON_OFF_MASK) >> ON_OFF_SHIFT;
// Get left and right child indices from binary array layout
leftIdx = currIdx << 1u;
rightIdx = leftIdx + 1u;
nextAxis = ((currAxis == 2u) ? 0u : currAxis+1u);
prevAxis = ((currAxis == 0u) ? 2u : currAxis-1u);
// Early Exit Check
if (currInOut == 1u) // KD_OUT
{
if (countHeap == maxHeap) // Is heap full yet ?!?
{
// Next Line is effectively queryValue = queryPoints[prevAxis];
queryValue = queryVals[prevAxis];
splitValue = searchStack[stackTop][tidx].splitVal; // Split Value of Parent Node
diff = splitValue - queryValue;
diff2 = diff*diff;
if (diff2 >= dist2Heap)
{
// We can do an early exit for this node
continue;
}
}
}
// WARNING - It's Much faster to load this node from global memory after the "Early Exit check" !!!
// Load current node
// read from slower RAM into faster shared memory
currNodes[tidx] = kdTree[currIdx];
// Get Best Fit Dist for checking child ranges
queryValue = queryVals[currAxis];
splitValue = currNodes[tidx].pos[currAxis];
diff = splitValue - queryValue;
diff2 = diff*diff;
// Calc Dist from Median Node to queryLocation
dx = currNodes[tidx].pos[0] - queryVals[0];
dy = currNodes[tidx].pos[1] - queryVals[1];
dz = currNodes[tidx].pos[2] - queryVals[2];
diffDist2 = (dx*dx) + (dy*dy) + (dz*dz);
// See if we should add this point to the 'k' NN Heap
if (countHeap < maxHeap)
{
//-------------------------------
// < 'k' elements on heap
// Do Simple Array append
//-------------------------------
countHeap++;
knnHeap[countHeap][tidx].Id = currIdx;
knnHeap[countHeap][tidx].Dist = diffDist2;
// Do we need to convert the array into a max distance heap ?!?
if (countHeap == maxHeap)
{
// Yes, turn array into a heap, takes O(k) time
for (unsigned int z = countHeap/2; z >= 1; z--)
{
//
// Demote each element in turn (to correct position in heap)
//
unsigned int parentHIdx = z; // Start at specified element
unsigned int childHIdx = z << 1; // left child of parent
// Compare Parent to its children
while (childHIdx <= maxHeap)
{
// Update Distances
float parentD2 = knnHeap[parentHIdx][tidx].Dist;
float childD2 = knnHeap[childHIdx][tidx].Dist;
// Find largest child
if (childHIdx < maxHeap)
{
float rightD2 = knnHeap[childHIdx+1][tidx].Dist;
if (childD2 < rightD2)
{
// Use right child
childHIdx++;
childD2 = rightD2;
}
}
// Compare largest child to parent
if (parentD2 >= childD2)
{
// Parent is larger than both children, exit loop
break;
}
// Demote parent by swapping with its largest child
GPU_NN_Result closeTemp = knnHeap[parentHIdx][tidx];
knnHeap[parentHIdx][tidx] = knnHeap[childHIdx][tidx];
knnHeap[childHIdx][tidx] = closeTemp;
// Update indices
parentHIdx = childHIdx;
childHIdx = parentHIdx<<1; // left child of parent
}
}
// Update trim distances
dist2Heap = knnHeap[1][tidx].Dist;
bestDist2 = dist2Heap;
}
}
else if (diffDist2 < dist2Heap)
{
//-------------------------------
// >= k elements on heap
// Do Heap Replacement
//-------------------------------
// Replace Root Element with new element
knnHeap[1][tidx].Id = currIdx;
knnHeap[1][tidx].Dist = diffDist2;
//
// Demote new element (to correct position in heap)
//
unsigned int parentHIdx = 1; // Start at Root
unsigned int childHIdx = 2; // left child of parent
// Compare current index to its children
while (childHIdx <= maxHeap)
{
// Update Distances
float parentD2 = knnHeap[parentHIdx][tidx].Dist;
float childD2 = knnHeap[childHIdx][tidx].Dist;
// Find largest child
if (childHIdx < maxHeap)
{
float rightD2 = knnHeap[childHIdx+1][tidx].Dist;
if (childD2 < rightD2)
{
// Use right child
childHIdx++;
childD2 = rightD2;
}
}
// Compare largest child to parent
if (parentD2 >= childD2)
{
// Parent node is larger than both children, exit
break;
}
// Demote parent by swapping with its largest child
GPU_NN_Result closeTemp = knnHeap[parentHIdx][tidx];
knnHeap[parentHIdx][tidx] = knnHeap[childHIdx][tidx];
knnHeap[childHIdx][tidx] = closeTemp;
// Update indices
parentHIdx = childHIdx;
childHIdx = parentHIdx<<1; // left child of parent
}
// Update Trim distances
dist2Heap = knnHeap[1][tidx].Dist;
bestDist2 = dist2Heap;
}
// Use the updated bestDist2 to decide which child sub-ranges to search next
if (queryValue <= splitValue)
{
// [...QL...BD]...SV -> Include Left range only
// or
// [...QL...SV...BD] -> Include Both Left and Right Sub Ranges
// Check if we should add Right Sub-range to stack
if (diff2 < bestDist2)
{
if (rightIdx <= cNodes)
{
// Push Onto top of stack
searchStack[stackTop][tidx].nodeFlags = (rightIdx & NODE_INDEX_MASK)
| ((nextAxis << SPLIT_AXIS_SHIFT) & SPLIT_AXIS_MASK)
| OFFSIDE_VALUE;
searchStack[stackTop][tidx].splitVal = splitValue;
stackTop++;
}
}
// Always Add Left Sub-range to search path
//nextIdx = currNodes[tidx].Left;
if (leftIdx <= cNodes)
{
// Push Onto top of stack
searchStack[stackTop][tidx].nodeFlags = (leftIdx & NODE_INDEX_MASK)
| ((nextAxis << SPLIT_AXIS_SHIFT) & SPLIT_AXIS_MASK);
//| ONSIDE_VALUE;
searchStack[stackTop][tidx].splitVal = splitValue;
stackTop++;
}
}
else
{
// SV...[BD...QL...] -> Include Right sub range only
// or
// [BD...SV...QL...] -> Include Both Left and Right Sub Ranges
// Check if we should add left sub-range to search path
if (diff2 < bestDist2)
{
// Add to search stack
//nextIdx = currNodes[tidx].Left;
if (leftIdx <= cNodes)
{
// Push Onto top of stack
searchStack[stackTop][tidx].nodeFlags = (leftIdx & NODE_INDEX_MASK)
| ((nextAxis << SPLIT_AXIS_SHIFT) & SPLIT_AXIS_MASK)
| OFFSIDE_VALUE;
searchStack[stackTop][tidx].splitVal = splitValue;
stackTop++;
}
}
// Always Add Right Sub-range
//nextIdx = currNodes[tidx].Right;
if (rightIdx <= cNodes)
{
// Push Onto top of stack
searchStack[stackTop][tidx].nodeFlags = (rightIdx & NODE_INDEX_MASK)
| ((nextAxis << SPLIT_AXIS_SHIFT) & SPLIT_AXIS_MASK);
//| ONSIDE_VALUE;
searchStack[stackTop][tidx].splitVal = splitValue;
stackTop++;
}
}
}
//
// Output Results
//
// We now have a heap of the 'k' nearest neighbors
// Write heap elements to the results array row by row
for (unsigned int i = 1; i <= countHeap; i++)
{
unsigned int offset = (i-1) * nElems;
// Convert Nearest Neighbor Info to final format
// read from slower RAM memory into faster shared memory
knnHeap[i][tidx].Id = ids[knnHeap[i][tidx].Id]; // Get point IDs by remapping from node indices
knnHeap[i][tidx].Dist = sqrtf( knnHeap[i][tidx].Dist ); // Get True distance (not distance squared)
// Store Result
// write from faster shared memory into slower RAM memory
qrs[qidx+offset].Id = knnHeap[i][tidx].Id;
qrs[qidx+offset].Dist = knnHeap[i][tidx].Dist;
}
}
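/*---------------------------------------------------------
  Note added for illustration (an assumption about intent,
  not original code): the kernels above index a float4
  component by split axis through a float* cast
  ("queryVals = (float *)(&queryPoints[tidx])"), relying on
  x, y, z, w being laid out contiguously.  The same idea as
  a tiny helper:
---------------------------------------------------------*/
static __host__ __device__ inline float Example_Float4Component( const float4 & p, unsigned int axis )
{
const float * v = (const float *)(&p); // v[0]=x, v[1]=y, v[2]=z, v[3]=w
return v[axis];
}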
/*---------------------------------------------------------
Name: GPU_KNN_4D_LBT
Desc: Finds the 'k' Nearest Neighbors in
a search set 'S' for each query point in set 'Q'
Notes: 'S' is stored and implemented via a
static balanced cyclical kd-tree.
---------------------------------------------------------*/
__global__ void
GPU_KNN_4D_LBT
(
GPU_NN_Result * qrs, // OUT: Results of KD Nearest Neighbor Algorithm
float4 * qps, // IN: query points to compute k nearest neighbors for...
GPUNode_4D_LBT * kdTree, // IN: KD Tree (Nodes)
unsigned int * ids, // IN: IDs (from Indices)
unsigned int cNodes, // IN: 'n' number of nodes in kd-tree
unsigned int k // IN: number of nearest neighbors to find
)
{
// Per Thread Local Parameters (shared memory)
__shared__ GPUNode_4D_LBT currNodes[KNN_THREADS_PER_BLOCK]; // Current kd-tree node
__shared__ GPU_Search searchStack[KNN_STACK_SIZE][KNN_THREADS_PER_BLOCK]; // Search Stack
__shared__ GPU_NN_Result knnHeap[KD_KNN_SIZE][KNN_THREADS_PER_BLOCK]; // 'k' NN Closest Heap
__shared__ float4 queryPoints[KNN_THREADS_PER_BLOCK]; // Query Point
// Per Thread Local Parameters (registers)
unsigned int currIdx, currInOut;
unsigned int leftIdx, rightIdx;
unsigned int currAxis, nextAxis, prevAxis;
unsigned int stackTop, maxHeap, countHeap;
float queryValue, splitValue;
float dist2Heap, bestDist2;
float dx, dy, dz, dw;
float diff, diff2, diffDist2;
int tidx, width, currRow, currCol, qidx;
float * queryVals;
// Compute Thread index
tidx = (threadIdx.y*blockDim.x) + threadIdx.x;
// Compute Query Index
width = gridDim.x * blockDim.x;
currRow = (blockIdx.y * blockDim.y) + threadIdx.y;
currCol = (blockIdx.x * blockDim.x) + threadIdx.x;
qidx = (currRow * width) + currCol;
// Load current Query Point
// read from slower RAM into faster shared memory
queryPoints[tidx] = qps[qidx];
queryVals = (float *)(&queryPoints[tidx]);
// Compute number of elements (in grid)
int height = gridDim.y * blockDim.y;
int nElems = height * width;
// Search Stack Variables
stackTop = 0;
// 'k' NN Heap variables
maxHeap = k; // Maximum # elements on knnHeap
countHeap = 0; // Current # elements on knnHeap
dist2Heap = 0.0f; // Max Dist of any element on heap
bestDist2 = 3.0e38f;
// Put root search info on stack
searchStack[stackTop][tidx].nodeFlags = FLAGS_ROOT_START;
searchStack[stackTop][tidx].splitVal = 3.0e+38F;
stackTop++;
while (stackTop != 0)
{
// Statistics
//best.cNodes++;
// Get Current Node from top of stack
stackTop--;
// Get Node Info
currIdx = (searchStack[stackTop][tidx].nodeFlags & NODE_INDEX_MASK);
currAxis = (searchStack[stackTop][tidx].nodeFlags & SPLIT_AXIS_MASK) >> SPLIT_AXIS_SHIFT;
currInOut = (searchStack[stackTop][tidx].nodeFlags & ON_OFF_MASK) >> ON_OFF_SHIFT;
// Get left and right child indices from binary array layout
leftIdx = currIdx << 1u;
rightIdx = leftIdx + 1u;
nextAxis = ((currAxis == 3u) ? 0u : currAxis+1u);
prevAxis = ((currAxis == 0u) ? 3u : currAxis-1u);
// Early Exit Check
if (currInOut == 1u) // KD_OUT
{
if (countHeap == maxHeap) // Is heap full yet ?!?
{
// Next Line is effectively queryValue = queryPoints[prevAxis];
queryValue = queryVals[prevAxis];
splitValue = searchStack[stackTop][tidx].splitVal; // Split Value of Parent Node
diff = splitValue - queryValue;
diff2 = diff*diff;
if (diff2 >= dist2Heap)
{
// We can do an early exit for this node
continue;
}
}
}
// WARNING - It's Much faster to load this node from global memory after the "Early Exit check" !!!
// Load current node
// read from slower RAM into faster shared memory
currNodes[tidx] = kdTree[currIdx];
// Get Best Fit Dist for checking child ranges
queryValue = queryVals[currAxis];
splitValue = currNodes[tidx].pos[currAxis];
diff = splitValue - queryValue;
diff2 = diff*diff;
// Calc Dist from Median Node to queryLocation
dx = currNodes[tidx].pos[0] - queryVals[0];
dy = currNodes[tidx].pos[1] - queryVals[1];
dz = currNodes[tidx].pos[2] - queryVals[2];
dw = currNodes[tidx].pos[3] - queryVals[3];
diffDist2 = (dx*dx) + (dy*dy) + (dz*dz) + (dw*dw);
// See if we should add this point to the 'k' NN Heap
if (countHeap < maxHeap)
{
//-------------------------------
// < 'k' elements on heap
// Do Simple Array append
//-------------------------------
countHeap++;
knnHeap[countHeap][tidx].Id = currIdx;
knnHeap[countHeap][tidx].Dist = diffDist2;
// Do we need to convert the array into a max distance heap ?!?
if (countHeap == maxHeap)
{
// Yes, turn array into a heap, takes O(k) time
for (unsigned int z = countHeap/2; z >= 1; z--)
{
//
// Demote each element in turn (to correct position in heap)
//
unsigned int parentHIdx = z; // Start at specified element
unsigned int childHIdx = z << 1; // left child of parent
// Compare Parent to its children
while (childHIdx <= maxHeap)
{
// Update Distances
float parentD2 = knnHeap[parentHIdx][tidx].Dist;
float childD2 = knnHeap[childHIdx][tidx].Dist;
// Find largest child
if (childHIdx < maxHeap)
{
float rightD2 = knnHeap[childHIdx+1][tidx].Dist;
if (childD2 < rightD2)
{
// Use right child
childHIdx++;
childD2 = rightD2;
}
}
// Compare largest child to parent
if (parentD2 >= childD2)
{
// Parent is larger than both children, exit loop
break;
}
// Demote parent by swapping with its largest child
GPU_NN_Result closeTemp = knnHeap[parentHIdx][tidx];
knnHeap[parentHIdx][tidx] = knnHeap[childHIdx][tidx];
knnHeap[childHIdx][tidx] = closeTemp;
// Update indices
parentHIdx = childHIdx;
childHIdx = parentHIdx<<1; // left child of parent
}
}
// Update trim distances
dist2Heap = knnHeap[1][tidx].Dist;
bestDist2 = dist2Heap;
}
}
else if (diffDist2 < dist2Heap)
{
//-------------------------------
// >= k elements on heap
// Do Heap Replacement
//-------------------------------
// Replace Root Element with new element
knnHeap[1][tidx].Id = currIdx;
knnHeap[1][tidx].Dist = diffDist2;
//
// Demote new element (to correct position in heap)
//
unsigned int parentHIdx = 1; // Start at Root
unsigned int childHIdx = 2; // left child of parent
// Compare current index to its children
while (childHIdx <= maxHeap)
{
// Update Distances
float parentD2 = knnHeap[parentHIdx][tidx].Dist;
float childD2 = knnHeap[childHIdx][tidx].Dist;
// Find largest child
if (childHIdx < maxHeap)
{
float rightD2 = knnHeap[childHIdx+1][tidx].Dist;
if (childD2 < rightD2)
{
// Use right child
childHIdx++;
childD2 = rightD2;
}
}
// Compare largest child to parent
if (parentD2 >= childD2)
{
// Parent node is larger than both children, exit
break;
}
// Demote parent by swapping with its largest child
GPU_NN_Result closeTemp = knnHeap[parentHIdx][tidx];
knnHeap[parentHIdx][tidx] = knnHeap[childHIdx][tidx];
knnHeap[childHIdx][tidx] = closeTemp;
// Update indices
parentHIdx = childHIdx;
childHIdx = parentHIdx<<1; // left child of parent
}
// Update Trim distances
dist2Heap = knnHeap[1][tidx].Dist;
bestDist2 = dist2Heap;
}
// Use the updated bestDist2 to decide which child sub-ranges to search next
if (queryValue <= splitValue)
{
// [...QL...BD]...SV -> Include Left range only
// or
// [...QL...SV...BD] -> Include Both Left and Right Sub Ranges
// Check if we should add Right Sub-range to stack
if (diff2 < bestDist2)
{
if (rightIdx <= cNodes)
{
// Push Onto top of stack
searchStack[stackTop][tidx].nodeFlags = (rightIdx & NODE_INDEX_MASK)
| ((nextAxis << SPLIT_AXIS_SHIFT) & SPLIT_AXIS_MASK)
| OFFSIDE_VALUE;
searchStack[stackTop][tidx].splitVal = splitValue;
stackTop++;
}
}
// Always Add Left Sub-range to search path
//nextIdx = currNodes[tidx].Left;
if (leftIdx <= cNodes)
{
// Push Onto top of stack
searchStack[stackTop][tidx].nodeFlags = (leftIdx & NODE_INDEX_MASK)
| ((nextAxis << SPLIT_AXIS_SHIFT) & SPLIT_AXIS_MASK);
//| ONSIDE_VALUE;
searchStack[stackTop][tidx].splitVal = splitValue;
stackTop++;
}
}
else
{
// SV...[BD...QL...] -> Include Right sub range only
// or
// [BD...SV...QL...] -> Include Both Left and Right Sub Ranges
// Check if we should add left sub-range to search path
if (diff2 < bestDist2)
{
// Add to search stack
//nextIdx = currNodes[tidx].Left;
if (leftIdx <= cNodes)
{
// Push Onto top of stack
searchStack[stackTop][tidx].nodeFlags = (leftIdx & NODE_INDEX_MASK)
| ((nextAxis << SPLIT_AXIS_SHIFT) & SPLIT_AXIS_MASK)
| OFFSIDE_VALUE;
searchStack[stackTop][tidx].splitVal = splitValue;
stackTop++;
}
}
// Always Add Right Sub-range
//nextIdx = currNodes[tidx].Right;
if (rightIdx <= cNodes)
{
// Push Onto top of stack
searchStack[stackTop][tidx].nodeFlags = (rightIdx & NODE_INDEX_MASK)
| ((nextAxis << SPLIT_AXIS_SHIFT) & SPLIT_AXIS_MASK);
//| ONSIDE_VALUE;
searchStack[stackTop][tidx].splitVal = splitValue;
stackTop++;
}
}
}
//
// Output Results
//
// We now have a heap of the 'k' nearest neighbors
// Write heap elements to the results array row by row
for (unsigned int i = 1; i <= countHeap; i++)
{
unsigned int offset = (i-1) * nElems;
// Convert Nearest Neighbor Info to final format
// read from slower RAM memory into faster shared memory
knnHeap[i][tidx].Id = ids[knnHeap[i][tidx].Id]; // Get point IDs by remapping from node indices
knnHeap[i][tidx].Dist = sqrtf( knnHeap[i][tidx].Dist ); // Get True distance (not distance squared)
// Store Result
// write from faster shared memory into slower RAM memory
qrs[qidx+offset].Id = knnHeap[i][tidx].Id;
qrs[qidx+offset].Dist = knnHeap[i][tidx].Dist;
}
}
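/*---------------------------------------------------------
  Illustrative host-side helper, added by the editor (the
  name and buffers are assumptions, not original code): it
  gathers the k neighbors of a single query out of the
  plane-strided layout the kernels write, where neighbor i
  (1-based) of query q lives at results[q + (i-1)*nElems].
---------------------------------------------------------*/
static void Example_GatherNeighbors
(
const GPU_NN_Result * h_results, // host copy of the kernel output
unsigned int nElems, // width * height of the launch grid
unsigned int k, // neighbors requested per query
unsigned int qidx, // query index to gather
GPU_NN_Result * out // OUT: k contiguous results for this query
)
{
for (unsigned int i = 0; i < k; i++)
{
out[i] = h_results[qidx + (i * nElems)];
}
}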
/*---------------------------------------------------------
Name: GPU_KNN_6D_LBT
Desc: Finds the 'k' Nearest Neighbors in
a search set 'S' for each query point in set 'Q'
Notes: 'S' is stored and implemented via a
static balanced cyclical kd-tree.
---------------------------------------------------------*/
__global__ void
GPU_KNN_6D_LBT
(
GPU_NN_Result * qrs, // OUT: Results of KD Nearest Neighbor Algorithm
GPU_Point6D * qps, // IN: query points to compute k nearest neighbors for...
GPUNode_6D_LBT * kdTree, // IN: KD Tree (Nodes)
unsigned int * ids, // IN: IDs (from Indices)
unsigned int cNodes, // IN: 'n' number of nodes in kd-tree
unsigned int k // IN: number of nearest neighbors to find
)
{
// Per Thread Local Parameters (shared memory)
__shared__ GPUNode_6D_LBT currNodes[KNN_THREADS_PER_BLOCK]; // Current kd-tree node
__shared__ GPU_Search searchStack[KNN_STACK_SIZE][KNN_THREADS_PER_BLOCK]; // Search Stack
__shared__ GPU_NN_Result knnHeap[KD_KNN_SIZE][KNN_THREADS_PER_BLOCK]; // 'k' NN Closest Heap
__shared__ GPU_Point6D queryPoints[KNN_THREADS_PER_BLOCK]; // Query Point
// Per Thread Local Parameters (registers)
unsigned int currIdx, currInOut;
unsigned int leftIdx, rightIdx;
unsigned int currAxis, nextAxis, prevAxis;
unsigned int stackTop, maxHeap, countHeap;
float queryValue, splitValue;
float dist2Heap, bestDist2;
float dx, dy, dz, dw, ds, dt;
float diff, diff2, diffDist2;
int tidx, width, currRow, currCol, qidx;
//float * queryVals;
// Compute Thread index
tidx = (threadIdx.y*blockDim.x) + threadIdx.x;
// Compute Query Index
width = gridDim.x * blockDim.x;
currRow = (blockIdx.y * blockDim.y) + threadIdx.y;
currCol = (blockIdx.x * blockDim.x) + threadIdx.x;
qidx = (currRow * width) + currCol;
// Load current Query Point into local (fast) memory
// Read from slow RAM memory into faster shared memory
queryPoints[tidx] = qps[qidx];
// Compute number of elements (in grid)
int height = gridDim.y * blockDim.y;
int nElems = height * width;
// Search Stack Variables
stackTop = 0;
// 'k' NN Heap variables
maxHeap = k; // Maximum # elements on knnHeap
countHeap = 0; // Current # elements on knnHeap
dist2Heap = 0.0f; // Max Dist of any element on heap
bestDist2 = 3.0e38f;
// Put root search info on stack
searchStack[stackTop][tidx].nodeFlags = FLAGS_ROOT_START;
searchStack[stackTop][tidx].splitVal = 3.0e+38F;
stackTop++;
while (stackTop != 0)
{
// Statistics
//best.cNodes++;
// Get Current Node from top of stack
stackTop--;
// Get Node Info
currIdx = (searchStack[stackTop][tidx].nodeFlags & NODE_INDEX_MASK);
currAxis = (searchStack[stackTop][tidx].nodeFlags & SPLIT_AXIS_MASK) >> SPLIT_AXIS_SHIFT;
currInOut = (searchStack[stackTop][tidx].nodeFlags & ON_OFF_MASK) >> ON_OFF_SHIFT;
// Get left and right child indices from binary array layout
leftIdx = currIdx << 1u;
rightIdx = leftIdx + 1u;
nextAxis = ((currAxis == 5u) ? 0u : currAxis+1u);
prevAxis = ((currAxis == 0u) ? 5u : currAxis-1u);
// Early Exit Check
if (currInOut == 1u) // KD_OUT
{
if (countHeap == maxHeap) // Is heap full yet ?!?
{
// Next Line is effectively queryValue = queryPoints[prevAxis];
queryValue = queryPoints[tidx].pos[prevAxis];
splitValue = searchStack[stackTop][tidx].splitVal; // Split Value of Parent Node
diff = splitValue - queryValue;
diff2 = diff*diff;
if (diff2 >= dist2Heap)
{
// We can do an early exit for this node
continue;
}
}
}
// WARNING - It's Much faster to load this node from global memory after the "Early Exit check" !!!
// Load current node
// read from slower RAM into faster shared memory
currNodes[tidx] = kdTree[currIdx];
// Get Best Fit Dist for checking child ranges
queryValue = queryPoints[tidx].pos[currAxis];
splitValue = currNodes[tidx].pos[currAxis];
diff = splitValue - queryValue;
diff2 = diff*diff;
// Calc Dist from Median Node to queryLocation
dx = currNodes[tidx].pos[0] - queryPoints[tidx].pos[0];
dy = currNodes[tidx].pos[1] - queryPoints[tidx].pos[1];
dz = currNodes[tidx].pos[2] - queryPoints[tidx].pos[2];
dw = currNodes[tidx].pos[3] - queryPoints[tidx].pos[3];
ds = currNodes[tidx].pos[4] - queryPoints[tidx].pos[4];
dt = currNodes[tidx].pos[5] - queryPoints[tidx].pos[5];
diffDist2 = (dx*dx) + (dy*dy) + (dz*dz) + (dw*dw) + (ds*ds) + (dt*dt);
// See if we should add this point to the 'k' NN Heap
if (countHeap < maxHeap)
{
//-------------------------------
// < 'k' elements on heap
// Do Simple Array append
//-------------------------------
countHeap++;
knnHeap[countHeap][tidx].Id = currIdx;
knnHeap[countHeap][tidx].Dist = diffDist2;
// Do we need to convert the array into a max distance heap ?!?
if (countHeap == maxHeap)
{
// Yes, turn array into a heap, takes O(k) time
for (unsigned int z = countHeap/2; z >= 1; z--)
{
//
// Demote each element in turn (to correct position in heap)
//
unsigned int parentHIdx = z; // Start at specified element
unsigned int childHIdx = z << 1; // left child of parent
// Compare Parent to its children
while (childHIdx <= maxHeap)
{
// Update Distances
float parentD2 = knnHeap[parentHIdx][tidx].Dist;
float childD2 = knnHeap[childHIdx][tidx].Dist;
// Find largest child
if (childHIdx < maxHeap)
{
float rightD2 = knnHeap[childHIdx+1][tidx].Dist;
if (childD2 < rightD2)
{
// Use right child
childHIdx++;
childD2 = rightD2;
}
}
// Compare largest child to parent
if (parentD2 >= childD2)
{
// Parent is larger than both children, exit loop
break;
}
// Demote parent by swapping with its largest child
GPU_NN_Result closeTemp = knnHeap[parentHIdx][tidx];
knnHeap[parentHIdx][tidx] = knnHeap[childHIdx][tidx];
knnHeap[childHIdx][tidx] = closeTemp;
// Update indices
parentHIdx = childHIdx;
childHIdx = parentHIdx<<1; // left child of parent
}
}
// Update trim distances
dist2Heap = knnHeap[1][tidx].Dist;
bestDist2 = dist2Heap;
}
}
else if (diffDist2 < dist2Heap)
{
//-------------------------------
// >= k elements on heap
// Do Heap Replacement
//-------------------------------
// Replace Root Element with new element
knnHeap[1][tidx].Id = currIdx;
knnHeap[1][tidx].Dist = diffDist2;
//
// Demote new element (to correct position in heap)
//
unsigned int parentHIdx = 1; // Start at Root
unsigned int childHIdx = 2; // left child of parent
// Compare current index to its children
while (childHIdx <= maxHeap)
{
// Update Distances
float parentD2 = knnHeap[parentHIdx][tidx].Dist;
float childD2 = knnHeap[childHIdx][tidx].Dist;
// Find largest child
if (childHIdx < maxHeap)
{
float rightD2 = knnHeap[childHIdx+1][tidx].Dist;
if (childD2 < rightD2)
{
// Use right child
childHIdx++;
childD2 = rightD2;
}
}
// Compare largest child to parent
if (parentD2 >= childD2)
{
// Parent node is larger than both children, exit
break;
}
// Demote parent by swapping with its largest child
GPU_NN_Result closeTemp = knnHeap[parentHIdx][tidx];
knnHeap[parentHIdx][tidx] = knnHeap[childHIdx][tidx];
knnHeap[childHIdx][tidx] = closeTemp;
// Update indices
parentHIdx = childHIdx;
childHIdx = parentHIdx<<1; // left child of parent
}
// Update Trim distances
dist2Heap = knnHeap[1][tidx].Dist;
bestDist2 = dist2Heap;
}
// Decide which child sub-ranges to push onto the search stack, using bestDist2 as the trim distance
if (queryValue <= splitValue)
{
// [...QL...BD]...SV -> Include Left range only
// or
// [...QL...SV...BD] -> Include Both Left and Right Sub Ranges
// Check if we should add Right Sub-range to stack
if (diff2 < bestDist2)
{
if (rightIdx <= cNodes)
{
// Push Onto top of stack
searchStack[stackTop][tidx].nodeFlags = (rightIdx & NODE_INDEX_MASK)
| ((nextAxis << SPLIT_AXIS_SHIFT) & SPLIT_AXIS_MASK)
| OFFSIDE_VALUE;
searchStack[stackTop][tidx].splitVal = splitValue;
stackTop++;
}
}
// Always Add Left Sub-range to search path
//nextIdx = currNodes[tidx].Left;
if (leftIdx <= cNodes)
{
// Push Onto top of stack
searchStack[stackTop][tidx].nodeFlags = (leftIdx & NODE_INDEX_MASK)
| ((nextAxis << SPLIT_AXIS_SHIFT) & SPLIT_AXIS_MASK);
//| ONSIDE_VALUE;
searchStack[stackTop][tidx].splitVal = splitValue;
stackTop++;
}
}
else
{
// SV...[BD...QL...] -> Include Right sub range only
// or
// [BD...SV...QL...] -> Include Both Left and Right Sub Ranges
// Check if we should add left sub-range to search path
if (diff2 < bestDist2)
{
// Add to search stack
//nextIdx = currNodes[tidx].Left;
if (leftIdx <= cNodes)
{
// Push Onto top of stack
searchStack[stackTop][tidx].nodeFlags = (leftIdx & NODE_INDEX_MASK)
| ((nextAxis << SPLIT_AXIS_SHIFT) & SPLIT_AXIS_MASK)
| OFFSIDE_VALUE;
searchStack[stackTop][tidx].splitVal = splitValue;
stackTop++;
}
}
// Always Add Right Sub-range
//nextIdx = currNodes[tidx].Right;
if (rightIdx <= cNodes)
{
// Push Onto top of stack
searchStack[stackTop][tidx].nodeFlags = (rightIdx & NODE_INDEX_MASK)
| ((nextAxis << SPLIT_AXIS_SHIFT) & SPLIT_AXIS_MASK);
//| ONSIDE_VALUE;
searchStack[stackTop][tidx].splitVal = splitValue;
stackTop++;
}
}
}
//
// Output Results
//
// We now have a heap of the 'k' nearest neighbors
// Write heap elements to the results array row by row
for (unsigned int i = 1; i <= countHeap; i++)
{
unsigned int offset = (i-1) * nElems;
// Convert Nearest Neighbor Info to final format
// read from slower RAM memory into faster shared memory
knnHeap[i][tidx].Id = ids[knnHeap[i][tidx].Id]; // Get point IDs by remapping from node indices
knnHeap[i][tidx].Dist = sqrtf( knnHeap[i][tidx].Dist ); // Get True distance (not distance squared)
// Store Result
// write from faster shared memory into slower RAM memory
qrs[qidx+offset].Id = knnHeap[i][tidx].Id;
qrs[qidx+offset].Dist = knnHeap[i][tidx].Dist;
}
}
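// The kNN heap above is a 1-indexed, array-based max-heap keyed on squared
// distance: knnHeap[1] always holds the current k-th (largest) distance, which
// doubles as the trim distance (dist2Heap / bestDist2).
// For illustration only, a minimal host-side sketch of the same sift-down
// (max-heap demotion); 'SimpleResult' and 'siftDown' are hypothetical names
// and are not used by the kernel above.
//
//   struct SimpleResult { unsigned int Id; float Dist; };
//
//   // heap[1..count] is a max-heap on Dist; demote the element at 'parent'
//   void siftDown(SimpleResult* heap, unsigned int count, unsigned int parent)
//   {
//       unsigned int child = parent << 1;                // left child
//       while (child <= count)
//       {
//           if (child < count && heap[child].Dist < heap[child + 1].Dist)
//               child++;                                 // pick the larger child
//           if (heap[parent].Dist >= heap[child].Dist)
//               break;                                   // heap property restored
//           SimpleResult tmp = heap[parent];             // swap parent and child
//           heap[parent] = heap[child];
//           heap[child] = tmp;
//           parent = child;
//           child = parent << 1;
//       }
//   }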
#endif // _GPU_KNN_LBT_CU_
|
ff58f0eba727d24a56cd209a697d0dd397dd3110.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/Dispatch.h>
#include <ATen/ExpandUtils.h>
#include <ATen/NativeFunctions.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/AccumulateType.h>
#include <ATen/CUDAGenerator.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <utility>
#include <functional>
#include <ATen/native/Distributions.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/TensorIterator.h>
#include <ATen/LegacyTHFunctionsCUDA.h>
#include <THH/THHGeneral.h>
#include <THH/THHApply.cuh>
#include <THH/THHDeviceUtils.cuh>
#include <cstdint>
#include <limits>
#include <utility>
#include <type_traits>
/**
* Note [Register spilling in hiprand call for CUDA < 10]
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* For CUDA < 10, hiprandStatePhilox4_32_10_t engine achieves poor performance (60% SOL bandwidth)
* when called to generate one random number at a time. This is because the line
* unsigned ret = (&state->output.x)[state->STATE++];
* in
* QUALIFIERS unsigned int hiprand(hiprandStatePhilox4_32_10_t *state)
* in hiprand/hiprand_kernel.h dynamically indexes into state.output, preventing the compiler from ever
* storing state.output in registers.
*
* CUDA 10 fixed this problem. However, for backwards compatibility, in the following kernels
* we are using hiprand distributions that utilize hiprand4 call. hiprand4 call doesn't have the
* register spilling problem.
*/
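// In other words, the kernels below always pull four randoms (or two doubles)
// per engine call instead of one at a time. For illustration only (not code
// used below), the two call patterns look like:
//
//   // one-at-a-time: dynamic index into state.output -> register spilling on CUDA < 10
//   float a = hiprand_uniform(&state);
//   float b = hiprand_uniform(&state);
//
//   // vectorized: the whole float4 stays in registers
//   float4 r = hiprand_uniform4(&state);
//   float u0 = r.x, u1 = r.y;             // r.z / r.w feed the unrolled loop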
namespace {
// launch bounds used for kernels utilizing TensorIterator
const uint32_t block_size_bound = 256;
const uint32_t grid_size_bound = 4;
// number of randoms given by distributions like hiprand_uniform4, hiprand_uniform2_double
// used in calculating philox offset.
const uint32_t curand4_engine_calls = 4;
// utility function that calculates proper philox_offset
// for distributions utilizing TensorIterator. For distributions using
// TensorIterator, we are using a grid-stride loop with each
// thread yielding one element per thread. For the edge of the grid-stride
// loop, if the tensor size is large, the unroll loop will kick in and the float4
// from hiprand4 will start getting utilized (for common tensor sizes, we end up
// using rand.x from each thread). Hence, the philox_offset is
// (number of elements per thread * number of engine calls), which makes
// sure that philox offset increment is not less than the number of randoms used
// in each thread.
std::tuple<uint64_t, dim3, dim3> calc_execution_policy(int64_t total_elements) {
const uint64_t numel = static_cast<uint64_t>(total_elements);
const uint32_t block_size = block_size_bound;
const uint32_t unroll = curand4_engine_calls;
dim3 dim_block(block_size);
dim3 grid((numel + block_size - 1) / block_size);
uint32_t blocks_per_sm = at::cuda::getCurrentDeviceProperties()->maxThreadsPerMultiProcessor / block_size;
grid.x = ::min(
static_cast<uint32_t>(at::cuda::getCurrentDeviceProperties()->multiProcessorCount) * blocks_per_sm,
grid.x);
//number of times random will be generated per thread, to offset philox counter in thc random state
uint64_t counter_offset = ((numel - 1) / (block_size * grid.x * unroll) + 1)
* curand4_engine_calls;
return std::make_tuple(counter_offset, grid, dim_block);
}
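// Worked example (illustration only, assuming a hypothetical device with
// 80 SMs and maxThreadsPerMultiProcessor == 2048):
//   numel = 1 << 20, block_size = 256, unroll = 4
//   blocks_per_sm  = 2048 / 256 = 8
//   grid.x         = min(80 * 8, (numel + 255) / 256) = min(640, 4096) = 640
//   counter_offset = ((numel - 1) / (256 * 640 * 4) + 1) * 4
//                  = (1048575 / 655360 + 1) * 4 = (1 + 1) * 4 = 8
// i.e. each thread may call the engine twice, consuming 4 randoms per call,
// so advancing the philox offset by 8 keeps subsequent launches independent.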
// grid stride loop kernel for distributions
template<typename accscalar_t, int unroll_factor, typename dist_t, typename transform_t>
C10_LAUNCH_BOUNDS_2(block_size_bound, grid_size_bound)
__global__ void distribution_elementwise_grid_stride_kernel(int numel,
std::pair<uint64_t, uint64_t> seeds,
const dist_t dist_func,
const transform_t transform_func) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
hiprandStatePhilox4_32_10_t state;
hiprand_init(
seeds.first,
idx,
seeds.second,
&state);
int rounded_size = ((numel - 1)/(blockDim.x * gridDim.x * unroll_factor)+1) *
blockDim.x * gridDim.x * unroll_factor;
for(int linear_index = idx; linear_index < rounded_size; linear_index += blockDim.x * gridDim.x * unroll_factor) {
auto rand = dist_func(&state);
#pragma unroll
for (int ii = 0; ii < unroll_factor; ii++) {
int li = linear_index + blockDim.x * gridDim.x * ii;
if (li < numel) {
transform_func(li, static_cast<accscalar_t>((&rand.x)[ii]));
}
}
__syncthreads();
}
}
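// Note on rounded_size: every thread executes the same number of loop
// iterations (and hence the same number of dist_func engine calls), even for
// the tail of the tensor, so the __syncthreads() above is reached uniformly.
// Worked example (illustration only): with blockDim.x * gridDim.x = 512 and
// unroll_factor = 4, one iteration covers 2048 elements, so
//   numel = 3000  ->  rounded_size = ((3000 - 1) / 2048 + 1) * 2048 = 4096
// and every thread runs exactly 2 iterations; out-of-range indices are
// filtered by the 'li < numel' check.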
/**
* distribution_nullary_kernel is analogous to gpu_kernel in
* ATen/native/cuda/Loops.cuh. Like gpu_kernel, it uses
* TensorIterator to launch a kernel. However, the differences are
* - it launches a grid-stride loop based kernel. The kernel is not
* generic like elementwise_kernel in Loops.cuh and is specialized
* for the distribution kernels here.
* - For big size tensors, we can launch multiple kernels recursively
* (i.e. if (!iter.can_use_32bit_indexing())) and hence, the philox
* offset calculation is done in this function.
*
* FIXME: Can we specialize elementwise_kernel and launch_kernel in Loops.cuh
* to have grid-stride loop kernel and then use that to launch our distribution
* kernels? Note that we need a grid-stride loop kernel because, we found by testing
* that it achieves peak effective bandwidth.
*/
template<typename scalar_t,
typename accscalar_t,
int unroll_factor,
typename dist_t,
typename transform_t>
void distribution_nullary_kernel(at::TensorIterator& iter,
at::CUDAGenerator* gen,
const dist_t& dist_func,
const transform_t transform_func) {
static_assert(unroll_factor >= 1, "unroll_factor must be >= 1.");
int64_t numel = iter.numel();
if (numel == 0) {
return;
}
auto execution_policy = calc_execution_policy(numel);
auto counter_offset = std::get<0>(execution_policy);
auto grid = std::get<1>(execution_policy);
auto block = std::get<2>(execution_policy);
std::pair<uint64_t, uint64_t> rng_engine_inputs;
{
// See Note [Acquire lock when using random generators]
std::lock_guard<std::mutex> lock(gen->mutex_);
rng_engine_inputs = gen->philox_engine_inputs(counter_offset);
}
if (!iter.can_use_32bit_indexing()) {
for (auto& sub_iter : iter.with_32bit_indexing()) {
distribution_nullary_kernel<scalar_t, accscalar_t, unroll_factor>(sub_iter,
gen, dist_func, transform_func);
}
return;
}
char* out_data = (char*)iter.data_ptr(0);
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
if (iter.is_trivial_1d()) {
auto strides = iter.get_inner_strides();
int stride0 = strides[0];
hipLaunchKernelGGL(( distribution_elementwise_grid_stride_kernel<accscalar_t, unroll_factor>), dim3(grid), dim3(block), 0, stream,
numel,
rng_engine_inputs,
dist_func,
[=]__device__(int idx, accscalar_t rand) {
scalar_t* out = (scalar_t*)&out_data[stride0 * idx];
*out = transform_func(rand);
}
);
} else {
auto offset_calc = at::native::make_offset_calculator<1>(iter);
hipLaunchKernelGGL(( distribution_elementwise_grid_stride_kernel<accscalar_t, unroll_factor>), dim3(grid), dim3(block), 0, stream,
numel,
rng_engine_inputs,
dist_func,
[=]__device__(int idx, accscalar_t rand) {
auto offsets = offset_calc.get(idx);
scalar_t* out = (scalar_t*)&out_data[offsets[0]];
*out = transform_func(rand);
}
);
}
AT_CUDA_CHECK(hipGetLastError());
}
template <typename scalar_t>
void poisson_cuda_kernel(
at::Tensor& ret,
const at::Tensor& lambda,
std::pair<uint64_t, uint64_t> seeds) {
at::cuda::CUDA_tensor_apply2<scalar_t, scalar_t>(
ret,
lambda,
[seeds] __device__(
scalar_t & ret_val, const scalar_t& lambda) {
hiprandStatePhilox4_32_10_t state;
hiprand_init(
seeds.first,
blockIdx.x * blockDim.x + threadIdx.x,
seeds.second,
&state);
ret_val = static_cast<scalar_t>(hiprand_poisson(&state, lambda));
});
}
template <typename scalar_t>
void gamma_cuda_kernel(
at::Tensor& ret,
const at::Tensor& alpha,
std::pair<uint64_t, uint64_t> seeds) {
using accscalar_t = at::acc_type<scalar_t, true>;
at::cuda::CUDA_tensor_apply2<scalar_t, scalar_t>(
ret,
alpha,
[seeds] __device__(
scalar_t & ret_val, const scalar_t& alpha) {
hiprandStatePhilox4_32_10_t state;
hiprand_init(
seeds.first,
blockIdx.x * blockDim.x + threadIdx.x,
seeds.second,
&state);
auto uniform_lambda = [&state] __device__ () {
return hiprand_uniform(&state);
};
BaseSampler<accscalar_t, decltype(uniform_lambda)> standard_uniform(uniform_lambda);
auto normal_lambda = [&state] __device__ () {
return hiprand_normal(&state);
};
BaseSampler<accscalar_t, decltype(normal_lambda)> standard_normal(normal_lambda);
auto sample = sample_gamma<scalar_t, accscalar_t, decltype(uniform_lambda), decltype(normal_lambda)>(alpha, standard_uniform, standard_normal);
auto min_value = std::numeric_limits<scalar_t>::min();
ret_val = (min_value > sample) ? min_value : sample;
});
}
template <typename scalar_t>
void gamma_grad_cuda_kernel(
at::Tensor& ret,
const at::Tensor& self,
const at::Tensor& output) {
using accscalar_t = at::acc_type<scalar_t, true>;
at::cuda::CUDA_tensor_apply3<scalar_t, scalar_t, scalar_t>(
ret, self, output,
[] __device__ (scalar_t& ret_val, const scalar_t& self_val, const scalar_t &output_val) {
ret_val = standard_gamma_grad_one<scalar_t, accscalar_t>(self_val, output_val);
});
}
template <typename scalar_t>
void dirichlet_grad_cuda_kernel(
at::Tensor& ret,
const at::Tensor& x,
const at::Tensor& alpha,
const at::Tensor& total) {
using accscalar_t = at::acc_type<scalar_t, true>;
at::cuda::CUDA_tensor_apply4<scalar_t, scalar_t, scalar_t, scalar_t>(
ret, x, alpha, total,
[] __device__ (scalar_t& ret_val, const scalar_t& x_val, const scalar_t& alpha_val, const scalar_t& total_val) {
ret_val = dirichlet_grad_one<scalar_t, accscalar_t>(x_val, alpha_val, total_val);
});
}
template<typename scalar_t, typename prob_t>
void bernoulli_tensor_cuda_kernel(
at::Tensor& ret, const at::Tensor& p,
std::pair<uint64_t, uint64_t> seeds) {
// The template argument `4` below indicates that we want to operate on four
// elements at a time. See NOTE [ CUDA_tensor_applyN helpers ] for details.
at::cuda::CUDA_tensor_apply2<scalar_t, prob_t, 4>(
ret, p,
[seeds] __device__(
int n, scalar_t& v1, scalar_t& v2, scalar_t& v3, scalar_t& v4,
const prob_t& p1, const prob_t& p2, const prob_t& p3, const prob_t& p4) {
hiprandStatePhilox4_32_10_t state;
hiprand_init(
seeds.first,
blockIdx.x * blockDim.x + threadIdx.x,
seeds.second,
&state);
// See Note [Register spilling in hiprand call for CUDA < 10]
float4 rand = hiprand_uniform4(&state);
switch (n) {
case 4: {
assert(0 <= p4 && p4 <= 1);
v4 = static_cast<scalar_t>(rand.w <= p4);
// fallthrough
}
case 3: {
assert(0 <= p3 && p3 <= 1);
v3 = static_cast<scalar_t>(rand.z <= p3);
// fallthrough
}
case 2: {
assert(0 <= p2 && p2 <= 1);
v2 = static_cast<scalar_t>(rand.y <= p2);
// fallthrough
}
case 1: {
assert(0 <= p1 && p1 <= 1);
v1 = static_cast<scalar_t>(rand.x <= p1);
}
}
}
);
}
template<typename scalar_t>
void dirichlet_scalar_cuda_kernel(
at::Tensor& ret,
const at::Tensor& gamma) {
auto gamma_sum = gamma.sum(-1, true).expand(ret.sizes());
at::cuda::CUDA_tensor_apply3<scalar_t, scalar_t, scalar_t>(ret, gamma, gamma_sum,
[] __device__(scalar_t &ret_val, const scalar_t &gamma, const scalar_t &gamma_sum) {
ret_val = gamma / gamma_sum;
auto min_value = std::numeric_limits<scalar_t>::min();
auto max_value = 1 - std::numeric_limits<scalar_t>::epsilon();
ret_val = (min_value > ret_val) ? min_value : ret_val;
ret_val = (max_value < ret_val) ? max_value : ret_val;
});
}
} // namespace
namespace at { namespace native {
Tensor _s_poisson_cuda(const Tensor& lambda, Generator* gen_) {
auto gen = get_generator_or_default<CUDAGenerator>(gen_, cuda::detail::getDefaultCUDAGenerator());
std::pair<uint64_t, uint64_t> rng_engine_inputs;
{
// See Note [Acquire lock when using random generators]
std::lock_guard<std::mutex> lock(gen->mutex_);
rng_engine_inputs = gen->philox_engine_inputs(20);
}
Tensor ret = at::empty(lambda.sizes(), lambda.options());
AT_DISPATCH_FLOATING_TYPES_AND_HALF(ret.scalar_type(), "poisson_cuda", [&] {
poisson_cuda_kernel<scalar_t>(ret, lambda, rng_engine_inputs);
});
return ret;
}
Tensor _s_gamma_cuda(const Tensor& alpha, Generator* gen_) {
auto gen = get_generator_or_default<CUDAGenerator>(gen_, cuda::detail::getDefaultCUDAGenerator());
std::pair<uint64_t, uint64_t> rng_engine_inputs;
{
// See Note [Acquire lock when using random generators]
std::lock_guard<std::mutex> lock(gen->mutex_);
rng_engine_inputs = gen->philox_engine_inputs(10);
}
Tensor ret = at::empty(alpha.sizes(), alpha.options());
AT_DISPATCH_FLOATING_TYPES_AND_HALF(ret.scalar_type(), "gamma_cuda", [&] {
gamma_cuda_kernel<scalar_t>(ret, alpha, rng_engine_inputs);
});
return ret;
}
Tensor _s_dirichlet_cuda(const Tensor& alpha, Generator* gen_) {
auto gen = get_generator_or_default<CUDAGenerator>(gen_, cuda::detail::getDefaultCUDAGenerator());
std::pair<uint64_t, uint64_t> rng_engine_inputs;
{
// See Note [Acquire lock when using random generators]
std::lock_guard<std::mutex> lock(gen->mutex_);
rng_engine_inputs = gen->philox_engine_inputs(10);
}
Tensor ret = at::empty(alpha.sizes(), alpha.options());
AT_DISPATCH_FLOATING_TYPES_AND_HALF(ret.scalar_type(), "dirichlet", [&] {
Tensor gamma = at::empty(alpha.sizes(), alpha.options());
gamma_cuda_kernel<scalar_t>(gamma, alpha, rng_engine_inputs);
dirichlet_scalar_cuda_kernel<scalar_t>(ret, gamma);
});
return ret;
}
Tensor _standard_gamma_grad_cuda(const Tensor& self, const Tensor& output) {
Tensor ret = at::empty(self.sizes(), self.options());
AT_DISPATCH_FLOATING_TYPES_AND_HALF(self.scalar_type(), "_standard_gamma_grad_cuda", [&] {
gamma_grad_cuda_kernel<scalar_t>(ret, self, output);
});
return ret;
}
Tensor _dirichlet_grad_cuda(const Tensor& x, const Tensor& alpha, const Tensor& total) {
Tensor ret = at::empty(x.sizes(), x.options());
AT_DISPATCH_FLOATING_TYPES(x.scalar_type(), "_dirichlet_grad_cuda", [&] {
dirichlet_grad_cuda_kernel<scalar_t>(ret, x, alpha, total);
});
return ret;
}
Tensor& bernoulli_tensor_cuda_(Tensor &self, const Tensor& p_, Generator* gen_) {
auto gen = get_generator_or_default<CUDAGenerator>(gen_, cuda::detail::getDefaultCUDAGenerator());
std::pair<uint64_t, uint64_t> rng_engine_inputs;
{
// See Note [Acquire lock when using random generators]
std::lock_guard<std::mutex> lock(gen->mutex_);
rng_engine_inputs = gen->philox_engine_inputs(10);
}
auto p = std::get<0>(expand_inplace(self, p_.to(kCUDA)));
AT_DISPATCH_ALL_TYPES_AND(
at::ScalarType::Half, self.scalar_type(), "bernoulli_tensor_cuda_self_", [&] {
using self_t = scalar_t;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(p.scalar_type(), "bernoulli_tensor_cuda_p_", [&] {
using p_t = scalar_t;
return bernoulli_tensor_cuda_kernel<self_t, p_t>(self, p, rng_engine_inputs);
});
});
return self;
}
void uniform_kernel_cuda(TensorIterator& iter, double from_, double to_, Generator* gen_) {
auto gen = get_generator_or_default<CUDAGenerator>(gen_, cuda::detail::getDefaultCUDAGenerator());
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "uniform_cuda", [&] {
auto from = static_cast<scalar_t>(from_);
auto to = static_cast<scalar_t>(to_);
TORCH_CHECK(from <= to,
"uniform_ expects to return a [from, to) range, but found from=", from,
" > to=", to);
TORCH_CHECK((to - from) <= std::numeric_limits<scalar_t>::max(),
"uniform_ expects to-from <= std::numeric_limits<", toString(iter.dtype()),
">::max(), but found to=", to, " and from=", from,
" which result in to-from to exceed the limit");
using accscalar_t = at::acc_type<scalar_t, true>;
auto range = static_cast<accscalar_t>(to-from);
from = static_cast<accscalar_t>(from);
// define lambda to reverse bounds, multiply 'range' and add 'from_'
auto uniform_func = [range, from] __device__ (accscalar_t rand) {
// reverse the bounds of hiprand4 from (0, 1] to [0, 1)
// Note that this method is from legacy THCTensorRandom and is likely to give
// you more 0-s, since the probability of getting 1-s is higher than 0-s and
// by reversing the bounds, we are flipping the probabilities of 1-s and 0-s.
auto reverse_bound_rand = rand == static_cast<accscalar_t>(1.0) ? static_cast<accscalar_t>(0.0) : rand;
return static_cast<scalar_t>(reverse_bound_rand * range + from);
};
if (std::is_same<scalar_t, double>::value) {
distribution_nullary_kernel<scalar_t, accscalar_t, curand4_engine_calls/2>(iter,
gen,
[] __device__ (hiprandStatePhilox4_32_10_t* state) { return hiprand_uniform2_double(state); },
uniform_func);
} else {
distribution_nullary_kernel<scalar_t, accscalar_t, curand4_engine_calls>(iter,
gen,
[] __device__ (hiprandStatePhilox4_32_10_t* state) { return hiprand_uniform4(state); },
uniform_func);
}
});
}
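// The reverse-bound trick above, in isolation (illustration only; plain C++
// with hypothetical names, not part of the kernel):
//
//   // hiprand_uniform* yields u in (0, 1]; uniform_ is documented as [from, to)
//   double to_unit_interval(double u) { return u == 1.0 ? 0.0 : u; }  // (0,1] -> [0,1)
//   double to_range(double u, double from, double to) {
//       return to_unit_interval(u) * (to - from) + from;              // [from, to)
//   }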
void random_kernel_cuda(TensorIterator& iter, uint64_t range, int64_t base, Generator* gen_) {
auto gen = get_generator_or_default<CUDAGenerator>(gen_, cuda::detail::getDefaultCUDAGenerator());
AT_DISPATCH_ALL_TYPES_AND2(at::ScalarType::Bool, at::ScalarType::Half, iter.dtype(), "random_cuda", [&] {
if (std::is_same<scalar_t, double>::value || std::is_same<scalar_t, int64_t>::value) {
// define lambda to mod with range and add base
auto random_func = [range, base] __device__ (uint64_t rand) {
return static_cast<int64_t>(rand % range + base);
};
distribution_nullary_kernel<scalar_t, uint64_t, curand4_engine_calls/2>(iter,
gen,
[] __device__ (hiprandStatePhilox4_32_10_t* state) -> ulonglong2 {
ulonglong2 ret;
uint4 rand_val = hiprand4(state);
ret.x = (static_cast<uint64_t>(rand_val.x) << 32) | rand_val.y;
ret.y = (static_cast<uint64_t>(rand_val.z) << 32) | rand_val.w;
return ret;
},
random_func);
} else {
auto random_func = [range, base] __device__ (uint32_t rand) {
return static_cast<int32_t>(rand % static_cast<uint32_t>(range) + static_cast<int32_t>(base));
};
distribution_nullary_kernel<scalar_t, uint32_t, curand4_engine_calls>(iter,
gen,
[] __device__ (hiprandStatePhilox4_32_10_t* state) {
return hiprand4(state);
},
random_func);
}
});
}
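// Bit-packing used in the 64-bit path above (illustration only): two 32-bit
// outputs of hiprand4 are fused into one 64-bit random, e.g.
//   rand_val.x = 0xDEADBEEF, rand_val.y = 0x01234567
//   ret.x = (uint64_t(0xDEADBEEF) << 32) | 0x01234567 = 0xDEADBEEF01234567
// so double/int64_t tensors get full 64-bit entropy before the modulo, while
// the 32-bit path feeds hiprand4's four lanes straight into 'rand % range'.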
void normal_kernel_cuda(TensorIterator& iter, double mean_, double std_, Generator* gen_) {
auto gen = get_generator_or_default<CUDAGenerator>(gen_, cuda::detail::getDefaultCUDAGenerator());
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "normal_cuda", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
auto mean = static_cast<accscalar_t>(mean_);
auto std = static_cast<accscalar_t>(std_);
// define lambda to multiply std and add mean
auto normal_func = [mean, std] __device__ (accscalar_t rand) {
return static_cast<scalar_t>(rand * std + mean);
};
if (std::is_same<scalar_t, double>::value) {
distribution_nullary_kernel<scalar_t, accscalar_t, curand4_engine_calls/2>(iter,
gen,
[] __device__ (hiprandStatePhilox4_32_10_t* state) { return hiprand_normal2_double(state); },
normal_func);
} else {
distribution_nullary_kernel<scalar_t, accscalar_t, curand4_engine_calls>(iter,
gen,
[] __device__ (hiprandStatePhilox4_32_10_t* state) { return hiprand_normal4(state); },
normal_func);
}
});
}
void cauchy_kernel_cuda(TensorIterator& iter, double median_, double sigma_, Generator* gen_) {
auto gen = get_generator_or_default<CUDAGenerator>(gen_, cuda::detail::getDefaultCUDAGenerator());
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "cauchy_cuda", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
auto median = static_cast<accscalar_t>(median_);
auto sigma = static_cast<accscalar_t>(sigma_);
if (std::is_same<scalar_t, double>::value) {
// define lambda for cauchy transformation
auto cauchy_func = [median, sigma] __device__ (accscalar_t rand) {
return static_cast<scalar_t>(median + sigma *
::tan(static_cast<accscalar_t>(M_PI) * (rand-static_cast<accscalar_t>(0.5))));
};
distribution_nullary_kernel<scalar_t, accscalar_t, curand4_engine_calls/2>(iter,
gen,
[] __device__ (hiprandStatePhilox4_32_10_t* state) { return hiprand_uniform2_double(state); },
cauchy_func);
} else {
// use __tanf fast approximation for peak bandwidth
auto cauchy_func = [median, sigma] __device__ (accscalar_t rand) {
return static_cast<scalar_t>(median + sigma *
__tanf(static_cast<accscalar_t>(M_PI) * (rand-static_cast<accscalar_t>(0.5))));
};
distribution_nullary_kernel<scalar_t, accscalar_t, curand4_engine_calls>(iter,
gen,
[] __device__ (hiprandStatePhilox4_32_10_t* state) { return hiprand_uniform4(state); },
cauchy_func);
}
});
}
void exponential_kernel_cuda(TensorIterator& iter, double lambda_, Generator* gen_) {
auto gen = get_generator_or_default<CUDAGenerator>(gen_, cuda::detail::getDefaultCUDAGenerator());
// Note that HIP doesn't support std::nextafter in device code.
auto nextafter_1_0_float = std::nextafter(1.0f, 0.0f);
auto nextafter_1_0_double = std::nextafter(1.0, 0.0);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "exponential_cuda", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
auto lambda = static_cast<accscalar_t>(lambda_);
if (std::is_same<scalar_t, double>::value) {
// define lambda for exponential transformation
auto exponential_func = [lambda, nextafter_1_0_double] __device__ (accscalar_t rand) {
accscalar_t sample;
// hiprand_uniform has (0,1] bounds. log(1) is 0 and exponential excludes 0.
// Hence, squash the 1 to just below 1.
if(rand == static_cast<accscalar_t>(1.0)) {
sample = ::log(nextafter_1_0_double);
} else {
sample = ::log(rand);
}
return static_cast<scalar_t>(static_cast<accscalar_t>(-1.0) / lambda * sample);
};
distribution_nullary_kernel<scalar_t, accscalar_t, curand4_engine_calls/2>(iter,
gen,
[] __device__ (hiprandStatePhilox4_32_10_t* state) { return hiprand_uniform2_double(state); },
exponential_func);
} else {
// use __logf fast approximation for peak bandwidth
auto exponential_func = [lambda, nextafter_1_0_float] __device__ (accscalar_t rand) {
accscalar_t sample;
if(rand == static_cast<accscalar_t>(1.0)) {
sample = __logf(nextafter_1_0_float);
} else {
sample = __logf(rand);
}
return static_cast<scalar_t>(static_cast<accscalar_t>(-1.0) / lambda * sample);
};
distribution_nullary_kernel<scalar_t, accscalar_t, curand4_engine_calls>(iter,
gen,
[] __device__ (hiprandStatePhilox4_32_10_t* state) { return hiprand_uniform4(state); },
exponential_func);
}
});
}
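// Transform used above (inverse CDF, for illustration): if U ~ Uniform(0, 1]
// then X = -log(U) / lambda satisfies
//   P(X > x) = P(-log(U) > lambda * x) = P(U < exp(-lambda * x)) = exp(-lambda * x),
// i.e. X ~ Exp(lambda). Since hiprand's U can equal 1 (giving log(U) = 0 and
// X = 0, which the exponential distribution excludes), U == 1 is nudged to
// nextafter(1, 0) first.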
void geometric_kernel_cuda(TensorIterator& iter, double p_, Generator* gen_) {
auto gen = get_generator_or_default<CUDAGenerator>(gen_, cuda::detail::getDefaultCUDAGenerator());
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, iter.dtype(), "geometric_cuda", [&] {
if (std::is_same<scalar_t, double>::value) {
// define lambda for geometric transformation
auto geometric_func = [p_] __device__ (double rand) {
return static_cast<scalar_t>(::ceil(::log(rand) / ::log(static_cast<double>(1.0)-p_)));
};
distribution_nullary_kernel<scalar_t, double, curand4_engine_calls/2>(iter,
gen,
[] __device__ (hiprandStatePhilox4_32_10_t* state) { return hiprand_uniform2_double(state); },
geometric_func);
} else {
auto p = static_cast<float>(p_);
auto geometric_func = [p] __device__ (float rand) {
// use __logf fast approximation for peak bandwidth
return static_cast<scalar_t>(::ceil(__logf(rand) / __logf(static_cast<float>(1.0)-p)));
};
distribution_nullary_kernel<scalar_t, float, curand4_engine_calls>(iter,
gen,
[] __device__ (hiprandStatePhilox4_32_10_t* state) { return hiprand_uniform4(state); },
geometric_func);
}
});
}
void log_normal_kernel_cuda(TensorIterator& iter, double mean_, double std_, Generator* gen_) {
auto gen = get_generator_or_default<CUDAGenerator>(gen_, cuda::detail::getDefaultCUDAGenerator());
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "log_normal_cuda", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
auto mean = static_cast<accscalar_t>(mean_);
auto std = static_cast<accscalar_t>(std_);
if (std::is_same<scalar_t, double>::value) {
// define lambda for log_normal transformation
auto log_normal_func = [mean, std] __device__ (accscalar_t rand) {
return static_cast<scalar_t>(::exp(rand * std + mean));
};
distribution_nullary_kernel<scalar_t, accscalar_t, curand4_engine_calls/2>(iter,
gen,
[] __device__ (hiprandStatePhilox4_32_10_t* state) { return hiprand_normal2_double(state); },
log_normal_func);
} else {
auto log_normal_func = [mean, std] __device__ (accscalar_t rand) {
// use __expf fast approximation for peak bandwidth
return static_cast<scalar_t>(__expf(rand * std + mean));
};
distribution_nullary_kernel<scalar_t, accscalar_t, curand4_engine_calls>(iter,
gen,
[] __device__ (hiprandStatePhilox4_32_10_t* state) { return hiprand_normal4(state); },
log_normal_func);
}
});
}
void bernoulli_scalar_cuda_kernel(TensorIterator& iter, double p_, Generator* gen_) {
auto gen = get_generator_or_default<CUDAGenerator>(gen_, cuda::detail::getDefaultCUDAGenerator());
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, iter.dtype(), "bernoulli_scalar_cuda_", [&] {
if (std::is_same<scalar_t, double>::value) {
// define lambda for bernoulli transformation
auto bernoulli_func = [p_] __device__ (double rand) {
return static_cast<scalar_t>(rand <= p_);
};
distribution_nullary_kernel<scalar_t, double, curand4_engine_calls/2>(iter,
gen,
[] __device__ (hiprandStatePhilox4_32_10_t* state) { return hiprand_uniform2_double(state); },
bernoulli_func);
} else {
auto p = static_cast<float>(p_);
auto bernoulli_func = [p] __device__ (float rand) {
return static_cast<scalar_t>(rand <= p);
};
distribution_nullary_kernel<scalar_t, float, curand4_engine_calls>(iter,
gen,
[] __device__ (hiprandStatePhilox4_32_10_t* state) { return hiprand_uniform4(state); },
bernoulli_func);
}
});
}
Tensor& uniform_cuda_(Tensor& self, double from, double to, Generator* gen) {
auto iter = TensorIterator::nullary_op(self);
uniform_kernel_cuda(iter, from, to, gen);
return self;
}
Tensor& random_cuda_(Tensor& self, Generator* gen) {
auto iter = TensorIterator::nullary_op(self);
uint64_t range;
auto iter_scalar_type = iter.dtype();
if (isFloatingType(iter_scalar_type)) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter_scalar_type, "random_cuda_range_calc", [&] {
range = static_cast<uint64_t>((1ULL << std::numeric_limits<scalar_t>::digits) + 1);
});
} else {
AT_DISPATCH_INTEGRAL_TYPES(iter_scalar_type, "random_cuda_range_calc", [&] {
range = static_cast<uint64_t>(std::numeric_limits<scalar_t>::max()) + 1;
});
}
random_kernel_cuda(iter, range, 0, gen);
return self;
}
Tensor& clamped_random_cuda_(Tensor& self, int64_t from, int64_t to, Generator* gen) {
TORCH_CHECK(from < to, "random_ expects 'from' to be less than 'to', but got from=", from, " >= to=", to);
auto iter = TensorIterator::nullary_op(self);
uint64_t range = to - from;
random_kernel_cuda(iter, range, from, gen);
return self;
}
Tensor& capped_random_cuda_(Tensor& self, int64_t to, Generator* gen) {
return clamped_random_cuda_(self, 0, to, gen);
}
Tensor& normal_cuda_(Tensor& self, double mean, double std, Generator* gen) {
TORCH_CHECK(std > 0.0, "normal_ expects std > 0.0, but found std=", std);
auto iter = TensorIterator::nullary_op(self);
normal_kernel_cuda(iter, mean, std, gen);
return self;
}
Tensor& normal_out_cuda(Tensor& output, const Tensor& mean, double std, Generator* gen) {
normal_cuda_(output, 0, std, gen);
output.add_(mean);
return output;
}
Tensor& normal_out_cuda(Tensor& output, double mean, const Tensor& std, Generator* gen) {
normal_cuda_(output, 0, 1, gen);
auto mean_tensor = at::full({1}, mean, output.options());
// NB: addcmul_out copies the tensor to be added into the output.
// Please look at aten/src/THC/generic/THCTensorMathPointwise.cu
// The previous function here was addcmul_out(output, mean_tensor, output, std, 1);
// The third argument is not a constant reference and hence the samples in output are overwritten.
// Consequently, the computation performed is mean_tensor + mean_tensor * std instead of mean_tensor + output * std
output.mul_(std).add_(mean_tensor);
return output;
}
Tensor& normal_out_cuda(Tensor& output, const Tensor& mean, const Tensor& std, Generator* gen) {
normal_cuda_(output, 0, 1, gen);
// NB: addcmul_out copies the tensor to be added into the output.
// Please look at aten/src/THC/generic/THCTensorMathPointwise.cu
// The previous function here was addcmul_out(output, mean, output, std, 1);
// The third argument is not a constant reference and hence the samples in output are overwritten.
// Consequently, the computation performed is mean + mean * std instead of mean + output * std
output.mul_(std).add_(mean);
return output;
}
Tensor normal_cuda(const Tensor& mean, double std, Generator* gen) {
Tensor ret = at::empty_like(mean);
normal_out_cuda(ret, mean, std, gen);
return ret;
}
Tensor normal_cuda(double mean, const Tensor& std, Generator* gen) {
Tensor ret = at::empty_like(std);
normal_out_cuda(ret, mean, std, gen);
return ret;
}
Tensor normal_cuda(const Tensor& mean, const Tensor& std, Generator* gen) {
Tensor ret = at::empty_like(mean);
normal_out_cuda(ret, mean, std, gen);
return ret;
}
Tensor& cauchy_cuda_(Tensor& self, double median, double sigma, Generator* gen) {
auto iter = TensorIterator::nullary_op(self);
cauchy_kernel_cuda(iter, median, sigma, gen);
return self;
}
Tensor& exponential_cuda_(Tensor& self, double lambda, Generator* gen) {
auto iter = TensorIterator::nullary_op(self);
exponential_kernel_cuda(iter, lambda, gen);
return self;
}
Tensor& geometric_cuda_(Tensor& self, double p, Generator* gen) {
TORCH_CHECK(0 < p && p < 1, "geometric_ expects p to be in (0, 1), but got p=", p);
auto iter = TensorIterator::nullary_op(self);
geometric_kernel_cuda(iter, p, gen);
return self;
}
Tensor& log_normal_cuda_(Tensor& self, double mean, double std, Generator* gen) {
TORCH_CHECK(std > 0.0, "log_normal_ expects std > 0.0, but found std=", std);
auto iter = TensorIterator::nullary_op(self);
log_normal_kernel_cuda(iter, mean, std, gen);
return self;
}
Tensor& bernoulli_scalar_cuda_(Tensor &self, double p, Generator* gen) {
TORCH_CHECK(0 <= p && p <= 1, "bernoulli_ expects p to be in [0, 1], but got p=", p);
auto iter = TensorIterator::nullary_op(self);
bernoulli_scalar_cuda_kernel(iter, p, gen);
return self;
}
}} // namespace at::native
|
ff58f0eba727d24a56cd209a697d0dd397dd3110.cu
|
#include <ATen/Dispatch.h>
#include <ATen/ExpandUtils.h>
#include <ATen/NativeFunctions.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/AccumulateType.h>
#include <ATen/CUDAGenerator.h>
#include <curand.h>
#include <curand_kernel.h>
#include <curand_philox4x32_x.h>
#include <utility>
#include <functional>
#include <ATen/native/Distributions.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/TensorIterator.h>
#include <ATen/LegacyTHFunctionsCUDA.h>
#include <THC/THCGeneral.h>
#include <THC/THCApply.cuh>
#include <THC/THCDeviceUtils.cuh>
#include <cstdint>
#include <limits>
#include <utility>
#include <type_traits>
/**
* Note [Register spilling in curand call for CUDA < 10]
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* For CUDA < 10, curandStatePhilox4_32_10_t engine achieves poor performance (60% SOL bandwidth)
* when called to generate one random number at a time. This is because the line
* unsigned ret = (&state->output.x)[state->STATE++];
* in
* QUALIFIERS unsigned int curand(curandStatePhilox4_32_10_t *state)
* in curand_kernel.h dynamically indexes into state.output, preventing the compiler from ever
* storing state.output in registers.
*
* CUDA 10 fixed this problem. However, for backwards compatibility, in the following kernels
* we are using curand distributions that utilize curand4 call. curand4 call doesn't have the
* register spilling problem.
*/
namespace {
// launch bounds used for kernels utilizing TensorIterator
const uint32_t block_size_bound = 256;
const uint32_t grid_size_bound = 4;
// number of randoms given by distributions like curand_uniform4, curand_uniform2_double
// used in calculating philox offset.
const uint32_t curand4_engine_calls = 4;
// utility function that calculates proper philox_offset
// for distributions utilizing TensorIterator. For distributions using
// TensorIterator, we are using a grid-stride loop with each
// thread yielding one element per thread. For the edge of the grid-stride
// loop, if the tensor size is large, the unroll loop will kick in and the float4
// from curand4 will start getting utilized (for common tensor sizes, we end up
// using rand.x from each thread). Hence, the philox_offset is
// (number of elements per thread * number of engine calls), which makes
// sure that philox offset increment is not less than the number of randoms used
// in each thread.
std::tuple<uint64_t, dim3, dim3> calc_execution_policy(int64_t total_elements) {
const uint64_t numel = static_cast<uint64_t>(total_elements);
const uint32_t block_size = block_size_bound;
const uint32_t unroll = curand4_engine_calls;
dim3 dim_block(block_size);
dim3 grid((numel + block_size - 1) / block_size);
uint32_t blocks_per_sm = at::cuda::getCurrentDeviceProperties()->maxThreadsPerMultiProcessor / block_size;
grid.x = std::min(
static_cast<uint32_t>(at::cuda::getCurrentDeviceProperties()->multiProcessorCount) * blocks_per_sm,
grid.x);
//number of times random will be generated per thread, to offset philox counter in thc random state
uint64_t counter_offset = ((numel - 1) / (block_size * grid.x * unroll) + 1)
* curand4_engine_calls;
return std::make_tuple(counter_offset, grid, dim_block);
}
// grid stride loop kernel for distributions
template<typename accscalar_t, int unroll_factor, typename dist_t, typename transform_t>
C10_LAUNCH_BOUNDS_2(block_size_bound, grid_size_bound)
__global__ void distribution_elementwise_grid_stride_kernel(int numel,
std::pair<uint64_t, uint64_t> seeds,
const dist_t dist_func,
const transform_t transform_func) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
curandStatePhilox4_32_10_t state;
curand_init(
seeds.first,
idx,
seeds.second,
&state);
int rounded_size = ((numel - 1)/(blockDim.x * gridDim.x * unroll_factor)+1) *
blockDim.x * gridDim.x * unroll_factor;
for(int linear_index = idx; linear_index < rounded_size; linear_index += blockDim.x * gridDim.x * unroll_factor) {
auto rand = dist_func(&state);
#pragma unroll
for (int ii = 0; ii < unroll_factor; ii++) {
int li = linear_index + blockDim.x * gridDim.x * ii;
if (li < numel) {
transform_func(li, static_cast<accscalar_t>((&rand.x)[ii]));
}
}
__syncthreads();
}
}
/**
* distribution_nullary_kernel is analogous to gpu_kernel in
* ATen/native/cuda/Loops.cuh. Like gpu_kernel, it uses
* TensorIterator to launch a kernel. However, the differences are
* - it launches a grid-stride loop based kernel. The kernel is not
* generic like elementwise_kernel in Loops.cuh and is specialized
* for the distribution kernels here.
* - For big size tensors, we can launch multiple kernels recursively
* (i.e. if (!iter.can_use_32bit_indexing())) and hence, the philox
* offset calculation is done in this function.
*
* FIXME: Can we specialize elementwise_kernel and launch_kernel in Loops.cuh
* to have grid-stride loop kernel and then use that to launch our distribution
* kernels? Note that we need a grid-stride loop kernel because, we found by testing
* that it achieves peak effective bandwidth.
*/
template<typename scalar_t,
typename accscalar_t,
int unroll_factor,
typename dist_t,
typename transform_t>
void distribution_nullary_kernel(at::TensorIterator& iter,
at::CUDAGenerator* gen,
const dist_t& dist_func,
const transform_t transform_func) {
static_assert(unroll_factor >= 1, "unroll_factor must be >= 1.");
int64_t numel = iter.numel();
if (numel == 0) {
return;
}
auto execution_policy = calc_execution_policy(numel);
auto counter_offset = std::get<0>(execution_policy);
auto grid = std::get<1>(execution_policy);
auto block = std::get<2>(execution_policy);
std::pair<uint64_t, uint64_t> rng_engine_inputs;
{
// See Note [Acquire lock when using random generators]
std::lock_guard<std::mutex> lock(gen->mutex_);
rng_engine_inputs = gen->philox_engine_inputs(counter_offset);
}
if (!iter.can_use_32bit_indexing()) {
for (auto& sub_iter : iter.with_32bit_indexing()) {
distribution_nullary_kernel<scalar_t, accscalar_t, unroll_factor>(sub_iter,
gen, dist_func, transform_func);
}
return;
}
char* out_data = (char*)iter.data_ptr(0);
auto stream = at::cuda::getCurrentCUDAStream();
if (iter.is_trivial_1d()) {
auto strides = iter.get_inner_strides();
int stride0 = strides[0];
distribution_elementwise_grid_stride_kernel<accscalar_t, unroll_factor><<<grid, block, 0, stream>>>(
numel,
rng_engine_inputs,
dist_func,
[=]__device__(int idx, accscalar_t rand) {
scalar_t* out = (scalar_t*)&out_data[stride0 * idx];
*out = transform_func(rand);
}
);
} else {
auto offset_calc = at::native::make_offset_calculator<1>(iter);
distribution_elementwise_grid_stride_kernel<accscalar_t, unroll_factor><<<grid, block, 0, stream>>>(
numel,
rng_engine_inputs,
dist_func,
[=]__device__(int idx, accscalar_t rand) {
auto offsets = offset_calc.get(idx);
scalar_t* out = (scalar_t*)&out_data[offsets[0]];
*out = transform_func(rand);
}
);
}
AT_CUDA_CHECK(cudaGetLastError());
}
template <typename scalar_t>
void poisson_cuda_kernel(
at::Tensor& ret,
const at::Tensor& lambda,
std::pair<uint64_t, uint64_t> seeds) {
at::cuda::CUDA_tensor_apply2<scalar_t, scalar_t>(
ret,
lambda,
[seeds] __device__(
scalar_t & ret_val, const scalar_t& lambda) {
curandStatePhilox4_32_10_t state;
curand_init(
seeds.first,
blockIdx.x * blockDim.x + threadIdx.x,
seeds.second,
&state);
ret_val = static_cast<scalar_t>(curand_poisson(&state, lambda));
});
}
template <typename scalar_t>
void gamma_cuda_kernel(
at::Tensor& ret,
const at::Tensor& alpha,
std::pair<uint64_t, uint64_t> seeds) {
using accscalar_t = at::acc_type<scalar_t, true>;
at::cuda::CUDA_tensor_apply2<scalar_t, scalar_t>(
ret,
alpha,
[seeds] __device__(
scalar_t & ret_val, const scalar_t& alpha) {
curandStatePhilox4_32_10_t state;
curand_init(
seeds.first,
blockIdx.x * blockDim.x + threadIdx.x,
seeds.second,
&state);
auto uniform_lambda = [&state] __device__ () {
return curand_uniform(&state);
};
BaseSampler<accscalar_t, decltype(uniform_lambda)> standard_uniform(uniform_lambda);
auto normal_lambda = [&state] __device__ () {
return curand_normal(&state);
};
BaseSampler<accscalar_t, decltype(normal_lambda)> standard_normal(normal_lambda);
auto sample = sample_gamma<scalar_t, accscalar_t, decltype(uniform_lambda), decltype(normal_lambda)>(alpha, standard_uniform, standard_normal);
auto min_value = std::numeric_limits<scalar_t>::min();
ret_val = (min_value > sample) ? min_value : sample;
});
}
template <typename scalar_t>
void gamma_grad_cuda_kernel(
at::Tensor& ret,
const at::Tensor& self,
const at::Tensor& output) {
using accscalar_t = at::acc_type<scalar_t, true>;
at::cuda::CUDA_tensor_apply3<scalar_t, scalar_t, scalar_t>(
ret, self, output,
[] __device__ (scalar_t& ret_val, const scalar_t& self_val, const scalar_t &output_val) {
ret_val = standard_gamma_grad_one<scalar_t, accscalar_t>(self_val, output_val);
});
}
template <typename scalar_t>
void dirichlet_grad_cuda_kernel(
at::Tensor& ret,
const at::Tensor& x,
const at::Tensor& alpha,
const at::Tensor& total) {
using accscalar_t = at::acc_type<scalar_t, true>;
at::cuda::CUDA_tensor_apply4<scalar_t, scalar_t, scalar_t, scalar_t>(
ret, x, alpha, total,
[] __device__ (scalar_t& ret_val, const scalar_t& x_val, const scalar_t& alpha_val, const scalar_t& total_val) {
ret_val = dirichlet_grad_one<scalar_t, accscalar_t>(x_val, alpha_val, total_val);
});
}
template<typename scalar_t, typename prob_t>
void bernoulli_tensor_cuda_kernel(
at::Tensor& ret, const at::Tensor& p,
std::pair<uint64_t, uint64_t> seeds) {
// The template argument `4` below indicates that we want to operate on four
// elements at a time. See NOTE [ CUDA_tensor_applyN helpers ] for details.
at::cuda::CUDA_tensor_apply2<scalar_t, prob_t, 4>(
ret, p,
[seeds] __device__(
int n, scalar_t& v1, scalar_t& v2, scalar_t& v3, scalar_t& v4,
const prob_t& p1, const prob_t& p2, const prob_t& p3, const prob_t& p4) {
curandStatePhilox4_32_10_t state;
curand_init(
seeds.first,
blockIdx.x * blockDim.x + threadIdx.x,
seeds.second,
&state);
// See Note [Register spilling in curand call for CUDA < 10]
float4 rand = curand_uniform4(&state);
switch (n) {
case 4: {
assert(0 <= p4 && p4 <= 1);
v4 = static_cast<scalar_t>(rand.w <= p4);
// fallthrough
}
case 3: {
assert(0 <= p3 && p3 <= 1);
v3 = static_cast<scalar_t>(rand.z <= p3);
// fallthrough
}
case 2: {
assert(0 <= p2 && p2 <= 1);
v2 = static_cast<scalar_t>(rand.y <= p2);
// fallthrough
}
case 1: {
assert(0 <= p1 && p1 <= 1);
v1 = static_cast<scalar_t>(rand.x <= p1);
}
}
}
);
}
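// Note on the switch above: CUDA_tensor_apply2<..., 4> hands each lambda call
// up to four elements; 'n' is how many of v1..v4 / p1..p4 are valid for the
// current (possibly partial) group, and the cases deliberately fall through so
// a single curand_uniform4 call services all of them. For illustration only, a
// scalar version of the per-element comparison (hypothetical name):
//
//   // u ~ Uniform(0, 1]; returns 1 with probability p (0 <= p <= 1)
//   int bernoulli_draw(float u, float p) { return u <= p ? 1 : 0; }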
template<typename scalar_t>
void dirichlet_scalar_cuda_kernel(
at::Tensor& ret,
const at::Tensor& gamma) {
auto gamma_sum = gamma.sum(-1, true).expand(ret.sizes());
at::cuda::CUDA_tensor_apply3<scalar_t, scalar_t, scalar_t>(ret, gamma, gamma_sum,
[] __device__(scalar_t &ret_val, const scalar_t &gamma, const scalar_t &gamma_sum) {
ret_val = gamma / gamma_sum;
auto min_value = std::numeric_limits<scalar_t>::min();
auto max_value = 1 - std::numeric_limits<scalar_t>::epsilon();
ret_val = (min_value > ret_val) ? min_value : ret_val;
ret_val = (max_value < ret_val) ? max_value : ret_val;
});
}
} // namespace
namespace at { namespace native {
Tensor _s_poisson_cuda(const Tensor& lambda, Generator* gen_) {
auto gen = get_generator_or_default<CUDAGenerator>(gen_, cuda::detail::getDefaultCUDAGenerator());
std::pair<uint64_t, uint64_t> rng_engine_inputs;
{
// See Note [Acquire lock when using random generators]
std::lock_guard<std::mutex> lock(gen->mutex_);
rng_engine_inputs = gen->philox_engine_inputs(20);
}
Tensor ret = at::empty(lambda.sizes(), lambda.options());
AT_DISPATCH_FLOATING_TYPES_AND_HALF(ret.scalar_type(), "poisson_cuda", [&] {
poisson_cuda_kernel<scalar_t>(ret, lambda, rng_engine_inputs);
});
return ret;
}
Tensor _s_gamma_cuda(const Tensor& alpha, Generator* gen_) {
auto gen = get_generator_or_default<CUDAGenerator>(gen_, cuda::detail::getDefaultCUDAGenerator());
std::pair<uint64_t, uint64_t> rng_engine_inputs;
{
// See Note [Acquire lock when using random generators]
std::lock_guard<std::mutex> lock(gen->mutex_);
rng_engine_inputs = gen->philox_engine_inputs(10);
}
Tensor ret = at::empty(alpha.sizes(), alpha.options());
AT_DISPATCH_FLOATING_TYPES_AND_HALF(ret.scalar_type(), "gamma_cuda", [&] {
gamma_cuda_kernel<scalar_t>(ret, alpha, rng_engine_inputs);
});
return ret;
}
Tensor _s_dirichlet_cuda(const Tensor& alpha, Generator* gen_) {
auto gen = get_generator_or_default<CUDAGenerator>(gen_, cuda::detail::getDefaultCUDAGenerator());
std::pair<uint64_t, uint64_t> rng_engine_inputs;
{
// See Note [Acquire lock when using random generators]
std::lock_guard<std::mutex> lock(gen->mutex_);
rng_engine_inputs = gen->philox_engine_inputs(10);
}
Tensor ret = at::empty(alpha.sizes(), alpha.options());
AT_DISPATCH_FLOATING_TYPES_AND_HALF(ret.scalar_type(), "dirichlet", [&] {
Tensor gamma = at::empty(alpha.sizes(), alpha.options());
gamma_cuda_kernel<scalar_t>(gamma, alpha, rng_engine_inputs);
dirichlet_scalar_cuda_kernel<scalar_t>(ret, gamma);
});
return ret;
}
Tensor _standard_gamma_grad_cuda(const Tensor& self, const Tensor& output) {
Tensor ret = at::empty(self.sizes(), self.options());
AT_DISPATCH_FLOATING_TYPES_AND_HALF(self.scalar_type(), "_standard_gamma_grad_cuda", [&] {
gamma_grad_cuda_kernel<scalar_t>(ret, self, output);
});
return ret;
}
Tensor _dirichlet_grad_cuda(const Tensor& x, const Tensor& alpha, const Tensor& total) {
Tensor ret = at::empty(x.sizes(), x.options());
AT_DISPATCH_FLOATING_TYPES(x.scalar_type(), "_dirichlet_grad_cuda", [&] {
dirichlet_grad_cuda_kernel<scalar_t>(ret, x, alpha, total);
});
return ret;
}
Tensor& bernoulli_tensor_cuda_(Tensor &self, const Tensor& p_, Generator* gen_) {
auto gen = get_generator_or_default<CUDAGenerator>(gen_, cuda::detail::getDefaultCUDAGenerator());
std::pair<uint64_t, uint64_t> rng_engine_inputs;
{
// See Note [Acquire lock when using random generators]
std::lock_guard<std::mutex> lock(gen->mutex_);
rng_engine_inputs = gen->philox_engine_inputs(10);
}
auto p = std::get<0>(expand_inplace(self, p_.to(kCUDA)));
AT_DISPATCH_ALL_TYPES_AND(
at::ScalarType::Half, self.scalar_type(), "bernoulli_tensor_cuda_self_", [&] {
using self_t = scalar_t;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(p.scalar_type(), "bernoulli_tensor_cuda_p_", [&] {
using p_t = scalar_t;
return bernoulli_tensor_cuda_kernel<self_t, p_t>(self, p, rng_engine_inputs);
});
});
return self;
}
void uniform_kernel_cuda(TensorIterator& iter, double from_, double to_, Generator* gen_) {
auto gen = get_generator_or_default<CUDAGenerator>(gen_, cuda::detail::getDefaultCUDAGenerator());
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "uniform_cuda", [&] {
auto from = static_cast<scalar_t>(from_);
auto to = static_cast<scalar_t>(to_);
TORCH_CHECK(from <= to,
"uniform_ expects to return a [from, to) range, but found from=", from,
" > to=", to);
TORCH_CHECK((to - from) <= std::numeric_limits<scalar_t>::max(),
"uniform_ expects to-from <= std::numeric_limits<", toString(iter.dtype()),
">::max(), but found to=", to, " and from=", from,
" which result in to-from to exceed the limit");
using accscalar_t = at::acc_type<scalar_t, true>;
auto range = static_cast<accscalar_t>(to-from);
from = static_cast<accscalar_t>(from);
// define lambda to reverse bounds, multiply 'range' and add 'from_'
auto uniform_func = [range, from] __device__ (accscalar_t rand) {
// reverse the bounds of curand4 from (0, 1] to [0, 1)
// Note that this method is from legacy THCTensorRandom and is likely to give
// you more 0-s, since the probability of getting 1-s is higher than 0-s and
// by reversing the bounds, we are flipping the probabilities of 1-s and 0-s.
auto reverse_bound_rand = rand == static_cast<accscalar_t>(1.0) ? static_cast<accscalar_t>(0.0) : rand;
return static_cast<scalar_t>(reverse_bound_rand * range + from);
};
if (std::is_same<scalar_t, double>::value) {
distribution_nullary_kernel<scalar_t, accscalar_t, curand4_engine_calls/2>(iter,
gen,
[] __device__ (curandStatePhilox4_32_10_t* state) { return curand_uniform2_double(state); },
uniform_func);
} else {
distribution_nullary_kernel<scalar_t, accscalar_t, curand4_engine_calls>(iter,
gen,
[] __device__ (curandStatePhilox4_32_10_t* state) { return curand_uniform4(state); },
uniform_func);
}
});
}
void random_kernel_cuda(TensorIterator& iter, uint64_t range, int64_t base, Generator* gen_) {
auto gen = get_generator_or_default<CUDAGenerator>(gen_, cuda::detail::getDefaultCUDAGenerator());
AT_DISPATCH_ALL_TYPES_AND2(at::ScalarType::Bool, at::ScalarType::Half, iter.dtype(), "random_cuda", [&] {
if (std::is_same<scalar_t, double>::value || std::is_same<scalar_t, int64_t>::value) {
// define lambda to mod with range and add base
auto random_func = [range, base] __device__ (uint64_t rand) {
return static_cast<int64_t>(rand % range + base);
};
distribution_nullary_kernel<scalar_t, uint64_t, curand4_engine_calls/2>(iter,
gen,
[] __device__ (curandStatePhilox4_32_10_t* state) -> ulonglong2 {
ulonglong2 ret;
uint4 rand_val = curand4(state);
ret.x = (static_cast<uint64_t>(rand_val.x) << 32) | rand_val.y;
ret.y = (static_cast<uint64_t>(rand_val.z) << 32) | rand_val.w;
return ret;
},
random_func);
} else {
auto random_func = [range, base] __device__ (uint32_t rand) {
return static_cast<int32_t>(rand % static_cast<uint32_t>(range) + static_cast<int32_t>(base));
};
distribution_nullary_kernel<scalar_t, uint32_t, curand4_engine_calls>(iter,
gen,
[] __device__ (curandStatePhilox4_32_10_t* state) {
return curand4(state);
},
random_func);
}
});
}
void normal_kernel_cuda(TensorIterator& iter, double mean_, double std_, Generator* gen_) {
auto gen = get_generator_or_default<CUDAGenerator>(gen_, cuda::detail::getDefaultCUDAGenerator());
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "normal_cuda", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
auto mean = static_cast<accscalar_t>(mean_);
auto std = static_cast<accscalar_t>(std_);
// define lambda to multiply std and add mean
auto normal_func = [mean, std] __device__ (accscalar_t rand) {
return static_cast<scalar_t>(rand * std + mean);
};
if (std::is_same<scalar_t, double>::value) {
distribution_nullary_kernel<scalar_t, accscalar_t, curand4_engine_calls/2>(iter,
gen,
[] __device__ (curandStatePhilox4_32_10_t* state) { return curand_normal2_double(state); },
normal_func);
} else {
distribution_nullary_kernel<scalar_t, accscalar_t, curand4_engine_calls>(iter,
gen,
[] __device__ (curandStatePhilox4_32_10_t* state) { return curand_normal4(state); },
normal_func);
}
});
}
void cauchy_kernel_cuda(TensorIterator& iter, double median_, double sigma_, Generator* gen_) {
auto gen = get_generator_or_default<CUDAGenerator>(gen_, cuda::detail::getDefaultCUDAGenerator());
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "cauchy_cuda", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
auto median = static_cast<accscalar_t>(median_);
auto sigma = static_cast<accscalar_t>(sigma_);
if (std::is_same<scalar_t, double>::value) {
// define lambda for cauchy transformation
auto cauchy_func = [median, sigma] __device__ (accscalar_t rand) {
return static_cast<scalar_t>(median + sigma *
::tan(static_cast<accscalar_t>(M_PI) * (rand-static_cast<accscalar_t>(0.5))));
};
distribution_nullary_kernel<scalar_t, accscalar_t, curand4_engine_calls/2>(iter,
gen,
[] __device__ (curandStatePhilox4_32_10_t* state) { return curand_uniform2_double(state); },
cauchy_func);
} else {
// use __tanf fast approximation for peak bandwidth
auto cauchy_func = [median, sigma] __device__ (accscalar_t rand) {
return static_cast<scalar_t>(median + sigma *
__tanf(static_cast<accscalar_t>(M_PI) * (rand-static_cast<accscalar_t>(0.5))));
};
distribution_nullary_kernel<scalar_t, accscalar_t, curand4_engine_calls>(iter,
gen,
[] __device__ (curandStatePhilox4_32_10_t* state) { return curand_uniform4(state); },
cauchy_func);
}
});
}
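// Transform used above (inverse CDF, for illustration): the standard Cauchy
// CDF is F(x) = 1/2 + atan(x) / pi, so for U ~ Uniform(0, 1)
//   X = median + sigma * tan(pi * (U - 1/2))
// follows a Cauchy(median, sigma) distribution; __tanf trades a little
// accuracy for bandwidth in the non-double path.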
void exponential_kernel_cuda(TensorIterator& iter, double lambda_, Generator* gen_) {
auto gen = get_generator_or_default<CUDAGenerator>(gen_, cuda::detail::getDefaultCUDAGenerator());
// Note that HIP doesn't support std::nextafter in device code.
auto nextafter_1_0_float = std::nextafter(1.0f, 0.0f);
auto nextafter_1_0_double = std::nextafter(1.0, 0.0);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "exponential_cuda", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
auto lambda = static_cast<accscalar_t>(lambda_);
if (std::is_same<scalar_t, double>::value) {
// define lambda for exponential transformation
auto exponential_func = [lambda, nextafter_1_0_double] __device__ (accscalar_t rand) {
accscalar_t sample;
// curand_uniform has (0,1] bounds. log(1) is 0 and exponential excludes 0.
// Hence, squash the 1 to just below 1.
if(rand == static_cast<accscalar_t>(1.0)) {
sample = ::log(nextafter_1_0_double);
} else {
sample = ::log(rand);
}
return static_cast<scalar_t>(static_cast<accscalar_t>(-1.0) / lambda * sample);
};
distribution_nullary_kernel<scalar_t, accscalar_t, curand4_engine_calls/2>(iter,
gen,
[] __device__ (curandStatePhilox4_32_10_t* state) { return curand_uniform2_double(state); },
exponential_func);
} else {
// use __logf fast approximation for peak bandwidth
auto exponential_func = [lambda, nextafter_1_0_float] __device__ (accscalar_t rand) {
accscalar_t sample;
if(rand == static_cast<accscalar_t>(1.0)) {
sample = __logf(nextafter_1_0_float);
} else {
sample = __logf(rand);
}
return static_cast<scalar_t>(static_cast<accscalar_t>(-1.0) / lambda * sample);
};
distribution_nullary_kernel<scalar_t, accscalar_t, curand4_engine_calls>(iter,
gen,
[] __device__ (curandStatePhilox4_32_10_t* state) { return curand_uniform4(state); },
exponential_func);
}
});
}
void geometric_kernel_cuda(TensorIterator& iter, double p_, Generator* gen_) {
auto gen = get_generator_or_default<CUDAGenerator>(gen_, cuda::detail::getDefaultCUDAGenerator());
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, iter.dtype(), "geometric_cuda", [&] {
if (std::is_same<scalar_t, double>::value) {
// define lambda for geometric transformation
auto geometric_func = [p_] __device__ (double rand) {
return static_cast<scalar_t>(::ceil(::log(rand) / ::log(static_cast<double>(1.0)-p_)));
};
distribution_nullary_kernel<scalar_t, double, curand4_engine_calls/2>(iter,
gen,
[] __device__ (curandStatePhilox4_32_10_t* state) { return curand_uniform2_double(state); },
geometric_func);
} else {
auto p = static_cast<float>(p_);
auto geometric_func = [p] __device__ (float rand) {
// use __logf fast approximation for peak bandwidth
return static_cast<scalar_t>(::ceil(__logf(rand) / __logf(static_cast<float>(1.0)-p)));
};
distribution_nullary_kernel<scalar_t, float, curand4_engine_calls>(iter,
gen,
[] __device__ (curandStatePhilox4_32_10_t* state) { return curand_uniform4(state); },
geometric_func);
}
});
}
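// Transform used above (for illustration): for U ~ Uniform(0, 1),
//   X = ceil(log(U) / log(1 - p))
// is geometric with success probability p, since log(1 - p) < 0 gives
//   P(X > k) = P(log(U) / log(1 - p) > k) = P(U < (1 - p)^k) = (1 - p)^k,
// the standard inverse-CDF sampler for the 1-based geometric law.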
void log_normal_kernel_cuda(TensorIterator& iter, double mean_, double std_, Generator* gen_) {
auto gen = get_generator_or_default<CUDAGenerator>(gen_, cuda::detail::getDefaultCUDAGenerator());
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "log_normal_cuda", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
auto mean = static_cast<accscalar_t>(mean_);
auto std = static_cast<accscalar_t>(std_);
if (std::is_same<scalar_t, double>::value) {
// define lambda for log_normal transformation
auto log_normal_func = [mean, std] __device__ (accscalar_t rand) {
return static_cast<scalar_t>(::exp(rand * std + mean));
};
distribution_nullary_kernel<scalar_t, accscalar_t, curand4_engine_calls/2>(iter,
gen,
[] __device__ (curandStatePhilox4_32_10_t* state) { return curand_normal2_double(state); },
log_normal_func);
} else {
auto log_normal_func = [mean, std] __device__ (accscalar_t rand) {
// use __expf fast approximation for peak bandwidth
return static_cast<scalar_t>(__expf(rand * std + mean));
};
distribution_nullary_kernel<scalar_t, accscalar_t, curand4_engine_calls>(iter,
gen,
[] __device__ (curandStatePhilox4_32_10_t* state) { return curand_normal4(state); },
log_normal_func);
}
});
}
void bernoulli_scalar_cuda_kernel(TensorIterator& iter, double p_, Generator* gen_) {
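// A uniform draw u in (0,1] is mapped to 1 when u <= p and to 0 otherwise, so P(1) = p.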
auto gen = get_generator_or_default<CUDAGenerator>(gen_, cuda::detail::getDefaultCUDAGenerator());
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, iter.dtype(), "bernoulli_scalar_cuda_", [&] {
if (std::is_same<scalar_t, double>::value) {
// define lambda for bernoulli transformation
auto bernoulli_func = [p_] __device__ (double rand) {
return static_cast<scalar_t>(rand <= p_);
};
distribution_nullary_kernel<scalar_t, double, curand4_engine_calls/2>(iter,
gen,
[] __device__ (curandStatePhilox4_32_10_t* state) { return curand_uniform2_double(state); },
bernoulli_func);
} else {
auto p = static_cast<float>(p_);
auto bernoulli_func = [p] __device__ (float rand) {
return static_cast<scalar_t>(rand <= p);
};
distribution_nullary_kernel<scalar_t, float, curand4_engine_calls>(iter,
gen,
[] __device__ (curandStatePhilox4_32_10_t* state) { return curand_uniform4(state); },
bernoulli_func);
}
});
}
Tensor& uniform_cuda_(Tensor& self, double from, double to, Generator* gen) {
auto iter = TensorIterator::nullary_op(self);
uniform_kernel_cuda(iter, from, to, gen);
return self;
}
Tensor& random_cuda_(Tensor& self, Generator* gen) {
auto iter = TensorIterator::nullary_op(self);
uint64_t range;
auto iter_scalar_type = iter.dtype();
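// Floating point dtypes sample integers in [0, 2^mantissa_digits], the largest contiguous range the
// type represents exactly; integral dtypes cover their full non-negative range [0, max].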
if (isFloatingType(iter_scalar_type)) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter_scalar_type, "random_cuda_range_calc", [&] {
range = static_cast<uint64_t>((1ULL << std::numeric_limits<scalar_t>::digits) + 1);
});
} else {
AT_DISPATCH_INTEGRAL_TYPES(iter_scalar_type, "random_cuda_range_calc", [&] {
range = static_cast<uint64_t>(std::numeric_limits<scalar_t>::max()) + 1;
});
}
random_kernel_cuda(iter, range, 0, gen);
return self;
}
Tensor& clamped_random_cuda_(Tensor& self, int64_t from, int64_t to, Generator* gen) {
TORCH_CHECK(from < to, "random_ expects 'from' to be less than 'to', but got from=", from, " >= to=", to);
auto iter = TensorIterator::nullary_op(self);
uint64_t range = to - from;
random_kernel_cuda(iter, range, from, gen);
return self;
}
Tensor& capped_random_cuda_(Tensor& self, int64_t to, Generator* gen) {
return clamped_random_cuda_(self, 0, to, gen);
}
Tensor& normal_cuda_(Tensor& self, double mean, double std, Generator* gen) {
TORCH_CHECK(std > 0.0, "normal_ expects std > 0.0, but found std=", std);
auto iter = TensorIterator::nullary_op(self);
normal_kernel_cuda(iter, mean, std, gen);
return self;
}
Tensor& normal_out_cuda(Tensor& output, const Tensor& mean, double std, Generator* gen) {
normal_cuda_(output, 0, std, gen);
output.add_(mean);
return output;
}
Tensor& normal_out_cuda(Tensor& output, double mean, const Tensor& std, Generator* gen) {
normal_cuda_(output, 0, 1, gen);
auto mean_tensor = at::full({1}, mean, output.options());
// NB: addcmul_out copies the tensor to be added into the output.
// Please look at aten/src/THC/generic/THCTensorMathPointwise.cu
// The previous function here was addcmul_out(output, mean_tensor, output, std, 1);
// The third argument is not a constant reference and hence the samples in output are overwritten.
// Consequently, the computation performed is mean_tensor + mean_tensor * std instead of mean_tensor + output * std
output.mul_(std).add_(mean_tensor);
return output;
}
Tensor& normal_out_cuda(Tensor& output, const Tensor& mean, const Tensor& std, Generator* gen) {
normal_cuda_(output, 0, 1, gen);
// NB: addcmul_out copies the tensor to be added into the output.
// Please look at aten/src/THC/generic/THCTensorMathPointwise.cu
// The previous function here was addcmul_out(output, mean, output, std, 1);
// The third argument is not a constant reference and hence the samples in output are overwritten.
// Consequently, the computation performed is mean + mean * std instead of mean + output * std
output.mul_(std).add_(mean);
return output;
}
Tensor normal_cuda(const Tensor& mean, double std, Generator* gen) {
Tensor ret = at::empty_like(mean);
normal_out_cuda(ret, mean, std, gen);
return ret;
}
Tensor normal_cuda(double mean, const Tensor& std, Generator* gen) {
Tensor ret = at::empty_like(std);
normal_out_cuda(ret, mean, std, gen);
return ret;
}
Tensor normal_cuda(const Tensor& mean, const Tensor& std, Generator* gen) {
Tensor ret = at::empty_like(mean);
normal_out_cuda(ret, mean, std, gen);
return ret;
}
Tensor& cauchy_cuda_(Tensor& self, double median, double sigma, Generator* gen) {
auto iter = TensorIterator::nullary_op(self);
cauchy_kernel_cuda(iter, median, sigma, gen);
return self;
}
Tensor& exponential_cuda_(Tensor& self, double lambda, Generator* gen) {
auto iter = TensorIterator::nullary_op(self);
exponential_kernel_cuda(iter, lambda, gen);
return self;
}
Tensor& geometric_cuda_(Tensor& self, double p, Generator* gen) {
TORCH_CHECK(0 < p && p < 1, "geometric_ expects p to be in (0, 1), but got p=", p);
auto iter = TensorIterator::nullary_op(self);
geometric_kernel_cuda(iter, p, gen);
return self;
}
Tensor& log_normal_cuda_(Tensor& self, double mean, double std, Generator* gen) {
TORCH_CHECK(std > 0.0, "log_normal_ expects std > 0.0, but found std=", std);
auto iter = TensorIterator::nullary_op(self);
log_normal_kernel_cuda(iter, mean, std, gen);
return self;
}
Tensor& bernoulli_scalar_cuda_(Tensor &self, double p, Generator* gen) {
TORCH_CHECK(0 <= p && p <= 1, "bernoulli_ expects p to be in [0, 1], but got p=", p);
auto iter = TensorIterator::nullary_op(self);
bernoulli_scalar_cuda_kernel(iter, p, gen);
return self;
}
}} // namespace at::native
|
4a3833a57c729edcea484ccbdcb65a50c5358e96.hip
|
// !!! This is a file automatically generated by hipify!!!
// thrustest
#pragma warning( disable : 4244) // thrust::reduce int mismatch
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "thrust/host_vector.h"
#include "thrust/device_vector.h"
#include "thrust/execution_policy.h"
#include "thrust/for_each.h"
#include "thrust/scan.h"
#include <stdio.h>
struct printf_functor
{
__host__ __device__ void operator()(int x) { printf("%d\n", x); }
};
__global__ void init_a(int *a){
int id = blockIdx.x*blockDim.x + threadIdx.x;
a[id] = id+1;
}
__global__ void scan_a(int *a){
int id = blockIdx.x*blockDim.x + threadIdx.x;
a[id] = (id+1)%3;
}
int main(int argc,char *argv[])
{
if (argc < 3) { printf("usage: %s threads blocks\n", argv[0]); return 1; }
int threads = atoi(argv[1]);
int blocks = atoi(argv[2]);
int size = threads*blocks;
thrust::host_vector<int> a(size);
thrust::device_vector<int> dev_a(size);
hipLaunchKernelGGL(( init_a), dim3(blocks),dim3(threads), 0, 0, dev_a.data().get());
a = dev_a;
int sum1 = thrust::reduce(a.begin(),a.end());
printf("sum 1 %d\n",sum1);
int sum2 = thrust::reduce(dev_a.begin(),dev_a.end());
printf("sum 2 %d\n",sum2);
// print without copy to host!
thrust::for_each(thrust::device, dev_a.begin(), dev_a.end(), printf_functor());
hipLaunchKernelGGL(( scan_a), dim3(blocks),dim3(threads), 0, 0, dev_a.data().get());
a = dev_a;
for(int k=0;k<size;k++) printf(" %d", a[k]); printf("\n");
// in-place scan (exclusive variant left commented out; inclusive_scan is used)
//thrust::exclusive_scan( thrust::device, dev_a.begin(), dev_a.end(), dev_a.begin()); // in-place scan?
thrust::inclusive_scan( thrust::device, dev_a.begin(), dev_a.end(), dev_a.begin()); // in-place scan?
a = dev_a;
for(int k=0;k<size;k++) printf(" %d", a[k]); printf("\n");
return 0;
}
|
4a3833a57c729edcea484ccbdcb65a50c5358e96.cu
|
// thrustest
#pragma warning( disable : 4244) // thrust::reduce int mismatch
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "thrust/host_vector.h"
#include "thrust/device_vector.h"
#include "thrust/execution_policy.h"
#include "thrust/for_each.h"
#include "thrust/scan.h"
#include <stdio.h>
struct printf_functor
{
__host__ __device__ void operator()(int x) { printf("%d\n", x); }
};
__global__ void init_a(int *a){
int id = blockIdx.x*blockDim.x + threadIdx.x;
a[id] = id+1;
}
__global__ void scan_a(int *a){
int id = blockIdx.x*blockDim.x + threadIdx.x;
a[id] = (id+1)%3;
}
int main(int argc,char *argv[])
{
if (argc < 3) { printf("usage: %s threads blocks\n", argv[0]); return 1; }
int threads = atoi(argv[1]);
int blocks = atoi(argv[2]);
int size = threads*blocks;
thrust::host_vector<int> a(size);
thrust::device_vector<int> dev_a(size);
init_a<<<blocks,threads>>>(dev_a.data().get());
a = dev_a;
int sum1 = thrust::reduce(a.begin(),a.end());
printf("sum 1 %d\n",sum1);
int sum2 = thrust::reduce(dev_a.begin(),dev_a.end());
printf("sum 2 %d\n",sum2);
// print without copy to host!
thrust::for_each(thrust::device, dev_a.begin(), dev_a.end(), printf_functor());
scan_a<<<blocks,threads>>>(dev_a.data().get());
a = dev_a;
for(int k=0;k<size;k++) printf(" %d", a[k]); printf("\n");
// in-place scan (exclusive variant left commented out; inclusive_scan is used)
//thrust::exclusive_scan( thrust::device, dev_a.begin(), dev_a.end(), dev_a.begin()); // in-place scan?
thrust::inclusive_scan( thrust::device, dev_a.begin(), dev_a.end(), dev_a.begin()); // in-place scan?
a = dev_a;
for(int k=0;k<size;k++) printf(" %d", a[k]); printf("\n");
return 0;
}
|
611de419dfca26614718bfa2b9fa45fffdac1a43.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2007 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws. Users and possessors of this source code
* are hereby granted a nonexclusive, royalty-free license to use this code
* in individual and commercial software.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*
* Any use of this source code in individual and commercial software must
* include, in the user documentation and internal comments to the code,
* the above Disclaimer and U.S. Government End Users Notice.
*/
#include <stdio.h>
#include <stdlib.h>
#include <cutil_inline.h>
#include "SobelFilter_kernels.h"
#define SV 0.003921f
#define IV 255.f
// Texture reference for reading image
texture<unsigned char, 2> tex;
extern __shared__ unsigned char LocalBlock[];
static hipArray *array = NULL;
#define Radius 1
#ifdef FIXED_BLOCKWIDTH
#define BlockWidth 80
#define SharedPitch 384
#endif
__device__ short __abs(short a) {
return ((a) < 0 ? -(a) : a);
}
__device__ unsigned char
ComputeSobel(unsigned char ul, // upper left
unsigned char um, // upper middle
unsigned char ur, // upper right
unsigned char ml, // middle left
unsigned char mm, // middle (unused)
unsigned char mr, // middle right
unsigned char ll, // lower left
unsigned char lm, // lower middle
unsigned char lr, // lower right
float fScale )
{
short Horz = ur + 2*mr + lr - ul - 2*ml - ll;
short Vert = ul + 2*um + ur - ll - 2*lm - lr;
short Sum = (short) (fScale*(__abs(Horz)+__abs(Vert)));
if ( Sum < 0 ) return 0; else if ( Sum > 0xff ) return 0xff;
return (unsigned char) Sum;
}
__global__ void
SobelShared( uchar4 *pSobelOriginal, unsigned short SobelPitch,
#ifndef FIXED_BLOCKWIDTH
short BlockWidth, short SharedPitch,
#endif
short w, short h, float fScale )
{
short u = 4*blockIdx.x*BlockWidth;
short v = blockIdx.y*blockDim.y + threadIdx.y;
short ib;
int SharedIdx = threadIdx.y * SharedPitch;
for ( ib = threadIdx.x; ib < BlockWidth+2*Radius; ib += blockDim.x ) {
LocalBlock[SharedIdx+4*ib+0] = tex2D( tex,
(float) (u+4*ib-Radius+0), (float) (v-Radius) );
LocalBlock[SharedIdx+4*ib+1] = tex2D( tex,
(float) (u+4*ib-Radius+1), (float) (v-Radius) );
LocalBlock[SharedIdx+4*ib+2] = tex2D( tex,
(float) (u+4*ib-Radius+2), (float) (v-Radius) );
LocalBlock[SharedIdx+4*ib+3] = tex2D( tex,
(float) (u+4*ib-Radius+3), (float) (v-Radius) );
}
if ( threadIdx.y < Radius*2 ) {
//
// copy trailing Radius*2 rows of pixels into shared
//
SharedIdx = (blockDim.y+threadIdx.y) * SharedPitch;
for ( ib = threadIdx.x; ib < BlockWidth+2*Radius; ib += blockDim.x ) {
LocalBlock[SharedIdx+4*ib+0] = tex2D( tex,
(float) (u+4*ib-Radius+0), (float) (v+blockDim.y-Radius) );
LocalBlock[SharedIdx+4*ib+1] = tex2D( tex,
(float) (u+4*ib-Radius+1), (float) (v+blockDim.y-Radius) );
LocalBlock[SharedIdx+4*ib+2] = tex2D( tex,
(float) (u+4*ib-Radius+2), (float) (v+blockDim.y-Radius) );
LocalBlock[SharedIdx+4*ib+3] = tex2D( tex,
(float) (u+4*ib-Radius+3), (float) (v+blockDim.y-Radius) );
}
}
__syncthreads();
u >>= 2; // index as uchar4 from here
uchar4 *pSobel = (uchar4 *) (((char *) pSobelOriginal)+v*SobelPitch);
SharedIdx = threadIdx.y * SharedPitch;
for ( ib = threadIdx.x; ib < BlockWidth; ib += blockDim.x ) {
unsigned char pix00 = LocalBlock[SharedIdx+4*ib+0*SharedPitch+0];
unsigned char pix01 = LocalBlock[SharedIdx+4*ib+0*SharedPitch+1];
unsigned char pix02 = LocalBlock[SharedIdx+4*ib+0*SharedPitch+2];
unsigned char pix10 = LocalBlock[SharedIdx+4*ib+1*SharedPitch+0];
unsigned char pix11 = LocalBlock[SharedIdx+4*ib+1*SharedPitch+1];
unsigned char pix12 = LocalBlock[SharedIdx+4*ib+1*SharedPitch+2];
unsigned char pix20 = LocalBlock[SharedIdx+4*ib+2*SharedPitch+0];
unsigned char pix21 = LocalBlock[SharedIdx+4*ib+2*SharedPitch+1];
unsigned char pix22 = LocalBlock[SharedIdx+4*ib+2*SharedPitch+2];
uchar4 out;
out.x = ComputeSobel(pix00, pix01, pix02,
pix10, pix11, pix12,
pix20, pix21, pix22, fScale );
pix00 = LocalBlock[SharedIdx+4*ib+0*SharedPitch+3];
pix10 = LocalBlock[SharedIdx+4*ib+1*SharedPitch+3];
pix20 = LocalBlock[SharedIdx+4*ib+2*SharedPitch+3];
out.y = ComputeSobel(pix01, pix02, pix00,
pix11, pix12, pix10,
pix21, pix22, pix20, fScale );
pix01 = LocalBlock[SharedIdx+4*ib+0*SharedPitch+4];
pix11 = LocalBlock[SharedIdx+4*ib+1*SharedPitch+4];
pix21 = LocalBlock[SharedIdx+4*ib+2*SharedPitch+4];
out.z = ComputeSobel( pix02, pix00, pix01,
pix12, pix10, pix11,
pix22, pix20, pix21, fScale );
pix02 = LocalBlock[SharedIdx+4*ib+0*SharedPitch+5];
pix12 = LocalBlock[SharedIdx+4*ib+1*SharedPitch+5];
pix22 = LocalBlock[SharedIdx+4*ib+2*SharedPitch+5];
out.w = ComputeSobel( pix00, pix01, pix02,
pix10, pix11, pix12,
pix20, pix21, pix22, fScale );
if ( u+ib < w/4 && v < h ) {
pSobel[u+ib] = out;
}
}
__syncthreads();
}
__global__ void
SobelCopyImage( Pixel *pSobelOriginal, unsigned int Pitch,
int w, int h, float fscale )
{
unsigned char *pSobel =
(unsigned char *) (((char *) pSobelOriginal)+blockIdx.x*Pitch);
for ( int i = threadIdx.x; i < w; i += blockDim.x ) {
pSobel[i] = min( max((tex2D( tex, (float) i, (float) blockIdx.x ) * fscale), 0.f), 255.f);
}
}
__global__ void
SobelTex( Pixel *pSobelOriginal, unsigned int Pitch,
int w, int h, float fScale )
{
unsigned char *pSobel =
(unsigned char *) (((char *) pSobelOriginal)+blockIdx.x*Pitch);
for ( int i = threadIdx.x; i < w; i += blockDim.x ) {
unsigned char pix00 = tex2D( tex, (float) i-1, (float) blockIdx.x-1 );
unsigned char pix01 = tex2D( tex, (float) i+0, (float) blockIdx.x-1 );
unsigned char pix02 = tex2D( tex, (float) i+1, (float) blockIdx.x-1 );
unsigned char pix10 = tex2D( tex, (float) i-1, (float) blockIdx.x+0 );
unsigned char pix11 = tex2D( tex, (float) i+0, (float) blockIdx.x+0 );
unsigned char pix12 = tex2D( tex, (float) i+1, (float) blockIdx.x+0 );
unsigned char pix20 = tex2D( tex, (float) i-1, (float) blockIdx.x+1 );
unsigned char pix21 = tex2D( tex, (float) i+0, (float) blockIdx.x+1 );
unsigned char pix22 = tex2D( tex, (float) i+1, (float) blockIdx.x+1 );
pSobel[i] = ComputeSobel(pix00, pix01, pix02,
pix10, pix11, pix12,
pix20, pix21, pix22, fScale );
}
}
extern "C" void setupTexture(int iw, int ih, Pixel *data, int Bpp)
{
hipChannelFormatDesc desc;
if (Bpp == 1) {
desc = hipCreateChannelDesc<unsigned char>();
} else {
desc = hipCreateChannelDesc<uchar4>();
}
cutilSafeCall(hipMallocArray(&array, &desc, iw, ih));
cutilSafeCall(hipMemcpyToArray(array, 0, 0, data, Bpp*sizeof(Pixel)*iw*ih, hipMemcpyHostToDevice));
}
extern "C" void deleteTexture(void)
{
cutilSafeCall(hipFreeArray(array));
}
// Wrapper for the __global__ call that sets up the texture and threads
extern "C" void sobelFilter(Pixel *odata, int iw, int ih, enum SobelDisplayMode mode, float fScale)
{
cutilSafeCall(hipBindTextureToArray(tex, array));
switch ( mode ) {
case SOBELDISPLAY_IMAGE:
hipLaunchKernelGGL(( SobelCopyImage), dim3(ih), dim3(384), 0, 0, odata, iw, iw, ih, fScale );
break;
case SOBELDISPLAY_SOBELTEX:
hipLaunchKernelGGL(( SobelTex), dim3(ih), dim3(384), 0, 0, odata, iw, iw, ih, fScale );
break;
case SOBELDISPLAY_SOBELSHARED:
{
dim3 threads(16,4);
#ifndef FIXED_BLOCKWIDTH
int BlockWidth = 80; // must be divisible by 16 for coalescing
#endif
dim3 blocks = dim3(iw/(4*BlockWidth)+(0!=iw%(4*BlockWidth)),
ih/threads.y+(0!=ih%threads.y));
int SharedPitch = ~0x3f&(4*(BlockWidth+2*Radius)+0x3f);
int sharedMem = SharedPitch*(threads.y+2*Radius);
// for the shared kernel, width must be divisible by 4
iw &= ~3;
hipLaunchKernelGGL(( SobelShared), dim3(blocks), dim3(threads), sharedMem, 0, (uchar4 *) odata,
iw,
#ifndef FIXED_BLOCKWIDTH
BlockWidth, SharedPitch,
#endif
iw, ih, fScale );
}
break;
}
cutilSafeCall(hipUnbindTexture(tex));
}
|
611de419dfca26614718bfa2b9fa45fffdac1a43.cu
|
/*
* Copyright 1993-2007 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws. Users and possessors of this source code
* are hereby granted a nonexclusive, royalty-free license to use this code
* in individual and commercial software.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*
* Any use of this source code in individual and commercial software must
* include, in the user documentation and internal comments to the code,
* the above Disclaimer and U.S. Government End Users Notice.
*/
#include <stdio.h>
#include <stdlib.h>
#include <cutil_inline.h>
#include "SobelFilter_kernels.h"
#define SV 0.003921f
#define IV 255.f
// Texture reference for reading image
texture<unsigned char, 2> tex;
extern __shared__ unsigned char LocalBlock[];
static cudaArray *array = NULL;
#define Radius 1
#ifdef FIXED_BLOCKWIDTH
#define BlockWidth 80
#define SharedPitch 384
#endif
__device__ short __abs(short a) {
return ((a) < 0 ? -(a) : a);
}
__device__ unsigned char
ComputeSobel(unsigned char ul, // upper left
unsigned char um, // upper middle
unsigned char ur, // upper right
unsigned char ml, // middle left
unsigned char mm, // middle (unused)
unsigned char mr, // middle right
unsigned char ll, // lower left
unsigned char lm, // lower middle
unsigned char lr, // lower right
float fScale )
{
short Horz = ur + 2*mr + lr - ul - 2*ml - ll;
short Vert = ul + 2*um + ur - ll - 2*lm - lr;
short Sum = (short) (fScale*(__abs(Horz)+__abs(Vert)));
if ( Sum < 0 ) return 0; else if ( Sum > 0xff ) return 0xff;
return (unsigned char) Sum;
}
__global__ void
SobelShared( uchar4 *pSobelOriginal, unsigned short SobelPitch,
#ifndef FIXED_BLOCKWIDTH
short BlockWidth, short SharedPitch,
#endif
short w, short h, float fScale )
{
short u = 4*blockIdx.x*BlockWidth;
short v = blockIdx.y*blockDim.y + threadIdx.y;
short ib;
int SharedIdx = threadIdx.y * SharedPitch;
for ( ib = threadIdx.x; ib < BlockWidth+2*Radius; ib += blockDim.x ) {
LocalBlock[SharedIdx+4*ib+0] = tex2D( tex,
(float) (u+4*ib-Radius+0), (float) (v-Radius) );
LocalBlock[SharedIdx+4*ib+1] = tex2D( tex,
(float) (u+4*ib-Radius+1), (float) (v-Radius) );
LocalBlock[SharedIdx+4*ib+2] = tex2D( tex,
(float) (u+4*ib-Radius+2), (float) (v-Radius) );
LocalBlock[SharedIdx+4*ib+3] = tex2D( tex,
(float) (u+4*ib-Radius+3), (float) (v-Radius) );
}
if ( threadIdx.y < Radius*2 ) {
//
// copy trailing Radius*2 rows of pixels into shared
//
SharedIdx = (blockDim.y+threadIdx.y) * SharedPitch;
for ( ib = threadIdx.x; ib < BlockWidth+2*Radius; ib += blockDim.x ) {
LocalBlock[SharedIdx+4*ib+0] = tex2D( tex,
(float) (u+4*ib-Radius+0), (float) (v+blockDim.y-Radius) );
LocalBlock[SharedIdx+4*ib+1] = tex2D( tex,
(float) (u+4*ib-Radius+1), (float) (v+blockDim.y-Radius) );
LocalBlock[SharedIdx+4*ib+2] = tex2D( tex,
(float) (u+4*ib-Radius+2), (float) (v+blockDim.y-Radius) );
LocalBlock[SharedIdx+4*ib+3] = tex2D( tex,
(float) (u+4*ib-Radius+3), (float) (v+blockDim.y-Radius) );
}
}
__syncthreads();
u >>= 2; // index as uchar4 from here
uchar4 *pSobel = (uchar4 *) (((char *) pSobelOriginal)+v*SobelPitch);
SharedIdx = threadIdx.y * SharedPitch;
for ( ib = threadIdx.x; ib < BlockWidth; ib += blockDim.x ) {
unsigned char pix00 = LocalBlock[SharedIdx+4*ib+0*SharedPitch+0];
unsigned char pix01 = LocalBlock[SharedIdx+4*ib+0*SharedPitch+1];
unsigned char pix02 = LocalBlock[SharedIdx+4*ib+0*SharedPitch+2];
unsigned char pix10 = LocalBlock[SharedIdx+4*ib+1*SharedPitch+0];
unsigned char pix11 = LocalBlock[SharedIdx+4*ib+1*SharedPitch+1];
unsigned char pix12 = LocalBlock[SharedIdx+4*ib+1*SharedPitch+2];
unsigned char pix20 = LocalBlock[SharedIdx+4*ib+2*SharedPitch+0];
unsigned char pix21 = LocalBlock[SharedIdx+4*ib+2*SharedPitch+1];
unsigned char pix22 = LocalBlock[SharedIdx+4*ib+2*SharedPitch+2];
uchar4 out;
out.x = ComputeSobel(pix00, pix01, pix02,
pix10, pix11, pix12,
pix20, pix21, pix22, fScale );
pix00 = LocalBlock[SharedIdx+4*ib+0*SharedPitch+3];
pix10 = LocalBlock[SharedIdx+4*ib+1*SharedPitch+3];
pix20 = LocalBlock[SharedIdx+4*ib+2*SharedPitch+3];
out.y = ComputeSobel(pix01, pix02, pix00,
pix11, pix12, pix10,
pix21, pix22, pix20, fScale );
pix01 = LocalBlock[SharedIdx+4*ib+0*SharedPitch+4];
pix11 = LocalBlock[SharedIdx+4*ib+1*SharedPitch+4];
pix21 = LocalBlock[SharedIdx+4*ib+2*SharedPitch+4];
out.z = ComputeSobel( pix02, pix00, pix01,
pix12, pix10, pix11,
pix22, pix20, pix21, fScale );
pix02 = LocalBlock[SharedIdx+4*ib+0*SharedPitch+5];
pix12 = LocalBlock[SharedIdx+4*ib+1*SharedPitch+5];
pix22 = LocalBlock[SharedIdx+4*ib+2*SharedPitch+5];
out.w = ComputeSobel( pix00, pix01, pix02,
pix10, pix11, pix12,
pix20, pix21, pix22, fScale );
if ( u+ib < w/4 && v < h ) {
pSobel[u+ib] = out;
}
}
__syncthreads();
}
__global__ void
SobelCopyImage( Pixel *pSobelOriginal, unsigned int Pitch,
int w, int h, float fscale )
{
unsigned char *pSobel =
(unsigned char *) (((char *) pSobelOriginal)+blockIdx.x*Pitch);
for ( int i = threadIdx.x; i < w; i += blockDim.x ) {
pSobel[i] = min( max((tex2D( tex, (float) i, (float) blockIdx.x ) * fscale), 0.f), 255.f);
}
}
__global__ void
SobelTex( Pixel *pSobelOriginal, unsigned int Pitch,
int w, int h, float fScale )
{
unsigned char *pSobel =
(unsigned char *) (((char *) pSobelOriginal)+blockIdx.x*Pitch);
for ( int i = threadIdx.x; i < w; i += blockDim.x ) {
unsigned char pix00 = tex2D( tex, (float) i-1, (float) blockIdx.x-1 );
unsigned char pix01 = tex2D( tex, (float) i+0, (float) blockIdx.x-1 );
unsigned char pix02 = tex2D( tex, (float) i+1, (float) blockIdx.x-1 );
unsigned char pix10 = tex2D( tex, (float) i-1, (float) blockIdx.x+0 );
unsigned char pix11 = tex2D( tex, (float) i+0, (float) blockIdx.x+0 );
unsigned char pix12 = tex2D( tex, (float) i+1, (float) blockIdx.x+0 );
unsigned char pix20 = tex2D( tex, (float) i-1, (float) blockIdx.x+1 );
unsigned char pix21 = tex2D( tex, (float) i+0, (float) blockIdx.x+1 );
unsigned char pix22 = tex2D( tex, (float) i+1, (float) blockIdx.x+1 );
pSobel[i] = ComputeSobel(pix00, pix01, pix02,
pix10, pix11, pix12,
pix20, pix21, pix22, fScale );
}
}
extern "C" void setupTexture(int iw, int ih, Pixel *data, int Bpp)
{
cudaChannelFormatDesc desc;
if (Bpp == 1) {
desc = cudaCreateChannelDesc<unsigned char>();
} else {
desc = cudaCreateChannelDesc<uchar4>();
}
cutilSafeCall(cudaMallocArray(&array, &desc, iw, ih));
cutilSafeCall(cudaMemcpyToArray(array, 0, 0, data, Bpp*sizeof(Pixel)*iw*ih, cudaMemcpyHostToDevice));
}
extern "C" void deleteTexture(void)
{
cutilSafeCall(cudaFreeArray(array));
}
// Wrapper for the __global__ call that sets up the texture and threads
extern "C" void sobelFilter(Pixel *odata, int iw, int ih, enum SobelDisplayMode mode, float fScale)
{
cutilSafeCall(cudaBindTextureToArray(tex, array));
switch ( mode ) {
case SOBELDISPLAY_IMAGE:
SobelCopyImage<<<ih, 384>>>(odata, iw, iw, ih, fScale );
break;
case SOBELDISPLAY_SOBELTEX:
SobelTex<<<ih, 384>>>(odata, iw, iw, ih, fScale );
break;
case SOBELDISPLAY_SOBELSHARED:
{
dim3 threads(16,4);
#ifndef FIXED_BLOCKWIDTH
int BlockWidth = 80; // must be divisible by 16 for coalescing
#endif
dim3 blocks = dim3(iw/(4*BlockWidth)+(0!=iw%(4*BlockWidth)),
ih/threads.y+(0!=ih%threads.y));
int SharedPitch = ~0x3f&(4*(BlockWidth+2*Radius)+0x3f);
int sharedMem = SharedPitch*(threads.y+2*Radius);
// for the shared kernel, width must be divisible by 4
iw &= ~3;
SobelShared<<<blocks, threads, sharedMem>>>((uchar4 *) odata,
iw,
#ifndef FIXED_BLOCKWIDTH
BlockWidth, SharedPitch,
#endif
iw, ih, fScale );
}
break;
}
cutilSafeCall(cudaUnbindTexture(tex));
}
|
d727e1600d66c8e9539965d32a9633db1bbb471e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "util.hip"
#include "tabs/sbox.tab"
union u32_t {
uint i;
uchar c[4];
};
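// Bit-sliced S-box lookup: for TTABLE == 128/64/32 each table byte packs 4-/2-/1-bit slices of
// consecutive S-box entries and STE() reassembles the output byte; TTABLE == 256 is a plain byte lookup.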
#if TTABLE == 128
#define STE_128_LH(state) (sbox[(state >> 1) ] >> ((state & 0x1) << 2))
#define STE_128_HH(state) (sbox[(state >> 1) + 128] >> ((state & 0x1) << 2))
#define STE(state) ((STE_128_LH(state) & 0x0f) | (STE_128_HH(state) << 4))
#elif TTABLE == 64
#define STE_64_0(state) (sbox[(state >> 2) ] >> ((state & 0x3) << 1))
#define STE_64_1(state) (sbox[(state >> 2) + 64] >> ((state & 0x3) << 1))
#define STE_64_2(state) (sbox[(state >> 2) + 128] >> ((state & 0x3) << 1))
#define STE_64_3(state) (sbox[(state >> 2) + 192] >> ((state & 0x3) << 1))
#define STE(state) ((STE_64_0(state) & 0x03) \
| ((STE_64_1(state) & 0x03) << 2) \
| ((STE_64_2(state) & 0x03) << 4) \
| ((STE_64_3(state) & 0x03) << 6))
#elif TTABLE == 32
#define STE_32_0(state) (sbox[(state >> 3) ] >> (state & 0x7))
#define STE_32_1(state) (sbox[(state >> 3) + 32] >> (state & 0x7))
#define STE_32_2(state) (sbox[(state >> 3) + 64] >> (state & 0x7))
#define STE_32_3(state) (sbox[(state >> 3) + 96] >> (state & 0x7))
#define STE_32_4(state) (sbox[(state >> 3) + 128] >> (state & 0x7))
#define STE_32_5(state) (sbox[(state >> 3) + 160] >> (state & 0x7))
#define STE_32_6(state) (sbox[(state >> 3) + 192] >> (state & 0x7))
#define STE_32_7(state) (sbox[(state >> 3) + 224] >> (state & 0x7))
#define STE(state) ((STE_32_0(state) & 0x01) \
| ((STE_32_1(state) & 0x01) << 1) \
| ((STE_32_2(state) & 0x01) << 2) \
| ((STE_32_3(state) & 0x01) << 3) \
| ((STE_32_4(state) & 0x01) << 4) \
| ((STE_32_5(state) & 0x01) << 5) \
| ((STE_32_6(state) & 0x01) << 6) \
| ((STE_32_7(state) & 0x01) << 7))
#else
#define STE(state) (sbox[state])
#endif
#define SWAP(a, b) (a) ^= (b); (b) ^= (a); (a) ^= (b);
__device__ void TransposeSelf(uchar *state) {
SWAP(state[1], state[4]);
SWAP(state[2], state[8]);
SWAP(state[3], state[12]);
SWAP(state[6], state[9]);
SWAP(state[7], state[13]);
SWAP(state[11], state[14]);
}
__device__ void Transpose(uchar *dst, uchar *src) {
for (int i = 0; i < 4; i++) {
for (int j = 0; j < 4; j++) {
dst[j*4+i] = src[i*4+j];
}
}
}
__device__ void AddRoundKey(uchar *state, uchar *rek) {
for (int i = 0; i < 4; i++) {
for (int j = 0; j < 4; j++) {
state[j*4+i] ^= rek[i*4+3-j];
}
}
}
__device__ void SubBytesSecure(uchar *state, uchar *sbox) {
for (int i = 0; i < 16; i++) {
state[i] = STE(state[i]);//sbox[state[i]];
}
}
__device__ void SubBytes(uchar *state, uchar *sbox) {
for (int i = 0; i < 16; i++) {
state[i] = sbox[state[i]];
}
}
#define xtime(x) ((x << 1) ^ ((x >> 7) * 0x1b))
__device__ void MixColumns(uchar *state) {
uchar Tmp, Tm, t;
for(int i = 0; i < 4; i++) {
t = state[i];
Tmp = state[i] ^ state[4+i] ^ state[8+i] ^ state[12+i] ;
Tm = state[i] ^ state[4+i] ;
Tm = xtime(Tm);
state[i] ^= Tm ^ Tmp ;
Tm = state[4+i] ^ state[8+i] ;
Tm = xtime(Tm);
state[4+i] ^= Tm ^ Tmp ;
Tm = state[8+i] ^ state[12+i] ;
Tm = xtime(Tm);
state[8+i] ^= Tm ^ Tmp ;
Tm = state[12+i] ^ t ;
Tm = xtime(Tm);
state[12+i] ^= Tm ^ Tmp ;
}
}
__device__ void ShiftRows(uchar *state) {
uchar temp;
// Rotate first row 1 columns to left
temp = state[4];
state[4] = state[5];
state[5] = state[6];
state[6] = state[7];
state[7] = temp;
// Rotate second row 2 columns to left
temp = state[8];
state[8] = state[10];
state[10] = temp;
temp = state[9];
state[9] = state[11];
state[11] = temp;
// Rotate third row 3 columns to left
temp = state[12];
state[12] = state[15];
state[15] = state[14];
state[14] = state[13];
state[13] = temp;
}
#define REV_ENDIAN(x) (((x)>>24)&0x000000FF) | (((x)>>8)&0x0000FF00) | (((x)<<8)&0x00FF0000) | (((x)<<24)&0xFF000000)
__global__ void AES_encrypt(const uint *pt, uint *ct, uint *rek, uint Nr, uint size) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
u32_t state[4];
//uchar state[16];
uchar *_rk = (uchar *)rek;
#ifdef USE_SMEM
__shared__ uchar sbox[256];
#if TTABLE == 256
load_smem_sbox(sbox, Tsbox_256);
#elif TTABLE == 128
load_smem_sbox(sbox, Tsbox_128);
#elif TTABLE == 64
load_smem_sbox(sbox, Tsbox_64);
#elif TTABLE == 32
load_smem_sbox(sbox, Tsbox_32);
#endif // TTABLE
#else
#if TTABLE == 256
uchar *sbox = Tsbox_256;
#elif TTABLE == 128
uchar *sbox = Tsbox_128;
#elif TTABLE == 64
uchar *sbox = Tsbox_64;
#elif TTABLE == 32
uchar *sbox = Tsbox_32;
#endif // TTABLE
#endif // USE_SMEM
uchar *sbox_256 = Tsbox_256;
int iter = 0;
BEGIN:
int offset = (iter * NUM_THREADS * NUM_BLOCKS + tid) << 2;
if (offset >= size) return;
state[0].i = REV_ENDIAN(pt[offset + 0]);
state[1].i = REV_ENDIAN(pt[offset + 1]);
state[2].i = REV_ENDIAN(pt[offset + 2]);
state[3].i = REV_ENDIAN(pt[offset + 3]);
TransposeSelf((uchar*)state);
AddRoundKey((uchar*)state, (uchar*)_rk);
SubBytesSecure((uchar*)state, sbox);
ShiftRows((uchar*)state);
MixColumns((uchar*)state);
AddRoundKey((uchar*)state, (uchar*)(rek + 4));
for (int i = 2; i < Nr; i++)
{
SubBytes((uchar*)state, sbox_256);
ShiftRows((uchar*)state);
MixColumns((uchar*)state);
AddRoundKey((uchar*)state, (uchar*)(rek + i*4));
}
/*
SubBytesSecure((uchar*)state, sbox);
ShiftRows((uchar*)state);
AddRoundKey((uchar*)state, (uchar*)(rek + Nr*4));
*/
TransposeSelf((uchar*)state);
ct[offset + 0] = REV_ENDIAN(state[0].i);
ct[offset + 1] = REV_ENDIAN(state[1].i);
ct[offset + 2] = REV_ENDIAN(state[2].i);
ct[offset + 3] = REV_ENDIAN(state[3].i);
iter++;
goto BEGIN;
}
|
d727e1600d66c8e9539965d32a9633db1bbb471e.cu
|
#include "util.cu"
#include "tabs/sbox.tab"
union u32_t {
uint i;
uchar c[4];
};
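// Bit-sliced S-box lookup: for TTABLE == 128/64/32 each table byte packs 4-/2-/1-bit slices of
// consecutive S-box entries and STE() reassembles the output byte; TTABLE == 256 is a plain byte lookup.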
#if TTABLE == 128
#define STE_128_LH(state) (sbox[(state >> 1) ] >> ((state & 0x1) << 2))
#define STE_128_HH(state) (sbox[(state >> 1) + 128] >> ((state & 0x1) << 2))
#define STE(state) ((STE_128_LH(state) & 0x0f) | (STE_128_HH(state) << 4))
#elif TTABLE == 64
#define STE_64_0(state) (sbox[(state >> 2) ] >> ((state & 0x3) << 1))
#define STE_64_1(state) (sbox[(state >> 2) + 64] >> ((state & 0x3) << 1))
#define STE_64_2(state) (sbox[(state >> 2) + 128] >> ((state & 0x3) << 1))
#define STE_64_3(state) (sbox[(state >> 2) + 192] >> ((state & 0x3) << 1))
#define STE(state) ((STE_64_0(state) & 0x03) \
| ((STE_64_1(state) & 0x03) << 2) \
| ((STE_64_2(state) & 0x03) << 4) \
| ((STE_64_3(state) & 0x03) << 6))
#elif TTABLE == 32
#define STE_32_0(state) (sbox[(state >> 3) ] >> (state & 0x7))
#define STE_32_1(state) (sbox[(state >> 3) + 32] >> (state & 0x7))
#define STE_32_2(state) (sbox[(state >> 3) + 64] >> (state & 0x7))
#define STE_32_3(state) (sbox[(state >> 3) + 96] >> (state & 0x7))
#define STE_32_4(state) (sbox[(state >> 3) + 128] >> (state & 0x7))
#define STE_32_5(state) (sbox[(state >> 3) + 160] >> (state & 0x7))
#define STE_32_6(state) (sbox[(state >> 3) + 192] >> (state & 0x7))
#define STE_32_7(state) (sbox[(state >> 3) + 224] >> (state & 0x7))
#define STE(state) ((STE_32_0(state) & 0x01) \
| ((STE_32_1(state) & 0x01) << 1) \
| ((STE_32_2(state) & 0x01) << 2) \
| ((STE_32_3(state) & 0x01) << 3) \
| ((STE_32_4(state) & 0x01) << 4) \
| ((STE_32_5(state) & 0x01) << 5) \
| ((STE_32_6(state) & 0x01) << 6) \
| ((STE_32_7(state) & 0x01) << 7))
#else
#define STE(state) (sbox[state])
#endif
#define SWAP(a, b) (a) ^= (b); (b) ^= (a); (a) ^= (b);
__device__ void TransposeSelf(uchar *state) {
SWAP(state[1], state[4]);
SWAP(state[2], state[8]);
SWAP(state[3], state[12]);
SWAP(state[6], state[9]);
SWAP(state[7], state[13]);
SWAP(state[11], state[14]);
}
__device__ void Transpose(uchar *dst, uchar *src) {
for (int i = 0; i < 4; i++) {
for (int j = 0; j < 4; j++) {
dst[j*4+i] = src[i*4+j];
}
}
}
__device__ void AddRoundKey(uchar *state, uchar *rek) {
for (int i = 0; i < 4; i++) {
for (int j = 0; j < 4; j++) {
state[j*4+i] ^= rek[i*4+3-j];
}
}
}
__device__ void SubBytesSecure(uchar *state, uchar *sbox) {
for (int i = 0; i < 16; i++) {
state[i] = STE(state[i]);//sbox[state[i]];
}
}
__device__ void SubBytes(uchar *state, uchar *sbox) {
for (int i = 0; i < 16; i++) {
state[i] = sbox[state[i]];
}
}
#define xtime(x) ((x << 1) ^ ((x >> 7) * 0x1b))
__device__ void MixColumns(uchar *state) {
uchar Tmp, Tm, t;
for(int i = 0; i < 4; i++) {
t = state[i];
Tmp = state[i] ^ state[4+i] ^ state[8+i] ^ state[12+i] ;
Tm = state[i] ^ state[4+i] ;
Tm = xtime(Tm);
state[i] ^= Tm ^ Tmp ;
Tm = state[4+i] ^ state[8+i] ;
Tm = xtime(Tm);
state[4+i] ^= Tm ^ Tmp ;
Tm = state[8+i] ^ state[12+i] ;
Tm = xtime(Tm);
state[8+i] ^= Tm ^ Tmp ;
Tm = state[12+i] ^ t ;
Tm = xtime(Tm);
state[12+i] ^= Tm ^ Tmp ;
}
}
__device__ void ShiftRows(uchar *state) {
uchar temp;
// Rotate first row 1 columns to left
temp = state[4];
state[4] = state[5];
state[5] = state[6];
state[6] = state[7];
state[7] = temp;
// Rotate second row 2 columns to left
temp = state[8];
state[8] = state[10];
state[10] = temp;
temp = state[9];
state[9] = state[11];
state[11] = temp;
// Rotate third row 3 columns to left
temp = state[12];
state[12] = state[15];
state[15] = state[14];
state[14] = state[13];
state[13] = temp;
}
#define REV_ENDIAN(x) (((x)>>24)&0x000000FF) | (((x)>>8)&0x0000FF00) | (((x)<<8)&0x00FF0000) | (((x)<<24)&0xFF000000)
__global__ void AES_encrypt(const uint *pt, uint *ct, uint *rek, uint Nr, uint size) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
u32_t state[4];
//uchar state[16];
uchar *_rk = (uchar *)rek;
#ifdef USE_SMEM
__shared__ uchar sbox[256];
#if TTABLE == 256
load_smem_sbox(sbox, Tsbox_256);
#elif TTABLE == 128
load_smem_sbox(sbox, Tsbox_128);
#elif TTABLE == 64
load_smem_sbox(sbox, Tsbox_64);
#elif TTABLE == 32
load_smem_sbox(sbox, Tsbox_32);
#endif // TTABLE
#else
#if TTABLE == 256
uchar *sbox = Tsbox_256;
#elif TTABLE == 128
uchar *sbox = Tsbox_128;
#elif TTABLE == 64
uchar *sbox = Tsbox_64;
#elif TTABLE == 32
uchar *sbox = Tsbox_32;
#endif // TTABLE
#endif // USE_SMEM
uchar *sbox_256 = Tsbox_256;
int iter = 0;
BEGIN:
int offset = (iter * NUM_THREADS * NUM_BLOCKS + tid) << 2;
if (offset >= size) return;
state[0].i = REV_ENDIAN(pt[offset + 0]);
state[1].i = REV_ENDIAN(pt[offset + 1]);
state[2].i = REV_ENDIAN(pt[offset + 2]);
state[3].i = REV_ENDIAN(pt[offset + 3]);
TransposeSelf((uchar*)state);
AddRoundKey((uchar*)state, (uchar*)_rk);
SubBytesSecure((uchar*)state, sbox);
ShiftRows((uchar*)state);
MixColumns((uchar*)state);
AddRoundKey((uchar*)state, (uchar*)(rek + 4));
for (int i = 2; i < Nr; i++)
{
SubBytes((uchar*)state, sbox_256);
ShiftRows((uchar*)state);
MixColumns((uchar*)state);
AddRoundKey((uchar*)state, (uchar*)(rek + i*4));
}
/*
SubBytesSecure((uchar*)state, sbox);
ShiftRows((uchar*)state);
AddRoundKey((uchar*)state, (uchar*)(rek + Nr*4));
*/
TransposeSelf((uchar*)state);
ct[offset + 0] = REV_ENDIAN(state[0].i);
ct[offset + 1] = REV_ENDIAN(state[1].i);
ct[offset + 2] = REV_ENDIAN(state[2].i);
ct[offset + 3] = REV_ENDIAN(state[3].i);
iter++;
goto BEGIN;
}
|
da93047e8ce6607d17bd4c23b67f336b49a523cd.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "mctCudaFDKAlgorithm.h"
texture<float, hipTextureType3D,hipReadModeElementType> Prj_tex;
__constant__ float cSrcAxisDist,cSrcDetDist;
__constant__ int cDetColumn,cDetRow;
__constant__ float cBinWidth,cBinHeight;
__constant__ int cReconWidth,cReconHeight,cReconSlice;
__constant__ float cPixSizeX,cPixSizeY, cPixSizeZ, cFOV,cCoef;
__constant__ int cPrjNum;
__constant__ float cInvBinWidth, cInvBinHeight;
__constant__ float cReconWidthMid, cReconHeightMid, cReconSliceMid;
__constant__ float cDetColumnRayCenter, cDetRowRayCenter;
__constant__ float cSDDInvBinHeight,cReconZminSDDInvBinHeight, cSDDInvBinHeightSliceSpace;
static const unsigned g_MaxAngles = 1440;
__constant__ float gC_angle_sin[g_MaxAngles];
__constant__ float gC_angle_cos[g_MaxAngles];
__global__ void Correction(float *dSinoData,int iPaddedDetCount,float *dCorrectionMatrix,int DetColumn,int DetRow,int PrjSize)
{
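// FDK pre-weighting: multiply every detector sample of every projection by its precomputed cosine weight, in place.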
int i = threadIdx.x + mul24(blockDim.x,blockIdx.x);
int j = threadIdx.y + mul24(blockDim.y,blockIdx.y);
if (i < DetColumn && j < DetRow*PrjSize)
{
dSinoData[j*iPaddedDetCount + i] *= dCorrectionMatrix[(j%DetRow)*DetColumn + i];
}
}
__global__ void Filter_FFT(hipfftComplex *dFilteredSinoData,hipfftComplex *dRampfilter,int DetRow,int iHalfFFTSize)
{
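// Scale each frequency bin of the row-wise FFT by the (real) ramp-filter spectrum and the normalization constant cCoef.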
int i = threadIdx.x + mul24(blockDim.x,blockIdx.x);
int j = threadIdx.y + mul24(blockDim.y,blockIdx.y);
if (i < iHalfFFTSize && j < DetRow)
{
dFilteredSinoData[j*iHalfFFTSize + i].x *= dRampfilter[i].x*cCoef;
dFilteredSinoData[j*iHalfFFTSize + i].y *= dRampfilter[i].x*cCoef;
}
}
__global__ void BP (float *dReconData, size_t Pitch, int iBatch)
{
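// Each thread backprojects one (x, y) pixel for one projection of the current batch: the filtered
// projection is fetched from the 3D texture, weighted by 1/L2, and accumulated into every z slice via atomicAdd.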
int i = threadIdx.x + mul24(blockDim.x,blockIdx.x);
int j = threadIdx.y + mul24(blockDim.y,blockIdx.y);
int k = threadIdx.z + mul24(blockDim.z,blockIdx.z);
float x = (i-cReconWidthMid)*cPixSizeX;
float y = (j-cReconHeightMid)*cPixSizeY;
int p = iBatch*cPrjNum + k;
if ( x*x + y*y < cFOV*cFOV)
{
float t = x*gC_angle_cos[p]+y*gC_angle_sin[p];
float s = cSrcAxisDist + x*gC_angle_sin[p]-y*gC_angle_cos[p];
float L2 = t*t+s*s;
float m = atanf(t/s)*cInvBinWidth + cDetColumnRayCenter;
float n = rsqrtf(L2)*cReconZminSDDInvBinHeight + cDetRowRayCenter;
float dn = rsqrtf(L2)*cSDDInvBinHeightSliceSpace;
#pragma unroll
for (int iz = 0; iz < cReconSlice; iz++)
{
atomicAdd(dReconData+ iz*Pitch*cReconHeight +j*Pitch +i, tex3D(Prj_tex,m,n,k+0.5f)/L2);
n += dn;
}
}
}
int calcZeropadFactorPowerOfTwo(int n, int iZeropad)
{
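// Rounds n up to the next power of two, then doubles it (iZeropad - 1) more times; iZeropad <= 0 leaves n unchanged.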
if (iZeropad > 0)
{
double logBase2 = log((double)n) / log((double)2);
int nextPowerOf2 = static_cast<int>(floor(logBase2));
if (logBase2 != floor(logBase2))
nextPowerOf2++;
nextPowerOf2 += (iZeropad - 1);
n = 1 << nextPowerOf2;
}
return n;
}
namespace mct
{
const int CFDK::m_nBatch = 5;//4;
CFDK::CFDK()
{
this->InitParams();
}
CFDK::~CFDK(void)
{
this->FreeObjects();
}
void CFDK::InitParams()
{
this->m_PrjArray = NULL;
this->m_PrjArrayLen = 0;
this->m_DCorrectionMatrix = NULL;
this->m_DReconData = NULL;
this->m_DReconDataLenSize = 0;
this->m_DReconDataLenCount = 0;
this->m_DsinoData = NULL;
this->m_DsinoDataLen = 0;
this->m_DFilteredsinoData = NULL;
this->m_DFilteredsinoDataLen = 0;
this->m_RampFilter = NULL;
this->m_RampFilterLen = 0;
this->hReconData = NULL;
this->hReconDataLen = 0;
this->m_ProjectionAngleCountMatix = 0;
this->m_ProjectionAngleCountRampFilter = 0;
this->m_ProjectionAngleCountAngle = 0;
this->m_DetectorSpacingMatix = 0;
this->m_DetectorSpacingRampFilter = 0;
this->m_FFTLen = 0;
this->m_iPaddedDetCountOld =0;
}
void CFDK::FreeObjects()
{
hipFreeArray(m_PrjArray);
m_PrjArray = NULL;
hipFree(m_DCorrectionMatrix);
m_DCorrectionMatrix = NULL;
hipFree(m_DReconData);
m_DReconData = NULL;
hipFree(m_DsinoData);
m_DsinoData = NULL;
hipFree(m_DFilteredsinoData);
m_DFilteredsinoData = NULL;
hipFree(m_RampFilter);
m_RampFilter = NULL;
hipfftDestroy(m_FwdFFT);
hipfftDestroy(m_BwdFFT);
this->m_FFTLen = 0;
this->m_iPaddedDetCountOld =0;
if(this->hReconData != NULL)
{
delete[] this->hReconData;
this->hReconData = NULL;
this->hReconDataLen = 0;
}
}
bool CFDK::SetParams(ScannerGeometry scanGeometry, ProjectionParams prjParams, ReconstructionParams reconParams, float* hSinoData, int iGPUIndex)
{
SetPrjGeometry(scanGeometry, prjParams, reconParams);
if(!setGPUIndex(iGPUIndex))
{
return false;
}
if(!CpyToSymbol())
{
return false;
}
if(!allocateBuffers())
{
return false;
}
if(!genRampFilter())
{
return false;
}
if(!caculateCorrectMatix())
{
return false;
}
m_hSinoData = hSinoData;
return true;
}
void CFDK::SetPrjGeometry(ScannerGeometry scanGeometry, ProjectionParams prjParams, ReconstructionParams reconParams)
{
m_SourceToIsoCenterDistance = scanGeometry.m_SourceToIsoCenterDistance;
m_SourceToDetectorDistance = scanGeometry.m_SourceToDetectorDistance;
m_DetectorSpacingX = scanGeometry.m_DetectorSpacingX;
m_DetectorSpacingY = scanGeometry.m_DetectorSpacingY;
m_DetectorColumnCount = scanGeometry.m_DetectorColumnCount + scanGeometry.m_DetectorCount - 1;
m_DetectorRowCount = scanGeometry.m_DetectorRowCount;
m_DetectorColumnRayCenter = scanGeometry.m_DetectorColumnRayCenter;
m_DetectorRowRayCenter = scanGeometry.m_DetectorRowCount/2-0.5f; //scanGeometry.m_DetectorRowRayCenter;
m_ProjectionAngleCount = prjParams.m_ProjectionAngleCount;
m_ProjectionAngleStart = prjParams.m_ProjectionAngleStart;
m_ProjectionAngleStep = -2*PI/prjParams.m_ProjectionAnglesPerRotation;
m_DetectorLengthX = scanGeometry.m_DetectorSpacingX*m_DetectorColumnCount;
m_DetectorLengthY = scanGeometry.m_DetectorSpacingY*m_DetectorRowCount;
m_fFOV = m_SourceToIsoCenterDistance*sin(0.5f*(m_DetectorLengthX-m_DetectorSpacingX));
m_ReconColumnCount = reconParams.m_ReconColumnCount;
m_ReconRowCount = reconParams.m_ReconRowCount;
m_ReconSliceCount = reconParams.m_ReconSliceCount*reconParams.m_MergedNum; //MergedNum 2015.12.16
m_nMergeNum = reconParams.m_MergedNum;
m_ReconWindowMidColumn = reconParams.m_ReconWindowMidColumn;
m_ReconWindowMidRow = reconParams.m_ReconWindowMidRow;
m_ReconWindowMidSlice = reconParams.m_ReconWindowMidSlice;
m_PixelSpacingX = reconParams.m_PixelSpacingX;
m_PixelSpacingY = reconParams.m_PixelSpacingY;
m_PixelSpacingZ = reconParams.m_PixelSpacingZ;
m_nPrjBatchSize = m_ProjectionAngleCount/m_nBatch;
m_iPaddedDetCount = calcZeropadFactorPowerOfTwo(2*m_DetectorColumnCount-1, 1);
m_iHalfFFTSize = (m_iPaddedDetCount/2 + 1);
}
bool CFDK::CpyToSymbol()
{
// 2015.11.26
hipMemcpyToSymbol(cSrcAxisDist,&m_SourceToIsoCenterDistance,sizeof(float));
hipMemcpyToSymbol(cSrcDetDist,&m_SourceToDetectorDistance,sizeof(float));
hipMemcpyToSymbol(cBinWidth,&m_DetectorSpacingX,sizeof(float));
hipMemcpyToSymbol(cBinHeight,&m_DetectorSpacingY,sizeof(float));
hipMemcpyToSymbol(cPixSizeX,&m_PixelSpacingX,sizeof(float));
hipMemcpyToSymbol(cPixSizeY,&m_PixelSpacingY,sizeof(float));
hipMemcpyToSymbol(cPixSizeZ,&m_PixelSpacingZ,sizeof(float));
hipMemcpyToSymbol(cFOV,&m_fFOV,sizeof(float));
hipMemcpyToSymbol(cDetColumn,&m_DetectorColumnCount,sizeof(int));
hipMemcpyToSymbol(cDetRow,&m_DetectorRowCount,sizeof(int));
hipMemcpyToSymbol(cReconWidth,&m_ReconColumnCount,sizeof(int));
hipMemcpyToSymbol(cReconHeight,&m_ReconRowCount,sizeof(int));
hipMemcpyToSymbol(cReconSlice,&m_ReconSliceCount,sizeof(int));
hipMemcpyToSymbol(cPrjNum,&m_nPrjBatchSize,sizeof(int));
hipMemcpyToSymbol(cReconWidthMid,&m_ReconWindowMidColumn,sizeof(float));
hipMemcpyToSymbol(cReconHeightMid,&m_ReconWindowMidRow,sizeof(float));
hipMemcpyToSymbol(cReconSliceMid,&m_ReconWindowMidSlice,sizeof(float));
hipMemcpyToSymbol(cDetColumnRayCenter,&m_DetectorColumnRayCenter,sizeof(float));
hipMemcpyToSymbol(cDetRowRayCenter,&m_DetectorRowRayCenter,sizeof(float));
float InvBinWidth = 1.f/m_DetectorSpacingX;
float InvBinHeight = 1.f/m_DetectorSpacingY;
hipMemcpyToSymbol(cInvBinWidth,&InvBinWidth,sizeof(float));
hipMemcpyToSymbol(cInvBinHeight,&InvBinHeight,sizeof(float));
float coef = 1.f/m_iPaddedDetCount*abs(m_DetectorSpacingX*m_ProjectionAngleStep*m_SourceToIsoCenterDistance);
hipMemcpyToSymbol(cCoef,&coef,sizeof(float));
float SDDInvBinHeight = m_SourceToDetectorDistance/m_DetectorSpacingY;
float ReconZminSDDInvBinHeight = (-m_ReconWindowMidSlice)*m_PixelSpacingZ*SDDInvBinHeight;
float SDDInvBinHeightSliceSpace = m_SourceToDetectorDistance/m_DetectorSpacingY*m_PixelSpacingZ;
hipMemcpyToSymbol(cSDDInvBinHeight,&SDDInvBinHeight,sizeof(float));
hipMemcpyToSymbol(cReconZminSDDInvBinHeight,&ReconZminSDDInvBinHeight,sizeof(float));
hipMemcpyToSymbol(cSDDInvBinHeightSliceSpace,&SDDInvBinHeightSliceSpace,sizeof(float));
if(this->m_ProjectionAngleCountAngle != m_ProjectionAngleCount)
{
float* angle_sin = new float[m_ProjectionAngleCount];
if(angle_sin == NULL)
return false;
float* angle_cos = new float[m_ProjectionAngleCount];
if(angle_cos == NULL)
{
delete []angle_sin;
return false;
}
float angles = m_ProjectionAngleStart; //TODO
for (unsigned int i = 0; i < m_ProjectionAngleCount; ++i)
{
angle_sin[i] = sinf(angles);
angle_cos[i] = cosf(angles);
angles += m_ProjectionAngleStep;
}
hipMemcpyToSymbol(gC_angle_sin, angle_sin, m_ProjectionAngleCount*sizeof(float), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(gC_angle_cos, angle_cos, m_ProjectionAngleCount*sizeof(float), 0, hipMemcpyHostToDevice);
delete []angle_sin;
delete []angle_cos;
this->m_ProjectionAngleCountAngle = this->m_ProjectionAngleCount;
}
return true;
}
bool CFDK::setGPUIndex(int iGPUIndex)
{
hipSetDevice(iGPUIndex);
hipError_t err = hipGetLastError();
// Ignore errors caused by calling hipSetDevice multiple times
if (err != hipSuccess && err != hipErrorSetOnActiveProcess)
return false;
return true;
}
bool CFDK::allocateBuffers()
{
hipError_t err;
int dsinoDataLen;
if((this->m_DReconData == NULL) || (this->m_DReconDataLenSize != m_ReconRowCount) ||(this->m_DReconDataLenCount!=m_ReconSliceCount))
{
if(this->m_DReconData != NULL)
{
hipFree(this->m_DReconData);
this->m_DReconData = NULL;
this->m_DReconDataLenSize = 0;
this->m_DReconDataLenCount = 0;
}
/// <summary>
/// Allocate GPU Memory for Reconstruction Output
/// </summary>
err = hipMallocPitch((void**)&this->m_DReconData, &m_nReconDataPitch, sizeof(float)*m_ReconColumnCount, m_ReconRowCount*m_ReconSliceCount);
if(hipSuccess != err)
{
return false;
}
CUDA_ASSERT(err);
this->m_DReconDataLenSize = m_ReconRowCount;
this->m_DReconDataLenCount = m_ReconSliceCount;
}
else
{
m_nReconDataPitch = sizeof(float)*m_ReconColumnCount;
}
err = hipMemset2D(this->m_DReconData, m_nReconDataPitch, 0, sizeof(float)*m_ReconColumnCount, m_ReconRowCount*m_ReconSliceCount);
if(hipSuccess != err)
{
return false;
}
CUDA_ASSERT(err);
m_nReconDataPitch = m_nReconDataPitch/sizeof(float);
dsinoDataLen = sizeof(hipfftReal)*m_DetectorRowCount*m_iPaddedDetCount*m_nPrjBatchSize;
if((this->m_DCorrectionMatrix == NULL) || (this->m_DsinoDataLen != dsinoDataLen))
{
if(this->m_DCorrectionMatrix != NULL)
{
hipFree(this->m_DCorrectionMatrix);
this->m_DCorrectionMatrix = NULL;
}
/// <summary>
/// Allocate GPU Memory for FDK correction mattrix
/// </summary>
err = hipMalloc((void**)&this->m_DCorrectionMatrix, sizeof(float)*m_DetectorColumnCount*m_DetectorRowCount);
if(hipSuccess != err)
{
return false;
}
CUDA_ASSERT(err);
}
if((this->m_DsinoData == NULL) || (this->m_DsinoDataLen != dsinoDataLen))
{
if(this->m_DsinoData != NULL)
{
hipFree(this->m_DsinoData);
this->m_DsinoData = NULL;
}
/// <summary>
/// Allocate GPU Memory for Sinogram Data
///
/// </summary>
err = hipMalloc((void**)&this->m_DsinoData, dsinoDataLen);
if(hipSuccess != err)
{
return false;
}
CUDA_ASSERT(err);
this->m_DsinoDataLen = dsinoDataLen;
}
if((this->m_DFilteredsinoData ==NULL) || (this->m_DFilteredsinoDataLen != sizeof(hipfftComplex)*m_DetectorRowCount*m_iHalfFFTSize*m_nPrjBatchSize))
{
if(this->m_DFilteredsinoData !=NULL)
{
hipFree(this->m_DFilteredsinoData);
this->m_DFilteredsinoData = NULL;
this->m_DFilteredsinoDataLen = 0;
}
/// <summary>
/// Allocate Memory for Filtered Sinogram Data
/// </summary>
err = hipMalloc((void**)&this->m_DFilteredsinoData, sizeof(hipfftComplex)*m_DetectorRowCount*m_iHalfFFTSize*m_nPrjBatchSize);
if(hipSuccess != err)
{
return false;
}
CUDA_ASSERT(err);
this->m_DFilteredsinoDataLen = sizeof(hipfftComplex)*m_DetectorRowCount*m_iHalfFFTSize*m_nPrjBatchSize;
}
if((this->m_RampFilter ==NULL) || (this->m_RampFilterLen != sizeof(hipfftComplex)*m_DetectorRowCount*m_iHalfFFTSize*m_nPrjBatchSize))
{
if(this->m_RampFilter !=NULL)
{
hipFree(this->m_RampFilter);
this->m_RampFilter = NULL;
this->m_RampFilterLen = 0;
}
/// <summary>
/// Allocate GPU Memory for Ramp Filter
/// </summary>
err = hipMalloc((void**)&this->m_RampFilter, sizeof(hipfftComplex)*m_iHalfFFTSize);
if(hipSuccess != err)
{
return false;
}
CUDA_ASSERT(err);
this->m_RampFilterLen = sizeof(hipfftComplex)*m_DetectorRowCount*m_iHalfFFTSize*m_nPrjBatchSize;
}
if((this->m_PrjArray == NULL) || (this->m_PrjArrayLen != dsinoDataLen))
{
if(this->m_PrjArray != NULL)
{
hipFreeArray(this->m_PrjArray);
this->m_PrjArray = NULL;
this->m_PrjArrayLen = 0;
}
/// <summary>
/// Allocate GPU Memory for BackProjection Sinogram Array
/// </summary>
hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>();
hipExtent extent = make_hipExtent(m_DetectorColumnCount,m_DetectorRowCount,m_nPrjBatchSize);
err = hipMalloc3DArray(&this->m_PrjArray,&channelDesc,extent);
if(hipSuccess != err)
{
return false;
}
CUDA_ASSERT(err);
this->m_PrjArrayLen = dsinoDataLen;
}
if((this->m_iPaddedDetCountOld != m_iPaddedDetCount) || (this->m_FFTLen != m_DetectorRowCount*m_nPrjBatchSize))
{
if((this->m_iPaddedDetCountOld != 0) ||(this->m_FFTLen != 0))
{
//Frees all GPU resources associated with a cuFFT plan and destroys the internal plan data structure.
//This function should be called once a plan is no longer needed, to avoid wasting GPU memory
hipfftDestroy(m_FwdFFT);
hipfftDestroy(m_BwdFFT);
}
hipfftPlan1d(&m_FwdFFT, m_iPaddedDetCount, HIPFFT_R2C, m_DetectorRowCount*m_nPrjBatchSize);
hipfftPlan1d(&m_BwdFFT, m_iPaddedDetCount, HIPFFT_C2R, m_DetectorRowCount*m_nPrjBatchSize);
this->m_iPaddedDetCountOld = m_iPaddedDetCount;
this->m_FFTLen = m_DetectorRowCount*m_nPrjBatchSize;
}
return true;
}
bool CFDK::caculateCorrectMatix()
{
if((this->m_DetectorSpacingMatix != m_DetectorSpacingX) || (this->m_ProjectionAngleCountMatix!=this->m_ProjectionAngleCount)) // recompute only when the detector spacing or the projection angle count changes
{
float *hCorrectionMatrix = new float[m_DetectorColumnCount * m_DetectorRowCount ];
if(hCorrectionMatrix == NULL)
return false;
for (size_t j = 0; j < m_DetectorRowCount; j++)
{
float y = (j-m_DetectorRowRayCenter)*m_DetectorSpacingY;
float cosa = m_SourceToDetectorDistance/sqrt(m_SourceToDetectorDistance*m_SourceToDetectorDistance + y*y);
for (size_t i = 0; i < m_DetectorColumnCount; i++)
{
float x = (i-m_DetectorColumnRayCenter)*m_DetectorSpacingX;
hCorrectionMatrix[j*m_DetectorColumnCount+i] = cosa*cos(x);
}
}
hipError_t err;
err = hipMemcpy(m_DCorrectionMatrix, hCorrectionMatrix, sizeof(float)*m_DetectorColumnCount*m_DetectorRowCount, hipMemcpyHostToDevice);
CUDA_ASSERT(err);
delete []hCorrectionMatrix;
this->m_DetectorSpacingMatix = m_DetectorSpacingX;
this->m_ProjectionAngleCountMatix =this->m_ProjectionAngleCount;
}
return true;
}
bool CFDK::genRampFilter()
{
if((this->m_DetectorSpacingRampFilter != m_DetectorSpacingX) ||(this->m_ProjectionAngleCountRampFilter!=this->m_ProjectionAngleCount))
{
float *rampFilter = new float[m_iPaddedDetCount];
if(rampFilter == NULL)
return false;
/// <summary>
/// Step 1: Caculate RampFilter Spatial Domain Response
/// </summary>
memset(rampFilter,0,sizeof(float)*m_iPaddedDetCount);
for (size_t i = 1;i < m_DetectorColumnCount;i += 2)
{
rampFilter[i] = rampFilter[m_iPaddedDetCount-i] = -1.f/(2*PI*PI*sin(i*m_DetectorSpacingX)*sin(i*m_DetectorSpacingX));
}
rampFilter[0] = 0.125f/(m_DetectorSpacingX*m_DetectorSpacingX);
/// <summary>
/// Step 2: Copy to GPU Memory
/// </summary>
float *DrampFilter;
hipError_t err;
err = hipMalloc((void**)&DrampFilter, sizeof(hipfftReal)*m_iPaddedDetCount);
CUDA_ASSERT(err);
err = hipMemcpy(DrampFilter, rampFilter, sizeof(hipfftReal)*m_iPaddedDetCount, hipMemcpyHostToDevice);
CUDA_ASSERT(err);
/// <summary>
/// Step 3: FFT and get RampFilter's frequency domain spectrumn
/// </summary>
hipfftHandle RampFilterFFT;
hipfftPlan1d(&RampFilterFFT, m_iPaddedDetCount, HIPFFT_R2C, 1);
hipfftExecR2C(RampFilterFFT,DrampFilter, m_RampFilter);
delete []rampFilter;
hipFree(DrampFilter);
hipfftDestroy(RampFilterFFT);
this->m_DetectorSpacingRampFilter = m_DetectorSpacingX;
this->m_ProjectionAngleCountRampFilter = this->m_ProjectionAngleCount;
//genFilter(E_FBPFILTER::FILTER_RAMLAK, m_fBinWidth, m_nDetColumn,
// 0, E_FILTER_GENERATION::FILTER_GENERATION_INVERSE_FOURIER, m_RampFilter,
// 1, E_SCANNER::GEOMETRY_EQUIANGULAR, m_fSrcAxisDist, m_fSrcDetDist);
}
return true;
}
bool CFDK::bindProjDataTexture(const hipArray* array)
{
hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>();
Prj_tex.addressMode[0] = hipAddressModeClamp;
Prj_tex.addressMode[1] = hipAddressModeClamp;
Prj_tex.addressMode[2] = hipAddressModeClamp;
Prj_tex.filterMode = hipFilterModeLinear;
Prj_tex.normalized = false;
hipBindTextureToArray(Prj_tex, array, channelDesc);
// TODO: error value?
return true;
}
bool CFDK::getReconstruction(float* hMergeReconData)
{
int reconDataLen = m_ReconColumnCount*m_ReconRowCount*m_ReconSliceCount;
if((this->hReconData == NULL) || (this->hReconDataLen != reconDataLen))
{
if(this->hReconData != NULL)
{
delete[] this->hReconData;
this->hReconData = NULL;
this->hReconDataLen = 0;
}
this->hReconData = new float[reconDataLen];
memset(hReconData, 0, sizeof(float)*reconDataLen);
this->hReconDataLen = reconDataLen;
}
hipMemcpy2D(hReconData,
sizeof(float)*m_ReconColumnCount,
m_DReconData,
sizeof(float)*m_nReconDataPitch,
sizeof(float)*m_ReconColumnCount,
m_ReconRowCount*m_ReconSliceCount,
hipMemcpyDeviceToHost );
float* pReconData;
float* pMergeData;
int dataCountPerSlice =m_ReconColumnCount*m_ReconRowCount;
for( int iSlice=0; iSlice<m_ReconSliceCount; iSlice=iSlice+m_nMergeNum)
{
int iSRIndex = iSlice*dataCountPerSlice;
int iSMIndex = dataCountPerSlice*iSlice/m_nMergeNum;
for(int iMatrix=0; iMatrix<dataCountPerSlice; iMatrix++ )
{
pReconData = hReconData + iSRIndex + iMatrix ;
pMergeData = hMergeReconData + iSMIndex + iMatrix;
float all=0;
for(int iM =0; iM < m_nMergeNum; ++iM)
{
all += *(pReconData + iM*dataCountPerSlice);
}
*pMergeData = all/m_nMergeNum;
}
}
hipError_t err = hipGetLastError();
if (err != hipSuccess && err != hipErrorSetOnActiveProcess)
{
return false;
}
return true;
}
bool CFDK::CallRecon()
{
bool isOK = true;
hipError_t err = hipGetLastError();
if (err != hipSuccess && err != hipErrorSetOnActiveProcess)
{
isOK = false;
}
if(isOK)
{
dim3 Block(16,16,1);
dim3 DetGrid((m_DetectorColumnCount+15)/16,(m_DetectorRowCount*m_nPrjBatchSize+15)/16,1);
dim3 FFTGrid((m_iHalfFFTSize+15)/16,(m_DetectorRowCount*m_nPrjBatchSize+15)/16,1);
dim3 BPGrid((m_ReconColumnCount+15)/16,(m_ReconRowCount+15)/16,m_nPrjBatchSize);
hipMemcpy3DParms cpy3d = {0};
cpy3d.srcPtr = make_hipPitchedPtr(m_DsinoData, sizeof(float)*m_iPaddedDetCount, sizeof(float)*m_DetectorColumnCount, m_DetectorRowCount);//
cpy3d.dstArray = m_PrjArray;
cpy3d.extent = make_hipExtent(m_DetectorColumnCount, m_DetectorRowCount, m_nPrjBatchSize);
cpy3d.kind = hipMemcpyDeviceToDevice;
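// Per-batch pipeline: upload one batch of projections, apply the FDK cosine
// pre-weighting, run the FFT-based ramp filtering row by row, then copy the
// filtered sinogram into the 3D array bound to Prj_tex and backproject it
// into the reconstruction volume.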
for (int j = 0 ; j < m_nBatch; j++)
{
/// <summary>
/// Copy host Sinogram to GPU Memory
/// </summary>
hipMemset(m_DsinoData, 0, sizeof(hipfftReal)*m_DetectorRowCount*m_iPaddedDetCount* m_nPrjBatchSize);
hipMemcpy2D(m_DsinoData,
sizeof(hipfftReal)*m_iPaddedDetCount,
m_hSinoData + j*m_DetectorColumnCount*m_DetectorRowCount*m_nPrjBatchSize,
sizeof(float)*m_DetectorColumnCount,
sizeof(float)*m_DetectorColumnCount,
m_DetectorRowCount*m_nPrjBatchSize,
hipMemcpyHostToDevice);
err = hipGetLastError();
if (err != hipSuccess && err != hipErrorSetOnActiveProcess)
{
isOK = false;
break;
}
/// <summary>
/// Step1. Do FDK Preweighting
/// </summary>
hipLaunchKernelGGL(( Correction), dim3(DetGrid),dim3(Block), 0, 0, m_DsinoData, m_iPaddedDetCount, m_DCorrectionMatrix, m_DetectorColumnCount, m_DetectorRowCount, m_nPrjBatchSize);
err = hipGetLastError();
if (err != hipSuccess && err != hipErrorSetOnActiveProcess)
{
isOK = false;
break;
}
/// <summary>
/// Step2. do Filter
/// </summary>
hipfftExecR2C(m_FwdFFT, m_DsinoData, m_DFilteredsinoData);
hipLaunchKernelGGL(( Filter_FFT), dim3(FFTGrid),dim3(Block), 0, 0, m_DFilteredsinoData, m_RampFilter, m_DetectorRowCount*m_nPrjBatchSize,
m_iHalfFFTSize);
hipfftExecC2R(m_BwdFFT, m_DFilteredsinoData, m_DsinoData);
err = hipGetLastError();
if (err != hipSuccess && err != hipErrorSetOnActiveProcess)
{
isOK = false;
break;
}
/// <summary>
/// Step3. Bind Filtered Sinogram to Array and do Backprojection
/// </summary>
hipMemcpy3D(&cpy3d);
bindProjDataTexture(m_PrjArray);
hipLaunchKernelGGL(( BP), dim3(BPGrid),dim3(Block), 0, 0, m_DReconData, m_nReconDataPitch, j);
hipUnbindTexture(Prj_tex);
err = hipGetLastError();
if (err != hipSuccess && err != hipErrorSetOnActiveProcess)
{
isOK = false;
break;
}
}
}
////Frees all GPU resources associated with a cuFFT plan and destroys the internal plan data structure.
////This function should be called once a plan is no longer needed, to avoid wasting GPU memory
//hipfftDestroy(m_FwdFFT);
//hipfftDestroy(m_BwdFFT);
return isOK;
}
}
|
da93047e8ce6607d17bd4c23b67f336b49a523cd.cu
|
#include "mctCudaFDKAlgorithm.h"
texture<float, cudaTextureType3D,cudaReadModeElementType> Prj_tex;
__constant__ float cSrcAxisDist,cSrcDetDist;
__constant__ int cDetColumn,cDetRow;
__constant__ float cBinWidth,cBinHeight;
__constant__ int cReconWidth,cReconHeight,cReconSlice;
__constant__ float cPixSizeX,cPixSizeY, cPixSizeZ, cFOV,cCoef;
__constant__ int cPrjNum;
__constant__ float cInvBinWidth, cInvBinHeight;
__constant__ float cReconWidthMid, cReconHeightMid, cReconSliceMid;
__constant__ float cDetColumnRayCenter, cDetRowRayCenter;
__constant__ float cSDDInvBinHeight,cReconZminSDDInvBinHeight, cSDDInvBinHeightSliceSpace;
static const unsigned g_MaxAngles = 1440;
__constant__ float gC_angle_sin[g_MaxAngles];
__constant__ float gC_angle_cos[g_MaxAngles];
__global__ void Correction(float *dSinoData,int iPaddedDetCount,float *dCorrectionMatrix,int DetColumn,int DetRow,int PrjSize)
{
int i = threadIdx.x + mul24(blockDim.x,blockIdx.x);
int j = threadIdx.y + mul24(blockDim.y,blockIdx.y);
if (i < DetColumn && j < DetRow*PrjSize)
{
dSinoData[j*iPaddedDetCount + i] *= dCorrectionMatrix[(j%DetRow)*DetColumn + i];
}
}
__global__ void Filter_FFT(cufftComplex *dFilteredSinoData,cufftComplex *dRampfilter,int DetRow,int iHalfFFTSize)
{
int i = threadIdx.x + mul24(blockDim.x,blockIdx.x);
int j = threadIdx.y + mul24(blockDim.y,blockIdx.y);
if (i < iHalfFFTSize && j < DetRow)
{
dFilteredSinoData[j*iHalfFFTSize + i].x *= dRampfilter[i].x*cCoef;
dFilteredSinoData[j*iHalfFFTSize + i].y *= dRampfilter[i].x*cCoef;
}
}
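// Voxel-driven FDK backprojection: each thread handles one (x,y) column of the
// volume for one projection p. (t,s) are the voxel coordinates rotated into the
// source frame, atanf(t/s) selects the detector column (fan angle), each
// contribution is weighted by 1/L2, and the detector row index n is advanced
// incrementally across slices via dn.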
__global__ void BP (float *dReconData, size_t Pitch, int iBatch)
{
int i = threadIdx.x + mul24(blockDim.x,blockIdx.x);
int j = threadIdx.y + mul24(blockDim.y,blockIdx.y);
int k = threadIdx.z + mul24(blockDim.z,blockIdx.z);
float x = (i-cReconWidthMid)*cPixSizeX;
float y = (j-cReconHeightMid)*cPixSizeY;
int p = iBatch*cPrjNum + k;
if ( x*x + y*y < cFOV*cFOV)
{
float t = x*gC_angle_cos[p]+y*gC_angle_sin[p];
float s = cSrcAxisDist + x*gC_angle_sin[p]-y*gC_angle_cos[p];
float L2 = t*t+s*s;
float m = atanf(t/s)*cInvBinWidth + cDetColumnRayCenter;
float n = rsqrtf(L2)*cReconZminSDDInvBinHeight + cDetRowRayCenter;
float dn = rsqrtf(L2)*cSDDInvBinHeightSliceSpace;
#pragma unroll
for (int iz = 0; iz < cReconSlice; iz++)
{
atomicAdd(dReconData+ iz*Pitch*cReconHeight +j*Pitch +i, tex3D(Prj_tex,m,n,k+0.5f)/L2);
n += dn;
}
}
}
int calcZeropadFactorPowerOfTwo(int n, int iZeropad)
{
if (iZeropad > 0)
{
double logBase2 = log((double)n) / log((double)2);
int nextPowerOf2 = static_cast<int>(floor(logBase2));
if (logBase2 != floor(logBase2))
nextPowerOf2++;
nextPowerOf2 += (iZeropad - 1);
n = 1 << nextPowerOf2;
}
return n;
}
namespace mct
{
const int CFDK::m_nBatch = 5;//4;
CFDK::CFDK()
{
this->InitParams();
}
CFDK::~CFDK(void)
{
this->FreeObjects();
}
void CFDK::InitParams()
{
this->m_PrjArray = NULL;
this->m_PrjArrayLen = 0;
this->m_DCorrectionMatrix = NULL;
this->m_DReconData = NULL;
this->m_DReconDataLenSize = 0;
this->m_DReconDataLenCount = 0;
this->m_DsinoData = NULL;
this->m_DsinoDataLen = 0;
this->m_DFilteredsinoData = NULL;
this->m_DFilteredsinoDataLen = 0;
this->m_RampFilter = NULL;
this->m_RampFilterLen = 0;
this->hReconData = NULL;
this->hReconDataLen = 0;
this->m_ProjectionAngleCountMatix = 0;
this->m_ProjectionAngleCountRampFilter = 0;
this->m_ProjectionAngleCountAngle = 0;
this->m_DetectorSpacingMatix = 0;
this->m_DetectorSpacingRampFilter = 0;
this->m_FFTLen = 0;
this->m_iPaddedDetCountOld =0;
}
void CFDK::FreeObjects()
{
cudaFreeArray(m_PrjArray);
m_PrjArray = NULL;
cudaFree(m_DCorrectionMatrix);
m_DCorrectionMatrix = NULL;
cudaFree(m_DReconData);
m_DReconData = NULL;
cudaFree(m_DsinoData);
m_DsinoData = NULL;
cudaFree(m_DFilteredsinoData);
m_DFilteredsinoData = NULL;
cudaFree(m_RampFilter);
m_RampFilter = NULL;
cufftDestroy(m_FwdFFT);
cufftDestroy(m_BwdFFT);
this->m_FFTLen = 0;
this->m_iPaddedDetCountOld =0;
if(this->hReconData != NULL)
{
delete[] this->hReconData;
this->hReconData = NULL;
this->hReconDataLen = 0;
}
}
bool CFDK::SetParams(ScannerGeometry scanGeometry, ProjectionParams prjParams, ReconstructionParams reconParams, float* hSinoData, int iGPUIndex)
{
SetPrjGeometry(scanGeometry, prjParams, reconParams);
if(!setGPUIndex(iGPUIndex))
{
return false;
}
if(!CpyToSymbol())
{
return false;
}
if(!allocateBuffers())
{
return false;
}
if(!genRampFilter())
{
return false;
}
if(!caculateCorrectMatix())
{
return false;
}
m_hSinoData = hSinoData;
return true;
}
void CFDK::SetPrjGeometry(ScannerGeometry scanGeometry, ProjectionParams prjParams, ReconstructionParams reconParams)
{
m_SourceToIsoCenterDistance = scanGeometry.m_SourceToIsoCenterDistance;
m_SourceToDetectorDistance = scanGeometry.m_SourceToDetectorDistance;
m_DetectorSpacingX = scanGeometry.m_DetectorSpacingX;
m_DetectorSpacingY = scanGeometry.m_DetectorSpacingY;
m_DetectorColumnCount = scanGeometry.m_DetectorColumnCount + scanGeometry.m_DetectorCount - 1;
m_DetectorRowCount = scanGeometry.m_DetectorRowCount;
m_DetectorColumnRayCenter = scanGeometry.m_DetectorColumnRayCenter;
m_DetectorRowRayCenter = scanGeometry.m_DetectorRowCount/2-0.5f; //scanGeometry.m_DetectorRowRayCenter;
m_ProjectionAngleCount = prjParams.m_ProjectionAngleCount;
m_ProjectionAngleStart = prjParams.m_ProjectionAngleStart;
m_ProjectionAngleStep = -2*PI/prjParams.m_ProjectionAnglesPerRotation;
m_DetectorLengthX = scanGeometry.m_DetectorSpacingX*m_DetectorColumnCount;
m_DetectorLengthY = scanGeometry.m_DetectorSpacingY*m_DetectorRowCount;
m_fFOV = m_SourceToIsoCenterDistance*sin(0.5f*(m_DetectorLengthX-m_DetectorSpacingX));
m_ReconColumnCount = reconParams.m_ReconColumnCount;
m_ReconRowCount = reconParams.m_ReconRowCount;
m_ReconSliceCount = reconParams.m_ReconSliceCount*reconParams.m_MergedNum; // Changed to reconstruct all slices; the output is merged per MergedNum. 2015.12.16
m_nMergeNum = reconParams.m_MergedNum;
m_ReconWindowMidColumn = reconParams.m_ReconWindowMidColumn;
m_ReconWindowMidRow = reconParams.m_ReconWindowMidRow;
m_ReconWindowMidSlice = reconParams.m_ReconWindowMidSlice;
m_PixelSpacingX = reconParams.m_PixelSpacingX;
m_PixelSpacingY = reconParams.m_PixelSpacingY;
m_PixelSpacingZ = reconParams.m_PixelSpacingZ;
m_nPrjBatchSize = m_ProjectionAngleCount/m_nBatch;
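// Pad each detector row to at least 2*N-1 samples, rounded up to a power of
// two, so that the FFT-based filtering behaves like a linear convolution
// (no wrap-around) and the transforms stay efficient.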
m_iPaddedDetCount = calcZeropadFactorPowerOfTwo(2*m_DetectorColumnCount-1, 1);
m_iHalfFFTSize = (m_iPaddedDetCount/2 + 1);
}
bool CFDK::CpyToSymbol()
{
// Could be optimized. 2015.11.26
cudaMemcpyToSymbol(cSrcAxisDist,&m_SourceToIsoCenterDistance,sizeof(float));
cudaMemcpyToSymbol(cSrcDetDist,&m_SourceToDetectorDistance,sizeof(float));
cudaMemcpyToSymbol(cBinWidth,&m_DetectorSpacingX,sizeof(float));
cudaMemcpyToSymbol(cBinHeight,&m_DetectorSpacingY,sizeof(float));
cudaMemcpyToSymbol(cPixSizeX,&m_PixelSpacingX,sizeof(float));
cudaMemcpyToSymbol(cPixSizeY,&m_PixelSpacingY,sizeof(float));
cudaMemcpyToSymbol(cPixSizeZ,&m_PixelSpacingZ,sizeof(float));
cudaMemcpyToSymbol(cFOV,&m_fFOV,sizeof(float));
cudaMemcpyToSymbol(cDetColumn,&m_DetectorColumnCount,sizeof(int));
cudaMemcpyToSymbol(cDetRow,&m_DetectorRowCount,sizeof(int));
cudaMemcpyToSymbol(cReconWidth,&m_ReconColumnCount,sizeof(int));
cudaMemcpyToSymbol(cReconHeight,&m_ReconRowCount,sizeof(int));
cudaMemcpyToSymbol(cReconSlice,&m_ReconSliceCount,sizeof(int));
cudaMemcpyToSymbol(cPrjNum,&m_nPrjBatchSize,sizeof(int));
cudaMemcpyToSymbol(cReconWidthMid,&m_ReconWindowMidColumn,sizeof(float));
cudaMemcpyToSymbol(cReconHeightMid,&m_ReconWindowMidRow,sizeof(float));
cudaMemcpyToSymbol(cReconSliceMid,&m_ReconWindowMidSlice,sizeof(float));
cudaMemcpyToSymbol(cDetColumnRayCenter,&m_DetectorColumnRayCenter,sizeof(float));
cudaMemcpyToSymbol(cDetRowRayCenter,&m_DetectorRowRayCenter,sizeof(float));
float InvBinWidth = 1.f/m_DetectorSpacingX;
float InvBinHeight = 1.f/m_DetectorSpacingY;
cudaMemcpyToSymbol(cInvBinWidth,&InvBinWidth,sizeof(float));
cudaMemcpyToSymbol(cInvBinHeight,&InvBinHeight,sizeof(float));
float coef = 1.f/m_iPaddedDetCount*abs(m_DetectorSpacingX*m_ProjectionAngleStep*m_SourceToIsoCenterDistance);
cudaMemcpyToSymbol(cCoef,&coef,sizeof(float));
float SDDInvBinHeight = m_SourceToDetectorDistance/m_DetectorSpacingY;
float ReconZminSDDInvBinHeight = (-m_ReconWindowMidSlice)*m_PixelSpacingZ*SDDInvBinHeight;
float SDDInvBinHeightSliceSpace = m_SourceToDetectorDistance/m_DetectorSpacingY*m_PixelSpacingZ;
cudaMemcpyToSymbol(cSDDInvBinHeight,&SDDInvBinHeight,sizeof(float));
cudaMemcpyToSymbol(cReconZminSDDInvBinHeight,&ReconZminSDDInvBinHeight,sizeof(float));
cudaMemcpyToSymbol(cSDDInvBinHeightSliceSpace,&SDDInvBinHeightSliceSpace,sizeof(float));
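// Per-projection sin/cos tables are precomputed on the host and uploaded to
// constant memory (gC_angle_sin/gC_angle_cos); they are rebuilt only when the
// projection-angle count changes.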
if(this->m_ProjectionAngleCountAngle != m_ProjectionAngleCount)
{
float* angle_sin = new float[m_ProjectionAngleCount];
if(angle_sin == NULL)
return false;
float* angle_cos = new float[m_ProjectionAngleCount];
if(angle_cos == NULL)
{
delete []angle_sin;
return false;
}
float angles = m_ProjectionAngleStart; //TODO: currently the start angles are all the same
for (unsigned int i = 0; i < m_ProjectionAngleCount; ++i)
{
angle_sin[i] = sinf(angles);
angle_cos[i] = cosf(angles);
angles += m_ProjectionAngleStep;
}
cudaMemcpyToSymbol(gC_angle_sin, angle_sin, m_ProjectionAngleCount*sizeof(float), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(gC_angle_cos, angle_cos, m_ProjectionAngleCount*sizeof(float), 0, cudaMemcpyHostToDevice);
delete []angle_sin;
delete []angle_cos;
this->m_ProjectionAngleCountAngle = this->m_ProjectionAngleCount;
}
return true;
}
bool CFDK::setGPUIndex(int iGPUIndex)
{
cudaSetDevice(iGPUIndex);
cudaError_t err = cudaGetLastError();
// Ignore errors caused by calling cudaSetDevice multiple times
if (err != cudaSuccess && err != cudaErrorSetOnActiveProcess)
return false;
return true;
}
bool CFDK::allocateBuffers()
{
cudaError_t err;
int dsinoDataLen;
if((this->m_DReconData == NULL) || (this->m_DReconDataLenSize != m_ReconRowCount) ||(this->m_DReconDataLenCount!=m_ReconSliceCount))
{
if(this->m_DReconData != NULL)
{
cudaFree(this->m_DReconData);
this->m_DReconData = NULL;
this->m_DReconDataLenSize = 0;
this->m_DReconDataLenCount = 0;
}
/// <summary>
/// Allocate GPU Memory for Reconstruction Output
/// </summary>
err = cudaMallocPitch((void**)&this->m_DReconData, &m_nReconDataPitch, sizeof(float)*m_ReconColumnCount, m_ReconRowCount*m_ReconSliceCount);
if(cudaSuccess != err)
{
return false;
}
CUDA_ASSERT(err);
this->m_DReconDataLenSize = m_ReconRowCount;
this->m_DReconDataLenCount = m_ReconSliceCount;
}
else
{
m_nReconDataPitch = sizeof(float)*m_ReconColumnCount;
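// NOTE: when the existing buffer is reused, the pitch is assumed to equal the
// unpadded row width (sizeof(float)*m_ReconColumnCount); cudaMallocPitch may
// originally have returned a larger, padded pitch.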
}
err = cudaMemset2D(this->m_DReconData, m_nReconDataPitch, 0, sizeof(float)*m_ReconColumnCount, m_ReconRowCount*m_ReconSliceCount);
if(cudaSuccess != err)
{
return false;
}
CUDA_ASSERT(err);
m_nReconDataPitch = m_nReconDataPitch/sizeof(float);
dsinoDataLen = sizeof(cufftReal)*m_DetectorRowCount*m_iPaddedDetCount*m_nPrjBatchSize;
if((this->m_DCorrectionMatrix == NULL) || (this->m_DsinoDataLen != dsinoDataLen))
{
if(this->m_DCorrectionMatrix != NULL)
{
cudaFree(this->m_DCorrectionMatrix);
this->m_DCorrectionMatrix = NULL;
}
/// <summary>
/// Allocate GPU Memory for FDK correction matrix
/// </summary>
err = cudaMalloc((void**)&this->m_DCorrectionMatrix, sizeof(float)*m_DetectorColumnCount*m_DetectorRowCount);
if(cudaSuccess != err)
{
return false;
}
CUDA_ASSERT(err);
}
if((this->m_DsinoData == NULL) || (this->m_DsinoDataLen != dsinoDataLen))
{
if(this->m_DsinoData != NULL)
{
cudaFree(this->m_DsinoData);
this->m_DsinoData = NULL;
}
/// <summary>
/// Allocate GPU Memory for Sinogram Data
/// Zero-padding is required for the filtering step, so a deliberately larger buffer is allocated here
/// </summary>
err = cudaMalloc((void**)&this->m_DsinoData, dsinoDataLen);
if(cudaSuccess != err)
{
return false;
}
CUDA_ASSERT(err);
this->m_DsinoDataLen = dsinoDataLen;
}
if((this->m_DFilteredsinoData ==NULL) || (this->m_DFilteredsinoDataLen != sizeof(cufftComplex)*m_DetectorRowCount*m_iHalfFFTSize*m_nPrjBatchSize))
{
if(this->m_DFilteredsinoData !=NULL)
{
cudaFree(this->m_DFilteredsinoData);
this->m_DFilteredsinoData = NULL;
this->m_DFilteredsinoDataLen = 0;
}
/// <summary>
/// Allocate Memory for Filtered Sinogram Data
/// </summary>
err = cudaMalloc((void**)&this->m_DFilteredsinoData, sizeof(cufftComplex)*m_DetectorRowCount*m_iHalfFFTSize*m_nPrjBatchSize);
if(cudaSuccess != err)
{
return false;
}
CUDA_ASSERT(err);
this->m_DFilteredsinoDataLen = sizeof(cufftComplex)*m_DetectorRowCount*m_iHalfFFTSize*m_nPrjBatchSize;
}
if((this->m_RampFilter ==NULL) || (this->m_RampFilterLen != sizeof(cufftComplex)*m_DetectorRowCount*m_iHalfFFTSize*m_nPrjBatchSize))
{
if(this->m_RampFilter !=NULL)
{
cudaFree(this->m_RampFilter);
this->m_RampFilter = NULL;
this->m_RampFilterLen = 0;
}
/// <summary>
/// Allocate GPU Memory for Ramp Filter
/// </summary>
err = cudaMalloc((void**)&this->m_RampFilter, sizeof(cufftComplex)*m_iHalfFFTSize);
if(cudaSuccess != err)
{
return false;
}
CUDA_ASSERT(err);
this->m_RampFilterLen = sizeof(cufftComplex)*m_DetectorRowCount*m_iHalfFFTSize*m_nPrjBatchSize;
}
if((this->m_PrjArray == NULL) || (this->m_PrjArrayLen != dsinoDataLen))
{
if(this->m_PrjArray != NULL)
{
cudaFreeArray(this->m_PrjArray);
this->m_PrjArray = NULL;
this->m_PrjArrayLen = 0;
}
/// <summary>
/// Allocate GPU Memory for BackProjection Sinogram Array
/// </summary>
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>();
cudaExtent extent = make_cudaExtent(m_DetectorColumnCount,m_DetectorRowCount,m_nPrjBatchSize);
err = cudaMalloc3DArray(&this->m_PrjArray,&channelDesc,extent);
if(cudaSuccess != err)
{
return false;
}
CUDA_ASSERT(err);
this->m_PrjArrayLen = dsinoDataLen;
}
if((this->m_iPaddedDetCountOld != m_iPaddedDetCount) || (this->m_FFTLen != m_DetectorRowCount*m_nPrjBatchSize))
{
if((this->m_iPaddedDetCountOld != 0) ||(this->m_FFTLen != 0))
{
//Frees all GPU resources associated with a cuFFT plan and destroys the internal plan data structure.
//This function should be called once a plan is no longer needed, to avoid wasting GPU memory
cufftDestroy(m_FwdFFT);
cufftDestroy(m_BwdFFT);
}
cufftPlan1d(&m_FwdFFT, m_iPaddedDetCount, CUFFT_R2C, m_DetectorRowCount*m_nPrjBatchSize);
cufftPlan1d(&m_BwdFFT, m_iPaddedDetCount, CUFFT_C2R, m_DetectorRowCount*m_nPrjBatchSize);
this->m_iPaddedDetCountOld = m_iPaddedDetCount;
this->m_FFTLen = m_DetectorRowCount*m_nPrjBatchSize;
}
return true;
}
bool CFDK::caculateCorrectMatix()
{
if((this->m_DetectorSpacingMatix != m_DetectorSpacingX) || (this->m_ProjectionAngleCountMatix!=this->m_ProjectionAngleCount)) // Across repeated reconstructions only m_DetectorSpacingX, m_DetectorSpacingY and the projection-angle count change; the other parameters stay the same
{
float *hCorrectionMatrix = new float[m_DetectorColumnCount * m_DetectorRowCount ];
if(hCorrectionMatrix == NULL)
return false;
for (size_t j = 0; j < m_DetectorRowCount; j++)
{
float y = (j-m_DetectorRowRayCenter)*m_DetectorSpacingY;
float cosa = m_SourceToDetectorDistance/sqrt(m_SourceToDetectorDistance*m_SourceToDetectorDistance + y*y);
for (size_t i = 0; i < m_DetectorColumnCount; i++)
{
float x = (i-m_DetectorColumnRayCenter)*m_DetectorSpacingX;
hCorrectionMatrix[j*m_DetectorColumnCount+i] = cosa*cos(x);
}
}
cudaError_t err;
err = cudaMemcpy(m_DCorrectionMatrix, hCorrectionMatrix, sizeof(float)*m_DetectorColumnCount*m_DetectorRowCount, cudaMemcpyHostToDevice);
CUDA_ASSERT(err);
delete []hCorrectionMatrix;
this->m_DetectorSpacingMatix = m_DetectorSpacingX;
this->m_ProjectionAngleCountMatix =this->m_ProjectionAngleCount;
}
return true;
}
bool CFDK::genRampFilter()
{
if((this->m_DetectorSpacingRampFilter != m_DetectorSpacingX) ||(this->m_ProjectionAngleCountRampFilter!=this->m_ProjectionAngleCount))
{
float *rampFilter = new float[m_iPaddedDetCount];
if(rampFilter == NULL)
return false;
/// <summary>
/// Step 1: Calculate RampFilter Spatial Domain Response
/// </summary>
memset(rampFilter,0,sizeof(float)*m_iPaddedDetCount);
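// Spatial-domain response of the ramp filter for what appears to be an
// equiangular (curved) detector: only odd-indexed taps are non-zero, the
// negative-lag taps are stored wrapped at the end of the zero-padded buffer,
// and the DC tap is set separately below.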
for (size_t i = 1;i < m_DetectorColumnCount;i += 2)
{
rampFilter[i] = rampFilter[m_iPaddedDetCount-i] = -1.f/(2*PI*PI*sin(i*m_DetectorSpacingX)*sin(i*m_DetectorSpacingX));
}
rampFilter[0] = 0.125f/(m_DetectorSpacingX*m_DetectorSpacingX);
/// <summary>
/// Step 2: Copy to GPU Memory
/// </summary>
float *DrampFilter;
cudaError_t err;
err = cudaMalloc((void**)&DrampFilter, sizeof(cufftReal)*m_iPaddedDetCount);
CUDA_ASSERT(err);
err = cudaMemcpy(DrampFilter, rampFilter, sizeof(cufftReal)*m_iPaddedDetCount, cudaMemcpyHostToDevice);
CUDA_ASSERT(err);
/// <summary>
/// Step 3: FFT and get RampFilter's frequency domain spectrum
/// </summary>
cufftHandle RampFilterFFT;
cufftPlan1d(&RampFilterFFT, m_iPaddedDetCount, CUFFT_R2C, 1);
cufftExecR2C(RampFilterFFT,DrampFilter, m_RampFilter);
delete []rampFilter;
cudaFree(DrampFilter);
cufftDestroy(RampFilterFFT);
this->m_DetectorSpacingRampFilter = m_DetectorSpacingX;
this->m_ProjectionAngleCountRampFilter = this->m_ProjectionAngleCount;
//genFilter(E_FBPFILTER::FILTER_RAMLAK, m_fBinWidth, m_nDetColumn,
// 0, E_FILTER_GENERATION::FILTER_GENERATION_INVERSE_FOURIER, m_RampFilter,
// 1, E_SCANNER::GEOMETRY_EQUIANGULAR, m_fSrcAxisDist, m_fSrcDetDist);
}
return true;
}
bool CFDK::bindProjDataTexture(const cudaArray* array)
{
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>();
Prj_tex.addressMode[0] = cudaAddressModeClamp;
Prj_tex.addressMode[1] = cudaAddressModeClamp;
Prj_tex.addressMode[2] = cudaAddressModeClamp;
Prj_tex.filterMode = cudaFilterModeLinear;
Prj_tex.normalized = false;
cudaBindTextureToArray(Prj_tex, array, channelDesc);
// TODO: error value?
return true;
}
bool CFDK::getReconstruction(float* hMergeReconData)
{
int reconDataLen = m_ReconColumnCount*m_ReconRowCount*m_ReconSliceCount;
if((this->hReconData == NULL) || (this->hReconDataLen != reconDataLen))
{
if(this->hReconData != NULL)
{
delete[] this->hReconData;
this->hReconData = NULL;
this->hReconDataLen = 0;
}
this->hReconData = new float[reconDataLen];
memset(hReconData, 0, sizeof(float)*reconDataLen);
this->hReconDataLen = reconDataLen;
}
cudaMemcpy2D(hReconData,
sizeof(float)*m_ReconColumnCount,
m_DReconData,
sizeof(float)*m_nReconDataPitch,
sizeof(float)*m_ReconColumnCount,
m_ReconRowCount*m_ReconSliceCount,
cudaMemcpyDeviceToHost );
float* pReconData;
float* pMergeData;
int dataCountPerSlice =m_ReconColumnCount*m_ReconRowCount;
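// Merge every m_nMergeNum consecutive reconstructed slices into one output
// slice by averaging them pixel by pixel.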
for( int iSlice=0; iSlice<m_ReconSliceCount; iSlice=iSlice+m_nMergeNum)
{
int iSRIndex = iSlice*dataCountPerSlice;
int iSMIndex = dataCountPerSlice*iSlice/m_nMergeNum;
for(int iMatrix=0; iMatrix<dataCountPerSlice; iMatrix++ )
{
pReconData = hReconData + iSRIndex + iMatrix ;
pMergeData = hMergeReconData + iSMIndex + iMatrix;
float all=0;
for(int iM =0; iM < m_nMergeNum; ++iM)
{
all += *(pReconData + iM*dataCountPerSlice);
}
*pMergeData = all/m_nMergeNum;
}
}
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess && err != cudaErrorSetOnActiveProcess)
{
return false;
}
return true;
}
bool CFDK::CallRecon()
{
bool isOK = true;
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess && err != cudaErrorSetOnActiveProcess)
{
isOK = false;
}
if(isOK)
{
dim3 Block(16,16,1);
dim3 DetGrid((m_DetectorColumnCount+15)/16,(m_DetectorRowCount*m_nPrjBatchSize+15)/16,1);
dim3 FFTGrid((m_iHalfFFTSize+15)/16,(m_DetectorRowCount*m_nPrjBatchSize+15)/16,1);
dim3 BPGrid((m_ReconColumnCount+15)/16,(m_ReconRowCount+15)/16,m_nPrjBatchSize);
cudaMemcpy3DParms cpy3d = {0};
cpy3d.srcPtr = make_cudaPitchedPtr(m_DsinoData, sizeof(float)*m_iPaddedDetCount, sizeof(float)*m_DetectorColumnCount, m_DetectorRowCount);//
cpy3d.dstArray = m_PrjArray;
cpy3d.extent = make_cudaExtent(m_DetectorColumnCount, m_DetectorRowCount, m_nPrjBatchSize);
cpy3d.kind = cudaMemcpyDeviceToDevice;
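// Per-batch pipeline: upload one batch of projections, apply the FDK cosine
// pre-weighting, run the FFT-based ramp filtering row by row, then copy the
// filtered sinogram into the 3D array bound to Prj_tex and backproject it
// into the reconstruction volume.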
for (int j = 0 ; j < m_nBatch; j++)
{
/// <summary>
/// Copy host Sinogram to GPU Memory
/// </summary>
cudaMemset(m_DsinoData, 0, sizeof(cufftReal)*m_DetectorRowCount*m_iPaddedDetCount* m_nPrjBatchSize);
cudaMemcpy2D(m_DsinoData,
sizeof(cufftReal)*m_iPaddedDetCount,
m_hSinoData + j*m_DetectorColumnCount*m_DetectorRowCount*m_nPrjBatchSize,
sizeof(float)*m_DetectorColumnCount,
sizeof(float)*m_DetectorColumnCount,
m_DetectorRowCount*m_nPrjBatchSize,
cudaMemcpyHostToDevice);
err = cudaGetLastError();
if (err != cudaSuccess && err != cudaErrorSetOnActiveProcess)
{
isOK = false;
break;
}
/// <summary>
/// Step1. Do FDK Preweighting
/// </summary>
Correction<<<DetGrid,Block>>>(m_DsinoData, m_iPaddedDetCount, m_DCorrectionMatrix, m_DetectorColumnCount, m_DetectorRowCount, m_nPrjBatchSize);
err = cudaGetLastError();
if (err != cudaSuccess && err != cudaErrorSetOnActiveProcess)
{
isOK = false;
break;
}
/// <summary>
/// Step2. do Filter
/// </summary>
cufftExecR2C(m_FwdFFT, m_DsinoData, m_DFilteredsinoData);
Filter_FFT<<<FFTGrid,Block>>>(m_DFilteredsinoData, m_RampFilter, m_DetectorRowCount*m_nPrjBatchSize,
m_iHalfFFTSize);
cufftExecC2R(m_BwdFFT, m_DFilteredsinoData, m_DsinoData);
err = cudaGetLastError();
if (err != cudaSuccess && err != cudaErrorSetOnActiveProcess)
{
isOK = false;
break;
}
/// <summary>
/// Step3. Bind Filtered Sinogram to Array and do Backprojection
/// </summary>
cudaMemcpy3D(&cpy3d);
bindProjDataTexture(m_PrjArray);
BP<<<BPGrid,Block>>>(m_DReconData, m_nReconDataPitch, j);
cudaUnbindTexture(Prj_tex);
err = cudaGetLastError();
if (err != cudaSuccess && err != cudaErrorSetOnActiveProcess)
{
isOK = false;
break;
}
}
}
////Frees all GPU resources associated with a cuFFT plan and destroys the internal plan data structure.
////This function should be called once a plan is no longer needed, to avoid wasting GPU memory
//cufftDestroy(m_FwdFFT);
//cufftDestroy(m_BwdFFT);
return isOK;
}
}
|
6f73276014c95e22b22756524d0c497b8c485468.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
/*
* Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifdef LINUX // Only supported by gcc on Linux (defined in Makefile)
#define JITIFY_ENABLE_EMBEDDED_FILES 1
#endif
#define JITIFY_PRINT_INSTANTIATION 1
#define JITIFY_PRINT_SOURCE 1
#define JITIFY_PRINT_LOG 1
#define JITIFY_PRINT_PTX 1
#define JITIFY_PRINT_LINKER_LOG 1
#define JITIFY_PRINT_LAUNCH 1
#define JITIFY_PRINT_HEADER_PATHS 1
#ifdef LINUX // Only supported by gcc on Linux (defined in Makefile)
JITIFY_INCLUDE_EMBEDDED_FILE(example_headers_my_header2_cuh);
#endif
__global__ void get_attribute_kernel(int *out, int *in) {
__shared__ int buffer[4096];
buffer[threadIdx.x] = in[threadIdx.x];
__syncthreads();
out[threadIdx.y] = buffer[threadIdx.x];
}
|
6f73276014c95e22b22756524d0c497b8c485468.cu
|
#include "includes.h"
/*
* Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifdef LINUX // Only supported by gcc on Linux (defined in Makefile)
#define JITIFY_ENABLE_EMBEDDED_FILES 1
#endif
#define JITIFY_PRINT_INSTANTIATION 1
#define JITIFY_PRINT_SOURCE 1
#define JITIFY_PRINT_LOG 1
#define JITIFY_PRINT_PTX 1
#define JITIFY_PRINT_LINKER_LOG 1
#define JITIFY_PRINT_LAUNCH 1
#define JITIFY_PRINT_HEADER_PATHS 1
#ifdef LINUX // Only supported by gcc on Linux (defined in Makefile)
JITIFY_INCLUDE_EMBEDDED_FILE(example_headers_my_header2_cuh);
#endif
__global__ void get_attribute_kernel(int *out, int *in) {
__shared__ int buffer[4096];
buffer[threadIdx.x] = in[threadIdx.x];
__syncthreads();
out[threadIdx.y] = buffer[threadIdx.x];
}
|
6549628e035656d24cc5683f1f433bf65184349d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "test.h"
// 2018/04/20 apply GPU acceleration
#define pow2(x) (1.0*(x)*(x))
__device__ const double PI = 3.141592653589793;
__global__ void BackProjection(const float *dev_R, float *dev_Display, bool * dev_signal)
{
const unsigned int Tindex = threadIdx.x;
const unsigned int Bindex = blockIdx.x;
int index = Bindex * 256 + Tindex;
__syncthreads();
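// The busy-wait below appears intended to serialize updates to
// dev_Display[Tindex] across blocks via the dev_signal flags. The
// check-then-set on dev_signal is not atomic, however, so concurrent blocks
// can still pass the test at the same time; an atomicAdd on dev_Display
// would be the usual race-free alternative.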
//for (int num = 0;num<256*16;num++)
//{
// if (num == 0)
// {
while (1) {
if (!dev_signal[Tindex]) {
dev_signal[Tindex] = true;
dev_Display[Tindex] += /*dev_R[index]*/Bindex;
dev_signal[Tindex] = false;
__threadfence();
break;
}
}
// }
//}
//__threadfence();
//dev_Display[Tindex] = 3;
}
// Helper function for using CUDA to add vectors in parallel.
hipError_t FDKpro(float *Display, const float *R)
{
float* dev_Display = 0; bool *dev_signal = 0; float* dev_R = 0;
int LR = 16 * 256; int LD = 256;
const dim3 thread_cubic(256, 1, 1);
const dim3 block_cubic(16, 1, 1);
hipError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?\n");
mexPrintf("hipSetDevice failed! Do you have a CUDA-capable GPU installed? %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
mexPrintf("call for space in GPU\n");
// Allocate GPU buffers for three vectors (two input, one output).
cudaStatus = hipMalloc((void**)&dev_R, LR * sizeof(float));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "dev_R hipMalloc failed!\n");
mexPrintf("dev_R hipMalloc failed! %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_Display, LD * sizeof(float));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "dev_Pdomain hipMalloc failed!\n");
mexPrintf("dev_Pdomain hipMalloc failed! %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
hipMemset(dev_Display, 0, LD * sizeof(float));
cudaStatus = hipMalloc((void**)&dev_signal, LD * sizeof(bool));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "dev_Pdomain hipMalloc failed!\n");
mexPrintf("dev_Pdomain hipMalloc failed! %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
hipMemset(dev_signal, false, LD * sizeof(bool));
//mexPrintf("copy data in CPU to GPU\n");
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_R, R, LR * sizeof(float), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy R failed!\n");
mexPrintf("hipMemcpy R failed! %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
//Backprojection
BackProjection << <block_cubic, thread_cubic >> > (dev_R, dev_Display, dev_signal);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "BackProjection launch failed: %s\n", hipGetErrorString(cudaStatus));
mexPrintf("BackProjection launch failed %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
mexPrintf("hipDeviceSynchronize returned error code %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
//Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(Display, dev_Display, LD * sizeof(float), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!\n");
mexPrintf("hipMemcpy dev_Display failed! %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
Error:
hipFree(dev_R);
hipFree(dev_Display);
hipFree(dev_signal);
mexPrintf("Exit FDK\n");
return cudaStatus;
}
|
6549628e035656d24cc5683f1f433bf65184349d.cu
|
#include "test.h"
// 2018/04/20 apply GPU acceleration
#define pow2(x) (1.0*(x)*(x))
__device__ const double PI = 3.141592653589793;
__global__ void BackProjection(const float *dev_R, float *dev_Display, bool * dev_signal)
{
const unsigned int Tindex = threadIdx.x;
const unsigned int Bindex = blockIdx.x;
int index = Bindex * 256 + Tindex;
__syncthreads();
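// The busy-wait below appears intended to serialize updates to
// dev_Display[Tindex] across blocks via the dev_signal flags. The
// check-then-set on dev_signal is not atomic, however, so concurrent blocks
// can still pass the test at the same time; an atomicAdd on dev_Display
// would be the usual race-free alternative.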
//for (int num = 0;num<256*16;num++)
//{
// if (num == 0)
// {
while (1) {
if (!dev_signal[Tindex]) {
dev_signal[Tindex] = true;
dev_Display[Tindex] += /*dev_R[index]*/Bindex;
dev_signal[Tindex] = false;
__threadfence();
break;
}
}
// }
//}
//__threadfence();
//dev_Display[Tindex] = 3;
}
// Helper function for using CUDA to add vectors in parallel.
cudaError_t FDKpro(float *Display, const float *R)
{
float* dev_Display = 0; bool *dev_signal = 0; float* dev_R = 0;
int LR = 16 * 256; int LD = 256;
const dim3 thread_cubic(256, 1, 1);
const dim3 block_cubic(16, 1, 1);
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?\n");
mexPrintf("cudaSetDevice failed! Do you have a CUDA-capable GPU installed? %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
mexPrintf("call for space in GPU\n");
// Allocate GPU buffers for three vectors (two input, one output).
cudaStatus = cudaMalloc((void**)&dev_R, LR * sizeof(float));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "dev_R cudaMalloc failed!\n");
mexPrintf("dev_R cudaMalloc failed! %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_Display, LD * sizeof(float));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "dev_Pdomain cudaMalloc failed!\n");
mexPrintf("dev_Pdomain cudaMalloc failed! %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
cudaMemset(dev_Display, 0, LD * sizeof(float));
cudaStatus = cudaMalloc((void**)&dev_signal, LD * sizeof(bool));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "dev_Pdomain cudaMalloc failed!\n");
mexPrintf("dev_Pdomain cudaMalloc failed! %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
cudaMemset(dev_signal, false, LD * sizeof(bool));
//mexPrintf("copy data in CPU to GPU\n");
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_R, R, LR * sizeof(float), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy R failed!\n");
mexPrintf("cudaMemcpy R failed! %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
//Backprojection
BackProjection << <block_cubic, thread_cubic >> > (dev_R, dev_Display, dev_signal);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "BackProjection launch failed: %s\n", cudaGetErrorString(cudaStatus));
mexPrintf("BackProjection launch failed %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
mexPrintf("cudaDeviceSynchronize returned error code %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
//Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(Display, dev_Display, LD * sizeof(float), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!\n");
mexPrintf("cudaMemcpy dev_Display failed! %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
Error:
cudaFree(dev_R);
cudaFree(dev_Display);
cudaFree(dev_signal);
mexPrintf("Exit FDK\n");
return cudaStatus;
}
|
5b880461d80604a8749216d2d441da9469af6066.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// REQUIRES: x86-registered-target, amdgpu-registered-target
// RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -fcuda-is-device -std=c++11 \
// RUN: -emit-llvm -o - -x hip %s | FileCheck \
// RUN: -check-prefixes=COMMON,DEV,NORDC-D %s
// RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -fcuda-is-device -std=c++11 \
// RUN: -emit-llvm -fgpu-rdc -cuid=abc -o - -x hip %s > %t.dev
// RUN: cat %t.dev | FileCheck -check-prefixes=COMMON,DEV,RDC-D %s
// RUN: %clang_cc1 -triple x86_64-gnu-linux -std=c++11 \
// RUN: -emit-llvm -o - -x hip %s | FileCheck \
// RUN: -check-prefixes=COMMON,HOST,NORDC %s
// RUN: %clang_cc1 -triple x86_64-gnu-linux -std=c++11 \
// RUN: -emit-llvm -fgpu-rdc -cuid=abc -o - -x hip %s > %t.host
// RUN: cat %t.host | FileCheck -check-prefixes=COMMON,HOST,RDC %s
// Check device and host compilation use the same postfix for static
// variable name.
// RUN: cat %t.dev %t.host | FileCheck -check-prefix=POSTFIX %s
#include "Inputs/cuda.h"
struct vec {
float x,y,z;
};
// DEV-DAG: @x.managed = addrspace(1) externally_initialized global i32 1, align 4
// DEV-DAG: @x = addrspace(1) externally_initialized global i32 addrspace(1)* null
// NORDC-DAG: @x.managed = internal global i32 1
// RDC-DAG: @x.managed = global i32 1
// NORDC-DAG: @x = internal externally_initialized global i32* null
// RDC-DAG: @x = externally_initialized global i32* null
// HOST-DAG: @[[DEVNAMEX:[0-9]+]] = {{.*}}c"x\00"
__managed__ int x = 1;
// DEV-DAG: @v.managed = addrspace(1) externally_initialized global [100 x %struct.vec] zeroinitializer, align 4
// DEV-DAG: @v = addrspace(1) externally_initialized global [100 x %struct.vec] addrspace(1)* null
__managed__ vec v[100];
// DEV-DAG: @v2.managed = addrspace(1) externally_initialized global <{ %struct.vec, [99 x %struct.vec] }> <{ %struct.vec { float 1.000000e+00, float 1.000000e+00, float 1.000000e+00 }, [99 x %struct.vec] zeroinitializer }>, align 4
// DEV-DAG: @v2 = addrspace(1) externally_initialized global <{ %struct.vec, [99 x %struct.vec] }> addrspace(1)* null
__managed__ vec v2[100] = {{1, 1, 1}};
// DEV-DAG: @ex.managed = external addrspace(1) global i32, align 4
// DEV-DAG: @ex = external addrspace(1) externally_initialized global i32 addrspace(1)*
// HOST-DAG: @ex.managed = external global i32
// HOST-DAG: @ex = external externally_initialized global i32*
extern __managed__ int ex;
// NORDC-D-DAG: @_ZL2sx.managed = addrspace(1) externally_initialized global i32 1, align 4
// NORDC-D-DAG: @_ZL2sx = addrspace(1) externally_initialized global i32 addrspace(1)* null
// RDC-D-DAG: @_ZL2sx.static.[[HASH:.*]].managed = addrspace(1) externally_initialized global i32 1, align 4
// RDC-D-DAG: @_ZL2sx.static.[[HASH]] = addrspace(1) externally_initialized global i32 addrspace(1)* null
// HOST-DAG: @_ZL2sx.managed = internal global i32 1
// HOST-DAG: @_ZL2sx = internal externally_initialized global i32* null
// NORDC-DAG: @[[DEVNAMESX:[0-9]+]] = {{.*}}c"_ZL2sx\00"
// RDC-DAG: @[[DEVNAMESX:[0-9]+]] = {{.*}}c"_ZL2sx.static.[[HASH:.*]]\00"
// POSTFIX: @_ZL2sx.static.[[HASH:.*]] = addrspace(1) externally_initialized global i32 addrspace(1)* null
// POSTFIX: @[[DEVNAMESX:[0-9]+]] = {{.*}}c"_ZL2sx.static.[[HASH]]\00"
static __managed__ int sx = 1;
// DEV-DAG: @llvm.compiler.used
// DEV-SAME-DAG: @x.managed
// DEV-SAME-DAG: @x
// DEV-SAME-DAG: @v.managed
// DEV-SAME-DAG: @v
// DEV-SAME-DAG: @_ZL2sx.managed
// DEV-SAME-DAG: @_ZL2sx
// Force ex and sx emitted in device compilation.
__global__ void foo(int *z) {
*z = x + ex + sx;
v[1].x = 2;
}
// Force ex and sx emitted in host compilation.
int foo2() {
return ex + sx;
}
// COMMON-LABEL: define {{.*}}@_Z4loadv()
// DEV: %ld.managed = load i32 addrspace(1)*, i32 addrspace(1)* addrspace(1)* @x, align 4
// DEV: %0 = addrspacecast i32 addrspace(1)* %ld.managed to i32*
// DEV: %1 = load i32, i32* %0, align 4
// DEV: ret i32 %1
// HOST: %ld.managed = load i32*, i32** @x, align 4
// HOST: %0 = load i32, i32* %ld.managed, align 4
// HOST: ret i32 %0
__device__ __host__ int load() {
return x;
}
// COMMON-LABEL: define {{.*}}@_Z5storev()
// DEV: %ld.managed = load i32 addrspace(1)*, i32 addrspace(1)* addrspace(1)* @x, align 4
// DEV: %0 = addrspacecast i32 addrspace(1)* %ld.managed to i32*
// DEV: store i32 2, i32* %0, align 4
// HOST: %ld.managed = load i32*, i32** @x, align 4
// HOST: store i32 2, i32* %ld.managed, align 4
__device__ __host__ void store() {
x = 2;
}
// COMMON-LABEL: define {{.*}}@_Z10addr_takenv()
// DEV: %0 = addrspacecast i32 addrspace(1)* %ld.managed to i32*
// DEV: store i32* %0, i32** %p.ascast, align 8
// DEV: %1 = load i32*, i32** %p.ascast, align 8
// DEV: store i32 3, i32* %1, align 4
// HOST: %ld.managed = load i32*, i32** @x, align 4
// HOST: store i32* %ld.managed, i32** %p, align 8
// HOST: %0 = load i32*, i32** %p, align 8
// HOST: store i32 3, i32* %0, align 4
__device__ __host__ void addr_taken() {
int *p = &x;
*p = 3;
}
// HOST-LABEL: define {{.*}}@_Z5load2v()
// HOST: %ld.managed = load [100 x %struct.vec]*, [100 x %struct.vec]** @v, align 16
// HOST: %0 = getelementptr inbounds [100 x %struct.vec], [100 x %struct.vec]* %ld.managed, i64 0, i64 1, i32 0
// HOST: %1 = load float, float* %0, align 4
// HOST: ret float %1
__device__ __host__ float load2() {
return v[1].x;
}
// HOST-LABEL: define {{.*}}@_Z5load3v()
// HOST: %ld.managed = load <{ %struct.vec, [99 x %struct.vec] }>*, <{ %struct.vec, [99 x %struct.vec] }>** @v2, align 16
// HOST: %0 = bitcast <{ %struct.vec, [99 x %struct.vec] }>* %ld.managed to [100 x %struct.vec]*
// HOST: %1 = getelementptr inbounds [100 x %struct.vec], [100 x %struct.vec]* %0, i64 0, i64 1, i32 1
// HOST: %2 = load float, float* %1, align 4
// HOST: ret float %2
float load3() {
return v2[1].y;
}
// HOST-LABEL: define {{.*}}@_Z11addr_taken2v()
// HOST: %ld.managed = load [100 x %struct.vec]*, [100 x %struct.vec]** @v, align 16
// HOST: %0 = getelementptr inbounds [100 x %struct.vec], [100 x %struct.vec]* %ld.managed, i64 0, i64 1, i32 0
// HOST: %1 = ptrtoint float* %0 to i64
// HOST: %ld.managed1 = load <{ %struct.vec, [99 x %struct.vec] }>*, <{ %struct.vec, [99 x %struct.vec] }>** @v2, align 16
// HOST: %2 = bitcast <{ %struct.vec, [99 x %struct.vec] }>* %ld.managed1 to [100 x %struct.vec]*
// HOST: %3 = getelementptr inbounds [100 x %struct.vec], [100 x %struct.vec]* %2, i64 0, i64 1, i32 1
// HOST: %4 = ptrtoint float* %3 to i64
// HOST: %5 = sub i64 %4, %1
// HOST: %6 = sdiv i64 %5, 4
// HOST: %7 = sitofp i64 %6 to float
// HOST: ret float %7
float addr_taken2() {
return (float)reinterpret_cast<long>(&(v2[1].y)-&(v[1].x));
}
// COMMON-LABEL: define {{.*}}@_Z5load4v()
// DEV: %ld.managed = load i32 addrspace(1)*, i32 addrspace(1)* addrspace(1)* @ex, align 4
// DEV: %0 = addrspacecast i32 addrspace(1)* %ld.managed to i32*
// DEV: %1 = load i32, i32* %0, align 4
// DEV: ret i32 %1
// HOST: %ld.managed = load i32*, i32** @ex, align 4
// HOST: %0 = load i32, i32* %ld.managed, align 4
// HOST: ret i32 %0
__device__ __host__ int load4() {
return ex;
}
// HOST-DAG: __hipRegisterManagedVar({{.*}}@x {{.*}}@x.managed {{.*}}@[[DEVNAMEX]]{{.*}}, i64 4, i32 4)
// HOST-DAG: __hipRegisterManagedVar({{.*}}@_ZL2sx {{.*}}@_ZL2sx.managed {{.*}}@[[DEVNAMESX]]
// HOST-NOT: __hipRegisterManagedVar({{.*}}@ex {{.*}}@ex.managed
// HOST-DAG: declare void @__hipRegisterManagedVar(i8**, i8*, i8*, i8*, i64, i32)
|
5b880461d80604a8749216d2d441da9469af6066.cu
|
// REQUIRES: x86-registered-target, amdgpu-registered-target
// RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -fcuda-is-device -std=c++11 \
// RUN: -emit-llvm -o - -x hip %s | FileCheck \
// RUN: -check-prefixes=COMMON,DEV,NORDC-D %s
// RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -fcuda-is-device -std=c++11 \
// RUN: -emit-llvm -fgpu-rdc -cuid=abc -o - -x hip %s > %t.dev
// RUN: cat %t.dev | FileCheck -check-prefixes=COMMON,DEV,RDC-D %s
// RUN: %clang_cc1 -triple x86_64-gnu-linux -std=c++11 \
// RUN: -emit-llvm -o - -x hip %s | FileCheck \
// RUN: -check-prefixes=COMMON,HOST,NORDC %s
// RUN: %clang_cc1 -triple x86_64-gnu-linux -std=c++11 \
// RUN: -emit-llvm -fgpu-rdc -cuid=abc -o - -x hip %s > %t.host
// RUN: cat %t.host | FileCheck -check-prefixes=COMMON,HOST,RDC %s
// Check device and host compilation use the same postfix for static
// variable name.
// RUN: cat %t.dev %t.host | FileCheck -check-prefix=POSTFIX %s
#include "Inputs/cuda.h"
struct vec {
float x,y,z;
};
// DEV-DAG: @x.managed = addrspace(1) externally_initialized global i32 1, align 4
// DEV-DAG: @x = addrspace(1) externally_initialized global i32 addrspace(1)* null
// NORDC-DAG: @x.managed = internal global i32 1
// RDC-DAG: @x.managed = global i32 1
// NORDC-DAG: @x = internal externally_initialized global i32* null
// RDC-DAG: @x = externally_initialized global i32* null
// HOST-DAG: @[[DEVNAMEX:[0-9]+]] = {{.*}}c"x\00"
__managed__ int x = 1;
// DEV-DAG: @v.managed = addrspace(1) externally_initialized global [100 x %struct.vec] zeroinitializer, align 4
// DEV-DAG: @v = addrspace(1) externally_initialized global [100 x %struct.vec] addrspace(1)* null
__managed__ vec v[100];
// DEV-DAG: @v2.managed = addrspace(1) externally_initialized global <{ %struct.vec, [99 x %struct.vec] }> <{ %struct.vec { float 1.000000e+00, float 1.000000e+00, float 1.000000e+00 }, [99 x %struct.vec] zeroinitializer }>, align 4
// DEV-DAG: @v2 = addrspace(1) externally_initialized global <{ %struct.vec, [99 x %struct.vec] }> addrspace(1)* null
__managed__ vec v2[100] = {{1, 1, 1}};
// DEV-DAG: @ex.managed = external addrspace(1) global i32, align 4
// DEV-DAG: @ex = external addrspace(1) externally_initialized global i32 addrspace(1)*
// HOST-DAG: @ex.managed = external global i32
// HOST-DAG: @ex = external externally_initialized global i32*
extern __managed__ int ex;
// NORDC-D-DAG: @_ZL2sx.managed = addrspace(1) externally_initialized global i32 1, align 4
// NORDC-D-DAG: @_ZL2sx = addrspace(1) externally_initialized global i32 addrspace(1)* null
// RDC-D-DAG: @_ZL2sx.static.[[HASH:.*]].managed = addrspace(1) externally_initialized global i32 1, align 4
// RDC-D-DAG: @_ZL2sx.static.[[HASH]] = addrspace(1) externally_initialized global i32 addrspace(1)* null
// HOST-DAG: @_ZL2sx.managed = internal global i32 1
// HOST-DAG: @_ZL2sx = internal externally_initialized global i32* null
// NORDC-DAG: @[[DEVNAMESX:[0-9]+]] = {{.*}}c"_ZL2sx\00"
// RDC-DAG: @[[DEVNAMESX:[0-9]+]] = {{.*}}c"_ZL2sx.static.[[HASH:.*]]\00"
// POSTFIX: @_ZL2sx.static.[[HASH:.*]] = addrspace(1) externally_initialized global i32 addrspace(1)* null
// POSTFIX: @[[DEVNAMESX:[0-9]+]] = {{.*}}c"_ZL2sx.static.[[HASH]]\00"
static __managed__ int sx = 1;
// DEV-DAG: @llvm.compiler.used
// DEV-SAME-DAG: @x.managed
// DEV-SAME-DAG: @x
// DEV-SAME-DAG: @v.managed
// DEV-SAME-DAG: @v
// DEV-SAME-DAG: @_ZL2sx.managed
// DEV-SAME-DAG: @_ZL2sx
// Force ex and sx emitted in device compilation.
__global__ void foo(int *z) {
*z = x + ex + sx;
v[1].x = 2;
}
// Force ex and sx emitted in host compilation.
int foo2() {
return ex + sx;
}
// COMMON-LABEL: define {{.*}}@_Z4loadv()
// DEV: %ld.managed = load i32 addrspace(1)*, i32 addrspace(1)* addrspace(1)* @x, align 4
// DEV: %0 = addrspacecast i32 addrspace(1)* %ld.managed to i32*
// DEV: %1 = load i32, i32* %0, align 4
// DEV: ret i32 %1
// HOST: %ld.managed = load i32*, i32** @x, align 4
// HOST: %0 = load i32, i32* %ld.managed, align 4
// HOST: ret i32 %0
__device__ __host__ int load() {
return x;
}
// COMMON-LABEL: define {{.*}}@_Z5storev()
// DEV: %ld.managed = load i32 addrspace(1)*, i32 addrspace(1)* addrspace(1)* @x, align 4
// DEV: %0 = addrspacecast i32 addrspace(1)* %ld.managed to i32*
// DEV: store i32 2, i32* %0, align 4
// HOST: %ld.managed = load i32*, i32** @x, align 4
// HOST: store i32 2, i32* %ld.managed, align 4
__device__ __host__ void store() {
x = 2;
}
// COMMON-LABEL: define {{.*}}@_Z10addr_takenv()
// DEV: %0 = addrspacecast i32 addrspace(1)* %ld.managed to i32*
// DEV: store i32* %0, i32** %p.ascast, align 8
// DEV: %1 = load i32*, i32** %p.ascast, align 8
// DEV: store i32 3, i32* %1, align 4
// HOST: %ld.managed = load i32*, i32** @x, align 4
// HOST: store i32* %ld.managed, i32** %p, align 8
// HOST: %0 = load i32*, i32** %p, align 8
// HOST: store i32 3, i32* %0, align 4
__device__ __host__ void addr_taken() {
int *p = &x;
*p = 3;
}
// HOST-LABEL: define {{.*}}@_Z5load2v()
// HOST: %ld.managed = load [100 x %struct.vec]*, [100 x %struct.vec]** @v, align 16
// HOST: %0 = getelementptr inbounds [100 x %struct.vec], [100 x %struct.vec]* %ld.managed, i64 0, i64 1, i32 0
// HOST: %1 = load float, float* %0, align 4
// HOST: ret float %1
__device__ __host__ float load2() {
return v[1].x;
}
// HOST-LABEL: define {{.*}}@_Z5load3v()
// HOST: %ld.managed = load <{ %struct.vec, [99 x %struct.vec] }>*, <{ %struct.vec, [99 x %struct.vec] }>** @v2, align 16
// HOST: %0 = bitcast <{ %struct.vec, [99 x %struct.vec] }>* %ld.managed to [100 x %struct.vec]*
// HOST: %1 = getelementptr inbounds [100 x %struct.vec], [100 x %struct.vec]* %0, i64 0, i64 1, i32 1
// HOST: %2 = load float, float* %1, align 4
// HOST: ret float %2
float load3() {
return v2[1].y;
}
// HOST-LABEL: define {{.*}}@_Z11addr_taken2v()
// HOST: %ld.managed = load [100 x %struct.vec]*, [100 x %struct.vec]** @v, align 16
// HOST: %0 = getelementptr inbounds [100 x %struct.vec], [100 x %struct.vec]* %ld.managed, i64 0, i64 1, i32 0
// HOST: %1 = ptrtoint float* %0 to i64
// HOST: %ld.managed1 = load <{ %struct.vec, [99 x %struct.vec] }>*, <{ %struct.vec, [99 x %struct.vec] }>** @v2, align 16
// HOST: %2 = bitcast <{ %struct.vec, [99 x %struct.vec] }>* %ld.managed1 to [100 x %struct.vec]*
// HOST: %3 = getelementptr inbounds [100 x %struct.vec], [100 x %struct.vec]* %2, i64 0, i64 1, i32 1
// HOST: %4 = ptrtoint float* %3 to i64
// HOST: %5 = sub i64 %4, %1
// HOST: %6 = sdiv i64 %5, 4
// HOST: %7 = sitofp i64 %6 to float
// HOST: ret float %7
float addr_taken2() {
return (float)reinterpret_cast<long>(&(v2[1].y)-&(v[1].x));
}
// COMMON-LABEL: define {{.*}}@_Z5load4v()
// DEV: %ld.managed = load i32 addrspace(1)*, i32 addrspace(1)* addrspace(1)* @ex, align 4
// DEV: %0 = addrspacecast i32 addrspace(1)* %ld.managed to i32*
// DEV: %1 = load i32, i32* %0, align 4
// DEV: ret i32 %1
// HOST: %ld.managed = load i32*, i32** @ex, align 4
// HOST: %0 = load i32, i32* %ld.managed, align 4
// HOST: ret i32 %0
__device__ __host__ int load4() {
return ex;
}
// HOST-DAG: __hipRegisterManagedVar({{.*}}@x {{.*}}@x.managed {{.*}}@[[DEVNAMEX]]{{.*}}, i64 4, i32 4)
// HOST-DAG: __hipRegisterManagedVar({{.*}}@_ZL2sx {{.*}}@_ZL2sx.managed {{.*}}@[[DEVNAMESX]]
// HOST-NOT: __hipRegisterManagedVar({{.*}}@ex {{.*}}@ex.managed
// HOST-DAG: declare void @__hipRegisterManagedVar(i8**, i8*, i8*, i8*, i64, i32)
|
7edea4247529860a50573a7187f57249343c2ffd.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<cuda.h>
#include<stdio.h>
#include "LevelSet/extrapol.h"
#include "LevelSet/dimdef.h"
void exta(
double* extVal_d,
double* phiS_d,
double* jbn_d,
double* d_Phi_d,
double* rs_d,
double* d_d,
double deltaX, double deltaY, double deltaZ,
int Nx, int Ny, int Nz,
double dtext,
int Flag);
extern "C"{
void callCUDA(
double* pressg,
double* velocity,
double* phiS,
double* jbn,
double deltaX,
double deltaY,
double deltaZ,
unsigned int Nx,
unsigned int Ny,
unsigned int Nz,
double dtext
)
{
unsigned int Offset = Nx*Ny*Nz;
double *pressg_d, *velocity_d, *phiS_d, *jbn_d,
*rs_d, *d_Phi_d, *extVal_d,
*d_d;
hipMalloc((void**)&pressg_d,sizeof(double)*Offset);
hipMalloc((void**)&velocity_d,sizeof(double)*3*Offset);
hipMalloc((void**)&phiS_d,sizeof(double)*Offset);
hipMalloc((void**)&jbn_d,sizeof(double)*11*Offset);
hipMalloc((void**)&d_d,sizeof(double)*Offset);
hipMalloc((void**)&extVal_d,sizeof(double)*Offset);
hipMalloc((void**)&d_Phi_d,sizeof(double)*3*Offset);
hipMalloc((void**)&rs_d,sizeof(double)*Offset);
hipMemcpy(pressg_d, pressg, sizeof(double)*Offset,
hipMemcpyHostToDevice );
hipMemcpy(velocity_d,velocity,sizeof(double)*3*Offset,
hipMemcpyHostToDevice );
hipMemcpy(phiS_d, phiS, sizeof(double)*Offset,
hipMemcpyHostToDevice );
hipMemcpy(jbn_d, jbn, sizeof(double)*11*Offset,
hipMemcpyHostToDevice );
dim3 DimBlock(BLOCKDMX,BLOCKDMY,BLOCKDMZ);
dim3 DimGrid(GRIDMX,GRIDMY,GRIDMZ);
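// DevFirstOrder_LS presumably fills d_Phi_d with first-order derivatives of
// the level set phiS (mapped through the Jacobian jbn), which drive the
// extrapolation below.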
hipLaunchKernelGGL(( DevFirstOrder_LS), dim3(DimGrid), dim3(DimBlock), 0, 0,
d_Phi_d,
phiS_d,
jbn_d,
deltaX,
deltaY,
deltaZ,
Nx,
Ny,
Nz
);
// Extrapolating Velocity liquid variables
exta(velocity_d, phiS_d, jbn_d, d_Phi_d, rs_d, d_d,
deltaX, deltaY, deltaZ, Nx, Ny, Nz,
dtext, -1);
printf(" U Velocity Liquid \n");
exta(&(velocity_d[1*Offset]), phiS_d, jbn_d, d_Phi_d, rs_d, d_d,
deltaX, deltaY, deltaZ, Nx, Ny, Nz,
dtext, -1);
printf(" V Velocity Liquid \n");
exta(&(velocity_d[2*Offset]), phiS_d, jbn_d, d_Phi_d, rs_d, d_d,
deltaX, deltaY, deltaZ, Nx, Ny, Nz,
dtext, -1);
printf(" W Velocity Liquid \n");
// Extrapolating Gas Pressure Variable
exta(pressg_d, phiS_d, jbn_d, d_Phi_d, rs_d, d_d,
deltaX, deltaY, deltaZ, Nx, Ny, Nz,
dtext, 1);
printf(" Pressure Gas \n");
// Returning values from Device to Host
hipMemcpy(velocity,velocity_d,sizeof(double)*3*Offset,
hipMemcpyDeviceToHost );
hipMemcpy(pressg,pressg_d,sizeof(double)*Offset,
hipMemcpyDeviceToHost );
hipFree(pressg_d);
hipFree(velocity_d);
hipFree(jbn_d);
hipFree(phiS_d);
hipFree(d_Phi_d);
hipFree(d_d);
hipFree(extVal_d);
hipFree(rs_d);
return;
}
}
extern "C"{
void normalLEvelSetCUDA(
double* nV,
double* phiS,
double* jbn,
double deltaX,
double deltaY,
double deltaZ,
unsigned int Nx,
unsigned int Ny,
unsigned int Nz
)
{
unsigned int Offset = Nx*Ny*Nz;
double *nV_d, *phiS_d, *jbn_d;
hipMalloc((void**)&nV_d,sizeof(double)*3*Offset);
hipMalloc((void**)&phiS_d,sizeof(double)*Offset);
hipMalloc((void**)&jbn_d,sizeof(double)*11*Offset);
hipMemcpy(phiS_d, phiS, sizeof(double)*Offset,
hipMemcpyHostToDevice );
hipMemcpy(jbn_d, jbn, sizeof(double)*11*Offset,
hipMemcpyHostToDevice );
dim3 DimBlock(BLOCKDMX,BLOCKDMY,BLOCKDMZ);
dim3 DimGrid(GRIDMX,GRIDMY,GRIDMZ);
hipLaunchKernelGGL(( DevFirstOrder_LS), dim3(DimGrid), dim3(DimBlock), 0, 0,
nV_d,
phiS_d,
jbn_d,
deltaX,
deltaY,
deltaZ,
Nx,
Ny,
Nz
);
// Returning values from Device to Host
hipMemcpy(nV,nV_d,sizeof(double)*3*Offset,
hipMemcpyDeviceToHost );
hipFree(jbn_d);
hipFree(phiS_d);
hipFree(nV_d);
return;
}
}
extern "C"{
void extrapolVarCUDA(
double* valToExt,
double* phiS,
double* d_Phi,
double* jbn,
double deltaX,
double deltaY,
double deltaZ,
unsigned int Nx,
unsigned int Ny,
unsigned int Nz,
double dtext
)
{
unsigned int Offset = Nx*Ny*Nz;
double *valToExt_d, *phiS_d, *jbn_d,
*rs_d, *d_Phi_d, *extVal_d,
*d_d;
hipMalloc((void**)&valToExt_d,sizeof(double)*Offset);
hipMalloc((void**)&phiS_d,sizeof(double)*Offset);
hipMalloc((void**)&d_Phi_d,sizeof(double)*3*Offset);
hipMalloc((void**)&jbn_d,sizeof(double)*11*Offset);
hipMalloc((void**)&extVal_d,sizeof(double)*Offset);
hipMalloc((void**)&rs_d,sizeof(double)*Offset);
hipMalloc((void**)&d_d,sizeof(double)*Offset);
hipMemcpy(valToExt_d,valToExt,sizeof(double)*Offset,
hipMemcpyHostToDevice );
hipMemcpy(phiS_d, phiS, sizeof(double)*Offset,
hipMemcpyHostToDevice );
hipMemcpy(jbn_d, jbn, sizeof(double)*11*Offset,
hipMemcpyHostToDevice );
hipMemcpy(d_Phi_d,d_Phi,sizeof(double)*3*Offset,
hipMemcpyHostToDevice );
dim3 DimBlock(BLOCKDMX,BLOCKDMY,BLOCKDMZ);
dim3 DimGrid(GRIDMX,GRIDMY,GRIDMZ);
// Extrapolating Velocity liquid variables
exta(valToExt_d, phiS_d, jbn_d, d_Phi_d, rs_d, d_d,
deltaX, deltaY, deltaZ, Nx, Ny, Nz,
dtext, 1);
printf(" Ext-Some Val \n");
// Returning values from Device to Host
hipMemcpy(valToExt,valToExt_d,sizeof(double)*Offset,
hipMemcpyDeviceToHost );
hipFree(valToExt_d);
hipFree(jbn_d);
hipFree(phiS_d);
hipFree(d_Phi_d);
hipFree(d_d);
hipFree(extVal_d);
hipFree(rs_d);
return;
}
}
void exta(
double* extVal_d,
double* phiS_d,
double* jbn_d,
double* d_Phi_d,
double* rs_d,
double* d_d,
double deltaX, double deltaY, double deltaZ,
int Nx, int Ny, int Nz,
double dtext,
int Flag
)
{
dim3 DimBlock(BLOCKDMX,BLOCKDMY,BLOCKDMZ);
dim3 DimGrid(GRIDMX,GRIDMY,GRIDMZ);
printf("\n\n Extrapolating on CUDA Device: \n");
for(int itera = 1 ; itera <=10 ; itera++){
hipLaunchKernelGGL(( extrapolKernel), dim3(DimGrid), dim3(DimBlock), 0, 0,
rs_d,
extVal_d, phiS_d, jbn_d, d_Phi_d,
deltaX, deltaY, deltaZ,
Nx, Ny, Nz,
Flag
);
hipLaunchKernelGGL(( RunGK_FirstS), dim3(DimGrid), dim3(DimBlock), 0, 0, d_d, extVal_d,
dtext, rs_d, Nx, Ny, Nz);
hipLaunchKernelGGL(( extrapolKernel), dim3(DimGrid), dim3(DimBlock), 0, 0,
rs_d, d_d,
phiS_d, jbn_d, d_Phi_d,
deltaX, deltaY, deltaZ,
Nx, Ny, Nz,
Flag);
hipLaunchKernelGGL(( RunGK_SecondS), dim3(DimGrid), dim3(DimBlock), 0, 0, d_d,
extVal_d, d_d,
dtext, rs_d,
Nx, Ny, Nz);
hipLaunchKernelGGL(( extrapolKernel), dim3(DimGrid), dim3(DimBlock), 0, 0,
rs_d, d_d,
phiS_d, jbn_d, d_Phi_d,
deltaX, deltaY, deltaZ,
Nx, Ny, Nz,
Flag);
hipLaunchKernelGGL(( RunGK_ThirdS), dim3(DimGrid), dim3(DimBlock), 0, 0, extVal_d,
extVal_d, d_d,
dtext, rs_d,
Nx, Ny, Nz);
}
// check for error
hipError_t error = hipGetLastError();
if(error != hipSuccess)
{
// print the CUDA error message and exit
printf("CUDA error: %s\n", hipGetErrorString(error));
exit(-1);
}
}
void cuExtrapolation(
double* extVal_d,
double* phiS_d,
double* jbn_d,
double deltaX, double deltaY, double deltaZ,
int Nx, int Ny, int Nz,
double dtext,
int Flag
)
{
double *d_dPhi;
double *rs_d;
double *d_d;
int Offset = Nx*Ny*Nz;
hipMalloc((void**)&d_dPhi, 3*sizeof(double)*Offset);
hipMalloc((void**)&rs_d, sizeof(double)*Offset);
hipMalloc((void**)&d_d, sizeof(double)*Offset);
int numGBX, numGBY,numGBZ;
dim3 dimBlock(10,10,5);
numGBX = Nx / 10;
numGBY = Ny / 10;
numGBZ = Nz / 5;
dim3 dimGrid(numGBX,numGBY,numGBZ);
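// Note: the hard-coded 10x10x5 block size assumes Nx and Ny are multiples of 10
// and Nz is a multiple of 5; otherwise the integer divisions above leave cells uncovered.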
hipLaunchKernelGGL(( DevFirstOrder_LS), dim3(dimGrid), dim3(dimBlock), 0, 0,
d_dPhi,
phiS_d,
jbn_d,
1.0/deltaX,
1.0/deltaY,
1.0/deltaZ,
Nx,
Ny,
Nz
);
printf("\n\n Extrapolating on CUDA Device: \n");
for(int itera = 1 ; itera <=10 ; itera++){
hipLaunchKernelGGL(( extrapolKernel), dim3(dimGrid), dim3(dimBlock), 0, 0,
rs_d,
extVal_d, phiS_d, jbn_d, d_dPhi,
1.0/deltaX, 1.0/deltaY, 1.0/deltaZ,
Nx, Ny, Nz,
Flag
);
hipLaunchKernelGGL(( RunGK_FirstS), dim3(dimGrid), dim3(dimBlock), 0, 0, d_d, extVal_d,
dtext, rs_d, Nx, Ny, Nz);
hipLaunchKernelGGL(( extrapolKernel), dim3(dimGrid), dim3(dimBlock), 0, 0,
rs_d, d_d,
phiS_d, jbn_d, d_dPhi,
1.0/deltaX, 1.0/deltaY, 1.0/deltaZ,
Nx, Ny, Nz,
Flag);
hipLaunchKernelGGL(( RunGK_SecondS), dim3(dimGrid), dim3(dimBlock), 0, 0, d_d,
extVal_d, d_d,
dtext, rs_d,
Nx, Ny, Nz);
hipLaunchKernelGGL(( extrapolKernel), dim3(dimGrid), dim3(dimBlock), 0, 0,
rs_d, d_d,
phiS_d, jbn_d, d_dPhi,
1.0/deltaX, 1.0/deltaY, 1.0/deltaZ,
Nx, Ny, Nz,
Flag);
hipLaunchKernelGGL(( RunGK_ThirdS), dim3(dimGrid), dim3(dimBlock), 0, 0, extVal_d,
extVal_d, d_d,
dtext, rs_d,
Nx, Ny, Nz);
}
// check for error
hipError_t error = hipGetLastError();
if(error != hipSuccess)
{
// print the CUDA error message and exit
printf("CUDA error: %s\n", hipGetErrorString(error));
exit(-1);
}
hipFree(d_d);
hipFree(d_dPhi);
hipFree(rs_d);
}
|
7edea4247529860a50573a7187f57249343c2ffd.cu
|
#include<cuda.h>
#include<stdio.h>
#include "LevelSet/extrapol.h"
#include "LevelSet/dimdef.h"
void exta(
double* extVal_d,
double* phiS_d,
double* jbn_d,
double* d_Phi_d,
double* rs_d,
double* d_d,
double deltaX, double deltaY, double deltaZ,
int Nx, int Ny, int Nz,
double dtext,
int Flag);
extern "C"{
void callCUDA(
double* pressg,
double* velocity,
double* phiS,
double* jbn,
double deltaX,
double deltaY,
double deltaZ,
unsigned int Nx,
unsigned int Ny,
unsigned int Nz,
double dtext
)
{
unsigned int Offset = Nx*Ny*Nz;
double *pressg_d, *velocity_d, *phiS_d, *jbn_d,
*rs_d, *d_Phi_d, *extVal_d,
*d_d;
cudaMalloc((void**)&pressg_d,sizeof(double)*Offset);
cudaMalloc((void**)&velocity_d,sizeof(double)*3*Offset);
cudaMalloc((void**)&phiS_d,sizeof(double)*Offset);
cudaMalloc((void**)&jbn_d,sizeof(double)*11*Offset);
cudaMalloc((void**)&d_d,sizeof(double)*Offset);
cudaMalloc((void**)&extVal_d,sizeof(double)*Offset);
cudaMalloc((void**)&d_Phi_d,sizeof(double)*3*Offset);
cudaMalloc((void**)&rs_d,sizeof(double)*Offset);
cudaMemcpy(pressg_d, pressg, sizeof(double)*Offset,
cudaMemcpyHostToDevice );
cudaMemcpy(velocity_d,velocity,sizeof(double)*3*Offset,
cudaMemcpyHostToDevice );
cudaMemcpy(phiS_d, phiS, sizeof(double)*Offset,
cudaMemcpyHostToDevice );
cudaMemcpy(jbn_d, jbn, sizeof(double)*11*Offset,
cudaMemcpyHostToDevice );
dim3 DimBlock(BLOCKDMX,BLOCKDMY,BLOCKDMZ);
dim3 DimGrid(GRIDMX,GRIDMY,GRIDMZ);
DevFirstOrder_LS<<<DimGrid, DimBlock>>>(
d_Phi_d,
phiS_d,
jbn_d,
deltaX,
deltaY,
deltaZ,
Nx,
Ny,
Nz
);
// Extrapolating Velocity liquid variables
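// velocity_d holds the U, V and W components contiguously (Offset doubles each),
// so each component is extrapolated separately below via pointer offsets.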
exta(velocity_d, phiS_d, jbn_d, d_Phi_d, rs_d, d_d,
deltaX, deltaY, deltaZ, Nx, Ny, Nz,
dtext, -1);
printf(" U Velocity Liquid \n");
exta(&(velocity_d[1*Offset]), phiS_d, jbn_d, d_Phi_d, rs_d, d_d,
deltaX, deltaY, deltaZ, Nx, Ny, Nz,
dtext, -1);
printf(" V Velocity Liquid \n");
exta(&(velocity_d[2*Offset]), phiS_d, jbn_d, d_Phi_d, rs_d, d_d,
deltaX, deltaY, deltaZ, Nx, Ny, Nz,
dtext, -1);
printf(" W Velocity Liquid \n");
// Extrapolating Gas Pressure Variable
exta(pressg_d, phiS_d, jbn_d, d_Phi_d, rs_d, d_d,
deltaX, deltaY, deltaZ, Nx, Ny, Nz,
dtext, 1);
printf(" Pressure Gas \n");
// Returning values from Device to Host
cudaMemcpy(velocity,velocity_d,sizeof(double)*3*Offset,
cudaMemcpyDeviceToHost );
cudaMemcpy(pressg,pressg_d,sizeof(double)*Offset,
cudaMemcpyDeviceToHost );
cudaFree(pressg_d);
cudaFree(velocity_d);
cudaFree(jbn_d);
cudaFree(phiS_d);
cudaFree(d_Phi_d);
cudaFree(d_d);
cudaFree(extVal_d);
cudaFree(rs_d);
return;
}
}
extern "C"{
void normalLEvelSetCUDA(
double* nV,
double* phiS,
double* jbn,
double deltaX,
double deltaY,
double deltaZ,
unsigned int Nx,
unsigned int Ny,
unsigned int Nz
)
{
unsigned int Offset = Nx*Ny*Nz;
double *nV_d, *phiS_d, *jbn_d;
cudaMalloc((void**)&nV_d,sizeof(double)*3*Offset);
cudaMalloc((void**)&phiS_d,sizeof(double)*Offset);
cudaMalloc((void**)&jbn_d,sizeof(double)*11*Offset);
cudaMemcpy(phiS_d, phiS, sizeof(double)*Offset,
cudaMemcpyHostToDevice );
cudaMemcpy(jbn_d, jbn, sizeof(double)*11*Offset,
cudaMemcpyHostToDevice );
dim3 DimBlock(BLOCKDMX,BLOCKDMY,BLOCKDMZ);
dim3 DimGrid(GRIDMX,GRIDMY,GRIDMZ);
DevFirstOrder_LS<<<DimGrid, DimBlock>>>(
nV_d,
phiS_d,
jbn_d,
deltaX,
deltaY,
deltaZ,
Nx,
Ny,
Nz
);
// Returning values from Device to Host
cudaMemcpy(nV,nV_d,sizeof(double)*3*Offset,
cudaMemcpyDeviceToHost );
cudaFree(jbn_d);
cudaFree(phiS_d);
cudaFree(nV_d);
return;
}
}
extern "C"{
void extrapolVarCUDA(
double* valToExt,
double* phiS,
double* d_Phi,
double* jbn,
double deltaX,
double deltaY,
double deltaZ,
unsigned int Nx,
unsigned int Ny,
unsigned int Nz,
double dtext
)
{
unsigned int Offset = Nx*Ny*Nz;
double *valToExt_d, *phiS_d, *jbn_d,
*rs_d, *d_Phi_d, *extVal_d,
*d_d;
cudaMalloc((void**)&valToExt_d,sizeof(double)*Offset);
cudaMalloc((void**)&phiS_d,sizeof(double)*Offset);
cudaMalloc((void**)&d_Phi_d,sizeof(double)*3*Offset);
cudaMalloc((void**)&jbn_d,sizeof(double)*11*Offset);
cudaMalloc((void**)&extVal_d,sizeof(double)*Offset);
cudaMalloc((void**)&rs_d,sizeof(double)*Offset);
cudaMalloc((void**)&d_d,sizeof(double)*Offset);
cudaMemcpy(valToExt_d,valToExt,sizeof(double)*Offset,
cudaMemcpyHostToDevice );
cudaMemcpy(phiS_d, phiS, sizeof(double)*Offset,
cudaMemcpyHostToDevice );
cudaMemcpy(jbn_d, jbn, sizeof(double)*11*Offset,
cudaMemcpyHostToDevice );
cudaMemcpy(d_Phi_d,d_Phi,sizeof(double)*3*Offset,
cudaMemcpyHostToDevice );
dim3 DimBlock(BLOCKDMX,BLOCKDMY,BLOCKDMZ);
dim3 DimGrid(GRIDMX,GRIDMY,GRIDMZ);
// Extrapolating Velocity liquid variables
exta(valToExt_d, phiS_d, jbn_d, d_Phi_d, rs_d, d_d,
deltaX, deltaY, deltaZ, Nx, Ny, Nz,
dtext, 1);
printf(" Ext-Some Val \n");
// Returning values from Device to Host
cudaMemcpy(valToExt,valToExt_d,sizeof(double)*Offset,
cudaMemcpyDeviceToHost );
cudaFree(valToExt_d);
cudaFree(jbn_d);
cudaFree(phiS_d);
cudaFree(d_Phi_d);
cudaFree(d_d);
cudaFree(extVal_d);
cudaFree(rs_d);
return;
}
}
void exta(
double* extVal_d,
double* phiS_d,
double* jbn_d,
double* d_Phi_d,
double* rs_d,
double* d_d,
double deltaX, double deltaY, double deltaZ,
int Nx, int Ny, int Nz,
double dtext,
int Flag
)
{
dim3 DimBlock(BLOCKDMX,BLOCKDMY,BLOCKDMZ);
dim3 DimGrid(GRIDMX,GRIDMY,GRIDMZ);
printf("\n\n Extrapolating on CUDA Device: \n");
for(int itera = 1 ; itera <=10 ; itera++){
extrapolKernel<<<DimGrid, DimBlock>>>(
rs_d,
extVal_d, phiS_d, jbn_d, d_Phi_d,
deltaX, deltaY, deltaZ,
Nx, Ny, Nz,
Flag
);
RunGK_FirstS<<<DimGrid, DimBlock>>>( d_d, extVal_d,
dtext, rs_d, Nx, Ny, Nz);
extrapolKernel<<<DimGrid, DimBlock>>>(
rs_d, d_d,
phiS_d, jbn_d, d_Phi_d,
deltaX, deltaY, deltaZ,
Nx, Ny, Nz,
Flag);
RunGK_SecondS<<<DimGrid, DimBlock>>>( d_d,
extVal_d, d_d,
dtext, rs_d,
Nx, Ny, Nz);
extrapolKernel<<<DimGrid, DimBlock>>>(
rs_d, d_d,
phiS_d, jbn_d, d_Phi_d,
deltaX, deltaY, deltaZ,
Nx, Ny, Nz,
Flag);
RunGK_ThirdS<<<DimGrid, DimBlock>>>( extVal_d,
extVal_d, d_d,
dtext, rs_d,
Nx, Ny, Nz);
}
// check for error
cudaError_t error = cudaGetLastError();
if(error != cudaSuccess)
{
// print the CUDA error message and exit
printf("CUDA error: %s\n", cudaGetErrorString(error));
exit(-1);
}
}
void cuExtrapolation(
double* extVal_d,
double* phiS_d,
double* jbn_d,
double deltaX, double deltaY, double deltaZ,
int Nx, int Ny, int Nz,
double dtext,
int Flag
)
{
double *d_dPhi;
double *rs_d;
double *d_d;
int Offset = Nx*Ny*Nz;
cudaMalloc((void**)&d_dPhi, 3*sizeof(double)*Offset);
cudaMalloc((void**)&rs_d, sizeof(double)*Offset);
cudaMalloc((void**)&d_d, sizeof(double)*Offset);
int numGBX, numGBY,numGBZ;
dim3 dimBlock(10,10,5);
numGBX = Nx / 10;
numGBY = Ny / 10;
numGBZ = Nz / 5;
dim3 dimGrid(numGBX,numGBY,numGBZ);
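// Note: the hard-coded 10x10x5 block size assumes Nx and Ny are multiples of 10
// and Nz is a multiple of 5; otherwise the integer divisions above leave cells uncovered.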
DevFirstOrder_LS<<<dimGrid, dimBlock>>>(
d_dPhi,
phiS_d,
jbn_d,
1.0/deltaX,
1.0/deltaY,
1.0/deltaZ,
Nx,
Ny,
Nz
);
printf("\n\n Extrapolating on CUDA Device: \n");
for(int itera = 1 ; itera <=10 ; itera++){
extrapolKernel<<<dimGrid, dimBlock>>>(
rs_d,
extVal_d, phiS_d, jbn_d, d_dPhi,
1.0/deltaX, 1.0/deltaY, 1.0/deltaZ,
Nx, Ny, Nz,
Flag
);
RunGK_FirstS<<<dimGrid, dimBlock>>>( d_d, extVal_d,
dtext, rs_d, Nx, Ny, Nz);
extrapolKernel<<<dimGrid, dimBlock>>>(
rs_d, d_d,
phiS_d, jbn_d, d_dPhi,
1.0/deltaX, 1.0/deltaY, 1.0/deltaZ,
Nx, Ny, Nz,
Flag);
RunGK_SecondS<<<dimGrid, dimBlock>>>( d_d,
extVal_d, d_d,
dtext, rs_d,
Nx, Ny, Nz);
extrapolKernel<<<dimGrid, dimBlock>>>(
rs_d, d_d,
phiS_d, jbn_d, d_dPhi,
1.0/deltaX, 1.0/deltaY, 1.0/deltaZ,
Nx, Ny, Nz,
Flag);
RunGK_ThirdS<<<dimGrid, dimBlock>>>( extVal_d,
extVal_d, d_d,
dtext, rs_d,
Nx, Ny, Nz);
}
// check for error
cudaError_t error = cudaGetLastError();
if(error != cudaSuccess)
{
// print the CUDA error message and exit
printf("CUDA error: %s\n", cudaGetErrorString(error));
exit(-1);
}
cudaFree(d_d);
cudaFree(d_dPhi);
cudaFree(rs_d);
}
|
f2889c1ea526599fc3072600f77e5d71d37e142b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/ep/include/primitive/add.h"
#include "oneflow/core/ep/cuda/primitive/type_seq.h"
#include "oneflow/core/cuda/elementwise.cuh"
#include "oneflow/core/ep/cuda/cuda_stream.h"
#include "oneflow/core/device/cuda_pseudo_bfloat16.h"
namespace oneflow {
namespace ep {
namespace primitive {
namespace {
template<typename... Args>
struct AddFunctor;
template<typename T>
struct AddFunctor<T> {
__device__ T operator()(T x) const { return x; }
};
template<typename T, typename U, typename... Args>
struct AddFunctor<T, U, Args...> {
__device__ T operator()(T x0, U x1, Args... xs) const {
return x0 + AddFunctor<U, Args...>()(x1, xs...);
}
};
template<typename T, typename... Args>
__global__ void AddGpu(const Args*... srcs, T* dst, size_t count) {
CUDA_1D_KERNEL_LOOP_T(size_t, i, count) { dst[i] = AddFunctor<Args...>()(srcs[i]...); }
}
template<typename T, typename... Args>
void LaunchAddGpu(hipStream_t stream, const Args*... srcs, T* dst, size_t count) {
hipLaunchKernelGGL(( AddGpu<T, Args...>)
, dim3(BlocksNum4ThreadsNum(count)), dim3(kCudaThreadsNumPerBlock), 0, stream, srcs..., dst, count);
}
template<typename T>
void DispatchLaunch(hipStream_t stream, const T* const* srcs, size_t arity, T* dst, size_t count) {
if (arity == 0) {
OF_CUDA_CHECK(hipMemsetAsync(dst, 0, count * sizeof(T), stream));
} else if (arity == 1) {
OF_CUDA_CHECK(hipMemcpyAsync(dst, srcs[0], count * sizeof(T), hipMemcpyDefault, stream));
} else if (arity == 2) {
OF_CUDA_CHECK((cuda::elementwise::Binary<AddFunctor<T, T>, T, T, T>(
AddFunctor<T, T>(), count, dst, srcs[0], srcs[1], stream)));
} else if (arity == 3) {
OF_CUDA_CHECK((cuda::elementwise::Ternary<AddFunctor<T, T, T>, T, T, T, T>(
AddFunctor<T, T, T>(), count, dst, srcs[0], srcs[1], srcs[2], stream)));
} else if (arity == 4) {
LaunchAddGpu<T, T, T, T, T>(stream, srcs[0], srcs[1], srcs[2], srcs[3], dst, count);
} else if (arity == 5) {
LaunchAddGpu<T, T, T, T, T, T>(stream, srcs[0], srcs[1], srcs[2], srcs[3], srcs[4], dst, count);
} else if (arity == 6) {
LaunchAddGpu<T, T, T, T, T, T, T>(stream, srcs[0], srcs[1], srcs[2], srcs[3], srcs[4], srcs[5],
dst, count);
} else if (arity == 7) {
LaunchAddGpu<T, T, T, T, T, T, T, T>(stream, srcs[0], srcs[1], srcs[2], srcs[3], srcs[4],
srcs[5], srcs[6], dst, count);
} else if (arity == 8) {
LaunchAddGpu<T, T, T, T, T, T, T, T, T>(stream, srcs[0], srcs[1], srcs[2], srcs[3], srcs[4],
srcs[5], srcs[6], srcs[7], dst, count);
} else {
DispatchLaunch(stream, srcs + 7, arity - 7, dst, count);
LaunchAddGpu<T, T, T, T, T, T, T, T, T>(stream, srcs[0], srcs[1], srcs[2], srcs[3], srcs[4],
srcs[5], srcs[6], dst, dst, count);
}
}
template<typename T>
class AddImpl : public Add {
public:
OF_DISALLOW_COPY_AND_MOVE(AddImpl);
AddImpl() = default;
~AddImpl() override = default;
using Add::Launch;
void Launch(Stream* stream, const void* const* srcs, size_t arity, void* dst,
size_t count) override {
hipStream_t cuda_stream = stream->As<CudaStream>()->cuda_stream();
DispatchLaunch(cuda_stream, reinterpret_cast<const T* const*>(srcs), arity,
reinterpret_cast<T*>(dst), count);
}
};
template<typename T>
std::unique_ptr<Add> NewAdd() {
return std::unique_ptr<Add>(new AddImpl<T>());
}
class AddFactoryImpl : public AddFactory {
public:
OF_DISALLOW_COPY_AND_MOVE(AddFactoryImpl);
AddFactoryImpl() = default;
~AddFactoryImpl() override = default;
std::unique_ptr<Add> New(DataType data_type) override {
#define MAKE_NEW_ADD_ENTRY(type_cpp, type_proto) {type_proto, NewAdd<type_cpp>},
static const std::map<DataType, std::function<std::unique_ptr<Add>()>> new_add_handle{
OF_PP_FOR_EACH_TUPLE(MAKE_NEW_ADD_ENTRY, CUDA_PRIMITIVE_ALL_TYPE_SEQ)};
#undef MAKE_NEW_ADD_ENTRY
const auto it = new_add_handle.find(data_type);
if (it != new_add_handle.end()) {
return it->second();
} else {
return nullptr;
}
}
};
REGISTER_PRIMITIVE_FACTORY(DeviceType::kGPU, AddFactory, AddFactoryImpl);
} // namespace
} // namespace primitive
} // namespace ep
} // namespace oneflow
|
f2889c1ea526599fc3072600f77e5d71d37e142b.cu
|
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/ep/include/primitive/add.h"
#include "oneflow/core/ep/cuda/primitive/type_seq.h"
#include "oneflow/core/cuda/elementwise.cuh"
#include "oneflow/core/ep/cuda/cuda_stream.h"
#include "oneflow/core/device/cuda_pseudo_bfloat16.h"
namespace oneflow {
namespace ep {
namespace primitive {
namespace {
template<typename... Args>
struct AddFunctor;
template<typename T>
struct AddFunctor<T> {
__device__ T operator()(T x) const { return x; }
};
template<typename T, typename U, typename... Args>
struct AddFunctor<T, U, Args...> {
__device__ T operator()(T x0, U x1, Args... xs) const {
return x0 + AddFunctor<U, Args...>()(x1, xs...);
}
};
template<typename T, typename... Args>
__global__ void AddGpu(const Args*... srcs, T* dst, size_t count) {
CUDA_1D_KERNEL_LOOP_T(size_t, i, count) { dst[i] = AddFunctor<Args...>()(srcs[i]...); }
}
template<typename T, typename... Args>
void LaunchAddGpu(cudaStream_t stream, const Args*... srcs, T* dst, size_t count) {
AddGpu<T, Args...>
<<<BlocksNum4ThreadsNum(count), kCudaThreadsNumPerBlock, 0, stream>>>(srcs..., dst, count);
}
template<typename T>
void DispatchLaunch(cudaStream_t stream, const T* const* srcs, size_t arity, T* dst, size_t count) {
if (arity == 0) {
OF_CUDA_CHECK(cudaMemsetAsync(dst, 0, count * sizeof(T), stream));
} else if (arity == 1) {
OF_CUDA_CHECK(cudaMemcpyAsync(dst, srcs[0], count * sizeof(T), cudaMemcpyDefault, stream));
} else if (arity == 2) {
OF_CUDA_CHECK((cuda::elementwise::Binary<AddFunctor<T, T>, T, T, T>(
AddFunctor<T, T>(), count, dst, srcs[0], srcs[1], stream)));
} else if (arity == 3) {
OF_CUDA_CHECK((cuda::elementwise::Ternary<AddFunctor<T, T, T>, T, T, T, T>(
AddFunctor<T, T, T>(), count, dst, srcs[0], srcs[1], srcs[2], stream)));
} else if (arity == 4) {
LaunchAddGpu<T, T, T, T, T>(stream, srcs[0], srcs[1], srcs[2], srcs[3], dst, count);
} else if (arity == 5) {
LaunchAddGpu<T, T, T, T, T, T>(stream, srcs[0], srcs[1], srcs[2], srcs[3], srcs[4], dst, count);
} else if (arity == 6) {
LaunchAddGpu<T, T, T, T, T, T, T>(stream, srcs[0], srcs[1], srcs[2], srcs[3], srcs[4], srcs[5],
dst, count);
} else if (arity == 7) {
LaunchAddGpu<T, T, T, T, T, T, T, T>(stream, srcs[0], srcs[1], srcs[2], srcs[3], srcs[4],
srcs[5], srcs[6], dst, count);
} else if (arity == 8) {
LaunchAddGpu<T, T, T, T, T, T, T, T, T>(stream, srcs[0], srcs[1], srcs[2], srcs[3], srcs[4],
srcs[5], srcs[6], srcs[7], dst, count);
} else {
DispatchLaunch(stream, srcs + 7, arity - 7, dst, count);
LaunchAddGpu<T, T, T, T, T, T, T, T, T>(stream, srcs[0], srcs[1], srcs[2], srcs[3], srcs[4],
srcs[5], srcs[6], dst, dst, count);
}
}
template<typename T>
class AddImpl : public Add {
public:
OF_DISALLOW_COPY_AND_MOVE(AddImpl);
AddImpl() = default;
~AddImpl() override = default;
using Add::Launch;
void Launch(Stream* stream, const void* const* srcs, size_t arity, void* dst,
size_t count) override {
cudaStream_t cuda_stream = stream->As<CudaStream>()->cuda_stream();
DispatchLaunch(cuda_stream, reinterpret_cast<const T* const*>(srcs), arity,
reinterpret_cast<T*>(dst), count);
}
};
template<typename T>
std::unique_ptr<Add> NewAdd() {
return std::unique_ptr<Add>(new AddImpl<T>());
}
class AddFactoryImpl : public AddFactory {
public:
OF_DISALLOW_COPY_AND_MOVE(AddFactoryImpl);
AddFactoryImpl() = default;
~AddFactoryImpl() override = default;
std::unique_ptr<Add> New(DataType data_type) override {
#define MAKE_NEW_ADD_ENTRY(type_cpp, type_proto) {type_proto, NewAdd<type_cpp>},
static const std::map<DataType, std::function<std::unique_ptr<Add>()>> new_add_handle{
OF_PP_FOR_EACH_TUPLE(MAKE_NEW_ADD_ENTRY, CUDA_PRIMITIVE_ALL_TYPE_SEQ)};
#undef MAKE_NEW_ADD_ENTRY
const auto it = new_add_handle.find(data_type);
if (it != new_add_handle.end()) {
return it->second();
} else {
return nullptr;
}
}
};
REGISTER_PRIMITIVE_FACTORY(DeviceType::kGPU, AddFactory, AddFactoryImpl);
} // namespace
} // namespace primitive
} // namespace ep
} // namespace oneflow
|
0fd1e2954c7a546675a485cafa04938ee7e18029.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
09/12/2019
hmhuan-1612858
*/
#include <stdio.h>
#include <stdint.h>
#define CHECK(call) \
{ \
const hipError_t error = call; \
if (error != hipSuccess) \
{ \
fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__); \
fprintf(stderr, "code: %d, reason: %s\n", error, \
hipGetErrorString(error)); \
exit(1); \
} \
}
struct GpuTimer
{
hipEvent_t start;
hipEvent_t stop;
GpuTimer()
{
hipEventCreate(&start);
hipEventCreate(&stop);
}
~GpuTimer()
{
hipEventDestroy(start);
hipEventDestroy(stop);
}
void Start()
{
hipEventRecord(start, 0);
hipEventSynchronize(start);
}
void Stop()
{
hipEventRecord(stop, 0);
}
float Elapsed()
{
float elapsed;
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsed, start, stop);
return elapsed;
}
};
// Sequential radix sort
// Assume: nBits (k in slides) in {1, 2, 4, 8, 16}
void sortByHost(const uint32_t * in, int n,
uint32_t * out,
int nBits)
{
int nBins = 1 << nBits; // 2^nBits
int * hist = (int *)malloc(nBins * sizeof(int));
int * histScan = (int *)malloc(nBins * sizeof(int));
// In each counting sort, we sort data in "src" and write result to "dst"
// Then, we swap these 2 pointers and go to the next counting sort
// At first, we assign "src = in" and "dest = out"
// However, the data pointed by "in" is read-only
// --> we create a copy of this data and assign "src" to the address of this copy
uint32_t * src = (uint32_t *)malloc(n * sizeof(uint32_t));
memcpy(src, in, n * sizeof(uint32_t));
uint32_t * originalSrc = src; // Use originalSrc to free memory later
uint32_t * dst = out;
// Loop from LSD (Least Significant Digit) to MSD (Most Significant Digit)
// (Each digit consists of nBits bits)
// In each loop, sort elements according to the current digit
// (using STABLE counting sort)
for (int bit = 0; bit < sizeof(uint32_t) * 8; bit += nBits)
{
// TODO: Compute "hist" of the current digit // histogram cua mang in xet tren digit hien tai
memset(hist, 0, nBins * sizeof(int));
for (int i = 0; i < n; i++)
{
int bin = (src[i] >> bit) & (nBins - 1);
hist[bin]++;
}
// TODO: Scan "hist" (exclusively) and save the result to "histScan"
histScan[0] = 0;
for (int i = 1; i < nBins; i++)
histScan[i] = histScan[i - 1] + hist[i - 1];
// TODO: From "histScan", scatter elements in "src" to correct locations in "dst"
for (int i = 0; i < n; i++)
{
int bin = (src[i] >> bit) & (nBins - 1);
dst[histScan[bin]] = src[i];
histScan[bin]++; // (elements in the same bin are written next to each other)
}
// TODO: Swap "src" and "dst"
uint32_t * temp = src;
src = dst;
dst = temp;
}
// TODO: Copy result to "out"
memcpy(out, src, n * sizeof(uint32_t));
// Free memories
free(hist);
free(histScan);
free(originalSrc);
}
// histogram kernel
__global__ void computeHistKernel(uint32_t * in, int n, int * hist, int nBins, int bit)
{
// TODO
// Each block computes its local hist using atomic on SMEM
extern __shared__ int s_bin[];
int i = blockIdx.x * blockDim.x + threadIdx.x;
int delta = (nBins - 1) / blockDim.x + 1;
for (int i = 0; i < delta; i++)
{
int id = threadIdx.x + i * blockDim.x;
if (id < nBins)
s_bin[id] = 0;
}
__syncthreads();
if (i < n)
{
int bin = (in[i] >> bit) & (nBins - 1);
atomicAdd(&s_bin[bin], 1);
}
__syncthreads();
// Each block adds its local hist to global hist using atomic on GMEM
for (int i = 0; i < delta; i++)
{
int id = threadIdx.x + i * blockDim.x;
if (id < nBins)
atomicAdd(&hist[id], s_bin[id]);
}
}
// scan kernel
__global__ void scanBlkKernel(int * in, int n, int * out, int * blkSums)
{
// TODO
extern __shared__ int s_data[];
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i > 0 && i < n)
s_data[threadIdx.x] = in[i - 1];
else
s_data[threadIdx.x] = 0;
__syncthreads();
for (int stride = 1; stride < blockDim.x; stride *= 2)
{
int val = 0;
if (threadIdx.x >= stride)
val = s_data[threadIdx.x - stride];
__syncthreads();
s_data[threadIdx.x] += val;
__syncthreads();
}
if (i < n)
out[i] = s_data[threadIdx.x];
if (blkSums != NULL)
blkSums[blockIdx.x] = s_data[blockDim.x - 1];
}
// TODO: You can define necessary functions here
__global__ void addBlkSums(int * in, int n, int* blkSums)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n && i >= blockDim.x)
in[i] += blkSums[blockIdx.x - 1];
}
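// scanBlkKernel produces a per-block exclusive scan and stores each block's running
// total in blkSums; addBlkSums then adds the preceding block's total to later blocks.
// Since blkSums itself is not prefix-summed before addBlkSums, this appears to assume
// the histogram fits in at most two scan blocks (nBins <= 2 * scan block size).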
// (Partially) Parallel radix sort: implement parallel histogram and parallel scan in counting sort
// Assume: nBits (k in slides) in {1, 2, 4, 8, 16}
// Why "int * blockSizes"?
// Because we may want different block sizes for different kernels:
// blockSizes[0] for the histogram kernel
// blockSizes[1] for the scan kernel
void sortByDevice(const uint32_t * in, int n,
uint32_t * out,
int nBits, int * blockSizes)
{
// TODO
int nBins = 1 << nBits; // 2^nBits
int * hist = (int *)malloc(nBins * sizeof(int));
int * histScan = (int *)malloc(nBins * sizeof(int));
uint32_t * src = (uint32_t *)malloc(n * sizeof(uint32_t));
memcpy(src, in, n * sizeof(uint32_t));
uint32_t * originalSrc = src; // Use originalSrc to free memory later
uint32_t * dst = out;
dim3 blkSize1(blockSizes[0]); // block size for histogram kernel
dim3 blkSize2(blockSizes[1]); // block size for scan kernel
dim3 gridSize1((n - 1) / blkSize1.x + 1); // grid size for histogram kernel
dim3 gridSize2((nBins - 1)/ blkSize2.x + 1); // grid size for scan kernel
size_t smemSize = nBins * sizeof(int); // shared memory size for histogram kernel
int * d_hist, *d_histScan, * d_blkSums;
uint32_t *d_src;
int * blkSums;
blkSums = (int*)malloc(gridSize2.x * sizeof(int));
size_t sMemSize = blkSize2.x * sizeof(int); // shared memory size for scan kernel
CHECK(hipMalloc(&d_src, n * sizeof(uint32_t)));
CHECK(hipMalloc(&d_hist, nBins * sizeof(int)));
CHECK(hipMalloc(&d_histScan, nBins * sizeof(int)));
CHECK(hipMalloc(&d_blkSums, gridSize2.x * sizeof(int)));
CHECK(hipMemcpy(d_src, src, n * sizeof(uint32_t), hipMemcpyHostToDevice));
//printf("nBins: %d\n", nBins);
// Loop from LSD (Least Significant Digit) to MSD (Most Significant Digit)
// (Each digit consists of nBits bits)
// In each loop, sort elements according to the current digit
// (using STABLE counting sort)
for (int bit = 0; bit < sizeof(uint32_t) * 8; bit += nBits)
{
// TODO: compute hist by Device
CHECK(hipMemset(d_hist, 0, nBins * sizeof(int)));
hipLaunchKernelGGL(( computeHistKernel), dim3(gridSize1), dim3(blkSize1), smemSize, 0, d_src, n, d_hist, nBins, bit);
hipDeviceSynchronize();
CHECK(hipGetLastError());
CHECK(hipMemcpy(hist, d_hist, nBins * sizeof(int), hipMemcpyDeviceToHost));
// TODO: exclusive scan
hipLaunchKernelGGL(( scanBlkKernel), dim3(gridSize2), dim3(blkSize2), sMemSize, 0, d_hist, nBins, d_histScan, d_blkSums);
hipDeviceSynchronize();
CHECK(hipGetLastError());
//CHECK(hipMemcpy(histScan, d_histScan, nBins * sizeof(int), hipMemcpyDeviceToHost));
//CHECK(hipMemcpy(blkSums, d_blkSums, gridSize2.x * sizeof(int), hipMemcpyDeviceToHost));
//for (int i = 1; i < gridSize2.x; i++)
// blkSums[i] += blkSums[i-1];
//for (int i = blkSize2.x; i < nBins; i++)
// histScan[i] += blkSums[(i - 1) / blkSize2.x];
hipLaunchKernelGGL(( addBlkSums), dim3(gridSize2), dim3(blkSize2), 0, 0, d_histScan, nBins, d_blkSums);
hipDeviceSynchronize();
CHECK(hipGetLastError());
CHECK(hipMemcpy(histScan, d_histScan, nBins * sizeof(int), hipMemcpyDeviceToHost));
// TODO: From "histScan", scatter elements in "src" to correct locations in "dst"
for (int i = 0; i < n; i++)
{
int bin = (src[i] >> bit) & (nBins - 1);
dst[histScan[bin]] = src[i];
histScan[bin]++;
}
// TODO: Swap "src" and "dst"
uint32_t * temp = src;
src = dst;
dst = temp;
}
CHECK(hipFree(d_src));
CHECK(hipFree(d_hist));
CHECK(hipFree(d_blkSums));
CHECK(hipFree(d_histScan));
// TODO: Copy result to "out"
memcpy(out, src, n * sizeof(uint32_t));
// Free memories
free(blkSums);
free(hist);
free(histScan);
free(originalSrc);
}
// Radix sort
void sort(const uint32_t * in, int n,
uint32_t * out,
int nBits,
bool useDevice=false, int * blockSizes=NULL)
{
GpuTimer timer;
timer.Start();
if (useDevice == false)
{
printf("\nRadix sort by host\n");
sortByHost(in, n, out, nBits);
}
else // use device
{
printf("\nRadix sort by device\n");
sortByDevice(in, n, out, nBits, blockSizes);
}
timer.Stop();
printf("Time: %.3f ms\n", timer.Elapsed());
}
void printDeviceInfo()
{
hipDeviceProp_t devProv;
CHECK(hipGetDeviceProperties(&devProv, 0));
printf("**********GPU info**********\n");
printf("Name: %s\n", devProv.name);
printf("Compute capability: %d.%d\n", devProv.major, devProv.minor);
printf("Num SMs: %d\n", devProv.multiProcessorCount);
printf("Max num threads per SM: %d\n", devProv.maxThreadsPerMultiProcessor);
printf("Max num warps per SM: %d\n", devProv.maxThreadsPerMultiProcessor / devProv.warpSize);
printf("GMEM: %zu byte\n", devProv.totalGlobalMem);
printf("SMEM per SM: %zu byte\n", devProv.sharedMemPerMultiprocessor);
printf("SMEM per block: %zu byte\n", devProv.sharedMemPerBlock);
printf("****************************\n");
}
void checkCorrectness(uint32_t * out, uint32_t * correctOut, int n)
{
for (int i = 0; i < n; i++)
{
if (out[i] != correctOut[i])
{
printf("INCORRECT :(\n");
return;
}
}
printf("CORRECT :)\n");
}
void printArray(uint32_t * a, int n)
{
for (int i = 0; i < n; i++)
printf("%i ", a[i]);
printf("\n");
}
int main(int argc, char ** argv)
{
// PRINT OUT DEVICE INFO
printDeviceInfo();
// SET UP INPUT SIZE
int n = 10; //(1 << 24) + 1; //24
printf("\nInput size: %d\n", n);
// ALLOCATE MEMORIES
size_t bytes = n * sizeof(uint32_t);
uint32_t * in = (uint32_t *)malloc(bytes);
uint32_t * out = (uint32_t *)malloc(bytes); // Device result
uint32_t * correctOut = (uint32_t *)malloc(bytes); // Host result
// SET UP INPUT DATA
for (int i = 0; i < n; i++)
in[i] = rand() % 20 + 1;
// SET UP NBITS
int nBits = 4; // Default
if (argc > 1)
nBits = atoi(argv[1]);
printf("\nNum bits per digit: %d\n", nBits);
// DETERMINE BLOCK SIZES
int blockSizes[2] = {512, 512}; // One for histogram, one for scan
if (argc == 4)
{
blockSizes[0] = atoi(argv[2]);
blockSizes[1] = atoi(argv[3]);
}
printf("\nHist block size: %d, scan block size: %d\n", blockSizes[0], blockSizes[1]);
// SORT BY HOST
sort(in, n, correctOut, nBits);
//printArray(correctOut, n);
// SORT BY DEVICE
sort(in, n, out, nBits, true, blockSizes);
//printArray(out, n);
checkCorrectness(out, correctOut, n);
// FREE MEMORIES
free(in);
free(out);
free(correctOut);
return EXIT_SUCCESS;
}
|
0fd1e2954c7a546675a485cafa04938ee7e18029.cu
|
/*
09/12/2019
hmhuan-1612858
*/
#include <stdio.h>
#include <stdint.h>
#define CHECK(call) \
{ \
const cudaError_t error = call; \
if (error != cudaSuccess) \
{ \
fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__); \
fprintf(stderr, "code: %d, reason: %s\n", error, \
cudaGetErrorString(error)); \
exit(1); \
} \
}
struct GpuTimer
{
cudaEvent_t start;
cudaEvent_t stop;
GpuTimer()
{
cudaEventCreate(&start);
cudaEventCreate(&stop);
}
~GpuTimer()
{
cudaEventDestroy(start);
cudaEventDestroy(stop);
}
void Start()
{
cudaEventRecord(start, 0);
cudaEventSynchronize(start);
}
void Stop()
{
cudaEventRecord(stop, 0);
}
float Elapsed()
{
float elapsed;
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed, start, stop);
return elapsed;
}
};
// Sequential radix sort
// Assume: nBits (k in slides) in {1, 2, 4, 8, 16}
void sortByHost(const uint32_t * in, int n,
uint32_t * out,
int nBits)
{
int nBins = 1 << nBits; // 2^nBits
int * hist = (int *)malloc(nBins * sizeof(int));
int * histScan = (int *)malloc(nBins * sizeof(int));
// In each counting sort, we sort data in "src" and write result to "dst"
// Then, we swap these 2 pointers and go to the next counting sort
// At first, we assign "src = in" and "dest = out"
// However, the data pointed by "in" is read-only
// --> we create a copy of this data and assign "src" to the address of this copy
uint32_t * src = (uint32_t *)malloc(n * sizeof(uint32_t));
memcpy(src, in, n * sizeof(uint32_t));
uint32_t * originalSrc = src; // Use originalSrc to free memory later
uint32_t * dst = out;
// Loop from LSD (Least Significant Digit) to MSD (Most Significant Digit)
// (Each digit consists of nBits bits)
// In each loop, sort elements according to the current digit
// (using STABLE counting sort)
for (int bit = 0; bit < sizeof(uint32_t) * 8; bit += nBits)
{
// TODO: Compute "hist" of the current digit // histogram cua mang in xet tren digit hien tai
memset(hist, 0, nBins * sizeof(int));
for (int i = 0; i < n; i++)
{
int bin = (src[i] >> bit) & (nBins - 1);
hist[bin]++;
}
// TODO: Scan "hist" (exclusively) and save the result to "histScan"
histScan[0] = 0;
for (int i = 1; i < nBins; i++)
histScan[i] = histScan[i - 1] + hist[i - 1];
// TODO: From "histScan", scatter elements in "src" to correct locations in "dst"
for (int i = 0; i < n; i++)
{
int bin = (src[i] >> bit) & (nBins - 1);
dst[histScan[bin]] = src[i];
histScan[bin]++; // (elements in the same bin are written next to each other)
}
// TODO: Swap "src" and "dst"
uint32_t * temp = src;
src = dst;
dst = temp;
}
// TODO: Copy result to "out"
memcpy(out, src, n * sizeof(uint32_t));
// Free memories
free(hist);
free(histScan);
free(originalSrc);
}
// histogram kernel
__global__ void computeHistKernel(uint32_t * in, int n, int * hist, int nBins, int bit)
{
// TODO
// Each block computes its local hist using atomic on SMEM
extern __shared__ int s_bin[];
int i = blockIdx.x * blockDim.x + threadIdx.x;
int delta = (nBins - 1) / blockDim.x + 1;
for (int i = 0; i < delta; i++)
{
int id = threadIdx.x + i * blockDim.x;
if (id < nBins)
s_bin[id] = 0;
}
__syncthreads();
if (i < n)
{
int bin = (in[i] >> bit) & (nBins - 1);
atomicAdd(&s_bin[bin], 1);
}
__syncthreads();
// Each block adds its local hist to global hist using atomic on GMEM
for (int i = 0; i < delta; i++)
{
int id = threadIdx.x + i * blockDim.x;
if (id < nBins)
atomicAdd(&hist[id], s_bin[id]);
}
}
// scan kernel
__global__ void scanBlkKernel(int * in, int n, int * out, int * blkSums)
{
// TODO
extern __shared__ int s_data[];
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i > 0 && i < n)
s_data[threadIdx.x] = in[i - 1];
else
s_data[threadIdx.x] = 0;
__syncthreads();
for (int stride = 1; stride < blockDim.x; stride *= 2)
{
int val = 0;
if (threadIdx.x >= stride)
val = s_data[threadIdx.x - stride];
__syncthreads();
s_data[threadIdx.x] += val;
__syncthreads();
}
if (i < n)
out[i] = s_data[threadIdx.x];
if (blkSums != NULL)
blkSums[blockIdx.x] = s_data[blockDim.x - 1];
}
// TODO: You can define necessary functions here
__global__ void addBlkSums(int * in, int n, int* blkSums)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n && i >= blockDim.x)
in[i] += blkSums[blockIdx.x - 1];
}
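// scanBlkKernel produces a per-block exclusive scan and stores each block's running
// total in blkSums; addBlkSums then adds the preceding block's total to later blocks.
// Since blkSums itself is not prefix-summed before addBlkSums, this appears to assume
// the histogram fits in at most two scan blocks (nBins <= 2 * scan block size).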
// (Partially) Parallel radix sort: implement parallel histogram and parallel scan in counting sort
// Assume: nBits (k in slides) in {1, 2, 4, 8, 16}
// Why "int * blockSizes"?
// Because we may want different block sizes for different kernels:
// blockSizes[0] for the histogram kernel
// blockSizes[1] for the scan kernel
void sortByDevice(const uint32_t * in, int n,
uint32_t * out,
int nBits, int * blockSizes)
{
// TODO
int nBins = 1 << nBits; // 2^nBits
int * hist = (int *)malloc(nBins * sizeof(int));
int * histScan = (int *)malloc(nBins * sizeof(int));
uint32_t * src = (uint32_t *)malloc(n * sizeof(uint32_t));
memcpy(src, in, n * sizeof(uint32_t));
uint32_t * originalSrc = src; // Use originalSrc to free memory later
uint32_t * dst = out;
dim3 blkSize1(blockSizes[0]); // block size for histogram kernel
dim3 blkSize2(blockSizes[1]); // block size for scan kernel
dim3 gridSize1((n - 1) / blkSize1.x + 1); // grid size for histogram kernel
dim3 gridSize2((nBins - 1)/ blkSize2.x + 1); // grid size for scan kernel
size_t smemSize = nBins * sizeof(int); // shared memory size for histogram kernel
int * d_hist, *d_histScan, * d_blkSums;
uint32_t *d_src;
int * blkSums;
blkSums = (int*)malloc(gridSize2.x * sizeof(int));
size_t sMemSize = blkSize2.x * sizeof(int); // shared memory size for scan kernel
CHECK(cudaMalloc(&d_src, n * sizeof(uint32_t)));
CHECK(cudaMalloc(&d_hist, nBins * sizeof(int)));
CHECK(cudaMalloc(&d_histScan, nBins * sizeof(int)));
CHECK(cudaMalloc(&d_blkSums, gridSize2.x * sizeof(int)));
CHECK(cudaMemcpy(d_src, src, n * sizeof(uint32_t), cudaMemcpyHostToDevice));
//printf("nBins: %d\n", nBins);
// Loop from LSD (Least Significant Digit) to MSD (Most Significant Digit)
// (Each digit consists of nBits bits)
// In each loop, sort elements according to the current digit
// (using STABLE counting sort)
for (int bit = 0; bit < sizeof(uint32_t) * 8; bit += nBits)
{
// TODO: compute hist by Device
CHECK(cudaMemset(d_hist, 0, nBins * sizeof(int)));
computeHistKernel<<<gridSize1, blkSize1, smemSize>>>(d_src, n, d_hist, nBins, bit);
cudaDeviceSynchronize();
CHECK(cudaGetLastError());
CHECK(cudaMemcpy(hist, d_hist, nBins * sizeof(int), cudaMemcpyDeviceToHost));
// TODO: exclusive scan
scanBlkKernel<<<gridSize2, blkSize2, sMemSize>>>(d_hist, nBins, d_histScan, d_blkSums);
cudaDeviceSynchronize();
CHECK(cudaGetLastError());
//CHECK(cudaMemcpy(histScan, d_histScan, nBins * sizeof(int), cudaMemcpyDeviceToHost));
//CHECK(cudaMemcpy(blkSums, d_blkSums, gridSize2.x * sizeof(int), cudaMemcpyDeviceToHost));
//for (int i = 1; i < gridSize2.x; i++)
// blkSums[i] += blkSums[i-1];
//for (int i = blkSize2.x; i < nBins; i++)
// histScan[i] += blkSums[(i - 1) / blkSize2.x];
addBlkSums<<<gridSize2, blkSize2>>>(d_histScan, nBins, d_blkSums);
cudaDeviceSynchronize();
CHECK(cudaGetLastError());
CHECK(cudaMemcpy(histScan, d_histScan, nBins * sizeof(int), cudaMemcpyDeviceToHost));
// TODO: From "histScan", scatter elements in "src" to correct locations in "dst"
for (int i = 0; i < n; i++)
{
int bin = (src[i] >> bit) & (nBins - 1);
dst[histScan[bin]] = src[i];
histScan[bin]++;
}
// TODO: Swap "src" and "dst"
uint32_t * temp = src;
src = dst;
dst = temp;
}
CHECK(cudaFree(d_src));
CHECK(cudaFree(d_hist));
CHECK(cudaFree(d_blkSums));
CHECK(cudaFree(d_histScan));
// TODO: Copy result to "out"
memcpy(out, src, n * sizeof(uint32_t));
// Free memories
free(blkSums);
free(hist);
free(histScan);
free(originalSrc);
}
// Radix sort
void sort(const uint32_t * in, int n,
uint32_t * out,
int nBits,
bool useDevice=false, int * blockSizes=NULL)
{
GpuTimer timer;
timer.Start();
if (useDevice == false)
{
printf("\nRadix sort by host\n");
sortByHost(in, n, out, nBits);
}
else // use device
{
printf("\nRadix sort by device\n");
sortByDevice(in, n, out, nBits, blockSizes);
}
timer.Stop();
printf("Time: %.3f ms\n", timer.Elapsed());
}
void printDeviceInfo()
{
cudaDeviceProp devProv;
CHECK(cudaGetDeviceProperties(&devProv, 0));
printf("**********GPU info**********\n");
printf("Name: %s\n", devProv.name);
printf("Compute capability: %d.%d\n", devProv.major, devProv.minor);
printf("Num SMs: %d\n", devProv.multiProcessorCount);
printf("Max num threads per SM: %d\n", devProv.maxThreadsPerMultiProcessor);
printf("Max num warps per SM: %d\n", devProv.maxThreadsPerMultiProcessor / devProv.warpSize);
printf("GMEM: %zu byte\n", devProv.totalGlobalMem);
printf("SMEM per SM: %zu byte\n", devProv.sharedMemPerMultiprocessor);
printf("SMEM per block: %zu byte\n", devProv.sharedMemPerBlock);
printf("****************************\n");
}
void checkCorrectness(uint32_t * out, uint32_t * correctOut, int n)
{
for (int i = 0; i < n; i++)
{
if (out[i] != correctOut[i])
{
printf("INCORRECT :(\n");
return;
}
}
printf("CORRECT :)\n");
}
void printArray(uint32_t * a, int n)
{
for (int i = 0; i < n; i++)
printf("%i ", a[i]);
printf("\n");
}
int main(int argc, char ** argv)
{
// PRINT OUT DEVICE INFO
printDeviceInfo();
// SET UP INPUT SIZE
int n = 10; //(1 << 24) + 1; //24
printf("\nInput size: %d\n", n);
// ALLOCATE MEMORIES
size_t bytes = n * sizeof(uint32_t);
uint32_t * in = (uint32_t *)malloc(bytes);
uint32_t * out = (uint32_t *)malloc(bytes); // Device result
uint32_t * correctOut = (uint32_t *)malloc(bytes); // Host result
// SET UP INPUT DATA
for (int i = 0; i < n; i++)
in[i] = rand() % 20 + 1;
// SET UP NBITS
int nBits = 4; // Default
if (argc > 1)
nBits = atoi(argv[1]);
printf("\nNum bits per digit: %d\n", nBits);
// DETERMINE BLOCK SIZES
int blockSizes[2] = {512, 512}; // One for histogram, one for scan
if (argc == 4)
{
blockSizes[0] = atoi(argv[2]);
blockSizes[1] = atoi(argv[3]);
}
printf("\nHist block size: %d, scan block size: %d\n", blockSizes[0], blockSizes[1]);
// SORT BY HOST
sort(in, n, correctOut, nBits);
//printArray(correctOut, n);
// SORT BY DEVICE
sort(in, n, out, nBits, true, blockSizes);
//printArray(out, n);
checkCorrectness(out, correctOut, n);
// FREE MEMORIES
free(in);
free(out);
free(correctOut);
return EXIT_SUCCESS;
}
|
475a610d385aa6690f31632368f02d00592eb1b3.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* PCA Principal Component Analysis on raw data
* This implementation bases on matlab pca implementation
*/
#include "hip/hip_runtime.h"
#include "helper_cuda.h"
#include "device_launch_parameters.h"
#include "cusolverDn.h"
#include "pca.cuh"
#define imin(X, Y) ((X) < (Y) ? (X) : (Y))
__global__ void pca_gpu(float* tab, int n){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n){
tab[i] = i*i;
}
}
void checkCuSolverErrors(cusolverStatus_t code){
if(code){
fprintf(stderr, "Cuda solver error code %d\n", static_cast<unsigned int>(code));
hipDeviceReset();
// Make sure we call CUDA Device Reset before exiting
exit(EXIT_FAILURE);
}
}
void runPCA(nifti_data_type * data, int m, int n){
if (m < n){
fprintf(stderr, "rows parameter (m) is smaller than columns parameter (n)\n");
exit(EXIT_FAILURE);
}
checkCudaErrors(hipSetDevice(0));
//initialize cusolverDn
hipsolverDnHandle_t handle = NULL;
hipsolverDnCreate(&handle); //sprawdzac checkCudaErrors
//allocate memory
nifti_data_type * dev_A;
checkCudaErrors(hipMalloc(&dev_A, m*n*sizeof(nifti_data_type)));
// copy data from cpu to gpu memory
checkCudaErrors(hipMemcpy(dev_A, data, m*n*sizeof(nifti_data_type), hipMemcpyHostToDevice));
// calculate the size needed for pre-allocated buffer
// xy - number of rows, zv - number of columns
int Lwork;
checkCuSolverErrors(hipsolverDnSgesvd_bufferSize(handle, m, n, &Lwork));
//prepare arguments for cusolver svd
char jobu = 'A';
char jobvt = 'A';
int *devInfo; checkCudaErrors(hipMalloc(&devInfo, sizeof(int)));
int lda = m; // leading dimension is equal to m ?? (or n ??)
int ldu = m;
int ldvt = n;
// below there are some notes from the cuda toolkit cusolver documentation
// Note that the routine returns V^H, not V.
// Remark 1: gesvd only supports m >= n. VERY IMPORTANT!
// Remark 2: gesvd only supports jobu='A' and jobvt='A' and returns matrices U and V^H.
// rwork - needed for data types C,Z
printf("m = %d, n = %d, Lwork = %d\n", m, n, Lwork);
nifti_data_type * S, *U, *VT, *Work, *rwork;
checkCudaErrors(hipMalloc(&S, imin(m,n)*sizeof(nifti_data_type)));
checkCudaErrors(hipMalloc(&U, ldu*m*sizeof(nifti_data_type)));
checkCudaErrors(hipMalloc(&VT, ldvt*n*sizeof(nifti_data_type)));
checkCudaErrors(hipMalloc(&Work, Lwork*sizeof(nifti_data_type)));
//checkCudaErrors(hipMalloc(&rwork, 5*imin(m,n)*sizeof(nifti_data_type)));
// do we really need rwork??
// run cusolver svd
printf("before run cusolver svd\n");
checkCuSolverErrors(hipsolverDnSgesvd(handle, jobu, jobvt, m, n, dev_A, lda, S, U, ldu, VT, ldvt, Work, Lwork, rwork, devInfo));
int h_devInfo;
hipDeviceSynchronize();
checkCudaErrors(hipMemcpy(&h_devInfo, devInfo, sizeof(int), hipMemcpyDeviceToHost));
printf("devInfo %d\n", h_devInfo);
hipEvent_t start, stop;
float elapsedTime;
checkCudaErrors(hipEventCreate(&start));
checkCudaErrors(hipEventCreate(&stop));
checkCudaErrors(hipEventRecord(start, 0));
// call kernel function here
//pca_gpu<<<64, 64>>>(dev_A, m*n);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipEventRecord(stop, 0));
checkCudaErrors(hipEventSynchronize(stop));
checkCudaErrors(hipEventElapsedTime(&elapsedTime, start, stop));
checkCudaErrors(hipEventDestroy(start));
checkCudaErrors(hipEventDestroy(stop));
//copy results from gpu memory to cpu
//checkCudaErrors(hipMemcpy(c, dev_A, m*n*sizeof(float), hipMemcpyDeviceToHost));
//nifti_data_type * diagonalMatrix = (nifti_data_type *) malloc(imin(m,n)*sizeof(nifti_data_type));
//checkCudaErrors(hipMemcpy(diagonalMatrix, S, imin(m,n)*sizeof(nifti_data_type), hipMemcpyDeviceToHost));
//int k = imin(m,n);
//free(diagonalMatrix);
//free gpu memory
checkCudaErrors(hipFree(dev_A));
checkCudaErrors(hipFree(S));
checkCudaErrors(hipFree(U));
checkCudaErrors(hipFree(VT));
checkCudaErrors(hipFree(Work));
checkCudaErrors(hipFree(rwork));
checkCudaErrors(hipFree(devInfo));
hipsolverDnDestroy(handle); // should be checked with checkCudaErrors
checkCudaErrors(hipDeviceReset()); // for the debugger
//free(c);
printf("Kernel-only time: %f ms\n", elapsedTime);
return;
}
|
475a610d385aa6690f31632368f02d00592eb1b3.cu
|
/*
* PCA Principal Component Analysis on raw data
* This implementation bases on matlab pca implementation
*/
#include "cuda_runtime.h"
#include "helper_cuda.h"
#include "device_launch_parameters.h"
#include "cusolverDn.h"
#include "pca.cuh"
#define imin(X, Y) ((X) < (Y) ? (X) : (Y))
__global__ void pca_gpu(float* tab, int n){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n){
tab[i] = i*i;
}
}
void checkCuSolverErrors(cusolverStatus_t code){
if(code){
fprintf(stderr, "Cuda solver error code %d\n", static_cast<unsigned int>(code));
cudaDeviceReset();
// Make sure we call CUDA Device Reset before exiting
exit(EXIT_FAILURE);
}
}
void runPCA(nifti_data_type * data, int m, int n){
if (m < n){
fprintf(stderr, "rows parameter (m) is smaller than columns parameter (n)\n");
exit(EXIT_FAILURE);
}
checkCudaErrors(cudaSetDevice(0));
//initialize cusolverDn
cusolverDnHandle_t handle = NULL;
cusolverDnCreate(&handle); //sprawdzac checkCudaErrors
//allocate memory
nifti_data_type * dev_A;
checkCudaErrors(cudaMalloc(&dev_A, m*n*sizeof(nifti_data_type)));
// copy data from cpu to gpu memory
checkCudaErrors(cudaMemcpy(dev_A, data, m*n*sizeof(nifti_data_type), cudaMemcpyHostToDevice));
// calculate the size needed for pre-allocated buffer
// xy - number of rows, zv - number of columns
int Lwork;
checkCuSolverErrors(cusolverDnSgesvd_bufferSize(handle, m, n, &Lwork));
//prepare arguments for cusolver svd
char jobu = 'A';
char jobvt = 'A';
int *devInfo; checkCudaErrors(cudaMalloc(&devInfo, sizeof(int)));
int lda = m; // leading dimension is equal to m ?? (or n ??)
int ldu = m;
int ldvt = n;
// below there are some notes from the cuda toolkit cusolver documentation
// Note that the routine returns V^H, not V.
// Remark 1: gesvd only supports m >= n. VERY IMPORTANT!
// Remark 2: gesvd only supports jobu='A' and jobvt='A' and returns matrices U and V^H.
// rwork - needed for data types C,Z
printf("m = %d, n = %d, Lwork = %d\n", m, n, Lwork);
nifti_data_type * S, *U, *VT, *Work, *rwork;
checkCudaErrors(cudaMalloc(&S, imin(m,n)*sizeof(nifti_data_type)));
checkCudaErrors(cudaMalloc(&U, ldu*m*sizeof(nifti_data_type)));
checkCudaErrors(cudaMalloc(&VT, ldvt*n*sizeof(nifti_data_type)));
checkCudaErrors(cudaMalloc(&Work, Lwork*sizeof(nifti_data_type)));
//checkCudaErrors(cudaMalloc(&rwork, 5*imin(m,n)*sizeof(nifti_data_type)));
// do we really need rwork??
// run cusolver svd
printf("before run cusolver svd\n");
checkCuSolverErrors(cusolverDnSgesvd(handle, jobu, jobvt, m, n, dev_A, lda, S, U, ldu, VT, ldvt, Work, Lwork, rwork, devInfo));
int h_devInfo;
cudaDeviceSynchronize();
checkCudaErrors(cudaMemcpy(&h_devInfo, devInfo, sizeof(int), cudaMemcpyDeviceToHost));
printf("devInfo %d\n", h_devInfo);
cudaEvent_t start, stop;
float elapsedTime;
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&stop));
checkCudaErrors(cudaEventRecord(start, 0));
// call kernel function here
//pca_gpu<<<64, 64>>>(dev_A, m*n);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaEventRecord(stop, 0));
checkCudaErrors(cudaEventSynchronize(stop));
checkCudaErrors(cudaEventElapsedTime(&elapsedTime, start, stop));
checkCudaErrors(cudaEventDestroy(start));
checkCudaErrors(cudaEventDestroy(stop));
//copy results from gpu memory to cpu
//checkCudaErrors(cudaMemcpy(c, dev_A, m*n*sizeof(float), cudaMemcpyDeviceToHost));
//nifti_data_type * diagonalMatrix = (nifti_data_type *) malloc(imin(m,n)*sizeof(nifti_data_type));
//checkCudaErrors(cudaMemcpy(diagonalMatrix, S, imin(m,n)*sizeof(nifti_data_type), cudaMemcpyDeviceToHost));
//int k = imin(m,n);
//free(diagonalMatrix);
//free gpu memory
checkCudaErrors(cudaFree(dev_A));
checkCudaErrors(cudaFree(S));
checkCudaErrors(cudaFree(U));
checkCudaErrors(cudaFree(VT));
checkCudaErrors(cudaFree(Work));
checkCudaErrors(cudaFree(rwork));
checkCudaErrors(cudaFree(devInfo));
cusolverDnDestroy(handle); // should be checked with checkCudaErrors
checkCudaErrors(cudaDeviceReset()); // for the debugger
//free(c);
printf("Kernel-only time: %f ms\n", elapsedTime);
return;
}
|
cb96848fd920fb3df762f2e009c80e70b12a5055.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "mainwindow.h"
#include "qcustomplot.h"
#include "ui_mainwindow.h"
int MainWindow::GPU_TestMemcpy(int device) {
int size = gpu_info[device].SMCount * gpu_info[device].MaxThreadsBlock;
hipSetDevice(device);
hipEvent_t event_start;
hipEvent_t event_end;
host_arr_double = new double[size];
host_arr_float = new float[size];
host_arr_int = new int[size];
host_arr_long = new long long[size];
//hipEvent_t event_start;
//hipEvent_t event_end;
for (int i = 0; i < size; i++) {
srand(time(NULL));
host_arr_double[i] = (double)(rand() % 100) / (double)1000;
srand(time(NULL));
host_arr_float[i] = (float)(rand() % 100) / (float)1000;
srand(time(NULL));
host_arr_int[i] = rand() % 100;
srand(time(NULL));
host_arr_long[i] = rand() % 100;
}
double *device_arr_double;
float *device_arr_float;
int *device_arr_int;
long long *device_arr_long;
// Memcpy HostToDevice
hipMalloc((void **)&device_arr_double, sizeof(double) * size);
hipMalloc((void **)&device_arr_float, sizeof(float) * size);
hipMalloc((void **)&device_arr_int, sizeof(int) * size);
hipMalloc((void **)&device_arr_long, sizeof(long long) * size);
hipEventCreate(&event_start);
hipEventCreate(&event_end);
hipEventRecord(event_start, 0);
for (int i = 0; i < 16; i++) {
hipMemcpy(device_arr_double, host_arr_double, sizeof(double) * size, hipMemcpyHostToDevice);
hipMemcpy(device_arr_float, host_arr_float, sizeof(float) * size, hipMemcpyHostToDevice);
hipMemcpy(device_arr_int, host_arr_int, sizeof(int) * size, hipMemcpyHostToDevice);
hipMemcpy(device_arr_long, host_arr_long, sizeof(long long) * size, hipMemcpyHostToDevice);
}
hipEventRecord(event_end, 0);
hipEventSynchronize(event_end);
hipEventElapsedTime(&gpu_info[device].h2d_copy_time, event_start, event_end);
hipEventDestroy(event_start);
hipEventDestroy(event_end);
// Memcpy DeviceToHost
hipEventCreate(&event_start);
hipEventCreate(&event_end);
hipEventRecord(event_start, 0);
for (int i = 0; i < 16; i++) {
hipMemcpy(host_arr_double, device_arr_double, sizeof(double) * size, hipMemcpyDeviceToHost);
hipMemcpy(host_arr_float, device_arr_float, sizeof(float) * size, hipMemcpyDeviceToHost);
hipMemcpy(host_arr_int, device_arr_int, sizeof(int) * size, hipMemcpyDeviceToHost);
hipMemcpy(host_arr_long, device_arr_long, sizeof(long long) * size, hipMemcpyDeviceToHost);
}
hipEventRecord(event_end, 0);
hipEventSynchronize(event_end);
hipEventElapsedTime(&gpu_info[device].d2h_copy_time, event_start, event_end);
hipEventDestroy(event_start);
hipEventDestroy(event_end);
//hipEventCreate(&event_start);
//hipEventCreate(&event_end);
//hipEventRecord(event_start, 0);
float *host_g2s = new float[size];
float *host_s2g = new float[size];
float *host_s2r = new float[size];
float *host_r2s = new float[size];
float *host_g2r = new float[size];
float *host_r2g = new float[size];
for (int i = 0; i < size; i++) {
host_g2s[i] = 0.0;
host_s2g[i] = 0.0;
host_s2r[i] = 0.0;
host_r2s[i] = 0.0;
host_g2r[i] = 0.0;
host_r2g[i] = 0.0;
}
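// The g2s/s2g/s2r/r2s/g2r/r2g buffers are presumably filled by TestDeviceMemcpy with
// per-thread cycle counts for global<->shared<->register copies; they are accumulated
// below and converted to time by dividing by the device clock rate.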
float *device_g2s;
float *device_s2g;
float *device_s2r;
float *device_r2s;
float *device_g2r;
float *device_r2g;
hipMalloc((void **)&device_g2s, sizeof(float) * size);
hipMalloc((void **)&device_s2g, sizeof(float) * size);
hipMalloc((void **)&device_s2r, sizeof(float) * size);
hipMalloc((void **)&device_r2s, sizeof(float) * size);
hipMalloc((void **)&device_g2r, sizeof(float) * size);
hipMalloc((void **)&device_r2g, sizeof(float) * size);
hipMemcpy(device_g2s, host_g2s, sizeof(float) * size, hipMemcpyHostToDevice);
hipMemcpy(device_s2g, host_s2g, sizeof(float) * size, hipMemcpyHostToDevice);
hipMemcpy(device_s2r, host_s2r, sizeof(float) * size, hipMemcpyHostToDevice);
hipMemcpy(device_r2s, host_r2s, sizeof(float) * size, hipMemcpyHostToDevice);
hipMemcpy(device_g2r, host_g2r, sizeof(float) * size, hipMemcpyHostToDevice);
hipMemcpy(device_r2g, host_r2g, sizeof(float) * size, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( TestDeviceMemcpy) , dim3(gpu_info[device].SMCount), dim3(gpu_info[device].MaxThreadsBlock) , 0, 0, device_arr_double, device_arr_float, device_arr_int, device_arr_long,
device_g2s, device_s2g, device_s2r, device_r2s, device_g2r, device_r2g, size);
hipMemcpy(host_g2s, device_g2s, sizeof(float) * size, hipMemcpyDeviceToHost);
hipMemcpy(host_s2g, device_s2g, sizeof(float) * size, hipMemcpyDeviceToHost);
hipMemcpy(host_s2r, device_s2r, sizeof(float) * size, hipMemcpyDeviceToHost);
hipMemcpy(host_r2s, device_r2s, sizeof(float) * size, hipMemcpyDeviceToHost);
hipMemcpy(host_g2r, device_g2r, sizeof(float) * size, hipMemcpyDeviceToHost);
hipMemcpy(host_r2g, device_r2g, sizeof(float) * size, hipMemcpyDeviceToHost);
gpu_info[device].g2s_copy_time = 0.0;
gpu_info[device].s2g_copy_time = 0.0;
gpu_info[device].s2r_copy_time = 0.0;
gpu_info[device].r2s_copy_time = 0.0;
gpu_info[device].g2r_copy_time = 0.0;
gpu_info[device].r2g_copy_time = 0.0;
for (int i = 0; i < size; i++) {
gpu_info[device].g2s_copy_time += host_g2s[i] / gpu_info[device].ClockRate;
gpu_info[device].s2g_copy_time += host_s2g[i] / gpu_info[device].ClockRate;
gpu_info[device].s2r_copy_time += host_s2r[i] / gpu_info[device].ClockRate;
gpu_info[device].r2s_copy_time += host_r2s[i] / gpu_info[device].ClockRate;
gpu_info[device].g2r_copy_time += host_g2r[i] / gpu_info[device].ClockRate;
gpu_info[device].r2g_copy_time += host_r2g[i] / gpu_info[device].ClockRate;
}
//hipEventRecord(event_end, 0);
//hipEventSynchronize(event_end);
//hipEventElapsedTime(&gpu_info[device].d2h_copy_time, event_start, event_end);
//hipEventDestroy(event_start);
//hipEventDestroy(event_end);
delete[] host_arr_double;
delete[] host_arr_float;
delete[] host_arr_int;
delete[] host_arr_long;
delete[] host_g2s;
delete[] host_s2g;
delete[] host_s2r;
delete[] host_r2s;
delete[] host_g2r;
delete[] host_r2g;
hipFree(device_arr_double);
hipFree(device_arr_float);
hipFree(device_arr_int);
hipFree(device_arr_long);
hipFree(device_g2s);
hipFree(device_s2g);
hipFree(device_s2r);
hipFree(device_r2s);
hipFree(device_g2r);
hipFree(device_r2g);
return 0;
}
|
cb96848fd920fb3df762f2e009c80e70b12a5055.cu
|
#include "mainwindow.h"
#include "qcustomplot.h"
#include "ui_mainwindow.h"
int MainWindow::GPU_TestMemcpy(int device) {
int size = gpu_info[device].SMCount * gpu_info[device].MaxThreadsBlock;
cudaSetDevice(device);
cudaEvent_t event_start;
cudaEvent_t event_end;
host_arr_double = new double[size];
host_arr_float = new float[size];
host_arr_int = new int[size];
host_arr_long = new long long[size];
//cudaEvent_t event_start;
//cudaEvent_t event_end;
for (int i = 0; i < size; i++) {
srand(time(NULL));
host_arr_double[i] = (double)(rand() % 100) / (double)1000;
srand(time(NULL));
host_arr_float[i] = (float)(rand() % 100) / (float)1000;
srand(time(NULL));
host_arr_int[i] = rand() % 100;
srand(time(NULL));
host_arr_long[i] = rand() % 100;
}
double *device_arr_double;
float *device_arr_float;
int *device_arr_int;
long long *device_arr_long;
// Memcpy HostToDevice
cudaMalloc((void **)&device_arr_double, sizeof(double) * size);
cudaMalloc((void **)&device_arr_float, sizeof(float) * size);
cudaMalloc((void **)&device_arr_int, sizeof(int) * size);
cudaMalloc((void **)&device_arr_long, sizeof(long long) * size);
cudaEventCreate(&event_start);
cudaEventCreate(&event_end);
cudaEventRecord(event_start, 0);
for (int i = 0; i < 16; i++) {
cudaMemcpy(device_arr_double, host_arr_double, sizeof(double) * size, cudaMemcpyHostToDevice);
cudaMemcpy(device_arr_float, host_arr_float, sizeof(float) * size, cudaMemcpyHostToDevice);
cudaMemcpy(device_arr_int, host_arr_int, sizeof(int) * size, cudaMemcpyHostToDevice);
cudaMemcpy(device_arr_long, host_arr_long, sizeof(long long) * size, cudaMemcpyHostToDevice);
}
cudaEventRecord(event_end, 0);
cudaEventSynchronize(event_end);
cudaEventElapsedTime(&gpu_info[device].h2d_copy_time, event_start, event_end);
cudaEventDestroy(event_start);
cudaEventDestroy(event_end);
// Memcpy DeviceToHost
cudaEventCreate(&event_start);
cudaEventCreate(&event_end);
cudaEventRecord(event_start, 0);
for (int i = 0; i < 16; i++) {
cudaMemcpy(host_arr_double, device_arr_double, sizeof(double) * size, cudaMemcpyDeviceToHost);
cudaMemcpy(host_arr_float, device_arr_float, sizeof(float) * size, cudaMemcpyDeviceToHost);
cudaMemcpy(host_arr_int, device_arr_int, sizeof(int) * size, cudaMemcpyDeviceToHost);
cudaMemcpy(host_arr_long, device_arr_long, sizeof(long long) * size, cudaMemcpyDeviceToHost);
}
cudaEventRecord(event_end, 0);
cudaEventSynchronize(event_end);
cudaEventElapsedTime(&gpu_info[device].d2h_copy_time, event_start, event_end);
cudaEventDestroy(event_start);
cudaEventDestroy(event_end);
//cudaEventCreate(&event_start);
//cudaEventCreate(&event_end);
//cudaEventRecord(event_start, 0);
float *host_g2s = new float[size];
float *host_s2g = new float[size];
float *host_s2r = new float[size];
float *host_r2s = new float[size];
float *host_g2r = new float[size];
float *host_r2g = new float[size];
for (int i = 0; i < size; i++) {
host_g2s[i] = 0.0;
host_s2g[i] = 0.0;
host_s2r[i] = 0.0;
host_r2s[i] = 0.0;
host_g2r[i] = 0.0;
host_r2g[i] = 0.0;
}
float *device_g2s;
float *device_s2g;
float *device_s2r;
float *device_r2s;
float *device_g2r;
float *device_r2g;
cudaMalloc((void **)&device_g2s, sizeof(float) * size);
cudaMalloc((void **)&device_s2g, sizeof(float) * size);
cudaMalloc((void **)&device_s2r, sizeof(float) * size);
cudaMalloc((void **)&device_r2s, sizeof(float) * size);
cudaMalloc((void **)&device_g2r, sizeof(float) * size);
cudaMalloc((void **)&device_r2g, sizeof(float) * size);
cudaMemcpy(device_g2s, host_g2s, sizeof(float) * size, cudaMemcpyHostToDevice);
cudaMemcpy(device_s2g, host_s2g, sizeof(float) * size, cudaMemcpyHostToDevice);
cudaMemcpy(device_s2r, host_s2r, sizeof(float) * size, cudaMemcpyHostToDevice);
cudaMemcpy(device_r2s, host_r2s, sizeof(float) * size, cudaMemcpyHostToDevice);
cudaMemcpy(device_g2r, host_g2r, sizeof(float) * size, cudaMemcpyHostToDevice);
cudaMemcpy(device_r2g, host_r2g, sizeof(float) * size, cudaMemcpyHostToDevice);
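// One block per SM with the maximum threads per block; the kernel is assumed to
// measure global/shared/register transfer costs per thread and write them into the
// six device arrays, which are copied back and accumulated below.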
TestDeviceMemcpy <<< gpu_info[device].SMCount, gpu_info[device].MaxThreadsBlock >>> (device_arr_double, device_arr_float, device_arr_int, device_arr_long,
device_g2s, device_s2g, device_s2r, device_r2s, device_g2r, device_r2g, size);
cudaMemcpy(host_g2s, device_g2s, sizeof(float) * size, cudaMemcpyDeviceToHost);
cudaMemcpy(host_s2g, device_s2g, sizeof(float) * size, cudaMemcpyDeviceToHost);
cudaMemcpy(host_s2r, device_s2r, sizeof(float) * size, cudaMemcpyDeviceToHost);
cudaMemcpy(host_r2s, device_r2s, sizeof(float) * size, cudaMemcpyDeviceToHost);
cudaMemcpy(host_g2r, device_g2r, sizeof(float) * size, cudaMemcpyDeviceToHost);
cudaMemcpy(host_r2g, device_r2g, sizeof(float) * size, cudaMemcpyDeviceToHost);
gpu_info[device].g2s_copy_time = 0.0;
gpu_info[device].s2g_copy_time = 0.0;
gpu_info[device].s2r_copy_time = 0.0;
gpu_info[device].r2s_copy_time = 0.0;
gpu_info[device].g2r_copy_time = 0.0;
gpu_info[device].r2g_copy_time = 0.0;
for (int i = 0; i < size; i++) {
gpu_info[device].g2s_copy_time += host_g2s[i] / gpu_info[device].ClockRate;
gpu_info[device].s2g_copy_time += host_s2g[i] / gpu_info[device].ClockRate;
gpu_info[device].s2r_copy_time += host_s2r[i] / gpu_info[device].ClockRate;
gpu_info[device].r2s_copy_time += host_r2s[i] / gpu_info[device].ClockRate;
gpu_info[device].g2r_copy_time += host_g2r[i] / gpu_info[device].ClockRate;
gpu_info[device].r2g_copy_time += host_r2g[i] / gpu_info[device].ClockRate;
}
//cudaEventRecord(event_end, 0);
//cudaEventSynchronize(event_end);
//cudaEventElapsedTime(&gpu_info[device].d2h_copy_time, event_start, event_end);
//cudaEventDestroy(event_start);
//cudaEventDestroy(event_end);
delete[] host_arr_double;
delete[] host_arr_float;
delete[] host_arr_int;
delete[] host_arr_long;
delete[] host_g2s;
delete[] host_s2g;
delete[] host_s2r;
delete[] host_r2s;
delete[] host_g2r;
delete[] host_r2g;
cudaFree(device_arr_double);
cudaFree(device_arr_float);
cudaFree(device_arr_int);
cudaFree(device_arr_long);
cudaFree(device_g2s);
cudaFree(device_s2g);
cudaFree(device_s2r);
cudaFree(device_r2s);
cudaFree(device_g2r);
cudaFree(device_r2g);
return 0;
}
|
bad30e9f5d7d807dbd1fd9da8718778827a7d8e6.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/types.hpp>
#include <cudf/utilities/traits.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/null_mask.hpp>
#include <cudf/detail/valid_if.cuh>
#include <cudf/detail/transform.hpp>
#include <cudf/detail/iterator.cuh>
#include <cudf/detail/nvtx/ranges.hpp>
namespace cudf {
namespace experimental {
namespace detail {
std::pair<std::unique_ptr<rmm::device_buffer>, cudf::size_type>
bools_to_mask(column_view const& input,
rmm::mr::device_memory_resource * mr,
hipStream_t stream) {
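// Builds a null mask from a BOOL8 column: a bit is marked valid when the element is
// true, with nulls treated as false. The second member of the returned pair is the
// count reported by valid_if for elements failing the predicate (the null count).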
CUDF_EXPECTS(input.type().id() == BOOL8, "Input is not of type bool");
if(input.size() == 0){
return std::make_pair(std::make_unique<rmm::device_buffer>(), 0);
}
auto input_device_view_ptr = column_device_view::create(input, stream);
auto input_device_view = *input_device_view_ptr;
auto pred = [] __device__ (bool element) {
return element;
};
if(input.nullable()) {
// Nulls are considered false
auto input_begin = make_null_replacement_iterator<bool>(input_device_view, false);
auto mask = detail::valid_if(input_begin,
input_begin + input.size(),
pred, stream, mr);
return std::make_pair(std::make_unique<rmm::device_buffer>(std::move(mask.first)), mask.second);
} else {
auto mask = detail::valid_if(input_device_view.begin<bool>(),
input_device_view.end<bool>(),
pred, stream, mr);
return std::make_pair(std::make_unique<rmm::device_buffer>(std::move(mask.first)), mask.second);
}
}
}// namespace detail
std::pair<std::unique_ptr<rmm::device_buffer>, cudf::size_type>
bools_to_mask(column_view const& input, rmm::mr::device_memory_resource * mr) {
CUDF_FUNC_RANGE();
return detail::bools_to_mask(input, mr);
}
}// namespace experimental
}// namespace cudf
|
bad30e9f5d7d807dbd1fd9da8718778827a7d8e6.cu
|
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/types.hpp>
#include <cudf/utilities/traits.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/null_mask.hpp>
#include <cudf/detail/valid_if.cuh>
#include <cudf/detail/transform.hpp>
#include <cudf/detail/iterator.cuh>
#include <cudf/detail/nvtx/ranges.hpp>
namespace cudf {
namespace experimental {
namespace detail {
std::pair<std::unique_ptr<rmm::device_buffer>, cudf::size_type>
bools_to_mask(column_view const& input,
rmm::mr::device_memory_resource * mr,
cudaStream_t stream) {
CUDF_EXPECTS(input.type().id() == BOOL8, "Input is not of type bool");
if(input.size() == 0){
return std::make_pair(std::make_unique<rmm::device_buffer>(), 0);
}
auto input_device_view_ptr = column_device_view::create(input, stream);
auto input_device_view = *input_device_view_ptr;
auto pred = [] __device__ (bool element) {
return element;
};
if(input.nullable()) {
// Nulls are considered false
auto input_begin = make_null_replacement_iterator<bool>(input_device_view, false);
auto mask = detail::valid_if(input_begin,
input_begin + input.size(),
pred, stream, mr);
return std::make_pair(std::make_unique<rmm::device_buffer>(std::move(mask.first)), mask.second);
} else {
auto mask = detail::valid_if(input_device_view.begin<bool>(),
input_device_view.end<bool>(),
pred, stream, mr);
return std::make_pair(std::make_unique<rmm::device_buffer>(std::move(mask.first)), mask.second);
}
}
}// namespace detail
std::pair<std::unique_ptr<rmm::device_buffer>, cudf::size_type>
bools_to_mask(column_view const& input, rmm::mr::device_memory_resource * mr) {
CUDF_FUNC_RANGE();
return detail::bools_to_mask(input, mr);
}
}// namespace experimental
}// namespace cudf
|
866b83bb1a4697e3a9591c42dac3f8b376f44e3f.hip
|
// !!! This is a file automatically generated by hipify!!!
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#include <hip/hip_runtime.h>
#include <hip/hip_fp16.h>
#include "functors.hpp"
#include "types.hpp"
#include "vector_traits.hpp"
#include "grid_stride_range.hpp"
#include "execution.hpp"
#include "../cuda4dnn/csl/stream.hpp"
#include "../cuda4dnn/csl/span.hpp"
#include "../cuda4dnn/kernels/scale_shift.hpp"
#include <opencv2/core.hpp>
#include <cstddef>
using namespace cv::dnn::cuda4dnn::csl;
using namespace cv::dnn::cuda4dnn::csl::device;
namespace cv { namespace dnn { namespace cuda4dnn { namespace kernels {
namespace raw {
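// generic_op_vec: element-wise activation with vectorized loads/stores. Each thread
// walks a grid-stride loop over the output in chunks of N elements, applies
// ActivationOp to every lane, and writes the chunk back.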
template <class T, class ActivationOp, std::size_t N>
__global__ void generic_op_vec(Span<T> output, View<T> input, const typename ActivationOp::Params params) {
using vector_type = get_vector_type_t<T, N>;
auto output_vPtr = vector_type::get_pointer(output.data());
auto input_vPtr = vector_type::get_pointer(input.data());
ActivationOp activation_op(params);
for (auto i : grid_stride_range(output.size() / vector_type::size())) {
vector_type vec;
v_load(vec, input_vPtr[i]);
for (int j = 0; j < vector_type::size(); j++)
vec.data[j] = activation_op(vec.data[j]);
v_store(output_vPtr[i], vec);
}
}
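// axiswise_relu_vec: PReLU with one slope per channel. inner_size is the per-channel
// element count in units of N-element vectors, so the channel index c is recovered
// from the flat loop index before applying x > 0 ? x : x * slope[c].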
template <class T, std::size_t N>
__global__ void axiswise_relu_vec(Span<T> output, View<T> input, size_type inner_size, View<T> slope) {
using vector_type = get_vector_type_t<T, N>;
auto output_vPtr = vector_type::get_pointer(output.data());
auto input_vPtr = vector_type::get_pointer(input.data());
for (auto i : grid_stride_range(output.size() / vector_type::size())) {
const index_type c = (i / inner_size) % slope.size();
vector_type vec;
v_load(vec, input_vPtr[i]);
for (int j = 0; j < vector_type::size(); j++)
vec.data[j] = vec.data[j] > T(0) ? vec.data[j] : vec.data[j] * slope[c];
v_store(output_vPtr[i], vec);
}
}
} /* namespace raw */
template <class T, class ActivationOp, std::size_t N> static
void launch_vectorized_generic_op(const Stream& stream, Span<T> output, View<T> input, const typename ActivationOp::Params& params) {
CV_Assert(is_fully_aligned<T>(output, N));
CV_Assert(is_fully_aligned<T>(input, N));
auto kernel = raw::generic_op_vec<T, ActivationOp, N>;
auto policy = make_policy(kernel, output.size() / N, 0, stream);
launch_kernel(kernel, policy, output, input, params);
}
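// generic_op dispatches to the widest vector width (4, 2, or 1) that the alignment of
// both the input and output spans allows.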
template <class T, class ActivationOp> static
void generic_op(const Stream& stream, Span<T> output, View<T> input, const typename ActivationOp::Params& params = {}) {
CV_Assert(input.size() == output.size());
if (is_fully_aligned<T>(output, 4) && is_fully_aligned<T>(input, 4)) {
launch_vectorized_generic_op<T, ActivationOp, 4>(stream, output, input, params);
} else if (is_fully_aligned<T>(output, 2) && is_fully_aligned<T>(input, 2)) {
launch_vectorized_generic_op<T, ActivationOp, 2>(stream, output, input, params);
} else {
launch_vectorized_generic_op<T, ActivationOp, 1>(stream, output, input, params);
}
}
template <class T>
void relu(const Stream& stream, Span<T> output, View<T> input, T slope) {
generic_op<T, ReLUFunctor<T>>(stream, output, input, {slope});
}
template <class T>
void clipped_relu(const Stream& stream, Span<T> output, View<T> input, T floor, T ceiling) {
CV_Assert(static_cast<double>(floor) <= static_cast<double>(ceiling));
generic_op<T, ClippedReLUFunctor<T>>(stream, output, input, {floor, ceiling});
}
template <class T>
void tanh(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, TanHFunctor<T>>(stream, output, input);
}
template <class T>
void swish(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, SwishFunctor<T>>(stream, output, input);
}
template <class T>
void mish(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, MishFunctor<T>>(stream, output, input);
}
template <class T>
void sigmoid(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, SigmoidFunctor<T>>(stream, output, input);
}
template <class T>
void elu(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, ELUFunctor<T>>(stream, output, input);
}
template <class T>
void bnll(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, BNLLFunctor<T>>(stream, output, input);
}
template <class T>
void abs(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, AbsFunctor<T>>(stream, output, input);
}
template <class T>
void power(const Stream& stream, Span<T> output, View<T> input, T exp, T scale, T shift) {
CV_Assert(input.size() == output.size());
if (static_cast<float>(exp) == 1.0f) {
scale1_with_bias1(stream, output, input, scale, shift);
return;
}
generic_op<T, PowerFunctor<T>>(stream, output, input, {exp, scale, shift});
}
#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 530)
template void relu<__half>(const Stream&, Span<__half>, View<__half>, __half);
template void clipped_relu<__half>(const Stream&, Span<__half>, View<__half>, __half, __half);
template void tanh<__half>(const Stream&, Span<__half>, View<__half>);
template void swish<__half>(const Stream&, Span<__half>, View<__half>);
template void mish<__half>(const Stream&, Span<__half>, View<__half>);
template void sigmoid<__half>(const Stream&, Span<__half>, View<__half>);
template void elu<__half>(const Stream&, Span<__half>, View<__half>);
template void abs<__half>(const Stream& stream, Span<__half> output, View<__half> input);
template void bnll<__half>(const Stream&, Span<__half>, View<__half>);
template void power<__half>(const Stream&, Span<__half>, View<__half>, __half, __half, __half);
#endif
template void relu<float>(const Stream&, Span<float>, View<float>, float);
template void clipped_relu<float>(const Stream&, Span<float>, View<float>, float, float);
template void tanh<float>(const Stream&, Span<float>, View<float>);
template void swish<float>(const Stream&, Span<float>, View<float>);
template void mish<float>(const Stream&, Span<float>, View<float>);
template void sigmoid<float>(const Stream&, Span<float>, View<float>);
template void elu<float>(const Stream&, Span<float>, View<float>);
template void abs<float>(const Stream& stream, Span<float> output, View<float> input);
template void bnll<float>(const Stream&, Span<float>, View<float>);
template void power<float>(const Stream&, Span<float>, View<float>, float, float, float);
template <class T, std::size_t N> static
void launch_vectorized_axiswise_relu(const Stream& stream, Span<T> output, View<T> input, std::size_t inner_size, View<T> slope) {
CV_Assert(is_fully_aligned<T>(output, N));
CV_Assert(is_fully_aligned<T>(input, N));
CV_Assert(inner_size % N == 0);
auto kernel = raw::axiswise_relu_vec<T, N>;
auto policy = make_policy(kernel, output.size() / N, 0, stream);
launch_kernel(kernel, policy, output, input, inner_size / N, slope);
}
template <class T>
void axiswise_relu(const Stream& stream, Span<T> output, View<T> input, std::size_t inner_size, View<T> slope) {
CV_Assert(input.size() == output.size());
if (is_fully_aligned<T>(output, 4) && is_fully_aligned<T>(input, 4) && inner_size % 4 == 0) {
launch_vectorized_axiswise_relu<T, 4>(stream, output, input, inner_size, slope);
} else if (is_fully_aligned<T>(output, 2) && is_fully_aligned<T>(input, 2) && inner_size % 2 == 0) {
launch_vectorized_axiswise_relu<T, 2>(stream, output, input, inner_size, slope);
} else {
launch_vectorized_axiswise_relu<T, 1>(stream, output, input, inner_size, slope);
}
}
#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 530)
template void axiswise_relu<__half>(const Stream&, Span<__half>, View<__half>, std::size_t, View<__half>);
#endif
template void axiswise_relu<float>(const Stream&, Span<float>, View<float>, std::size_t, View<float>);
}}}} /* namespace cv::dnn::cuda4dnn::kernels */
|
866b83bb1a4697e3a9591c42dac3f8b376f44e3f.cu
|
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#include <cuda_runtime.h>
#include <cuda_fp16.h>
#include "functors.hpp"
#include "types.hpp"
#include "vector_traits.hpp"
#include "grid_stride_range.hpp"
#include "execution.hpp"
#include "../cuda4dnn/csl/stream.hpp"
#include "../cuda4dnn/csl/span.hpp"
#include "../cuda4dnn/kernels/scale_shift.hpp"
#include <opencv2/core.hpp>
#include <cstddef>
using namespace cv::dnn::cuda4dnn::csl;
using namespace cv::dnn::cuda4dnn::csl::device;
namespace cv { namespace dnn { namespace cuda4dnn { namespace kernels {
namespace raw {
template <class T, class ActivationOp, std::size_t N>
__global__ void generic_op_vec(Span<T> output, View<T> input, const typename ActivationOp::Params params) {
using vector_type = get_vector_type_t<T, N>;
auto output_vPtr = vector_type::get_pointer(output.data());
auto input_vPtr = vector_type::get_pointer(input.data());
ActivationOp activation_op(params);
for (auto i : grid_stride_range(output.size() / vector_type::size())) {
vector_type vec;
v_load(vec, input_vPtr[i]);
for (int j = 0; j < vector_type::size(); j++)
vec.data[j] = activation_op(vec.data[j]);
v_store(output_vPtr[i], vec);
}
}
template <class T, std::size_t N>
__global__ void axiswise_relu_vec(Span<T> output, View<T> input, size_type inner_size, View<T> slope) {
using vector_type = get_vector_type_t<T, N>;
auto output_vPtr = vector_type::get_pointer(output.data());
auto input_vPtr = vector_type::get_pointer(input.data());
for (auto i : grid_stride_range(output.size() / vector_type::size())) {
const index_type c = (i / inner_size) % slope.size();
vector_type vec;
v_load(vec, input_vPtr[i]);
for (int j = 0; j < vector_type::size(); j++)
vec.data[j] = vec.data[j] > T(0) ? vec.data[j] : vec.data[j] * slope[c];
v_store(output_vPtr[i], vec);
}
}
} /* namespace raw */
template <class T, class ActivationOp, std::size_t N> static
void launch_vectorized_generic_op(const Stream& stream, Span<T> output, View<T> input, const typename ActivationOp::Params& params) {
CV_Assert(is_fully_aligned<T>(output, N));
CV_Assert(is_fully_aligned<T>(input, N));
auto kernel = raw::generic_op_vec<T, ActivationOp, N>;
auto policy = make_policy(kernel, output.size() / N, 0, stream);
launch_kernel(kernel, policy, output, input, params);
}
template <class T, class ActivationOp> static
void generic_op(const Stream& stream, Span<T> output, View<T> input, const typename ActivationOp::Params& params = {}) {
CV_Assert(input.size() == output.size());
if (is_fully_aligned<T>(output, 4) && is_fully_aligned<T>(input, 4)) {
launch_vectorized_generic_op<T, ActivationOp, 4>(stream, output, input, params);
} else if (is_fully_aligned<T>(output, 2) && is_fully_aligned<T>(input, 2)) {
launch_vectorized_generic_op<T, ActivationOp, 2>(stream, output, input, params);
} else {
launch_vectorized_generic_op<T, ActivationOp, 1>(stream, output, input, params);
}
}
template <class T>
void relu(const Stream& stream, Span<T> output, View<T> input, T slope) {
generic_op<T, ReLUFunctor<T>>(stream, output, input, {slope});
}
template <class T>
void clipped_relu(const Stream& stream, Span<T> output, View<T> input, T floor, T ceiling) {
CV_Assert(static_cast<double>(floor) <= static_cast<double>(ceiling));
generic_op<T, ClippedReLUFunctor<T>>(stream, output, input, {floor, ceiling});
}
template <class T>
void tanh(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, TanHFunctor<T>>(stream, output, input);
}
template <class T>
void swish(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, SwishFunctor<T>>(stream, output, input);
}
template <class T>
void mish(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, MishFunctor<T>>(stream, output, input);
}
template <class T>
void sigmoid(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, SigmoidFunctor<T>>(stream, output, input);
}
template <class T>
void elu(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, ELUFunctor<T>>(stream, output, input);
}
template <class T>
void bnll(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, BNLLFunctor<T>>(stream, output, input);
}
template <class T>
void abs(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, AbsFunctor<T>>(stream, output, input);
}
template <class T>
void power(const Stream& stream, Span<T> output, View<T> input, T exp, T scale, T shift) {
CV_Assert(input.size() == output.size());
if (static_cast<float>(exp) == 1.0f) {
scale1_with_bias1(stream, output, input, scale, shift);
return;
}
generic_op<T, PowerFunctor<T>>(stream, output, input, {exp, scale, shift});
}
#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 530)
template void relu<__half>(const Stream&, Span<__half>, View<__half>, __half);
template void clipped_relu<__half>(const Stream&, Span<__half>, View<__half>, __half, __half);
template void tanh<__half>(const Stream&, Span<__half>, View<__half>);
template void swish<__half>(const Stream&, Span<__half>, View<__half>);
template void mish<__half>(const Stream&, Span<__half>, View<__half>);
template void sigmoid<__half>(const Stream&, Span<__half>, View<__half>);
template void elu<__half>(const Stream&, Span<__half>, View<__half>);
template void abs<__half>(const Stream& stream, Span<__half> output, View<__half> input);
template void bnll<__half>(const Stream&, Span<__half>, View<__half>);
template void power<__half>(const Stream&, Span<__half>, View<__half>, __half, __half, __half);
#endif
template void relu<float>(const Stream&, Span<float>, View<float>, float);
template void clipped_relu<float>(const Stream&, Span<float>, View<float>, float, float);
template void tanh<float>(const Stream&, Span<float>, View<float>);
template void swish<float>(const Stream&, Span<float>, View<float>);
template void mish<float>(const Stream&, Span<float>, View<float>);
template void sigmoid<float>(const Stream&, Span<float>, View<float>);
template void elu<float>(const Stream&, Span<float>, View<float>);
template void abs<float>(const Stream& stream, Span<float> output, View<float> input);
template void bnll<float>(const Stream&, Span<float>, View<float>);
template void power<float>(const Stream&, Span<float>, View<float>, float, float, float);
template <class T, std::size_t N> static
void launch_vectorized_axiswise_relu(const Stream& stream, Span<T> output, View<T> input, std::size_t inner_size, View<T> slope) {
CV_Assert(is_fully_aligned<T>(output, N));
CV_Assert(is_fully_aligned<T>(input, N));
CV_Assert(inner_size % N == 0);
auto kernel = raw::axiswise_relu_vec<T, N>;
auto policy = make_policy(kernel, output.size() / N, 0, stream);
launch_kernel(kernel, policy, output, input, inner_size / N, slope);
}
template <class T>
void axiswise_relu(const Stream& stream, Span<T> output, View<T> input, std::size_t inner_size, View<T> slope) {
CV_Assert(input.size() == output.size());
if (is_fully_aligned<T>(output, 4) && is_fully_aligned<T>(input, 4) && inner_size % 4 == 0) {
launch_vectorized_axiswise_relu<T, 4>(stream, output, input, inner_size, slope);
} else if (is_fully_aligned<T>(output, 2) && is_fully_aligned<T>(input, 2) && inner_size % 2 == 0) {
launch_vectorized_axiswise_relu<T, 2>(stream, output, input, inner_size, slope);
} else {
launch_vectorized_axiswise_relu<T, 1>(stream, output, input, inner_size, slope);
}
}
#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 530)
template void axiswise_relu<__half>(const Stream&, Span<__half>, View<__half>, std::size_t, View<__half>);
#endif
template void axiswise_relu<float>(const Stream&, Span<float>, View<float>, std::size_t, View<float>);
}}}} /* namespace cv::dnn::cuda4dnn::kernels */
|
7e46a173d6fbb74aa05cba5baa57cecc5b54701f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "lab3.h"
#include <cstdio>
__device__ __host__ int CeilDiv(int a, int b) { return (a-1)/b + 1; }
__device__ __host__ int CeilAlign(int a, int b) { return CeilDiv(a, b) * b; }
__global__ void SimpleClone(
const float *background,
const float *target,
const float *mask,
float *output,
const int wb, const int hb, const int wt, const int ht,
const int oy, const int ox
)
{
const int yt = blockIdx.y * blockDim.y + threadIdx.y;
const int xt = blockIdx.x * blockDim.x + threadIdx.x;
const int curt = wt*yt+xt;
if (yt < ht and xt < wt and mask[curt] > 127.0f) {
const int yb = oy+yt, xb = ox+xt;
const int curb = wb*yb+xb;
if (0 <= yb and yb < hb and 0 <= xb and xb < wb) {
output[curb*3+0] = target[curt*3+0];
output[curb*3+1] = target[curt*3+1];
output[curb*3+2] = target[curt*3+2];
}
}
}
__device__ float TARGETCOLOR(
const float *target,
const int xt, const int yt, const int color, const int oldx, const int oldy,
const int wt, const int ht
) {
if(0 <= xt && xt < wt && 0 <= yt && yt < ht)
return target[(wt*oldy+oldx)*3 + color] - target[(wt*yt+xt)*3 + color];
else
return 0;
}
__device__ float FIX(
const float *target,
const int xt, const int yt, const int color,
const int wt, const int ht
) {
return (TARGETCOLOR(target,xt-1, yt, color, xt, yt, wt, ht) +
TARGETCOLOR(target,xt, yt-1, color, xt, yt, wt, ht) +
TARGETCOLOR(target,xt+1, yt, color, xt, yt, wt, ht) +
TARGETCOLOR(target,xt, yt+1, color, xt, yt, wt, ht));
}
__device__ float findBackground(const float *background,const int color, const int xt, const int yt, const int wb, const int hb, const int ox, const int oy) {
int safex = xt + ox, safey = yt + oy;
safex = safex < 0 ? 0 : (safex >= wb ? wb-1 : safex);
safey = safey < 0 ? 0 : safey;
safey = safey >= hb ? hb-1 : safey;
return background[(safey * wb + safex)*3 + color];
}
__device__ float BUFFERCOLOR(
const float *source,
const float *background,
const float *mask,
const int xt, const int yt, const int color,
const int wt, const int ht, const int wb, const int hb, const int ox, const int oy
) {
if(0<=yt && yt < ht && 0 <= xt && xt < wt) {
//INMASK
if( mask[wt*yt+xt] > 127.0f ) {
return source[(wt*yt+xt)*3 + color];
//OUTMASK
} else {
return findBackground(background , color, xt, yt, wb, hb, ox, oy);
}
//OUT TARGET
} else {
return findBackground(background, color, xt, yt ,wb, hb, ox, oy);
}
}
__device__ float BUFFER(
const float *source,
const float *background,
const float *mask,
const int xt, const int yt, const int color,
const int wt, const int ht, const int wb, const int hb, const int ox, const int oy
) {
return BUFFERCOLOR(source, background , mask, xt-1, yt, color, wt, ht, wb, hb, ox, oy) +
BUFFERCOLOR(source, background , mask, xt, yt-1, color, wt, ht, wb, hb, ox, oy) +
BUFFERCOLOR(source, background , mask, xt+1, yt, color, wt, ht, wb, hb, ox, oy) +
BUFFERCOLOR(source, background , mask, xt, yt+1, color, wt, ht, wb, hb, ox, oy);
}
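// One Jacobi relaxation step of the discrete Poisson equation: each interior pixel of
// the target becomes (Laplacian of the target (FIX) + sum of the four neighbouring
// values from the current estimate or the boundary (BUFFER)) / 4, per colour channel.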
__global__ void PoissonImageCloningIteration(
const float *background,
const float *target,
const float *mask,
float *source,
float *dest,
const int wb, const int hb, const int wt, const int ht,
const int oy, const int ox
) {
const int yt = blockIdx.y * blockDim.y + threadIdx.y;
const int xt = blockIdx.x * blockDim.x + threadIdx.x;
const int curt = wt*yt+xt;
if (0 <= yt && yt < ht && 0 <= xt && xt < wt) {
dest[curt*3+0] = (FIX(target, xt, yt, 0, wt, ht)
+ BUFFER(source, background, mask, xt, yt , 0, wt, ht, wb, hb, ox, oy))/4.0f;
dest[curt*3+1] = (FIX(target, xt, yt, 1, wt, ht)
+ BUFFER(source, background, mask, xt, yt , 1, wt, ht, wb, hb, ox, oy))/4.0f;
dest[curt*3+2] = (FIX(target, xt, yt, 2, wt, ht)
+ BUFFER(source, background, mask, xt, yt , 2, wt, ht, wb, hb, ox, oy))/4.0f;
}
}
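// Driver: buf1/buf2 ping-pong between Jacobi steps (two steps per loop iteration,
// 20000 steps in total); the result is then composited over a copy of the background
// by SimpleClone, which only writes pixels inside the mask. The 'fixed' buffer is
// allocated but unused in this simple variant.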
void PoissonImageCloning(
const float *background,
const float *target,
const float *mask,
float *output,
const int wb, const int hb, const int wt, const int ht,
const int oy, const int ox
) {
// set up
float *fixed, *buf1, *buf2;
hipMalloc(&fixed, 3*wt*ht*sizeof(float));
hipMalloc(&buf1, 3*wt*ht*sizeof(float));
hipMalloc(&buf2, 3*wt*ht*sizeof(float));
// initialize the iteration
dim3 gdim(CeilDiv(wt,32), CeilDiv(ht,16)), bdim(32,16);
hipMemcpy(buf1, target, sizeof(float)*3*wt*ht, hipMemcpyDeviceToDevice);
// iterate
for (int i = 0; i < 10000; ++i) {
hipLaunchKernelGGL(( PoissonImageCloningIteration), dim3(gdim), dim3(bdim), 0, 0,
background, target, mask, buf1, buf2, wb, hb, wt, ht, oy, ox
);
hipLaunchKernelGGL(( PoissonImageCloningIteration), dim3(gdim), dim3(bdim), 0, 0,
background, target, mask, buf2, buf1, wb, hb, wt, ht, oy, ox
);
}
hipMemcpy(output, background, wb*hb*sizeof(float)*3, hipMemcpyDeviceToDevice);
hipLaunchKernelGGL(( SimpleClone), dim3(gdim), dim3(bdim), 0, 0,
background, buf1, mask, output,
wb, hb, wt, ht, oy, ox
);
// clean up
hipFree(fixed);
hipFree(buf1);
hipFree(buf2);
}
|
7e46a173d6fbb74aa05cba5baa57cecc5b54701f.cu
|
#include "lab3.h"
#include <cstdio>
__device__ __host__ int CeilDiv(int a, int b) { return (a-1)/b + 1; }
__device__ __host__ int CeilAlign(int a, int b) { return CeilDiv(a, b) * b; }
__global__ void SimpleClone(
const float *background,
const float *target,
const float *mask,
float *output,
const int wb, const int hb, const int wt, const int ht,
const int oy, const int ox
)
{
const int yt = blockIdx.y * blockDim.y + threadIdx.y;
const int xt = blockIdx.x * blockDim.x + threadIdx.x;
const int curt = wt*yt+xt;
if (yt < ht and xt < wt and mask[curt] > 127.0f) {
const int yb = oy+yt, xb = ox+xt;
const int curb = wb*yb+xb;
if (0 <= yb and yb < hb and 0 <= xb and xb < wb) {
output[curb*3+0] = target[curt*3+0];
output[curb*3+1] = target[curt*3+1];
output[curb*3+2] = target[curt*3+2];
}
}
}
__device__ float TARGETCOLOR(
const float *target,
const int xt, const int yt, const int color, const int oldx, const int oldy,
const int wt, const int ht
) {
if(0 <= xt && xt < wt && 0 <= yt && yt < ht)
return target[(wt*oldy+oldx)*3 + color] - target[(wt*yt+xt)*3 + color];
else
return 0;
}
__device__ float FIX(
const float *target,
const int xt, const int yt, const int color,
const int wt, const int ht
) {
return (TARGETCOLOR(target,xt-1, yt, color, xt, yt, wt, ht) +
TARGETCOLOR(target,xt, yt-1, color, xt, yt, wt, ht) +
TARGETCOLOR(target,xt+1, yt, color, xt, yt, wt, ht) +
TARGETCOLOR(target,xt, yt+1, color, xt, yt, wt, ht));
}
__device__ float findBackground(const float *background,const int color, const int xt, const int yt, const int wb, const int hb, const int ox, const int oy) {
int safex = xt + ox, safey = yt + oy;
safex = safex < 0 ? 0 : (safex >= wb ? wb-1 : safex);
safey = safey < 0 ? 0 : safey;
safey = safey >= hb ? hb-1 : safey;
return background[(safey * wb + safex)*3 + color];
}
__device__ float BUFFERCOLOR(
const float *source,
const float *background,
const float *mask,
const int xt, const int yt, const int color,
const int wt, const int ht, const int wb, const int hb, const int ox, const int oy
) {
if(0<=yt && yt < ht && 0 <= xt && xt < wt) {
//INMASK
if( mask[wt*yt+xt] > 127.0f ) {
return source[(wt*yt+xt)*3 + color];
//OUTMASK
} else {
return findBackground(background , color, xt, yt, wb, hb, ox, oy);
}
//OUT TARGET
} else {
return findBackground(background, color, xt, yt ,wb, hb, ox, oy);
}
}
__device__ float BUFFER(
const float *source,
const float *background,
const float *mask,
const int xt, const int yt, const int color,
const int wt, const int ht, const int wb, const int hb, const int ox, const int oy
) {
return BUFFERCOLOR(source, background , mask, xt-1, yt, color, wt, ht, wb, hb, ox, oy) +
BUFFERCOLOR(source, background , mask, xt, yt-1, color, wt, ht, wb, hb, ox, oy) +
BUFFERCOLOR(source, background , mask, xt+1, yt, color, wt, ht, wb, hb, ox, oy) +
BUFFERCOLOR(source, background , mask, xt, yt+1, color, wt, ht, wb, hb, ox, oy);
}
__global__ void PoissonImageCloningIteration(
const float *background,
const float *target,
const float *mask,
float *source,
float *dest,
const int wb, const int hb, const int wt, const int ht,
const int oy, const int ox
) {
const int yt = blockIdx.y * blockDim.y + threadIdx.y;
const int xt = blockIdx.x * blockDim.x + threadIdx.x;
const int curt = wt*yt+xt;
if (0 <= yt && yt < ht && 0 <= xt && xt < wt) {
dest[curt*3+0] = (FIX(target, xt, yt, 0, wt, ht)
+ BUFFER(source, background, mask, xt, yt , 0, wt, ht, wb, hb, ox, oy))/4.0f;
dest[curt*3+1] = (FIX(target, xt, yt, 1, wt, ht)
+ BUFFER(source, background, mask, xt, yt , 1, wt, ht, wb, hb, ox, oy))/4.0f;
dest[curt*3+2] = (FIX(target, xt, yt, 2, wt, ht)
+ BUFFER(source, background, mask, xt, yt , 2, wt, ht, wb, hb, ox, oy))/4.0f;
}
}
void PoissonImageCloning(
const float *background,
const float *target,
const float *mask,
float *output,
const int wb, const int hb, const int wt, const int ht,
const int oy, const int ox
) {
// set up
float *fixed, *buf1, *buf2;
cudaMalloc(&fixed, 3*wt*ht*sizeof(float));
cudaMalloc(&buf1, 3*wt*ht*sizeof(float));
cudaMalloc(&buf2, 3*wt*ht*sizeof(float));
// initialize the iteration
dim3 gdim(CeilDiv(wt,32), CeilDiv(ht,16)), bdim(32,16);
cudaMemcpy(buf1, target, sizeof(float)*3*wt*ht, cudaMemcpyDeviceToDevice);
// iterate
for (int i = 0; i < 10000; ++i) {
PoissonImageCloningIteration<<<gdim, bdim>>>(
background, target, mask, buf1, buf2, wb, hb, wt, ht, oy, ox
);
PoissonImageCloningIteration<<<gdim, bdim>>>(
background, target, mask, buf2, buf1, wb, hb, wt, ht, oy, ox
);
}
cudaMemcpy(output, background, wb*hb*sizeof(float)*3, cudaMemcpyDeviceToDevice);
SimpleClone<<<gdim, bdim>>>(
background, buf1, mask, output,
wb, hb, wt, ht, oy, ox
);
// clean up
cudaFree(fixed);
cudaFree(buf1);
cudaFree(buf2);
}
|
f14b43044565de50d7618cbd6cc9e14d3abeb1a6.hip
|
// !!! This is a file automatically generated by hipify!!!
// System includes
#include <assert.h>
#include <malloc.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
// CUDA runtime
#include <hip/hip_runtime.h>
// Helper functions and utilities to work with CUDA
#include "helper_cuda.h"
#include "helper_functions.h"
// setting the number of trials in the monte carlo simulation:
#ifndef NUMTRIALS
#define NUMTRIALS (1024 * 1024)
#endif
#ifndef BLOCKSIZE
#define BLOCKSIZE 32 // number of threads per block
#endif
#define NUMBLOCKS (NUMTRIALS / BLOCKSIZE)
// ranges for the random numbers:
const float XCMIN = 0.0;
const float XCMAX = 2.0;
const float YCMIN = 0.0;
const float YCMAX = 2.0;
const float RMIN = 0.5;
const float RMAX = 2.0;
// function prototypes:
float Ranf(float, float);
int Ranf(int, int);
void TimeOfDaySeed();
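// MonteCarlo kernel: each thread fires a ray from the origin at 30 degrees, intersects
// it with a randomly placed circle (Xcs/Ycs/Rs), reflects it about the surface normal
// at the first intersection, and records a hit when the bounced ray reaches the y = 0
// plate at a non-negative parameter t.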
__global__ void MonteCarlo(float *Xcs, float *Ycs, float *Rs, int *Hits) {
unsigned int wgNumber = blockIdx.x;
unsigned int wgDimension = blockDim.x;
unsigned int threadNum = threadIdx.x;
unsigned int gid = wgNumber * wgDimension + threadNum;
// all the monte carlo stuff goes in here
// if we make it all the way through, then Hits[gid] = 1
// randomize the location and radius of the circle:
float xc = Xcs[gid];
float yc = Ycs[gid];
float r = Rs[gid];
float tn = tanf((float)((M_PI / 180.) * 30.));
Hits[gid] = 0;
// solve for the intersection using the quadratic formula:
float a = 1. + tn * tn;
float b = -2. * (xc + yc * tn);
float c = xc * xc + yc * yc - r * r;
float d = b * b - 4. * a * c;
// cascading if-statements:
// if you used "continue;" in project #1, change to this style because,
// if there is no for-loop, then there is nowhere to continue to
if (d >= 0) {
d = sqrt(d);
float t1 = (-b + d) / (2. * a); // time to intersect the circle
float t2 = (-b - d) / (2. * a); // time to intersect the circle
float tmin = t1 < t2 ? t1 : t2; // only care about the first intersection
if (tmin >= 0) {
// where does it intersect the circle?
float xcir = tmin;
float ycir = tmin * tn;
// get the unitized normal vector at the point of intersection:
float nx = xcir - xc;
float ny = ycir - yc;
float nxy = sqrt(nx * nx + ny * ny);
nx /= nxy; // unit vector
ny /= nxy; // unit vector
// get the unitized incoming vector:
float inx = xcir - 0.;
float iny = ycir - 0.;
float in = sqrt(inx * inx + iny * iny);
inx /= in; // unit vector
iny /= in; // unit vector
// get the outgoing (bounced) vector:
float dot = inx * nx + iny * ny;
float outy =
iny - 2. * ny * dot; // angle of reflection = angle of incidence
// find out if it hits the infinite plate:
float t = (0. - ycir) / outy;
if (t >= 0.) {
Hits[gid] = 1;
}
}
}
}
// main program:
int main(int argc, char *argv[]) {
TimeOfDaySeed();
int dev = findCudaDevice(argc, (const char **)argv);
// allocate host memory:
float *hXcs = new float[NUMTRIALS];
float *hYcs = new float[NUMTRIALS];
float *hRs = new float[NUMTRIALS];
int *hHits = new int[NUMTRIALS];
// fill the random-value arrays:
for (int n = 0; n < NUMTRIALS; n++) {
hXcs[n] = Ranf(XCMIN, XCMAX);
hYcs[n] = Ranf(YCMIN, YCMAX);
hRs[n] = Ranf(RMIN, RMAX);
}
// allocate device memory:
float *dXcs, *dYcs, *dRs;
int *dHits;
dim3 dimsXcs(NUMTRIALS, 1, 1);
dim3 dimsYcs(NUMTRIALS, 1, 1);
dim3 dimsRs(NUMTRIALS, 1, 1);
dim3 dimsHits(NUMTRIALS, 1, 1);
hipError_t status;
status = hipMalloc((void **)(&dXcs), NUMTRIALS * sizeof(float));
checkCudaErrors(status);
status = hipMalloc((void **)(&dYcs), NUMTRIALS * sizeof(float));
checkCudaErrors(status);
status = hipMalloc((void **)(&dRs), NUMTRIALS * sizeof(float));
checkCudaErrors(status);
status = hipMalloc((void **)(&dHits), NUMTRIALS * sizeof(int));
checkCudaErrors(status);
// copy host memory to the device:
status =
hipMemcpy(dXcs, hXcs, NUMTRIALS * sizeof(float), hipMemcpyHostToDevice);
checkCudaErrors(status);
status =
hipMemcpy(dYcs, hYcs, NUMTRIALS * sizeof(float), hipMemcpyHostToDevice);
checkCudaErrors(status);
status =
hipMemcpy(dRs, hRs, NUMTRIALS * sizeof(float), hipMemcpyHostToDevice);
checkCudaErrors(status);
// setup the execution parameters:
dim3 threads(BLOCKSIZE, 1, 1);
dim3 grid(NUMBLOCKS, 1, 1);
// create and start timer
hipDeviceSynchronize();
// allocate CUDA events that we'll use for timing:
hipEvent_t start, stop;
status = hipEventCreate(&start);
checkCudaErrors(status);
status = hipEventCreate(&stop);
checkCudaErrors(status);
// record the start event:
status = hipEventRecord(start, NULL);
checkCudaErrors(status);
// execute the kernel:
hipLaunchKernelGGL(( MonteCarlo), dim3(grid), dim3(threads), 0, 0, dXcs, dYcs, dRs, dHits);
// record the stop event:
status = hipEventRecord(stop, NULL);
checkCudaErrors(status);
// wait for the stop event to complete:
status = hipEventSynchronize(stop);
checkCudaErrors(status);
float msecTotal = 0.0f;
status = hipEventElapsedTime(&msecTotal, start, stop);
checkCudaErrors(status);
// compute and print the performance
double secondsTotal = 0.001 * (double)msecTotal;
double trialsPerSecond = (float)NUMTRIALS / secondsTotal;
double megaTrialsPerSecond = trialsPerSecond / 1000000.;
fprintf(stderr, "%d,%lf",
NUMTRIALS, megaTrialsPerSecond);
// copy result from the device to the host:
status =
hipMemcpy(hHits, dHits, NUMTRIALS * sizeof(int), hipMemcpyDeviceToHost);
checkCudaErrors(status);
hipDeviceSynchronize();
// compute the probability:
int numHits = 0;
for (int i = 0; i < NUMTRIALS; i++) {
numHits += hHits[i];
}
float probability = 100.f * (float)numHits / (float)NUMTRIALS;
fprintf(stderr, ",%lf\n", probability);
// clean up memory:
delete[] hXcs;
delete[] hYcs;
delete[] hRs;
delete[] hHits;
status = hipFree(dXcs);
status = hipFree(dYcs);
status = hipFree(dRs);
status = hipFree(dHits);
checkCudaErrors(status);
return 0;
}
float Ranf(float low, float high) {
float r = (float)rand(); // 0 - RAND_MAX
float t = r / (float)RAND_MAX; // 0. - 1.
return low + t * (high - low);
}
int Ranf(int ilow, int ihigh) {
float low = (float)ilow;
float high = ceil((float)ihigh);
return (int)Ranf(low, high);
}
void TimeOfDaySeed() {
struct tm y2k = {0};
y2k.tm_hour = 0;
y2k.tm_min = 0;
y2k.tm_sec = 0;
y2k.tm_year = 100;
y2k.tm_mon = 0;
y2k.tm_mday = 1;
time_t timer;
time(&timer);
double seconds = difftime(timer, mktime(&y2k));
unsigned int seed = (unsigned int)(1000. * seconds); // milliseconds
srand(seed);
}
|
f14b43044565de50d7618cbd6cc9e14d3abeb1a6.cu
|
// System includes
#include <assert.h>
#include <malloc.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
// CUDA runtime
#include <cuda_runtime.h>
// Helper functions and utilities to work with CUDA
#include "helper_cuda.h"
#include "helper_functions.h"
// setting the number of trials in the monte carlo simulation:
#ifndef NUMTRIALS
#define NUMTRIALS (1024 * 1024)
#endif
#ifndef BLOCKSIZE
#define BLOCKSIZE 32 // number of threads per block
#endif
#define NUMBLOCKS (NUMTRIALS / BLOCKSIZE)
// ranges for the random numbers:
const float XCMIN = 0.0;
const float XCMAX = 2.0;
const float YCMIN = 0.0;
const float YCMAX = 2.0;
const float RMIN = 0.5;
const float RMAX = 2.0;
// function prototypes:
float Ranf(float, float);
int Ranf(int, int);
void TimeOfDaySeed();
__global__ void MonteCarlo(float *Xcs, float *Ycs, float *Rs, int *Hits) {
unsigned int wgNumber = blockIdx.x;
unsigned int wgDimension = blockDim.x;
unsigned int threadNum = threadIdx.x;
unsigned int gid = wgNumber * wgDimension + threadNum;
// all the monte carlo stuff goes in here
// if we make it all the way through, then Hits[gid] = 1
// randomize the location and radius of the circle:
float xc = Xcs[gid];
float yc = Ycs[gid];
float r = Rs[gid];
float tn = tanf((float)((M_PI / 180.) * 30.));
Hits[gid] = 0;
// solve for the intersection using the quadratic formula:
float a = 1. + tn * tn;
float b = -2. * (xc + yc * tn);
float c = xc * xc + yc * yc - r * r;
float d = b * b - 4. * a * c;
// cascading if-statements:
// if you used "continue;" in project #1, change to this style because,
// if there is no for-loop, then there is nowhere to continue to
if (d >= 0) {
d = sqrt(d);
float t1 = (-b + d) / (2. * a); // time to intersect the circle
float t2 = (-b - d) / (2. * a); // time to intersect the circle
float tmin = t1 < t2 ? t1 : t2; // only care about the first intersection
if (tmin >= 0) {
// where does it intersect the circle?
float xcir = tmin;
float ycir = tmin * tn;
// get the unitized normal vector at the point of intersection:
float nx = xcir - xc;
float ny = ycir - yc;
float nxy = sqrt(nx * nx + ny * ny);
nx /= nxy; // unit vector
ny /= nxy; // unit vector
// get the unitized incoming vector:
float inx = xcir - 0.;
float iny = ycir - 0.;
float in = sqrt(inx * inx + iny * iny);
inx /= in; // unit vector
iny /= in; // unit vector
// get the outgoing (bounced) vector:
float dot = inx * nx + iny * ny;
float outy =
iny - 2. * ny * dot; // angle of reflection = angle of incidence
// find out if it hits the infinite plate:
float t = (0. - ycir) / outy;
if (t >= 0.) {
Hits[gid] = 1;
}
}
}
}
// main program:
int main(int argc, char *argv[]) {
TimeOfDaySeed();
int dev = findCudaDevice(argc, (const char **)argv);
// allocate host memory:
float *hXcs = new float[NUMTRIALS];
float *hYcs = new float[NUMTRIALS];
float *hRs = new float[NUMTRIALS];
int *hHits = new int[NUMTRIALS];
// fill the random-value arrays:
for (int n = 0; n < NUMTRIALS; n++) {
hXcs[n] = Ranf(XCMIN, XCMAX);
hYcs[n] = Ranf(YCMIN, YCMAX);
hRs[n] = Ranf(RMIN, RMAX);
}
// allocate device memory:
float *dXcs, *dYcs, *dRs;
int *dHits;
dim3 dimsXcs(NUMTRIALS, 1, 1);
dim3 dimsYcs(NUMTRIALS, 1, 1);
dim3 dimsRs(NUMTRIALS, 1, 1);
dim3 dimsHits(NUMTRIALS, 1, 1);
cudaError_t status;
status = cudaMalloc((void **)(&dXcs), NUMTRIALS * sizeof(float));
checkCudaErrors(status);
status = cudaMalloc((void **)(&dYcs), NUMTRIALS * sizeof(float));
checkCudaErrors(status);
status = cudaMalloc((void **)(&dRs), NUMTRIALS * sizeof(float));
checkCudaErrors(status);
status = cudaMalloc((void **)(&dHits), NUMTRIALS * sizeof(int));
checkCudaErrors(status);
// copy host memory to the device:
status =
cudaMemcpy(dXcs, hXcs, NUMTRIALS * sizeof(float), cudaMemcpyHostToDevice);
checkCudaErrors(status);
status =
cudaMemcpy(dYcs, hYcs, NUMTRIALS * sizeof(float), cudaMemcpyHostToDevice);
checkCudaErrors(status);
status =
cudaMemcpy(dRs, hRs, NUMTRIALS * sizeof(float), cudaMemcpyHostToDevice);
checkCudaErrors(status);
// setup the execution parameters:
dim3 threads(BLOCKSIZE, 1, 1);
dim3 grid(NUMBLOCKS, 1, 1);
// create and start timer
cudaDeviceSynchronize();
// allocate CUDA events that we'll use for timing:
cudaEvent_t start, stop;
status = cudaEventCreate(&start);
checkCudaErrors(status);
status = cudaEventCreate(&stop);
checkCudaErrors(status);
// record the start event:
status = cudaEventRecord(start, NULL);
checkCudaErrors(status);
// execute the kernel:
MonteCarlo<<<grid, threads>>>(dXcs, dYcs, dRs, dHits);
// record the stop event:
status = cudaEventRecord(stop, NULL);
checkCudaErrors(status);
// wait for the stop event to complete:
status = cudaEventSynchronize(stop);
checkCudaErrors(status);
float msecTotal = 0.0f;
status = cudaEventElapsedTime(&msecTotal, start, stop);
checkCudaErrors(status);
// compute and print the performance
double secondsTotal = 0.001 * (double)msecTotal;
double trialsPerSecond = (float)NUMTRIALS / secondsTotal;
double megaTrialsPerSecond = trialsPerSecond / 1000000.;
fprintf(stderr, "%d,%lf",
NUMTRIALS, megaTrialsPerSecond);
// copy result from the device to the host:
status =
cudaMemcpy(hHits, dHits, NUMTRIALS * sizeof(int), cudaMemcpyDeviceToHost);
checkCudaErrors(status);
cudaDeviceSynchronize();
// compute the probability:
int numHits = 0;
for (int i = 0; i < NUMTRIALS; i++) {
numHits += hHits[i];
}
float probability = 100.f * (float)numHits / (float)NUMTRIALS;
fprintf(stderr, ",%lf\n", probability);
// clean up memory:
delete[] hXcs;
delete[] hYcs;
delete[] hRs;
delete[] hHits;
status = cudaFree(dXcs);
status = cudaFree(dYcs);
status = cudaFree(dRs);
status = cudaFree(dHits);
checkCudaErrors(status);
return 0;
}
float Ranf(float low, float high) {
float r = (float)rand(); // 0 - RAND_MAX
float t = r / (float)RAND_MAX; // 0. - 1.
return low + t * (high - low);
}
int Ranf(int ilow, int ihigh) {
float low = (float)ilow;
float high = ceil((float)ihigh);
return (int)Ranf(low, high);
}
void TimeOfDaySeed() {
struct tm y2k = {0};
y2k.tm_hour = 0;
y2k.tm_min = 0;
y2k.tm_sec = 0;
y2k.tm_year = 100;
y2k.tm_mon = 0;
y2k.tm_mday = 1;
time_t timer;
time(&timer);
double seconds = difftime(timer, mktime(&y2k));
unsigned int seed = (unsigned int)(1000. * seconds); // milliseconds
srand(seed);
}
|
0fd06ba9ff9dc4e1cc4ff150bf12d5ce0213bb6c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/MemoryOverlap.h>
#include <ATen/hip/detail/IndexUtils.cuh>
#include <ATen/native/Resize.h>
#include <ATen/native/TypeProperties.h>
#include <ATen/native/TensorShape.h>
#include <ATen/Dispatch.h>
#include <c10/core/MemoryFormat.h>
#include <c10/util/Optional.h>
namespace at {
namespace native {
constexpr int CAT_ARRAY_BATCH_SIZE = 128;
constexpr int CAT_ARRAY_MAX_INPUT_DIMS = 4;
namespace {
inline bool getCatGrid(ptrdiff_t nTensors, dim3& grid) {
const int numSM = at::cuda::getCurrentDeviceProperties()->multiProcessorCount;
//The x dim of the grid cooperates on a single tensor in the cat; each tensor gets
//2 * numSM blocks, so concatenating two tensors is enough for full utilization.
grid = dim3( 2LL * numSM, (long long) nTensors );
return true;
}
// Similar to any other IndexToOffset calculation for copying along a given
// dimension.
template <typename IndexType, int Dims>
struct CatArrIndexToOffset {
static inline __device__ IndexType compute(
const IndexType tensorSize[Dims],
const IndexType tensorStride[Dims],
const IndexType dimSize,
const unsigned int concatDim,
IndexType linearIndex) {
// linearIndex is not really linear index, but instead the offset in
// input tensor. If the input tensor is contiguous, then this offset
// is the linear index, but if the input tensor is channels last, then
// it is the linear index of the permuted contiguous tensor
IndexType offset = 0;
#pragma unroll
for (int i = Dims - 1; i >= 1; --i) {
IndexType curDimSize = i == concatDim ? dimSize : tensorSize[i];
IndexType nextDimIndex = linearIndex / curDimSize;
IndexType curDimIndex = linearIndex - curDimSize * nextDimIndex;
IndexType curDimOffset = curDimIndex * tensorStride[i];
offset += curDimOffset;
linearIndex = nextDimIndex;
}
return offset + linearIndex * tensorStride[0];
}
};
template<typename IndexType, unsigned int MaxDims>
struct TensorSizeStride {
IndexType tensorSize[MaxDims];
IndexType tensorStride[MaxDims];
};
/**
* Kernel used to concatenate gridDim.y tensors into an output tensor. Uses a
* grid-stride loop based off of the blockIdx.x, threadIdx.x for each input to
* copy each element from each input tensor into the output.
*
* output: base pointer to the storage associated with the output tensor
* inputs: GPU-allocated array of input metadata for each input to concatenate
* in the kernel
* os: the size/stride vectors for the output tensor
* concatDim: dimension along which we are concatenating
* dimStride: the stride of the output tensor at the concatDim
*
* The most important assumption made is that the input tensors are contiguous.
*/
// pass meta data directly through kernel argument instead of pin memory
// In contiguous case, we will not need stride_size, setting it as 1 as placeholder
// to pass compile.
template <typename T, typename IndexType, int n, int stride_size>
struct CatArrInputTensorMetadata {
T* input[n];
IndexType offset[n];
IndexType dimSize[n];
IndexType nElements[n];
bool isContiguous[n];
TensorSizeStride<IndexType, CAT_ARRAY_MAX_INPUT_DIMS> tensorStride[stride_size];
};
template <typename T, typename IndexType, int Dims, int batch_size, int stride_size>
__global__ void CatArrayBatchedCopy(
T* output,
CatArrInputTensorMetadata<T, IndexType, batch_size, stride_size> inputs,
TensorSizeStride<IndexType, CAT_ARRAY_MAX_INPUT_DIMS> os,
const int concatDim,
IndexType dimStride) {
IndexType tid = blockIdx.x * blockDim.x + threadIdx.x;
IndexType nElements = inputs.nElements[blockIdx.y];
TensorSizeStride<IndexType, CAT_ARRAY_MAX_INPUT_DIMS> ins = stride_size > 1 ? inputs.tensorStride[blockIdx.y] : inputs.tensorStride[0];
bool isContig = inputs.isContiguous[blockIdx.y];
if(tid >= nElements) return;
T* data = inputs.input[blockIdx.y];
IndexType offset = inputs.offset[blockIdx.y];
IndexType dimSize = inputs.dimSize[blockIdx.y];
IndexType dataOffset = offset * dimStride;
IndexType stride = gridDim.x * blockDim.x;
while( tid < nElements){
IndexType elementOffset = CatArrIndexToOffset<IndexType, Dims>::compute(
os.tensorSize, os.tensorStride, dimSize, concatDim, tid);
if (isContig) {
output[dataOffset + elementOffset] = data[tid];
} else {
IndexType inElementOffset = CatArrIndexToOffset<IndexType, Dims>::compute(
ins.tensorSize, ins.tensorStride, dimSize, concatDim, tid);
output[dataOffset + elementOffset] = data[inElementOffset];
}
tid += stride;
}
}
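// parallel_cat: packs the metadata of up to batch_size inputs into a single kernel
// argument and launches CatArrayBatchedCopy once per batch; blockIdx.y selects the
// input tensor and the x dimension grid-strides over its elements.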
template <typename scalar_t, int batch_size, int stride_size>
void parallel_cat(Tensor &out, const TensorList &inputs, int64_t dimension,
int nDims, c10::MemoryFormat memory_format) {
// First, let's set up our kernel parameters. We start with a raw pointer to
// the storage for the output Tensor.
scalar_t *data = out.data_ptr<scalar_t>();
CatArrInputTensorMetadata<scalar_t, unsigned int, batch_size, stride_size> catMetaData;
TensorSizeStride<unsigned int, CAT_ARRAY_MAX_INPUT_DIMS> outputParam;
// Next, let's initialize the size, stride arrays for the output Tensor.
if (memory_format == c10::MemoryFormat::Contiguous) {
for (int i = 0; i < nDims; ++i) {
outputParam.tensorSize[i] = at::native::size(out, i);
outputParam.tensorStride[i] = out.stride(i);
}
} else if (memory_format == c10::MemoryFormat::ChannelsLast || memory_format == c10::MemoryFormat::ChannelsLast3d) {
// permute the semantics of dims from NCHW to NHWC so that the input
// tensor is now contiguous
outputParam.tensorSize[0] = at::native::size(out, 0);
outputParam.tensorStride[0] = out.stride(0);
for (int i = 1; i < nDims - 1; ++i) {
outputParam.tensorSize[i] = at::native::size(out, i + 1);
outputParam.tensorStride[i] = out.stride(i + 1);
}
outputParam.tensorSize[nDims - 1] = at::native::size(out, 1);
outputParam.tensorStride[nDims - 1] = out.stride(1);
} else {
TORCH_CHECK(false, "unsupported memory format");
}
at::hip::HIPStreamMasqueradingAsCUDA stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
// Now we loop
int batchCounter = 0;
int64_t offset = 0;
for (int i = 0; i < inputs.size() ; i += batch_size) {
for (batchCounter = 0;
batchCounter < batch_size &&
(i+batchCounter) < inputs.size();
++batchCounter) {
int64_t dimSize = 0;
// There is a legacy case where a 1-D empty tensor can be concat with
// high-dimensional tensor
if (inputs[i+batchCounter].numel() > 0) {
dimSize = at::native::size(inputs[i+batchCounter], dimension);
}
catMetaData.input[batchCounter] = inputs[i+batchCounter].data_ptr<scalar_t>();
catMetaData.offset[batchCounter] = offset;
catMetaData.dimSize[batchCounter] = dimSize;
catMetaData.nElements[batchCounter] = inputs[i+batchCounter].numel();
if (stride_size > 1) {
auto strides = inputs[i+batchCounter].strides();
auto sizes = inputs[i+batchCounter].sizes();
for(int j = 0; j < nDims; j++){
catMetaData.tensorStride[batchCounter].tensorSize[j] = sizes[j];
catMetaData.tensorStride[batchCounter].tensorStride[j] = strides[j];
}
catMetaData.isContiguous[batchCounter] = false;
} else {
catMetaData.isContiguous[batchCounter] = true;
}
// update offset
offset += dimSize;
}
// Next, let's consider how we set our kernel launch parameters.
// We borrow from THCApply, on which the kernel's internal indexing is based.
dim3 applyBlock = dim3(32*16);
//Get a grid whose x dim fills half the GPU and whose y dim is the number of tensors.
//Concatenating two tensors then fills the entire grid, while preventing
//many threads from needlessly loading metadata when the inputs are small.
dim3 catGrid;
getCatGrid(batchCounter, catGrid);
if (memory_format != c10::MemoryFormat::Contiguous) {
switch (dimension) {
case 0:
break;
case 1:
dimension = nDims - dimension;
break;
default:
dimension--;
}
}
// Template Declarations for dim = 1, 2, 3, 4
#define HANDLE_CASE(DIMS) \
hipLaunchKernelGGL(( CatArrayBatchedCopy<scalar_t, unsigned int, DIMS, batch_size, stride_size>), \
catGrid, dim3(applyBlock), 0, stream.stream(), \
data, catMetaData, outputParam, dimension, outputParam.tensorStride[dimension]); \
C10_HIP_KERNEL_LAUNCH_CHECK();
switch (nDims) {
case 1:
HANDLE_CASE(1);
break;
case 2:
HANDLE_CASE(2);
break;
case 3:
HANDLE_CASE(3);
break;
case 4:
HANDLE_CASE(4);
break;
}
#undef HANDLE_CASE
}
}
} // namespace
Tensor cat_cuda(TensorList inputs, int64_t dimension) {
ScalarType high_type = result_type(inputs);
Tensor out = at::empty({0}, inputs.front().options().dtype(high_type));
at::native::cat_out_cuda(inputs, dimension, out);
return out;
}
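// If every input suggests the same memory format, use it; any disagreement falls back
// to Contiguous.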
inline c10::MemoryFormat compute_output_memory_format(const TensorList &inputs) {
c10::optional<c10::MemoryFormat> format = c10::nullopt;
for (auto &t : inputs) {
auto f = t.suggest_memory_format();
if (!format.has_value()) {
format = f;
continue;
}
if (format.value() == f) {
continue;
}
bool contiguous = (format.value() == c10::MemoryFormat::Contiguous || f == c10::MemoryFormat::Contiguous || format.value() != f);
if (contiguous) {
return c10::MemoryFormat::Contiguous;
}
}
return format.value();
}
Tensor& cat_out_cuda(TensorList inputs, int64_t dimension, Tensor& out) {
check_cat_no_zero_dim(inputs);
dimension = legacy_cat_wrap_dim(dimension, inputs);
// previously, size [0] tensors were the only possible empty tensors; thus, it
// wasn't possible to cat empty tensors unless all the other tensors were
// 1-dimensional, so we allowed these tensors to be "skipped". We maintain
// this behavior for backwards compatibility, but only for this specific size
// (i.e. other empty sizes are not skipped).
// FIXME: warn if this is the case
auto should_skip = [](const Tensor &t) {
return t.dim() == 1 && at::native::size(t, 0) == 0;
};
const Tensor *notSkippedTensor = NULL; // non-owning reference
int nDims = 0;
// Check for type promotion
TORCH_CHECK(canCast(result_type(inputs), out.scalar_type()), "torch.cat(): input types ",
" can't be cast to the desired output type ",
out.scalar_type());
// Inputs cannot alias the output tensor
for (int i = 0; i < inputs.size(); i++) {
auto lap = at::get_overlap_status(out, inputs[i]);
TORCH_CHECK(lap != at::MemOverlapStatus::PARTIAL &&
lap != at::MemOverlapStatus::FULL,
"torch.cat(): unsupported operation: the input tensors cannot refer to any "
"of the output memory locations. Found overlap in input "
"tensor ", i);
}
at::assert_no_internal_overlap(out);
for (int i = 0; i < inputs.size(); i++) {
if (should_skip(inputs[i])) {
continue;
}
nDims = inputs[i].dim();
notSkippedTensor = &inputs[i];
break;
}
// If all inputs are empty tensors, return an empty tensor
if (notSkippedTensor == NULL) {
return out;
}
TORCH_CHECK(inputs.size() > 0, "torch.cat(): invalid number of inputs ", inputs.size());
TORCH_CHECK(dimension >= 0, "torch.cat(): invalid dimension ", dimension);
for (const Tensor& t: inputs) {
TORCH_CHECK(t.device() == notSkippedTensor->device(),
"torch.cat(): all input tensors must be on the same device. Received ",
t.device(), " and ", notSkippedTensor->device());
}
TORCH_CHECK(
out.device() == notSkippedTensor->device(),
"torch.cat(): all input tensors and out must be on the same device, but inputs are on ",
notSkippedTensor->device(), " and out is on ", out.device());
c10::MemoryFormat memory_format = compute_output_memory_format(inputs);
std::vector<int64_t> size(notSkippedTensor->sizes().vec());
// Compute size of the result in the cat dimension
int64_t cat_dim_size = 0;
for (int i = 0; i < inputs.size(); i++) {
const Tensor &tensor = inputs[i];
if (should_skip(tensor)) {
continue;
}
check_cat_shape_except_dim(*notSkippedTensor, tensor, dimension, i);
cat_dim_size += at::native::size(tensor, dimension);
}
// Compute the size of the result
size[dimension] = cat_dim_size;
// skip resizing if size of result is same as expected
// raise a warning while resizing if output has one or more elements
// See https://github.com/pytorch/pytorch/pull/62560#discussion_r687363362
// for understanding why at::native::resize_output is not called directly.
// if (at::native::resize_output_check(out, size)) {
// TODO: restore the above, see https://github.com/pytorch/pytorch/issues/64709
if (out.sizes() != size) {
out.resize_(size, memory_format);
}
if (out.numel() == 0) {
return out;
}
// We parallelize the copy if all 6 conditions pass:
//
// 1. There is more than one input tensor
// 2. The out tensor is 32-bit indexable
// 3. The number of dimensions is <= 4
// 4. All input tensors are contiguous (output tensor may be non-contig)
// 5. All input tensors can use 32-bit indexing
// 6. All input tensors and the output have the same scalar type
const bool all32BitIndexable = std::all_of(inputs.begin(), inputs.end(),
[] (const Tensor& t) {
return at::cuda::detail::canUse32BitIndexMath(t);
});
const bool allContiguous = std::all_of(inputs.begin(), inputs.end(),
[=](const Tensor& t) {
return !t.defined() || t.is_contiguous(memory_format);
});
ScalarType firstType = inputs[0].scalar_type();
bool allSameType = std::all_of(inputs.begin(), inputs.end(),
[firstType](const Tensor& t) {
return t.scalar_type() == firstType;
});
allSameType = allSameType && (out.scalar_type() == firstType);
// We support contiguous inputs and non-contiguous inputs (<=4 dims) in different ways.
// For contiguous inputs, we don't need to pass stride meta data to the cuda kernel through
// constant memory, so we can pass more inputs per kernel launch.
// For non-contiguous inputs, we reduce the number of inputs passed to the cuda kernel due to
// the limited amount of constant memory.
if (inputs.size() > 1 &&
out.dim() <= CAT_ARRAY_MAX_INPUT_DIMS &&
at::cuda::detail::canUse32BitIndexMath(out) &&
allContiguous &&
all32BitIndexable &&
allSameType) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16,
out.scalar_type(), "cat_cuda", [&]() {
parallel_cat<scalar_t, CAT_ARRAY_BATCH_SIZE, 1>(out, inputs, dimension, nDims, memory_format);
});
} else if (inputs.size() > 1 &&
out.dim() <= CAT_ARRAY_MAX_INPUT_DIMS &&
at::cuda::detail::canUse32BitIndexMath(out) &&
nDims <= CAT_ARRAY_MAX_INPUT_DIMS &&
all32BitIndexable &&
allSameType &&
memory_format == c10::MemoryFormat::Contiguous) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16,
out.scalar_type(), "cat_cuda", [&]() {
parallel_cat<scalar_t, CAT_ARRAY_BATCH_SIZE/2, CAT_ARRAY_BATCH_SIZE/2>(out, inputs, dimension, nDims, memory_format);
});
} else {
int64_t offset = 0;
for (int j = 0; j < inputs.size(); j++)
{
if (should_skip(inputs[j])) continue;
int64_t dimSize = at::native::size(inputs[j], dimension);
Tensor nt = at::narrow(out, dimension, offset, dimSize);
copy_(nt, inputs[j]);
offset += dimSize;
}
}
return out;
}
} // namespace native
} // namespace at
|
0fd06ba9ff9dc4e1cc4ff150bf12d5ce0213bb6c.cu
|
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/MemoryOverlap.h>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/native/Resize.h>
#include <ATen/native/TypeProperties.h>
#include <ATen/native/TensorShape.h>
#include <ATen/Dispatch.h>
#include <c10/core/MemoryFormat.h>
#include <c10/util/Optional.h>
namespace at {
namespace native {
constexpr int CAT_ARRAY_BATCH_SIZE = 128;
constexpr int CAT_ARRAY_MAX_INPUT_DIMS = 4;
namespace {
inline bool getCatGrid(ptrdiff_t nTensors, dim3& grid) {
const int numSM = at::cuda::getCurrentDeviceProperties()->multiProcessorCount;
//X dim of grid for cat array cooperates on a single tensor in the cat.
//Given half of the GPU, full utilization will always occur.
grid = dim3( 2LL * numSM, (long long) nTensors );
return true;
}
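// [Added example, not part of the original source] On a hypothetical device
// with 80 SMs and a batch of 32 tensors, getCatGrid(32, grid) yields
// grid = dim3(160, 32): 160 blocks (of 512 threads each, see applyBlock in
// parallel_cat below) cooperate on each tensor, while blockIdx.y selects
// which tensor a block works on.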
// Similar to any other IndexToOffset calculation for copying along a given
// dimension.
template <typename IndexType, int Dims>
struct CatArrIndexToOffset {
static inline __device__ IndexType compute(
const IndexType tensorSize[Dims],
const IndexType tensorStride[Dims],
const IndexType dimSize,
const unsigned int concatDim,
IndexType linearIndex) {
// linearIndex is not really a linear index, but instead the offset into
// the input tensor. If the input tensor is contiguous, then this offset
// is the linear index, but if the input tensor is channels last, then
// it is the linear index of the permuted contiguous tensor.
IndexType offset = 0;
#pragma unroll
for (int i = Dims - 1; i >= 1; --i) {
IndexType curDimSize = i == concatDim ? dimSize : tensorSize[i];
IndexType nextDimIndex = linearIndex / curDimSize;
IndexType curDimIndex = linearIndex - curDimSize * nextDimIndex;
IndexType curDimOffset = curDimIndex * tensorStride[i];
offset += curDimOffset;
linearIndex = nextDimIndex;
}
return offset + linearIndex * tensorStride[0];
}
};
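// [Added worked example, not part of the original source] To make the index
// arithmetic above concrete, one hand-traced call, assuming a hypothetical
// cat along dim 1 of an output sized {2, 5, 4} with strides {20, 4, 1}, and
// an input occupying dimSize == 3 slots of that dimension:
//   compute({2,5,4}, {20,4,1}, /*dimSize=*/3, /*concatDim=*/1, /*linearIndex=*/7)
//     i == 2: curDimSize = 4 -> curDimIndex = 3, offset = 3, linearIndex = 1
//     i == 1: curDimSize = 3 -> curDimIndex = 1, offset = 3 + 1*4 = 7, linearIndex = 0
//     returns 7 + 0*20 = 7
// The copy kernel below then adds dataOffset = offset * dimStride (e.g. 2*4 = 8
// if this input starts at slot 2 of the cat dimension), so element (0, 1, 3) of
// the input lands at flat output index 15, i.e. output position (0, 3, 3).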
template<typename IndexType, unsigned int MaxDims>
struct TensorSizeStride {
IndexType tensorSize[MaxDims];
IndexType tensorStride[MaxDims];
};
/**
 * Kernel used to concatenate gridDim.y tensors into an output tensor. Uses a
* grid-stride loop based off of the blockIdx.x, threadIdx.x for each input to
* copy each element from each input tensor into the output.
*
* output: base pointer to the storage associated with the output tensor
* inputs: GPU-allocated array of input metadata for each input to concatenate
* in the kernel
* os: the size/stride vectors for the output tensor
* concatDim: dimension along which we are concatenating
* dimStride: the stride of the output tensor at the concatDim
*
 * The most important assumption made is that the input tensors are contiguous;
 * the non-contiguous case is handled through the per-input stride metadata.
*/
// Pass meta data directly through kernel arguments instead of pinned memory.
// In the contiguous case we do not need stride_size; it is set to 1 as a
// placeholder so the code compiles.
template <typename T, typename IndexType, int n, int stride_size>
struct CatArrInputTensorMetadata {
T* input[n];
IndexType offset[n];
IndexType dimSize[n];
IndexType nElements[n];
bool isContiguous[n];
TensorSizeStride<IndexType, CAT_ARRAY_MAX_INPUT_DIMS> tensorStride[stride_size];
};
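// [Added sketch, not part of the original source] This struct is passed to
// the kernel by value, so its size must stay within the CUDA kernel-parameter
// budget (assumed here to be the classic 4 KB of .param space; newer
// toolchains may allow more). The guards below spell that assumption out for
// the two instantiations used in parallel_cat further down; the element type
// does not affect the size, since only pointers to it are stored.
static_assert(sizeof(CatArrInputTensorMetadata<float, unsigned int,
                  CAT_ARRAY_BATCH_SIZE, 1>) <= 4096,
              "cat metadata exceeds the assumed kernel-parameter budget");
static_assert(sizeof(CatArrInputTensorMetadata<float, unsigned int,
                  CAT_ARRAY_BATCH_SIZE / 2, CAT_ARRAY_BATCH_SIZE / 2>) <= 4096,
              "cat metadata exceeds the assumed kernel-parameter budget");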
template <typename T, typename IndexType, int Dims, int batch_size, int stride_size>
__global__ void CatArrayBatchedCopy(
T* output,
CatArrInputTensorMetadata<T, IndexType, batch_size, stride_size> inputs,
TensorSizeStride<IndexType, CAT_ARRAY_MAX_INPUT_DIMS> os,
const int concatDim,
IndexType dimStride) {
IndexType tid = blockIdx.x * blockDim.x + threadIdx.x;
IndexType nElements = inputs.nElements[blockIdx.y];
TensorSizeStride<IndexType, CAT_ARRAY_MAX_INPUT_DIMS> ins = stride_size > 1 ? inputs.tensorStride[blockIdx.y] : inputs.tensorStride[0];
bool isContig = inputs.isContiguous[blockIdx.y];
if(tid >= nElements) return;
T* data = inputs.input[blockIdx.y];
IndexType offset = inputs.offset[blockIdx.y];
IndexType dimSize = inputs.dimSize[blockIdx.y];
IndexType dataOffset = offset * dimStride;
IndexType stride = gridDim.x * blockDim.x;
while( tid < nElements){
IndexType elementOffset = CatArrIndexToOffset<IndexType, Dims>::compute(
os.tensorSize, os.tensorStride, dimSize, concatDim, tid);
if (isContig) {
output[dataOffset + elementOffset] = data[tid];
} else {
IndexType inElementOffset = CatArrIndexToOffset<IndexType, Dims>::compute(
ins.tensorSize, ins.tensorStride, dimSize, concatDim, tid);
output[dataOffset + elementOffset] = data[inElementOffset];
}
tid += stride;
}
}
template <typename scalar_t, int batch_size, int stride_size>
void parallel_cat(Tensor &out, const TensorList &inputs, int64_t dimension,
int nDims, c10::MemoryFormat memory_format) {
// First, let's set up our kernel parameters. We start with a raw pointer to
// the storage for the output Tensor.
scalar_t *data = out.data_ptr<scalar_t>();
CatArrInputTensorMetadata<scalar_t, unsigned int, batch_size, stride_size> catMetaData;
TensorSizeStride<unsigned int, CAT_ARRAY_MAX_INPUT_DIMS> outputParam;
// Next, let's initialize the size, stride arrays for the output Tensor.
if (memory_format == c10::MemoryFormat::Contiguous) {
for (int i = 0; i < nDims; ++i) {
outputParam.tensorSize[i] = at::native::size(out, i);
outputParam.tensorStride[i] = out.stride(i);
}
} else if (memory_format == c10::MemoryFormat::ChannelsLast || memory_format == c10::MemoryFormat::ChannelsLast3d) {
// permute the semantics of dims from NCHW to NHWC so that the input
// tensor is now contiguous
outputParam.tensorSize[0] = at::native::size(out, 0);
outputParam.tensorStride[0] = out.stride(0);
for (int i = 1; i < nDims - 1; ++i) {
outputParam.tensorSize[i] = at::native::size(out, i + 1);
outputParam.tensorStride[i] = out.stride(i + 1);
}
outputParam.tensorSize[nDims - 1] = at::native::size(out, 1);
outputParam.tensorStride[nDims - 1] = out.stride(1);
} else {
TORCH_CHECK(false, "unsupported memory format");
}
at::cuda::CUDAStream stream = at::cuda::getCurrentCUDAStream();
// Now we loop
int batchCounter = 0;
int64_t offset = 0;
for (int i = 0; i < inputs.size() ; i += batch_size) {
for (batchCounter = 0;
batchCounter < batch_size &&
(i+batchCounter) < inputs.size();
++batchCounter) {
int64_t dimSize = 0;
// There is a legacy case where a 1-D empty tensor can be concat with
// high-dimensional tensor
if (inputs[i+batchCounter].numel() > 0) {
dimSize = at::native::size(inputs[i+batchCounter], dimension);
}
catMetaData.input[batchCounter] = inputs[i+batchCounter].data_ptr<scalar_t>();
catMetaData.offset[batchCounter] = offset;
catMetaData.dimSize[batchCounter] = dimSize;
catMetaData.nElements[batchCounter] = inputs[i+batchCounter].numel();
if (stride_size > 1) {
auto strides = inputs[i+batchCounter].strides();
auto sizes = inputs[i+batchCounter].sizes();
for(int j = 0; j < nDims; j++){
catMetaData.tensorStride[batchCounter].tensorSize[j] = sizes[j];
catMetaData.tensorStride[batchCounter].tensorStride[j] = strides[j];
}
catMetaData.isContiguous[batchCounter] = false;
} else {
catMetaData.isContiguous[batchCounter] = true;
}
// update offset
offset += dimSize;
}
// Next, let's consider how we set our kernel launch parameters.
// We borrow from THCApply, which the kernel's internal indexing
// is based on.
dim3 applyBlock = dim3(32*16);
//Get grid where the x dim fills half the gpu and the y dim is the number of tensors.
//This way, concatenating two tensors fills the entire grid, but prevents
//many threads from needlessly loading meta data when their sizes are small.
dim3 catGrid;
getCatGrid(batchCounter, catGrid);
if (memory_format != c10::MemoryFormat::Contiguous) {
switch (dimension) {
case 0:
break;
case 1:
dimension = nDims - dimension;
break;
default:
dimension--;
}
}
// Template Declarations for dim = 1, 2, 3, 4
#define HANDLE_CASE(DIMS) \
CatArrayBatchedCopy<scalar_t, unsigned int, DIMS, batch_size, stride_size><<<\
catGrid, applyBlock, 0, stream.stream()>>>(\
data, catMetaData, outputParam, dimension, outputParam.tensorStride[dimension]); \
C10_CUDA_KERNEL_LAUNCH_CHECK();
switch (nDims) {
case 1:
HANDLE_CASE(1);
break;
case 2:
HANDLE_CASE(2);
break;
case 3:
HANDLE_CASE(3);
break;
case 4:
HANDLE_CASE(4);
break;
}
#undef HANDLE_CASE
}
}
} // namespace
Tensor cat_cuda(TensorList inputs, int64_t dimension) {
ScalarType high_type = result_type(inputs);
Tensor out = at::empty({0}, inputs.front().options().dtype(high_type));
at::native::cat_out_cuda(inputs, dimension, out);
return out;
}
inline c10::MemoryFormat compute_output_memory_format(const TensorList &inputs) {
c10::optional<c10::MemoryFormat> format = c10::nullopt;
for (auto &t : inputs) {
auto f = t.suggest_memory_format();
if (!format.has_value()) {
format = f;
continue;
}
if (format.value() == f) {
continue;
}
bool contiguous = (format.value() == c10::MemoryFormat::Contiguous || f == c10::MemoryFormat::Contiguous || format.value() != f);
if (contiguous) {
return c10::MemoryFormat::Contiguous;
}
}
return format.value();
}
Tensor& cat_out_cuda(TensorList inputs, int64_t dimension, Tensor& out) {
check_cat_no_zero_dim(inputs);
dimension = legacy_cat_wrap_dim(dimension, inputs);
// previously, size [0] tensors were the only possible empty tensors; thus, it
// wasn't possible to cat empty tensors unless all the other tensors were
// 1-dimensional, so we allowed these tensors to be "skipped". We maintain
// this behavior for backwards compatibility, but only for this specific size
// (i.e. other empty sizes are not skipped).
// FIXME: warn if this is the case
auto should_skip = [](const Tensor &t) {
return t.dim() == 1 && at::native::size(t, 0) == 0;
};
const Tensor *notSkippedTensor = NULL; // non-owning reference
int nDims = 0;
// Check for type promotion
TORCH_CHECK(canCast(result_type(inputs), out.scalar_type()), "torch.cat(): input types ",
" can't be cast to the desired output type ",
out.scalar_type());
// Inputs cannot alias the output tensor
for (int i = 0; i < inputs.size(); i++) {
auto lap = at::get_overlap_status(out, inputs[i]);
TORCH_CHECK(lap != at::MemOverlapStatus::PARTIAL &&
lap != at::MemOverlapStatus::FULL,
"torch.cat(): unsupported operation: the input tensors cannot refer to any "
"of the output memory locations. Found overlap in input "
"tensor ", i);
}
at::assert_no_internal_overlap(out);
for (int i = 0; i < inputs.size(); i++) {
if (should_skip(inputs[i])) {
continue;
}
nDims = inputs[i].dim();
notSkippedTensor = &inputs[i];
break;
}
// If all inputs are empty tensors, return an empty tensor
if (notSkippedTensor == NULL) {
return out;
}
TORCH_CHECK(inputs.size() > 0, "torch.cat(): invalid number of inputs ", inputs.size());
TORCH_CHECK(dimension >= 0, "torch.cat(): invalid dimension ", dimension);
for (const Tensor& t: inputs) {
TORCH_CHECK(t.device() == notSkippedTensor->device(),
"torch.cat(): all input tensors must be on the same device. Received ",
t.device(), " and ", notSkippedTensor->device());
}
TORCH_CHECK(
out.device() == notSkippedTensor->device(),
"torch.cat(): all input tensors and out must be on the same device, but inputs are on ",
notSkippedTensor->device(), " and out is on ", out.device());
c10::MemoryFormat memory_format = compute_output_memory_format(inputs);
std::vector<int64_t> size(notSkippedTensor->sizes().vec());
// Compute size of the result in the cat dimension
int64_t cat_dim_size = 0;
for (int i = 0; i < inputs.size(); i++) {
const Tensor &tensor = inputs[i];
if (should_skip(tensor)) {
continue;
}
check_cat_shape_except_dim(*notSkippedTensor, tensor, dimension, i);
cat_dim_size += at::native::size(tensor, dimension);
}
// Compute the size of the result
size[dimension] = cat_dim_size;
// skip resizing if size of result is same as expected
// raise a warning while resizing if output has one or more elements
// See https://github.com/pytorch/pytorch/pull/62560#discussion_r687363362
// for understanding why at::native::resize_output is not called directly.
// if (at::native::resize_output_check(out, size)) {
// TODO: restore the above, see https://github.com/pytorch/pytorch/issues/64709
if (out.sizes() != size) {
out.resize_(size, memory_format);
}
if (out.numel() == 0) {
return out;
}
// We parallelize the copy if all 6 conditions pass:
//
// 1. There is more than one input tensor
// 2. The out tensor is 32-bit indexable
// 3. The number of dimensions is <= 4
// 4. All input tensors are contiguous (output tensor may be non-contig)
// 5. All input tensors can use 32-bit indexing
// 6. All input tensors and the output have the same scalar type
const bool all32BitIndexable = std::all_of(inputs.begin(), inputs.end(),
[] (const Tensor& t) {
return at::cuda::detail::canUse32BitIndexMath(t);
});
const bool allContiguous = std::all_of(inputs.begin(), inputs.end(),
[=](const Tensor& t) {
return !t.defined() || t.is_contiguous(memory_format);
});
ScalarType firstType = inputs[0].scalar_type();
bool allSameType = std::all_of(inputs.begin(), inputs.end(),
[firstType](const Tensor& t) {
return t.scalar_type() == firstType;
});
allSameType = allSameType && (out.scalar_type() == firstType);
// We support contiguous inputs and non-contiguous inputs (<=4 dims) in different ways.
// For contiguous inputs, we don't need to pass stride meta data to the cuda kernel through
// constant memory, so we can pass more inputs per kernel launch.
// For non-contiguous inputs, we reduce the number of inputs passed to the cuda kernel due to
// the limited amount of constant memory.
if (inputs.size() > 1 &&
out.dim() <= CAT_ARRAY_MAX_INPUT_DIMS &&
at::cuda::detail::canUse32BitIndexMath(out) &&
allContiguous &&
all32BitIndexable &&
allSameType) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16,
out.scalar_type(), "cat_cuda", [&]() {
parallel_cat<scalar_t, CAT_ARRAY_BATCH_SIZE, 1>(out, inputs, dimension, nDims, memory_format);
});
} else if (inputs.size() > 1 &&
out.dim() <= CAT_ARRAY_MAX_INPUT_DIMS &&
at::cuda::detail::canUse32BitIndexMath(out) &&
nDims <= CAT_ARRAY_MAX_INPUT_DIMS &&
all32BitIndexable &&
allSameType &&
memory_format == c10::MemoryFormat::Contiguous) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16,
out.scalar_type(), "cat_cuda", [&]() {
parallel_cat<scalar_t, CAT_ARRAY_BATCH_SIZE/2, CAT_ARRAY_BATCH_SIZE/2>(out, inputs, dimension, nDims, memory_format);
});
} else {
int64_t offset = 0;
for (int j = 0; j < inputs.size(); j++)
{
if (should_skip(inputs[j])) continue;
int64_t dimSize = at::native::size(inputs[j], dimension);
Tensor nt = at::narrow(out, dimension, offset, dimSize);
copy_(nt, inputs[j]);
offset += dimSize;
}
}
return out;
}
} // namespace native
} // namespace at
|
f6e88777457ec5a3f68e1b6e5c800c940d88edda.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// to compile: nvcc vecAdd.cu -o vecAdd
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// CUDA kernel. Each thread takes care of one element of c
__global__ void vecAdd(float *a, float *b, float *c, int n){
// global index of the thread within the 1D grid
int tid = blockIdx.x * blockDim.x + threadIdx.x;
// make sure we stay within the bounds of arrays a, b, c
if (tid < n){
//each thread performs one element-wise addition
c[tid] = a[tid] + b[tid];
}
}
int main( int argc, char* argv[] )
{
// Size of vectors
int n = 100000;
// Host input vectors
float *h_a;
float *h_b;
//Host output vector
float *h_c;
// Device input vectors
float *d_a;
float *d_b;
//Device output vector
float *d_c;
// Size, in bytes, of each vector
size_t size = n*sizeof(float);
//////////////////////////////////////////
// Allocate memory for each vector on host
h_a = (float*) malloc (size);
h_b = (float*) malloc (size);
h_c = (float*) malloc (size);
/////////////////////////////////////////
// Allocate memory for each vector on GPU
hipMalloc((void**)&d_a, size);
hipMalloc((void**)&d_b, size);
hipMalloc((void**)&d_c, size);
int i;
// Initialize vectors on host
for( i = 0; i < n; i++ ) {
h_a[i] = sin(i)*sin(i);
h_b[i] = cos(i)*cos(i);
}
/////////////////////////////////////////
// Copy host vectors to device
hipMemcpy(d_a, h_a, size, hipMemcpyHostToDevice);
hipMemcpy(d_b, h_b, size, hipMemcpyHostToDevice);
int blockSize, gridSize;
/////////////////////////////////////////
// Number of threads in each thread block
blockSize = 512;
////////////////////////////////////////
// Number of thread blocks in grid
gridSize = (n + blockSize - 1) / blockSize;
///////////////////////////////////////
// Launch the kernel
hipLaunchKernelGGL(( vecAdd), dim3(gridSize), dim3(blockSize), 0, 0, d_a, d_b, d_c, n);
///////////////////////////////////////
// Copy array back to host
hipMemcpy(h_c, d_c, size, hipMemcpyDeviceToHost);
// Sum up vector c and print result divided by n, this should equal 1 within error
float sum = 0;
for(i=0; i<n; i++)
sum += h_c[i];
printf("final result: %f\n", sum/n);
/////////////////////////////////////////
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
////////////////////////////////////////
// Release host memory
free(h_a);
free(h_b);
free(h_c);
return 0;
}
|
f6e88777457ec5a3f68e1b6e5c800c940d88edda.cu
|
// to compile: nvcc vecAdd.cu -o vecAdd
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// CUDA kernel. Each thread takes care of one element of c
__global__ void vecAdd(float *a, float *b, float *c, int n){
// global index of the thread within the 1D grid
int tid = blockIdx.x * blockDim.x + threadIdx.x;
// make sure we stay within the bounds of arrays a, b, c
if (tid < n){
//each thread performs one element-wise addition
c[tid] = a[tid] + b[tid];
}
}
int main( int argc, char* argv[] )
{
// Size of vectors
int n = 100000;
// Host input vectors
float *h_a;
float *h_b;
//Host output vector
float *h_c;
// Device input vectors
float *d_a;
float *d_b;
//Device output vector
float *d_c;
// Size, in bytes, of each vector
size_t size = n*sizeof(float);
//////////////////////////////////////////
// Allocate memory for each vector on host
h_a = (float*) malloc (size);
h_b = (float*) malloc (size);
h_c = (float*) malloc (size);
/////////////////////////////////////////
// Allocate memory for each vector on GPU
cudaMalloc((void**)&d_a, size);
cudaMalloc((void**)&d_b, size);
cudaMalloc((void**)&d_c, size);
int i;
// Initialize vectors on host
for( i = 0; i < n; i++ ) {
h_a[i] = sin(i)*sin(i);
h_b[i] = cos(i)*cos(i);
}
/////////////////////////////////////////
// Copy host vectors to device
cudaMemcpy(d_a, h_a, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_b, size, cudaMemcpyHostToDevice);
int blockSize, gridSize;
/////////////////////////////////////////
// Number of threads in each thread block
blockSize = 512;
////////////////////////////////////////
// Number of thread blocks in grid
gridSize = (n + blockSize - 1) / blockSize;
///////////////////////////////////////
// Launch the kernel
vecAdd<<<gridSize, blockSize>>>(d_a, d_b, d_c, n);
///////////////////////////////////////
// Copy array back to host
cudaMemcpy(h_c, d_c, size, cudaMemcpyDeviceToHost);
// Sum up vector c and print result divided by n, this should equal 1 within error
float sum = 0;
for(i=0; i<n; i++)
sum += h_c[i];
printf("final result: %f\n", sum/n);
/////////////////////////////////////////
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
////////////////////////////////////////
// Release host memory
free(h_a);
free(h_b);
free(h_c);
return 0;
}
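// [Added sketch, not part of the original example] The CUDA calls above
// discard their return codes. If error checking were wanted, a minimal
// macro along these lines could wrap each call; CUDA_CHECK is a name
// introduced here purely for illustration:
#define CUDA_CHECK(call) \
    do { \
        cudaError_t err_ = (call); \
        if (err_ != cudaSuccess) { \
            fprintf(stderr, "CUDA error %s at %s:%d\n", \
                    cudaGetErrorString(err_), __FILE__, __LINE__); \
            exit(EXIT_FAILURE); \
        } \
    } while (0)
// Example usage: CUDA_CHECK(cudaMemcpy(d_a, h_a, size, cudaMemcpyHostToDevice));
// and after a kernel launch: CUDA_CHECK(cudaGetLastError());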
|
ad65528b180ae55a0b864fa8409032051a7246d4.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <math.h>
#include <vector>
#include <iostream>
const int NUM_THREADS_PER_BLOCK_SINGLE = 8;
const int NUM_THREADS_PER_BLOCK = NUM_THREADS_PER_BLOCK_SINGLE * NUM_THREADS_PER_BLOCK_SINGLE;
__shared__ float F[NUM_THREADS_PER_BLOCK][3][3];
__shared__ float FTransposeF[NUM_THREADS_PER_BLOCK][3][3];
__shared__ float FInverseTranspose[NUM_THREADS_PER_BLOCK][3][3];
__shared__ float FirstPiolaKirchoffTensor[NUM_THREADS_PER_BLOCK][3][3];
__shared__ float Gradient[NUM_THREADS_PER_BLOCK][3][4];
__shared__ int LocalIndices[NUM_THREADS_PER_BLOCK][4];
__shared__ float LocalMasses[NUM_THREADS_PER_BLOCK][4];
__device__ float sqr(float x)
{
return x * x;
}
__device__ float traceFTransposeF(int idx)
{
return FTransposeF[idx][0][0] + FTransposeF[idx][1][1] + FTransposeF[idx][2][2];
}
__device__ float determinantFTransposeF(int idx)
{
return FTransposeF[idx][0][0]
* (FTransposeF[idx][1][1] * FTransposeF[idx][2][2] - FTransposeF[idx][1][2] * FTransposeF[idx][2][1])
- FTransposeF[idx][0][1]
* (FTransposeF[idx][1][0] * FTransposeF[idx][2][2] - FTransposeF[idx][1][2] * FTransposeF[idx][2][0])
+ FTransposeF[idx][0][2]
* (FTransposeF[idx][1][0] * FTransposeF[idx][2][1] - FTransposeF[idx][1][1] * FTransposeF[idx][2][0]);
}
__device__ float determinantF(int idx)
{
return F[idx][0][0]
* (F[idx][1][1] * F[idx][2][2] - F[idx][1][2] * F[idx][2][1])
- F[idx][0][1]
* (F[idx][1][0] * F[idx][2][2] - F[idx][1][2] * F[idx][2][0])
+ F[idx][0][2]
* (F[idx][1][0] * F[idx][2][1] - F[idx][1][1] * F[idx][2][0]);
}
__device__ void calculateF(int idx, float* positions, float* refShapeMatrixInverse)
{
//1. Calculate Deformed Shape Matrix
FirstPiolaKirchoffTensor[idx][0][0] = positions[LocalIndices[idx][0] * 3 + 0] - positions[LocalIndices[idx][3] * 3 + 0];
FirstPiolaKirchoffTensor[idx][1][0] = positions[LocalIndices[idx][0] * 3 + 1] - positions[LocalIndices[idx][3] * 3 + 1];
FirstPiolaKirchoffTensor[idx][2][0] = positions[LocalIndices[idx][0] * 3 + 2] - positions[LocalIndices[idx][3] * 3 + 2];
FirstPiolaKirchoffTensor[idx][0][1] = positions[LocalIndices[idx][1] * 3 + 0] - positions[LocalIndices[idx][3] * 3 + 0];
FirstPiolaKirchoffTensor[idx][1][1] = positions[LocalIndices[idx][1] * 3 + 1] - positions[LocalIndices[idx][3] * 3 + 1];
FirstPiolaKirchoffTensor[idx][2][1] = positions[LocalIndices[idx][1] * 3 + 2] - positions[LocalIndices[idx][3] * 3 + 2];
FirstPiolaKirchoffTensor[idx][0][2] = positions[LocalIndices[idx][2] * 3 + 0] - positions[LocalIndices[idx][3] * 3 + 0];
FirstPiolaKirchoffTensor[idx][1][2] = positions[LocalIndices[idx][2] * 3 + 1] - positions[LocalIndices[idx][3] * 3 + 1];
FirstPiolaKirchoffTensor[idx][2][2] = positions[LocalIndices[idx][2] * 3 + 2] - positions[LocalIndices[idx][3] * 3 + 2];
//printf("Local Indices: \n");
//for (int i = 0; i < 4; ++i)
//{
// printf("%d, ", LocalIndices[idx][i]);
//}
//printf("\n");
//
//printf("Particles: \n");
//for (int i = 0; i < 4; ++i)
//{
// printf("%4.4f ,", positions[LocalIndices[idx][i] * 3 + 0]);
// printf("%4.4f ,", positions[LocalIndices[idx][i] * 3 + 1]);
// printf("%4.4f \n", positions[LocalIndices[idx][i] * 3 + 2]);
//}
//printf("Particles END \n");
//printf("\n");
//printf("Ref Shape Matrix: \n");
//for (int row = 0; row < 3; ++row)
//{
// for (int col = 0; col < 3; ++col)
// {
// printf("%4.4f,", refShapeMatrixInverse[idx * 3 * 3 + row * 3 + col]);
// }
// printf("\n");
//}
//printf("\n \n");
//2. Multiply
for (int row = 0; row < 3; ++row)
{
for (int col = 0; col < 3; ++col)
{
float sum = 0.0f;
for (int i = 0; i < 3; ++i)
{
sum += FirstPiolaKirchoffTensor[idx][row][i] * refShapeMatrixInverse[idx * 3 * 3 + i * 3 + col];
}
F[idx][row][col] = sum;
}
}
}
__device__ void calculateFirstPiolaKirchoffTensor_NEO_HOOKEAN(int idx, float mu, float lambda, float I3)
{
//1. Copy over F multiplied with mu
for (int row = 0; row < 3; ++row)
{
for (int col = 0; col < 3; ++col)
{
FirstPiolaKirchoffTensor[idx][row][col] = F[idx][row][col] * mu;
}
}
//3. Subtract mu times FInverseTranspose
for (int row = 0; row < 3; ++row)
{
for (int col = 0; col < 3; ++col)
{
FirstPiolaKirchoffTensor[idx][row][col] -= FInverseTranspose[idx][row][col] * mu;
}
}
//4. Add (lambda * logI3) / 2.0 * FInverseTranspose
for (int row = 0; row < 3; ++row)
{
for (int col = 0; col < 3; ++col)
{
FirstPiolaKirchoffTensor[idx][row][col] += FInverseTranspose[idx][row][col] * ((lambda * log(I3)) / 2.0f);
}
}
}
__device__ float calculateStrainEnergy_NEO_HOOKEAN(float volume, float lambda, float mu, float I1, float I3)
{
return volume * (0.5f * mu * (I1 - log(I3) - 3.0f) + (lambda / 8.0f) * (log(I3) * log(I3)));
}
__device__ void calculateStrainEnergyGradient_NEO_HOOKEAN(int idx, float volume, float* refShapeMatrixInverse)
{
//1. Copy refShapeMatrixInverse from global memory
for (int row = 0; row < 3; ++row)
{
for (int col = 0; col < 3; ++col)
{
Gradient[idx][row][col] = refShapeMatrixInverse[idx * 3 + row * 3 + col];
}
}
//2. Multiply by volume
for (int row = 0; row < 3; ++row)
{
for (int col = 0; col < 3; ++col)
{
Gradient[idx][row][col] *= volume;
}
}
//3. Multiply with First Piola-Kirchoff Stress tensor
for (int row = 0; row < 3; ++row)
{
for (int col = 0; col < 3; ++col)
{
float sum = 0.0f;
for (int i = 0; i < 3; ++i)
{
sum += Gradient[idx][row][i] * FirstPiolaKirchoffTensor[idx][i][col];
}
FTransposeF[idx][col][row] = sum;
}
}
//4. Copy back
for (int row = 0; row < 3; ++row)
{
for (int col = 0; col < 3; ++col)
{
Gradient[idx][row][col] = FTransposeF[idx][row][col];
}
}
//5. Calculate last column
for (int row = 0; row < 3; ++row)
{
float sum = 0.0f;
for (int col = 0; col < 3; ++col)
{
sum += Gradient[idx][row][col];
}
Gradient[idx][row][3] = -sum;
}
}
__device__ void calculateFTransposeF(int idx)
{
//Combine all into one loop in future!
//1. Copy over F
for (int row = 0; row < 3; ++row)
{
for (int col = 0; col < 3; ++col)
{
FTransposeF[idx][row][col] = F[idx][row][col];
}
}
//2. Transpose F (Subsume into multiplication later!)
float temp;
temp = FTransposeF[idx][0][1];
FTransposeF[idx][0][1] = FTransposeF[idx][1][0];
FTransposeF[idx][1][0] = temp;
temp = FTransposeF[idx][0][2];
FTransposeF[idx][0][2] = FTransposeF[idx][2][0];
FTransposeF[idx][2][0] = temp;
temp = FTransposeF[idx][1][2];
FTransposeF[idx][1][2] = FTransposeF[idx][2][1];
FTransposeF[idx][2][1] = temp;
//printf("FTranspose: \n");
//for (int row = 0; row < 3; ++row)
//{
// for (int col = 0; col < 3; ++col)
// {
// printf("%4.8f,", FTransposeF[idx][row][col]);
// }
// printf("\n");
//}
//printf("\n \n");
//3. Multiply with F
for (int row = 0; row < 3; ++row)
{
for (int col = 0; col < 3; ++col)
{
float sum = 0.0f;
for (int i = 0; i < 3; ++i)
{
sum += FTransposeF[idx][row][i] * F[idx][i][col];
}
FirstPiolaKirchoffTensor[idx][row][col] = sum;
}
}
//Copy back
for (int row = 0; row < 3; ++row)
{
for (int col = 0; col < 3; ++col)
{
FTransposeF[idx][row][col] = FirstPiolaKirchoffTensor[idx][row][col];
}
}
}
__device__ void calculateFInverseTranspose(int idx)
{
//1. Calculate cofactors
FInverseTranspose[idx][0][0] = F[idx][1][1] * F[idx][2][2] - F[idx][2][1] * F[idx][1][2];
FInverseTranspose[idx][0][1] = -(F[idx][1][0] * F[idx][2][2] - F[idx][2][0] * F[idx][1][2]);
FInverseTranspose[idx][0][2] = F[idx][1][0] * F[idx][2][1] - F[idx][2][0] * F[idx][1][1];
FInverseTranspose[idx][1][0] = -(F[idx][0][1] * F[idx][2][2] - F[idx][2][1] * F[idx][0][2]);
FInverseTranspose[idx][1][1] = F[idx][0][0] * F[idx][2][2] - F[idx][2][0] * F[idx][0][2];
FInverseTranspose[idx][1][2] = -(F[idx][0][0] * F[idx][2][1] - F[idx][2][0] * F[idx][0][1]);
FInverseTranspose[idx][2][0] = F[idx][0][1] * F[idx][1][2] - F[idx][1][1] * F[idx][0][2];
FInverseTranspose[idx][2][1] = -(F[idx][0][0] * F[idx][1][2] - F[idx][1][0] * F[idx][0][2]);
FInverseTranspose[idx][2][2] = F[idx][0][0] * F[idx][1][1] - F[idx][1][0] * F[idx][0][1];
//2. Transpose (Already included in the co-factor calculation)
//float temp;
//temp = FInverseTranspose[idx][0][1];
//FInverseTranspose[idx][0][1] = FInverseTranspose[idx][1][0];
//FInverseTranspose[idx][1][0] = temp;
//temp = FInverseTranspose[idx][0][2];
//FInverseTranspose[idx][0][2] = FInverseTranspose[idx][2][0];
//FInverseTranspose[idx][2][0] = temp;
//temp = FInverseTranspose[idx][1][2];
//FInverseTranspose[idx][1][2] = FInverseTranspose[idx][2][1];
//FInverseTranspose[idx][2][1] = temp;
//3. Calculate the determinant
float determinant = determinantF(idx);
//printf("Determinant of F: %4.8f \n", determinant);
//4. Multiply
for (int row = 0; row < 3; ++row)
{
for (int col = 0; col < 3; ++col)
{
FInverseTranspose[idx][row][col] /= determinant;
}
}
}
__device__ float squaredNormGradient(int idx, int particleIdx)
{
return sqrtf(sqr(Gradient[idx][0][particleIdx])
+ sqr(Gradient[idx][1][particleIdx])
+ sqr(Gradient[idx][2][particleIdx]));
}
__device__ float calculateLagrangeMultiplierDenominator(int idx, float* inverseMass)
{
float denominator = 0.0f;
for (int i = 0; i < 4; ++i)
{
denominator += LocalMasses[idx][i] * squaredNormGradient(idx, i);
//printf("Denominator Component: %4.8f \n", inverseMass[LocalIndices[idx][i]] * squaredNormGradient(idx, i));
}
//printf("Denominator: %4.8f \n", denominator);
return denominator;
}
__device__ void updatePositions(int idx, float lagrangeMultiplier, float* positions, float* inverseMass)
{
for (int i = 0; i < 4; ++i)
{
for (int j = 0; j < 3; ++j)
{
atomicAdd(&positions[LocalIndices[idx][i] * 3 + j], LocalMasses[idx][i] * lagrangeMultiplier * Gradient[idx][j][i]);
//printf("Position Update %4.8f \n", LocalMasses[idx][i] * lagrangeMultiplier * Gradient[idx][j][i]);
}
printf("\n");
}
}
__device__ void getIndices(int idx, int* indices)
{
for (int i = 0; i < 4; ++i)
{
LocalIndices[idx][i] = indices[idx * 4 + i];
}
}
__device__ void getMasses(int idx, float* masses)
{
for (int i = 0; i < 4; ++i)
{
LocalMasses[idx][i] = masses[LocalIndices[idx][i]];
}
}
__global__ void solveFEMConstraint(float* positions, int* indices, float* inverseMass, float* volume, float* refShapeMatrixInverse,
float lambda, float mu)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
getIndices(idx, indices);
getMasses(idx, inverseMass);
//1. Calculate Deformation Gradient F
calculateF(idx, positions, refShapeMatrixInverse);
//printf("F: \n");
//for (int row = 0; row < 3; ++row)
//{
// for (int col = 0; col < 3; ++col)
// {
// printf("%4.8f,", F[idx][row][col]);
// }
// printf("\n");
//}
//printf("\n \n");
//2. Compute Cauchy Tensors
calculateFInverseTranspose(idx);
//printf("FInverseTranspose: \n");
//for (int row = 0; row < 3; ++row)
//{
// for (int col = 0; col < 3; ++col)
// {
// printf("%4.8f,", FInverseTranspose[idx][row][col]);
// }
// printf("\n");
//}
//printf("\n \n");
calculateFTransposeF(idx);
//printf("FTransposeF: \n");
//for (int row = 0; row < 3; ++row)
//{
// for (int col = 0; col < 3; ++col)
// {
// printf("%4.8f,", FTransposeF[idx][row][col]);
// }
// printf("\n");
//}
//printf("\n \n");
//3. Compute Invariants
float I1 = traceFTransposeF(idx);
float I3 = determinantFTransposeF(idx);
//printf("I1 = %4.8f \n", I1);
//printf("I3 = %4.8f \n", I3);
//4. Calculate First Piola-Kirchoff Stress Tensor
calculateFirstPiolaKirchoffTensor_NEO_HOOKEAN(idx, mu, lambda, I3);
//printf("PF: \n");
//for (int row = 0; row < 3; ++row)
//{
// for (int col = 0; col < 3; ++col)
// {
// printf("%4.8f,", FirstPiolaKirchoffTensor[idx][row][col]);
// }
// printf("\n");
//}
//printf("\n \n");
//5. Calculate StrainEnergy
float strainEnergy = calculateStrainEnergy_NEO_HOOKEAN(volume[idx], lambda, mu, I1, I3);
//printf("StrainEnergy = %4.8f \n", strainEnergy);
//6. Calculate Strain Energy Gradient
calculateStrainEnergyGradient_NEO_HOOKEAN(idx, volume[idx], refShapeMatrixInverse);
//printf("Strain Energy Gradient: \n");
//for (int row = 0; row < 3; ++row)
//{
// for (int col = 0; col < 4; ++col)
// {
// printf("%4.8f,", Gradient[idx][row][col]);
// }
// printf("\n");
//}
//printf("\n \n");
//7. Calculate Lagrange Multiplier
float lagrangeMultiplier = - (strainEnergy / calculateLagrangeMultiplierDenominator(idx, inverseMass));
//printf("lagrangeMultiplier = %4.8f \n", lagrangeMultiplier);
//8. Update Positions
updatePositions(idx, lagrangeMultiplier, positions, inverseMass);
}
hipError_t projectConstraints(std::vector<int>& indices,
std::vector<float>& originalPositions,
std::vector<float>& positions,
std::vector<float>& inverseMasses,
std::vector<float>& refShapeMatrixInverses,
std::vector<float>& volumes,
std::vector<float>& positions_result,
float lambda, float mu);
void projectConstraintsHOST(std::vector<int>& indices,
std::vector<float>& originalPositions,
std::vector<float>& positions,
std::vector<float>& inverseMasses,
std::vector<float>& refShapeMatrixInverses,
std::vector<float>& volumes,
std::vector<float>& positions_result,
float lambda, float mu);
void setUpSystem(std::vector<int>& indices, std::vector<float>& originalPositions,
std::vector<float>& positions,
std::vector<float>& inverseMasses,
std::vector<float>& refShapeMatrixInverses,
std::vector<float>& volumes,
float gravity, float deltaT)
{
originalPositions.push_back(0.0f); originalPositions.push_back(0.0f); originalPositions.push_back(0.0f);
originalPositions.push_back(-0.946f); originalPositions.push_back(0.0f); originalPositions.push_back(-1.114f);
originalPositions.push_back(0.689f); originalPositions.push_back(0.515f); originalPositions.push_back(-1.114f);
originalPositions.push_back(0.689f); originalPositions.push_back(-0.757f); originalPositions.push_back(-1.114f);
originalPositions.push_back(0.0f); originalPositions.push_back(0.0f); originalPositions.push_back(-2.576f);
indices.push_back(3); indices.push_back(0); indices.push_back(2); indices.push_back(1);
indices.push_back(3); indices.push_back(4); indices.push_back(1); indices.push_back(2);
for (int i = 0; i < 5; ++i)
{
inverseMasses.push_back(1.0f);
}
inverseMasses[0] = 0.0f;
for (int i = 0; i < originalPositions.size(); ++i)
{
positions.push_back(originalPositions[i]);
}
//apply one time step of deformations
for (int i = 0; i < 5; ++i)
{
positions[i * 3 + 1] += inverseMasses[i] * gravity * deltaT;
}
//FROM MATLAB
volumes.push_back(0.38613f);
volumes.push_back(0.50676f);
refShapeMatrixInverses.push_back(0.2476294885850020f);
refShapeMatrixInverses.push_back(-0.786163522012579f);
refShapeMatrixInverses.push_back(-0.210285005566797f);
refShapeMatrixInverses.push_back(0.0000000000000000f);
refShapeMatrixInverses.push_back(0.0000000000000000f);
refShapeMatrixInverses.push_back(0.8976660682226210f);
refShapeMatrixInverses.push_back(0.3639913065220320f);
refShapeMatrixInverses.push_back(0.7861635220125790f);
refShapeMatrixInverses.push_back(-0.309098542163233f);
refShapeMatrixInverses.push_back(0.0000000000000000f);
refShapeMatrixInverses.push_back(0.2476294885850020f);
refShapeMatrixInverses.push_back(-0.786163522012579f);
refShapeMatrixInverses.push_back(0.1602308455550010f);
refShapeMatrixInverses.push_back(0.0000000000000000f);
refShapeMatrixInverses.push_back(0.0000000000000000f);
refShapeMatrixInverses.push_back(-0.683994528043776f);
refShapeMatrixInverses.push_back(-0.611620795107034f);
refShapeMatrixInverses.push_back(0.0000000000000000f);
refShapeMatrixInverses.push_back(0.2882398959156950f);
}
int main()
{
std::vector<int> indices;
std::vector<float> originalPositions;
std::vector<float> positions;
std::vector<float> inverseMasses;
std::vector<float> refShapeMatrixInverses;
std::vector<float> volumes;
float deltaT = 0.5f;
float gravity = -9.8f;
float mu = 0.769231f;
float lambda = 1.15385f;
setUpSystem(indices, originalPositions, positions, inverseMasses, refShapeMatrixInverses, volumes, gravity, deltaT);
std::vector<float> positionsResultDevice(positions.size());
std::vector<float> positionsResultHost(positions.size());
//CPU
projectConstraintsHOST(indices, originalPositions, positions,
inverseMasses, refShapeMatrixInverses, volumes, positionsResultHost, lambda, mu);
//GPU
hipError_t cudaStatus = projectConstraints(indices, originalPositions, positions,
inverseMasses, refShapeMatrixInverses, volumes, positionsResultDevice, lambda, mu);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "Critical Error, aborting...");
return 1;
}
// hipDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
return 1;
}
//Print Some Results
std::cout << "INPUT POSITIONS: " << std::endl;
for (int row = 0; row < 5; ++row)
{
for (int col = 0; col < 3; ++col)
{
std::cout << positions[row * 3 + col] << ", ";
}
std::cout << std::endl;
}
std::cout << std::endl << std::endl;
std::cout << "AFTER PROJECION HOST: " << std::endl;
for (int row = 0; row < 5; ++row)
{
for (int col = 0; col < 3; ++col)
{
std::cout << positionsResultHost[row * 3 + col] << ", ";
}
std::cout << std::endl;
}
std::cout << std::endl << std::endl;
std::cout << "AFTER PROJECION DEVICE: " << std::endl;
for (int row = 0; row < 5; ++row)
{
for (int col = 0; col < 3; ++col)
{
std::cout << positionsResultDevice[row * 3 + col] << ", ";
}
std::cout << std::endl;
}
std::cout << std::endl << std::endl;
return 0;
}
hipError_t cudaErrorWrapper(hipError_t status)
{
if (status != hipSuccess) {
fprintf(stderr, "Critical Error occured!");
std::cout << "ERROR Details: " << hipGetErrorString(status) << std::endl;
}
return status;
}
void getCudaDeviceProperties(int device)
{
hipDeviceProp_t properties;
hipGetDeviceProperties(&properties, device);
std::cout << "Compute Capabilities for " << properties.name << " : " << std::endl;
std::cout << "Major: " << properties.major << ", Minor: " << properties.minor << std::endl;
std::cout << "Details: " << std::endl;
std::cout << " Num of SM : " << properties.multiProcessorCount << std::endl;
std::cout << " Mem per Block: " << properties.sharedMemPerBlock << std::endl;
std::cout << " Mem per SM : " << properties.sharedMemPerMultiprocessor << std::endl;
}
hipError_t projectConstraints(std::vector<int>& indices, std::vector<float>& originalPositions,
std::vector<float>& positions,
std::vector<float>& inverseMasses,
std::vector<float>& refShapeMatrixInverses,
std::vector<float>& volumes,
std::vector<float>& positions_result,
float lambda, float mu)
{
float* dev_positions;
float* dev_inverseMasses;
int* dev_indices;
float* dev_refShapeMatrixInverses;
float* dev_volumes;
hipError_t deviceStatus;
//Allocate memory
int deviceCount = 0;
deviceStatus = hipGetDeviceCount(&deviceCount);
std::cout << "Num CUDA Devices Found: " << deviceCount << std::endl;
deviceStatus = cudaErrorWrapper(hipSetDevice(0));
getCudaDeviceProperties(0);
deviceStatus = cudaErrorWrapper(hipMalloc((void**)&dev_indices, indices.size() * sizeof(int)));
deviceStatus = cudaErrorWrapper(hipMalloc((void**)&dev_positions, positions.size() * sizeof(float)));
deviceStatus = cudaErrorWrapper(hipMalloc((void**)&dev_inverseMasses, inverseMasses.size() * sizeof(float)));
deviceStatus = cudaErrorWrapper(hipMalloc((void**)&dev_refShapeMatrixInverses, refShapeMatrixInverses.size() * sizeof(float)));
deviceStatus = cudaErrorWrapper(hipMalloc((void**)&dev_volumes, volumes.size() * sizeof(float)));
//Cpy memory
deviceStatus = cudaErrorWrapper(hipMemcpy(dev_indices, &indices[0], indices.size() * sizeof(int), hipMemcpyHostToDevice));
deviceStatus = cudaErrorWrapper(hipMemcpy(dev_positions, &positions[0], positions.size() * sizeof(float), hipMemcpyHostToDevice));
deviceStatus = cudaErrorWrapper(hipMemcpy(dev_inverseMasses, &inverseMasses[0], inverseMasses.size() * sizeof(float), hipMemcpyHostToDevice));
deviceStatus = cudaErrorWrapper(hipMemcpy(dev_refShapeMatrixInverses, &refShapeMatrixInverses[0], refShapeMatrixInverses.size() * sizeof(float), hipMemcpyHostToDevice));
deviceStatus = cudaErrorWrapper(hipMemcpy(dev_volumes, &volumes[0], volumes.size() * sizeof(float), hipMemcpyHostToDevice));
//Execute Kernel
hipLaunchKernelGGL(( solveFEMConstraint), dim3(1), dim3(1), 0, 0, dev_positions, dev_indices, dev_inverseMasses, dev_volumes, dev_refShapeMatrixInverses, lambda, mu);
hipDeviceSynchronize();
//Cpy memory back
positions_result.resize(positions.size());
deviceStatus = cudaErrorWrapper(hipMemcpy(&positions_result[0], dev_positions, positions_result.size() * sizeof(float), hipMemcpyDeviceToHost));
//Free memory
hipFree(dev_positions);
hipFree(dev_inverseMasses);
hipFree(dev_indices);
hipFree(dev_refShapeMatrixInverses);
hipFree(dev_volumes);
return deviceStatus;
}
void projectConstraintsHOST(std::vector<int>& indices,
std::vector<float>& originalPositions,
std::vector<float>& positions,
std::vector<float>& inverseMasses,
std::vector<float>& refShapeMatrixInverses,
std::vector<float>& volumes,
std::vector<float>& positions_result,
float lambda, float mu)
{
positions_result.clear();
positions_result.push_back(0.000000000000000000f);
positions_result.push_back(0.000000000000000000f);
positions_result.push_back(0.000000000000000000f);
positions_result.push_back(-0.86112528478748700f);
positions_result.push_back(-4.37303501877824000f);
positions_result.push_back(-1.16888554066580000f);
positions_result.push_back(0.645803837424706000f);
positions_result.push_back(-4.08169452857322000f);
positions_result.push_back(-1.97921356664365000f);
positions_result.push_back(0.656806413004164000f);
positions_result.push_back(-5.20915823509948000f);
positions_result.push_back(-0.28630813323995600f);
positions_result.push_back(-0.00948496564138351f);
positions_result.push_back(-4.91178046790357000f);
positions_result.push_back(-2.48359275945060000f);
}
|
ad65528b180ae55a0b864fa8409032051a7246d4.cu
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <math.h>
#include <vector>
#include <iostream>
const int NUM_THREADS_PER_BLOCK_SINGLE = 8;
const int NUM_THREADS_PER_BLOCK = NUM_THREADS_PER_BLOCK_SINGLE * NUM_THREADS_PER_BLOCK_SINGLE;
__shared__ float F[NUM_THREADS_PER_BLOCK][3][3];
__shared__ float FTransposeF[NUM_THREADS_PER_BLOCK][3][3];
__shared__ float FInverseTranspose[NUM_THREADS_PER_BLOCK][3][3];
__shared__ float FirstPiolaKirchoffTensor[NUM_THREADS_PER_BLOCK][3][3];
__shared__ float Gradient[NUM_THREADS_PER_BLOCK][3][4];
__shared__ int LocalIndices[NUM_THREADS_PER_BLOCK][4];
__shared__ float LocalMasses[NUM_THREADS_PER_BLOCK][4];
__device__ float sqr(float x)
{
return x * x;
}
__device__ float traceFTransposeF(int idx)
{
return FTransposeF[idx][0][0] + FTransposeF[idx][1][1] + FTransposeF[idx][2][2];
}
__device__ float determinantFTransposeF(int idx)
{
return FTransposeF[idx][0][0]
* (FTransposeF[idx][1][1] * FTransposeF[idx][2][2] - FTransposeF[idx][1][2] * FTransposeF[idx][2][1])
- FTransposeF[idx][0][1]
* (FTransposeF[idx][1][0] * FTransposeF[idx][2][2] - FTransposeF[idx][1][2] * FTransposeF[idx][2][0])
+ FTransposeF[idx][0][2]
* (FTransposeF[idx][1][0] * FTransposeF[idx][2][1] - FTransposeF[idx][1][1] * FTransposeF[idx][2][0]);
}
__device__ float determinantF(int idx)
{
return F[idx][0][0]
* (F[idx][1][1] * F[idx][2][2] - F[idx][1][2] * F[idx][2][1])
- F[idx][0][1]
* (F[idx][1][0] * F[idx][2][2] - F[idx][1][2] * F[idx][2][0])
+ F[idx][0][2]
* (F[idx][1][0] * F[idx][2][1] - F[idx][1][1] * F[idx][2][0]);
}
__device__ void calculateF(int idx, float* positions, float* refShapeMatrixInverse)
{
//1. Calculate Deformed Shape Matrix
FirstPiolaKirchoffTensor[idx][0][0] = positions[LocalIndices[idx][0] * 3 + 0] - positions[LocalIndices[idx][3] * 3 + 0];
FirstPiolaKirchoffTensor[idx][1][0] = positions[LocalIndices[idx][0] * 3 + 1] - positions[LocalIndices[idx][3] * 3 + 1];
FirstPiolaKirchoffTensor[idx][2][0] = positions[LocalIndices[idx][0] * 3 + 2] - positions[LocalIndices[idx][3] * 3 + 2];
FirstPiolaKirchoffTensor[idx][0][1] = positions[LocalIndices[idx][1] * 3 + 0] - positions[LocalIndices[idx][3] * 3 + 0];
FirstPiolaKirchoffTensor[idx][1][1] = positions[LocalIndices[idx][1] * 3 + 1] - positions[LocalIndices[idx][3] * 3 + 1];
FirstPiolaKirchoffTensor[idx][2][1] = positions[LocalIndices[idx][1] * 3 + 2] - positions[LocalIndices[idx][3] * 3 + 2];
FirstPiolaKirchoffTensor[idx][0][2] = positions[LocalIndices[idx][2] * 3 + 0] - positions[LocalIndices[idx][3] * 3 + 0];
FirstPiolaKirchoffTensor[idx][1][2] = positions[LocalIndices[idx][2] * 3 + 1] - positions[LocalIndices[idx][3] * 3 + 1];
FirstPiolaKirchoffTensor[idx][2][2] = positions[LocalIndices[idx][2] * 3 + 2] - positions[LocalIndices[idx][3] * 3 + 2];
//printf("Local Indices: \n");
//for (int i = 0; i < 4; ++i)
//{
// printf("%d, ", LocalIndices[idx][i]);
//}
//printf("\n");
//
//printf("Particles: \n");
//for (int i = 0; i < 4; ++i)
//{
// printf("%4.4f ,", positions[LocalIndices[idx][i] * 3 + 0]);
// printf("%4.4f ,", positions[LocalIndices[idx][i] * 3 + 1]);
// printf("%4.4f \n", positions[LocalIndices[idx][i] * 3 + 2]);
//}
//printf("Particles END \n");
//printf("\n");
//printf("Ref Shape Matrix: \n");
//for (int row = 0; row < 3; ++row)
//{
// for (int col = 0; col < 3; ++col)
// {
// printf("%4.4f,", refShapeMatrixInverse[idx * 3 * 3 + row * 3 + col]);
// }
// printf("\n");
//}
//printf("\n \n");
//2. Multiply
for (int row = 0; row < 3; ++row)
{
for (int col = 0; col < 3; ++col)
{
float sum = 0.0f;
for (int i = 0; i < 3; ++i)
{
sum += FirstPiolaKirchoffTensor[idx][row][i] * refShapeMatrixInverse[idx * 3 * 3 + i * 3 + col];
}
F[idx][row][col] = sum;
}
}
}
__device__ void calculateFirstPiolaKirchoffTensor_NEO_HOOKEAN(int idx, float mu, float lambda, float I3)
{
//1. Copy over F multiplied with mu
for (int row = 0; row < 3; ++row)
{
for (int col = 0; col < 3; ++col)
{
FirstPiolaKirchoffTensor[idx][row][col] = F[idx][row][col] * mu;
}
}
//3. Subtract mu times FInverseTranspose
for (int row = 0; row < 3; ++row)
{
for (int col = 0; col < 3; ++col)
{
FirstPiolaKirchoffTensor[idx][row][col] -= FInverseTranspose[idx][row][col] * mu;
}
}
//4. Add (lambda * logI3) / 2.0 * FInverseTranspose
for (int row = 0; row < 3; ++row)
{
for (int col = 0; col < 3; ++col)
{
FirstPiolaKirchoffTensor[idx][row][col] += FInverseTranspose[idx][row][col] * ((lambda * log(I3)) / 2.0f);
}
}
}
__device__ float calculateStrainEnergy_NEO_HOOKEAN(float volume, float lambda, float mu, float I1, float I3)
{
return volume * (0.5f * mu * (I1 - log(I3) - 3.0f) + (lambda / 8.0f) * (log(I3) * log(I3)));
}
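// [Added note, not part of the original source] For reference, with
// C = F^T * F, I1 = tr(C) and I3 = det(C) = det(F)^2, the value returned
// above is the compressible Neo-Hookean energy density integrated over the
// undeformed element volume V:
//
// Psi = V * ( mu/2 * (I1 - ln(I3) - 3) + lambda/8 * (ln(I3))^2 )
//
// Its gradient with respect to F is the first Piola-Kirchhoff stress
// assembled in calculateFirstPiolaKirchoffTensor_NEO_HOOKEAN:
//
// P = mu * (F - F^{-T}) + (lambda/2) * ln(I3) * F^{-T}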
__device__ void calculateStrainEnergyGradient_NEO_HOOKEAN(int idx, float volume, float* refShapeMatrixInverse)
{
//1. Copy refShapeMatrixInverse from global memory
for (int row = 0; row < 3; ++row)
{
for (int col = 0; col < 3; ++col)
{
Gradient[idx][row][col] = refShapeMatrixInverse[idx * 3 + row * 3 + col];
}
}
//2. Multiply by volume
for (int row = 0; row < 3; ++row)
{
for (int col = 0; col < 3; ++col)
{
Gradient[idx][row][col] *= volume;
}
}
//3. Multiply with First Piola-Kirchoff Stress tensor
for (int row = 0; row < 3; ++row)
{
for (int col = 0; col < 3; ++col)
{
float sum = 0.0f;
for (int i = 0; i < 3; ++i)
{
sum += Gradient[idx][row][i] * FirstPiolaKirchoffTensor[idx][i][col];
}
FTransposeF[idx][col][row] = sum;
}
}
//4. Copy back
for (int row = 0; row < 3; ++row)
{
for (int col = 0; col < 3; ++col)
{
Gradient[idx][row][col] = FTransposeF[idx][row][col];
}
}
//5. Calculate last column
for (int row = 0; row < 3; ++row)
{
float sum = 0.0f;
for (int col = 0; col < 3; ++col)
{
sum += Gradient[idx][row][col];
}
Gradient[idx][row][3] = -sum;
}
}
__device__ void calculateFTransposeF(int idx)
{
//Combine all into one loop in future!
//1. Copy over F
for (int row = 0; row < 3; ++row)
{
for (int col = 0; col < 3; ++col)
{
FTransposeF[idx][row][col] = F[idx][row][col];
}
}
//2. Transpose F (Subsume into multiplication later!)
float temp;
temp = FTransposeF[idx][0][1];
FTransposeF[idx][0][1] = FTransposeF[idx][1][0];
FTransposeF[idx][1][0] = temp;
temp = FTransposeF[idx][0][2];
FTransposeF[idx][0][2] = FTransposeF[idx][2][0];
FTransposeF[idx][2][0] = temp;
temp = FTransposeF[idx][1][2];
FTransposeF[idx][1][2] = FTransposeF[idx][2][1];
FTransposeF[idx][2][1] = temp;
//printf("FTranspose: \n");
//for (int row = 0; row < 3; ++row)
//{
// for (int col = 0; col < 3; ++col)
// {
// printf("%4.8f,", FTransposeF[idx][row][col]);
// }
// printf("\n");
//}
//printf("\n \n");
//3. Multiply with F
for (int row = 0; row < 3; ++row)
{
for (int col = 0; col < 3; ++col)
{
float sum = 0.0f;
for (int i = 0; i < 3; ++i)
{
sum += FTransposeF[idx][row][i] * F[idx][i][col];
}
FirstPiolaKirchoffTensor[idx][row][col] = sum;
}
}
//Copy back
for (int row = 0; row < 3; ++row)
{
for (int col = 0; col < 3; ++col)
{
FTransposeF[idx][row][col] = FirstPiolaKirchoffTensor[idx][row][col];
}
}
}
__device__ void calculateFInverseTranspose(int idx)
{
//1. Calculate cofactors
FInverseTranspose[idx][0][0] = F[idx][1][1] * F[idx][2][2] - F[idx][2][1] * F[idx][1][2];
FInverseTranspose[idx][0][1] = -(F[idx][1][0] * F[idx][2][2] - F[idx][2][0] * F[idx][1][2]);
FInverseTranspose[idx][0][2] = F[idx][1][0] * F[idx][2][1] - F[idx][2][0] * F[idx][1][1];
FInverseTranspose[idx][1][0] = -(F[idx][0][1] * F[idx][2][2] - F[idx][2][1] * F[idx][0][2]);
FInverseTranspose[idx][1][1] = F[idx][0][0] * F[idx][2][2] - F[idx][2][0] * F[idx][0][2];
FInverseTranspose[idx][1][2] = -(F[idx][0][0] * F[idx][2][1] - F[idx][2][0] * F[idx][0][1]);
FInverseTranspose[idx][2][0] = F[idx][0][1] * F[idx][1][2] - F[idx][1][1] * F[idx][0][2];
FInverseTranspose[idx][2][1] = -(F[idx][0][0] * F[idx][1][2] - F[idx][1][0] * F[idx][0][2]);
FInverseTranspose[idx][2][2] = F[idx][0][0] * F[idx][1][1] - F[idx][1][0] * F[idx][0][1];
//2. Transpose (Already included in the co-factor calculation)
//float temp;
//temp = FInverseTranspose[idx][0][1];
//FInverseTranspose[idx][0][1] = FInverseTranspose[idx][1][0];
//FInverseTranspose[idx][1][0] = temp;
//temp = FInverseTranspose[idx][0][2];
//FInverseTranspose[idx][0][2] = FInverseTranspose[idx][2][0];
//FInverseTranspose[idx][2][0] = temp;
//temp = FInverseTranspose[idx][1][2];
//FInverseTranspose[idx][1][2] = FInverseTranspose[idx][2][1];
//FInverseTranspose[idx][2][1] = temp;
//3. Calculate the determinant
float determinant = determinantF(idx);
//printf("Determinant of F: %4.8f \n", determinant);
//4. Multiply
for (int row = 0; row < 3; ++row)
{
for (int col = 0; col < 3; ++col)
{
FInverseTranspose[idx][row][col] /= determinant;
}
}
}
__device__ float squaredNormGradient(int idx, int particleIdx)
{
return sqrtf(sqr(Gradient[idx][0][particleIdx])
+ sqr(Gradient[idx][1][particleIdx])
+ sqr(Gradient[idx][2][particleIdx]));
}
__device__ float calculateLagrangeMultiplierDenominator(int idx, float* inverseMass)
{
float denominator = 0.0f;
for (int i = 0; i < 4; ++i)
{
denominator += LocalMasses[idx][i] * squaredNormGradient(idx, i);
//printf("Denominator Component: %4.8f \n", inverseMass[LocalIndices[idx][i]] * squaredNormGradient(idx, i));
}
//printf("Denominator: %4.8f \n", denominator);
return denominator;
}
__device__ void updatePositions(int idx, float lagrangeMultiplier, float* positions, float* inverseMass)
{
for (int i = 0; i < 4; ++i)
{
for (int j = 0; j < 3; ++j)
{
atomicAdd(&positions[LocalIndices[idx][i] * 3 + j], LocalMasses[idx][i] * lagrangeMultiplier * Gradient[idx][j][i]);
//printf("Position Update %4.8f \n", LocalMasses[idx][i] * lagrangeMultiplier * Gradient[idx][j][i]);
}
printf("\n");
}
}
__device__ void getIndices(int idx, int* indices)
{
for (int i = 0; i < 4; ++i)
{
LocalIndices[idx][i] = indices[idx * 4 + i];
}
}
__device__ void getMasses(int idx, float* masses)
{
for (int i = 0; i < 4; ++i)
{
LocalMasses[idx][i] = masses[LocalIndices[idx][i]];
}
}
__global__ void solveFEMConstraint(float* positions, int* indices, float* inverseMass, float* volume, float* refShapeMatrixInverse,
float lambda, float mu)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
getIndices(idx, indices);
getMasses(idx, inverseMass);
//1. Calculate Deformation Gradient F
calculateF(idx, positions, refShapeMatrixInverse);
//printf("F: \n");
//for (int row = 0; row < 3; ++row)
//{
// for (int col = 0; col < 3; ++col)
// {
// printf("%4.8f,", F[idx][row][col]);
// }
// printf("\n");
//}
//printf("\n \n");
//2. Compute Cauchy Tensors
calculateFInverseTranspose(idx);
//printf("FInverseTranspose: \n");
//for (int row = 0; row < 3; ++row)
//{
// for (int col = 0; col < 3; ++col)
// {
// printf("%4.8f,", FInverseTranspose[idx][row][col]);
// }
// printf("\n");
//}
//printf("\n \n");
calculateFTransposeF(idx);
//printf("FTransposeF: \n");
//for (int row = 0; row < 3; ++row)
//{
// for (int col = 0; col < 3; ++col)
// {
// printf("%4.8f,", FTransposeF[idx][row][col]);
// }
// printf("\n");
//}
//printf("\n \n");
//3. Compute Invariants
float I1 = traceFTransposeF(idx);
float I3 = determinantFTransposeF(idx);
//printf("I1 = %4.8f \n", I1);
//printf("I3 = %4.8f \n", I3);
//4. Calculate First Piola-Kirchoff Stress Tensor
calculateFirstPiolaKirchoffTensor_NEO_HOOKEAN(idx, mu, lambda, I3);
//printf("PF: \n");
//for (int row = 0; row < 3; ++row)
//{
// for (int col = 0; col < 3; ++col)
// {
// printf("%4.8f,", FirstPiolaKirchoffTensor[idx][row][col]);
// }
// printf("\n");
//}
//printf("\n \n");
//5. Calculate StrainEnergy
float strainEnergy = calculateStrainEnergy_NEO_HOOKEAN(volume[idx], lambda, mu, I1, I3);
//printf("StrainEnergy = %4.8f \n", strainEnergy);
//6. Calculate Strain Energy Gradient
calculateStrainEnergyGradient_NEO_HOOKEAN(idx, volume[idx], refShapeMatrixInverse);
//printf("Strain Energy Gradient: \n");
//for (int row = 0; row < 3; ++row)
//{
// for (int col = 0; col < 4; ++col)
// {
// printf("%4.8f,", Gradient[idx][row][col]);
// }
// printf("\n");
//}
//printf("\n \n");
//7. Calculate Lagrange Multiplier
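//Textbook form: s = -C(x) / sum_i w_i |grad_{x_i} C|^2, with the strain energy acting as the constraint C.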
float lagrangeMultiplier = - (strainEnergy / calculateLagrangeMultiplierDenominator(idx, inverseMass));
//printf("lagrangeMultiplier = %4.8f \n", lagrangeMultiplier);
//8. Update Positions
updatePositions(idx, lagrangeMultiplier, positions, inverseMass);
}
cudaError_t projectConstraints(std::vector<int>& indices,
std::vector<float>& originalPositions,
std::vector<float>& positions,
std::vector<float>& inverseMasses,
std::vector<float>& refShapeMatrixInverses,
std::vector<float>& volumes,
std::vector<float>& positions_result,
float lambda, float mu);
void projectConstraintsHOST(std::vector<int>& indices,
std::vector<float>& originalPositions,
std::vector<float>& positions,
std::vector<float>& inverseMasses,
std::vector<float>& refShapeMatrixInverses,
std::vector<float>& volumes,
std::vector<float>& positions_result,
float lambda, float mu);
void setUpSystem(std::vector<int>& indices, std::vector<float>& originalPositions,
std::vector<float>& positions,
std::vector<float>& inverseMasses,
std::vector<float>& refShapeMatrixInverses,
std::vector<float>& volumes,
float gravity, float deltaT)
{
originalPositions.push_back(0.0f); originalPositions.push_back(0.0f); originalPositions.push_back(0.0f);
originalPositions.push_back(-0.946f); originalPositions.push_back(0.0f); originalPositions.push_back(-1.114f);
originalPositions.push_back(0.689f); originalPositions.push_back(0.515f); originalPositions.push_back(-1.114f);
originalPositions.push_back(0.689f); originalPositions.push_back(-0.757f); originalPositions.push_back(-1.114f);
originalPositions.push_back(0.0f); originalPositions.push_back(0.0f); originalPositions.push_back(-2.576f);
indices.push_back(3); indices.push_back(0); indices.push_back(2); indices.push_back(1);
indices.push_back(3); indices.push_back(4); indices.push_back(1); indices.push_back(2);
for (int i = 0; i < 5; ++i)
{
inverseMasses.push_back(1.0f);
}
inverseMasses[0] = 0.0f;
for (int i = 0; i < originalPositions.size(); ++i)
{
positions.push_back(originalPositions[i]);
}
//apply one time step of deformations
for (int i = 0; i < 5; ++i)
{
positions[i * 3 + 1] += inverseMasses[i] * gravity * deltaT;
}
//FROM MATLAB
volumes.push_back(0.38613f);
volumes.push_back(0.50676f);
refShapeMatrixInverses.push_back(0.2476294885850020f);
refShapeMatrixInverses.push_back(-0.786163522012579f);
refShapeMatrixInverses.push_back(-0.210285005566797f);
refShapeMatrixInverses.push_back(0.0000000000000000f);
refShapeMatrixInverses.push_back(0.0000000000000000f);
refShapeMatrixInverses.push_back(0.8976660682226210f);
refShapeMatrixInverses.push_back(0.3639913065220320f);
refShapeMatrixInverses.push_back(0.7861635220125790f);
refShapeMatrixInverses.push_back(-0.309098542163233f);
refShapeMatrixInverses.push_back(0.0000000000000000f);
refShapeMatrixInverses.push_back(0.2476294885850020f);
refShapeMatrixInverses.push_back(-0.786163522012579f);
refShapeMatrixInverses.push_back(0.1602308455550010f);
refShapeMatrixInverses.push_back(0.0000000000000000f);
refShapeMatrixInverses.push_back(0.0000000000000000f);
refShapeMatrixInverses.push_back(-0.683994528043776f);
refShapeMatrixInverses.push_back(-0.611620795107034f);
refShapeMatrixInverses.push_back(0.0000000000000000f);
refShapeMatrixInverses.push_back(0.2882398959156950f);
}
int main()
{
std::vector<int> indices;
std::vector<float> originalPositions;
std::vector<float> positions;
std::vector<float> inverseMasses;
std::vector<float> refShapeMatrixInverses;
std::vector<float> volumes;
float deltaT = 0.5f;
float gravity = -9.8f;
float mu = 0.769231f;
float lambda = 1.15385f;
setUpSystem(indices, originalPositions, positions, inverseMasses, refShapeMatrixInverses, volumes, gravity, deltaT);
std::vector<float> positionsResultDevice(positions.size());
std::vector<float> positionsResultHost(positions.size());
//CPU
projectConstraintsHOST(indices, originalPositions, positions,
inverseMasses, refShapeMatrixInverses, volumes, positionsResultHost, lambda, mu);
//GPU
cudaError_t cudaStatus = projectConstraints(indices, originalPositions, positions,
inverseMasses, refShapeMatrixInverses, volumes, positionsResultDevice, lambda, mu);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "Critical Error, aborting...");
return 1;
}
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
return 1;
}
//Print Some Results
std::cout << "INPUT POSITIONS: " << std::endl;
for (int row = 0; row < 5; ++row)
{
for (int col = 0; col < 3; ++col)
{
std::cout << positions[row * 3 + col] << ", ";
}
std::cout << std::endl;
}
std::cout << std::endl << std::endl;
std::cout << "AFTER PROJECTION HOST: " << std::endl;
for (int row = 0; row < 5; ++row)
{
for (int col = 0; col < 3; ++col)
{
std::cout << positionsResultHost[row * 3 + col] << ", ";
}
std::cout << std::endl;
}
std::cout << std::endl << std::endl;
std::cout << "AFTER PROJECTION DEVICE: " << std::endl;
for (int row = 0; row < 5; ++row)
{
for (int col = 0; col < 3; ++col)
{
std::cout << positionsResultDevice[row * 3 + col] << ", ";
}
std::cout << std::endl;
}
std::cout << std::endl << std::endl;
return 0;
}
cudaError_t cudaErrorWrapper(cudaError_t status)
{
if (status != cudaSuccess) {
fprintf(stderr, "Critical Error occurred!");
std::cout << "ERROR Details: " << cudaGetErrorString(status) << std::endl;
}
return status;
}
void getCudaDeviceProperties(int device)
{
cudaDeviceProp properties;
cudaGetDeviceProperties(&properties, device);
std::cout << "Compute Capabilities for " << properties.name << " : " << std::endl;
std::cout << "Major: " << properties.major << ", Minor: " << properties.minor << std::endl;
std::cout << "Details: " << std::endl;
std::cout << " Num of SM : " << properties.multiProcessorCount << std::endl;
std::cout << " Mem per Block: " << properties.sharedMemPerBlock << std::endl;
std::cout << " Mem per SM : " << properties.sharedMemPerMultiprocessor << std::endl;
}
cudaError_t projectConstraints(std::vector<int>& indices, std::vector<float>& originalPositions,
std::vector<float>& positions,
std::vector<float>& inverseMasses,
std::vector<float>& refShapeMatrixInverses,
std::vector<float>& volumes,
std::vector<float>& positions_result,
float lambda, float mu)
{
float* dev_positions;
float* dev_inverseMasses;
int* dev_indices;
float* dev_refShapeMatrixInverses;
float* dev_volumes;
cudaError_t deviceStatus;
//Allocate memory
int deviceCount = 0;
deviceStatus = cudaGetDeviceCount(&deviceCount);
std::cout << "Num CUDA Devices Found: " << deviceCount << std::endl;
deviceStatus = cudaErrorWrapper(cudaSetDevice(0));
getCudaDeviceProperties(0);
deviceStatus = cudaErrorWrapper(cudaMalloc((void**)&dev_indices, indices.size() * sizeof(int)));
deviceStatus = cudaErrorWrapper(cudaMalloc((void**)&dev_positions, positions.size() * sizeof(float)));
deviceStatus = cudaErrorWrapper(cudaMalloc((void**)&dev_inverseMasses, inverseMasses.size() * sizeof(float)));
deviceStatus = cudaErrorWrapper(cudaMalloc((void**)&dev_refShapeMatrixInverses, refShapeMatrixInverses.size() * sizeof(float)));
deviceStatus = cudaErrorWrapper(cudaMalloc((void**)&dev_volumes, volumes.size() * sizeof(float)));
//Cpy memory
deviceStatus = cudaErrorWrapper(cudaMemcpy(dev_indices, &indices[0], indices.size() * sizeof(int), cudaMemcpyHostToDevice));
deviceStatus = cudaErrorWrapper(cudaMemcpy(dev_positions, &positions[0], positions.size() * sizeof(float), cudaMemcpyHostToDevice));
deviceStatus = cudaErrorWrapper(cudaMemcpy(dev_inverseMasses, &inverseMasses[0], inverseMasses.size() * sizeof(float), cudaMemcpyHostToDevice));
deviceStatus = cudaErrorWrapper(cudaMemcpy(dev_refShapeMatrixInverses, &refShapeMatrixInverses[0], refShapeMatrixInverses.size() * sizeof(float), cudaMemcpyHostToDevice));
deviceStatus = cudaErrorWrapper(cudaMemcpy(dev_volumes, &volumes[0], volumes.size() * sizeof(float), cudaMemcpyHostToDevice));
//Execute Kernel (note: a single thread is launched, so only the constraint with idx == 0 is projected)
solveFEMConstraint<<<1, 1>>>(dev_positions, dev_indices, dev_inverseMasses, dev_volumes, dev_refShapeMatrixInverses, lambda, mu);
cudaDeviceSynchronize();
//Cpy memory back
positions_result.resize(positions.size());
deviceStatus = cudaErrorWrapper(cudaMemcpy(&positions_result[0], dev_positions, positions_result.size() * sizeof(float), cudaMemcpyDeviceToHost));
//Free memory
cudaFree(dev_positions);
cudaFree(dev_inverseMasses);
cudaFree(dev_indices);
cudaFree(dev_refShapeMatrixInverses);
cudaFree(dev_volumes);
return deviceStatus;
}
void projectConstraintsHOST(std::vector<int>& indices,
std::vector<float>& originalPositions,
std::vector<float>& positions,
std::vector<float>& inverseMasses,
std::vector<float>& refShapeMatrixInverses,
std::vector<float>& volumes,
std::vector<float>& positions_result,
float lambda, float mu)
{
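//Hard-coded reference solution (presumably precomputed offline, cf. the MATLAB-derived
//inputs in setUpSystem) against which main() compares the GPU result.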
positions_result.clear();
positions_result.push_back(0.000000000000000000f);
positions_result.push_back(0.000000000000000000f);
positions_result.push_back(0.000000000000000000f);
positions_result.push_back(-0.86112528478748700f);
positions_result.push_back(-4.37303501877824000f);
positions_result.push_back(-1.16888554066580000f);
positions_result.push_back(0.645803837424706000f);
positions_result.push_back(-4.08169452857322000f);
positions_result.push_back(-1.97921356664365000f);
positions_result.push_back(0.656806413004164000f);
positions_result.push_back(-5.20915823509948000f);
positions_result.push_back(-0.28630813323995600f);
positions_result.push_back(-0.00948496564138351f);
positions_result.push_back(-4.91178046790357000f);
positions_result.push_back(-2.48359275945060000f);
}
|
1d61a4c5447b9d2e8c1ddc2db8fe078d470ab386.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <math.h>
#include <time.h>
#include <unistd.h>
#include <hip/hip_runtime_api.h>
#include <errno.h>
#include <unistd.h>
/******************************************************************************
* The variable names and the function names of this program are the same as those provided by the university.
The added variable and function are the only changes made to this program.
* To compile:
* nvcc -o linear_regression linear_regression.cu -lm
*
* To run:
* ./linear_regression
*
*****************************************************************************/
typedef struct dot_r {
double x;
double y;
} dot_r;
int f_data = 1000;
__device__ int d_f_data = 1000;
dot_r data[] = {
{77.71,118.37},{79.99,117.25},{69.06,122.31},{69.44,97.80},
{78.39,133.72},{75.10,116.40},{72.92,100.75},{66.00,107.78},
{78.42,135.07},{ 1.98,27.38},{44.30,98.30},{12.89,54.55},
{50.28,81.23},{ 0.87,32.41},{60.11,103.89},{61.07,95.05},
{57.58,94.71},{13.53,42.89},{85.53,147.68},{66.85,96.47},
{89.65,145.23},{59.58,114.64},{53.38,85.22},{10.20,40.31},
{83.60,114.19},{ 7.48,17.31},{80.00,117.22},{52.39,86.20},
{53.09,98.96},{90.75,128.71},{97.80,146.45},{19.76,30.07},
{73.05,98.86},{47.12,96.59},{16.89, 7.56},{42.79,65.89},
{78.08,116.52},{22.14,35.28},{ 8.32,34.35},{80.45,118.18},
{13.46,30.08},{98.94,148.92},{14.74,61.82},{23.09,60.07},
{73.49,134.11},{38.21,66.26},{ 7.08,13.11},{40.65,102.52},
{ 4.92,26.25},{72.74,128.55},{25.25,33.78},{71.87,115.97},
{73.70,125.19},{75.21,121.60},{47.52,85.10},{51.48,77.13},
{69.78,112.68},{74.89,120.82},{41.59,76.25},{ 6.25,21.02},
{53.77,92.85},{83.60,133.92},{13.30,33.67},{81.35,146.79},
{20.63,47.13},{75.03,113.14},{29.91,61.69},{40.32,64.97},
{26.41,64.73},{30.93,48.34},{50.20,66.71},{83.38,135.77},
{46.28,84.61},{52.76,98.56},{89.02,133.43},{47.94,69.37},
{86.83,127.83},{41.09,72.44},{86.41,124.49},{75.35,112.22},
{27.19,45.42},{79.23,122.70},{79.94,122.33},{ 2.55,29.54},
{23.62,34.45},{17.62,60.87},{61.60,110.86},{33.60,43.98},
{98.29,129.36},{96.42,159.26},{97.06,153.63},{23.54,63.41},
{92.21,124.73},{93.80,143.77},{99.21,141.99},{37.77,76.65},
{60.85,108.71},{32.82,76.23},{58.21,99.66},{41.05,67.89},
{20.40,44.63},{96.85,118.68},{93.03,151.95},{96.12,143.23},
{ 2.38,26.53},{74.99,117.20},{41.23,75.18},{ 1.22,15.65},
{86.09,140.03},{32.13,71.68},{ 5.24,36.52},{ 3.37,31.88},
{88.79,143.02},{74.29,132.13},{78.39,133.92},{48.90,83.32},
{35.85,61.90},{61.94,99.71},{55.87,100.07},{53.60,98.75},
{ 7.87,57.98},{18.03,54.82},{38.16,64.94},{97.60,152.45},
{83.75,132.52},{ 7.46,35.02},{45.36,90.96},{14.30,52.57},
{91.74,134.30},{84.93,131.68},{91.39,143.15},{22.66,50.78},
{56.21,114.58},{ 2.93,31.36},{24.46,59.05},{80.63,109.87},
{11.44,37.10},{63.49,111.92},{ 9.43,36.66},{61.90,106.73},
{33.41,64.37},{28.01,62.66},{68.99,115.81},{31.88,77.88},
{41.96,66.67},{56.29,93.49},{54.32,89.16},{21.69,43.79},
{98.14,141.51},{86.27,143.26},{13.78,39.65},{55.31,79.25},
{78.78,129.62},{75.89,114.13},{62.17,99.07},{ 2.85,20.60},
{26.17,58.13},{73.04,110.64},{82.63,138.70},{81.45,134.00},
{83.51,126.17},{65.61,89.46},{87.80,156.70},{37.41,84.56},
{66.08,117.32},{40.79,54.80},{33.53,65.25},{41.04,72.05},
{66.82,96.09},{64.98,99.59},{ 0.48,14.04},{ 3.79,27.94},
{75.75,112.43},{13.40,26.16},{71.22,124.57},{76.30,127.05},
{20.79,32.42},{50.82,96.31},{20.31,50.97},{90.14,139.39},
{38.36,72.64},{30.21,58.75},{21.07,41.05},{49.10,85.66},
{56.15,83.49},{95.58,145.48},{38.99,72.62},{77.33,127.70},
{18.89,27.48},{60.60,92.75},{82.51,158.13},{36.16,78.41},
{32.93,46.63},{95.76,156.67},{87.48,128.92},{39.63,67.11},
{26.92,43.70},{21.68,43.05},{ 5.25,21.22},{94.31,151.40},
{36.46,48.57},{86.11,143.05},{76.03,117.55},{93.51,148.78},
{28.82,36.20},{42.91,63.78},{42.68,73.03},{ 1.56,23.66},
{72.77,96.96},{12.89,37.54},{76.73,104.42},{13.11,37.49},
{79.24,122.47},{19.77,51.97},{97.41,167.81},{36.15,66.21},
{85.83,141.64},{97.81,140.42},{19.79,44.90},{60.73,100.93},
{71.57,109.06},{61.08,99.11},{26.65,56.85},{83.86,118.62},
{71.71,102.11},{95.39,157.38},{62.06,109.50},{51.76,96.57},
{87.21,151.31},{42.84,87.45},{77.11,127.24},{93.93,132.40},
{14.07,34.64},{76.22,107.11},{91.14,130.17},{41.13,93.39},
{45.86,65.70},{44.29,84.05},{88.13,129.71},{83.78,128.84},
{47.64,76.55},{43.71,89.37},{45.24,69.61},{41.40,73.34},
{78.05,115.35},{73.60,130.53},{51.39,76.25},{ 5.36,37.32},
{98.60,134.79},{55.74,107.52},{80.27,130.95},{55.86,112.57},
{76.90,132.49},{70.12,99.17},{37.98,63.30},{59.69,87.79},
{27.60,59.93},{ 7.85,13.81},{91.31,142.38},{61.71,90.80},
{ 3.53,20.92},{43.51,70.62},{67.48,111.76},{51.69,89.26},
{42.72,73.83},{62.41,84.52},{ 6.93,39.03},{53.73,72.68},
{78.51,134.77},{ 0.04,12.87},{32.23,69.84},{47.65,89.71},
{20.63,40.80},{31.71,64.32},{79.65,119.23},{44.49,80.50},
{15.85,71.07},{79.52,126.35},{49.54,76.21},{65.93,95.92},
{80.63,109.78},{ 3.89,18.16},{78.08,132.04},{13.10,18.05},
{ 8.09,48.23},{71.70,102.45},{39.58,73.17},{50.35,87.03},
{ 1.63,19.98},{43.46,81.78},{20.67,30.42},{41.44,84.93},
{48.79,82.65},{43.40,87.03},{27.04,78.79},{54.44,86.12},
{25.95,59.95},{68.03,121.75},{31.42,61.04},{61.36,110.79},
{21.85,64.55},{19.03,37.01},{67.99,130.99},{22.70,56.76},
{13.20,28.61},{53.60,88.43},{ 9.53,37.45},{94.33,131.89},
{85.92,136.44},{77.44,116.76},{85.34,119.62},{32.78,64.06},
{33.77,74.14},{15.69,39.03},{45.25,68.73},{70.58,101.24},
{81.07,121.24},{84.05,111.60},{28.02,49.85},{42.92,75.76},
{64.33,114.30},{54.02,102.96},{25.63,34.36},{13.15,34.17},
{72.20,102.12},{25.95,60.98},{11.33,32.68},{12.57,42.62},
{75.81,118.49},{33.39,75.99},{47.08,78.23},{41.85,80.72},
{32.80,54.82},{61.02,98.56},{51.27,83.86},{15.76,53.40},
{16.74,39.72},{55.21,96.87},{41.06,87.73},{44.64,70.94},
{ 6.37,28.78},{72.32,102.31},{19.40,44.87},{33.49,43.03},
{73.66,130.70},{33.05,63.86},{ 9.44,28.35},{93.85,143.55},
{88.61,131.82},{18.18,32.11},{85.96,137.80},{62.64,108.88},
{44.87,65.03},{ 7.97,29.72},{97.26,146.68},{88.75,128.80},
{27.41,50.93},{29.15,57.13},{ 8.87,31.25},{ 4.10,41.36},
{22.94,53.20},{55.52,107.71},{35.11,63.22},{28.63,60.14},
{47.21,72.73},{36.78,67.44},{20.55,52.79},{76.51,136.88},
{40.00,74.92},{69.58,118.32},{25.26,65.70},{24.34,55.95},
{29.39,48.23},{55.09,98.80},{22.29,42.40},{ 3.57,33.11},
{23.99,57.04},{25.25,61.68},{ 6.29, 4.79},{46.72,69.01},
{88.43,154.62},{49.62,83.67},{57.38,108.12},{ 1.65,32.45},
{14.21,40.08},{51.90,108.16},{23.59,62.75},{ 1.38,15.38},
{72.16,110.86},{23.69,63.86},{26.81,68.93},{58.09,96.22},
{ 3.11,31.31},{16.93,23.06},{20.73,47.49},{68.43,112.12},
{89.41,125.83},{38.03,70.19},{88.91,127.64},{15.60,37.27},
{79.80,130.93},{58.62,94.38},{97.38,161.61},{82.35,133.83},
{12.41,56.68},{76.26,130.66},{99.68,140.59},{41.95,69.47},
{67.29,107.94},{63.46,119.31},{58.18,94.67},{52.51,70.40},
{ 4.92,22.25},{38.59,73.84},{93.82,142.33},{84.56,125.48},
{27.33,73.83},{78.09,125.10},{27.32,52.72},{63.51,101.17},
{69.23,107.72},{71.50,129.66},{47.09,88.65},{ 1.69,12.36},
{14.43,29.88},{25.03,50.90},{87.32,138.41},{ 7.33,26.36},
{42.44,73.18},{81.54,138.65},{21.00,42.17},{20.01,60.70},
{10.91,50.60},{72.92,134.81},{25.72,47.36},{74.81,115.12},
{43.02,69.35},{ 7.49,17.92},{16.01,62.76},{47.61,78.91},
{63.03,114.84},{41.47,70.16},{10.99,43.14},{65.29,122.99},
{84.13,151.79},{31.56,72.09},{42.02,66.99},{75.43,122.59},
{66.67,108.12},{94.41,136.84},{65.70,104.84},{28.83,45.17},
{83.23,115.45},{83.22,132.69},{25.34,40.08},{39.41,77.42},
{86.43,137.37},{82.92,138.46},{77.39,130.12},{27.93,71.13},
{ 5.98,14.68},{53.22,102.60},{69.02,125.95},{31.21,52.17},
{60.89,96.81},{72.29,127.61},{59.73,97.42},{ 3.41,34.91},
{67.59,102.83},{ 3.52,25.26},{22.92,43.58},{ 9.56,35.27},
{75.71,118.93},{74.50,99.32},{75.97,109.67},{98.54,144.18},
{42.28,84.20},{11.03,49.30},{58.27,97.63},{68.86,115.18},
{18.28,39.07},{94.18,140.02},{85.29,139.47},{90.94,122.07},
{85.45,142.35},{24.99,57.95},{13.13,45.83},{11.05,36.79},
{34.63,68.62},{82.21,123.38},{77.92,125.53},{49.74,101.96},
{44.84,89.51},{55.42,82.02},{45.86,75.45},{75.20,123.93},
{86.83,129.61},{55.84,96.01},{94.94,161.02},{ 6.08,40.37},
{93.48,143.56},{69.31,108.07},{ 8.44,50.11},{90.19,124.44},
{ 7.76,39.72},{50.86,86.96},{75.60,120.34},{26.92,60.22},
{90.61,147.35},{28.75,47.08},{10.09,29.92},{41.39,85.20},
{42.61,89.96},{70.80,128.41},{95.80,150.46},{ 5.24,32.06},
{38.48,81.51},{ 1.84,20.27},{76.81,115.18},{94.45,149.21},
{97.84,147.80},{29.87,65.79},{89.72,124.70},{61.41,108.91},
{61.92,93.34},{93.02,138.49},{40.00,99.11},{93.69,140.55},
{49.15,79.54},{15.09,38.92},{72.51,110.53},{58.69,98.46},
{19.89,43.37},{30.08,53.34},{65.85,108.89},{24.23,61.85},
{ 4.00,18.71},{83.31,136.71},{95.61,155.94},{ 6.06,27.71},
{32.15,60.19},{52.35,87.88},{32.47,57.17},{18.16,31.09},
{30.95,70.51},{ 3.06,28.73},{67.59,105.87},{32.85,72.58},
{36.83,59.70},{94.38,143.76},{64.11,123.13},{ 3.82,47.87},
{ 0.05, 7.22},{97.38,138.16},{61.43,95.61},{48.82,71.95},
{40.72,83.12},{27.25,49.60},{68.62,119.62},{38.86,86.99},
{84.41,129.27},{50.41,94.42},{58.25,84.50},{76.15,115.62},
{98.74,157.68},{85.86,123.04},{75.11,121.87},{ 2.08,24.96},
{ 0.61,16.67},{44.85,62.44},{24.40,56.84},{27.55,74.58},
{35.04,50.21},{ 8.12,36.09},{82.93,122.09},{ 7.23,22.68},
{84.75,149.08},{98.09,135.56},{44.72,82.11},{56.69,99.54},
{73.44,108.31},{89.69,146.60},{15.68,52.36},{61.02,97.96},
{82.44,125.94},{15.20,37.46},{95.25,133.23},{63.12,116.50},
{61.00,90.26},{97.78,143.63},{26.50,63.40},{49.85,69.20},
{ 9.59,29.31},{65.87,108.15},{85.70,120.68},{24.60,49.25},
{37.32,63.35},{24.52,39.37},{45.29,98.28},{ 2.40,23.86},
{37.13,61.72},{18.83,46.26},{61.99,89.33},{10.78,35.44},
{96.87,140.16},{74.81,124.22},{92.42,150.59},{ 3.93,27.67},
{98.50,151.73},{83.89,138.01},{13.44,29.08},{12.43,63.45},
{59.00,107.05},{ 8.87,39.62},{95.43,137.76},{33.46,78.39},
{81.86,127.60},{62.80,82.03},{51.12,98.72},{ 6.46,22.45},
{34.17,71.95},{46.53,62.89},{51.89,86.67},{99.81,159.73},
{15.53,25.82},{27.02,53.90},{ 6.74,21.51},{ 8.39,46.49},
{ 4.18,26.44},{12.12,28.82},{12.32,29.01},{20.52,68.74},
{ 4.35,41.51},{36.92,40.93},{ 3.02,22.89},{31.95,65.75},
{88.17,130.42},{47.20,73.61},{28.83,46.88},{83.22,129.91},
{ 1.91, 6.78},{67.76,92.99},{20.53,46.48},{11.65,37.44},
{ 6.69,19.11},{ 5.65,24.92},{46.45,67.67},{86.36,126.52},
{53.65,92.55},{79.46,117.67},{ 1.93,18.91},{65.19,124.83},
{ 8.60,33.39},{53.01,88.02},{ 4.10,16.66},{19.55,47.08},
{70.43,106.99},{68.46,131.30},{43.00,88.27},{ 1.09,23.86},
{49.10,88.88},{38.80,71.23},{48.58,56.84},{17.51,49.43},
{86.81,136.83},{32.99,80.49},{40.77,69.47},{ 8.96,43.94},
{77.88,112.41},{90.41,130.55},{34.68,80.40},{26.12,38.12},
{97.31,131.87},{83.20,128.30},{49.34,92.64},{74.72,109.87},
{65.13,96.07},{40.33,95.63},{12.69,70.18},{93.04,123.67},
{62.77,95.33},{10.01,42.56},{50.26,91.79},{33.03,64.88},
{35.60,74.93},{22.34,71.49},{35.91,91.66},{63.35,107.85},
{55.45,81.38},{75.28,114.90},{83.57,143.65},{27.74,55.13},
{54.63,93.95},{77.31,140.26},{77.35,118.13},{77.60,134.28},
{24.18,40.23},{93.52,148.38},{89.15,134.32},{50.77,87.31},
{67.85,103.17},{78.51,139.13},{66.65,121.66},{55.62,100.25},
{38.93,68.47},{35.20,54.12},{48.24,81.83},{ 2.03,17.58},
{97.45,144.40},{47.17,72.08},{23.74,35.80},{25.37,48.72},
{ 5.31,15.37},{66.74,107.76},{48.65,97.69},{98.93,160.88},
{69.73,115.68},{65.00,105.45},{36.58,86.29},{11.47,12.24},
{35.17,59.65},{37.79,61.17},{27.99,66.00},{70.76,107.06},
{36.48,43.24},{30.39,38.81},{74.79,97.82},{99.11,141.48},
{65.18,94.20},{57.64,113.19},{60.38,105.02},{ 2.51,14.01},
{ 5.64,14.90},{99.02,152.02},{85.49,139.91},{51.13,88.82},
{91.16,139.02},{59.68,110.37},{28.17,61.92},{64.77,118.17},
{86.11,121.95},{ 0.65,29.67},{11.52,47.82},{55.48,89.30},
{85.96,134.27},{17.73,60.43},{72.41,127.45},{98.91,132.37},
{23.99,52.87},{75.61,122.24},{93.47,150.85},{38.10,89.12},
{36.36,93.40},{14.67,35.45},{19.00,28.81},{34.13,56.63},
{25.50,53.50},{66.38,106.13},{21.23,35.13},{58.52,101.30},
{45.48,85.22},{18.94,45.86},{36.91,71.11},{68.31,102.83},
{48.55,76.34},{83.76,119.65},{13.59,41.19},{25.11,59.64},
{88.34,140.80},{40.73,65.81},{75.06,117.91},{34.52,70.34},
{60.33,112.96},{93.45,159.26},{14.69,37.71},{67.94,108.60},
{66.55,105.72},{29.61,67.84},{54.44,86.84},{85.79,124.78},
{94.04,143.80},{47.76,103.96},{67.79,121.88},{ 4.08,26.63},
{66.30,118.13},{58.84,109.51},{78.38,119.86},{71.80,125.94},
{80.36,120.93},{43.39,55.98},{62.44,80.43},{59.86,100.00},
{52.63,89.05},{87.47,133.26},{ 4.52,41.66},{67.69,95.44},
{25.85,50.83},{62.84,125.73},{30.62,66.84},{41.36,72.66},
{90.15,116.95},{47.89,80.34},{66.11,113.55},{62.44,117.34},
{ 7.46,18.13},{ 2.31,22.79},{ 3.48,-2.56},{69.38,105.80},
{71.06,102.65},{21.22,43.33},{61.68,101.09},{82.58,128.02},
{16.07,41.68},{67.61,100.83},{92.63,136.33},{46.09,77.04},
{58.98,89.11},{47.14,62.19},{79.96,123.29},{69.28,107.76},
{57.66,101.57},{ 8.75,22.96},{76.37,109.15},{16.16,49.22},
{63.84,94.16},{50.39,80.51},{23.40,46.06},{ 1.19,11.59},
{33.56,107.61},{21.15,43.11},{50.64,105.18},{49.37,82.33},
{ 3.04,20.08},{86.17,134.57},{93.84,164.91},{37.66,72.16},
{37.60,76.78},{31.93,62.18},{24.76,64.81},{81.81,135.08},
{19.47,44.59},{51.06,93.21},{75.95,105.78},{45.11,84.19},
{26.09,53.01},{65.29,111.15},{16.30,43.46},{ 3.98,37.02},
{31.01,83.67},{36.74,80.27},{57.44,98.20},{20.22,55.81},
{90.30,142.05},{33.02,72.78},{10.00,30.94},{48.85,100.09},
{ 8.98,38.97},{16.55,50.11},{41.77,75.79},{85.21,117.09},
{ 7.76,16.77},{ 4.85,28.72},{51.86,86.07},{95.10,149.69},
{ 6.01,14.26},{88.88,149.08},{91.29,135.31},{90.04,127.70},
{87.74,140.95},{82.55,134.69},{35.17,77.45},{24.19,51.84},
{10.17,33.81},{91.68,139.46},{ 3.79,29.42},{78.61,127.21},
{19.41,48.24},{56.46,83.05},{16.64,44.55},{67.28,114.50},
{85.25,127.93},{90.71,131.57},{95.99,161.52},{ 2.47,22.18},
{51.76,86.07},{16.01,47.34},{80.02,127.68},{76.54,104.42},
{69.92,109.39},{15.84,43.24},{57.14,97.97},{10.51,36.98},
{83.47,136.46},{75.61,106.65},{58.51,98.79},{69.56,124.26},
{76.79,102.97},{26.32,62.18},{52.91,84.21},{68.31,100.40},
{29.00,47.75},{47.07,80.94},{13.08,48.23},{21.29,47.84},
{96.08,138.90},{73.92,111.23},{44.28,79.11},{38.58,71.59},
{30.00,61.29},{59.70,102.57},{89.92,136.71},{22.75,50.13},
{56.49,78.08},{85.09,123.94},{66.34,101.70},{81.95,139.69},
{53.94,82.47},{53.59,109.38},{61.26,93.45},{85.31,153.15},
{10.04,31.69},{46.56,82.32},{87.62,140.91},{37.29,68.26},
{89.50,152.28},{64.42,118.71},{54.39,82.86},{40.36,65.47},
{99.30,157.10},{58.89,110.67},{ 7.79,27.51},{30.56,57.76},
{25.36,53.57},{30.97,52.82},{43.58,66.82},{42.36,65.37},
{93.76,158.15},{82.47,125.24},{91.63,156.91},{78.72,105.92},
{31.87,62.72},{82.22,127.72},{64.74,103.19},{31.55,59.89},
{80.05,107.74},{92.46,143.29},{60.94,97.23},{68.53,108.77},
{85.14,120.73},{68.38,122.31},{80.44,114.39},{39.21,62.39},
{24.66,43.93},{22.87,49.86},{58.84,104.78},{99.98,153.75},
{69.48,113.97},{34.39,57.97},{83.36,138.58},{71.68,114.77},
{44.75,80.05},{ 9.39,40.75},{63.47,103.88},{47.26,84.64},
{29.85,66.80},{16.55,63.13},{51.43,89.78},{69.16,95.26},
{64.46,115.19},{63.15,104.10},{69.83,109.62},{99.17,146.82},
{25.56,45.36},{51.14,84.62},{75.33,113.95},{29.84,80.08},
{52.39,92.98},{79.23,113.80},{11.40,23.76},{26.31,58.95},
{93.83,152.10},{53.62,74.22},{ 2.21,18.99},{16.19,58.50},
{ 0.69,26.72},{86.80,127.51},{39.37,82.93},{27.86,65.81},
{64.34,90.19},{ 0.21,18.49},{16.40,42.73},{27.58,53.62},
{31.50,75.06},{69.92,120.33},{93.68,145.77},{33.52,52.58},
{44.61,66.12},{31.67,65.82},{50.45,70.54},{45.07,76.15}
};
double residual_error(double x, double y, double m, double c) {
double e = (m * x) + c - y;
return e * e;
}
__device__ double d_residual_error(double x, double y, double m, double c) {
double e = (m * x) + c - y;
return e * e;
}
double rms_error(double m, double c) {
int k;
double mean;
double error_sum = 0;
for(k=0; k<f_data; k++) {
error_sum += residual_error(data[k].x, data[k].y, m, c);
}
mean = error_sum / f_data;
return sqrt(mean);
}
__global__ void d_rms_error(double *m, double *c, double *error_sum_arr, dot_r *d_data) {
int k = threadIdx.x + blockIdx.x * blockDim.x;
error_sum_arr[k] = d_residual_error(d_data[k].x, d_data[k].y, *m, *c);
}
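//One thread per data point: main() launches this kernel with 100 blocks of 10 threads
//(= 1000 threads, matching f_data); each thread writes its squared residual into
//error_sum_arr, and the host then reduces the array and takes sqrt(mean) for the RMS error.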
int time_difference(struct timespec *start, struct timespec *finish,
long long int *difference) {
long long int ds = finish->tv_sec - start->tv_sec;
long long int dn = finish->tv_nsec - start->tv_nsec;
if(dn < 0 ) {
ds--;
dn += 1000000000;
}
*difference = ds * 1000000000 + dn;
return !(*difference > 0);
}
int main() {
int k;
double bm = 1.3;
double bc = 10;
double be;
double dm[8];
double dc[8];
double e[8];
double step = 0.01;
double best_error = 999999999;
int best_error_k;
int minimum_found = 0;
double om[] = {0,1,1, 1, 0,-1,-1,-1};
double oc[] = {1,1,0,-1,-1,-1, 0, 1};
struct timespec start, finish;
long long int time_elapsed;
clock_gettime(CLOCK_MONOTONIC, &start);
hipError_t error;
double *d_dm;
double *d_dc;
double *d_error_sum_arr;
dot_r *d_data;
be = rms_error(bm, bc);
error = hipMalloc(&d_dm, (sizeof(double) * 8));
if(error){
fprintf(stderr, "hipMalloc on d_dm returned %d %s\n", error,
hipGetErrorString(error));
exit(1);
}
error = hipMalloc(&d_dc, (sizeof(double) * 8));
if(error){
fprintf(stderr, "hipMalloc on d_dc returned %d %s\n", error,
hipGetErrorString(error));
exit(1);
}
error = hipMalloc(&d_error_sum_arr, (sizeof(double) * 1000));
if(error){
fprintf(stderr, "hipMalloc on d_error_sum_arr returned %d %s\n", error,
hipGetErrorString(error));
exit(1);
}
error = hipMalloc(&d_data, sizeof(data));
if(error){
fprintf(stderr, "hipMalloc on d_data returned %d %s\n", error,
hipGetErrorString(error));
exit(1);
}
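//Pattern search over (m, c): each iteration evaluates the RMS error at the 8 neighbouring
//points given by the om/oc offsets scaled by `step`, moves to the best neighbour while it
//improves on the current best error, and stops at the first iteration with no improvement.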
while(!minimum_found) {
for(k=0;k<8;k++) {
dm[k] = bm + (om[k] * step);
dc[k] = bc + (oc[k] * step);
}
error = hipMemcpy(d_dm, dm, (sizeof(double) * 8), hipMemcpyHostToDevice);
if(error){
fprintf(stderr, "hipMemcpy to d_dm returned %d %s\n", error,
hipGetErrorString(error));
}
error = hipMemcpy(d_dc, dc, (sizeof(double) * 8), hipMemcpyHostToDevice);
if(error){
fprintf(stderr, "hipMemcpy to d_dc returned %d %s\n", error,
hipGetErrorString(error));
}
error = hipMemcpy(d_data, data, sizeof(data), hipMemcpyHostToDevice);
if(error){
fprintf(stderr, "hipMemcpy to d_data returned %d %s\n", error,
hipGetErrorString(error));
}
for(k=0;k<8;k++) {
double h_error_sum_arr[1000];
double error_sum_total = 0;
double error_sum_mean;
hipLaunchKernelGGL(d_rms_error, dim3(100), dim3(10), 0, 0, &d_dm[k], &d_dc[k], d_error_sum_arr, d_data);
hipDeviceSynchronize();
error = hipMemcpy(&h_error_sum_arr, d_error_sum_arr, (sizeof(double) * 1000), hipMemcpyDeviceToHost);
if(error){
fprintf(stderr, "hipMemcpy to error_sum returned %d %s\n", error,
hipGetErrorString(error));
}
for(int j=0; j<f_data; j++) {
error_sum_total += h_error_sum_arr[j];
}
error_sum_mean = error_sum_total / f_data;
e[k] = sqrt(error_sum_mean);
if(e[k] < best_error) {
best_error = e[k];
best_error_k = k;
}
error_sum_total = 0;
}
if(best_error < be) {
be = best_error;
bm = dm[best_error_k];
bc = dc[best_error_k];
} else {
minimum_found = 1;
}
}
error = hipFree(d_dm);
if(error){
fprintf(stderr, "hipFree on d_dm returned %d %s\n", error,
hipGetErrorString(error));
exit(1);
}
error = hipFree(d_dc);
if(error){
fprintf(stderr, "hipFree on d_dc returned %d %s\n", error,
hipGetErrorString(error));
exit(1);
}
error = hipFree(d_data);
if(error){
fprintf(stderr, "hipFree on d_data returned %d %s\n", error,
hipGetErrorString(error));
exit(1);
}
error = hipFree(d_error_sum_arr);
if(error){
fprintf(stderr, "hipFree on d_error_sum_arr returned %d %s\n", error,
hipGetErrorString(error));
exit(1);
}
printf("minimum m,c is %lf,%lf with error %lf\n", bm, bc, be);
clock_gettime(CLOCK_MONOTONIC, &finish);
time_difference(&start, &finish, &time_elapsed);
printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed,
(time_elapsed/1.0e9));
return 0;
}
|
1d61a4c5447b9d2e8c1ddc2db8fe078d470ab386.cu
|
#include <stdio.h>
#include <math.h>
#include <time.h>
#include <unistd.h>
#include <cuda_runtime_api.h>
#include <errno.h>
#include <unistd.h>
/******************************************************************************
* The variable names and the function names of this program are the same as those provided by the university.
The added variable and function are the only changes made to this program.
* To compile:
* nvcc -o linear_regression linear_regression.cu -lm
*
* To run:
* ./linear_regression
*
*****************************************************************************/
typedef struct dot_r {
double x;
double y;
} dot_r;
int f_data = 1000;
__device__ int d_f_data = 1000;
dot_r data[] = {
{77.71,118.37},{79.99,117.25},{69.06,122.31},{69.44,97.80},
{78.39,133.72},{75.10,116.40},{72.92,100.75},{66.00,107.78},
{78.42,135.07},{ 1.98,27.38},{44.30,98.30},{12.89,54.55},
{50.28,81.23},{ 0.87,32.41},{60.11,103.89},{61.07,95.05},
{57.58,94.71},{13.53,42.89},{85.53,147.68},{66.85,96.47},
{89.65,145.23},{59.58,114.64},{53.38,85.22},{10.20,40.31},
{83.60,114.19},{ 7.48,17.31},{80.00,117.22},{52.39,86.20},
{53.09,98.96},{90.75,128.71},{97.80,146.45},{19.76,30.07},
{73.05,98.86},{47.12,96.59},{16.89, 7.56},{42.79,65.89},
{78.08,116.52},{22.14,35.28},{ 8.32,34.35},{80.45,118.18},
{13.46,30.08},{98.94,148.92},{14.74,61.82},{23.09,60.07},
{73.49,134.11},{38.21,66.26},{ 7.08,13.11},{40.65,102.52},
{ 4.92,26.25},{72.74,128.55},{25.25,33.78},{71.87,115.97},
{73.70,125.19},{75.21,121.60},{47.52,85.10},{51.48,77.13},
{69.78,112.68},{74.89,120.82},{41.59,76.25},{ 6.25,21.02},
{53.77,92.85},{83.60,133.92},{13.30,33.67},{81.35,146.79},
{20.63,47.13},{75.03,113.14},{29.91,61.69},{40.32,64.97},
{26.41,64.73},{30.93,48.34},{50.20,66.71},{83.38,135.77},
{46.28,84.61},{52.76,98.56},{89.02,133.43},{47.94,69.37},
{86.83,127.83},{41.09,72.44},{86.41,124.49},{75.35,112.22},
{27.19,45.42},{79.23,122.70},{79.94,122.33},{ 2.55,29.54},
{23.62,34.45},{17.62,60.87},{61.60,110.86},{33.60,43.98},
{98.29,129.36},{96.42,159.26},{97.06,153.63},{23.54,63.41},
{92.21,124.73},{93.80,143.77},{99.21,141.99},{37.77,76.65},
{60.85,108.71},{32.82,76.23},{58.21,99.66},{41.05,67.89},
{20.40,44.63},{96.85,118.68},{93.03,151.95},{96.12,143.23},
{ 2.38,26.53},{74.99,117.20},{41.23,75.18},{ 1.22,15.65},
{86.09,140.03},{32.13,71.68},{ 5.24,36.52},{ 3.37,31.88},
{88.79,143.02},{74.29,132.13},{78.39,133.92},{48.90,83.32},
{35.85,61.90},{61.94,99.71},{55.87,100.07},{53.60,98.75},
{ 7.87,57.98},{18.03,54.82},{38.16,64.94},{97.60,152.45},
{83.75,132.52},{ 7.46,35.02},{45.36,90.96},{14.30,52.57},
{91.74,134.30},{84.93,131.68},{91.39,143.15},{22.66,50.78},
{56.21,114.58},{ 2.93,31.36},{24.46,59.05},{80.63,109.87},
{11.44,37.10},{63.49,111.92},{ 9.43,36.66},{61.90,106.73},
{33.41,64.37},{28.01,62.66},{68.99,115.81},{31.88,77.88},
{41.96,66.67},{56.29,93.49},{54.32,89.16},{21.69,43.79},
{98.14,141.51},{86.27,143.26},{13.78,39.65},{55.31,79.25},
{78.78,129.62},{75.89,114.13},{62.17,99.07},{ 2.85,20.60},
{26.17,58.13},{73.04,110.64},{82.63,138.70},{81.45,134.00},
{83.51,126.17},{65.61,89.46},{87.80,156.70},{37.41,84.56},
{66.08,117.32},{40.79,54.80},{33.53,65.25},{41.04,72.05},
{66.82,96.09},{64.98,99.59},{ 0.48,14.04},{ 3.79,27.94},
{75.75,112.43},{13.40,26.16},{71.22,124.57},{76.30,127.05},
{20.79,32.42},{50.82,96.31},{20.31,50.97},{90.14,139.39},
{38.36,72.64},{30.21,58.75},{21.07,41.05},{49.10,85.66},
{56.15,83.49},{95.58,145.48},{38.99,72.62},{77.33,127.70},
{18.89,27.48},{60.60,92.75},{82.51,158.13},{36.16,78.41},
{32.93,46.63},{95.76,156.67},{87.48,128.92},{39.63,67.11},
{26.92,43.70},{21.68,43.05},{ 5.25,21.22},{94.31,151.40},
{36.46,48.57},{86.11,143.05},{76.03,117.55},{93.51,148.78},
{28.82,36.20},{42.91,63.78},{42.68,73.03},{ 1.56,23.66},
{72.77,96.96},{12.89,37.54},{76.73,104.42},{13.11,37.49},
{79.24,122.47},{19.77,51.97},{97.41,167.81},{36.15,66.21},
{85.83,141.64},{97.81,140.42},{19.79,44.90},{60.73,100.93},
{71.57,109.06},{61.08,99.11},{26.65,56.85},{83.86,118.62},
{71.71,102.11},{95.39,157.38},{62.06,109.50},{51.76,96.57},
{87.21,151.31},{42.84,87.45},{77.11,127.24},{93.93,132.40},
{14.07,34.64},{76.22,107.11},{91.14,130.17},{41.13,93.39},
{45.86,65.70},{44.29,84.05},{88.13,129.71},{83.78,128.84},
{47.64,76.55},{43.71,89.37},{45.24,69.61},{41.40,73.34},
{78.05,115.35},{73.60,130.53},{51.39,76.25},{ 5.36,37.32},
{98.60,134.79},{55.74,107.52},{80.27,130.95},{55.86,112.57},
{76.90,132.49},{70.12,99.17},{37.98,63.30},{59.69,87.79},
{27.60,59.93},{ 7.85,13.81},{91.31,142.38},{61.71,90.80},
{ 3.53,20.92},{43.51,70.62},{67.48,111.76},{51.69,89.26},
{42.72,73.83},{62.41,84.52},{ 6.93,39.03},{53.73,72.68},
{78.51,134.77},{ 0.04,12.87},{32.23,69.84},{47.65,89.71},
{20.63,40.80},{31.71,64.32},{79.65,119.23},{44.49,80.50},
{15.85,71.07},{79.52,126.35},{49.54,76.21},{65.93,95.92},
{80.63,109.78},{ 3.89,18.16},{78.08,132.04},{13.10,18.05},
{ 8.09,48.23},{71.70,102.45},{39.58,73.17},{50.35,87.03},
{ 1.63,19.98},{43.46,81.78},{20.67,30.42},{41.44,84.93},
{48.79,82.65},{43.40,87.03},{27.04,78.79},{54.44,86.12},
{25.95,59.95},{68.03,121.75},{31.42,61.04},{61.36,110.79},
{21.85,64.55},{19.03,37.01},{67.99,130.99},{22.70,56.76},
{13.20,28.61},{53.60,88.43},{ 9.53,37.45},{94.33,131.89},
{85.92,136.44},{77.44,116.76},{85.34,119.62},{32.78,64.06},
{33.77,74.14},{15.69,39.03},{45.25,68.73},{70.58,101.24},
{81.07,121.24},{84.05,111.60},{28.02,49.85},{42.92,75.76},
{64.33,114.30},{54.02,102.96},{25.63,34.36},{13.15,34.17},
{72.20,102.12},{25.95,60.98},{11.33,32.68},{12.57,42.62},
{75.81,118.49},{33.39,75.99},{47.08,78.23},{41.85,80.72},
{32.80,54.82},{61.02,98.56},{51.27,83.86},{15.76,53.40},
{16.74,39.72},{55.21,96.87},{41.06,87.73},{44.64,70.94},
{ 6.37,28.78},{72.32,102.31},{19.40,44.87},{33.49,43.03},
{73.66,130.70},{33.05,63.86},{ 9.44,28.35},{93.85,143.55},
{88.61,131.82},{18.18,32.11},{85.96,137.80},{62.64,108.88},
{44.87,65.03},{ 7.97,29.72},{97.26,146.68},{88.75,128.80},
{27.41,50.93},{29.15,57.13},{ 8.87,31.25},{ 4.10,41.36},
{22.94,53.20},{55.52,107.71},{35.11,63.22},{28.63,60.14},
{47.21,72.73},{36.78,67.44},{20.55,52.79},{76.51,136.88},
{40.00,74.92},{69.58,118.32},{25.26,65.70},{24.34,55.95},
{29.39,48.23},{55.09,98.80},{22.29,42.40},{ 3.57,33.11},
{23.99,57.04},{25.25,61.68},{ 6.29, 4.79},{46.72,69.01},
{88.43,154.62},{49.62,83.67},{57.38,108.12},{ 1.65,32.45},
{14.21,40.08},{51.90,108.16},{23.59,62.75},{ 1.38,15.38},
{72.16,110.86},{23.69,63.86},{26.81,68.93},{58.09,96.22},
{ 3.11,31.31},{16.93,23.06},{20.73,47.49},{68.43,112.12},
{89.41,125.83},{38.03,70.19},{88.91,127.64},{15.60,37.27},
{79.80,130.93},{58.62,94.38},{97.38,161.61},{82.35,133.83},
{12.41,56.68},{76.26,130.66},{99.68,140.59},{41.95,69.47},
{67.29,107.94},{63.46,119.31},{58.18,94.67},{52.51,70.40},
{ 4.92,22.25},{38.59,73.84},{93.82,142.33},{84.56,125.48},
{27.33,73.83},{78.09,125.10},{27.32,52.72},{63.51,101.17},
{69.23,107.72},{71.50,129.66},{47.09,88.65},{ 1.69,12.36},
{14.43,29.88},{25.03,50.90},{87.32,138.41},{ 7.33,26.36},
{42.44,73.18},{81.54,138.65},{21.00,42.17},{20.01,60.70},
{10.91,50.60},{72.92,134.81},{25.72,47.36},{74.81,115.12},
{43.02,69.35},{ 7.49,17.92},{16.01,62.76},{47.61,78.91},
{63.03,114.84},{41.47,70.16},{10.99,43.14},{65.29,122.99},
{84.13,151.79},{31.56,72.09},{42.02,66.99},{75.43,122.59},
{66.67,108.12},{94.41,136.84},{65.70,104.84},{28.83,45.17},
{83.23,115.45},{83.22,132.69},{25.34,40.08},{39.41,77.42},
{86.43,137.37},{82.92,138.46},{77.39,130.12},{27.93,71.13},
{ 5.98,14.68},{53.22,102.60},{69.02,125.95},{31.21,52.17},
{60.89,96.81},{72.29,127.61},{59.73,97.42},{ 3.41,34.91},
{67.59,102.83},{ 3.52,25.26},{22.92,43.58},{ 9.56,35.27},
{75.71,118.93},{74.50,99.32},{75.97,109.67},{98.54,144.18},
{42.28,84.20},{11.03,49.30},{58.27,97.63},{68.86,115.18},
{18.28,39.07},{94.18,140.02},{85.29,139.47},{90.94,122.07},
{85.45,142.35},{24.99,57.95},{13.13,45.83},{11.05,36.79},
{34.63,68.62},{82.21,123.38},{77.92,125.53},{49.74,101.96},
{44.84,89.51},{55.42,82.02},{45.86,75.45},{75.20,123.93},
{86.83,129.61},{55.84,96.01},{94.94,161.02},{ 6.08,40.37},
{93.48,143.56},{69.31,108.07},{ 8.44,50.11},{90.19,124.44},
{ 7.76,39.72},{50.86,86.96},{75.60,120.34},{26.92,60.22},
{90.61,147.35},{28.75,47.08},{10.09,29.92},{41.39,85.20},
{42.61,89.96},{70.80,128.41},{95.80,150.46},{ 5.24,32.06},
{38.48,81.51},{ 1.84,20.27},{76.81,115.18},{94.45,149.21},
{97.84,147.80},{29.87,65.79},{89.72,124.70},{61.41,108.91},
{61.92,93.34},{93.02,138.49},{40.00,99.11},{93.69,140.55},
{49.15,79.54},{15.09,38.92},{72.51,110.53},{58.69,98.46},
{19.89,43.37},{30.08,53.34},{65.85,108.89},{24.23,61.85},
{ 4.00,18.71},{83.31,136.71},{95.61,155.94},{ 6.06,27.71},
{32.15,60.19},{52.35,87.88},{32.47,57.17},{18.16,31.09},
{30.95,70.51},{ 3.06,28.73},{67.59,105.87},{32.85,72.58},
{36.83,59.70},{94.38,143.76},{64.11,123.13},{ 3.82,47.87},
{ 0.05, 7.22},{97.38,138.16},{61.43,95.61},{48.82,71.95},
{40.72,83.12},{27.25,49.60},{68.62,119.62},{38.86,86.99},
{84.41,129.27},{50.41,94.42},{58.25,84.50},{76.15,115.62},
{98.74,157.68},{85.86,123.04},{75.11,121.87},{ 2.08,24.96},
{ 0.61,16.67},{44.85,62.44},{24.40,56.84},{27.55,74.58},
{35.04,50.21},{ 8.12,36.09},{82.93,122.09},{ 7.23,22.68},
{84.75,149.08},{98.09,135.56},{44.72,82.11},{56.69,99.54},
{73.44,108.31},{89.69,146.60},{15.68,52.36},{61.02,97.96},
{82.44,125.94},{15.20,37.46},{95.25,133.23},{63.12,116.50},
{61.00,90.26},{97.78,143.63},{26.50,63.40},{49.85,69.20},
{ 9.59,29.31},{65.87,108.15},{85.70,120.68},{24.60,49.25},
{37.32,63.35},{24.52,39.37},{45.29,98.28},{ 2.40,23.86},
{37.13,61.72},{18.83,46.26},{61.99,89.33},{10.78,35.44},
{96.87,140.16},{74.81,124.22},{92.42,150.59},{ 3.93,27.67},
{98.50,151.73},{83.89,138.01},{13.44,29.08},{12.43,63.45},
{59.00,107.05},{ 8.87,39.62},{95.43,137.76},{33.46,78.39},
{81.86,127.60},{62.80,82.03},{51.12,98.72},{ 6.46,22.45},
{34.17,71.95},{46.53,62.89},{51.89,86.67},{99.81,159.73},
{15.53,25.82},{27.02,53.90},{ 6.74,21.51},{ 8.39,46.49},
{ 4.18,26.44},{12.12,28.82},{12.32,29.01},{20.52,68.74},
{ 4.35,41.51},{36.92,40.93},{ 3.02,22.89},{31.95,65.75},
{88.17,130.42},{47.20,73.61},{28.83,46.88},{83.22,129.91},
{ 1.91, 6.78},{67.76,92.99},{20.53,46.48},{11.65,37.44},
{ 6.69,19.11},{ 5.65,24.92},{46.45,67.67},{86.36,126.52},
{53.65,92.55},{79.46,117.67},{ 1.93,18.91},{65.19,124.83},
{ 8.60,33.39},{53.01,88.02},{ 4.10,16.66},{19.55,47.08},
{70.43,106.99},{68.46,131.30},{43.00,88.27},{ 1.09,23.86},
{49.10,88.88},{38.80,71.23},{48.58,56.84},{17.51,49.43},
{86.81,136.83},{32.99,80.49},{40.77,69.47},{ 8.96,43.94},
{77.88,112.41},{90.41,130.55},{34.68,80.40},{26.12,38.12},
{97.31,131.87},{83.20,128.30},{49.34,92.64},{74.72,109.87},
{65.13,96.07},{40.33,95.63},{12.69,70.18},{93.04,123.67},
{62.77,95.33},{10.01,42.56},{50.26,91.79},{33.03,64.88},
{35.60,74.93},{22.34,71.49},{35.91,91.66},{63.35,107.85},
{55.45,81.38},{75.28,114.90},{83.57,143.65},{27.74,55.13},
{54.63,93.95},{77.31,140.26},{77.35,118.13},{77.60,134.28},
{24.18,40.23},{93.52,148.38},{89.15,134.32},{50.77,87.31},
{67.85,103.17},{78.51,139.13},{66.65,121.66},{55.62,100.25},
{38.93,68.47},{35.20,54.12},{48.24,81.83},{ 2.03,17.58},
{97.45,144.40},{47.17,72.08},{23.74,35.80},{25.37,48.72},
{ 5.31,15.37},{66.74,107.76},{48.65,97.69},{98.93,160.88},
{69.73,115.68},{65.00,105.45},{36.58,86.29},{11.47,12.24},
{35.17,59.65},{37.79,61.17},{27.99,66.00},{70.76,107.06},
{36.48,43.24},{30.39,38.81},{74.79,97.82},{99.11,141.48},
{65.18,94.20},{57.64,113.19},{60.38,105.02},{ 2.51,14.01},
{ 5.64,14.90},{99.02,152.02},{85.49,139.91},{51.13,88.82},
{91.16,139.02},{59.68,110.37},{28.17,61.92},{64.77,118.17},
{86.11,121.95},{ 0.65,29.67},{11.52,47.82},{55.48,89.30},
{85.96,134.27},{17.73,60.43},{72.41,127.45},{98.91,132.37},
{23.99,52.87},{75.61,122.24},{93.47,150.85},{38.10,89.12},
{36.36,93.40},{14.67,35.45},{19.00,28.81},{34.13,56.63},
{25.50,53.50},{66.38,106.13},{21.23,35.13},{58.52,101.30},
{45.48,85.22},{18.94,45.86},{36.91,71.11},{68.31,102.83},
{48.55,76.34},{83.76,119.65},{13.59,41.19},{25.11,59.64},
{88.34,140.80},{40.73,65.81},{75.06,117.91},{34.52,70.34},
{60.33,112.96},{93.45,159.26},{14.69,37.71},{67.94,108.60},
{66.55,105.72},{29.61,67.84},{54.44,86.84},{85.79,124.78},
{94.04,143.80},{47.76,103.96},{67.79,121.88},{ 4.08,26.63},
{66.30,118.13},{58.84,109.51},{78.38,119.86},{71.80,125.94},
{80.36,120.93},{43.39,55.98},{62.44,80.43},{59.86,100.00},
{52.63,89.05},{87.47,133.26},{ 4.52,41.66},{67.69,95.44},
{25.85,50.83},{62.84,125.73},{30.62,66.84},{41.36,72.66},
{90.15,116.95},{47.89,80.34},{66.11,113.55},{62.44,117.34},
{ 7.46,18.13},{ 2.31,22.79},{ 3.48,-2.56},{69.38,105.80},
{71.06,102.65},{21.22,43.33},{61.68,101.09},{82.58,128.02},
{16.07,41.68},{67.61,100.83},{92.63,136.33},{46.09,77.04},
{58.98,89.11},{47.14,62.19},{79.96,123.29},{69.28,107.76},
{57.66,101.57},{ 8.75,22.96},{76.37,109.15},{16.16,49.22},
{63.84,94.16},{50.39,80.51},{23.40,46.06},{ 1.19,11.59},
{33.56,107.61},{21.15,43.11},{50.64,105.18},{49.37,82.33},
{ 3.04,20.08},{86.17,134.57},{93.84,164.91},{37.66,72.16},
{37.60,76.78},{31.93,62.18},{24.76,64.81},{81.81,135.08},
{19.47,44.59},{51.06,93.21},{75.95,105.78},{45.11,84.19},
{26.09,53.01},{65.29,111.15},{16.30,43.46},{ 3.98,37.02},
{31.01,83.67},{36.74,80.27},{57.44,98.20},{20.22,55.81},
{90.30,142.05},{33.02,72.78},{10.00,30.94},{48.85,100.09},
{ 8.98,38.97},{16.55,50.11},{41.77,75.79},{85.21,117.09},
{ 7.76,16.77},{ 4.85,28.72},{51.86,86.07},{95.10,149.69},
{ 6.01,14.26},{88.88,149.08},{91.29,135.31},{90.04,127.70},
{87.74,140.95},{82.55,134.69},{35.17,77.45},{24.19,51.84},
{10.17,33.81},{91.68,139.46},{ 3.79,29.42},{78.61,127.21},
{19.41,48.24},{56.46,83.05},{16.64,44.55},{67.28,114.50},
{85.25,127.93},{90.71,131.57},{95.99,161.52},{ 2.47,22.18},
{51.76,86.07},{16.01,47.34},{80.02,127.68},{76.54,104.42},
{69.92,109.39},{15.84,43.24},{57.14,97.97},{10.51,36.98},
{83.47,136.46},{75.61,106.65},{58.51,98.79},{69.56,124.26},
{76.79,102.97},{26.32,62.18},{52.91,84.21},{68.31,100.40},
{29.00,47.75},{47.07,80.94},{13.08,48.23},{21.29,47.84},
{96.08,138.90},{73.92,111.23},{44.28,79.11},{38.58,71.59},
{30.00,61.29},{59.70,102.57},{89.92,136.71},{22.75,50.13},
{56.49,78.08},{85.09,123.94},{66.34,101.70},{81.95,139.69},
{53.94,82.47},{53.59,109.38},{61.26,93.45},{85.31,153.15},
{10.04,31.69},{46.56,82.32},{87.62,140.91},{37.29,68.26},
{89.50,152.28},{64.42,118.71},{54.39,82.86},{40.36,65.47},
{99.30,157.10},{58.89,110.67},{ 7.79,27.51},{30.56,57.76},
{25.36,53.57},{30.97,52.82},{43.58,66.82},{42.36,65.37},
{93.76,158.15},{82.47,125.24},{91.63,156.91},{78.72,105.92},
{31.87,62.72},{82.22,127.72},{64.74,103.19},{31.55,59.89},
{80.05,107.74},{92.46,143.29},{60.94,97.23},{68.53,108.77},
{85.14,120.73},{68.38,122.31},{80.44,114.39},{39.21,62.39},
{24.66,43.93},{22.87,49.86},{58.84,104.78},{99.98,153.75},
{69.48,113.97},{34.39,57.97},{83.36,138.58},{71.68,114.77},
{44.75,80.05},{ 9.39,40.75},{63.47,103.88},{47.26,84.64},
{29.85,66.80},{16.55,63.13},{51.43,89.78},{69.16,95.26},
{64.46,115.19},{63.15,104.10},{69.83,109.62},{99.17,146.82},
{25.56,45.36},{51.14,84.62},{75.33,113.95},{29.84,80.08},
{52.39,92.98},{79.23,113.80},{11.40,23.76},{26.31,58.95},
{93.83,152.10},{53.62,74.22},{ 2.21,18.99},{16.19,58.50},
{ 0.69,26.72},{86.80,127.51},{39.37,82.93},{27.86,65.81},
{64.34,90.19},{ 0.21,18.49},{16.40,42.73},{27.58,53.62},
{31.50,75.06},{69.92,120.33},{93.68,145.77},{33.52,52.58},
{44.61,66.12},{31.67,65.82},{50.45,70.54},{45.07,76.15}
};
double residual_error(double x, double y, double m, double c) {
double e = (m * x) + c - y;
return e * e;
}
__device__ double d_residual_error(double x, double y, double m, double c) {
double e = (m * x) + c - y;
return e * e;
}
double rms_error(double m, double c) {
int k;
double mean;
double error_sum = 0;
for(k=0; k<f_data; k++) {
error_sum += residual_error(data[k].x, data[k].y, m, c);
}
mean = error_sum / f_data;
return sqrt(mean);
}
__global__ void d_rms_error(double *m, double *c, double *error_sum_arr, dot_r *d_data) {
int k = threadIdx.x + blockIdx.x * blockDim.x;
error_sum_arr[k] = d_residual_error(d_data[k].x, d_data[k].y, *m, *c);
}
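//One thread per data point: main() launches this kernel with 100 blocks of 10 threads
//(= 1000 threads, matching f_data); each thread writes its squared residual into
//error_sum_arr, and the host then reduces the array and takes sqrt(mean) for the RMS error.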
int time_difference(struct timespec *start, struct timespec *finish,
long long int *difference) {
long long int ds = finish->tv_sec - start->tv_sec;
long long int dn = finish->tv_nsec - start->tv_nsec;
if(dn < 0 ) {
ds--;
dn += 1000000000;
}
*difference = ds * 1000000000 + dn;
return !(*difference > 0);
}
int main() {
int k;
double bm = 1.3;
double bc = 10;
double be;
double dm[8];
double dc[8];
double e[8];
double step = 0.01;
double best_error = 999999999;
int best_error_k;
int minimum_found = 0;
double om[] = {0,1,1, 1, 0,-1,-1,-1};
double oc[] = {1,1,0,-1,-1,-1, 0, 1};
struct timespec start, finish;
long long int time_elapsed;
clock_gettime(CLOCK_MONOTONIC, &start);
cudaError_t error;
double *d_dm;
double *d_dc;
double *d_error_sum_arr;
dot_r *d_data;
be = rms_error(bm, bc);
error = cudaMalloc(&d_dm, (sizeof(double) * 8));
if(error){
fprintf(stderr, "cudaMalloc on d_dm returned %d %s\n", error,
cudaGetErrorString(error));
exit(1);
}
error = cudaMalloc(&d_dc, (sizeof(double) * 8));
if(error){
fprintf(stderr, "cudaMalloc on d_dc returned %d %s\n", error,
cudaGetErrorString(error));
exit(1);
}
error = cudaMalloc(&d_error_sum_arr, (sizeof(double) * 1000));
if(error){
fprintf(stderr, "cudaMalloc on d_error_sum_arr returned %d %s\n", error,
cudaGetErrorString(error));
exit(1);
}
error = cudaMalloc(&d_data, sizeof(data));
if(error){
fprintf(stderr, "cudaMalloc on d_data returned %d %s\n", error,
cudaGetErrorString(error));
exit(1);
}
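//Pattern search over (m, c): each iteration evaluates the RMS error at the 8 neighbouring
//points given by the om/oc offsets scaled by `step`, moves to the best neighbour while it
//improves on the current best error, and stops at the first iteration with no improvement.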
while(!minimum_found) {
for(k=0;k<8;k++) {
dm[k] = bm + (om[k] * step);
dc[k] = bc + (oc[k] * step);
}
error = cudaMemcpy(d_dm, dm, (sizeof(double) * 8), cudaMemcpyHostToDevice);
if(error){
fprintf(stderr, "cudaMemcpy to d_dm returned %d %s\n", error,
cudaGetErrorString(error));
}
error = cudaMemcpy(d_dc, dc, (sizeof(double) * 8), cudaMemcpyHostToDevice);
if(error){
fprintf(stderr, "cudaMemcpy to d_dc returned %d %s\n", error,
cudaGetErrorString(error));
}
error = cudaMemcpy(d_data, data, sizeof(data), cudaMemcpyHostToDevice);
if(error){
fprintf(stderr, "cudaMemcpy to d_data returned %d %s\n", error,
cudaGetErrorString(error));
}
for(k=0;k<8;k++) {
double h_error_sum_arr[1000];
double error_sum_total = 0;
double error_sum_mean;
d_rms_error <<<100,10>>>(&d_dm[k], &d_dc[k], d_error_sum_arr, d_data);
cudaDeviceSynchronize();
error = cudaMemcpy(&h_error_sum_arr, d_error_sum_arr, (sizeof(double) * 1000), cudaMemcpyDeviceToHost);
if(error){
fprintf(stderr, "cudaMemcpy to error_sum returned %d %s\n", error,
cudaGetErrorString(error));
}
for(int j=0; j<f_data; j++) {
error_sum_total += h_error_sum_arr[j];
}
error_sum_mean = error_sum_total / f_data;
e[k] = sqrt(error_sum_mean);
if(e[k] < best_error) {
best_error = e[k];
best_error_k = k;
}
error_sum_total = 0;
}
if(best_error < be) {
be = best_error;
bm = dm[best_error_k];
bc = dc[best_error_k];
} else {
minimum_found = 1;
}
}
error = cudaFree(d_dm);
if(error){
fprintf(stderr, "cudaFree on d_dm returned %d %s\n", error,
cudaGetErrorString(error));
exit(1);
}
error = cudaFree(d_dc);
if(error){
fprintf(stderr, "cudaFree on d_dc returned %d %s\n", error,
cudaGetErrorString(error));
exit(1);
}
error = cudaFree(d_data);
if(error){
fprintf(stderr, "cudaFree on d_data returned %d %s\n", error,
cudaGetErrorString(error));
exit(1);
}
error = cudaFree(d_error_sum_arr);
if(error){
fprintf(stderr, "cudaFree on d_error_sum_arr returned %d %s\n", error,
cudaGetErrorString(error));
exit(1);
}
printf("minimum m,c is %lf,%lf with error %lf\n", bm, bc, be);
clock_gettime(CLOCK_MONOTONIC, &finish);
time_difference(&start, &finish, &time_elapsed);
printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed,
(time_elapsed/1.0e9));
return 0;
}
|
737d3b610f83c83ea40c3cac9f1286a3e9166c3e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "fill.cuh"
#include "kernel_helpers_hip.cuh"
#include <library/cuda/wrappers/arch.cuh>
#include <catboost/cuda/gpu_data/gpu_structures.h>
namespace NKernel
{
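//FillBufferImpl below: blockIdx.y selects a column (the buffer pointer is advanced by
//blockIdx.y * alignSize) and the x-dimension threads grid-stride over the first `size`
//elements of that column, writing `value` through WriteThrough.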
template <typename T>
__global__ void FillBufferImpl(T* buffer, T value, ui64 size, ui64 alignSize)
{
buffer += blockIdx.y * alignSize;
ui64 i = blockIdx.x * blockDim.x + threadIdx.x;
while (i < size) {
WriteThrough(buffer + i, value);
i += gridDim.x * blockDim.x;
}
}
template <typename T>
void FillBuffer(T* buffer, T value, ui64 size, ui32 columnCount, ui64 alignSize, TCudaStream stream) {
if (size > 0) {
dim3 numBlocks;
const ui32 blockSize = 128;
numBlocks.x = min((size + blockSize - 1) / blockSize, (ui64)TArchProps::MaxBlockCount());
numBlocks.y = columnCount;
numBlocks.z = 1;
FillBufferImpl<T> << < numBlocks, blockSize, 0, stream>> > (buffer, value, size, alignSize);
}
}
template <typename T>
__global__ void MakeSequenceImpl(T offset, T* buffer, ui64 size)
{
ui64 i = blockIdx.x * blockDim.x + threadIdx.x;
while (i < size) {
WriteThrough(buffer + i, (T)(offset + i));
i += gridDim.x * blockDim.x;
}
}
template <typename T>
void MakeSequence(T offset, T* buffer, ui64 size, TCudaStream stream)
{
if (size > 0)
{
const ui32 blockSize = 512;
const ui64 numBlocks = min((size + blockSize - 1) / blockSize,
(ui64)TArchProps::MaxBlockCount());
MakeSequenceImpl<T> << < numBlocks, blockSize, 0, stream >> > (offset, buffer, size);
}
}
template <typename T>
__global__ void InversePermutationImpl(const T* indices, T* dst, ui64 size) {
ui64 i = blockIdx.x * blockDim.x + threadIdx.x;
while (i < size) {
dst[indices[i]] = i;
i += gridDim.x * blockDim.x;
}
}
template <typename T>
void InversePermutation(const T* order, T* inverseOrder, ui64 size, TCudaStream stream)
{
if (size > 0)
{
const ui32 blockSize = 512;
const ui64 numBlocks = min((size + blockSize - 1) / blockSize,
(ui64)TArchProps::MaxBlockCount());
InversePermutationImpl<T> << < numBlocks, blockSize, 0, stream >> > (order, inverseOrder, size);
}
}
#define FILL_BUFFER(Type)\
template void FillBuffer<Type>(Type* buffer, Type value, ui64 size, ui32 columnCount, ui64 alignSize, TCudaStream stream);
FILL_BUFFER(char) // i8 and char are distinct types
FILL_BUFFER(i8)
FILL_BUFFER(ui8)
FILL_BUFFER(i16)
FILL_BUFFER(ui16)
FILL_BUFFER(i32)
FILL_BUFFER(ui32)
FILL_BUFFER(i64)
FILL_BUFFER(ui64)
FILL_BUFFER(float)
FILL_BUFFER(double)
FILL_BUFFER(bool)
FILL_BUFFER(TCBinFeature)
#undef FILL_BUFFER
template void MakeSequence<int>(int offset, int* buffer, ui64 size, TCudaStream stream);
template void MakeSequence<ui32>(ui32 offset, ui32* buffer, ui64 size, TCudaStream stream);
template void MakeSequence<ui64>(ui64 offset, ui64* buffer, ui64 size, TCudaStream stream);
template void InversePermutation<ui32>(const ui32* order, ui32* inverseOrder, ui64 size, TCudaStream stream);
template void InversePermutation<int>(const int* order, int* inverseOrder, ui64 size, TCudaStream stream);
}
|
737d3b610f83c83ea40c3cac9f1286a3e9166c3e.cu
|
#include "fill.cuh"
#include "kernel_helpers.cuh"
#include <library/cuda/wrappers/arch.cuh>
#include <catboost/cuda/gpu_data/gpu_structures.h>
namespace NKernel
{
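//FillBufferImpl below: blockIdx.y selects a column (the buffer pointer is advanced by
//blockIdx.y * alignSize) and the x-dimension threads grid-stride over the first `size`
//elements of that column, writing `value` through WriteThrough.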
template <typename T>
__global__ void FillBufferImpl(T* buffer, T value, ui64 size, ui64 alignSize)
{
buffer += blockIdx.y * alignSize;
ui64 i = blockIdx.x * blockDim.x + threadIdx.x;
while (i < size) {
WriteThrough(buffer + i, value);
i += gridDim.x * blockDim.x;
}
}
template <typename T>
void FillBuffer(T* buffer, T value, ui64 size, ui32 columnCount, ui64 alignSize, TCudaStream stream) {
if (size > 0) {
dim3 numBlocks;
const ui32 blockSize = 128;
numBlocks.x = min((size + blockSize - 1) / blockSize, (ui64)TArchProps::MaxBlockCount());
numBlocks.y = columnCount;
numBlocks.z = 1;
FillBufferImpl<T> << < numBlocks, blockSize, 0, stream>> > (buffer, value, size, alignSize);
}
}
template <typename T>
__global__ void MakeSequenceImpl(T offset, T* buffer, ui64 size)
{
ui64 i = blockIdx.x * blockDim.x + threadIdx.x;
while (i < size) {
WriteThrough(buffer + i, (T)(offset + i));
i += gridDim.x * blockDim.x;
}
}
template <typename T>
void MakeSequence(T offset, T* buffer, ui64 size, TCudaStream stream)
{
if (size > 0)
{
const ui32 blockSize = 512;
const ui64 numBlocks = min((size + blockSize - 1) / blockSize,
(ui64)TArchProps::MaxBlockCount());
MakeSequenceImpl<T> << < numBlocks, blockSize, 0, stream >> > (offset, buffer, size);
}
}
template <typename T>
__global__ void InversePermutationImpl(const T* indices, T* dst, ui64 size) {
ui64 i = blockIdx.x * blockDim.x + threadIdx.x;
while (i < size) {
dst[indices[i]] = i;
i += gridDim.x * blockDim.x;
}
}
template <typename T>
void InversePermutation(const T* order, T* inverseOrder, ui64 size, TCudaStream stream)
{
if (size > 0)
{
const ui32 blockSize = 512;
const ui64 numBlocks = min((size + blockSize - 1) / blockSize,
(ui64)TArchProps::MaxBlockCount());
InversePermutationImpl<T> << < numBlocks, blockSize, 0, stream >> > (order, inverseOrder, size);
}
}
#define FILL_BUFFER(Type)\
template void FillBuffer<Type>(Type* buffer, Type value, ui64 size, ui32 columnCount, ui64 alignSize, TCudaStream stream);
FILL_BUFFER(char) // i8 and char are distinct types
FILL_BUFFER(i8)
FILL_BUFFER(ui8)
FILL_BUFFER(i16)
FILL_BUFFER(ui16)
FILL_BUFFER(i32)
FILL_BUFFER(ui32)
FILL_BUFFER(i64)
FILL_BUFFER(ui64)
FILL_BUFFER(float)
FILL_BUFFER(double)
FILL_BUFFER(bool)
FILL_BUFFER(TCBinFeature)
#undef FILL_BUFFER
template void MakeSequence<int>(int offset, int* buffer, ui64 size, TCudaStream stream);
template void MakeSequence<ui32>(ui32 offset, ui32* buffer, ui64 size, TCudaStream stream);
template void MakeSequence<ui64>(ui64 offset, ui64* buffer, ui64 size, TCudaStream stream);
template void InversePermutation<ui32>(const ui32* order, ui32* inverseOrder, ui64 size, TCudaStream stream);
template void InversePermutation<int>(const int* order, int* inverseOrder, ui64 size, TCudaStream stream);
}
|
b95bc4a8ed6e232ab0e6367ba373627737d9aea8.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2011, Alex Krizhevsky ([email protected])
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <cutil_inline.h>
#include <iostream>
#include <layer_kernels.cuh>
#include <layer.cuh>
#include <data.cuh>
#include <util.cuh>
#include <cudaconv2.cuh>
#include <matrix.h>
#include "common/logging.h"
using namespace std;
/*
* =======================
* Layer
* =======================
*/
Layer::Layer(PyObject* paramsDict, bool trans) :
_trans(trans) {
_name = pyDictGetString(paramsDict, "name");
_type = pyDictGetString(paramsDict, "type");
_numGradProducersNext = 0;
_foundGradConsumers = false;
_gradConsumer = pyDictGetInt(paramsDict, "gradConsumer");
_actsTarget = pyDictGetInt(paramsDict, "actsTarget");
_actsGradTarget = pyDictGetInt(paramsDict, "actsGradTarget");
_conserveMem = pyDictGetInt(paramsDict, "conserveMem");
_outputs = _actsTarget < 0 ? new NVMatrix() : NULL;
_actsGrad = _actsGradTarget < 0 ? new NVMatrix() : NULL;
}
void Layer::fpropNext(PASS_TYPE passType) {
// double start = Now();
for (int i = 0; i < _next.size(); i++) {
_next[i]->fprop(passType);
}
// Log_Info("Finished layer in %.3f seconds.", Now() - start);
}
void Layer::truncBwdActs() {
// Only truncate actsGrad if I own it
if (_conserveMem && _actsGradTarget < 0) {
getActsGrad().truncate();
}
if (_conserveMem) {
getActs().truncate();
}
}
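// Called once per previous layer; when the last forward input arrives,
// gather all previous activations and run the real forward pass.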
void Layer::fprop(PASS_TYPE passType) {
_rcvdFInputs += 1;
if (_rcvdFInputs == _prev.size()) {
NVMatrixV v;
for (int i = 0; i < _prev.size(); i++) {
v.push_back(&_prev[i]->getActs());
}
fprop(v, passType);
}
}
void Layer::fprop(NVMatrix& v, PASS_TYPE passType) {
NVMatrixV vl;
vl.push_back(&v);
fprop(vl, passType);
}
void Layer::fprop(NVMatrixV& v, PASS_TYPE passType) {
assert(v.size() == _prev.size());
_inputs.clear();
_inputs.insert(_inputs.begin(), v.begin(), v.end());
_outputs = _actsTarget < 0 ? _outputs : _inputs[_actsTarget];
_rcvdFInputs = _prev.size();
for (NVMatrixV::iterator it = v.begin(); it != v.end(); ++it) {
(*it)->transpose(_trans);
}
getActs().transpose(_trans);
// First do fprop on the input whose acts matrix I'm sharing, if any
if (_actsTarget >= 0) {
fpropActs(_actsTarget, 0, passType);
}
// Then add the rest of the inputs to that
for (int i = 0; i < _prev.size(); i++) {
if (i != _actsTarget) {
fpropActs(i, _actsTarget >= 0 || i > 0, passType);
}
}
fpropNext(passType);
}
void Layer::bprop(PASS_TYPE passType) {
if (_rcvdBInputs == _numGradProducersNext) {
_rcvdBInputs++; // avoid doing bprop computation twice
bprop(getActsGrad(), passType);
}
}
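// Full backward pass: orient buffers to this layer's transpose setting, compute
// parameter gradients (bpropCommon), propagate activity gradients to grad-consuming
// inputs, then recurse into the previous layers.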
void Layer::bprop(NVMatrix& v, PASS_TYPE passType) {
v.transpose(_trans);
for (int i = 0; i < _prev.size(); i++) {
_prev[i]->getActs().transpose(_trans);
_prev[i]->getActsGrad().transpose(_trans);
}
getActs().transpose(_trans);
bpropCommon(v, passType);
if (isGradProducer()) {
// First propagate activity gradient to all layers whose activity
// gradient matrix I'm definitely not sharing.
for (int i = 0; i < _prev.size(); i++) {
if (_prev[i]->isGradConsumer() && _actsGradTarget != i) {
bpropActs(v, i, _prev[i]->getRcvdBInputs() > 0 ? 1 : 0, passType);
_prev[i]->incRcvdBInputs();
}
}
// Then propagate activity gradient to the layer whose activity gradient
// matrix I'm sharing, if any.
if (_actsGradTarget >= 0 && _prev[_actsGradTarget]->isGradConsumer()) {
bpropActs(v, _actsGradTarget, _prev[_actsGradTarget]->getRcvdBInputs() > 0 ? 1 : 0, passType);
_prev[_actsGradTarget]->incRcvdBInputs();
}
}
truncBwdActs();
if (isGradProducer()) {
for (int i = 0; i < _prev.size(); i++) {
if (_prev[i]->isGradConsumer()) {
_prev[i]->bprop(passType);
}
}
}
}
void Layer::reset() {
_rcvdFInputs = 0;
_rcvdBInputs = 0;
}
string& Layer::getName() {
return _name;
}
string& Layer::getType() {
return _type;
}
int Layer::getRcvdFInputs() {
return _rcvdFInputs;
}
int Layer::getRcvdBInputs() {
return _rcvdBInputs;
}
int Layer::incRcvdBInputs() {
return ++_rcvdBInputs;
}
void Layer::addNext(Layer* l) {
_next.push_back(l);
_numGradProducersNext += l->isGradProducer();
}
void Layer::addPrev(Layer* l) {
_prev.push_back(l);
}
void Layer::postInit() {
// _outputs = _actsTarget < 0 ? new NVMatrix() : &_prev[_actsTarget]->getActs();
_actsGrad = _actsGradTarget < 0 ? new NVMatrix() : &_prev[_actsGradTarget]->getActsGrad();
}
// Does this layer, or some layer below it, need the gradient
// for parameter updates?
// Only weight layers should be grad consumers themselves.
bool Layer::isGradConsumer() {
if (!_foundGradConsumers) {
for (int i = 0; i < _prev.size(); i++) {
_gradConsumer |= _prev[i]->isGradConsumer();
}
_foundGradConsumers = true;
}
return _gradConsumer;
}
// Does this layer produce gradient for layers below?
bool Layer::isGradProducer() {
return true;
}
vector<Layer*>& Layer::getPrev() {
return _prev;
}
vector<Layer*>& Layer::getNext() {
return _next;
}
NVMatrix& Layer::getActs() {
assert(_outputs != NULL);
return *_outputs;
}
NVMatrix& Layer::getActsGrad() {
assert(_actsGrad != NULL);
return *_actsGrad;
}
/*
* =======================
* NeuronLayer
* =======================
*/
NeuronLayer::NeuronLayer(PyObject* paramsDict)
: Layer(paramsDict, true) {
_neuron = &Neuron::makeNeuron(PyDict_GetItemString(paramsDict, "neuron"));
}
void NeuronLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
_neuron->computeInputGrad(v, _prev[0]->getActsGrad(), scaleTargets > 0);
}
void NeuronLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
_neuron->activate(*_inputs[0], getActs());
}
/*
* =======================
* WeightLayer
* =======================
*/
WeightLayer::WeightLayer(PyObject* paramsDict, bool trans) :
Layer(paramsDict, trans) {
}
void WeightLayer::initialize(ConvNet* convNet, PyObject* paramsDict) {
MatrixV& hWeights = *pyDictGetMatrixV(paramsDict, "weights");
MatrixV& hWeightsInc = *pyDictGetMatrixV(paramsDict, "weightsInc");
Matrix& hBiases = *pyDictGetMatrix(paramsDict, "biases");
Matrix& hBiasesInc = *pyDictGetMatrix(paramsDict, "biasesInc");
floatv& momW = *pyDictGetFloatV(paramsDict, "momW");
float momB = pyDictGetFloat(paramsDict, "momB");
floatv& epsW = *pyDictGetFloatV(paramsDict, "epsW");
float epsB = pyDictGetFloat(paramsDict, "epsB");
floatv& wc = *pyDictGetFloatV(paramsDict, "wc");
// Source layers for shared weights
intv& weightSourceLayerIndices = *pyDictGetIntV(paramsDict, "weightSourceLayerIndices");
// Weight matrix indices (inside the above source layers) for shared weights
intv& weightSourceMatrixIndices = *pyDictGetIntV(paramsDict, "weightSourceMatrixIndices");
for (int i = 0; i < weightSourceLayerIndices.size(); i++) {
int srcLayerIdx = weightSourceLayerIndices[i];
int matrixIdx = weightSourceMatrixIndices[i];
if (srcLayerIdx == convNet->getNumLayers()) { // Current layer
_weights.addWeights(*new Weights(_weights[matrixIdx], epsW[i]));
} else if (srcLayerIdx >= 0) {
WeightLayer& srcLayer = *static_cast<WeightLayer*>(&convNet->getLayer(srcLayerIdx));
Weights* srcWeights = &srcLayer.getWeights(matrixIdx);
_weights.addWeights(*new Weights(*srcWeights, epsW[i]));
} else {
_weights.addWeights(*new Weights(*hWeights[i], *hWeightsInc[i], epsW[i], wc[i], momW[i]));
}
}
_biases = new Weights(hBiases, hBiasesInc, epsB, 0, momB);
// Epsilons for finite-difference gradient checking operation
_wStep = 0.001;
_bStep = 0.002;
delete &weightSourceLayerIndices;
delete &weightSourceMatrixIndices;
delete &hWeights;
delete &hWeightsInc;
delete &momW;
delete &epsW;
delete &wc;
}
void WeightLayer::bpropCommon(NVMatrix& v, PASS_TYPE passType) {
if (_biases->getEps() > 0) {
bpropBiases(v, passType);
}
for (int i = 0; i < _weights.getSize(); i++) {
if (_weights[i].getEps() > 0) {
bpropWeights(v, i, passType);
// Increment its number of updates
_weights[i].incNumUpdates();
}
}
}
void WeightLayer::updateWeights() {
const NVMatrix& v = getActsGrad();
int numCases = getNumCases(v);
_weights.update(numCases);
// Log_Info("Update bias... %f %f", _biases->getGrad().norm2(), _biases->getW().norm2());
_biases->update(numCases);
// Log_Info("Done... %f %f", _biases->getGrad().norm2(), _biases->getW().norm2());
}
void WeightLayer::copyToCPU() {
_weights.copyToCPU();
_biases->copyToCPU();
}
void WeightLayer::copyToGPU() {
_weights.copyToGPU();
_biases->copyToGPU();
}
void WeightLayer::checkGradients(ConvNet* convNet) {
for (int i = 0; i < _weights.getSize(); i++) {
convNet->checkGradient(_name + " weights[" + tostr(i) + "]", _wStep, _weights[i]);
}
convNet->checkGradient(_name + " biases", _bStep, *_biases);
}
Weights& WeightLayer::getWeights(int idx) {
return _weights[idx];
}
/*
* =======================
* FCLayer
* =======================
*/
FCLayer::FCLayer(PyObject* paramsDict) : WeightLayer(paramsDict, true) {
_wStep = 0.1;
_bStep = 0.01;
}
void FCLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
getActs().addProduct(*_inputs[inpIdx], *_weights[inpIdx], scaleTargets, 1);
if (scaleTargets == 0) {
getActs().addVector(_biases->getW());
}
}
void FCLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
NVMatrix& weights_T = _weights[inpIdx].getW().getTranspose();
_prev[inpIdx]->getActsGrad().addProduct(v, weights_T, scaleTargets, 1);
delete &weights_T;
}
void FCLayer::bpropBiases(NVMatrix& v, PASS_TYPE passType) {
_biases->getGrad().addSum(v, 0, 0, 1);
}
void FCLayer::bpropWeights(NVMatrix& v, int inpIdx, PASS_TYPE passType) {
NVMatrix& prevActs_T = _prev[inpIdx]->getActs().getTranspose();
_weights[inpIdx].getGrad().addProduct(prevActs_T, v, 0, 1);
delete &prevActs_T;
}
/*
* =======================
* LocalLayer
* =======================
*/
LocalLayer::LocalLayer(PyObject* paramsDict)
: WeightLayer(paramsDict, false) {
_padding = pyDictGetIntV(paramsDict, "padding");
_stride = pyDictGetIntV(paramsDict, "stride");
_filterSize = pyDictGetIntV(paramsDict, "filterSize");
_channels = pyDictGetIntV(paramsDict, "channels");
_imgSize = pyDictGetIntV(paramsDict, "imgSize");
_numFilters = pyDictGetInt(paramsDict, "filters");
_groups = pyDictGetIntV(paramsDict, "groups");
_filterChannels = pyDictGetIntV(paramsDict, "filterChannels");
_randSparse = pyDictGetIntV(paramsDict, "randSparse");
_overSample = pyDictGetIntV(paramsDict, "overSample");
_filterPixels = pyDictGetIntV(paramsDict, "filterPixels");
_imgPixels = pyDictGetIntV(paramsDict, "imgPixels");
_modulesX = pyDictGetInt(paramsDict, "modulesX");
_modules = pyDictGetInt(paramsDict, "modules");
// It's a vector on the heap to be consistent with all the others...
_filterConns = new vector<FilterConns>();
PyObject* pyFilterConns = PyDict_GetItemString(paramsDict, "filterConns");
for (int i = 0; i < _randSparse->size(); i++) {
FilterConns fc;
if (_randSparse->at(i)) {
fc.hFilterConns = getIntA(PyList_GET_ITEM(pyFilterConns, i));
}
_filterConns->push_back(fc);
}
}
void LocalLayer::copyToGPU() {
WeightLayer::copyToGPU();
for (int i = 0; i < _prev.size(); i++) {
if (_randSparse->at(i)) { // Copy to GPU vector that describes sparse random connectivity
hipMalloc(&_filterConns->at(i).dFilterConns, sizeof(int) * _groups->at(i) * _filterChannels->at(i));
hipMemcpy(_filterConns->at(i).dFilterConns, _filterConns->at(i).hFilterConns,
sizeof(int) * _groups->at(i) * _filterChannels->at(i), hipMemcpyHostToDevice);
cutilCheckMsg("hipMemcpy: failed");
}
}
}
/*
* =======================
* ConvLayer
* =======================
*/
ConvLayer::ConvLayer(PyObject* paramsDict) : LocalLayer(paramsDict) {
_partialSum = pyDictGetInt(paramsDict, "partialSum");
_sharedBiases = pyDictGetInt(paramsDict, "sharedBiases");
}
void ConvLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
if (_randSparse->at(inpIdx)) {
convFilterActsSparse(*_inputs[inpIdx], *_weights[inpIdx], getActs(), _filterConns->at(inpIdx).dFilterConns,
_imgSize->at(inpIdx), _modulesX, _modulesX, _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx),
_filterChannels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1);
} else {
convFilterActs(*_inputs[inpIdx], *_weights[inpIdx], getActs(), _imgSize->at(inpIdx), _modulesX, _modulesX, _padding->at(inpIdx),
_stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1);
}
if (scaleTargets == 0) {
if (_sharedBiases) {
getActs().reshape(_numFilters, getActs().getNumElements() / _numFilters);
getActs().addVector(_biases->getW());
getActs().reshape(_numFilters * _modules, getActs().getNumElements() / (_numFilters * _modules));
} else {
getActs().addVector(_biases->getW());
}
}
}
void ConvLayer::bpropBiases(NVMatrix& v, PASS_TYPE passType) {
if (_sharedBiases) {
v.reshape(_numFilters, v.getNumElements() / _numFilters);
_biases->getGrad().addSum(v, 1, 0, 1);
v.reshape(_numFilters * _modules, v.getNumElements() / (_numFilters * _modules));
} else {
_biases->getGrad().addSum(v, 1, 0, 1);
}
}
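// When _partialSum > 0, weight gradients are first accumulated per block of
// _partialSum modules into _weightGradTmp and then summed into the real gradient below.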
void ConvLayer::bpropWeights(NVMatrix& v, int inpIdx, PASS_TYPE passType) {
NVMatrix& tgt = _partialSum > 0 ? _weightGradTmp : _weights[inpIdx].getGrad();
float scaleWGrad = 1;
float scaleTargets = _weights[inpIdx].getNumUpdates() > 0 && _partialSum == 0; // ? 1 : 0;
if (_randSparse->at(inpIdx)) {
convWeightActsSparse(_prev[inpIdx]->getActs(), v, tgt, _filterConns->at(inpIdx).dFilterConns, _imgSize->at(inpIdx), _modulesX, _modulesX,
_filterSize->at(inpIdx), _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx),
_filterChannels->at(inpIdx), _groups->at(inpIdx), _partialSum, scaleTargets, scaleWGrad);
} else {
convWeightActs(_prev[inpIdx]->getActs(), v, tgt, _imgSize->at(inpIdx), _modulesX, _modulesX, _filterSize->at(inpIdx), _padding->at(inpIdx),
_stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), _partialSum, scaleTargets, scaleWGrad);
}
if (_partialSum > 0) {
scaleTargets = _weights[inpIdx].getNumUpdates() > 0;
_weightGradTmp.reshape(_modules / _partialSum, _filterChannels->at(inpIdx) * _filterPixels->at(inpIdx) * _numFilters);
_weights[inpIdx].getGrad().addSum(_weightGradTmp, 0, scaleTargets, 1);
_weights[inpIdx].getGrad().reshape(_filterChannels->at(inpIdx) * _filterPixels->at(inpIdx), _numFilters);
}
}
void ConvLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
if (_randSparse->at(inpIdx)) {
NVMatrix& tgt = _overSample->at(inpIdx) > 1 ? _actGradTmp : _prev[inpIdx]->getActsGrad();
convImgActsSparse(v, *_weights[inpIdx], tgt, _filterConns->at(inpIdx).dFilterConns,
_imgSize->at(inpIdx), _imgSize->at(inpIdx), _modulesX, _padding->at(inpIdx), _stride->at(inpIdx),
_channels->at(inpIdx), _filterChannels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1);
if (_overSample->at(inpIdx) > 1) {
_actGradTmp.reshape(_overSample->at(inpIdx), _actGradTmp.getNumElements() / _overSample->at(inpIdx));
_actGradTmp.sum(0, _prev[inpIdx]->getActsGrad());
_prev[inpIdx]->getActsGrad().reshape(_prev[inpIdx]->getActsGrad().getNumElements() / v.getNumCols(), v.getNumCols());
}
} else {
convImgActs(v, *_weights[inpIdx], _prev[inpIdx]->getActsGrad(), _imgSize->at(inpIdx), _imgSize->at(inpIdx), _modulesX,
_padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1);
}
}
void ConvLayer::truncBwdActs() {
LocalLayer::truncBwdActs();
if (_conserveMem) {
_weightGradTmp.truncate();
_actGradTmp.truncate();
}
}
/*
* =======================
* LocalUnsharedLayer
* =======================
*/
LocalUnsharedLayer::LocalUnsharedLayer(PyObject* paramsDict) : LocalLayer(paramsDict) {
}
void LocalUnsharedLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
if (_randSparse->at(inpIdx)) {
localFilterActsSparse(*_inputs[inpIdx], *_weights[inpIdx], getActs(), _filterConns->at(inpIdx).dFilterConns,
_imgSize->at(inpIdx), _modulesX, _modulesX, _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx),
_filterChannels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1);
} else {
localFilterActs(*_inputs[inpIdx], *_weights[inpIdx], getActs(), _imgSize->at(inpIdx), _modulesX, _modulesX, _padding->at(inpIdx),
_stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1);
}
if (scaleTargets == 0) {
getActs().addVector(_biases->getW());
}
}
void LocalUnsharedLayer::bpropBiases(NVMatrix& v, PASS_TYPE passType) {
_biases->getGrad().addSum(v, 1, 0, 1);
}
void LocalUnsharedLayer::bpropWeights(NVMatrix& v, int inpIdx, PASS_TYPE passType) {
float scaleInc = 0;
float scaleWGrad = 1;
if (_randSparse->at(inpIdx)) {
localWeightActsSparse(_prev[inpIdx]->getActs(), v, _weights[inpIdx].getGrad(), _filterConns->at(inpIdx).dFilterConns,
_imgSize->at(inpIdx), _modulesX, _modulesX, _filterSize->at(inpIdx), _padding->at(inpIdx), _stride->at(inpIdx),
_channels->at(inpIdx), _filterChannels->at(inpIdx), _groups->at(inpIdx), scaleInc, scaleWGrad);
} else {
localWeightActs(_prev[inpIdx]->getActs(), v, _weights[inpIdx].getGrad(), _imgSize->at(inpIdx), _modulesX, _modulesX, _filterSize->at(inpIdx),
_padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleInc, scaleWGrad);
}
}
void LocalUnsharedLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
if (_randSparse->at(inpIdx)) {
localImgActsSparse(v, *_weights[inpIdx], _prev[inpIdx]->getActsGrad(), _filterConns->at(inpIdx).dFilterConns,
_imgSize->at(inpIdx), _imgSize->at(inpIdx), _modulesX, _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx),
_filterChannels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1);
} else {
localImgActs(v, *_weights[inpIdx], _prev[inpIdx]->getActsGrad(), _imgSize->at(inpIdx), _imgSize->at(inpIdx), _modulesX,
_padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1);
}
}
/*
* =======================
* SoftmaxLayer
* =======================
*/
SoftmaxLayer::SoftmaxLayer(PyObject* paramsDict) : Layer(paramsDict, true) {
}
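// Numerically stable softmax: subtract the maximum activation, exponentiate,
// then normalize by the sum of exponentials.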
void SoftmaxLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
NVMatrix& input = *_inputs[0];
NVMatrix& max = input.max(1);
input.addVector(max, -1, getActs());
getActs().apply(NVMatrixOps::Exp());
NVMatrix& sum = getActs().sum(1);
getActs().eltwiseDivideByVector(sum);
delete &max;
delete &sum;
}
void SoftmaxLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(inpIdx == 0);
bool doLogregGrad = _next.size() == 1 && _next[0]->getType() == "cost.logreg";
if (doLogregGrad) {
NVMatrix& labels = _next[0]->getPrev()[0]->getActs();
float gradCoeff = dynamic_cast<CostLayer*>(_next[0])->getCoeff();
computeLogregSoftmaxGrad(labels, getActs(), _prev[0]->getActsGrad(), scaleTargets == 1, gradCoeff);
} else {
computeSoftmaxGrad(getActs(), v, _prev[0]->getActsGrad(), scaleTargets == 1);
}
}
/*
* =======================
* EltwiseSumLayer
* =======================
*/
EltwiseSumLayer::EltwiseSumLayer(PyObject* paramsDict) : Layer(paramsDict, false) {
_coeffs = pyDictGetFloatV(paramsDict, "coeffs");
}
void EltwiseSumLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
if (scaleTargets == 0) {
_inputs[inpIdx]->scale(_coeffs->at(inpIdx), getActs());
} else {
getActs().add(*_inputs[inpIdx], _coeffs->at(inpIdx));
}
}
void EltwiseSumLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
if (scaleTargets == 0 ) {
v.scale(_coeffs->at(inpIdx), _prev[inpIdx]->getActsGrad());
} else {
assert(&_prev[inpIdx]->getActsGrad() != &v);
_prev[inpIdx]->getActsGrad().add(v, scaleTargets, _coeffs->at(inpIdx));
}
}
/*
* =======================
* EltwiseMaxLayer
* =======================
*/
EltwiseMaxLayer::EltwiseMaxLayer(PyObject* paramsDict) : Layer(paramsDict, false) {
}
void EltwiseMaxLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
if (inpIdx == 1) { // inpIdx 0 is skipped; the first two inputs are combined when the second one arrives
_inputs[inpIdx]->applyBinary(NVMatrixAggs::Max(), *_inputs[0], getActs());
} else if (inpIdx > 1) {
getActs().applyBinary(NVMatrixAggs::Max(), *_inputs[inpIdx]);
}
}
void EltwiseMaxLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
computeEltwiseMaxGrad(v, *_inputs[inpIdx], getActs(), _prev[inpIdx]->getActsGrad(), scaleTargets != 0);
}
/*
* =======================
* DataLayer
* =======================
*/
DataLayer::DataLayer(PyObject* paramsDict) : Layer(paramsDict, false) {
_dataIdx = pyDictGetInt(paramsDict, "dataIdx");
}
void DataLayer::fprop(PASS_TYPE passType) {
throw string("No dava given!");
}
void DataLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
}
void DataLayer::fprop(NVMatrixV& data, PASS_TYPE passType) {
_outputs = data[_dataIdx];
fpropNext(passType);
}
bool DataLayer::isGradProducer() {
return false;
}
/*
* =====================
* PoolLayer
* =====================
*/
PoolLayer::PoolLayer(PyObject* paramsDict, bool trans)
: Layer(paramsDict, trans) {
_channels = pyDictGetInt(paramsDict, "channels");
_sizeX = pyDictGetInt(paramsDict, "sizeX");
_start = pyDictGetInt(paramsDict, "start");
_stride = pyDictGetInt(paramsDict, "stride");
_outputsX = pyDictGetInt(paramsDict, "outputsX");
_imgSize = pyDictGetInt(paramsDict, "imgSize");
_pool = pyDictGetString(paramsDict, "pool");
}
PoolLayer& PoolLayer::makePoolLayer(PyObject* paramsDict) {
string _pool = pyDictGetString(paramsDict, "pool");
if (_pool == "max") {
return *new MaxPoolLayer(paramsDict);
} else if(_pool == "avg") {
return *new AvgPoolLayer(paramsDict);
}
throw string("Unknown pooling layer type ") + _pool;
}
/*
* =====================
* AvgPoolLayer
* =====================
*/
AvgPoolLayer::AvgPoolLayer(PyObject* paramsDict) : PoolLayer(paramsDict, false) {
}
void AvgPoolLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
convLocalPool(*_inputs[0], getActs(), _channels, _sizeX, _start, _stride, _outputsX, AvgPooler());
}
void AvgPoolLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
convLocalAvgUndo(v, _prev[0]->getActsGrad(), _sizeX, _start, _stride, _outputsX, _imgSize, scaleTargets, 1);
}
/*
* =====================
* MaxPoolLayer
* =====================
*/
MaxPoolLayer::MaxPoolLayer(PyObject* paramsDict) : PoolLayer(paramsDict, false) {
}
void MaxPoolLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
convLocalPool(*_inputs[0], getActs(), _channels, _sizeX, _start, _stride, _outputsX, MaxPooler());
}
void MaxPoolLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
convLocalMaxUndo(_prev[0]->getActs(), v, getActs(), _prev[inpIdx]->getActsGrad(), _sizeX, _start, _stride, _outputsX, scaleTargets, 1);
}
/*
* =====================
* NailbedLayer
* =====================
*/
NailbedLayer::NailbedLayer(PyObject* paramsDict) : Layer(paramsDict, false) {
_channels = pyDictGetInt(paramsDict, "channels");
_start = pyDictGetInt(paramsDict, "start");
_stride = pyDictGetInt(paramsDict, "stride");
_outputsX = pyDictGetInt(paramsDict, "outputsX");
_imgSize = pyDictGetInt(paramsDict, "imgSize");
}
void NailbedLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
convBedOfNails(*_inputs[0], getActs(), _channels, _imgSize, _start, _stride, 0, 1);
}
void NailbedLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
convBedOfNailsUndo(v, _prev[0]->getActsGrad(), _channels, _imgSize, _start, _stride, scaleTargets, 1);
}
/*
* =====================
* GaussianBlurLayer
* =====================
*/
GaussianBlurLayer::GaussianBlurLayer(PyObject* paramsDict) : Layer(paramsDict, false) {
_channels = pyDictGetInt(paramsDict, "channels");
_hFilter = pyDictGetMatrix(paramsDict, "filter");
}
void GaussianBlurLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
convGaussianBlur(*_inputs[0], _filter, getActs(), true, _channels, 0, 1);
convGaussianBlur(getActs(), _filter, getActs(), false, _channels, 0, 1);
}
// This is here just for completeness' sake. Why would you backpropagate
// through a blur filter?
void GaussianBlurLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
NVMatrix& tgt1 = _prev[0]->getRcvdBInputs() > 0 ? _actGradsTmp : _prev[0]->getActsGrad();
convGaussianBlur(v, _filter, tgt1, true, _channels, 0, 1);
convGaussianBlur(tgt1, _filter, _prev[0]->getActsGrad(), false, _channels, scaleTargets, 1);
}
void GaussianBlurLayer::copyToGPU() {
_filter.copyFromHost(*_hFilter, true);
}
/*
* =====================
* ResizeLayer
* =====================
*/
ResizeLayer::ResizeLayer(PyObject* paramsDict) : Layer(paramsDict, false) {
_channels = pyDictGetInt(paramsDict, "channels");
_imgSize = pyDictGetInt(paramsDict, "imgSize");
_tgtSize = pyDictGetInt(paramsDict, "tgtSize");
_scale = pyDictGetFloat(paramsDict, "scale");
}
void ResizeLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
convResizeBilinear(*_inputs[0], getActs(), _imgSize, _tgtSize, _scale);
}
// Can't do this
void ResizeLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(false);
}
/*
* =====================
* RGBToYUVLayer
* =====================
*/
RGBToYUVLayer::RGBToYUVLayer(PyObject* paramsDict) : Layer(paramsDict, false) {
}
void RGBToYUVLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
convRGBToYUV(*_inputs[0], getActs());
}
// Can't do this
void RGBToYUVLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(false);
}
/*
* =====================
* RGBToLABLayer
* =====================
*/
RGBToLABLayer::RGBToLABLayer(PyObject* paramsDict) : Layer(paramsDict, false) {
_center = pyDictGetInt(paramsDict, "center");
}
void RGBToLABLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
convRGBToLAB(*_inputs[0], getActs(), _center);
}
// Can't do this
void RGBToLABLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(false);
}
/*
* =====================
* ResponseNormLayer
* =====================
*/
ResponseNormLayer::ResponseNormLayer(PyObject* paramsDict) : Layer(paramsDict, false) {
_channels = pyDictGetInt(paramsDict, "channels");
_size = pyDictGetInt(paramsDict, "size");
_scale = pyDictGetFloat(paramsDict, "scale");
_pow = pyDictGetFloat(paramsDict, "pow");
}
void ResponseNormLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
convResponseNorm(*_inputs[0], _denoms, getActs(), _channels, _size, _scale, _pow);
}
void ResponseNormLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
convResponseNormUndo(v, _denoms, _prev[0]->getActs(), getActs(), _prev[0]->getActsGrad(), _channels, _size, _scale, _pow, scaleTargets, 1);
}
void ResponseNormLayer::truncBwdActs() {
Layer::truncBwdActs();
if (_conserveMem) {
_denoms.truncate();
}
}
/*
* =====================
* CrossMapResponseNormLayer
* =====================
*/
CrossMapResponseNormLayer::CrossMapResponseNormLayer(PyObject* paramsDict) : ResponseNormLayer(paramsDict) {
_blocked = pyDictGetInt(paramsDict, "blocked");
}
void CrossMapResponseNormLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
convResponseNormCrossMap(*_inputs[0], _denoms, getActs(), _channels, _size, _scale, _pow, _blocked);
}
void CrossMapResponseNormLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
convResponseNormCrossMapUndo(v, _denoms, _prev[0]->getActs(), getActs(), _prev[0]->getActsGrad(), _channels, _size, _scale, _pow, _blocked, scaleTargets, 1);
}
/*
* =====================
* ContrastNormLayer
* =====================
*/
ContrastNormLayer::ContrastNormLayer(PyObject* paramsDict) : ResponseNormLayer(paramsDict) {
_imgSize = pyDictGetInt(paramsDict, "imgSize");
}
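// _meanDiffs holds each pixel's deviation from its local average (computed with an
// average pool); contrast normalization then operates on these differences.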
void ContrastNormLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
NVMatrix& images = *_inputs[0];
convLocalPool(images, _meanDiffs, _channels, _size, -_size/2, 1, _imgSize, AvgPooler());
_meanDiffs.add(images, -1, 1);
convContrastNorm(images, _meanDiffs, _denoms, getActs(), _channels, _size, _scale, _pow);
}
void ContrastNormLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
convContrastNormUndo(v, _denoms, _meanDiffs, getActs(), _prev[inpIdx]->getActsGrad(), _channels, _size, _scale, _pow, scaleTargets, 1);
}
void ContrastNormLayer::truncBwdActs() {
ResponseNormLayer::truncBwdActs();
if (_conserveMem) {
_meanDiffs.truncate();
}
}
/*
* =====================
* CostLayer
* =====================
*/
CostLayer::CostLayer(PyObject* paramsDict, bool trans)
: Layer(paramsDict, trans) {
_coeff = pyDictGetFloat(paramsDict, "coeff");
}
float CostLayer::getCoeff() {
return _coeff;
}
void CostLayer::bprop(PASS_TYPE passType) {
if (_coeff != 0) {
Layer::bprop(passType);
}
}
bool CostLayer::isGradProducer() {
return _coeff != 0;
}
doublev& CostLayer::getCost() {
doublev& v = *new doublev();
v.insert(v.begin(), _costv.begin(), _costv.end());
return v;
}
CostLayer& CostLayer::makeCostLayer(string& type, PyObject* paramsDict) {
if (type == "cost.logreg") {
return *new LogregCostLayer(paramsDict);
} else if (type == "cost.sum2") {
return *new SumOfSquaresCostLayer(paramsDict);
}
throw string("Unknown cost layer type ") + type;
}
/*
* =====================
* LogregCostLayer
* =====================
*/
LogregCostLayer::LogregCostLayer(PyObject* paramsDict) : CostLayer(paramsDict, false) {
}
void LogregCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
// This layer uses its two inputs together
if (inpIdx == 0) {
NVMatrix& labels = *_inputs[0];
NVMatrix& probs = *_inputs[1];
int numCases = labels.getNumElements();
NVMatrix& trueLabelLogProbs = getActs(), correctProbs;
computeLogregCost(labels, probs, trueLabelLogProbs, correctProbs);
_costv.clear();
_costv.push_back(-trueLabelLogProbs.sum());
_costv.push_back(numCases - correctProbs.sum());
}
}
void LogregCostLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(inpIdx == 1);
NVMatrix& labels = _prev[0]->getActs();
NVMatrix& probs = _prev[1]->getActs();
NVMatrix& target = _prev[1]->getActsGrad();
// Numerical stability optimization: if the layer below me is a softmax layer, let it handle
// the entire gradient computation to avoid multiplying and dividing by a near-zero quantity.
bool doWork = _prev[1]->getNext().size() > 1 || _prev[1]->getType() != "softmax";
if (doWork) {
computeLogregGrad(labels, probs, target, scaleTargets == 1, _coeff);
}
}
/*
* =====================
* SumOfSquaresCostLayer
* =====================
*/
SumOfSquaresCostLayer::SumOfSquaresCostLayer(PyObject* paramsDict) : CostLayer(paramsDict, false) {
}
void SumOfSquaresCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
_inputs[0]->apply(NVMatrixOps::Square(), getActs());
_costv.clear();
_costv.push_back(getActs().sum());
}
void SumOfSquaresCostLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
_prev[inpIdx]->getActsGrad().add(*_inputs[0], scaleTargets, -2 * _coeff);
}
|
b95bc4a8ed6e232ab0e6367ba373627737d9aea8.cu
|
/*
* Copyright (c) 2011, Alex Krizhevsky ([email protected])
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <cutil_inline.h>
#include <iostream>
#include <layer_kernels.cuh>
#include <layer.cuh>
#include <data.cuh>
#include <util.cuh>
#include <cudaconv2.cuh>
#include <matrix.h>
#include "common/logging.h"
using namespace std;
/*
* =======================
* Layer
* =======================
*/
Layer::Layer(PyObject* paramsDict, bool trans) :
_trans(trans) {
_name = pyDictGetString(paramsDict, "name");
_type = pyDictGetString(paramsDict, "type");
_numGradProducersNext = 0;
_foundGradConsumers = false;
_gradConsumer = pyDictGetInt(paramsDict, "gradConsumer");
_actsTarget = pyDictGetInt(paramsDict, "actsTarget");
_actsGradTarget = pyDictGetInt(paramsDict, "actsGradTarget");
_conserveMem = pyDictGetInt(paramsDict, "conserveMem");
_outputs = _actsTarget < 0 ? new NVMatrix() : NULL;
_actsGrad = _actsGradTarget < 0 ? new NVMatrix() : NULL;
}
void Layer::fpropNext(PASS_TYPE passType) {
// double start = Now();
for (int i = 0; i < _next.size(); i++) {
_next[i]->fprop(passType);
}
// Log_Info("Finished layer in %.3f seconds.", Now() - start);
}
void Layer::truncBwdActs() {
// Only truncate actsGrad if I own it
if (_conserveMem && _actsGradTarget < 0) {
getActsGrad().truncate();
}
if (_conserveMem) {
getActs().truncate();
}
}
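// Called once per previous layer; when the last forward input arrives,
// gather all previous activations and run the real forward pass.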
void Layer::fprop(PASS_TYPE passType) {
_rcvdFInputs += 1;
if (_rcvdFInputs == _prev.size()) {
NVMatrixV v;
for (int i = 0; i < _prev.size(); i++) {
v.push_back(&_prev[i]->getActs());
}
fprop(v, passType);
}
}
void Layer::fprop(NVMatrix& v, PASS_TYPE passType) {
NVMatrixV vl;
vl.push_back(&v);
fprop(vl, passType);
}
void Layer::fprop(NVMatrixV& v, PASS_TYPE passType) {
assert(v.size() == _prev.size());
_inputs.clear();
_inputs.insert(_inputs.begin(), v.begin(), v.end());
_outputs = _actsTarget < 0 ? _outputs : _inputs[_actsTarget];
_rcvdFInputs = _prev.size();
for (NVMatrixV::iterator it = v.begin(); it != v.end(); ++it) {
(*it)->transpose(_trans);
}
getActs().transpose(_trans);
// First do fprop on the input whose acts matrix I'm sharing, if any
if (_actsTarget >= 0) {
fpropActs(_actsTarget, 0, passType);
}
// Then add the rest of the inputs to that
for (int i = 0; i < _prev.size(); i++) {
if (i != _actsTarget) {
fpropActs(i, _actsTarget >= 0 || i > 0, passType);
}
}
fpropNext(passType);
}
void Layer::bprop(PASS_TYPE passType) {
if (_rcvdBInputs == _numGradProducersNext) {
_rcvdBInputs++; // avoid doing bprop computation twice
bprop(getActsGrad(), passType);
}
}
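// Full backward pass: orient buffers to this layer's transpose setting, compute
// parameter gradients (bpropCommon), propagate activity gradients to grad-consuming
// inputs, then recurse into the previous layers.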
void Layer::bprop(NVMatrix& v, PASS_TYPE passType) {
v.transpose(_trans);
for (int i = 0; i < _prev.size(); i++) {
_prev[i]->getActs().transpose(_trans);
_prev[i]->getActsGrad().transpose(_trans);
}
getActs().transpose(_trans);
bpropCommon(v, passType);
if (isGradProducer()) {
// First propagate activity gradient to all layers whose activity
// gradient matrix I'm definitely not sharing.
for (int i = 0; i < _prev.size(); i++) {
if (_prev[i]->isGradConsumer() && _actsGradTarget != i) {
bpropActs(v, i, _prev[i]->getRcvdBInputs() > 0 ? 1 : 0, passType);
_prev[i]->incRcvdBInputs();
}
}
// Then propagate activity gradient to the layer whose activity gradient
// matrix I'm sharing, if any.
if (_actsGradTarget >= 0 && _prev[_actsGradTarget]->isGradConsumer()) {
bpropActs(v, _actsGradTarget, _prev[_actsGradTarget]->getRcvdBInputs() > 0 ? 1 : 0, passType);
_prev[_actsGradTarget]->incRcvdBInputs();
}
}
truncBwdActs();
if (isGradProducer()) {
for (int i = 0; i < _prev.size(); i++) {
if (_prev[i]->isGradConsumer()) {
_prev[i]->bprop(passType);
}
}
}
}
void Layer::reset() {
_rcvdFInputs = 0;
_rcvdBInputs = 0;
}
string& Layer::getName() {
return _name;
}
string& Layer::getType() {
return _type;
}
int Layer::getRcvdFInputs() {
return _rcvdFInputs;
}
int Layer::getRcvdBInputs() {
return _rcvdBInputs;
}
int Layer::incRcvdBInputs() {
return ++_rcvdBInputs;
}
void Layer::addNext(Layer* l) {
_next.push_back(l);
_numGradProducersNext += l->isGradProducer();
}
void Layer::addPrev(Layer* l) {
_prev.push_back(l);
}
void Layer::postInit() {
// _outputs = _actsTarget < 0 ? new NVMatrix() : &_prev[_actsTarget]->getActs();
_actsGrad = _actsGradTarget < 0 ? new NVMatrix() : &_prev[_actsGradTarget]->getActsGrad();
}
// Does this layer, or some layer below it, need the gradient
// for parameter updates?
// Only weight layers should be grad consumers themselves.
bool Layer::isGradConsumer() {
if (!_foundGradConsumers) {
for (int i = 0; i < _prev.size(); i++) {
_gradConsumer |= _prev[i]->isGradConsumer();
}
_foundGradConsumers = true;
}
return _gradConsumer;
}
// Does this layer produce gradient for layers below?
bool Layer::isGradProducer() {
return true;
}
vector<Layer*>& Layer::getPrev() {
return _prev;
}
vector<Layer*>& Layer::getNext() {
return _next;
}
NVMatrix& Layer::getActs() {
assert(_outputs != NULL);
return *_outputs;
}
NVMatrix& Layer::getActsGrad() {
assert(_actsGrad != NULL);
return *_actsGrad;
}
/*
* =======================
* NeuronLayer
* =======================
*/
NeuronLayer::NeuronLayer(PyObject* paramsDict)
: Layer(paramsDict, true) {
_neuron = &Neuron::makeNeuron(PyDict_GetItemString(paramsDict, "neuron"));
}
void NeuronLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
_neuron->computeInputGrad(v, _prev[0]->getActsGrad(), scaleTargets > 0);
}
void NeuronLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
_neuron->activate(*_inputs[0], getActs());
}
/*
* =======================
* WeightLayer
* =======================
*/
WeightLayer::WeightLayer(PyObject* paramsDict, bool trans) :
Layer(paramsDict, trans) {
}
void WeightLayer::initialize(ConvNet* convNet, PyObject* paramsDict) {
MatrixV& hWeights = *pyDictGetMatrixV(paramsDict, "weights");
MatrixV& hWeightsInc = *pyDictGetMatrixV(paramsDict, "weightsInc");
Matrix& hBiases = *pyDictGetMatrix(paramsDict, "biases");
Matrix& hBiasesInc = *pyDictGetMatrix(paramsDict, "biasesInc");
floatv& momW = *pyDictGetFloatV(paramsDict, "momW");
float momB = pyDictGetFloat(paramsDict, "momB");
floatv& epsW = *pyDictGetFloatV(paramsDict, "epsW");
float epsB = pyDictGetFloat(paramsDict, "epsB");
floatv& wc = *pyDictGetFloatV(paramsDict, "wc");
// Source layers for shared weights
intv& weightSourceLayerIndices = *pyDictGetIntV(paramsDict, "weightSourceLayerIndices");
// Weight matrix indices (inside the above source layers) for shared weights
intv& weightSourceMatrixIndices = *pyDictGetIntV(paramsDict, "weightSourceMatrixIndices");
for (int i = 0; i < weightSourceLayerIndices.size(); i++) {
int srcLayerIdx = weightSourceLayerIndices[i];
int matrixIdx = weightSourceMatrixIndices[i];
if (srcLayerIdx == convNet->getNumLayers()) { // Current layer
_weights.addWeights(*new Weights(_weights[matrixIdx], epsW[i]));
} else if (srcLayerIdx >= 0) {
WeightLayer& srcLayer = *static_cast<WeightLayer*>(&convNet->getLayer(srcLayerIdx));
Weights* srcWeights = &srcLayer.getWeights(matrixIdx);
_weights.addWeights(*new Weights(*srcWeights, epsW[i]));
} else {
_weights.addWeights(*new Weights(*hWeights[i], *hWeightsInc[i], epsW[i], wc[i], momW[i]));
}
}
_biases = new Weights(hBiases, hBiasesInc, epsB, 0, momB);
// Epsilons for finite-difference gradient checking operation
_wStep = 0.001;
_bStep = 0.002;
delete &weightSourceLayerIndices;
delete &weightSourceMatrixIndices;
delete &hWeights;
delete &hWeightsInc;
delete &momW;
delete &epsW;
delete &wc;
}
void WeightLayer::bpropCommon(NVMatrix& v, PASS_TYPE passType) {
if (_biases->getEps() > 0) {
bpropBiases(v, passType);
}
for (int i = 0; i < _weights.getSize(); i++) {
if (_weights[i].getEps() > 0) {
bpropWeights(v, i, passType);
// Increment its number of updates
_weights[i].incNumUpdates();
}
}
}
void WeightLayer::updateWeights() {
const NVMatrix& v = getActsGrad();
int numCases = getNumCases(v);
_weights.update(numCases);
// Log_Info("Update bias... %f %f", _biases->getGrad().norm2(), _biases->getW().norm2());
_biases->update(numCases);
// Log_Info("Done... %f %f", _biases->getGrad().norm2(), _biases->getW().norm2());
}
void WeightLayer::copyToCPU() {
_weights.copyToCPU();
_biases->copyToCPU();
}
void WeightLayer::copyToGPU() {
_weights.copyToGPU();
_biases->copyToGPU();
}
void WeightLayer::checkGradients(ConvNet* convNet) {
for (int i = 0; i < _weights.getSize(); i++) {
convNet->checkGradient(_name + " weights[" + tostr(i) + "]", _wStep, _weights[i]);
}
convNet->checkGradient(_name + " biases", _bStep, *_biases);
}
Weights& WeightLayer::getWeights(int idx) {
return _weights[idx];
}
/*
* =======================
* FCLayer
* =======================
*/
FCLayer::FCLayer(PyObject* paramsDict) : WeightLayer(paramsDict, true) {
_wStep = 0.1;
_bStep = 0.01;
}
void FCLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
getActs().addProduct(*_inputs[inpIdx], *_weights[inpIdx], scaleTargets, 1);
if (scaleTargets == 0) {
getActs().addVector(_biases->getW());
}
}
void FCLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
NVMatrix& weights_T = _weights[inpIdx].getW().getTranspose();
_prev[inpIdx]->getActsGrad().addProduct(v, weights_T, scaleTargets, 1);
delete &weights_T;
}
void FCLayer::bpropBiases(NVMatrix& v, PASS_TYPE passType) {
_biases->getGrad().addSum(v, 0, 0, 1);
}
void FCLayer::bpropWeights(NVMatrix& v, int inpIdx, PASS_TYPE passType) {
NVMatrix& prevActs_T = _prev[inpIdx]->getActs().getTranspose();
_weights[inpIdx].getGrad().addProduct(prevActs_T, v, 0, 1);
delete &prevActs_T;
}
/*
* =======================
* LocalLayer
* =======================
*/
LocalLayer::LocalLayer(PyObject* paramsDict)
: WeightLayer(paramsDict, false) {
_padding = pyDictGetIntV(paramsDict, "padding");
_stride = pyDictGetIntV(paramsDict, "stride");
_filterSize = pyDictGetIntV(paramsDict, "filterSize");
_channels = pyDictGetIntV(paramsDict, "channels");
_imgSize = pyDictGetIntV(paramsDict, "imgSize");
_numFilters = pyDictGetInt(paramsDict, "filters");
_groups = pyDictGetIntV(paramsDict, "groups");
_filterChannels = pyDictGetIntV(paramsDict, "filterChannels");
_randSparse = pyDictGetIntV(paramsDict, "randSparse");
_overSample = pyDictGetIntV(paramsDict, "overSample");
_filterPixels = pyDictGetIntV(paramsDict, "filterPixels");
_imgPixels = pyDictGetIntV(paramsDict, "imgPixels");
_modulesX = pyDictGetInt(paramsDict, "modulesX");
_modules = pyDictGetInt(paramsDict, "modules");
// It's a vector on the heap to be consistent with all the others...
_filterConns = new vector<FilterConns>();
PyObject* pyFilterConns = PyDict_GetItemString(paramsDict, "filterConns");
for (int i = 0; i < _randSparse->size(); i++) {
FilterConns fc;
if (_randSparse->at(i)) {
fc.hFilterConns = getIntA(PyList_GET_ITEM(pyFilterConns, i));
}
_filterConns->push_back(fc);
}
}
void LocalLayer::copyToGPU() {
WeightLayer::copyToGPU();
for (int i = 0; i < _prev.size(); i++) {
if (_randSparse->at(i)) { // Copy to GPU vector that describes sparse random connectivity
cudaMalloc(&_filterConns->at(i).dFilterConns, sizeof(int) * _groups->at(i) * _filterChannels->at(i));
cudaMemcpy(_filterConns->at(i).dFilterConns, _filterConns->at(i).hFilterConns,
sizeof(int) * _groups->at(i) * _filterChannels->at(i), cudaMemcpyHostToDevice);
cutilCheckMsg("cudaMemcpy: failed");
}
}
}
/*
* =======================
* ConvLayer
* =======================
*/
ConvLayer::ConvLayer(PyObject* paramsDict) : LocalLayer(paramsDict) {
_partialSum = pyDictGetInt(paramsDict, "partialSum");
_sharedBiases = pyDictGetInt(paramsDict, "sharedBiases");
}
void ConvLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
if (_randSparse->at(inpIdx)) {
convFilterActsSparse(*_inputs[inpIdx], *_weights[inpIdx], getActs(), _filterConns->at(inpIdx).dFilterConns,
_imgSize->at(inpIdx), _modulesX, _modulesX, _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx),
_filterChannels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1);
} else {
convFilterActs(*_inputs[inpIdx], *_weights[inpIdx], getActs(), _imgSize->at(inpIdx), _modulesX, _modulesX, _padding->at(inpIdx),
_stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1);
}
if (scaleTargets == 0) {
if (_sharedBiases) {
getActs().reshape(_numFilters, getActs().getNumElements() / _numFilters);
getActs().addVector(_biases->getW());
getActs().reshape(_numFilters * _modules, getActs().getNumElements() / (_numFilters * _modules));
} else {
getActs().addVector(_biases->getW());
}
}
}
void ConvLayer::bpropBiases(NVMatrix& v, PASS_TYPE passType) {
if (_sharedBiases) {
v.reshape(_numFilters, v.getNumElements() / _numFilters);
_biases->getGrad().addSum(v, 1, 0, 1);
v.reshape(_numFilters * _modules, v.getNumElements() / (_numFilters * _modules));
} else {
_biases->getGrad().addSum(v, 1, 0, 1);
}
}
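// When _partialSum > 0, weight gradients are first accumulated per block of
// _partialSum modules into _weightGradTmp and then summed into the real gradient below.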
void ConvLayer::bpropWeights(NVMatrix& v, int inpIdx, PASS_TYPE passType) {
NVMatrix& tgt = _partialSum > 0 ? _weightGradTmp : _weights[inpIdx].getGrad();
float scaleWGrad = 1;
float scaleTargets = _weights[inpIdx].getNumUpdates() > 0 && _partialSum == 0; // ? 1 : 0;
if (_randSparse->at(inpIdx)) {
convWeightActsSparse(_prev[inpIdx]->getActs(), v, tgt, _filterConns->at(inpIdx).dFilterConns, _imgSize->at(inpIdx), _modulesX, _modulesX,
_filterSize->at(inpIdx), _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx),
_filterChannels->at(inpIdx), _groups->at(inpIdx), _partialSum, scaleTargets, scaleWGrad);
} else {
convWeightActs(_prev[inpIdx]->getActs(), v, tgt, _imgSize->at(inpIdx), _modulesX, _modulesX, _filterSize->at(inpIdx), _padding->at(inpIdx),
_stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), _partialSum, scaleTargets, scaleWGrad);
}
if (_partialSum > 0) {
scaleTargets = _weights[inpIdx].getNumUpdates() > 0;
_weightGradTmp.reshape(_modules / _partialSum, _filterChannels->at(inpIdx) * _filterPixels->at(inpIdx) * _numFilters);
_weights[inpIdx].getGrad().addSum(_weightGradTmp, 0, scaleTargets, 1);
_weights[inpIdx].getGrad().reshape(_filterChannels->at(inpIdx) * _filterPixels->at(inpIdx), _numFilters);
}
}
void ConvLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
if (_randSparse->at(inpIdx)) {
NVMatrix& tgt = _overSample->at(inpIdx) > 1 ? _actGradTmp : _prev[inpIdx]->getActsGrad();
convImgActsSparse(v, *_weights[inpIdx], tgt, _filterConns->at(inpIdx).dFilterConns,
_imgSize->at(inpIdx), _imgSize->at(inpIdx), _modulesX, _padding->at(inpIdx), _stride->at(inpIdx),
_channels->at(inpIdx), _filterChannels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1);
if (_overSample->at(inpIdx) > 1) {
_actGradTmp.reshape(_overSample->at(inpIdx), _actGradTmp.getNumElements() / _overSample->at(inpIdx));
_actGradTmp.sum(0, _prev[inpIdx]->getActsGrad());
_prev[inpIdx]->getActsGrad().reshape(_prev[inpIdx]->getActsGrad().getNumElements() / v.getNumCols(), v.getNumCols());
}
} else {
convImgActs(v, *_weights[inpIdx], _prev[inpIdx]->getActsGrad(), _imgSize->at(inpIdx), _imgSize->at(inpIdx), _modulesX,
_padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1);
}
}
void ConvLayer::truncBwdActs() {
LocalLayer::truncBwdActs();
if (_conserveMem) {
_weightGradTmp.truncate();
_actGradTmp.truncate();
}
}
/*
* =======================
* LocalUnsharedLayer
* =======================
*/
LocalUnsharedLayer::LocalUnsharedLayer(PyObject* paramsDict) : LocalLayer(paramsDict) {
}
void LocalUnsharedLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
if (_randSparse->at(inpIdx)) {
localFilterActsSparse(*_inputs[inpIdx], *_weights[inpIdx], getActs(), _filterConns->at(inpIdx).dFilterConns,
_imgSize->at(inpIdx), _modulesX, _modulesX, _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx),
_filterChannels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1);
} else {
localFilterActs(*_inputs[inpIdx], *_weights[inpIdx], getActs(), _imgSize->at(inpIdx), _modulesX, _modulesX, _padding->at(inpIdx),
_stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1);
}
if (scaleTargets == 0) {
getActs().addVector(_biases->getW());
}
}
void LocalUnsharedLayer::bpropBiases(NVMatrix& v, PASS_TYPE passType) {
_biases->getGrad().addSum(v, 1, 0, 1);
}
void LocalUnsharedLayer::bpropWeights(NVMatrix& v, int inpIdx, PASS_TYPE passType) {
float scaleInc = 0;
float scaleWGrad = 1;
if (_randSparse->at(inpIdx)) {
localWeightActsSparse(_prev[inpIdx]->getActs(), v, _weights[inpIdx].getGrad(), _filterConns->at(inpIdx).dFilterConns,
_imgSize->at(inpIdx), _modulesX, _modulesX, _filterSize->at(inpIdx), _padding->at(inpIdx), _stride->at(inpIdx),
_channels->at(inpIdx), _filterChannels->at(inpIdx), _groups->at(inpIdx), scaleInc, scaleWGrad);
} else {
localWeightActs(_prev[inpIdx]->getActs(), v, _weights[inpIdx].getGrad(), _imgSize->at(inpIdx), _modulesX, _modulesX, _filterSize->at(inpIdx),
_padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleInc, scaleWGrad);
}
}
void LocalUnsharedLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
if (_randSparse->at(inpIdx)) {
localImgActsSparse(v, *_weights[inpIdx], _prev[inpIdx]->getActsGrad(), _filterConns->at(inpIdx).dFilterConns,
_imgSize->at(inpIdx), _imgSize->at(inpIdx), _modulesX, _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx),
_filterChannels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1);
} else {
localImgActs(v, *_weights[inpIdx], _prev[inpIdx]->getActsGrad(), _imgSize->at(inpIdx), _imgSize->at(inpIdx), _modulesX,
_padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1);
}
}
/*
* =======================
* SoftmaxLayer
* =======================
*/
SoftmaxLayer::SoftmaxLayer(PyObject* paramsDict) : Layer(paramsDict, true) {
}
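// Numerically stable softmax: subtract the maximum activation, exponentiate,
// then normalize by the sum of exponentials.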
void SoftmaxLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
NVMatrix& input = *_inputs[0];
NVMatrix& max = input.max(1);
input.addVector(max, -1, getActs());
getActs().apply(NVMatrixOps::Exp());
NVMatrix& sum = getActs().sum(1);
getActs().eltwiseDivideByVector(sum);
delete &max;
delete &sum;
}
void SoftmaxLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(inpIdx == 0);
bool doLogregGrad = _next.size() == 1 && _next[0]->getType() == "cost.logreg";
if (doLogregGrad) {
NVMatrix& labels = _next[0]->getPrev()[0]->getActs();
float gradCoeff = dynamic_cast<CostLayer*>(_next[0])->getCoeff();
computeLogregSoftmaxGrad(labels, getActs(), _prev[0]->getActsGrad(), scaleTargets == 1, gradCoeff);
} else {
computeSoftmaxGrad(getActs(), v, _prev[0]->getActsGrad(), scaleTargets == 1);
}
}
/*
* =======================
* EltwiseSumLayer
* =======================
*/
EltwiseSumLayer::EltwiseSumLayer(PyObject* paramsDict) : Layer(paramsDict, false) {
_coeffs = pyDictGetFloatV(paramsDict, "coeffs");
}
void EltwiseSumLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
if (scaleTargets == 0) {
_inputs[inpIdx]->scale(_coeffs->at(inpIdx), getActs());
} else {
getActs().add(*_inputs[inpIdx], _coeffs->at(inpIdx));
}
}
void EltwiseSumLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
if (scaleTargets == 0 ) {
v.scale(_coeffs->at(inpIdx), _prev[inpIdx]->getActsGrad());
} else {
assert(&_prev[inpIdx]->getActsGrad() != &v);
_prev[inpIdx]->getActsGrad().add(v, scaleTargets, _coeffs->at(inpIdx));
}
}
/*
* =======================
* EltwiseMaxLayer
* =======================
*/
EltwiseMaxLayer::EltwiseMaxLayer(PyObject* paramsDict) : Layer(paramsDict, false) {
}
void EltwiseMaxLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
if (inpIdx == 1) { // inpIdx 0 is skipped; the first two inputs are combined when the second one arrives
_inputs[inpIdx]->applyBinary(NVMatrixAggs::Max(), *_inputs[0], getActs());
} else if (inpIdx > 1) {
getActs().applyBinary(NVMatrixAggs::Max(), *_inputs[inpIdx]);
}
}
void EltwiseMaxLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
computeEltwiseMaxGrad(v, *_inputs[inpIdx], getActs(), _prev[inpIdx]->getActsGrad(), scaleTargets != 0);
}
/*
* =======================
* DataLayer
* =======================
*/
DataLayer::DataLayer(PyObject* paramsDict) : Layer(paramsDict, false) {
_dataIdx = pyDictGetInt(paramsDict, "dataIdx");
}
void DataLayer::fprop(PASS_TYPE passType) {
throw string("No dava given!");
}
void DataLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
}
void DataLayer::fprop(NVMatrixV& data, PASS_TYPE passType) {
_outputs = data[_dataIdx];
fpropNext(passType);
}
bool DataLayer::isGradProducer() {
return false;
}
/*
* =====================
* PoolLayer
* =====================
*/
PoolLayer::PoolLayer(PyObject* paramsDict, bool trans)
: Layer(paramsDict, trans) {
_channels = pyDictGetInt(paramsDict, "channels");
_sizeX = pyDictGetInt(paramsDict, "sizeX");
_start = pyDictGetInt(paramsDict, "start");
_stride = pyDictGetInt(paramsDict, "stride");
_outputsX = pyDictGetInt(paramsDict, "outputsX");
_imgSize = pyDictGetInt(paramsDict, "imgSize");
_pool = pyDictGetString(paramsDict, "pool");
}
PoolLayer& PoolLayer::makePoolLayer(PyObject* paramsDict) {
string _pool = pyDictGetString(paramsDict, "pool");
if (_pool == "max") {
return *new MaxPoolLayer(paramsDict);
} else if(_pool == "avg") {
return *new AvgPoolLayer(paramsDict);
}
throw string("Unknown pooling layer type ") + _pool;
}
/*
* =====================
* AvgPoolLayer
* =====================
*/
AvgPoolLayer::AvgPoolLayer(PyObject* paramsDict) : PoolLayer(paramsDict, false) {
}
void AvgPoolLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
convLocalPool(*_inputs[0], getActs(), _channels, _sizeX, _start, _stride, _outputsX, AvgPooler());
}
void AvgPoolLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
convLocalAvgUndo(v, _prev[0]->getActsGrad(), _sizeX, _start, _stride, _outputsX, _imgSize, scaleTargets, 1);
}
/*
* =====================
* MaxPoolLayer
* =====================
*/
MaxPoolLayer::MaxPoolLayer(PyObject* paramsDict) : PoolLayer(paramsDict, false) {
}
void MaxPoolLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
convLocalPool(*_inputs[0], getActs(), _channels, _sizeX, _start, _stride, _outputsX, MaxPooler());
}
void MaxPoolLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
convLocalMaxUndo(_prev[0]->getActs(), v, getActs(), _prev[inpIdx]->getActsGrad(), _sizeX, _start, _stride, _outputsX, scaleTargets, 1);
}
/*
* =====================
* NailbedLayer
* =====================
*/
NailbedLayer::NailbedLayer(PyObject* paramsDict) : Layer(paramsDict, false) {
_channels = pyDictGetInt(paramsDict, "channels");
_start = pyDictGetInt(paramsDict, "start");
_stride = pyDictGetInt(paramsDict, "stride");
_outputsX = pyDictGetInt(paramsDict, "outputsX");
_imgSize = pyDictGetInt(paramsDict, "imgSize");
}
void NailbedLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
convBedOfNails(*_inputs[0], getActs(), _channels, _imgSize, _start, _stride, 0, 1);
}
void NailbedLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
convBedOfNailsUndo(v, _prev[0]->getActsGrad(), _channels, _imgSize, _start, _stride, scaleTargets, 1);
}
/*
* =====================
* GaussianBlurLayer
* =====================
*/
GaussianBlurLayer::GaussianBlurLayer(PyObject* paramsDict) : Layer(paramsDict, false) {
_channels = pyDictGetInt(paramsDict, "channels");
_hFilter = pyDictGetMatrix(paramsDict, "filter");
}
void GaussianBlurLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
convGaussianBlur(*_inputs[0], _filter, getActs(), true, _channels, 0, 1);
convGaussianBlur(getActs(), _filter, getActs(), false, _channels, 0, 1);
}
// This is here just for completeness' sake. Why would you backpropagate
// through a blur filter?
void GaussianBlurLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
NVMatrix& tgt1 = _prev[0]->getRcvdBInputs() > 0 ? _actGradsTmp : _prev[0]->getActsGrad();
convGaussianBlur(v, _filter, tgt1, true, _channels, 0, 1);
convGaussianBlur(tgt1, _filter, _prev[0]->getActsGrad(), false, _channels, scaleTargets, 1);
}
void GaussianBlurLayer::copyToGPU() {
_filter.copyFromHost(*_hFilter, true);
}
/*
* =====================
* ResizeLayer
* =====================
*/
ResizeLayer::ResizeLayer(PyObject* paramsDict) : Layer(paramsDict, false) {
_channels = pyDictGetInt(paramsDict, "channels");
_imgSize = pyDictGetInt(paramsDict, "imgSize");
_tgtSize = pyDictGetInt(paramsDict, "tgtSize");
_scale = pyDictGetFloat(paramsDict, "scale");
}
void ResizeLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
convResizeBilinear(*_inputs[0], getActs(), _imgSize, _tgtSize, _scale);
}
// Can't do this
void ResizeLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(false);
}
/*
* =====================
* RGBToYUVLayer
* =====================
*/
RGBToYUVLayer::RGBToYUVLayer(PyObject* paramsDict) : Layer(paramsDict, false) {
}
void RGBToYUVLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
convRGBToYUV(*_inputs[0], getActs());
}
// Can't do this
void RGBToYUVLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(false);
}
/*
* =====================
* RGBToLABLayer
* =====================
*/
RGBToLABLayer::RGBToLABLayer(PyObject* paramsDict) : Layer(paramsDict, false) {
_center = pyDictGetInt(paramsDict, "center");
}
void RGBToLABLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
convRGBToLAB(*_inputs[0], getActs(), _center);
}
// Can't do this
void RGBToLABLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(false);
}
/*
* =====================
* ResponseNormLayer
* =====================
*/
ResponseNormLayer::ResponseNormLayer(PyObject* paramsDict) : Layer(paramsDict, false) {
_channels = pyDictGetInt(paramsDict, "channels");
_size = pyDictGetInt(paramsDict, "size");
_scale = pyDictGetFloat(paramsDict, "scale");
_pow = pyDictGetFloat(paramsDict, "pow");
}
void ResponseNormLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
convResponseNorm(*_inputs[0], _denoms, getActs(), _channels, _size, _scale, _pow);
}
void ResponseNormLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
convResponseNormUndo(v, _denoms, _prev[0]->getActs(), getActs(), _prev[0]->getActsGrad(), _channels, _size, _scale, _pow, scaleTargets, 1);
}
void ResponseNormLayer::truncBwdActs() {
Layer::truncBwdActs();
if (_conserveMem) {
_denoms.truncate();
}
}
/*
* =====================
* CrossMapResponseNormLayer
* =====================
*/
CrossMapResponseNormLayer::CrossMapResponseNormLayer(PyObject* paramsDict) : ResponseNormLayer(paramsDict) {
_blocked = pyDictGetInt(paramsDict, "blocked");
}
void CrossMapResponseNormLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
convResponseNormCrossMap(*_inputs[0], _denoms, getActs(), _channels, _size, _scale, _pow, _blocked);
}
void CrossMapResponseNormLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
convResponseNormCrossMapUndo(v, _denoms, _prev[0]->getActs(), getActs(), _prev[0]->getActsGrad(), _channels, _size, _scale, _pow, _blocked, scaleTargets, 1);
}
/*
* =====================
* ContrastNormLayer
* =====================
*/
ContrastNormLayer::ContrastNormLayer(PyObject* paramsDict) : ResponseNormLayer(paramsDict) {
_imgSize = pyDictGetInt(paramsDict, "imgSize");
}
void ContrastNormLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
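    // The pooling pass below fills _meanDiffs with each pixel's local average; the add() then
    // turns it into (image - local average), and convContrastNorm normalizes the image by the
    // pooled response of those mean-subtracted values.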
NVMatrix& images = *_inputs[0];
convLocalPool(images, _meanDiffs, _channels, _size, -_size/2, 1, _imgSize, AvgPooler());
_meanDiffs.add(images, -1, 1);
convContrastNorm(images, _meanDiffs, _denoms, getActs(), _channels, _size, _scale, _pow);
}
void ContrastNormLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
convContrastNormUndo(v, _denoms, _meanDiffs, getActs(), _prev[inpIdx]->getActsGrad(), _channels, _size, _scale, _pow, scaleTargets, 1);
}
void ContrastNormLayer::truncBwdActs() {
ResponseNormLayer::truncBwdActs();
if (_conserveMem) {
_meanDiffs.truncate();
}
}
/*
* =====================
* CostLayer
* =====================
*/
CostLayer::CostLayer(PyObject* paramsDict, bool trans)
: Layer(paramsDict, trans) {
_coeff = pyDictGetFloat(paramsDict, "coeff");
}
float CostLayer::getCoeff() {
return _coeff;
}
void CostLayer::bprop(PASS_TYPE passType) {
if (_coeff != 0) {
Layer::bprop(passType);
}
}
bool CostLayer::isGradProducer() {
return _coeff != 0;
}
doublev& CostLayer::getCost() {
doublev& v = *new doublev();
v.insert(v.begin(), _costv.begin(), _costv.end());
return v;
}
CostLayer& CostLayer::makeCostLayer(string& type, PyObject* paramsDict) {
if (type == "cost.logreg") {
return *new LogregCostLayer(paramsDict);
} else if (type == "cost.sum2") {
return *new SumOfSquaresCostLayer(paramsDict);
}
throw string("Unknown cost layer type ") + type;
}
/*
* =====================
* LogregCostLayer
* =====================
*/
LogregCostLayer::LogregCostLayer(PyObject* paramsDict) : CostLayer(paramsDict, false) {
}
void LogregCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
// This layer uses its two inputs together
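        // Input 0 carries the labels and input 1 the predicted probabilities. _costv[0] is the
        // summed negative log-probability of the true labels; _costv[1] is the number of cases
        // whose most probable predicted label is wrong.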
if (inpIdx == 0) {
NVMatrix& labels = *_inputs[0];
NVMatrix& probs = *_inputs[1];
int numCases = labels.getNumElements();
NVMatrix& trueLabelLogProbs = getActs(), correctProbs;
computeLogregCost(labels, probs, trueLabelLogProbs, correctProbs);
_costv.clear();
_costv.push_back(-trueLabelLogProbs.sum());
_costv.push_back(numCases - correctProbs.sum());
}
}
void LogregCostLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(inpIdx == 1);
NVMatrix& labels = _prev[0]->getActs();
NVMatrix& probs = _prev[1]->getActs();
NVMatrix& target = _prev[1]->getActsGrad();
// Numerical stability optimization: if the layer below me is a softmax layer, let it handle
// the entire gradient computation to avoid multiplying and dividing by a near-zero quantity.
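    // (With a softmax below, the fused gradient w.r.t. the softmax input is simply p - y per case,
    // whereas computing dC/dp = -y/p here and then applying the softmax Jacobian divides by a
    // probability that may be arbitrarily close to zero.)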
bool doWork = _prev[1]->getNext().size() > 1 || _prev[1]->getType() != "softmax";
if (doWork) {
computeLogregGrad(labels, probs, target, scaleTargets == 1, _coeff);
}
}
/*
* =====================
* SumOfSquaresCostLayer
* =====================
*/
SumOfSquaresCostLayer::SumOfSquaresCostLayer(PyObject* paramsDict) : CostLayer(paramsDict, false) {
}
void SumOfSquaresCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
_inputs[0]->apply(NVMatrixOps::Square(), getActs());
_costv.clear();
_costv.push_back(getActs().sum());
}
void SumOfSquaresCostLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
_prev[inpIdx]->getActsGrad().add(*_inputs[0], scaleTargets, -2 * _coeff);
}
|
04f7c26fbb3f9bec9170278de304305d8850f34f.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
1-bit BMMA code.
Runs at 500TOPS for matrix size of 4096x4096x8192.
Borrows largely from CUDA-SDK.
By Boyuan
*/
#include <assert.h>
#include <hip/hip_runtime.h>
#include <mma.h>
#include <stdio.h>
#include <helper_cuda.h>
#include <helper_functions.h>
// GPU configuration.
#define WARP_SIZE 32
// MMA matrix tile dimensions.
#define M 8
#define N 8
#define K 128
#define C_LAYOUT wmma::mem_row_major
// Implementation constants.
#define WARPS_PER_BLOCK 8
#define THREADS_PER_BLOCK (WARP_SIZE * WARPS_PER_BLOCK)
#define CHUNK_K 1
#define CHUNK_LINE_BYTES (CHUNK_K * sizeof(int4))
#define WARP_COPY_BYTES (WARP_SIZE * sizeof(int4))
#define CHUNK_COPY_LINES_PER_WARP (WARP_COPY_BYTES / CHUNK_LINE_BYTES)
#define CHUNK_COPY_LINE_LANES (WARP_SIZE / CHUNK_COPY_LINES_PER_WARP)
#define BLOCK_ROW_WARPS 2
#define BLOCK_COL_WARPS 4
#define WARP_ROW_TILES 4
#define WARP_COL_TILES 2
#define BLOCK_ROW_TILES (WARP_ROW_TILES * BLOCK_ROW_WARPS)
#define BLOCK_COL_TILES (WARP_COL_TILES * BLOCK_COL_WARPS)
#define GLOBAL_MEM_STRIDE N_GLOBAL
#define SHMEM_STRIDE (N * BLOCK_ROW_TILES)
#define SHMEM_OFFSET (N * WARP_ROW_TILES)
// The macro below is used to shift rows of the A matrix and columns of the B
// matrix in shared memory to minimize possible bank conflicts. Before
// performing the nvcuda::wmma::mma_sync operation, the warp must load the
// matrix data using the nvcuda::wmma::load_matrix_sync operation. Although the
// memory access pattern is not specified for that function, each lane in the
// warp can read one or multiple matrix elements from different matrix rows or
// columns. For shared memory, such access can result in bank conflicts if
// different rows / columns of the matrix map to the same bank. By shifting each
// row and column by a few bytes, we make sure that they map to different banks,
// thus reducing the number of possible bank conflicts. The number of 32
// one-byte "uint8_t" elements is chosen as the minimum possible shift because
// we must keep each row and column 256-bit aligned, as required by
// nvcuda::wmma::load_matrix_sync.
#define SKEW 2 // Updated for int4
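// With CHUNK_K == 1 and SKEW == 2, each shared-memory row spans (1 + 2) * sizeof(int4) = 48 bytes,
// so consecutive rows start 12 four-byte banks apart instead of all starting at bank 0.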
#define checkKernelErrors(expr) \
do { \
expr; \
\
hipError_t __err = hipGetLastError(); \
if (__err != hipSuccess) { \
printf("Line %d: '%s' failed: %s\n", __LINE__, #expr, \
hipGetErrorString(__err)); \
abort(); \
} \
} while (0)
using namespace nvcuda;
using namespace nvcuda::wmma::experimental;
__global__ void apmm_w3a1(const int4 *A, const int4 *B, int *D, int M_GLOBAL, int N_GLOBAL, int K_GLOBAL, int xb, int wb) {
// GEMM configuration.
M_GLOBAL = M_GLOBAL * wb;
N_GLOBAL = N_GLOBAL * xb;
int M_TILES = M_GLOBAL / M;
int N_TILES = N_GLOBAL / N;
int K_TILES = K_GLOBAL / K;
extern __shared__ int4 shmem[][CHUNK_K+SKEW]; // TODO: Padding opportunity may exist here.
// Warp and lane identification.
const unsigned int warpId = threadIdx.x / WARP_SIZE;
const unsigned int laneId = threadIdx.x % WARP_SIZE;
for (unsigned int block_pos = blockIdx.x;; block_pos += gridDim.x) {
const unsigned int block_tile_i = block_pos / (N_TILES/8) * 8;
const unsigned int block_tile_j = block_pos % (N_TILES/8) * 8;
// Stop when there are no more D matrix tiles to compute in this CTA.
if (block_tile_i >= M_TILES) {
break;
}
wmma::fragment<wmma::accumulator, M, N, K, int> c[WARP_COL_TILES]
[WARP_ROW_TILES];
for(int i=0; i < WARP_COL_TILES; i++)
for(int j = 0; j < WARP_ROW_TILES; j++)
wmma::fill_fragment(c[i][j], 0);
// Select what warp copies what matrix to shared memory.
// Warps 0-3 copy the A matrix, warps 4-7 copy the B matrix.
const int4 *warp_ptr = (warpId < 4) ? (&A[block_tile_i * M * (K_GLOBAL/128)] +
M * (K_GLOBAL/128) * (warpId % 4) * 2)
: (&B[block_tile_j * N * (K_GLOBAL/128)] +
N * (K_GLOBAL/128) * (warpId % 4) * 2);
// Go through the global K dimension by a fixed step at a time.
#pragma unroll
for (int tile_k = 0; tile_k < K_TILES; tile_k += CHUNK_K) {
// Offset in shared memory from which the B matrix is stored.
const size_t shmem_idx_b_off = BLOCK_COL_TILES * M; // TODO: This BLOCK_COL_TILES may be selected to improve performance. Maybe moved outside the for loop.
// Copy slices of the A and B matrices to shared memory.
// The first half of the warps in the CTA copy the A matrix, the rest copy
// the B matrix.
size_t shmem_idx =
warpId < (WARPS_PER_BLOCK / 2)
? (M * (warpId % (WARPS_PER_BLOCK / 2)) * 2)
: (N * (warpId % (WARPS_PER_BLOCK / 2)) * 2 + shmem_idx_b_off);
// First half of the warp copies the first row / column of the matrix,
// the second half of the warp copies the next.
int4 *lane_ptr = (int4 *)(warp_ptr + tile_k * (K/128) +
(laneId / CHUNK_COPY_LINE_LANES) * (K_GLOBAL/128)) +
(laneId % CHUNK_COPY_LINE_LANES); // (K/128), since K=128 in bit. int4 is 128 bit.
// Shift the second half of the warp to the next row / column in the
// shared memory.
shmem_idx += laneId / CHUNK_COPY_LINE_LANES;
#pragma unroll
for (int i = 0; i < ((WARP_SIZE / 2) / CHUNK_COPY_LINES_PER_WARP); i++) {
// Copy 16 bytes at once in each lane.
*((int4 *)&shmem[shmem_idx][0] + (laneId % CHUNK_COPY_LINE_LANES)) =
*lane_ptr;
// Advance the global memory pointer and the shared memory index.
lane_ptr = (int4 *)(lane_ptr +
(K_GLOBAL/128) * CHUNK_COPY_LINES_PER_WARP);
shmem_idx += CHUNK_COPY_LINES_PER_WARP;
}
__syncthreads();
// Compute a grid of C matrix tiles in each warp.
#pragma unroll
for (int k_step = 0; k_step < CHUNK_K; k_step++) {
wmma::fragment<wmma::matrix_a, M, N, K, precision::b1, wmma::row_major> a[WARP_COL_TILES];
wmma::fragment<wmma::matrix_b, M, N, K, precision::b1, wmma::col_major> b[WARP_ROW_TILES];
#pragma unroll
for (int i = 0; i < WARP_COL_TILES; i++) {
size_t shmem_idx_a = (warpId / 2) * M * 2 + (i * M);
const int4 *tile_ptr = &shmem[shmem_idx_a][k_step];
wmma::load_matrix_sync(a[i], tile_ptr, (CHUNK_K + SKEW)*128);
#pragma unroll
for (int j = 0; j < WARP_ROW_TILES; j++) {
if (i == 0) {
// Load the B matrix fragment once, because it is going to be
// reused against the other A matrix fragments.
size_t shmem_idx_b = shmem_idx_b_off +
(WARP_ROW_TILES * N) * (warpId % 2) +
(j * N);
const int4 *tile_ptr = &shmem[shmem_idx_b][k_step * (K/128)];
wmma::load_matrix_sync(b[j], tile_ptr, (CHUNK_K + SKEW)*128);
}
// printf("ckpt4\n");
wmma::bmma_sync(c[i][j], a[i], b[j], c[i][j], bmmaBitOpAND);
}
}
}
__syncthreads();
}
// This pointer is used to access the C and D matrix tiles this warp computes.
int *shmem_warp_tile_ptr = (int*)&shmem[0][0] +
(warpId / 2) * SHMEM_STRIDE * M * 2 +
(warpId % 2) * SHMEM_OFFSET; // Will be used only when writing back D. May be moved outside the for loop. TODO.
// Store the D fragments to shared memory.
#pragma unroll
for (int i = 0; i < WARP_COL_TILES; i++) {
#pragma unroll
for (int j = 0; j < WARP_ROW_TILES; j++) {
int *tile_ptr = shmem_warp_tile_ptr + i * SHMEM_STRIDE * M + j * N;
wmma::store_matrix_sync(tile_ptr, c[i][j], SHMEM_STRIDE, C_LAYOUT);
}
}
__syncthreads();
// This pointer is used to stream the C and D matrices block-wide tile to and from shared memory.
// int *shmem_warp_stream_ptr = (int*)&shmem[0][0] + warpId * SHMEM_STRIDE * M; // Will be used only when writing back D. Maybe moved outside the for loop. TODO.
size_t idx = threadIdx.x*4;
int *shmem_warp_stream_ptr = (int*)&shmem[0][0]+idx;
int val[8];
typedef union {
int4 vec;
int a[4];
} U4;
U4 tmp0;
U4 tmp1;
U4 tmp2;
tmp0.vec = *((int4*)shmem_warp_stream_ptr);
tmp1.vec = *((int4*)shmem_warp_stream_ptr+16);
tmp2.vec = *((int4*)shmem_warp_stream_ptr+32);
// // if (warpId == 0 && laneId == 0 && blockIdx.x==0) {
// // for(int i = 0; i < 4; i++) {
// // printf("tmp0.a[%d], %d ", 3-i, tmp0.a[3-i]);
// // }
// // printf("\n");
// // for(int i = 0; i < 4; i++) {
// // printf("tmp1.a[%d], %d ", 3-i, tmp1.a[3-i]);
// // }
// // printf("\n");
// // }
val[0] = tmp0.a[0] + 2*tmp1.a[0] + 4*tmp2.a[0];
val[1] = tmp0.a[1] + 2*tmp1.a[1] + 4*tmp2.a[1];
val[2] = tmp0.a[2] + 2*tmp1.a[2] + 4*tmp2.a[2];
val[3] = tmp0.a[3] + 2*tmp1.a[3] + 4*tmp2.a[3];
// // printf("val0: %d, val1: %d\n", val[2*i], val[2*i+1]);
shmem_warp_stream_ptr += (48*64);
if (threadIdx.x < 80) {
tmp0.vec = *((int4*)shmem_warp_stream_ptr);
tmp1.vec = *((int4*)shmem_warp_stream_ptr+16);
tmp2.vec = *((int4*)shmem_warp_stream_ptr+32);
// if (warpId == 0 && laneId == 0 && blockIdx.x==0) {
// for(int i = 0; i < 4; i++) {
// printf("tmp0.a[%d], %d ", 3-i, tmp0.a[3-i]);
// }
// printf("\n");
// for(int i = 0; i < 4; i++) {
// printf("tmp1.a[%d], %d ", 3-i, tmp1.a[3-i]);
// }
// printf("\n");
// }
val[4] = tmp0.a[0] + 2*tmp1.a[0] + 4*tmp2.a[0];
val[5] = tmp0.a[1] + 2*tmp1.a[1] + 4*tmp2.a[1];
val[6] = tmp0.a[2] + 2*tmp1.a[2] + 4*tmp2.a[2];
val[7] = tmp0.a[3] + 2*tmp1.a[3] + 4*tmp2.a[3];
}
__syncthreads();
shmem_warp_stream_ptr = (int*)&shmem[0][0]+threadIdx.x*4;
// #pragma unroll
tmp0.a[0] = val[0];
tmp0.a[1] = val[1];
tmp0.a[2] = val[2];
tmp0.a[3] = val[3];
*(int4*)shmem_warp_stream_ptr = tmp0.vec;
if (threadIdx.x < 80) {
shmem_warp_stream_ptr += 16*64;
*shmem_warp_stream_ptr = val[4];
*(shmem_warp_stream_ptr+1) = val[5];
*(shmem_warp_stream_ptr+2) = val[6];
*(shmem_warp_stream_ptr+3) = val[7];
}
__syncthreads();
// if (warpId == 0 && laneId == 0 && blockIdx.x==0) {
// for(int i = 0; i < 2; i++) {
// for(int j = 0; j < 2; j++) {
// printf("%d ", *((int*)&shmem[0][0]+i*64+j));
// }
// printf("\n");
// }
// }
shmem_warp_stream_ptr = (int*)&shmem[0][0]+threadIdx.x*4;
// This warp's pointer to the C matrix data to copy memory from to shared memory.
// TODO: May be moved outside the for loop.
size_t gmem_idx = block_tile_i*(M/3)*N_GLOBAL + block_tile_j*N + (threadIdx.x/16)*N_GLOBAL + (threadIdx.x%16)*4;
// Now that shared memory contains all the D tiles, stream them to global memory.
int *dst_gmem_warp_stream_ptr = &D[gmem_idx];
*((int4 *)(dst_gmem_warp_stream_ptr)) = *((int4 *)(shmem_warp_stream_ptr));
if (threadIdx.x < 80) {
shmem_warp_stream_ptr += 16*64;
dst_gmem_warp_stream_ptr += 16*N_GLOBAL;
*((int4 *)(dst_gmem_warp_stream_ptr)) = *((int4 *)(shmem_warp_stream_ptr));
}
__syncthreads();
}
}
// #define verify_output
int main(int argc, char **argv) {
int dev = findCudaDevice(argc, (const char **)argv);
hipDeviceProp_t deviceProp;
checkCudaErrors(hipGetDeviceProperties(&deviceProp, dev));
int X_BIT = 1;
int W_BIT = 3;
for (int M_GLOBAL=128; M_GLOBAL<=1024; M_GLOBAL += 128 ) {
int N_GLOBAL = M_GLOBAL;
int K_GLOBAL = M_GLOBAL;
int4 *X = NULL;
int4 *W = NULL;
int *Output = NULL;
checkCudaErrors(
hipMalloc(reinterpret_cast<void **>(&X), sizeof(int4) * M_GLOBAL * (K_GLOBAL/128) * X_BIT));
checkCudaErrors(
hipMalloc(reinterpret_cast<void **>(&W), sizeof(int4) * N_GLOBAL * (K_GLOBAL/128)));
checkCudaErrors(hipMalloc(reinterpret_cast<void **>(&Output), sizeof(int) * M_GLOBAL * N_GLOBAL));
// #ifdef verify_output
// int4 *X_h = NULL;
// int4 *W_h = NULL;
// int *Output_h = NULL;
// X_h = (int4 *)malloc(sizeof(int4) * M_GLOBAL * (K_GLOBAL/128) * X_BIT);
// W_h = (int4 *)malloc(sizeof(int4) * (K_GLOBAL/128) * N_GLOBAL * W_BIT);
// Output_h = (int *)malloc(sizeof(int) * M_GLOBAL * N_GLOBAL);
// printf("Preparing validation data for GPU...\n");
// init_matrices(A_h, B_h);
// checkCudaErrors(hipMemcpy(A, A_h, sizeof(int4) * M_GLOBAL * (K_GLOBAL/128), hipMemcpyHostToDevice));
// checkCudaErrors(hipMemcpy(B, B_h, sizeof(int4) * N_GLOBAL * (K_GLOBAL/128), hipMemcpyHostToDevice));
// #endif
int SHMEM_SZ = 65536;
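    // Request 64 KB of dynamic shared memory per block; this exceeds the default 48 KB limit,
    // so the max-dynamic-shared-memory attribute must be raised before launching the kernel.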
checkCudaErrors(hipFuncSetAttribute(
apmm_w3a1, hipFuncAttributeMaxDynamicSharedMemorySize,
SHMEM_SZ));
// Run ours NUM_PROFILES times and record time.
float bmma_ms_avg = 0.0f;
int NUM_PROFILES = 200;
for(int iter=0; iter<NUM_PROFILES; ++iter){
float bmma_ms = 0.0f;
hipEvent_t bmma_start;
hipEvent_t bmma_end;
hipEventCreate(&bmma_start);
hipEventCreate(&bmma_end);
hipEventRecord(bmma_start);
    checkKernelErrors(
        (hipLaunchKernelGGL(apmm_w3a1, dim3(deviceProp.multiProcessorCount), dim3(THREADS_PER_BLOCK),
            SHMEM_SZ, 0, X, W, Output, M_GLOBAL, N_GLOBAL, K_GLOBAL, X_BIT, W_BIT)));
hipEventRecord(bmma_end);
hipEventSynchronize(bmma_end);
hipEventElapsedTime(&bmma_ms, bmma_start, bmma_end);
hipEventDestroy(bmma_start);
hipEventDestroy(bmma_end);
bmma_ms_avg += bmma_ms;
}
bmma_ms_avg = bmma_ms_avg/(float)NUM_PROFILES;
printf("V38, 64x64. M_GLOBAL: %d, N_GLOBAL: %d, K_GLOBAL: %d, X_BIT: %d, W_BIT: %d\n", M_GLOBAL, N_GLOBAL, K_GLOBAL, X_BIT, W_BIT);
printf("Time: %f ms\n", bmma_ms_avg);
printf("TOPS: %.2f\n", (((double)(M_GLOBAL) * N_GLOBAL * K_GLOBAL * 2)/(bmma_ms_avg/1000.)) / 1e12);
// #ifdef verify_output
// printf("Validating results...\n");
// checkCudaErrors(hipMemcpy(C_h, C, sizeof(int) * M_GLOBAL * N_GLOBAL, hipMemcpyDeviceToHost));
// int *C_ref = (int *)malloc(sizeof(int) * M_GLOBAL * N_GLOBAL);
// /* Compute reference matrix on CPU */
// compute_ref_w1a2(A_h, B_h, C_ref);
// /* validation results */
// validate_results(C_h, C_ref, M_GLOBAL, N_GLOBAL/2);
// free(A_h);
// free(B_h);
// free(C_h);
// #endif
checkCudaErrors(hipFree(reinterpret_cast<void *>(X)));
checkCudaErrors(hipFree(reinterpret_cast<void *>(W)));
checkCudaErrors(hipFree(reinterpret_cast<void *>(Output)));
}
return EXIT_SUCCESS;
}
|
04f7c26fbb3f9bec9170278de304305d8850f34f.cu
|
/*
1-bit BMMA code.
Runs at 500TOPS for matrix size of 4096x4096x8192.
Borrows largely from CUDA-SDK.
By Boyuan
*/
#include <assert.h>
#include <cuda.h>
#include <mma.h>
#include <stdio.h>
#include <helper_cuda.h>
#include <helper_functions.h>
// GPU configuration.
#define WARP_SIZE 32
// MMA matrix tile dimensions.
#define M 8
#define N 8
#define K 128
#define C_LAYOUT wmma::mem_row_major
// Implementation constants.
#define WARPS_PER_BLOCK 8
#define THREADS_PER_BLOCK (WARP_SIZE * WARPS_PER_BLOCK)
#define CHUNK_K 1
#define CHUNK_LINE_BYTES (CHUNK_K * sizeof(int4))
#define WARP_COPY_BYTES (WARP_SIZE * sizeof(int4))
#define CHUNK_COPY_LINES_PER_WARP (WARP_COPY_BYTES / CHUNK_LINE_BYTES)
#define CHUNK_COPY_LINE_LANES (WARP_SIZE / CHUNK_COPY_LINES_PER_WARP)
#define BLOCK_ROW_WARPS 2
#define BLOCK_COL_WARPS 4
#define WARP_ROW_TILES 4
#define WARP_COL_TILES 2
#define BLOCK_ROW_TILES (WARP_ROW_TILES * BLOCK_ROW_WARPS)
#define BLOCK_COL_TILES (WARP_COL_TILES * BLOCK_COL_WARPS)
#define GLOBAL_MEM_STRIDE N_GLOBAL
#define SHMEM_STRIDE (N * BLOCK_ROW_TILES)
#define SHMEM_OFFSET (N * WARP_ROW_TILES)
// The macro below is used to shift rows of the A matrix and columns of the B
// matrix in shared memory to minimize possible bank conflicts. Before
// performing the nvcuda::wmma::mma_sync operation, the warp must load the
// matrix data using the nvcuda::wmma::load_matrix_sync operation. Although the
// memory access pattern is not specified for that function, each lane in the
// warp can read one or multiple matrix elements from different matrix rows or
// columns. For shared memory, such access can result in bank conflicts if
// different rows / columns of the matrix map to the same bank. By shifting each
// row and column by a few bytes, we make sure that they map to different banks,
// thus reducing the number of possible bank conflicts. The number of 32
// one-byte "uint8_t" elements is chosen as the minimum possible shift because
// we must keep each row and column 256-bit aligned, as required by
// nvcuda::wmma::load_matrix_sync.
#define SKEW 2 // Updated for int4
#define checkKernelErrors(expr) \
do { \
expr; \
\
cudaError_t __err = cudaGetLastError(); \
if (__err != cudaSuccess) { \
printf("Line %d: '%s' failed: %s\n", __LINE__, #expr, \
cudaGetErrorString(__err)); \
abort(); \
} \
} while (0)
using namespace nvcuda;
using namespace nvcuda::wmma::experimental;
__global__ void apmm_w3a1(const int4 *A, const int4 *B, int *D, int M_GLOBAL, int N_GLOBAL, int K_GLOBAL, int xb, int wb) {
// GEMM configuration.
M_GLOBAL = M_GLOBAL * wb;
N_GLOBAL = N_GLOBAL * xb;
int M_TILES = M_GLOBAL / M;
int N_TILES = N_GLOBAL / N;
int K_TILES = K_GLOBAL / K;
extern __shared__ int4 shmem[][CHUNK_K+SKEW]; // TODO: Padding opportunity may exist here.
// Warp and lane identification.
const unsigned int warpId = threadIdx.x / WARP_SIZE;
const unsigned int laneId = threadIdx.x % WARP_SIZE;
for (unsigned int block_pos = blockIdx.x;; block_pos += gridDim.x) {
const unsigned int block_tile_i = block_pos / (N_TILES/8) * 8;
const unsigned int block_tile_j = block_pos % (N_TILES/8) * 8;
// Stop when there are no more D matrix tiles to compute in this CTA.
if (block_tile_i >= M_TILES) {
break;
}
wmma::fragment<wmma::accumulator, M, N, K, int> c[WARP_COL_TILES]
[WARP_ROW_TILES];
for(int i=0; i < WARP_COL_TILES; i++)
for(int j = 0; j < WARP_ROW_TILES; j++)
wmma::fill_fragment(c[i][j], 0);
// Select what warp copies what matrix to shared memory.
// Warps 0-3 copy the A matrix, warps 4-7 copy the B matrix.
const int4 *warp_ptr = (warpId < 4) ? (&A[block_tile_i * M * (K_GLOBAL/128)] +
M * (K_GLOBAL/128) * (warpId % 4) * 2)
: (&B[block_tile_j * N * (K_GLOBAL/128)] +
N * (K_GLOBAL/128) * (warpId % 4) * 2);
// Go through the global K dimension by a fixed step at a time.
#pragma unroll
for (int tile_k = 0; tile_k < K_TILES; tile_k += CHUNK_K) {
// Offset in shared memory from which the B matrix is stored.
const size_t shmem_idx_b_off = BLOCK_COL_TILES * M; // TODO: This BLOCK_COL_TILES may be selected to improve performance. Maybe moved outside the for loop.
// Copy slices of the A and B matrices to shared memory.
// The first half of the warps in the CTA copy the A matrix, the rest copy
// the B matrix.
size_t shmem_idx =
warpId < (WARPS_PER_BLOCK / 2)
? (M * (warpId % (WARPS_PER_BLOCK / 2)) * 2)
: (N * (warpId % (WARPS_PER_BLOCK / 2)) * 2 + shmem_idx_b_off);
// First half of the warp copies the first row / column of the matrix,
// the second half of the warp copies the next.
int4 *lane_ptr = (int4 *)(warp_ptr + tile_k * (K/128) +
(laneId / CHUNK_COPY_LINE_LANES) * (K_GLOBAL/128)) +
(laneId % CHUNK_COPY_LINE_LANES); // (K/128), since K=128 in bit. int4 is 128 bit.
// Shift the second half of the warp to the next row / column in the
// shared memory.
shmem_idx += laneId / CHUNK_COPY_LINE_LANES;
#pragma unroll
for (int i = 0; i < ((WARP_SIZE / 2) / CHUNK_COPY_LINES_PER_WARP); i++) {
// Copy 16 bytes at once in each lane.
*((int4 *)&shmem[shmem_idx][0] + (laneId % CHUNK_COPY_LINE_LANES)) =
*lane_ptr;
// Advance the global memory pointer and the shared memory index.
lane_ptr = (int4 *)(lane_ptr +
(K_GLOBAL/128) * CHUNK_COPY_LINES_PER_WARP);
shmem_idx += CHUNK_COPY_LINES_PER_WARP;
}
__syncthreads();
// Compute a grid of C matrix tiles in each warp.
#pragma unroll
for (int k_step = 0; k_step < CHUNK_K; k_step++) {
wmma::fragment<wmma::matrix_a, M, N, K, precision::b1, wmma::row_major> a[WARP_COL_TILES];
wmma::fragment<wmma::matrix_b, M, N, K, precision::b1, wmma::col_major> b[WARP_ROW_TILES];
#pragma unroll
for (int i = 0; i < WARP_COL_TILES; i++) {
size_t shmem_idx_a = (warpId / 2) * M * 2 + (i * M);
const int4 *tile_ptr = &shmem[shmem_idx_a][k_step];
wmma::load_matrix_sync(a[i], tile_ptr, (CHUNK_K + SKEW)*128);
#pragma unroll
for (int j = 0; j < WARP_ROW_TILES; j++) {
if (i == 0) {
// Load the B matrix fragment once, because it is going to be
// reused against the other A matrix fragments.
size_t shmem_idx_b = shmem_idx_b_off +
(WARP_ROW_TILES * N) * (warpId % 2) +
(j * N);
const int4 *tile_ptr = &shmem[shmem_idx_b][k_step * (K/128)];
wmma::load_matrix_sync(b[j], tile_ptr, (CHUNK_K + SKEW)*128);
}
// printf("ckpt4\n");
wmma::bmma_sync(c[i][j], a[i], b[j], c[i][j], bmmaBitOpAND);
}
}
}
__syncthreads();
}
// This pointer is used to access the C and D matrix tiles this warp computes.
int *shmem_warp_tile_ptr = (int*)&shmem[0][0] +
(warpId / 2) * SHMEM_STRIDE * M * 2 +
(warpId % 2) * SHMEM_OFFSET; // Will be used only when writing back D. May be moved outside the for loop. TODO.
// Store the D fragments to shared memory.
#pragma unroll
for (int i = 0; i < WARP_COL_TILES; i++) {
#pragma unroll
for (int j = 0; j < WARP_ROW_TILES; j++) {
int *tile_ptr = shmem_warp_tile_ptr + i * SHMEM_STRIDE * M + j * N;
wmma::store_matrix_sync(tile_ptr, c[i][j], SHMEM_STRIDE, C_LAYOUT);
}
}
__syncthreads();
// This pointer is used to stream the C and D matrices block-wide tile to and from shared memory.
// int *shmem_warp_stream_ptr = (int*)&shmem[0][0] + warpId * SHMEM_STRIDE * M; // Will be used only when writing back D. Maybe moved outside the for loop. TODO.
size_t idx = threadIdx.x*4;
int *shmem_warp_stream_ptr = (int*)&shmem[0][0]+idx;
int val[8];
typedef union {
int4 vec;
int a[4];
} U4;
U4 tmp0;
U4 tmp1;
U4 tmp2;
tmp0.vec = *((int4*)shmem_warp_stream_ptr);
tmp1.vec = *((int4*)shmem_warp_stream_ptr+16);
tmp2.vec = *((int4*)shmem_warp_stream_ptr+32);
// // if (warpId == 0 && laneId == 0 && blockIdx.x==0) {
// // for(int i = 0; i < 4; i++) {
// // printf("tmp0.a[%d], %d ", 3-i, tmp0.a[3-i]);
// // }
// // printf("\n");
// // for(int i = 0; i < 4; i++) {
// // printf("tmp1.a[%d], %d ", 3-i, tmp1.a[3-i]);
// // }
// // printf("\n");
// // }
val[0] = tmp0.a[0] + 2*tmp1.a[0] + 4*tmp2.a[0];
val[1] = tmp0.a[1] + 2*tmp1.a[1] + 4*tmp2.a[1];
val[2] = tmp0.a[2] + 2*tmp1.a[2] + 4*tmp2.a[2];
val[3] = tmp0.a[3] + 2*tmp1.a[3] + 4*tmp2.a[3];
// // printf("val0: %d, val1: %d\n", val[2*i], val[2*i+1]);
shmem_warp_stream_ptr += (48*64);
if (threadIdx.x < 80) {
tmp0.vec = *((int4*)shmem_warp_stream_ptr);
tmp1.vec = *((int4*)shmem_warp_stream_ptr+16);
tmp2.vec = *((int4*)shmem_warp_stream_ptr+32);
// if (warpId == 0 && laneId == 0 && blockIdx.x==0) {
// for(int i = 0; i < 4; i++) {
// printf("tmp0.a[%d], %d ", 3-i, tmp0.a[3-i]);
// }
// printf("\n");
// for(int i = 0; i < 4; i++) {
// printf("tmp1.a[%d], %d ", 3-i, tmp1.a[3-i]);
// }
// printf("\n");
// }
val[4] = tmp0.a[0] + 2*tmp1.a[0] + 4*tmp2.a[0];
val[5] = tmp0.a[1] + 2*tmp1.a[1] + 4*tmp2.a[1];
val[6] = tmp0.a[2] + 2*tmp1.a[2] + 4*tmp2.a[2];
val[7] = tmp0.a[3] + 2*tmp1.a[3] + 4*tmp2.a[3];
}
__syncthreads();
shmem_warp_stream_ptr = (int*)&shmem[0][0]+threadIdx.x*4;
// #pragma unroll
tmp0.a[0] = val[0];
tmp0.a[1] = val[1];
tmp0.a[2] = val[2];
tmp0.a[3] = val[3];
*(int4*)shmem_warp_stream_ptr = tmp0.vec;
if (threadIdx.x < 80) {
shmem_warp_stream_ptr += 16*64;
*shmem_warp_stream_ptr = val[4];
*(shmem_warp_stream_ptr+1) = val[5];
*(shmem_warp_stream_ptr+2) = val[6];
*(shmem_warp_stream_ptr+3) = val[7];
}
__syncthreads();
// if (warpId == 0 && laneId == 0 && blockIdx.x==0) {
// for(int i = 0; i < 2; i++) {
// for(int j = 0; j < 2; j++) {
// printf("%d ", *((int*)&shmem[0][0]+i*64+j));
// }
// printf("\n");
// }
// }
shmem_warp_stream_ptr = (int*)&shmem[0][0]+threadIdx.x*4;
// This warp's pointer to the C matrix data to copy memory from to shared memory.
// TODO: May be moved outside the for loop.
size_t gmem_idx = block_tile_i*(M/3)*N_GLOBAL + block_tile_j*N + (threadIdx.x/16)*N_GLOBAL + (threadIdx.x%16)*4;
// Now that shared memory contains all the D tiles, stream them to global memory.
int *dst_gmem_warp_stream_ptr = &D[gmem_idx];
*((int4 *)(dst_gmem_warp_stream_ptr)) = *((int4 *)(shmem_warp_stream_ptr));
if (threadIdx.x < 80) {
shmem_warp_stream_ptr += 16*64;
dst_gmem_warp_stream_ptr += 16*N_GLOBAL;
*((int4 *)(dst_gmem_warp_stream_ptr)) = *((int4 *)(shmem_warp_stream_ptr));
}
__syncthreads();
}
}
// #define verify_output
int main(int argc, char **argv) {
int dev = findCudaDevice(argc, (const char **)argv);
cudaDeviceProp deviceProp;
checkCudaErrors(cudaGetDeviceProperties(&deviceProp, dev));
int X_BIT = 1;
int W_BIT = 3;
for (int M_GLOBAL=128; M_GLOBAL<=1024; M_GLOBAL += 128 ) {
int N_GLOBAL = M_GLOBAL;
int K_GLOBAL = M_GLOBAL;
int4 *X = NULL;
int4 *W = NULL;
int *Output = NULL;
checkCudaErrors(
cudaMalloc(reinterpret_cast<void **>(&X), sizeof(int4) * M_GLOBAL * (K_GLOBAL/128) * X_BIT));
checkCudaErrors(
cudaMalloc(reinterpret_cast<void **>(&W), sizeof(int4) * N_GLOBAL * (K_GLOBAL/128)));
checkCudaErrors(cudaMalloc(reinterpret_cast<void **>(&Output), sizeof(int) * M_GLOBAL * N_GLOBAL));
// #ifdef verify_output
// int4 *X_h = NULL;
// int4 *W_h = NULL;
// int *Output_h = NULL;
// X_h = (int4 *)malloc(sizeof(int4) * M_GLOBAL * (K_GLOBAL/128) * X_BIT);
// W_h = (int4 *)malloc(sizeof(int4) * (K_GLOBAL/128) * N_GLOBAL * W_BIT);
// Output_h = (int *)malloc(sizeof(int) * M_GLOBAL * N_GLOBAL);
// printf("Preparing validation data for GPU...\n");
// init_matrices(A_h, B_h);
// checkCudaErrors(cudaMemcpy(A, A_h, sizeof(int4) * M_GLOBAL * (K_GLOBAL/128), cudaMemcpyHostToDevice));
// checkCudaErrors(cudaMemcpy(B, B_h, sizeof(int4) * N_GLOBAL * (K_GLOBAL/128), cudaMemcpyHostToDevice));
// #endif
int SHMEM_SZ = 65536;
checkCudaErrors(cudaFuncSetAttribute(
apmm_w3a1, cudaFuncAttributeMaxDynamicSharedMemorySize,
SHMEM_SZ));
// Run ours NUM_PROFILES times and record time.
float bmma_ms_avg = 0.0f;
int NUM_PROFILES = 200;
for(int iter=0; iter<NUM_PROFILES; ++iter){
float bmma_ms = 0.0f;
cudaEvent_t bmma_start;
cudaEvent_t bmma_end;
cudaEventCreate(&bmma_start);
cudaEventCreate(&bmma_end);
cudaEventRecord(bmma_start);
checkKernelErrors(
(apmm_w3a1<<<deviceProp.multiProcessorCount, THREADS_PER_BLOCK,
SHMEM_SZ>>>(X, W, Output, M_GLOBAL, N_GLOBAL, K_GLOBAL, X_BIT, W_BIT)));
cudaEventRecord(bmma_end);
cudaEventSynchronize(bmma_end);
cudaEventElapsedTime(&bmma_ms, bmma_start, bmma_end);
cudaEventDestroy(bmma_start);
cudaEventDestroy(bmma_end);
bmma_ms_avg += bmma_ms;
}
bmma_ms_avg = bmma_ms_avg/(float)NUM_PROFILES;
printf("V38, 64x64. M_GLOBAL: %d, N_GLOBAL: %d, K_GLOBAL: %d, X_BIT: %d, W_BIT: %d\n", M_GLOBAL, N_GLOBAL, K_GLOBAL, X_BIT, W_BIT);
printf("Time: %f ms\n", bmma_ms_avg);
printf("TOPS: %.2f\n", (((double)(M_GLOBAL) * N_GLOBAL * K_GLOBAL * 2)/(bmma_ms_avg/1000.)) / 1e12);
// #ifdef verify_output
// printf("Validating results...\n");
// checkCudaErrors(cudaMemcpy(C_h, C, sizeof(int) * M_GLOBAL * N_GLOBAL, cudaMemcpyDeviceToHost));
// int *C_ref = (int *)malloc(sizeof(int) * M_GLOBAL * N_GLOBAL);
// /* Compute reference matrix on CPU */
// compute_ref_w1a2(A_h, B_h, C_ref);
// /* validation results */
// validate_results(C_h, C_ref, M_GLOBAL, N_GLOBAL/2);
// free(A_h);
// free(B_h);
// free(C_h);
// #endif
checkCudaErrors(cudaFree(reinterpret_cast<void *>(X)));
checkCudaErrors(cudaFree(reinterpret_cast<void *>(W)));
checkCudaErrors(cudaFree(reinterpret_cast<void *>(Output)));
}
return EXIT_SUCCESS;
}
|
c2fd69cafa395cb386351612f116a19ff534de3e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hipblas.h>
#include <cusolverDn.h>
#include <iostream>
#include "cuutils.h"
#include "absnf.h"
#include "utils.hpp"
#include <chrono>
#include <typeinfo>
#define t_def double
typedef std::chrono::high_resolution_clock::time_point TimeVar;
void single_execution(int s)
{
t_def *h_a = (t_def *)malloc(s*sizeof(t_def));
t_def *h_b = (t_def *)malloc(s*sizeof(t_def));
t_def *h_Z = (t_def *)malloc(s*s*sizeof(t_def));
t_def *h_L = (t_def *)malloc(s*s*sizeof(t_def));
t_def *h_J = (t_def *)malloc(s*s*sizeof(t_def));
t_def *h_Y = (t_def *)malloc(s*s*sizeof(t_def));
t_def *h_dx = (t_def *)malloc(s*s*sizeof(t_def));
t_def *h_dz = (t_def *)malloc(s*sizeof(t_def));
t_def *h_dy = (t_def *)malloc(s*sizeof(t_def));
t_def *d_a; hipMalloc((void **)&d_a, s*sizeof(t_def));
t_def *d_b; hipMalloc((void **)&d_b, s*sizeof(t_def));
t_def *d_Z; hipMalloc((void **)&d_Z, s*s*sizeof(t_def));
t_def *d_L; hipMalloc((void **)&d_L, s*s*sizeof(t_def));
t_def *d_J; hipMalloc((void **)&d_J, s*s*sizeof(t_def));
t_def *d_Y; hipMalloc((void **)&d_Y, s*s*sizeof(t_def));
t_def *d_dx; hipMalloc((void **)&d_dx, s*sizeof(t_def));
t_def *d_dz; hipMalloc((void **)&d_dz, s*sizeof(t_def));
t_def *d_abs_dz; hipMalloc((void **)&d_abs_dz, s*sizeof(t_def));
t_def *d_dy; hipMalloc((void **)&d_dy, s*sizeof(t_def));
utils::fillRandVector(h_a, s,-10,10);
utils::fillRandVector(h_b, s,-10,10);
utils::fillRandVector(h_Z, s*s,-10,10);
utils::fillRandVector(h_J, s*s,-10,10);
utils::fillRandVector(h_Y, s*s,-10,10);
utils::fillRandVector(h_dx, s,-10,10);
utils::fillRandMatrix(h_L, s,s,-10,10,0,utils::MATRIXOPT::LOWER);
hipblasHandle_t cublas_handle;
hipblasCreate(&cublas_handle);
TimeVar t_0 = std::chrono::high_resolution_clock::now();
hipMemcpy(d_a, h_a, s*sizeof(t_def), hipMemcpyHostToDevice);
hipMemcpy(d_b, h_b, s*sizeof(t_def), hipMemcpyHostToDevice);
hipMemcpy(d_Z, h_Z, s*s*sizeof(t_def), hipMemcpyHostToDevice);
hipMemcpy(d_L, h_L, s*s*sizeof(t_def), hipMemcpyHostToDevice);
hipMemcpy(d_J, h_J, s*s*sizeof(t_def), hipMemcpyHostToDevice);
hipMemcpy(d_Y, h_Y, s*s*sizeof(t_def), hipMemcpyHostToDevice);
hipMemcpy(d_dx, h_dx, s*sizeof(t_def), hipMemcpyHostToDevice);
TimeVar t_1 = std::chrono::high_resolution_clock::now();
absnf::eval_core(cublas_handle, d_a, d_b,
d_Z, d_L,
d_J, d_Y,
d_dx,
s, s, s,
d_dz, d_dy,
d_abs_dz);
hipDeviceSynchronize();
TimeVar t_2 = std::chrono::high_resolution_clock::now();
hipMemcpy(h_dz, d_dz, s*sizeof(t_def), hipMemcpyDeviceToHost);
hipMemcpy(h_dy, d_dy, s*sizeof(t_def), hipMemcpyDeviceToHost);
hipDeviceSynchronize();
TimeVar t_3 = std::chrono::high_resolution_clock::now();
auto int_upload = std::chrono::duration_cast<std::chrono::milliseconds>( t_1 - t_0 ).count();
auto int_exec = std::chrono::duration_cast<std::chrono::milliseconds>( t_2 - t_1 ).count();
auto int_download = std::chrono::duration_cast<std::chrono::milliseconds>( t_3 - t_2 ).count();
auto int_total = std::chrono::duration_cast<std::chrono::milliseconds>( t_3 - t_0 ).count();
std::cout << "----" << s << "----" << std::endl;
std::cout <<"upload: " << int_upload << std::endl;
std::cout <<"exec: " << int_exec << std::endl;
std::cout <<"download: " << int_download << std::endl;
std::cout <<"total: " << int_total << std::endl;
free(h_a);
free(h_b);
free(h_Z);
free(h_L);
free(h_J);
free(h_Y);
free(h_dx);
hipFree(d_a);
hipFree(d_b);
hipFree(d_Z);
hipFree(d_L);
hipFree(d_J);
hipFree(d_Y);
hipFree(d_dx);
hipFree(d_dz);
hipFree(d_abs_dz);
hipFree(d_dy);
hipblasDestroy(cublas_handle);
}
void multiple_executions(int s, int executions)
{
t_def *h_a = (t_def *)malloc(s*sizeof(t_def));
t_def *h_b = (t_def *)malloc(s*sizeof(t_def));
t_def *h_Z = (t_def *)malloc(s*s*sizeof(t_def));
t_def *h_L = (t_def *)malloc(s*s*sizeof(t_def));
t_def *h_J = (t_def *)malloc(s*s*sizeof(t_def));
t_def *h_Y = (t_def *)malloc(s*s*sizeof(t_def));
t_def *h_dx = (t_def *)malloc(s*s*sizeof(t_def));
t_def *h_dz = (t_def *)malloc(s*sizeof(t_def));
t_def *h_dy = (t_def *)malloc(s*sizeof(t_def));
t_def *d_a; hipMalloc((void **)&d_a, s*sizeof(t_def));
t_def *d_b; hipMalloc((void **)&d_b, s*sizeof(t_def));
t_def *d_Z; hipMalloc((void **)&d_Z, s*s*sizeof(t_def));
t_def *d_L; hipMalloc((void **)&d_L, s*s*sizeof(t_def));
t_def *d_J; hipMalloc((void **)&d_J, s*s*sizeof(t_def));
t_def *d_Y; hipMalloc((void **)&d_Y, s*s*sizeof(t_def));
t_def *d_dx; hipMalloc((void **)&d_dx, s*sizeof(t_def));
t_def *d_dz; hipMalloc((void **)&d_dz, s*sizeof(t_def));
t_def *d_abs_dz; hipMalloc((void **)&d_abs_dz, s*sizeof(t_def));
t_def *d_dy; hipMalloc((void **)&d_dy, s*sizeof(t_def));
utils::fillRandVector(h_a, s,-10,10);
utils::fillRandVector(h_b, s,-10,10);
utils::fillRandVector(h_Z, s*s,-10,10);
utils::fillRandVector(h_J, s*s,-10,10);
utils::fillRandVector(h_Y, s*s,-10,10);
utils::fillRandVector(h_dx, s,-10,10);
utils::fillRandMatrix(h_L, s,s,-10,10,0,utils::MATRIXOPT::LOWER);
hipblasHandle_t cublas_handle;
hipblasCreate(&cublas_handle);
// TimeVar t_0 = std::chrono::high_resolution_clock::now();
hipMemcpy(d_a, h_a, s*sizeof(t_def), hipMemcpyHostToDevice);
hipMemcpy(d_b, h_b, s*sizeof(t_def), hipMemcpyHostToDevice);
hipMemcpy(d_Z, h_Z, s*s*sizeof(t_def), hipMemcpyHostToDevice);
hipMemcpy(d_L, h_L, s*s*sizeof(t_def), hipMemcpyHostToDevice);
hipMemcpy(d_J, h_J, s*s*sizeof(t_def), hipMemcpyHostToDevice);
hipMemcpy(d_Y, h_Y, s*s*sizeof(t_def), hipMemcpyHostToDevice);
hipMemcpy(d_dx, h_dx, s*sizeof(t_def), hipMemcpyHostToDevice);
TimeVar t_0 = std::chrono::high_resolution_clock::now();
for(int i=0; i<executions; i++)
{
absnf::eval_core(cublas_handle, d_a, d_b,
d_Z, d_L,
d_J, d_Y,
d_dx,
s, s, s,
d_dz, d_dy,
d_abs_dz);
}
hipDeviceSynchronize();
TimeVar t_1 = std::chrono::high_resolution_clock::now();
hipMemcpy(h_dz, d_dz, s*sizeof(t_def), hipMemcpyDeviceToHost);
hipMemcpy(h_dy, d_dy, s*sizeof(t_def), hipMemcpyDeviceToHost);
hipDeviceSynchronize();
// TimeVar t_3 = std::chrono::high_resolution_clock::now();
// auto int_upload = std::chrono::duration_cast<std::chrono::milliseconds>( t_1 - t_0 ).count();
auto int_exec = std::chrono::duration_cast<std::chrono::milliseconds>( t_1 - t_0 ).count();
// auto int_download = std::chrono::duration_cast<std::chrono::milliseconds>( t_3 - t_2 ).count();
// auto int_total = std::chrono::duration_cast<std::chrono::milliseconds>( t_3 - t_0 ).count();
std::cout << "----" << s << " : " << executions << "----" << std::endl;
std::cout <<"exec: " << int_exec << std::endl;
free(h_a);
free(h_b);
free(h_Z);
free(h_L);
free(h_J);
free(h_Y);
free(h_dx);
hipFree(d_a);
hipFree(d_b);
hipFree(d_Z);
hipFree(d_L);
hipFree(d_J);
hipFree(d_Y);
hipFree(d_dx);
hipFree(d_dz);
hipFree(d_abs_dz);
hipFree(d_dy);
hipblasDestroy(cublas_handle);
}
void devInfo()
{
hipDeviceProp_t prop;
int devcount;
hipGetDeviceCount(&devcount);
std::cout << "Devices found: " << devcount << std::endl;
for(int i=0; i<devcount; i++)
{
hipGetDeviceProperties(&prop, i);
std::cout << "------------------" << std::endl;
std::cout << "Device: " << i << std::endl;
std::cout << "------------------" << std::endl;
std::cout << "Name:\t\t\t" << prop.name << std::endl;
std::cout << "GlobalMemory:\t\t" << prop.totalGlobalMem << std::endl;
std::cout << "WarpSize:\t\t" << prop.warpSize << std::endl;
std::cout << "MaxThreadsPerBlock:\t" << prop.maxThreadsPerBlock << std::endl;
std::cout << "MaxThreadsDim:\t\t" << prop.maxThreadsDim[0] << " : " << prop.maxThreadsDim[1] << " : " << prop.maxThreadsDim[2] << std::endl;
std::cout << "MaxGridSize:\t\t" << prop.maxGridSize[0] << " : " << prop.maxGridSize[1] << " : " << prop.maxGridSize[2] << std::endl;
std::cout << "MultiProcessorCount:\t" << prop.multiProcessorCount << std::endl;
}
}
long int getGlobalMemory()
{
long int globalMemory = 0;
hipDeviceProp_t prop;
int devcount;
hipGetDeviceCount(&devcount);
if (devcount > 0)
{
hipGetDeviceProperties(&prop, 0);
globalMemory = prop.totalGlobalMem;
}
return globalMemory;
}
long int calcRequiredMemory(int s)
{
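	// Device footprint: 4 s-by-s matrices (Z, L, J, Y) plus 6 length-s vectors
	// (a, b, dx, dz, |dz|, dy), all of type t_def.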
return (4*s*s + 6*s) * sizeof(t_def);
}
void single_execution_series()
{
devInfo();
long int globalMemory = getGlobalMemory();
std::cout << globalMemory << std::endl;
// SINGLE EXECUTIONS
int size = 1000;
int maxsize = 20000;
while(true)
{
long int requiredMemory = calcRequiredMemory(size);
if(requiredMemory > (long int) (globalMemory * 0.9) && size < maxsize)
{
break;
}
else
{
single_execution(size);
std::cout << "Required Memory: " << requiredMemory * 1e-9 << std::endl;
size+=1000;
}
}
}
void multiple_executions_series(int times)
{
devInfo();
long int globalMemory = getGlobalMemory();
std::cout << globalMemory << std::endl;
int size = 1000;
int maxsize = 20000;
while(true)
{
long int requiredMemory = calcRequiredMemory(size);
if(requiredMemory > (long int) (globalMemory * 0.9) && size < maxsize)
{
break;
}
else
{
multiple_executions(size, times);
std::cout << "Required Memory: " << requiredMemory * 1e-9 << std::endl;
size+=1000;
}
}
}
void measure_performance()
{
std::cout << "------------------------------------------------" << std::endl;
std::cout << "Type: " << typeid(t_def).name() << std::endl;
std::cout << "------------------------------------------------" << std::endl;
single_execution_series();
multiple_executions_series(1000);
}
int main()
{
measure_performance();
// multiple_executions_series(1000);
// multiple_executions(6000,100);
return 0;
}
|
c2fd69cafa395cb386351612f116a19ff534de3e.cu
|
#include <cublas_v2.h>
#include <cusolverDn.h>
#include <iostream>
#include "cuutils.h"
#include "absnf.h"
#include "utils.hpp"
#include <chrono>
#include <typeinfo>
#define t_def double
typedef std::chrono::high_resolution_clock::time_point TimeVar;
void single_execution(int s)
{
t_def *h_a = (t_def *)malloc(s*sizeof(t_def));
t_def *h_b = (t_def *)malloc(s*sizeof(t_def));
t_def *h_Z = (t_def *)malloc(s*s*sizeof(t_def));
t_def *h_L = (t_def *)malloc(s*s*sizeof(t_def));
t_def *h_J = (t_def *)malloc(s*s*sizeof(t_def));
t_def *h_Y = (t_def *)malloc(s*s*sizeof(t_def));
t_def *h_dx = (t_def *)malloc(s*s*sizeof(t_def));
t_def *h_dz = (t_def *)malloc(s*sizeof(t_def));
t_def *h_dy = (t_def *)malloc(s*sizeof(t_def));
t_def *d_a; cudaMalloc((void **)&d_a, s*sizeof(t_def));
t_def *d_b; cudaMalloc((void **)&d_b, s*sizeof(t_def));
t_def *d_Z; cudaMalloc((void **)&d_Z, s*s*sizeof(t_def));
t_def *d_L; cudaMalloc((void **)&d_L, s*s*sizeof(t_def));
t_def *d_J; cudaMalloc((void **)&d_J, s*s*sizeof(t_def));
t_def *d_Y; cudaMalloc((void **)&d_Y, s*s*sizeof(t_def));
t_def *d_dx; cudaMalloc((void **)&d_dx, s*sizeof(t_def));
t_def *d_dz; cudaMalloc((void **)&d_dz, s*sizeof(t_def));
t_def *d_abs_dz; cudaMalloc((void **)&d_abs_dz, s*sizeof(t_def));
t_def *d_dy; cudaMalloc((void **)&d_dy, s*sizeof(t_def));
utils::fillRandVector(h_a, s,-10,10);
utils::fillRandVector(h_b, s,-10,10);
utils::fillRandVector(h_Z, s*s,-10,10);
utils::fillRandVector(h_J, s*s,-10,10);
utils::fillRandVector(h_Y, s*s,-10,10);
utils::fillRandVector(h_dx, s,-10,10);
utils::fillRandMatrix(h_L, s,s,-10,10,0,utils::MATRIXOPT::LOWER);
cublasHandle_t cublas_handle;
cublasCreate(&cublas_handle);
TimeVar t_0 = std::chrono::high_resolution_clock::now();
cudaMemcpy(d_a, h_a, s*sizeof(t_def), cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_b, s*sizeof(t_def), cudaMemcpyHostToDevice);
cudaMemcpy(d_Z, h_Z, s*s*sizeof(t_def), cudaMemcpyHostToDevice);
cudaMemcpy(d_L, h_L, s*s*sizeof(t_def), cudaMemcpyHostToDevice);
cudaMemcpy(d_J, h_J, s*s*sizeof(t_def), cudaMemcpyHostToDevice);
cudaMemcpy(d_Y, h_Y, s*s*sizeof(t_def), cudaMemcpyHostToDevice);
cudaMemcpy(d_dx, h_dx, s*sizeof(t_def), cudaMemcpyHostToDevice);
TimeVar t_1 = std::chrono::high_resolution_clock::now();
absnf::eval_core(cublas_handle, d_a, d_b,
d_Z, d_L,
d_J, d_Y,
d_dx,
s, s, s,
d_dz, d_dy,
d_abs_dz);
cudaDeviceSynchronize();
TimeVar t_2 = std::chrono::high_resolution_clock::now();
cudaMemcpy(h_dz, d_dz, s*sizeof(t_def), cudaMemcpyDeviceToHost);
cudaMemcpy(h_dy, d_dy, s*sizeof(t_def), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
TimeVar t_3 = std::chrono::high_resolution_clock::now();
auto int_upload = std::chrono::duration_cast<std::chrono::milliseconds>( t_1 - t_0 ).count();
auto int_exec = std::chrono::duration_cast<std::chrono::milliseconds>( t_2 - t_1 ).count();
auto int_download = std::chrono::duration_cast<std::chrono::milliseconds>( t_3 - t_2 ).count();
auto int_total = std::chrono::duration_cast<std::chrono::milliseconds>( t_3 - t_0 ).count();
std::cout << "----" << s << "----" << std::endl;
std::cout <<"upload: " << int_upload << std::endl;
std::cout <<"exec: " << int_exec << std::endl;
std::cout <<"download: " << int_download << std::endl;
std::cout <<"total: " << int_total << std::endl;
free(h_a);
free(h_b);
free(h_Z);
free(h_L);
free(h_J);
free(h_Y);
free(h_dx);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_Z);
cudaFree(d_L);
cudaFree(d_J);
cudaFree(d_Y);
cudaFree(d_dx);
cudaFree(d_dz);
cudaFree(d_abs_dz);
cudaFree(d_dy);
cublasDestroy(cublas_handle);
}
void multiple_executions(int s, int executions)
{
t_def *h_a = (t_def *)malloc(s*sizeof(t_def));
t_def *h_b = (t_def *)malloc(s*sizeof(t_def));
t_def *h_Z = (t_def *)malloc(s*s*sizeof(t_def));
t_def *h_L = (t_def *)malloc(s*s*sizeof(t_def));
t_def *h_J = (t_def *)malloc(s*s*sizeof(t_def));
t_def *h_Y = (t_def *)malloc(s*s*sizeof(t_def));
t_def *h_dx = (t_def *)malloc(s*s*sizeof(t_def));
t_def *h_dz = (t_def *)malloc(s*sizeof(t_def));
t_def *h_dy = (t_def *)malloc(s*sizeof(t_def));
t_def *d_a; cudaMalloc((void **)&d_a, s*sizeof(t_def));
t_def *d_b; cudaMalloc((void **)&d_b, s*sizeof(t_def));
t_def *d_Z; cudaMalloc((void **)&d_Z, s*s*sizeof(t_def));
t_def *d_L; cudaMalloc((void **)&d_L, s*s*sizeof(t_def));
t_def *d_J; cudaMalloc((void **)&d_J, s*s*sizeof(t_def));
t_def *d_Y; cudaMalloc((void **)&d_Y, s*s*sizeof(t_def));
t_def *d_dx; cudaMalloc((void **)&d_dx, s*sizeof(t_def));
t_def *d_dz; cudaMalloc((void **)&d_dz, s*sizeof(t_def));
t_def *d_abs_dz; cudaMalloc((void **)&d_abs_dz, s*sizeof(t_def));
t_def *d_dy; cudaMalloc((void **)&d_dy, s*sizeof(t_def));
utils::fillRandVector(h_a, s,-10,10);
utils::fillRandVector(h_b, s,-10,10);
utils::fillRandVector(h_Z, s*s,-10,10);
utils::fillRandVector(h_J, s*s,-10,10);
utils::fillRandVector(h_Y, s*s,-10,10);
utils::fillRandVector(h_dx, s,-10,10);
utils::fillRandMatrix(h_L, s,s,-10,10,0,utils::MATRIXOPT::LOWER);
cublasHandle_t cublas_handle;
cublasCreate(&cublas_handle);
// TimeVar t_0 = std::chrono::high_resolution_clock::now();
cudaMemcpy(d_a, h_a, s*sizeof(t_def), cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_b, s*sizeof(t_def), cudaMemcpyHostToDevice);
cudaMemcpy(d_Z, h_Z, s*s*sizeof(t_def), cudaMemcpyHostToDevice);
cudaMemcpy(d_L, h_L, s*s*sizeof(t_def), cudaMemcpyHostToDevice);
cudaMemcpy(d_J, h_J, s*s*sizeof(t_def), cudaMemcpyHostToDevice);
cudaMemcpy(d_Y, h_Y, s*s*sizeof(t_def), cudaMemcpyHostToDevice);
cudaMemcpy(d_dx, h_dx, s*sizeof(t_def), cudaMemcpyHostToDevice);
TimeVar t_0 = std::chrono::high_resolution_clock::now();
for(int i=0; i<executions; i++)
{
absnf::eval_core(cublas_handle, d_a, d_b,
d_Z, d_L,
d_J, d_Y,
d_dx,
s, s, s,
d_dz, d_dy,
d_abs_dz);
}
cudaDeviceSynchronize();
TimeVar t_1 = std::chrono::high_resolution_clock::now();
cudaMemcpy(h_dz, d_dz, s*sizeof(t_def), cudaMemcpyDeviceToHost);
cudaMemcpy(h_dy, d_dy, s*sizeof(t_def), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
// TimeVar t_3 = std::chrono::high_resolution_clock::now();
// auto int_upload = std::chrono::duration_cast<std::chrono::milliseconds>( t_1 - t_0 ).count();
auto int_exec = std::chrono::duration_cast<std::chrono::milliseconds>( t_1 - t_0 ).count();
// auto int_download = std::chrono::duration_cast<std::chrono::milliseconds>( t_3 - t_2 ).count();
// auto int_total = std::chrono::duration_cast<std::chrono::milliseconds>( t_3 - t_0 ).count();
std::cout << "----" << s << " : " << executions << "----" << std::endl;
std::cout <<"exec: " << int_exec << std::endl;
free(h_a);
free(h_b);
free(h_Z);
free(h_L);
free(h_J);
free(h_Y);
free(h_dx);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_Z);
cudaFree(d_L);
cudaFree(d_J);
cudaFree(d_Y);
cudaFree(d_dx);
cudaFree(d_dz);
cudaFree(d_abs_dz);
cudaFree(d_dy);
cublasDestroy(cublas_handle);
}
void devInfo()
{
cudaDeviceProp prop;
int devcount;
cudaGetDeviceCount(&devcount);
std::cout << "Devices found: " << devcount << std::endl;
for(int i=0; i<devcount; i++)
{
cudaGetDeviceProperties(&prop, i);
std::cout << "------------------" << std::endl;
std::cout << "Device: " << i << std::endl;
std::cout << "------------------" << std::endl;
std::cout << "Name:\t\t\t" << prop.name << std::endl;
std::cout << "GlobalMemory:\t\t" << prop.totalGlobalMem << std::endl;
std::cout << "WarpSize:\t\t" << prop.warpSize << std::endl;
std::cout << "MaxThreadsPerBlock:\t" << prop.maxThreadsPerBlock << std::endl;
std::cout << "MaxThreadsDim:\t\t" << prop.maxThreadsDim[0] << " : " << prop.maxThreadsDim[1] << " : " << prop.maxThreadsDim[2] << std::endl;
std::cout << "MaxGridSize:\t\t" << prop.maxGridSize[0] << " : " << prop.maxGridSize[1] << " : " << prop.maxGridSize[2] << std::endl;
std::cout << "MultiProcessorCount:\t" << prop.multiProcessorCount << std::endl;
}
}
long int getGlobalMemory()
{
long int globalMemory = 0;
cudaDeviceProp prop;
int devcount;
cudaGetDeviceCount(&devcount);
if (devcount > 0)
{
cudaGetDeviceProperties(&prop, 0);
globalMemory = prop.totalGlobalMem;
}
return globalMemory;
}
long int calcRequiredMemory(int s)
{
return (4*s*s + 6*s) * sizeof(t_def);
}
void single_execution_series()
{
devInfo();
long int globalMemory = getGlobalMemory();
std::cout << globalMemory << std::endl;
// SINGLE EXECUTIONS
int size = 1000;
int maxsize = 20000;
while(true)
{
long int requiredMemory = calcRequiredMemory(size);
if(requiredMemory > (long int) (globalMemory * 0.9) && size < maxsize)
{
break;
}
else
{
single_execution(size);
std::cout << "Required Memory: " << requiredMemory * 1e-9 << std::endl;
size+=1000;
}
}
}
void multiple_executions_series(int times)
{
devInfo();
long int globalMemory = getGlobalMemory();
std::cout << globalMemory << std::endl;
int size = 1000;
int maxsize = 20000;
while(true)
{
long int requiredMemory = calcRequiredMemory(size);
if(requiredMemory > (long int) (globalMemory * 0.9) && size < maxsize)
{
break;
}
else
{
multiple_executions(size, times);
std::cout << "Required Memory: " << requiredMemory * 1e-9 << std::endl;
size+=1000;
}
}
}
void measure_performance()
{
std::cout << "------------------------------------------------" << std::endl;
std::cout << "Type: " << typeid(t_def).name() << std::endl;
std::cout << "------------------------------------------------" << std::endl;
single_execution_series();
multiple_executions_series(1000);
}
int main()
{
measure_performance();
// multiple_executions_series(1000);
// multiple_executions(6000,100);
return 0;
}
|
da3c1dbca2f05b5aadad5eb23ade5a25eb024f03.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "kernel.hip"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
unsigned int rows = 1;
unsigned int cols = 1;
float *ddata = NULL;
hipMalloc(&ddata, XSIZE*YSIZE);
float *vdata = NULL;
hipMalloc(&vdata, XSIZE*YSIZE);
float *results = NULL;
hipMalloc(&results, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
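// Round the problem size up to the next multiple of the block dimensions so the grid divides it evenly.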
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(kernel, dim3(gridBlock), dim3(threadBlock), 0, 0, rows, cols, ddata, vdata, results);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(kernel, dim3(gridBlock), dim3(threadBlock), 0, 0, rows, cols, ddata, vdata, results);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(kernel, dim3(gridBlock), dim3(threadBlock), 0, 0, rows, cols, ddata, vdata, results);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
da3c1dbca2f05b5aadad5eb23ade5a25eb024f03.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
unsigned int rows = 1;
unsigned int cols = 1;
float *ddata = NULL;
cudaMalloc(&ddata, XSIZE*YSIZE);
float *vdata = NULL;
cudaMalloc(&vdata, XSIZE*YSIZE);
float *results = NULL;
cudaMalloc(&results, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
kernel<<<gridBlock,threadBlock>>>(rows,cols,ddata,vdata,results);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
kernel<<<gridBlock,threadBlock>>>(rows,cols,ddata,vdata,results);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
kernel<<<gridBlock,threadBlock>>>(rows,cols,ddata,vdata,results);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
unique_encoding.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* unique_encoding.cu
*
 * Created on: 8 Oct 2015
* Author: Karol Dzitkowski
*/
#include "compression/unique/unique_encoding.hpp"
#include "core/cuda_macros.cuh"
#include "core/not_implemented_exception.hpp"
#include "core/cuda_launcher.cuh"
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <thrust/unique.h>
namespace ddj {
SharedCudaPtr<char> UniqueEncoding::FindUnique(SharedCudaPtr<char> data, DataType type)
{
switch(type)
{
case DataType::d_int:
return boost::reinterpret_pointer_cast<CudaPtr<char>>(
FindUnique(boost::reinterpret_pointer_cast<CudaPtr<int>>(data)));
case DataType::d_float:
return boost::reinterpret_pointer_cast<CudaPtr<char>>(
FindUnique(boost::reinterpret_pointer_cast<CudaPtr<float>>(data)));
default:
throw NotImplementedException("No UniqueEncoding::FindUnique implementation for that type");
}
}
template<typename T>
SharedCudaPtr<T> UniqueEncoding::FindUnique(SharedCudaPtr<T> data)
{
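	// Sort a device-side copy of the data first: thrust::unique only removes *adjacent*
	// duplicates, so sorting is what makes the result the set of distinct values.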
thrust::device_ptr<T> dataPtr(data->get());
thrust::device_vector<T> dataVector(dataPtr, dataPtr+data->size());
thrust::sort(dataVector.begin(), dataVector.end());
auto end = thrust::unique(dataVector.begin(), dataVector.end());
int size = end - dataVector.begin();
auto result = CudaPtr<T>::make_shared(size);
result->fill(dataVector.data().get(), size);
return result;
}
template<typename T>
__global__ void _compressUniqueKernel(
T* data,
int dataSize,
int bitsNeeded,
int dataPerOutputCnt,
T* uniqueValues,
int freqCnt,
unsigned int* output,
int outputSize)
{
unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x; // output index
if(idx >= outputSize) return;
unsigned int result = 0;
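	// Pack dataPerOutputCnt indices, bitsNeeded bits each, into this single 32-bit word;
	// each value's index is found by a linear scan of the uniqueValues array.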
for(int i = 0; i < dataPerOutputCnt; i++)
{
T value = data[idx * dataPerOutputCnt + i];
for(int j = 0; j < freqCnt; j++)
{
if(value == uniqueValues[j])
{
result = SaveNbitIntValToWord(bitsNeeded, i, j, result);
}
}
}
output[idx] = result;
}
// UNIQUE COMPRESSION ALGORITHM
//
// 1. First we save the size of the unique values array and the size of the data to compress
// 2. Next we store the array of unique values
// 3. Then we compress all the data (containing these unique values) using N bits, where
//    N is the smallest number of bits that can encode (unique values array length - 1).
//    This way each value is encoded as its index in the unique values array. We store the data in an
//    array of unsigned int values, packing as many values as possible into each unsigned int.
//    For example, if we need 4 bits to encode a single value, we put 8 values in one unsigned int
//    and store it in the output table.
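//
// Worked example (illustrative only; the exact bit layout is whatever SaveNbitIntValToWord produces):
// with 5 distinct values, uniqueSize = 5 and bitsNeeded = ALT_BITLEN(4) = 3.
// Using 32-bit output words, dataPerOutputCnt = 32 / 3 = 10, so 25 input elements
// compress into outputSize = (25 + 10 - 1) / 10 = 3 unsigned ints, plus a header of
// two size_t fields followed by the 5 unique values.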
template<typename T>
SharedCudaPtr<char> UniqueEncoding::CompressUnique(SharedCudaPtr<T> data, SharedCudaPtr<T> unique)
{
if(data->size() <= 0)
return CudaPtr<char>::make_shared();
// CALCULATE SIZES
int uniqueSize = unique->size(); // how many distinct items to encode
int dataSize = data->size(); // size of data to compress
int bitsNeeded = ALT_BITLEN(uniqueSize-1); // min bits needed to encode
int outputItemBitSize = 8 * sizeof(unsigned int); // how many bits are in output unit
int dataPerOutputCnt = outputItemBitSize / bitsNeeded; // how many items will be encoded in single unit
int outputSize = (dataSize + dataPerOutputCnt - 1) / dataPerOutputCnt; // output units cnt
int outputSizeInBytes = outputSize * sizeof(unsigned int);
int uniqueSizeInBytes = unique->size() * sizeof(T); // size in bytes of unique array
int headerSize = uniqueSizeInBytes + 2 * sizeof(size_t);// size of data header
// COMPRESS UNIQUE
auto result = CudaPtr<char>::make_shared(outputSizeInBytes + headerSize);
this->_policy.setSize(outputSize);
hipLaunch(this->_policy, _compressUniqueKernel<T>,
data->get(),
data->size(),
bitsNeeded,
dataPerOutputCnt,
unique->get(),
unique->size(),
(unsigned int*)(result->get()+headerSize),
outputSize);
// ATTACH HEADER
size_t sizes[2];
sizes[0] = uniqueSize;
sizes[1] = dataSize;
CUDA_CALL( hipMemcpy(result->get(), &sizes, 2*sizeof(size_t), CPY_HTD) );
CUDA_CALL( hipMemcpy(result->get()+2*sizeof(size_t), unique->get(), uniqueSizeInBytes, CPY_DTD) );
// printf("COMPRESS UNIQUE - uniqueSize = %d, dataSize = %d\n", uniqueSize, dataSize);
hipDeviceSynchronize();
CUDA_ASSERT_RETURN( hipGetLastError() );
return result;
}
template<typename T>
SharedCudaPtrVector<char> UniqueEncoding::Encode(SharedCudaPtr<T> data)
{
CUDA_ASSERT_RETURN( hipGetLastError() );
LOG4CPLUS_INFO_FMT(_logger, "UNIQUE encoding START: data size = %lu", data->size());
if(data->size() <= 0)
return SharedCudaPtrVector<char>{ CudaPtr<char>::make_shared(), CudaPtr<char>::make_shared() };
auto unique = FindUnique(data);
// CALCULATE SIZES
int uniqueSize = unique->size(); // how many distinct items to encode
int dataSize = data->size(); // size of data to compress
int bitsNeeded = ALT_BITLEN(uniqueSize-1); // min bits needed to encode
int outputItemBitSize = 8 * sizeof(unsigned int); // how many bits are in output unit
int dataPerOutputCnt = outputItemBitSize / bitsNeeded; // how many items will be encoded in single unit
int outputSize = (dataSize + dataPerOutputCnt - 1) / dataPerOutputCnt; // output units cnt
int outputSizeInBytes = outputSize * sizeof(unsigned int);
// COMPRESS UNIQUE
auto resultData = CudaPtr<char>::make_shared(outputSizeInBytes);
this->_policy.setSize(outputSize);
hipLaunch(this->_policy, _compressUniqueKernel<T>,
data->get(),
data->size(),
bitsNeeded,
dataPerOutputCnt,
unique->get(),
unique->size(),
(unsigned int*)resultData->get(),
outputSize);
hipDeviceSynchronize();
size_t metadataSize = unique->size() * sizeof(T) + 2*sizeof(size_t);
auto resultMetadata = CudaPtr<char>::make_shared(metadataSize);
size_t sizes[2] {unique->size(), data->size()};
CUDA_CALL( hipMemcpy(resultMetadata->get(), sizes, 2*sizeof(size_t), CPY_HTD) );
CUDA_CALL( hipMemcpy(
resultMetadata->get()+2*sizeof(size_t),
unique->get(),
unique->size()*sizeof(T),
CPY_DTD) );
CUDA_ASSERT_RETURN( hipGetLastError() );
LOG4CPLUS_INFO(_logger, "UNIQUE encoding END");
return SharedCudaPtrVector<char> {resultMetadata, resultData};
}
template<typename T>
__global__ void _decompressUniqueKernel(
unsigned int* data,
T* unique,
const int size,
T* output,
const int outputSize,
const int bitsNeeded,
const int dataPerUnitCnt
)
{
unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x; // output index
if(idx >= size) return;
for(int i=0; i<dataPerUnitCnt; i++)
{
int index = ReadNbitIntValFromWord(bitsNeeded, i, data[idx]);
T value = unique[index];
int outputIndex = idx * dataPerUnitCnt + i;
if(outputIndex < outputSize)
output[outputIndex] = value;
}
}
// UNIQUE DECOMPRESSION ALGORITHM
//
// 1. First we read the number of unique values and the output size, stored as two size_t values at the start of the data
// 2. Having the number of unique values, we restore the original array of unique values
// 3. Then we count how many unsigned int units fit in the encoded data and what is the minimal number
//    of bits needed to encode the number of unique values. Using that, we can compute how many values
//    are stored in a single unsigned int.
// 4. Then, in parallel, we take each unsigned int and decode each of its N-bit blocks as an index
//    into the unique values array, emitting the value found at that index.
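//
// Illustrative sketch (assuming ReadNbitIntValFromWord extracts the i-th bitsNeeded-wide block):
// with uniqueCnt = 5 we get bitsNeeded = 3 and dataPerUnitCnt = 32 / 3 = 10, so one
// 32-bit unit yields 10 indices; a decoded index of 2 maps back to unique[2].
// Trailing blocks that would fall past outputSize are skipped by the bounds check
// in _decompressUniqueKernel.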
template<typename T>
SharedCudaPtr<T> UniqueEncoding::DecompressUnique(SharedCudaPtr<char> data)
{
if(data->size() <= 0)
return CudaPtr<T>::make_shared();
// GET SIZES
size_t sizes[2];
CUDA_CALL( hipMemcpy(&sizes[0], data->get(), 2*sizeof(size_t), CPY_DTH) );
int uniqueCnt = sizes[0];
int outputSize = sizes[1];
// printf("UNIQUE ENCODING - uniqueCnt = %d\n", uniqueCnt);
// printf("UNIQUE ENCODING - outputSize = %d\n", outputSize);
// GET UNIQUE VALUES DATA
auto unique = CudaPtr<T>::make_shared(uniqueCnt);
T* uniqueDataPtr = (T*)(data->get()+2*sizeof(size_t));
unique->fill(uniqueDataPtr, uniqueCnt);
// CALCULATE SIZES
int bitsNeeded = ALT_BITLEN(uniqueCnt-1); // min bit cnt to encode unique values
int unitSize = sizeof(unsigned int); // single unit size in bytes
int unitBitSize = 8 * sizeof(unsigned int); // single unit size in bits
int dataPerUnitCnt = unitBitSize / bitsNeeded; // how many items are in one unit
int unitCnt = data->size() / unitSize; // how many units are in data
int uniqueSizeInBytes = uniqueCnt * sizeof(T); // size in bytes of unique array
int headerSize = uniqueSizeInBytes + 2 * sizeof(size_t);// size of data header
// DECOMPRESS DATA USING UNIQUE VALUES
auto result = CudaPtr<T>::make_shared(outputSize);
this->_policy.setSize(unitCnt);
hipLaunch(this->_policy, _decompressUniqueKernel<T>,
(unsigned int*)(data->get()+headerSize),
(T*)(data->get()+2*sizeof(size_t)),
unitCnt,
result->get(),
result->size(),
bitsNeeded,
dataPerUnitCnt);
hipDeviceSynchronize();
CUDA_ASSERT_RETURN( hipGetLastError() );
return result;
}
template<typename T>
SharedCudaPtr<T> UniqueEncoding::Decode(SharedCudaPtrVector<char> input)
{
LOG4CPLUS_INFO_FMT(
_logger,
"UNIQUE decoding START: input[0] size = %lu, input[1] size = %lu",
input[0]->size(), input[1]->size()
);
if(input[1]->size() <= 0)
return CudaPtr<T>::make_shared();
auto metadata = input[0];
auto data = input[1];
size_t sizes[2];
CUDA_CALL( hipMemcpy(sizes, metadata->get(), 2*sizeof(size_t), CPY_DTH) );
size_t uniqueSize = sizes[0];
size_t outputSize = sizes[1];
// CALCULATE SIZES
int bitsNeeded = ALT_BITLEN(uniqueSize-1); // min bit cnt to encode unique values
int unitSize = sizeof(unsigned int); // single unit size in bytes
int unitBitSize = 8 * sizeof(unsigned int); // single unit size in bits
int dataPerUnitCnt = unitBitSize / bitsNeeded; // how many items are in one unit
int unitCnt = data->size() / unitSize; // how many units are in data
// DECOMPRESS DATA USING UNIQUE VALUES
auto result = CudaPtr<T>::make_shared(outputSize);
this->_policy.setSize(unitCnt);
hipLaunch(this->_policy, _decompressUniqueKernel<T>,
(unsigned int*)data->get(),
(T*)(metadata->get()+2*sizeof(size_t)),
unitCnt,
result->get(),
result->size(),
bitsNeeded,
dataPerUnitCnt);
hipDeviceSynchronize();
CUDA_ASSERT_RETURN( hipGetLastError() );
LOG4CPLUS_INFO(_logger, "UNIQUE decoding END");
return result;
}
size_t UniqueEncoding::GetMetadataSize(SharedCudaPtr<char> data, DataType type)
{
if(data->size() <= 0) return 0;
auto unique = FindUnique(data, type);
return 2*sizeof(size_t) + unique->size();
}
size_t UniqueEncoding::GetCompressedSize(SharedCudaPtr<char> data, DataType type)
{
if(data->size() <= 0) return 0;
auto unique = FindUnique(data, type);
int typeSize = GetDataTypeSize(type); // size of used type
int uniqueSize = unique->size() / typeSize; // how many distinct items to encode
int dataSize = data->size() / typeSize; // number of elements to compress
int bitsNeeded = ALT_BITLEN(uniqueSize-1); // min bits needed to encode
int outputItemBitSize = 8 * sizeof(unsigned int); // how many bits are in output unit
int dataPerOutputCnt = outputItemBitSize / bitsNeeded; // how many items will be encoded in single unit
int outputSize = (dataSize + dataPerOutputCnt - 1) / dataPerOutputCnt; // output units cnt
int outputSizeInBytes = outputSize * sizeof(unsigned int); // output size after compression
return outputSizeInBytes;
}
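// Illustrative arithmetic for GetCompressedSize (hypothetical figures, assuming ALT_BITLEN(n) is the bit length of n):
// for d_int data with 1000 elements (4000 bytes) and 50 distinct values,
// bitsNeeded = ALT_BITLEN(49) = 6, dataPerOutputCnt = 32 / 6 = 5,
// outputSize = (1000 + 5 - 1) / 5 = 200 units, so GetCompressedSize returns 800 bytes;
// the header is accounted for separately by GetMetadataSize.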
#define UNIQUE_ENCODING_SPEC(X) \
template SharedCudaPtr<char> UniqueEncoding::CompressUnique<X>(SharedCudaPtr<X>, SharedCudaPtr<X>); \
template SharedCudaPtr<X> UniqueEncoding::DecompressUnique<X>(SharedCudaPtr<char>); \
template SharedCudaPtrVector<char> UniqueEncoding::Encode<X>(SharedCudaPtr<X>); \
template SharedCudaPtr<X> UniqueEncoding::Decode<X>(SharedCudaPtrVector<char>);
FOR_EACH(UNIQUE_ENCODING_SPEC, char, short, double, float, int, long, long long, unsigned int)
} /* namespace ddj */
|
unique_encoding.cu
|
/*
* unique_encoding.cu
*
* Created on: 8 paź 2015
* Author: Karol Dzitkowski
*/
#include "compression/unique/unique_encoding.hpp"
#include "core/cuda_macros.cuh"
#include "core/not_implemented_exception.hpp"
#include "core/cuda_launcher.cuh"
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <thrust/unique.h>
namespace ddj {
SharedCudaPtr<char> UniqueEncoding::FindUnique(SharedCudaPtr<char> data, DataType type)
{
switch(type)
{
case DataType::d_int:
return boost::reinterpret_pointer_cast<CudaPtr<char>>(
FindUnique(boost::reinterpret_pointer_cast<CudaPtr<int>>(data)));
case DataType::d_float:
return boost::reinterpret_pointer_cast<CudaPtr<char>>(
FindUnique(boost::reinterpret_pointer_cast<CudaPtr<float>>(data)));
default:
throw NotImplementedException("No UniqueEncoding::FindUnique implementation for that type");
}
}
template<typename T>
SharedCudaPtr<T> UniqueEncoding::FindUnique(SharedCudaPtr<T> data)
{
thrust::device_ptr<T> dataPtr(data->get());
thrust::device_vector<T> dataVector(dataPtr, dataPtr+data->size());
thrust::sort(dataVector.begin(), dataVector.end());
auto end = thrust::unique(dataVector.begin(), dataVector.end());
int size = end - dataVector.begin();
auto result = CudaPtr<T>::make_shared(size);
result->fill(dataVector.data().get(), size);
return result;
}
template<typename T>
__global__ void _compressUniqueKernel(
T* data,
int dataSize,
int bitsNeeded,
int dataPerOutputCnt,
T* uniqueValues,
int freqCnt,
unsigned int* output,
int outputSize)
{
unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x; // output index
if(idx >= outputSize) return;
unsigned int result = 0;
for(int i = 0; i < dataPerOutputCnt; i++)
{
// guard the last, partially filled output unit against reading past dataSize
if(idx * dataPerOutputCnt + i >= dataSize) break;
T value = data[idx * dataPerOutputCnt + i];
for(int j = 0; j < freqCnt; j++)
{
if(value == uniqueValues[j])
{
result = SaveNbitIntValToWord(bitsNeeded, i, j, result);
}
}
}
output[idx] = result;
}
// UNIQUE COMPRESSION ALGORITHM
//
// 1. First we save the size of the unique values array and the size of the data to compress
// 2. Next we store the array of unique values
// 3. Then we compress all the data (containing these unique values) using N bits, where
//    N is the smallest number of bits that can encode (unique values array length - 1).
//    This way each value is encoded as its index in the unique values array. We store the data in an
//    array of unsigned int values, packing as many values as possible into each unsigned int.
//    For example, if we need 4 bits to encode a single value, we put 8 values in one unsigned int
//    and store it in the output table.
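//
// Usage sketch (hypothetical caller; assumes the CudaPtr/UniqueEncoding API declared in
// compression/unique/unique_encoding.hpp):
//
//   UniqueEncoding enc;
//   SharedCudaPtr<int> data = ...;            // device-resident input
//   auto parts = enc.Encode(data);            // parts[0] = metadata, parts[1] = packed indices
//   auto restored = enc.Decode<int>(parts);   // should reproduce the original data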
template<typename T>
SharedCudaPtr<char> UniqueEncoding::CompressUnique(SharedCudaPtr<T> data, SharedCudaPtr<T> unique)
{
if(data->size() <= 0)
return CudaPtr<char>::make_shared();
// CALCULATE SIZES
int uniqueSize = unique->size(); // how many distinct items to encode
int dataSize = data->size(); // size of data to compress
int bitsNeeded = ALT_BITLEN(uniqueSize-1); // min bits needed to encode
int outputItemBitSize = 8 * sizeof(unsigned int); // how many bits are in output unit
int dataPerOutputCnt = outputItemBitSize / bitsNeeded; // how many items will be encoded in single unit
int outputSize = (dataSize + dataPerOutputCnt - 1) / dataPerOutputCnt; // output units cnt
int outputSizeInBytes = outputSize * sizeof(unsigned int);
int uniqueSizeInBytes = unique->size() * sizeof(T); // size in bytes of unique array
int headerSize = uniqueSizeInBytes + 2 * sizeof(size_t);// size of data header
// COMPRESS UNIQUE
auto result = CudaPtr<char>::make_shared(outputSizeInBytes + headerSize);
this->_policy.setSize(outputSize);
cudaLaunch(this->_policy, _compressUniqueKernel<T>,
data->get(),
data->size(),
bitsNeeded,
dataPerOutputCnt,
unique->get(),
unique->size(),
(unsigned int*)(result->get()+headerSize),
outputSize);
// ATTACH HEADER
size_t sizes[2];
sizes[0] = uniqueSize;
sizes[1] = dataSize;
CUDA_CALL( cudaMemcpy(result->get(), &sizes, 2*sizeof(size_t), CPY_HTD) );
CUDA_CALL( cudaMemcpy(result->get()+2*sizeof(size_t), unique->get(), uniqueSizeInBytes, CPY_DTD) );
// printf("COMPRESS UNIQUE - uniqueSize = %d, dataSize = %d\n", uniqueSize, dataSize);
cudaDeviceSynchronize();
CUDA_ASSERT_RETURN( cudaGetLastError() );
return result;
}
template<typename T>
SharedCudaPtrVector<char> UniqueEncoding::Encode(SharedCudaPtr<T> data)
{
CUDA_ASSERT_RETURN( cudaGetLastError() );
LOG4CPLUS_INFO_FMT(_logger, "UNIQUE encoding START: data size = %lu", data->size());
if(data->size() <= 0)
return SharedCudaPtrVector<char>{ CudaPtr<char>::make_shared(), CudaPtr<char>::make_shared() };
auto unique = FindUnique(data);
// CALCULATE SIZES
int uniqueSize = unique->size(); // how many distinct items to encode
int dataSize = data->size(); // size of data to compress
int bitsNeeded = ALT_BITLEN(uniqueSize-1); // min bits needed to encode
int outputItemBitSize = 8 * sizeof(unsigned int); // how many bits are in output unit
int dataPerOutputCnt = outputItemBitSize / bitsNeeded; // how many items will be encoded in single unit
int outputSize = (dataSize + dataPerOutputCnt - 1) / dataPerOutputCnt; // output units cnt
int outputSizeInBytes = outputSize * sizeof(unsigned int);
// COMPRESS UNIQUE
auto resultData = CudaPtr<char>::make_shared(outputSizeInBytes);
this->_policy.setSize(outputSize);
cudaLaunch(this->_policy, _compressUniqueKernel<T>,
data->get(),
data->size(),
bitsNeeded,
dataPerOutputCnt,
unique->get(),
unique->size(),
(unsigned int*)resultData->get(),
outputSize);
cudaDeviceSynchronize();
size_t metadataSize = unique->size() * sizeof(T) + 2*sizeof(size_t);
auto resultMetadata = CudaPtr<char>::make_shared(metadataSize);
size_t sizes[2] {unique->size(), data->size()};
CUDA_CALL( cudaMemcpy(resultMetadata->get(), sizes, 2*sizeof(size_t), CPY_HTD) );
CUDA_CALL( cudaMemcpy(
resultMetadata->get()+2*sizeof(size_t),
unique->get(),
unique->size()*sizeof(T),
CPY_DTD) );
CUDA_ASSERT_RETURN( cudaGetLastError() );
LOG4CPLUS_INFO(_logger, "UNIQUE encoding END");
return SharedCudaPtrVector<char> {resultMetadata, resultData};
}
template<typename T>
__global__ void _decompressUniqueKernel(
unsigned int* data,
T* unique,
const int size,
T* output,
const int outputSize,
const int bitsNeeded,
const int dataPerUnitCnt
)
{
unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x; // output index
if(idx >= size) return;
for(int i=0; i<dataPerUnitCnt; i++)
{
int index = ReadNbitIntValFromWord(bitsNeeded, i, data[idx]);
T value = unique[index];
int outputIndex = idx * dataPerUnitCnt + i;
if(outputIndex < outputSize)
output[outputIndex] = value;
}
}
// UNIQUE DECOMPRESSION ALGORITHM
//
// 1. First we read the number of unique values and the output size, stored as two size_t values at the start of the data
// 2. Having the number of unique values, we restore the original array of unique values
// 3. Then we count how many unsigned int units fit in the encoded data and what is the minimal number
//    of bits needed to encode the number of unique values. Using that, we can compute how many values
//    are stored in a single unsigned int.
// 4. Then, in parallel, we take each unsigned int and decode each of its N-bit blocks as an index
//    into the unique values array, emitting the value found at that index.
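//
// Illustrative numbers (assuming ALT_BITLEN(n) is the bit length of n): a metadata block of
// two size_t values followed by uniqueCnt = 200 ints gives bitsNeeded = ALT_BITLEN(199) = 8,
// so dataPerUnitCnt = 32 / 8 = 4 and every 32-bit unit of the compressed stream
// expands to 4 decoded values.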
template<typename T>
SharedCudaPtr<T> UniqueEncoding::DecompressUnique(SharedCudaPtr<char> data)
{
if(data->size() <= 0)
return CudaPtr<T>::make_shared();
// GET SIZES
size_t sizes[2];
CUDA_CALL( cudaMemcpy(&sizes[0], data->get(), 2*sizeof(size_t), CPY_DTH) );
int uniqueCnt = sizes[0];
int outputSize = sizes[1];
// printf("UNIQUE ENCODING - uniqueCnt = %d\n", uniqueCnt);
// printf("UNIQUE ENCODING - outputSize = %d\n", outputSize);
// GET UNIQUE VALUES DATA
auto unique = CudaPtr<T>::make_shared(uniqueCnt);
T* uniqueDataPtr = (T*)(data->get()+2*sizeof(size_t));
unique->fill(uniqueDataPtr, uniqueCnt);
// CALCULATE SIZES
int bitsNeeded = ALT_BITLEN(uniqueCnt-1); // min bit cnt to encode unique values
int unitSize = sizeof(unsigned int); // single unit size in bytes
int unitBitSize = 8 * sizeof(unsigned int); // single unit size in bits
int dataPerUnitCnt = unitBitSize / bitsNeeded; // how many items are in one unit
int unitCnt = data->size() / unitSize; // how many units are in data
int uniqueSizeInBytes = uniqueCnt * sizeof(T); // size in bytes of unique array
int headerSize = uniqueSizeInBytes + 2 * sizeof(size_t);// size of data header
// DECOMPRESS DATA USING UNIQUE VALUES
auto result = CudaPtr<T>::make_shared(outputSize);
this->_policy.setSize(unitCnt);
cudaLaunch(this->_policy, _decompressUniqueKernel<T>,
(unsigned int*)(data->get()+headerSize),
(T*)(data->get()+2*sizeof(size_t)),
unitCnt,
result->get(),
result->size(),
bitsNeeded,
dataPerUnitCnt);
cudaDeviceSynchronize();
CUDA_ASSERT_RETURN( cudaGetLastError() );
return result;
}
template<typename T>
SharedCudaPtr<T> UniqueEncoding::Decode(SharedCudaPtrVector<char> input)
{
LOG4CPLUS_INFO_FMT(
_logger,
"UNIQUE decoding START: input[0] size = %lu, input[1] size = %lu",
input[0]->size(), input[1]->size()
);
if(input[1]->size() <= 0)
return CudaPtr<T>::make_shared();
auto metadata = input[0];
auto data = input[1];
size_t sizes[2];
CUDA_CALL( cudaMemcpy(sizes, metadata->get(), 2*sizeof(size_t), CPY_DTH) );
size_t uniqueSize = sizes[0];
size_t outputSize = sizes[1];
// CALCULATE SIZES
int bitsNeeded = ALT_BITLEN(uniqueSize-1); // min bit cnt to encode unique values
int unitSize = sizeof(unsigned int); // single unit size in bytes
int unitBitSize = 8 * sizeof(unsigned int); // single unit size in bits
int dataPerUnitCnt = unitBitSize / bitsNeeded; // how many items are in one unit
int unitCnt = data->size() / unitSize; // how many units are in data
// DECOMPRESS DATA USING UNIQUE VALUES
auto result = CudaPtr<T>::make_shared(outputSize);
this->_policy.setSize(unitCnt);
cudaLaunch(this->_policy, _decompressUniqueKernel<T>,
(unsigned int*)data->get(),
(T*)(metadata->get()+2*sizeof(size_t)),
unitCnt,
result->get(),
result->size(),
bitsNeeded,
dataPerUnitCnt);
cudaDeviceSynchronize();
CUDA_ASSERT_RETURN( cudaGetLastError() );
LOG4CPLUS_INFO(_logger, "UNIQUE decoding END");
return result;
}
size_t UniqueEncoding::GetMetadataSize(SharedCudaPtr<char> data, DataType type)
{
if(data->size() <= 0) return 0;
auto unique = FindUnique(data, type);
return 2*sizeof(size_t) + unique->size();
}
size_t UniqueEncoding::GetCompressedSize(SharedCudaPtr<char> data, DataType type)
{
if(data->size() <= 0) return 0;
auto unique = FindUnique(data, type);
int typeSize = GetDataTypeSize(type); // size of used type
int uniqueSize = unique->size() / typeSize; // how many distinct items to encode
int dataSize = data->size() / typeSize; // number of elements to compress
int bitsNeeded = ALT_BITLEN(uniqueSize-1); // min bits needed to encode
int outputItemBitSize = 8 * sizeof(unsigned int); // how many bits are in output unit
int dataPerOutputCnt = outputItemBitSize / bitsNeeded; // how many items will be encoded in single unit
int outputSize = (dataSize + dataPerOutputCnt - 1) / dataPerOutputCnt; // output units cnt
int outputSizeInBytes = outputSize * sizeof(unsigned int); // output size after compression
return outputSizeInBytes;
}
#define UNIQUE_ENCODING_SPEC(X) \
template SharedCudaPtr<char> UniqueEncoding::CompressUnique<X>(SharedCudaPtr<X>, SharedCudaPtr<X>); \
template SharedCudaPtr<X> UniqueEncoding::DecompressUnique<X>(SharedCudaPtr<char>); \
template SharedCudaPtrVector<char> UniqueEncoding::Encode<X>(SharedCudaPtr<X>); \
template SharedCudaPtr<X> UniqueEncoding::Decode<X>(SharedCudaPtrVector<char>);
FOR_EACH(UNIQUE_ENCODING_SPEC, char, short, double, float, int, long, long long, unsigned int)
} /* namespace ddj */
|
d684e1753c5931a485159ebe530830d7f1c9cef9.hip
|
// !!! This is a file automatically generated by hipify!!!
// includes, cuda
#include <hip/hip_runtime.h>
#include <cudaDefs.h>
#include <imageManager.h>
#include <stdlib.h>
#include "imageKernels.cuh"
#define BLOCK_DIM 32
#define DATA_TYPE unsigned char
hipError_t error = hipSuccess;
hipDeviceProp_t deviceProp = hipDeviceProp_t();
typedef struct image{
DATA_TYPE* dData;
unsigned int Width;
unsigned int Height;
unsigned int BPP; //Bits Per Pixel = 8, 16, 24, or 32 bit
unsigned int Pitch;
} image_t;
KernelSetting ks;
KernelSetting ks2;
int* dResultsDataR = 0;
int* dResultsDataG = 0;
int* dResultsDataB = 0;
int* dResultsDataA = 0;
int* dResultsMaxR = 0;
int* dResultsMaxG = 0;
int* dResultsMaxB = 0;
int* dResultsMaxA = 0;
DATA_TYPE* dOutputDataR = 0;
DATA_TYPE* dOutputDataG = 0;
DATA_TYPE* dOutputDataB = 0;
DATA_TYPE* dOutputDataA = 0;
image loadSourceImage(const char* imageFileName)
{
FreeImage_Initialise();
FIBITMAP* tmp = ImageManager::GenericLoader(imageFileName, 0);
tmp = FreeImage_ConvertTo8Bits(tmp); // Large image fix
image image;
image.dData = 0;
image.Width = FreeImage_GetWidth(tmp);
image.Height = FreeImage_GetHeight(tmp);
image.BPP = FreeImage_GetBPP(tmp);
image.Pitch = FreeImage_GetPitch(tmp); // FreeImage aligns row data, so you have to use the pitch instead of the width
checkCudaErrors(hipMallocManaged((void**)&image.dData, image.Pitch * image.Height * image.BPP / 8));
checkCudaErrors(hipMemcpy(image.dData, FreeImage_GetBits(tmp), image.Pitch * image.Height * image.BPP / 8, hipMemcpyHostToDevice));
//checkHostMatrix<DATA_TYPE>(FreeImage_GetBits(tmp), image.Pitch, image.Height, image.Width, "%hhu ", "Result of Linear Pitch Text");
//checkDeviceMatrix<DATA_TYPE>(image.dData, image.Pitch, image.Height, image.Width, "%hhu ", "Result of Linear Pitch Text");
FreeImage_Unload(tmp);
//FreeImage_DeInitialise();
return image;
}
void releaseMemory(image src)
{
if (src.dData != 0)
hipFree(src.dData);
if (dResultsDataR)
hipFree(dResultsDataR);
if (dResultsDataG)
hipFree(dResultsDataG);
if (dResultsDataB)
hipFree(dResultsDataB);
if (dResultsDataA)
hipFree(dResultsDataA);
if (dOutputDataR)
hipFree(dOutputDataR);
if (dOutputDataG)
hipFree(dOutputDataG);
if (dOutputDataB)
hipFree(dOutputDataB);
if (dOutputDataA)
hipFree(dOutputDataA);
FreeImage_DeInitialise();
}
__global__ void zeroKernel(
int* src, const unsigned int len)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < len)
src[i] = 0;
}
__global__ void maxKernel(
int* src, const unsigned int len, int* max)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < len)
atomicMax(max, src[i]);
}
__global__ void histogramKernel(
DATA_TYPE* src, const unsigned int channel,
const unsigned int width, const unsigned int height, const unsigned int pitch,
int* result)
{
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if (col < width && row < height)
{
int c = src[col + row * pitch] & channel;
atomicAdd(&result[c], 1);
}
}
__global__ void histogram2DKernel(
int* src, const unsigned int width, const unsigned int height, const int* limit, DATA_TYPE* result)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x < width)
{
int v = src[x];
double val = v / (double)*limit;
v = height - height * val;
for (int y = height - 1; y >= 0; y--)
result[x + y * width] = y >= v ? 255 : 0;
}
}
void checkError(const char* prefix)
{
hipDeviceSynchronize();
auto ex = hipGetLastError();
if (ex != hipSuccess)
printf("Error at %s: %s\n", prefix, hipGetErrorString(ex));
}
void saveChannel(std::string name, const int size, const int limit, DATA_TYPE* data)
{
BYTE* result = (BYTE*)malloc(size * limit);
checkCudaErrors(hipMemcpy(result, data, size * limit, hipMemcpyDeviceToHost));
//checkHostMatrix(result, size, limit, size, "%d ");
FIBITMAP* img = FreeImage_ConvertFromRawBits(result, limit, limit, size, 8, 0xFF, 0xFF, 0xFF);
FreeImage_FlipVertical(img);
ImageManager::GenericWriter(img, ("D:\\Documents\\Projekty\\Škola\\PA2\\cv9\\assets\\"+name+".png").c_str(), 0);
FreeImage_Unload(img);
SAFE_DELETE(result);
}
int main(int argc, char* argv[])
{
initializeCUDA(deviceProp);
//ToDo: Edit to custom location
image src = loadSourceImage("D:\\Documents\\Projekty\\Škola\\PA2\\cv9\\assets\\lena.png");
//image src = loadSourceImage("D:\\Documents\\Projekty\\Škola\\PA2\\cv9\\assets\\RGB.png");
printf("Loaded\n");
ks.dimBlock = dim3(BLOCK_DIM, BLOCK_DIM, 1);
ks.blockSize = BLOCK_DIM * BLOCK_DIM;
ks.dimGrid = dim3((src.Width + BLOCK_DIM - 1) / BLOCK_DIM, (src.Height + BLOCK_DIM - 1) / BLOCK_DIM, 1);
ks2.dimBlock = dim3(255, 1, 1);
ks2.blockSize = 255;
ks2.dimGrid = dim3(1, 1, 1);
const int ch_size = 255 * sizeof(char);
const int ch_limit = 100;
const int cmp_size = 255 * sizeof(int);
hipMallocManaged((void**)&dResultsDataR, cmp_size);
hipMallocManaged((void**)&dResultsDataG, cmp_size);
hipMallocManaged((void**)&dResultsDataB, cmp_size);
//hipMallocManaged((void**)&dResultsDataA, cmp_size);
hipMallocManaged((void**)&dResultsMaxR, sizeof(int));
hipMallocManaged((void**)&dResultsMaxG, sizeof(int));
hipMallocManaged((void**)&dResultsMaxB, sizeof(int));
//hipMallocManaged((void**)&dResultsMaxA, sizeof(int));
hipMallocManaged((void**)&dOutputDataR, ch_size * ch_limit);
hipMallocManaged((void**)&dOutputDataG, ch_size * ch_limit);
hipMallocManaged((void**)&dOutputDataB, ch_size * ch_limit);
//hipMallocManaged((void**)&dOutputDataA, ch_size * ch_limit);
checkError("Malloc");
hipEvent_t start, stop;
float time;
createTimer(&start, &stop, &time);
startTimer(start);
zeroKernel<<<ks2.dimGrid, ks2.blockSize>>>(dResultsDataR, 255); checkError("Z-R");
zeroKernel<<<ks2.dimGrid, ks2.blockSize>>>(dResultsDataG, 255); checkError("Z-G");
zeroKernel<<<ks2.dimGrid, ks2.blockSize>>>(dResultsDataB, 255); checkError("Z-B");
//zeroKernel<<<ks2.dimGrid, ks2.blockSize>>>(dResultsDataA, 255); checkError("Z-A");
histogramKernel<<<ks.dimGrid, ks.dimBlock>>>(src.dData, 224, src.Width, src.Height, src.Pitch, dResultsDataR); checkError("R");
histogramKernel<<<ks.dimGrid, ks.dimBlock>>>(src.dData, 28, src.Width, src.Height, src.Pitch, dResultsDataG); checkError("G");
histogramKernel<<<ks.dimGrid, ks.dimBlock>>>(src.dData, 3, src.Width, src.Height, src.Pitch, dResultsDataB); checkError("B");
//histogramKernel<<<ks.dimGrid, ks.dimBlock>>>(src.dData, 3 << 6, src.Width, src.Height, src.Pitch, dResultsDataA); checkError("A");
maxKernel<<<ks2.dimGrid, ks2.blockSize>>>(dResultsDataR, 255, dResultsMaxR); checkError("M-R");
maxKernel<<<ks2.dimGrid, ks2.blockSize>>>(dResultsDataG, 255, dResultsMaxG); checkError("M-G");
maxKernel<<<ks2.dimGrid, ks2.blockSize>>>(dResultsDataB, 255, dResultsMaxB); checkError("M-B");
//maxKernel<<<ks2.dimGrid, ks2.blockSize>>>(dResultsDataA, 255, dResultsMaxA); checkError("M-A");
histogram2DKernel<<<ks2.dimGrid, ks2.blockSize>>>(dResultsDataR, 255, ch_limit, dResultsMaxR, dOutputDataR); checkError("2D-R");
histogram2DKernel<<<ks2.dimGrid, ks2.blockSize>>>(dResultsDataG, 255, ch_limit, dResultsMaxG, dOutputDataG); checkError("2D-G");
histogram2DKernel<<<ks2.dimGrid, ks2.blockSize>>>(dResultsDataB, 255, ch_limit, dResultsMaxB, dOutputDataB); checkError("2D-B");
//histogram2DKernel<<<ks2.dimGrid, ks2.blockSize>>>(dResultsDataA, 255, ch_limit, dResultsMaxA, dOutputDataA); checkError("2D-A");
stopTimer(start, stop, time);
printf("Time: %f ms\n", time);
saveChannel("R", ch_size, ch_limit, dOutputDataR);
saveChannel("G", ch_size, ch_limit, dOutputDataG);
saveChannel("B", ch_size, ch_limit, dOutputDataB);
//saveChannel("A", ch_size, ch_limit, dOutputDataA);
releaseMemory(src);
}
|
d684e1753c5931a485159ebe530830d7f1c9cef9.cu
|
// includes, cuda
#include <cuda_runtime.h>
#include <cudaDefs.h>
#include <imageManager.h>
#include <stdlib.h>
#include "imageKernels.cuh"
#define BLOCK_DIM 32
#define DATA_TYPE unsigned char
cudaError_t error = cudaSuccess;
cudaDeviceProp deviceProp = cudaDeviceProp();
typedef struct image{
DATA_TYPE* dData;
unsigned int Width;
unsigned int Height;
unsigned int BPP; //Bits Per Pixel = 8, 16, 24, or 32 bit
unsigned int Pitch;
} image_t;
KernelSetting ks;
KernelSetting ks2;
int* dResultsDataR = 0;
int* dResultsDataG = 0;
int* dResultsDataB = 0;
int* dResultsDataA = 0;
int* dResultsMaxR = 0;
int* dResultsMaxG = 0;
int* dResultsMaxB = 0;
int* dResultsMaxA = 0;
DATA_TYPE* dOutputDataR = 0;
DATA_TYPE* dOutputDataG = 0;
DATA_TYPE* dOutputDataB = 0;
DATA_TYPE* dOutputDataA = 0;
image loadSourceImage(const char* imageFileName)
{
FreeImage_Initialise();
FIBITMAP* tmp = ImageManager::GenericLoader(imageFileName, 0);
tmp = FreeImage_ConvertTo8Bits(tmp); // Large image fix
image image;
image.dData = 0;
image.Width = FreeImage_GetWidth(tmp);
image.Height = FreeImage_GetHeight(tmp);
image.BPP = FreeImage_GetBPP(tmp);
image.Pitch = FreeImage_GetPitch(tmp); // FreeImage aligns row data, so you have to use the pitch instead of the width
checkCudaErrors(cudaMallocManaged((void**)&image.dData, image.Pitch * image.Height * image.BPP / 8));
checkCudaErrors(cudaMemcpy(image.dData, FreeImage_GetBits(tmp), image.Pitch * image.Height * image.BPP / 8, cudaMemcpyHostToDevice));
//checkHostMatrix<DATA_TYPE>(FreeImage_GetBits(tmp), image.Pitch, image.Height, image.Width, "%hhu ", "Result of Linear Pitch Text");
//checkDeviceMatrix<DATA_TYPE>(image.dData, image.Pitch, image.Height, image.Width, "%hhu ", "Result of Linear Pitch Text");
FreeImage_Unload(tmp);
//FreeImage_DeInitialise();
return image;
}
void releaseMemory(image src)
{
if (src.dData != 0)
cudaFree(src.dData);
if (dResultsDataR)
cudaFree(dResultsDataR);
if (dResultsDataG)
cudaFree(dResultsDataG);
if (dResultsDataB)
cudaFree(dResultsDataB);
if (dResultsDataA)
cudaFree(dResultsDataA);
if (dOutputDataR)
cudaFree(dOutputDataR);
if (dOutputDataG)
cudaFree(dOutputDataG);
if (dOutputDataB)
cudaFree(dOutputDataB);
if (dOutputDataA)
cudaFree(dOutputDataA);
FreeImage_DeInitialise();
}
__global__ void zeroKernel(
int* src, const unsigned int len)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < len)
src[i] = 0;
}
__global__ void maxKernel(
int* src, const unsigned int len, int* max)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < len)
atomicMax(max, src[i]);
}
__global__ void histogramKernel(
DATA_TYPE* src, const unsigned int channel,
const unsigned int width, const unsigned int height, const unsigned int pitch,
int* result)
{
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if (col < width && row < height)
{
int c = src[col + row * pitch] & channel;
atomicAdd(&result[c], 1);
}
}
__global__ void histogram2DKernel(
int* src, const unsigned int width, const unsigned int height, const int* limit, DATA_TYPE* result)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x < width)
{
int v = src[x];
double val = v / (double)*limit;
v = height - height * val;
for (int y = height - 1; y >= 0; y--)
result[x + y * width] = y >= v ? 255 : 0;
}
}
void checkError(const char* prefix)
{
cudaDeviceSynchronize();
auto ex = cudaGetLastError();
if (ex != cudaSuccess)
printf("Error at %s: %s\n", prefix, cudaGetErrorString(ex));
}
void saveChannel(std::string name, const int size, const int limit, DATA_TYPE* data)
{
BYTE* result = (BYTE*)malloc(size * limit);
checkCudaErrors(cudaMemcpy(result, data, size * limit, cudaMemcpyDeviceToHost));
//checkHostMatrix(result, size, limit, size, "%d ");
FIBITMAP* img = FreeImage_ConvertFromRawBits(result, limit, limit, size, 8, 0xFF, 0xFF, 0xFF);
FreeImage_FlipVertical(img);
ImageManager::GenericWriter(img, ("D:\\Documents\\Projekty\\Škola\\PA2\\cv9\\assets\\"+name+".png").c_str(), 0);
FreeImage_Unload(img);
SAFE_DELETE(result);
}
int main(int argc, char* argv[])
{
initializeCUDA(deviceProp);
//ToDo: Edit to custom location
image src = loadSourceImage("D:\\Documents\\Projekty\\Škola\\PA2\\cv9\\assets\\lena.png");
//image src = loadSourceImage("D:\\Documents\\Projekty\\Škola\\PA2\\cv9\\assets\\RGB.png");
printf("Loaded\n");
ks.dimBlock = dim3(BLOCK_DIM, BLOCK_DIM, 1);
ks.blockSize = BLOCK_DIM * BLOCK_DIM;
ks.dimGrid = dim3((src.Width + BLOCK_DIM - 1) / BLOCK_DIM, (src.Height + BLOCK_DIM - 1) / BLOCK_DIM, 1);
ks2.dimBlock = dim3(255, 1, 1);
ks2.blockSize = 255;
ks2.dimGrid = dim3(1, 1, 1);
const int ch_size = 255 * sizeof(char);
const int ch_limit = 100;
const int cmp_size = 255 * sizeof(int);
cudaMallocManaged((void**)&dResultsDataR, cmp_size);
cudaMallocManaged((void**)&dResultsDataG, cmp_size);
cudaMallocManaged((void**)&dResultsDataB, cmp_size);
//cudaMallocManaged((void**)&dResultsDataA, cmp_size);
cudaMallocManaged((void**)&dResultsMaxR, sizeof(int));
cudaMallocManaged((void**)&dResultsMaxG, sizeof(int));
cudaMallocManaged((void**)&dResultsMaxB, sizeof(int));
//cudaMallocManaged((void**)&dResultsMaxA, sizeof(int));
cudaMallocManaged((void**)&dOutputDataR, ch_size * ch_limit);
cudaMallocManaged((void**)&dOutputDataG, ch_size * ch_limit);
cudaMallocManaged((void**)&dOutputDataB, ch_size * ch_limit);
//cudaMallocManaged((void**)&dOutputDataA, ch_size * ch_limit);
checkError("Malloc");
cudaEvent_t start, stop;
float time;
createTimer(&start, &stop, &time);
startTimer(start);
zeroKernel<<<ks2.dimGrid, ks2.blockSize>>>(dResultsDataR, 255); checkError("Z-R");
zeroKernel<<<ks2.dimGrid, ks2.blockSize>>>(dResultsDataG, 255); checkError("Z-G");
zeroKernel<<<ks2.dimGrid, ks2.blockSize>>>(dResultsDataB, 255); checkError("Z-B");
//zeroKernel<<<ks2.dimGrid, ks2.blockSize>>>(dResultsDataA, 255); checkError("Z-A");
histogramKernel<<<ks.dimGrid, ks.dimBlock>>>(src.dData, 224, src.Width, src.Height, src.Pitch, dResultsDataR); checkError("R");
histogramKernel<<<ks.dimGrid, ks.dimBlock>>>(src.dData, 28, src.Width, src.Height, src.Pitch, dResultsDataG); checkError("G");
histogramKernel<<<ks.dimGrid, ks.dimBlock>>>(src.dData, 3, src.Width, src.Height, src.Pitch, dResultsDataB); checkError("B");
//histogramKernel<<<ks.dimGrid, ks.dimBlock>>>(src.dData, 3 << 6, src.Width, src.Height, src.Pitch, dResultsDataA); checkError("A");
maxKernel<<<ks2.dimGrid, ks2.blockSize>>>(dResultsDataR, 255, dResultsMaxR); checkError("M-R");
maxKernel<<<ks2.dimGrid, ks2.blockSize>>>(dResultsDataG, 255, dResultsMaxG); checkError("M-G");
maxKernel<<<ks2.dimGrid, ks2.blockSize>>>(dResultsDataB, 255, dResultsMaxB); checkError("M-B");
//maxKernel<<<ks2.dimGrid, ks2.blockSize>>>(dResultsDataA, 255, dResultsMaxA); checkError("M-A");
histogram2DKernel<<<ks2.dimGrid, ks2.blockSize>>>(dResultsDataR, 255, ch_limit, dResultsMaxR, dOutputDataR); checkError("2D-R");
histogram2DKernel<<<ks2.dimGrid, ks2.blockSize>>>(dResultsDataG, 255, ch_limit, dResultsMaxG, dOutputDataG); checkError("2D-G");
histogram2DKernel<<<ks2.dimGrid, ks2.blockSize>>>(dResultsDataB, 255, ch_limit, dResultsMaxB, dOutputDataB); checkError("2D-B");
//histogram2DKernel<<<ks2.dimGrid, ks2.blockSize>>>(dResultsDataA, 255, ch_limit, dResultsMaxA, dOutputDataA); checkError("2D-A");
stopTimer(start, stop, time);
printf("Time: %f ms\n", time);
saveChannel("R", ch_size, ch_limit, dOutputDataR);
saveChannel("G", ch_size, ch_limit, dOutputDataG);
saveChannel("B", ch_size, ch_limit, dOutputDataB);
//saveChannel("A", ch_size, ch_limit, dOutputDataA);
releaseMemory(src);
}
|
bc6147bbe94193dce5f3365985075fa512c8b8b0.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/LookupTableBag.cu"
#else
void THNN_(LookupTableBag_updateOutput)(
THCState *state,
THCIndexTensor *input,
THCIndexTensor *offsets,
THCTensor *weight,
THCTensor *output,
THCIndexTensor *offset2bag,
int mode,
THCIndexTensor *bag_size)
{
THCUNN_assertSameGPU(state, 5, input, offsets, weight, output, offset2bag);
if (!(THCIndexTensor_(isContiguous)(state, input) &&
THCIndexTensor_(isContiguous)(state, offsets) &&
THCTensor_(isContiguous)(state, weight))) {
THError("Tensors must be contiguous");
}
ptrdiff_t numIndices = THCIndexTensor_(size)(state, input, 0);
ptrdiff_t numBags = THCIndexTensor_(size)(state, offsets, 0);
ptrdiff_t stride = THCTensor_(size)(state, weight, 1);
int64_t *bag_size_data = NULL;
if (bag_size != NULL) {
bag_size_data = THCIndexTensor_(data)(state, bag_size);
}
hipStream_t stream = THCState_getCurrentStream(state);
THLongStorage *inputSize = THCIndexTensor_(newSizeOf)(state, input);
THLongStorage *outputSize = THLongStorage_newWithSize(2);
THLongStorage_data(outputSize)[0] = numBags;
THLongStorage_data(outputSize)[1] = stride;
THCTensor_(resize)(state, output, outputSize, NULL);
THCTensor_(zero)(state, output);
THCIndexTensor_(resize)(state, offset2bag, inputSize, NULL);
THLongStorage_free(inputSize);
THLongStorage_free(outputSize);
dim3 block = dim3(32, 8);
int grid = 1024;
hipLaunchKernelGGL(( cunn_LookupTableBag_updateOutputKernel<real, accreal>), dim3(grid), dim3(block), 0, stream,
THCIndexTensor_(data)(state, input),
THCIndexTensor_(data)(state, offsets),
THCTensor_(data)(state, weight),
THCTensor_(data)(state, output),
THCIndexTensor_(data)(state, offset2bag),
numIndices,
numBags,
stride,
mode,
bag_size_data
);
THCudaCheck(hipGetLastError());
}
void THNN_(LookupTableBag_accGradParameters)(
THCState *state,
THCIndexTensor *input,
THCTensor *gradOutput,
THCTensor *gradWeight,
THCIndexTensor *offset2bag,
THCIndexTensor *count,
THCIndexTensor *sortedIndices,
THCIndexTensor *origIndices,
bool scaleGradByFreq,
int mode,
THCIndexTensor *bag_size,
accreal scale_)
{
real scale = ScalarConvert<accreal, real>::to(scale_);
THCUNN_assertSameGPU(state, 6, input, gradOutput, gradWeight, offset2bag, sortedIndices, origIndices);
gradOutput = THCTensor_(newContiguous)(state, gradOutput);
if (!(THCIndexTensor_(isContiguous)(state, input) &&
THCTensor_(isContiguous)(state, gradWeight) &&
THCIndexTensor_(isContiguous)(state, offset2bag))) {
THError("Tensors must be contiguous");
}
int64_t *bag_size_data = NULL;
if (bag_size != NULL) {
bag_size_data = THCIndexTensor_(data)(state, bag_size);
}
int nDim = THCIndexTensor_(_nDimension)(state, input);
if (THCIndexTensor_(_nDimension)(state, input) != 1 && THCIndexTensor_(_nDimension)(state, input) != 2) {
THCDescBuff s1 = THCIndexTensor_(sizeDesc)(state, input);
THError("input must be a vector or matrix, but is of shape: %s", s1.str);
}
ptrdiff_t numel = THCIndexTensor_(nElement)(state, input);
int64_t stride = THCTensor_(stride)(state, gradWeight, 0);
hipStream_t stream = THCState_getCurrentStream(state);
THLongStorage *inputSize = THCIndexTensor_(newSizeOf)(state, input);
THCIndexTensor_(resize)(state, sortedIndices, inputSize, NULL);
THCIndexTensor_(resize)(state, origIndices, inputSize, NULL);
THLongStorage_free(inputSize);
// Sort the inputs into sortedIndices, carrying the corresponding original
// indices along; we don't need a stable or multidimensional sort, so just
// use Thrust directly
{
THCIndexTensor_(copy)(state, sortedIndices, input);
THCThrustAllocator thrustAlloc(state);
thrust::device_ptr<THCIndex_t>
sortedIndicesIter(THCIndexTensor_(data)(state, sortedIndices));
thrust::device_ptr<THCIndex_t>
origIndicesIter(THCIndexTensor_(data)(state, origIndices));
// Fill sortedOrigIndices with sequential indices
thrust::counting_iterator<THCIndex_t> countIter(TH_INDEX_BASE);
thrust::copy(
#if TORCH_HIP_VERSION >= 7000
thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#endif
countIter, countIter + numel, origIndicesIter);
// Sort; a stable sort is not required
thrust::sort_by_key(
#if TORCH_HIP_VERSION >= 7000
thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#endif
sortedIndicesIter, sortedIndicesIter + numel,
origIndicesIter, ThrustLTOp<int64_t>());
}
THCIndex_t *sortedIndices_data = THCIndexTensor_(data)(state, sortedIndices);
THCIndex_t *origIndices_data = THCIndexTensor_(data)(state, origIndices);
THCIndex_t *offset2bag_data = THCIndexTensor_(data)(state, offset2bag);
THCIndex_t *count_data = NULL;
if (scaleGradByFreq) {
THCIndexTensor_(resizeAs)(state, count, input);
count_data = THCIndexTensor_(data)(state, count);
THCThrustAllocator thrustAlloc(state);
thrust::device_ptr<THCIndex_t> sortedIndices_ptr(sortedIndices_data);
thrust::device_ptr<THCIndex_t> count_ptr(count_data);
// Compute an increasing sequence per unique item in sortedIndices:
// sorted: 2 5 5 5 7 7 8 9 9
// count: 1 1 2 3 1 2 1 1 2
thrust::inclusive_scan_by_key(
#if TORCH_HIP_VERSION >= 7000
thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#endif
sortedIndices_ptr,
sortedIndices_ptr + numel,
thrust::make_constant_iterator(1),
count_ptr
);
// Take the maximum of each count per unique key in reverse:
// sorted: 2 5 5 5 7 7 8 9 9
// count: 1 3 3 3 2 2 1 2 2
thrust::inclusive_scan_by_key(
#if TORCH_HIP_VERSION >= 7000
thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#endif
thrust::make_reverse_iterator(sortedIndices_ptr + numel),
thrust::make_reverse_iterator(sortedIndices_ptr),
thrust::make_reverse_iterator(count_ptr + numel),
thrust::make_reverse_iterator(count_ptr + numel),
thrust::equal_to<int64_t>(),
thrust::maximum<int64_t>()
);
}
dim3 grid(THCCeilDiv(numel, (ptrdiff_t) 4), THCCeilDiv(stride, (int64_t) 128));
dim3 block(32, 4);
hipLaunchKernelGGL(( cunn_LookupTableBag_accGradParametersKernel<real, accreal>), dim3(grid), dim3(block), 0, stream,
sortedIndices_data,
origIndices_data,
THCTensor_(data)(state, gradOutput),
THCTensor_(data)(state, gradWeight),
offset2bag_data,
count_data,
scale,
numel,
stride,
mode,
bag_size_data
);
THCTensor_(free)(state, gradOutput);
THCudaCheck(hipGetLastError());
}
#endif
|
bc6147bbe94193dce5f3365985075fa512c8b8b0.cu
|
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/LookupTableBag.cu"
#else
void THNN_(LookupTableBag_updateOutput)(
THCState *state,
THCIndexTensor *input,
THCIndexTensor *offsets,
THCTensor *weight,
THCTensor *output,
THCIndexTensor *offset2bag,
int mode,
THCIndexTensor *bag_size)
{
THCUNN_assertSameGPU(state, 5, input, offsets, weight, output, offset2bag);
if (!(THCIndexTensor_(isContiguous)(state, input) &&
THCIndexTensor_(isContiguous)(state, offsets) &&
THCTensor_(isContiguous)(state, weight))) {
THError("Tensors must be contiguous");
}
ptrdiff_t numIndices = THCIndexTensor_(size)(state, input, 0);
ptrdiff_t numBags = THCIndexTensor_(size)(state, offsets, 0);
ptrdiff_t stride = THCTensor_(size)(state, weight, 1);
int64_t *bag_size_data = NULL;
if (bag_size != NULL) {
bag_size_data = THCIndexTensor_(data)(state, bag_size);
}
cudaStream_t stream = THCState_getCurrentStream(state);
THLongStorage *inputSize = THCIndexTensor_(newSizeOf)(state, input);
THLongStorage *outputSize = THLongStorage_newWithSize(2);
THLongStorage_data(outputSize)[0] = numBags;
THLongStorage_data(outputSize)[1] = stride;
THCTensor_(resize)(state, output, outputSize, NULL);
THCTensor_(zero)(state, output);
THCIndexTensor_(resize)(state, offset2bag, inputSize, NULL);
THLongStorage_free(inputSize);
THLongStorage_free(outputSize);
dim3 block = dim3(32, 8);
int grid = 1024;
cunn_LookupTableBag_updateOutputKernel<real, accreal><<<grid, block, 0, stream>>>(
THCIndexTensor_(data)(state, input),
THCIndexTensor_(data)(state, offsets),
THCTensor_(data)(state, weight),
THCTensor_(data)(state, output),
THCIndexTensor_(data)(state, offset2bag),
numIndices,
numBags,
stride,
mode,
bag_size_data
);
THCudaCheck(cudaGetLastError());
}
void THNN_(LookupTableBag_accGradParameters)(
THCState *state,
THCIndexTensor *input,
THCTensor *gradOutput,
THCTensor *gradWeight,
THCIndexTensor *offset2bag,
THCIndexTensor *count,
THCIndexTensor *sortedIndices,
THCIndexTensor *origIndices,
bool scaleGradByFreq,
int mode,
THCIndexTensor *bag_size,
accreal scale_)
{
real scale = ScalarConvert<accreal, real>::to(scale_);
THCUNN_assertSameGPU(state, 6, input, gradOutput, gradWeight, offset2bag, sortedIndices, origIndices);
gradOutput = THCTensor_(newContiguous)(state, gradOutput);
if (!(THCIndexTensor_(isContiguous)(state, input) &&
THCTensor_(isContiguous)(state, gradWeight) &&
THCIndexTensor_(isContiguous)(state, offset2bag))) {
THError("Tensors must be contiguous");
}
int64_t *bag_size_data = NULL;
if (bag_size != NULL) {
bag_size_data = THCIndexTensor_(data)(state, bag_size);
}
int nDim = THCIndexTensor_(_nDimension)(state, input);
if (THCIndexTensor_(_nDimension)(state, input) != 1 && THCIndexTensor_(_nDimension)(state, input) != 2) {
THCDescBuff s1 = THCIndexTensor_(sizeDesc)(state, input);
THError("input must be a vector or matrix, but is of shape: %s", s1.str);
}
ptrdiff_t numel = THCIndexTensor_(nElement)(state, input);
int64_t stride = THCTensor_(stride)(state, gradWeight, 0);
cudaStream_t stream = THCState_getCurrentStream(state);
THLongStorage *inputSize = THCIndexTensor_(newSizeOf)(state, input);
THCIndexTensor_(resize)(state, sortedIndices, inputSize, NULL);
THCIndexTensor_(resize)(state, origIndices, inputSize, NULL);
THLongStorage_free(inputSize);
// Sort the inputs into sortedIndices, carrying the corresponding original
// indices along; we don't need a stable or multidimensional sort, so just
// use Thrust directly
{
THCIndexTensor_(copy)(state, sortedIndices, input);
THCThrustAllocator thrustAlloc(state);
thrust::device_ptr<THCIndex_t>
sortedIndicesIter(THCIndexTensor_(data)(state, sortedIndices));
thrust::device_ptr<THCIndex_t>
origIndicesIter(THCIndexTensor_(data)(state, origIndices));
// Fill sortedOrigIndices with sequential indices
thrust::counting_iterator<THCIndex_t> countIter(TH_INDEX_BASE);
thrust::copy(
#if CUDA_VERSION >= 7000
thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#endif
countIter, countIter + numel, origIndicesIter);
// Sort; a stable sort is not required
thrust::sort_by_key(
#if CUDA_VERSION >= 7000
thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#endif
sortedIndicesIter, sortedIndicesIter + numel,
origIndicesIter, ThrustLTOp<int64_t>());
}
THCIndex_t *sortedIndices_data = THCIndexTensor_(data)(state, sortedIndices);
THCIndex_t *origIndices_data = THCIndexTensor_(data)(state, origIndices);
THCIndex_t *offset2bag_data = THCIndexTensor_(data)(state, offset2bag);
THCIndex_t *count_data = NULL;
if (scaleGradByFreq) {
THCIndexTensor_(resizeAs)(state, count, input);
count_data = THCIndexTensor_(data)(state, count);
THCThrustAllocator thrustAlloc(state);
thrust::device_ptr<THCIndex_t> sortedIndices_ptr(sortedIndices_data);
thrust::device_ptr<THCIndex_t> count_ptr(count_data);
// Compute an increasing sequence per unique item in sortedIndices:
// sorted: 2 5 5 5 7 7 8 9 9
// count: 1 1 2 3 1 2 1 1 2
thrust::inclusive_scan_by_key(
#if CUDA_VERSION >= 7000
thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#endif
sortedIndices_ptr,
sortedIndices_ptr + numel,
thrust::make_constant_iterator(1),
count_ptr
);
// Take the maximum of each count per unique key in reverse:
// sorted: 2 5 5 5 7 7 8 9 9
// count: 1 3 3 3 2 2 1 2 2
thrust::inclusive_scan_by_key(
#if CUDA_VERSION >= 7000
thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#endif
thrust::make_reverse_iterator(sortedIndices_ptr + numel),
thrust::make_reverse_iterator(sortedIndices_ptr),
thrust::make_reverse_iterator(count_ptr + numel),
thrust::make_reverse_iterator(count_ptr + numel),
thrust::equal_to<int64_t>(),
thrust::maximum<int64_t>()
);
}
dim3 grid(THCCeilDiv(numel, (ptrdiff_t) 4), THCCeilDiv(stride, (int64_t) 128));
dim3 block(32, 4);
cunn_LookupTableBag_accGradParametersKernel<real, accreal><<<grid, block, 0, stream>>>(
sortedIndices_data,
origIndices_data,
THCTensor_(data)(state, gradOutput),
THCTensor_(data)(state, gradWeight),
offset2bag_data,
count_data,
scale,
numel,
stride,
mode,
bag_size_data
);
THCTensor_(free)(state, gradOutput);
THCudaCheck(cudaGetLastError());
}
#endif
|
c2abe6a6106ae547dcdb6e45c42b7f6fcec62abd.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <limits.h>
#include <stdlib.h>
#include <ctime>
#include <sstream>
#include <string>
#include<math.h>
#include "gpu_hashtable.hpp"
#define PERFORMACE_VIEW 1
#define MIN_LOAD_FACTOR 0.9
#define EMPTY_KEY 0
#define EMPTY_VALUE 0
#define NUM_BLOCKS(n) (((n) + 255) / 256)
#define NUM_THREADS 256
/* Hash function */
__device__ int hash_func(int k, int htable_size)
{
k = ((k >> 16) ^ k) * 0x45d9f3b;
k = ((k >> 16) ^ k) * 0x45d9f3b;
k = (k >> 16) ^ k;
/* the final result must
* fall within the given limit - size
*/
return k % htable_size;
}
/* Hashtable initialization */
__global__ void gpu_init_hashTable(entry_t *htable, const int size)
{
unsigned int threadId = blockIdx.x * blockDim.x + threadIdx.x;
/* only initialize entries within the table size */
if (threadId < size) {
htable[threadId].key = EMPTY_KEY;
htable[threadId].value = EMPTY_VALUE;
}
}
/* INIT HASH
*/
GpuHashTable::GpuHashTable(int size) {
/* allocate memory for htable */
hipMalloc(&htable, size * sizeof(entry_t));
DIE(htable == 0, "hipMalloc htable");
/* allocate memory for the hashtable element counter */
hipMallocManaged(&count, sizeof(unsigned int));
DIE(count == 0, "hipMallocManaged count");
/* initialize the hashtable entries */
hipLaunchKernelGGL(( gpu_init_hashTable), dim3(NUM_BLOCKS(size)), dim3(NUM_THREADS), 0, 0, htable, size);
hipDeviceSynchronize();
/* set the hashtable size */
htable_size = size;
/* reset the hashtable element counter */
*count = 0;
}
/* DESTROY HASH
*/
GpuHashTable::~GpuHashTable() {
/* free the hashtable memory */
if (htable != 0)
hipFree(htable);
/* free the counter memory */
if (count != 0)
hipFree(count);
}
/* RESHAPE HASH
*/
void GpuHashTable::reshape(int numBucketsReshape) {
int size = numBucketsReshape;
if (htable != 0)
hipFree(htable);
/* allocate memory for htable */
hipMalloc(&htable, size * sizeof(entry_t));
DIE(htable == 0, "hipMalloc htable");
if (count != 0)
hipFree(count);
/* allocate memory for the hashtable element counter */
hipMallocManaged(&count, sizeof(unsigned int));
DIE(count == 0, "hipMallocManaged count");
/* initialize the hashtable entries */
hipLaunchKernelGGL(( gpu_init_hashTable), dim3(NUM_BLOCKS(size)), dim3(NUM_THREADS), 0, 0, htable, size);
hipDeviceSynchronize();
/* set the hashtable size */
htable_size = size;
/* reset the hashtable element counter */
*count = 0;
}
/* Build a new hashtable from the entries of another hashtable */
__global__ void gpu_hashtable_copy(entry_t *old_htable, entry_t *new_htable, const int old_htable_size, const int new_htable_size)
{
unsigned int threadId = blockIdx.x * blockDim.x + threadIdx.x;
/* we only want to add elements that
* lie within the bounds of the old hashtable
*/
if (threadId >= old_htable_size)
return;
/* key held by the current thread's slot in the old hashtable */
int key = old_htable[threadId].key;
/* empty pairs have nothing
* to contribute to the new hashtable
*/
if (key == EMPTY_KEY)
return;
int current_key;
int index = hash_func(key, new_htable_size);
/* search for an available slot where the new key:value pair can be stored */
while (1) {
/* atomically check (and claim) whether the slot is free */
current_key = atomicCAS(&new_htable[index].key, EMPTY_KEY, key);
/* if a free slot was found (or the key is already there), use it */
if (current_key == EMPTY_KEY || current_key == key) {
new_htable[index].value = old_htable[threadId].value;
return;
}
/* if the current slot is taken, keep probing */
index = (index + 1) % new_htable_size;
}
}
/* Add a new key:value pair to the hashtable */
__global__ void gpu_hashtable_insert(entry_t *htable, unsigned int *count, const int htable_size, const int *keys, const int *values, const int numKeys)
{
unsigned int threadId = blockIdx.x * blockDim.x + threadIdx.x;
/* the id must lie within the given bounds */
if (threadId >= numKeys)
return;
int key = keys[threadId];
int value = values[threadId];
int current_key;
int index = hash_func(key, htable_size);
/* look for a free slot where the current key can be added */
while (1) {
/* atomically check (and claim) whether the slot is free */
current_key = atomicCAS(&htable[index].key, EMPTY_KEY, key);
/* if a free slot was found (or the key is already there), use it */
if (current_key == EMPTY_KEY || current_key == key) {
htable[index].value = value;
/* if the slot was free, a brand-new element was added;
* otherwise this insert only updated an existing value,
* in which case the number of elements in the hashmap does not grow
*/
if (current_key == EMPTY_KEY)
atomicAdd(count, 1);
return;
}
/* if the current slot is taken, keep probing */
index = (index + 1) % htable_size;
}
}
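/* illustrative probe sequence (hypothetical numbers): with htable_size = 8 and a key hashing
* to slot 5, the loop above tries slots 5, 6, 7, 0, 1, ... until atomicCAS either claims an
* EMPTY_KEY slot or finds the same key already stored; only a brand-new key increments *count
*/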
/* INSERT BATCH
*/
bool GpuHashTable::insertBatch(int *keys, int* values, int numKeys) {
int *device_keys;
int *device_values;
int old_htable_size;
entry_t *new_htable;
/* check that there is enough room for the new elements */
if (*count + numKeys > MIN_LOAD_FACTOR * htable_size) {
/* //////////////////////////////////////////////////////////////
* ///////////////// Resizing the hashtable ////////////////////
* //////////////////////////////////////////////////////////////
*/
old_htable_size = htable_size;
/* the new size is derived from the desired load factor */
htable_size = (*count + numKeys) / MIN_LOAD_FACTOR;
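/* illustrative sizing (hypothetical numbers): with *count = 90, numKeys = 30 and
* MIN_LOAD_FACTOR = 0.9, the new size becomes 120 / 0.9, i.e. about 133 slots,
* which keeps the table at roughly 90% occupancy after the insert
*/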
/* allocate memory for the new hashtable */
hipMalloc(&new_htable, htable_size * sizeof(entry_t));
DIE(new_htable == 0, "hipMalloc new_htable");
/* initialize the new hashtable */
hipLaunchKernelGGL(( gpu_init_hashTable), dim3(NUM_BLOCKS(htable_size)), dim3(NUM_THREADS), 0, 0, new_htable, htable_size);
hipDeviceSynchronize();
/* re-insert the existing data into the new hashtable */
hipLaunchKernelGGL(( gpu_hashtable_copy), dim3(NUM_BLOCKS(htable_size)), dim3(NUM_THREADS), 0, 0, htable, new_htable, old_htable_size, htable_size);
hipDeviceSynchronize();
/* free the memory of the old hashtable */
hipFree(htable);
/* switch over to the new hashtable */
htable = new_htable;
}
/* allocate memory for the kernel parameters */
hipMalloc((void **) &device_keys, numKeys * sizeof(int));
DIE(device_keys == 0, "hipMalloc device_keys");
hipMalloc((void **) &device_values, numKeys * sizeof(int));
DIE(device_values == 0, "hipMalloc device_values");
/* copy the key and value data to the device */
hipMemcpy(device_keys, keys, numKeys * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(device_values, values, numKeys * sizeof(int), hipMemcpyHostToDevice);
#if PERFORMACE_VIEW
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
#endif
/* insert the new elements into the hashtable */
hipLaunchKernelGGL(( gpu_hashtable_insert), dim3(NUM_BLOCKS(numKeys)), dim3(NUM_THREADS), 0, 0, htable, count, htable_size, device_keys, device_values, numKeys);
hipDeviceSynchronize();
#if PERFORMACE_VIEW
hipEventRecord(stop);
hipEventSynchronize(stop);
float time = 0;
hipEventElapsedTime(&time, start, stop);
float seconds = time / 1000.0f;
printf("Inserted %d elements in %f ms (%f million keys/second)\n", numKeys, time, numKeys / (double)seconds / 1000000.0f);
#endif
/* free the memory used for the kernel parameters */
hipFree(device_keys);
hipFree(device_values);
return true;
}
/* Returnarea valorilor pentru un set de chei */
__global__ void gpu_hashtable_lookup(entry_t *htable, const int htable_size, const int *keys, int *values, const int numKeys)
{
unsigned int threadId = blockIdx.x * blockDim.x + threadIdx.x;
/* id-ul trebuie sa se afle in limitele date */
if (threadId >= numKeys)
return;
int key;
int index;
int timeout = 0;
key = keys[threadId];
index = hash_func(key, htable_size);
/* se cauta valoarea asociata cheii date */
while (true) {
/* daca a fost parcurs intreg hashtable-ul
* atunci cheia data nu se regaseste in acesta
*/
if (timeout == htable_size) {
values[threadId] = EMPTY_VALUE;
return;
}
/* daca a fost gasita o potrivire
* atunci intoarcem valoarea de la acel index
*/
if (htable[index].key == key) {
values[threadId] = htable[index].value;
return;
}
/* if the current index did not hold the key,
 * then continue the search
*/
index = (index + 1) % htable_size;
timeout += 1;
}
}
/* GET BATCH
*/
int* GpuHashTable::getBatch(int* keys, int numKeys) {
int *values;
int *device_keys;
int *device_values;
/* allocate memory to hold the looked-up values */
values = (int *)malloc(numKeys * sizeof(int));
/* allocate memory for the lookup kernel parameters */
hipMalloc((void **) &device_keys, numKeys * sizeof(int));
DIE(device_keys == 0, "hipMalloc device_keys");
hipMalloc((void **) &device_values, numKeys * sizeof(int));
DIE(device_values == 0, "hipMalloc device_values");
/* copy the keys to look up */
hipMemcpy(device_keys, keys, numKeys * sizeof(int), hipMemcpyHostToDevice);
#if PERFORMACE_VIEW
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
#endif
/* look up the values associated with the given keys */
hipLaunchKernelGGL(( gpu_hashtable_lookup), dim3(NUM_BLOCKS(numKeys)), dim3(NUM_THREADS), 0, 0, htable, htable_size, device_keys, device_values, numKeys);
hipDeviceSynchronize();
#if PERFORMACE_VIEW
hipEventRecord(stop);
hipEventSynchronize(stop);
float time = 0;
hipEventElapsedTime(&time, start, stop);
float seconds = time / 1000.0f;
printf("Got %d elements in %f ms (%f million keys/second)\n", numKeys, time, numKeys / (double)seconds / 1000000.0f);
#endif
/* copy the values found back to the host */
hipMemcpy(values, device_values, numKeys * sizeof(int), hipMemcpyDeviceToHost);
return values;
}
/* GET LOAD FACTOR
* number of elements / total number of hashtable slots
*/
float GpuHashTable::loadFactor() {
/*
* count - the number of elements currently stored in the hashtable
* htable_size - the total space allocated for the hashtable
*/
return (float)*count /(float)htable_size; // no larger than 1.0f = 100%
}
/*********************************************************/
#define HASH_INIT GpuHashTable GpuHashTable(1);
#define HASH_RESERVE(size) GpuHashTable.reshape(size);
#define HASH_BATCH_INSERT(keys, values, numKeys) GpuHashTable.insertBatch(keys, values, numKeys)
#define HASH_BATCH_GET(keys, numKeys) GpuHashTable.getBatch(keys, numKeys)
#define HASH_LOAD_FACTOR GpuHashTable.loadFactor()
#include "test_map.cpp"
|
c2abe6a6106ae547dcdb6e45c42b7f6fcec62abd.cu
|
#include <iostream>
#include <limits.h>
#include <stdlib.h>
#include <ctime>
#include <sstream>
#include <string>
#include<math.h>
#include "gpu_hashtable.hpp"
#define PERFORMACE_VIEW 1
#define MIN_LOAD_FACTOR 0.9
#define EMPTY_KEY 0
#define EMPTY_VALUE 0
#define NUM_BLOCKS(n) (((n) + 255) / 256)
#define NUM_THREADS 256
/* Hash function */
__device__ int hash_func(int k, int htable_size)
{
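/* mix the key's bits through repeated xor-shift and multiply steps, then reduce modulo the table size */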
k = ((k >> 16) ^ k) * 0x45d9f3b;
k = ((k >> 16) ^ k) * 0x45d9f3b;
k = (k >> 16) ^ k;
/* the final result must
 * fall within the given limit - size
*/
return k % htable_size;
}
/* Hashtable initialization */
__global__ void gpu_init_hashTable(entry_t *htable, const int size)
{
unsigned int threadId = blockIdx.x * blockDim.x + threadIdx.x;
/* only initialize entries within the table size */
if (threadId < size) {
htable[threadId].key = EMPTY_KEY;
htable[threadId].value = EMPTY_VALUE;
}
}
/* INIT HASH
*/
GpuHashTable::GpuHashTable(int size) {
/* allocate memory for htable */
cudaMalloc(&htable, size * sizeof(entry_t));
DIE(htable == 0, "cudaMalloc htable");
/* allocate memory for the hashtable element counter */
cudaMallocManaged(&count, sizeof(unsigned int));
DIE(count == 0, "cudaMallocManaged count");
/* initialize the hashtable entries */
gpu_init_hashTable<<<NUM_BLOCKS(size), NUM_THREADS>>>(htable, size);
cudaDeviceSynchronize();
/* initialize the hashtable size */
htable_size = size;
/* initialize the element counter */
*count = 0;
}
/* DESTROY HASH
*/
GpuHashTable::~GpuHashTable() {
/* free the hashtable memory */
if (htable != 0)
cudaFree(htable);
/* free the counter memory */
if (count != 0)
cudaFree(count);
}
/* RESHAPE HASH
*/
void GpuHashTable::reshape(int numBucketsReshape) {
int size = numBucketsReshape;
if (htable != 0)
cudaFree(htable);
/* allocate memory for htable */
cudaMalloc(&htable, size * sizeof(entry_t));
DIE(htable == 0, "cudaMalloc htable");
if (count != 0)
cudaFree(count);
/* allocate memory for the hashtable element counter */
cudaMallocManaged(&count, sizeof(unsigned int));
DIE(count == 0, "cudaMallocManaged count");
/* initialize the hashtable entries */
gpu_init_hashTable<<<NUM_BLOCKS(size), NUM_THREADS>>>(htable, size);
cudaDeviceSynchronize();
/* initialize the hashtable size */
htable_size = size;
/* initialize the element counter */
*count = 0;
}
/* Build a hashtable from the contents of another hashtable */
__global__ void gpu_hashtable_copy(entry_t *old_htable, entry_t *new_htable, const int old_htable_size, const int new_htable_size)
{
unsigned int threadId = blockIdx.x * blockDim.x + threadIdx.x;
/* we only want to add elements that
 * fall within the bounds of the old hashtable
*/
if (threadId >= old_htable_size)
return;
/* the current thread's key from the old hashtable */
int key = old_htable[threadId].key;
/* empty pairs have nothing
 * to add to the new hashtable
*/
if (key == EMPTY_KEY)
return;
int current_key;
int index = hash_func(key, new_htable_size);
/* search for an available slot to add the new key:value pair */
while (1) {
/* atomically check (and claim) the slot if it is free */
current_key = atomicCAS(&new_htable[index].key, EMPTY_KEY, key);
/* if a free slot was found, use it */
if (current_key == EMPTY_KEY || current_key == key) {
new_htable[index].value = old_htable[threadId].value;
return;
}
/* if the current slot is occupied, keep probing */
index = (index + 1) % new_htable_size;
}
}
/* Add a new pair to the hashtable */
__global__ void gpu_hashtable_insert(entry_t *htable, unsigned int *count, const int htable_size, const int *keys, const int *values, const int numKeys)
{
unsigned int threadId = blockIdx.x * blockDim.x + threadIdx.x;
/* the thread id must be within the given bounds */
if (threadId >= numKeys)
return;
int key = keys[threadId];
int value = values[threadId];
int current_key;
int index = hash_func(key, htable_size);
/* for the current key, look for a free slot to insert it */
while (1) {
/* atomically check (and claim) the slot if it is free */
current_key = atomicCAS(&htable[index].key, EMPTY_KEY, key);
/* if a free slot was found, use it */
if (current_key == EMPTY_KEY || current_key == key) {
htable[index].value = value;
/* if the slot was free, a new element was added;
* otherwise, this insert would have meant updating an existing value,
* in which case the number of elements in the hashmap would not grow
*/
if (current_key == EMPTY_KEY)
atomicAdd(count, 1);
return;
}
/* if the current slot is occupied, keep probing */
index = (index + 1) % htable_size;
}
}
/* INSERT BATCH
*/
bool GpuHashTable::insertBatch(int *keys, int* values, int numKeys) {
int *device_keys;
int *device_values;
int old_htable_size;
entry_t *new_htable;
/* make sure there is enough room for the new elements */
if (*count + numKeys > MIN_LOAD_FACTOR * htable_size) {
/* //////////////////////////////////////////////////////////////
* ///////////////// Resizing the hashtable ////////////////////
* //////////////////////////////////////////////////////////////
*/
old_htable_size = htable_size;
/* the new size is computed from the desired load factor */
htable_size = (*count + numKeys) / MIN_LOAD_FACTOR;
/* allocate memory for the new hashtable */
cudaMalloc(&new_htable, htable_size * sizeof(entry_t));
DIE(new_htable == 0, "cudaMalloc new_htable");
/* initialize the new hashtable */
gpu_init_hashTable<<<NUM_BLOCKS(htable_size), NUM_THREADS>>>(new_htable, htable_size);
cudaDeviceSynchronize();
/* re-insert the existing data into the new hashtable */
gpu_hashtable_copy<<<NUM_BLOCKS(htable_size), NUM_THREADS>>>(htable, new_htable, old_htable_size, htable_size);
cudaDeviceSynchronize();
/* free the old hashtable's memory */
cudaFree(htable);
/* switch over to the new hashtable */
htable = new_htable;
}
/* allocate memory for the kernel parameters */
cudaMalloc((void **) &device_keys, numKeys * sizeof(int));
DIE(device_keys == 0, "cudaMalloc device_keys");
cudaMalloc((void **) &device_values, numKeys * sizeof(int));
DIE(device_values == 0, "cudaMalloc device_values");
/* copy the key and value data to the device */
cudaMemcpy(device_keys, keys, numKeys * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(device_values, values, numKeys * sizeof(int), cudaMemcpyHostToDevice);
#if PERFORMACE_VIEW
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
#endif
/* insert the new elements into the hashtable */
gpu_hashtable_insert<<<NUM_BLOCKS(numKeys), NUM_THREADS>>>(htable, count, htable_size, device_keys, device_values, numKeys);
cudaDeviceSynchronize();
#if PERFORMACE_VIEW
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float time = 0;
cudaEventElapsedTime(&time, start, stop);
float seconds = time / 1000.0f;
printf("Inserted %d elements in %f ms (%f million keys/second)\n", numKeys, time, numKeys / (double)seconds / 1000000.0f);
#endif
/* free the memory used for the kernel parameters */
cudaFree(device_keys);
cudaFree(device_values);
return true;
}
/* Return the values for a set of keys */
__global__ void gpu_hashtable_lookup(entry_t *htable, const int htable_size, const int *keys, int *values, const int numKeys)
{
unsigned int threadId = blockIdx.x * blockDim.x + threadIdx.x;
/* the thread id must be within the given bounds */
if (threadId >= numKeys)
return;
int key;
int index;
int timeout = 0;
key = keys[threadId];
index = hash_func(key, htable_size);
/* look up the value associated with the given key */
while (true) {
/* if the whole hashtable has been traversed,
 * then the given key is not present in it
*/
if (timeout == htable_size) {
values[threadId] = EMPTY_VALUE;
return;
}
/* if a match was found,
 * then return the value stored at that index
*/
if (htable[index].key == key) {
values[threadId] = htable[index].value;
return;
}
/* if the current index did not hold the key,
 * then continue the search
*/
index = (index + 1) % htable_size;
timeout += 1;
}
}
/* GET BATCH
*/
int* GpuHashTable::getBatch(int* keys, int numKeys) {
int *values;
int *device_keys;
int *device_values;
/* allocate memory to hold the looked-up values */
values = (int *)malloc(numKeys * sizeof(int));
/* allocate memory for the lookup kernel parameters */
cudaMalloc((void **) &device_keys, numKeys * sizeof(int));
DIE(device_keys == 0, "cudaMalloc device_keys");
cudaMalloc((void **) &device_values, numKeys * sizeof(int));
DIE(device_values == 0, "cudaMalloc device_values");
/* copy the keys to look up */
cudaMemcpy(device_keys, keys, numKeys * sizeof(int), cudaMemcpyHostToDevice);
#if PERFORMACE_VIEW
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
#endif
/* look up the values associated with the given keys */
gpu_hashtable_lookup<<<NUM_BLOCKS(numKeys), NUM_THREADS>>>(htable, htable_size, device_keys, device_values, numKeys);
cudaDeviceSynchronize();
#if PERFORMACE_VIEW
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float time = 0;
cudaEventElapsedTime(&time, start, stop);
float seconds = time / 1000.0f;
printf("Got %d elements in %f ms (%f million keys/second)\n", numKeys, time, numKeys / (double)seconds / 1000000.0f);
#endif
/* copy the values found back to the host */
cudaMemcpy(values, device_values, numKeys * sizeof(int), cudaMemcpyDeviceToHost);
return values;
}
/* GET LOAD FACTOR
* number of elements / total number of hashtable slots
*/
float GpuHashTable::loadFactor() {
/*
* count - the number of elements currently stored in the hashtable
* htable_size - the total space allocated for the hashtable
*/
return (float)*count /(float)htable_size; // no larger than 1.0f = 100%
}
/*********************************************************/
#define HASH_INIT GpuHashTable GpuHashTable(1);
#define HASH_RESERVE(size) GpuHashTable.reshape(size);
#define HASH_BATCH_INSERT(keys, values, numKeys) GpuHashTable.insertBatch(keys, values, numKeys)
#define HASH_BATCH_GET(keys, numKeys) GpuHashTable.getBatch(keys, numKeys)
#define HASH_LOAD_FACTOR GpuHashTable.loadFactor()
#include "test_map.cpp"
|
4d079598e1a529d018d5d6913fe1507e29498ad9.hip
|
// !!! This is a file automatically generated by hipify!!!
//
// Created by zeyi on 1/9/19.
//
#include <fstream>
#include "hip/hip_runtime_api.h"
#include <thundergbm/tree.h>
#include <thundergbm/trainer.h>
#include <thundergbm/metric/metric.h>
#include "thundergbm/util/device_lambda.cuh"
#include "thrust/reduce.h"
#include "time.h"
#include "thundergbm/booster.h"
#include "chrono"
#include <thundergbm/parser.h>
using namespace std;
vector<vector<Tree>> TreeTrainer::train(GBMParam ¶m, const DataSet &dataset) {
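//"auto" tree method: use the exact greedy method for datasets with more than 20000 features, the histogram-based method otherwise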
if (param.tree_method == "auto")
if (dataset.n_features() > 20000)
param.tree_method = "exact";
else
param.tree_method = "hist";
//correct the number of classes
if(param.objective.find("multi:") != std::string::npos || param.objective.find("binary:") != std::string::npos) {
int num_class = dataset.label.size();
if (param.num_class != num_class) {
LOG(INFO) << "updating number of classes from " << param.num_class << " to " << num_class;
param.num_class = num_class;
}
}
else if(param.objective.find("reg:") != std::string::npos){
param.num_class = 1;
}
vector<vector<Tree>> boosted_model;
Booster booster;
booster.init(dataset, param);
std::chrono::high_resolution_clock timer;
auto start = timer.now();
for (int i = 0; i < param.n_trees; ++i) {
//one iteration may produce multiple trees, depending on objectives
booster.boost(boosted_model);
}
auto stop = timer.now();
std::chrono::duration<float> training_time = stop - start;
LOG(INFO) << "training time = " << training_time.count();
SyncMem::clear_cache();
return boosted_model;
}
|
4d079598e1a529d018d5d6913fe1507e29498ad9.cu
|
//
// Created by zeyi on 1/9/19.
//
#include <fstream>
#include "cuda_runtime_api.h"
#include <thundergbm/tree.h>
#include <thundergbm/trainer.h>
#include <thundergbm/metric/metric.h>
#include "thundergbm/util/device_lambda.cuh"
#include "thrust/reduce.h"
#include "time.h"
#include "thundergbm/booster.h"
#include "chrono"
#include <thundergbm/parser.h>
using namespace std;
vector<vector<Tree>> TreeTrainer::train(GBMParam ¶m, const DataSet &dataset) {
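//"auto" tree method: use the exact greedy method for datasets with more than 20000 features, the histogram-based method otherwise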
if (param.tree_method == "auto")
if (dataset.n_features() > 20000)
param.tree_method = "exact";
else
param.tree_method = "hist";
//correct the number of classes
if(param.objective.find("multi:") != std::string::npos || param.objective.find("binary:") != std::string::npos) {
int num_class = dataset.label.size();
if (param.num_class != num_class) {
LOG(INFO) << "updating number of classes from " << param.num_class << " to " << num_class;
param.num_class = num_class;
}
}
else if(param.objective.find("reg:") != std::string::npos){
param.num_class = 1;
}
vector<vector<Tree>> boosted_model;
Booster booster;
booster.init(dataset, param);
std::chrono::high_resolution_clock timer;
auto start = timer.now();
for (int i = 0; i < param.n_trees; ++i) {
//one iteration may produce multiple trees, depending on objectives
booster.boost(boosted_model);
}
auto stop = timer.now();
std::chrono::duration<float> training_time = stop - start;
LOG(INFO) << "training time = " << training_time.count();
SyncMem::clear_cache();
return boosted_model;
}
|
b3d2d9fe95e23c372f4ff06cf9dbe06de7aac2ba.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THH/generic/THHTensorMath.hip"
#else
#include "ATen/hip/HIPContext.h"
#include <ATen/MemoryOverlap.h>
void THCTensor_(fill)(THCState* state, THCTensor *self_, scalar_t value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_));
if (!THC_pointwiseApply1<scalar_t>(
state, self_, TensorFillOp<scalar_t>(value))) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
}
void THCTensor_(zero)(THCState *state, THCTensor *self_)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_));
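// fast path: a contiguous tensor is zeroed with a single async memset on the current stream; otherwise fall back to a pointwise fill with 0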
if (THCTensor_(isContiguous)(state, self_)) {
THCudaCheck(hipMemsetAsync(THCTensor_(data)(state, self_),
0,
sizeof(scalar_t) * THCTensor_(nElement)(state, self_),
c10::hip::getCurrentHIPStreamMasqueradingAsCUDA()));
} else {
if (!THC_pointwiseApply1<scalar_t>(
state, self_,
TensorFillOp<scalar_t>(ScalarConvert<int, scalar_t>::to(0)))) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
}
ptrdiff_t
THCTensor_(numel)(THCState *state, THCTensor *t)
{
return THCTensor_(nElement)(state, t);
}
void THCTensor_(check_shape_except_dim)(THCState *state,
THCTensor *first, THCTensor *second, int dimension);
inline void THCTensor_(check_shape_except_dim)(THCState *state,
THCTensor *first, THCTensor *second, int dimension)
{
int first_dims = first->dim();
int second_dims = second->dim();
THArgCheck(first_dims == second_dims, 0,
"Tensors must have same number of dimensions: got %d and %d",
first_dims, second_dims);
for (int dim = 0; dim < first_dims; dim++) {
if (dim == dimension) {
continue;
}
int64_t first_dim_size = THCTensor_(size)(state, first, dim);
int64_t second_dim_size = THCTensor_(size)(state, second, dim);
THArgCheck(first_dim_size == second_dim_size, 0,
"Sizes of tensors must match except in dimension %d. Got %lld and %lld in dimension %d",
dimension, (long long)first_dim_size, (long long)second_dim_size, dim);
}
}
void THCTensor_(nonzero)(THCState* state, THCudaLongTensor *tensor,
THCTensor *self)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self ));
THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, tensor));
using namespace thrust::placeholders;
THCThrustAllocator thrustAlloc(state);
self = THCTensor_(newContiguous)(state, self);
thrust::device_ptr<scalar_t> self_data(THCTensor_(data)(state, self));
int num_dim = THCTensor_(nDimension)(state, self);
int num_dim_noscalars = std::max<int>(1, num_dim);
int64_t N = THCTensor_(nElement)(state, self);
// this is a little awkward for scalars because we run thrust to count the number of zeros
// (which are necessary to get the correct size), but thrust just has an array API, so
// we need to basically treat the scalar as a 1-dimensional tensor (array) for
// the counting part.
THCudaLongTensor_resize2d(state, tensor, N, num_dim_noscalars);
tensor = THCudaLongTensor_newContiguous(state, tensor);
thrust::device_ptr<int64_t> tensor_data(THCudaLongTensor_data(state, tensor));
thrust::counting_iterator<int64_t> idxfirst(0);
thrust::counting_iterator<int64_t> idxlast = idxfirst + N;
typedef thrust::device_ptr<int64_t> Iter;
strided_range<Iter> strided_tensor(tensor_data,
tensor_data+N*num_dim_noscalars, num_dim_noscalars);
#if TORCH_HIP_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__
hipStream_t stream = c10::hip::getCurrentHIPStreamMasqueradingAsCUDA();
#endif
strided_range<Iter>::iterator dend = thrust::copy_if(
#if TORCH_HIP_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__
thrust::hip::par(thrustAlloc).on(stream),
#endif
idxfirst,
idxlast,
self_data,
strided_tensor.begin(),
NonZeroOp<scalar_t>()
);
int64_t num_nonzeros = thrust::distance(strided_tensor.begin(), dend);
if (num_nonzeros > 0 && num_dim > 0) {
int64_t div = 1;
for (int dim = num_dim-1; dim >= 0; dim--) {
strided_range<Iter> stride_dim(tensor_data+dim,
tensor_data+N*num_dim, num_dim);
thrust::transform(
#if TORCH_HIP_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__
thrust::hip::par(thrustAlloc).on(stream),
#endif
strided_tensor.begin(),
strided_tensor.end(),
stride_dim.begin(),
idx_functor(div, THTensor_(size)(self, dim))
);
div *= THTensor_(size)(self, dim);
}
}
THCudaLongTensor_resize2d(state, tensor, num_nonzeros, num_dim);
THCTensor_(free)(state, self);
THCudaLongTensor_free(state, tensor);
THCudaCheck(hipGetLastError());
}
#if !defined(THC_REAL_IS_BOOL) /* non bool only part */
void THCTensor_(diag)(THCState *state, THCTensor *self_, THCTensor *src_, int64_t k){
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src_));
int nDimension = THCTensor_(nDimension)(state, src_);
THArgCheck((nDimension == 2) || (nDimension == 1), 1, "expected a matrix or a vector");
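// 2D input: extract the k-th diagonal of src_ into a 1D self_; 1D input: build a 2D self_ with src_ placed on its k-th diagonal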
if (nDimension == 2) {
int64_t stride0 = THCTensor_(stride)(state, src_, 0);
int64_t stride1 = THCTensor_(stride)(state, src_, 1);
int64_t size0 = THCTensor_(size)(state, src_, 0);
int64_t size1 = THCTensor_(size)(state, src_, 1);
int64_t size = (k > 0) ? min((int64_t)size0, (int64_t)size1 - k) : min((int64_t)size0 + k, (int64_t)size1);
THCTensor_(resize1d)(state, self_, size);
if (size > 0) {
int64_t strideSelf = THCTensor_(stride)(state, self_, 0);
const dim3 threads(min((int64_t)at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, (int64_t)size));
dim3 grid(min((int64_t)1024, (int64_t)THCCeilDiv(size, (int64_t)threads.x)));
int64_t start = (k >= 0 ? k * stride1 : -k * stride0);
hipLaunchKernelGGL(( THCTensor_copyFromDiagonal<scalar_t>), dim3(grid), dim3(threads), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
THCTensor_(data)(state, self_), THCTensor_(data)(state, src_), start, size, stride0 + stride1, strideSelf);
}
} else {
ptrdiff_t totalElements = THCTensor_(nElement)(state, src_);
ptrdiff_t size = (k > 0) ? totalElements + k : totalElements - k;
int64_t strideSrc = THTensor_(stride)(src_, 0);
THCTensor_(resize2d)(state, self_, size, size);
THCTensor_(zero)(state, self_);
if (size > 0) {
int64_t stride0 = THCTensor_(stride)(state, self_, 0);
int64_t stride1 = THCTensor_(stride)(state, self_, 1);
const dim3 threads(min((int64_t)at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, (int64_t)size));
dim3 grid(min((int64_t)1024, (int64_t)THCCeilDiv(size, (ptrdiff_t)threads.x)));
ptrdiff_t start = (k >= 0 ? k * stride1 : -k * stride0);
hipLaunchKernelGGL(( THCTensor_copyToDiagonal<scalar_t>), dim3(grid), dim3(threads), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
THCTensor_(data)(state, self_), THCTensor_(data)(state, src_), start, totalElements, stride0 + stride1, strideSrc);
}
}
THCudaCheck(hipGetLastError());
}
accreal THCTensor_(trace)(THCState *state, THCTensor *src_) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, src_));
THArgCheck((THTensor_nDimensionLegacyAll(src_) == 2), 1, "expected a matrix");
THCTensor *diag = THCTensor_(new)(state);
THCTensor_(diag)(state, diag, src_, 0);
accreal trace = THCTensor_(sumall)(state, diag);
THCTensor_(free)(state, diag);
return trace;
}
#endif
#endif
|
b3d2d9fe95e23c372f4ff06cf9dbe06de7aac2ba.cu
|
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THC/generic/THCTensorMath.cu"
#else
#include "ATen/cuda/CUDAContext.h"
#include <ATen/MemoryOverlap.h>
void THCTensor_(fill)(THCState* state, THCTensor *self_, scalar_t value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_));
if (!THC_pointwiseApply1<scalar_t>(
state, self_, TensorFillOp<scalar_t>(value))) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
THCudaCheck(cudaGetLastError());
}
void THCTensor_(zero)(THCState *state, THCTensor *self_)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_));
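// fast path: a contiguous tensor is zeroed with a single async memset on the current stream; otherwise fall back to a pointwise fill with 0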
if (THCTensor_(isContiguous)(state, self_)) {
THCudaCheck(cudaMemsetAsync(THCTensor_(data)(state, self_),
0,
sizeof(scalar_t) * THCTensor_(nElement)(state, self_),
c10::cuda::getCurrentCUDAStream()));
} else {
if (!THC_pointwiseApply1<scalar_t>(
state, self_,
TensorFillOp<scalar_t>(ScalarConvert<int, scalar_t>::to(0)))) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
}
ptrdiff_t
THCTensor_(numel)(THCState *state, THCTensor *t)
{
return THCTensor_(nElement)(state, t);
}
void THCTensor_(check_shape_except_dim)(THCState *state,
THCTensor *first, THCTensor *second, int dimension);
inline void THCTensor_(check_shape_except_dim)(THCState *state,
THCTensor *first, THCTensor *second, int dimension)
{
int first_dims = first->dim();
int second_dims = second->dim();
THArgCheck(first_dims == second_dims, 0,
"Tensors must have same number of dimensions: got %d and %d",
first_dims, second_dims);
for (int dim = 0; dim < first_dims; dim++) {
if (dim == dimension) {
continue;
}
int64_t first_dim_size = THCTensor_(size)(state, first, dim);
int64_t second_dim_size = THCTensor_(size)(state, second, dim);
THArgCheck(first_dim_size == second_dim_size, 0,
"Sizes of tensors must match except in dimension %d. Got %lld and %lld in dimension %d",
dimension, (long long)first_dim_size, (long long)second_dim_size, dim);
}
}
void THCTensor_(nonzero)(THCState* state, THCudaLongTensor *tensor,
THCTensor *self)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self ));
THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, tensor));
using namespace thrust::placeholders;
THCThrustAllocator thrustAlloc(state);
self = THCTensor_(newContiguous)(state, self);
thrust::device_ptr<scalar_t> self_data(THCTensor_(data)(state, self));
int num_dim = THCTensor_(nDimension)(state, self);
int num_dim_noscalars = std::max<int>(1, num_dim);
int64_t N = THCTensor_(nElement)(state, self);
// this is a little awkward for scalars because we run thrust to count the number of zeros
// (which are necessary to get the correct size), but thrust just has an array API, so
// we need to basically treat the scalar as a 1-dimensional tensor (array) for
// the counting part.
THCudaLongTensor_resize2d(state, tensor, N, num_dim_noscalars);
tensor = THCudaLongTensor_newContiguous(state, tensor);
thrust::device_ptr<int64_t> tensor_data(THCudaLongTensor_data(state, tensor));
thrust::counting_iterator<int64_t> idxfirst(0);
thrust::counting_iterator<int64_t> idxlast = idxfirst + N;
typedef thrust::device_ptr<int64_t> Iter;
strided_range<Iter> strided_tensor(tensor_data,
tensor_data+N*num_dim_noscalars, num_dim_noscalars);
#if CUDA_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__
cudaStream_t stream = c10::cuda::getCurrentCUDAStream();
#endif
strided_range<Iter>::iterator dend = thrust::copy_if(
#if CUDA_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__
thrust::cuda::par(thrustAlloc).on(stream),
#endif
idxfirst,
idxlast,
self_data,
strided_tensor.begin(),
NonZeroOp<scalar_t>()
);
int64_t num_nonzeros = thrust::distance(strided_tensor.begin(), dend);
if (num_nonzeros > 0 && num_dim > 0) {
int64_t div = 1;
for (int dim = num_dim-1; dim >= 0; dim--) {
strided_range<Iter> stride_dim(tensor_data+dim,
tensor_data+N*num_dim, num_dim);
thrust::transform(
#if CUDA_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__
thrust::cuda::par(thrustAlloc).on(stream),
#endif
strided_tensor.begin(),
strided_tensor.end(),
stride_dim.begin(),
idx_functor(div, THTensor_(size)(self, dim))
);
div *= THTensor_(size)(self, dim);
}
}
THCudaLongTensor_resize2d(state, tensor, num_nonzeros, num_dim);
THCTensor_(free)(state, self);
THCudaLongTensor_free(state, tensor);
THCudaCheck(cudaGetLastError());
}
#if !defined(THC_REAL_IS_BOOL) /* non bool only part */
void THCTensor_(diag)(THCState *state, THCTensor *self_, THCTensor *src_, int64_t k){
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src_));
int nDimension = THCTensor_(nDimension)(state, src_);
THArgCheck((nDimension == 2) || (nDimension == 1), 1, "expected a matrix or a vector");
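// 2D input: extract the k-th diagonal of src_ into a 1D self_; 1D input: build a 2D self_ with src_ placed on its k-th diagonal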
if (nDimension == 2) {
int64_t stride0 = THCTensor_(stride)(state, src_, 0);
int64_t stride1 = THCTensor_(stride)(state, src_, 1);
int64_t size0 = THCTensor_(size)(state, src_, 0);
int64_t size1 = THCTensor_(size)(state, src_, 1);
int64_t size = (k > 0) ? min((int64_t)size0, (int64_t)size1 - k) : min((int64_t)size0 + k, (int64_t)size1);
THCTensor_(resize1d)(state, self_, size);
if (size > 0) {
int64_t strideSelf = THCTensor_(stride)(state, self_, 0);
const dim3 threads(min((int64_t)at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, (int64_t)size));
dim3 grid(min((int64_t)1024, (int64_t)THCCeilDiv(size, (int64_t)threads.x)));
int64_t start = (k >= 0 ? k * stride1 : -k * stride0);
THCTensor_copyFromDiagonal<scalar_t><<<grid, threads, 0, c10::cuda::getCurrentCUDAStream()>>>
(THCTensor_(data)(state, self_), THCTensor_(data)(state, src_), start, size, stride0 + stride1, strideSelf);
}
} else {
ptrdiff_t totalElements = THCTensor_(nElement)(state, src_);
ptrdiff_t size = (k > 0) ? totalElements + k : totalElements - k;
int64_t strideSrc = THTensor_(stride)(src_, 0);
THCTensor_(resize2d)(state, self_, size, size);
THCTensor_(zero)(state, self_);
if (size > 0) {
int64_t stride0 = THCTensor_(stride)(state, self_, 0);
int64_t stride1 = THCTensor_(stride)(state, self_, 1);
const dim3 threads(min((int64_t)at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, (int64_t)size));
dim3 grid(min((int64_t)1024, (int64_t)THCCeilDiv(size, (ptrdiff_t)threads.x)));
ptrdiff_t start = (k >= 0 ? k * stride1 : -k * stride0);
THCTensor_copyToDiagonal<scalar_t><<<grid, threads, 0, c10::cuda::getCurrentCUDAStream()>>>
(THCTensor_(data)(state, self_), THCTensor_(data)(state, src_), start, totalElements, stride0 + stride1, strideSrc);
}
}
THCudaCheck(cudaGetLastError());
}
accreal THCTensor_(trace)(THCState *state, THCTensor *src_) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, src_));
THArgCheck((THTensor_nDimensionLegacyAll(src_) == 2), 1, "expected a matrix");
THCTensor *diag = THCTensor_(new)(state);
THCTensor_(diag)(state, diag, src_, 0);
accreal trace = THCTensor_(sumall)(state, diag);
THCTensor_(free)(state, diag);
return trace;
}
#endif
#endif
|
6d372bd15aa42d5e96cf97d3b57a4c6ac6c34c7a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <stdlib.h>
#include <iomanip>
#include <time.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
using namespace std;
#define MAX_ARRAY_SIZE 2048
#define RANDOM_MAX 2.0
#define RANDOM_MIN 1.0
#define TILE_WIDTH 32
#define EPSILON 0.000001
#define NUM_BLOCKS (MAX_ARRAY_SIZE/TILE_WIDTH)
float A[MAX_ARRAY_SIZE][MAX_ARRAY_SIZE];
float F[MAX_ARRAY_SIZE][MAX_ARRAY_SIZE];
float C[MAX_ARRAY_SIZE][MAX_ARRAY_SIZE];
void serial();
void init_F();
int check();
__global__ void matrixMultiply1(float *, float *, int);
__global__ void matrixMultiply2(float *, float *, int);
__global__ void matrixMultiply3(float *, float *, int);
int main()
{
float *d_a, *d_c;
struct timeval startTime, endTime;
size_t memsize = MAX_ARRAY_SIZE * MAX_ARRAY_SIZE * sizeof(float);
hipMalloc((void**) &d_a, memsize);
hipMalloc((void**) &d_c, memsize);
init_F();
hipMemcpy(d_a,A,memsize,hipMemcpyHostToDevice);
hipMemcpy(d_c,C,memsize,hipMemcpyHostToDevice);
gettimeofday(&startTime, NULL);
//serial();
//dim3 dimGrid1(1,1);
//dim3 dimBlock1(MAX_ARRAY_SIZE, MAX_ARRAY_SIZE);
dim3 dimGrid2(MAX_ARRAY_SIZE/TILE_WIDTH, MAX_ARRAY_SIZE/TILE_WIDTH);
dim3 dimBlock2(TILE_WIDTH, TILE_WIDTH);
hipLaunchKernelGGL(( matrixMultiply1), dim3(dimGrid2), dim3(dimBlock2) , 0, 0, d_a,d_c,MAX_ARRAY_SIZE);
//matrixMultiply2<<< dimGrid2, dimBlock2 >>>(d_a,d_c,MAX_ARRAY_SIZE);*/
//matrixMultiply3<<< dimGrid2, dimBlock2 >>>(d_a,d_c,MAX_ARRAY_SIZE);
gettimeofday(&endTime, NULL);
long seconds = endTime.tv_sec - startTime.tv_sec;
long useconds = endTime.tv_usec - startTime.tv_usec;
double duration = seconds + useconds/1000000.0;
cout<<"\nTime taken for Matrix Multiplication on GPU (time in sec): "<<fixed<<setprecision(7)<<duration;
cout<<"\nPerformance Metrics (GFlops/sec):"<<fixed<<setprecision(6)<<((2 * (long)MAX_ARRAY_SIZE * MAX_ARRAY_SIZE * MAX_ARRAY_SIZE))/(1e9 * duration);
cout<<endl;
hipMemcpy(C,d_c,memsize,hipMemcpyDeviceToHost);
if(check() == 1) {
cout<<"\nMatrix Multiplication Successful!"<<endl;
}
hipFree(d_a);
hipFree(d_c);
return 0;
}
void init_F()
{
srand(time(NULL));
for (int i = 0; i < MAX_ARRAY_SIZE; i++){
for (int j = 0; j < MAX_ARRAY_SIZE; j++){
float r = ((float)rand()) / (float)RAND_MAX;
A[i][j] = RANDOM_MIN + r * (RANDOM_MAX - RANDOM_MIN);
}
}
}
__global__ void matrixMultiply1(float *A, float *C, int size) {
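// naive kernel: each thread accumulates one element of C = A^T * A directly from global memory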
int Col = blockDim.y * blockIdx.y + threadIdx.y;
int Row = blockDim.x * blockIdx.x + threadIdx.x;
for(int k = 0; k < size; k++)
C[Row * size + Col] += A[k * size + Row] * A[k * size + Col];
}
__global__ void matrixMultiply2(float* A, float* C, int size)
{
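// same product as matrixMultiply1, but with bounds checks and a per-thread register accumulator written back once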
float sum = 0;
int Col = blockIdx.x * TILE_WIDTH + threadIdx.x;
int Row = blockIdx.y * TILE_WIDTH + threadIdx.y;
if(Col < size && Row < size) {
for (int k = 0; k < size; k++)
sum += A[k * size + Row] * A[k * size + Col];
C[Row * size + Col] = sum;
}
}
__global__ void matrixMultiply3(float* A, float* C, int size) {
float CValue = 0;
int Row = blockIdx.y * TILE_WIDTH + threadIdx.y;
int Col = blockIdx.x * TILE_WIDTH + threadIdx.x;
__shared__ float As[TILE_WIDTH][TILE_WIDTH];
for (int k = 0; k < (TILE_WIDTH + size - 1)/TILE_WIDTH; k++) {
if (k * TILE_WIDTH + threadIdx.x < size && Row < size)
As[threadIdx.y][threadIdx.x] = A[Row * size + k * TILE_WIDTH + threadIdx.x];
else
As[threadIdx.y][threadIdx.x] = 0.0;
if (k * TILE_WIDTH + threadIdx.y < size && Col < size)
As[threadIdx.y][threadIdx.x] = A[(k*TILE_WIDTH + threadIdx.y) * size + Col];
else
As[threadIdx.y][threadIdx.x] = 0.0;
__syncthreads();
for (int n = 0; n < TILE_WIDTH; ++n)
CValue += As[threadIdx.y][n] * As[n][threadIdx.x];
__syncthreads();
}
if (Row < size && Col < size)
C[((blockIdx.y * blockDim.y + threadIdx.y) * size) + (blockIdx.x*blockDim.x) + threadIdx.x] = CValue;
}
void serial()
{
for (int i = 0; i < MAX_ARRAY_SIZE; i++)
for (int j = 0; j < MAX_ARRAY_SIZE; j++)
for (int k = 0; k < MAX_ARRAY_SIZE; k++)
F[i][j] += A[k][i] * A[k][j];
}
int check()
{
for (int i = 0; i < MAX_ARRAY_SIZE; i++) {
for (int j = 0; j < MAX_ARRAY_SIZE; j++) {
if(abs(C[i][j] - F[i][j]) > EPSILON){
cout<<"\nMismatch at index: ("<<i<<","<<j<<")"<<endl;
return 0;
}
}
}
return 1;
}
|
6d372bd15aa42d5e96cf97d3b57a4c6ac6c34c7a.cu
|
#include <iostream>
#include <stdlib.h>
#include <iomanip>
#include <time.h>
#include <sys/time.h>
#include <cuda.h>
using namespace std;
#define MAX_ARRAY_SIZE 2048
#define RANDOM_MAX 2.0
#define RANDOM_MIN 1.0
#define TILE_WIDTH 32
#define EPSILON 0.000001
#define NUM_BLOCKS (MAX_ARRAY_SIZE/TILE_WIDTH)
float A[MAX_ARRAY_SIZE][MAX_ARRAY_SIZE];
float F[MAX_ARRAY_SIZE][MAX_ARRAY_SIZE];
float C[MAX_ARRAY_SIZE][MAX_ARRAY_SIZE];
void serial();
void init_F();
int check();
__global__ void matrixMultiply1(float *, float *, int);
__global__ void matrixMultiply2(float *, float *, int);
__global__ void matrixMultiply3(float *, float *, int);
int main()
{
float *d_a, *d_c;
struct timeval startTime, endTime;
size_t memsize = MAX_ARRAY_SIZE * MAX_ARRAY_SIZE * sizeof(float);
cudaMalloc((void**) &d_a, memsize);
cudaMalloc((void**) &d_c, memsize);
init_F();
cudaMemcpy(d_a,A,memsize,cudaMemcpyHostToDevice);
cudaMemcpy(d_c,C,memsize,cudaMemcpyHostToDevice);
gettimeofday(&startTime, NULL);
//serial();
//dim3 dimGrid1(1,1);
//dim3 dimBlock1(MAX_ARRAY_SIZE, MAX_ARRAY_SIZE);
dim3 dimGrid2(MAX_ARRAY_SIZE/TILE_WIDTH, MAX_ARRAY_SIZE/TILE_WIDTH);
dim3 dimBlock2(TILE_WIDTH, TILE_WIDTH);
matrixMultiply1<<< dimGrid2, dimBlock2 >>>(d_a,d_c,MAX_ARRAY_SIZE);
//matrixMultiply2<<< dimGrid2, dimBlock2 >>>(d_a,d_c,MAX_ARRAY_SIZE);*/
//matrixMultiply3<<< dimGrid2, dimBlock2 >>>(d_a,d_c,MAX_ARRAY_SIZE);
gettimeofday(&endTime, NULL);
long seconds = endTime.tv_sec - startTime.tv_sec;
long useconds = endTime.tv_usec - startTime.tv_usec;
double duration = seconds + useconds/1000000.0;
cout<<"\nTime taken for Matrix Multiplication on GPU (time in sec): "<<fixed<<setprecision(7)<<duration;
cout<<"\nPerformance Metrics (GFlops/sec):"<<fixed<<setprecision(6)<<((2 * (long)MAX_ARRAY_SIZE * MAX_ARRAY_SIZE * MAX_ARRAY_SIZE))/(1e9 * duration);
cout<<endl;
cudaMemcpy(C,d_c,memsize,cudaMemcpyDeviceToHost);
if(check() == 1) {
cout<<"\nMatrix Multiplication Successful!"<<endl;
}
cudaFree(d_a);
cudaFree(d_c);
return 0;
}
void init_F()
{
srand(time(NULL));
for (int i = 0; i < MAX_ARRAY_SIZE; i++){
for (int j = 0; j < MAX_ARRAY_SIZE; j++){
float r = ((float)rand()) / (float)RAND_MAX;
A[i][j] = RANDOM_MIN + r * (RANDOM_MAX - RANDOM_MIN);
}
}
}
__global__ void matrixMultiply1(float *A, float *C, int size) {
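// naive kernel: each thread accumulates one element of C = A^T * A directly from global memory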
int Col = blockDim.y * blockIdx.y + threadIdx.y;
int Row = blockDim.x * blockIdx.x + threadIdx.x;
for(int k = 0; k < size; k++)
C[Row * size + Col] += A[k * size + Row] * A[k * size + Col];
}
__global__ void matrixMultiply2(float* A, float* C, int size)
{
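// same product as matrixMultiply1, but with bounds checks and a per-thread register accumulator written back once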
float sum = 0;
int Col = blockIdx.x * TILE_WIDTH + threadIdx.x;
int Row = blockIdx.y * TILE_WIDTH + threadIdx.y;
if(Col < size && Row < size) {
for (int k = 0; k < size; k++)
sum += A[k * size + Row] * A[k * size + Col];
C[Row * size + Col] = sum;
}
}
__global__ void matrixMultiply3(float* A, float* C, int size) {
float CValue = 0;
int Row = blockIdx.y * TILE_WIDTH + threadIdx.y;
int Col = blockIdx.x * TILE_WIDTH + threadIdx.x;
__shared__ float As[TILE_WIDTH][TILE_WIDTH];
for (int k = 0; k < (TILE_WIDTH + size - 1)/TILE_WIDTH; k++) {
if (k * TILE_WIDTH + threadIdx.x < size && Row < size)
As[threadIdx.y][threadIdx.x] = A[Row * size + k * TILE_WIDTH + threadIdx.x];
else
As[threadIdx.y][threadIdx.x] = 0.0;
if (k * TILE_WIDTH + threadIdx.y < size && Col < size)
As[threadIdx.y][threadIdx.x] = A[(k*TILE_WIDTH + threadIdx.y) * size + Col];
else
As[threadIdx.y][threadIdx.x] = 0.0;
__syncthreads();
for (int n = 0; n < TILE_WIDTH; ++n)
CValue += As[threadIdx.y][n] * As[n][threadIdx.x];
__syncthreads();
}
if (Row < size && Col < size)
C[((blockIdx.y * blockDim.y + threadIdx.y) * size) + (blockIdx.x*blockDim.x) + threadIdx.x] = CValue;
}
void serial()
{
for (int i = 0; i < MAX_ARRAY_SIZE; i++)
for (int j = 0; j < MAX_ARRAY_SIZE; j++)
for (int k = 0; k < MAX_ARRAY_SIZE; k++)
F[i][j] += A[k][i] * A[k][j];
}
int check()
{
for (int i = 0; i < MAX_ARRAY_SIZE; i++) {
for (int j = 0; j < MAX_ARRAY_SIZE; j++) {
if(abs(C[i][j] - F[i][j]) > EPSILON){
cout<<"\nMismatch at index: ("<<i<<","<<j<<")"<<endl;
return 0;
}
}
}
return 1;
}
|
14d73fa95fd2c4abd5de646524ca4118f8b572d9.hip
|
// !!! This is a file automatically generated by hipify!!!
#ifndef BRUTE_CRACKER_H_
#define BRUTE_CRACKER_H_
#include <mack/core/cracker.hpp>
#include <mack/options/values.hpp>
#include <hip/hip_runtime.h>
#include <mack/core/algorithm.cuh>
#include <mack/target_loader.hpp>
//keytable
#include <mack/keys/keytable.cuh>
#include <mack/keys/key.cuh>
//some helpers
#include "rainbow_cuda_cracker_helper.cuh"
namespace mack{
#define BRUTE_BLOCKS 12
#define BRUTE_THREADS 640
/**
* @class Brute_Cracker
* @is_of_type{crackers}
* @brief This is a brute force cracker.
*
* The brute force cracker takes every possible message
* and checks if this is the right one. Therefore it could
* take a very long time to do this.
*
* @option{m,length} message-, keystream- or keylength, every value up to 20 is possible
* @option{k,keytable} Choose between 'full_7bit', 'full_8bit',
* 'visible_ascii' or 'visible_german' charset.
* @option{d,device,0} Choose the device, default device is 0
* @option{t,target-file} Choose the file which contains the targets which should be cracked.
* @template_option{ALGORITHM,a,algorithm,algorithms}
* Choose the algorithm to be cracked.
* @author Paul Kramer
* @date 29.06.2012
* @version 0.1
*/
template<class ALGORITHM>
class Brute_Cracker : public mack::core::Cracker {
public:
Brute_Cracker(mack::options::values const* values);
void crack(mack::callbacks::Callback* callback, mack::targetloaders::Target_Loader* target_loader) const;
~Brute_Cracker();
private:
const std::string _target_file_path;
const int _devID;
const size_t _keylength;
const std::string _keytable;
};
//Kernel forward declaration
// template <class ALGORITHM>
// __global__
// void brute_kernel(unsigned char* targets, long target_count, bool* targets_found, long total_key_count, Keytable* device_keytable, size_t keylength);
#endif /* BRUTE_CRACKER_H_ */
#include <stdlib.h>
#include <stdio.h>
#include <hip/hip_runtime.h>
template<class ALGORITHM>
Brute_Cracker<ALGORITHM>::Brute_Cracker(mack::options::values const* values) :
_target_file_path(values->get("target-file")),
_devID(values->cast<int>("device")),
_keylength(values->cast<size_t>("length")),
_keytable(values->get("keytable"))
{
// ALGORITHM::init(options);
}
/**
* @brief Prepares some data to improve the performance of the cracker.
* @details This method fills a given array of keys with new key objects.
* @param keys a device array of key pointers to fill, one key per thread
* @param keytable the keytable object, which is important for new key objects
* @param keylength the length of the keys
*/
template< class ALGORITHM >
__global__
void
prepare_keys_kernel(Key** keys, Keytable* keytable, size_t keylength)
{
//get unique thread id
int threadid = (blockIdx.y * gridDim.x + blockIdx.x)*(blockDim.y * blockDim.x) + threadIdx.y * blockDim.x + threadIdx.x;
//generating new key objects and store the pointers into the array
keys[threadid] = new Key(keytable, keylength);
//initialize the keys
keys[threadid]->increment(threadid);
}
/**
* @brief This method frees graphics card memory from data which was generated in prepare_keys_kernel.
* @see prepare_keys_kernel
*/
template< class ALGORITHM >
__global__
void
clean_keys_kernel(Key** keys)
{
//get unique thread id
int threadid = (blockIdx.y * gridDim.x + blockIdx.x)*(blockDim.y * blockDim.x) + threadIdx.y * blockDim.x + threadIdx.x;
//removing objects
delete(keys[threadid]);
}
/**
* @brief Brute force kernel.
* @details This is the heart of the cracker, the brute force Cuda kernel. It takes some arguments:
* @param targets the targets to search for as array of the form: target1target2target3,
* using the number of the targets and the known target length one can split them apart
* @param number_of_targets the number of the targets to search for
* @param keys an array of keys, every thread gets exactly one key
* @param keylength the length of the keys
* @param ciphertexts this variable is needed to improve the performance. During every brute force kernel call
* every thread needs some space to store the temporary ciphertexts (or in case of a hash function for the current hash).
* Instead of malloc some memory and free it during every thread, we build one array, which is shared between all threads.
* @param threadcount we need the number of threads while the operation, therefore we take this variable,
* it is faster to share this instead of recalculating it
* @param results an array of results. For every target we need one result, therefore we take this array.
*
*/
template< class ALGORITHM >
__global__
void
brute_kernel(unsigned char* targets, unsigned long number_of_targets,
Key** keys, unsigned long number_of_keys, size_t keylength,
unsigned char* ciphertexts,
unsigned int threadcount, Result* results)
{
//get unique thread id
int threadid = (blockIdx.y * gridDim.x + blockIdx.x)*(blockDim.y * blockDim.x) + threadIdx.y * blockDim.x + threadIdx.x;
//break if we are at the end (may be dangerous)
// if(threadid >= number_of_keys) return;
//algorithm
ALGORITHM algorithm;
unsigned int number_of_keys_per_thread = (unsigned int)(number_of_keys / threadcount)+1l;
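//each thread handles number_of_keys_per_thread keys, stepping through the keyspace in strides of threadcount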
int targetlength = algorithm.get_target_size(keylength);
//init the current candidate
mack::core::candidate candidate;
memset(candidate.value, 0, sizeof(mack::core::candidate));
candidate.length = keylength;
//every thread has to calculate multiple keys per kernel call
for(int j = 0; j < number_of_keys_per_thread; ++j)
{
//copy the key as candidate and encipher (or hash) it
memcpy(candidate.value, keys[threadid]->get_key_string(), keylength);
algorithm.compute_target(
candidate,
(ciphertexts + threadid * targetlength)
);
//then search within the targets for a matching one
for(long i = 0; i < number_of_targets; ++i)
{
//if one target has the same value as the candidate, we found one!
if(cudaMemCmp(targets + (i * targetlength),
(ciphertexts + threadid * targetlength),
targetlength)){
//store the candidate in result field and set this target as solved,
//sometimes more than one thread finds a (different) result; in that case we simply overwrite it, because it does not matter
memcpy(results[i].key, candidate.value, keylength);
results[i].found = true;
}
}
//increment the own key to the next one for future work
keys[threadid]->increment(threadcount);
}
}
/**
* @brief The crack method.
* @see Cracker::crack
*/
template< class ALGORITHM >
void
Brute_Cracker<ALGORITHM>::crack(mack::callbacks::Callback* callback, mack::targetloaders::Target_Loader* target_loader) const
{
//init cuda device properties field
struct hipDeviceProp_t prop;
//init device id
int devID = _devID;
//gets some device properties and selects the right device
hipSetDevice(devID);
hipGetDevice(&devID);
hipGetDeviceProperties(&prop,devID);
// Init keylength
size_t keylength = _keylength;
// Init keytable filename
std::stringstream ss;
//ss << "../src/keys/char_tables/";
//ss << "../src/keys/char_tables/";
ss << _keytable;
// Init chartable
unsigned int num_chars = 0;
unsigned char* char_table = char_table_read(ss.str().c_str(), num_chars);
//init device chartable
unsigned char* device_char_table;
hipMalloc(&device_char_table, num_chars * sizeof(unsigned char));
hipMemcpy(device_char_table, char_table, num_chars * sizeof(unsigned char), hipMemcpyHostToDevice);
//calc how many keys fit into the gpu's memory, it's the maximum value
unsigned int number_of_threads = BRUTE_BLOCKS * BRUTE_THREADS;
if(((float)prop.totalGlobalMem * 0.9) / sizeof(Keytable) < number_of_threads)
{
//gpus memory is too small
std::cout << "ERROR: GPU Memory is too low, please decrease number of blocks or threads."<<std::endl;
return;
}
//init keytable
Keytable* keytable;
Keytable* device_keytable = new Keytable(device_char_table, num_chars);
hipMalloc((void**)&keytable, sizeof(Keytable));
hipMemcpy(keytable, device_keytable, sizeof(Keytable), hipMemcpyHostToDevice);
//init keys
Key** keys;
hipMalloc(&keys, number_of_threads * sizeof(Key*));
hipMemset(keys, 0, number_of_threads * sizeof(Key*));
//init algorithm
ALGORITHM algorithm;
int targetlength = algorithm.get_target_size(keylength);
//initialize space for ciphertexts
unsigned char* ciphertexts;
hipMalloc(&ciphertexts, number_of_threads * sizeof(unsigned char) * targetlength);
hipMemset(ciphertexts, 0, number_of_threads * sizeof(unsigned char) * targetlength);
//prepares the keys
std::cout << "Prepare some information...";
hipLaunchKernelGGL(( prepare_keys_kernel < ALGORITHM >) , dim3(BRUTE_BLOCKS), dim3(BRUTE_THREADS), 0, 0, keys, keytable, keylength);
std::cout << "done." << std::endl;
//load targets
//init targetloader
target_loader->init(algorithm.get_target_size(keylength));
unsigned char* host_targets = target_loader->load_file_8(_target_file_path.c_str());
unsigned long target_count = target_loader->get_target_count();
//init device targets and copy the host targets to the device
unsigned char* device_targets;
hipMalloc(&device_targets, sizeof(unsigned char) * target_count * targetlength);
hipMemcpy(device_targets, host_targets, sizeof(unsigned char) * target_count * targetlength, hipMemcpyHostToDevice);
//redundant code, in normal conditions the hipMemcpy will do the same, but it may prevent errors
hipDeviceSynchronize();
//calculates the total number of possible keys and the number of keys per percent for the output and the loop
unsigned long total_number_of_keys = pow(num_chars, keylength);
std::cout<<"Total number of keys: "<<total_number_of_keys<<std::endl;
unsigned long number_of_keys_per_percent = ceil(total_number_of_keys / 100);
//prepare result array
Result* h_results = (Result*) malloc(target_count * sizeof(Result));
Result* d_results;
hipMalloc(&d_results, target_count * sizeof(Result));
hipMemset(d_results, 0, target_count * sizeof(Result));
// prepare cuda time measurement, we decided to measure only cuda runtime,
// because the amount of work for the cpu is not that high and otherwise we will get some mad outputs
hipEvent_t start, stop;
float time = 0.0f;
float totaltime = 0.0f;
std::cout << "Start brute force attack!"<<std::endl;
std::cout << "Number of Keys per Percent: " << number_of_keys_per_percent << std::endl;
//the main loop, for every percent we search the keys
for(int percent = 0; percent < 100; ++percent)
{
//cuda time measurement
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
//calling the brute force kernel
hipLaunchKernelGGL(( brute_kernel< ALGORITHM >) , dim3(BRUTE_BLOCKS), dim3(BRUTE_THREADS), 0, 0, device_targets, target_count, keys, number_of_keys_per_percent, keylength, ciphertexts, number_of_threads, d_results);
//stop the time measurement...
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
//..and sync the device, to be sure, that all threads done
hipDeviceSynchronize();
//calculate the runtime and print it on the console
hipEventElapsedTime(&time, start, stop);
std::cout << (percent+1)<< "% done. "<<std::endl;
std::cout << (number_of_keys_per_percent/time/1000.f) << "M Hashes per Sec."<<std::endl;
totaltime += time;
//output the estimated rest time
std::cout << "Rest: "<< (total_number_of_keys - (percent*number_of_keys_per_percent))/(number_of_keys_per_percent/time*1000.f) << "s"<<std::endl;
//backcopy results
hipMemcpy(h_results, d_results, target_count * sizeof(Result), hipMemcpyDeviceToHost);
bool found_all = true;
for(int i = 0; i < target_count; ++i){
found_all &= h_results[i].found;
if(!found_all) break;
}
//break the loop, if all targets where found
if(found_all) break;
}
// output all found targets to get an overview
for(int i = 0; i < target_count; ++i)
{
if(h_results[i].found){
// std::cout << "Found '"<< h_results[i].key<<"': ";
// printAsHex(host_targets + (targetlength * i), targetlength);
// std::cout << std::endl;
mack::core::candidate* cand = new mack::core::candidate();
cand->init(keylength);
memcpy(cand->value, h_results[i].key, keylength);
callback->call(host_targets + (targetlength * i), cand, targetlength);
delete cand;
}
}
// free the memory
hipFree(device_char_table);
free(host_targets);
hipLaunchKernelGGL(( clean_keys_kernel < ALGORITHM >) , dim3(BRUTE_BLOCKS), dim3(BRUTE_THREADS), 0, 0, keys);
hipFree(keys);
std::cout << "Done in "<<totaltime / 1000.f<<"s."<<std::endl;
}
template<class ALGORITHM>
Brute_Cracker<ALGORITHM>::~Brute_Cracker() {
}
}//close namespace mack
|
14d73fa95fd2c4abd5de646524ca4118f8b572d9.cu
|
#ifndef BRUTE_CRACKER_H_
#define BRUTE_CRACKER_H_
#include <mack/core/cracker.hpp>
#include <mack/options/values.hpp>
#include <cuda_runtime.h>
#include <mack/core/algorithm.cuh>
#include <mack/target_loader.hpp>
//keytable
#include <mack/keys/keytable.cuh>
#include <mack/keys/key.cuh>
//some helpers
#include "rainbow_cuda_cracker_helper.cuh"
namespace mack{
#define BRUTE_BLOCKS 12
#define BRUTE_THREADS 640
/**
* @class Brute_Cracker
* @is_of_type{crackers}
* @brief This is a brute force cracker.
*
* The brute force cracker takes every possible message
* and checks if this is the right one. Therefore it could
* take a very long time to do this.
*
* @option{m,length} message-, keystream- or keylength, every value up to 20 is possible
* @option{k,keytable} Choose between 'full_7bit', 'full_8bit',
* 'visible_ascii' or 'visible_german' charset.
* @option{d,device,0} Choose the device, default device is 0
* @option{t,target-file} Choose the file which contains the targets which should be cracked.
* @template_option{ALGORITHM,a,algorithm,algorithms}
* Choose the algorithm to be cracked.
* @author Paul Kramer
* @date 29.06.2012
* @version 0.1
*/
template<class ALGORITHM>
class Brute_Cracker : public mack::core::Cracker {
public:
Brute_Cracker(mack::options::values const* values);
void crack(mack::callbacks::Callback* callback, mack::targetloaders::Target_Loader* target_loader) const;
~Brute_Cracker();
private:
const std::string _target_file_path;
const int _devID;
const size_t _keylength;
const std::string _keytable;
};
//Kernel forward declaration
// template <class ALGORITHM>
// __global__
// void brute_kernel(unsigned char* targets, long target_count, bool* targets_found, long total_key_count, Keytable* device_keytable, size_t keylength);
#endif /* BRUTE_CRACKER_H_ */
#include <stdlib.h>
#include <stdio.h>
#include <cuda_runtime.h>
template<class ALGORITHM>
Brute_Cracker<ALGORITHM>::Brute_Cracker(mack::options::values const* values) :
_target_file_path(values->get("target-file")),
_devID(values->cast<int>("device")),
_keylength(values->cast<size_t>("length")),
_keytable(values->get("keytable"))
{
// ALGORITHM::init(options);
}
/**
* @brief Prepares some data to improve the performance of the cracker.
* @details This method fills a given array of keys with new key objects.
* @param keys a device array of key pointers to fill, one key per thread
* @param keytable the keytable object, which is important for new key objects
* @param keylength the length of the keys
*/
template< class ALGORITHM >
__global__
void
prepare_keys_kernel(Key** keys, Keytable* keytable, size_t keylength)
{
//get unique thread id
int threadid = (blockIdx.y * gridDim.x + blockIdx.x)*(blockDim.y * blockDim.x) + threadIdx.y * blockDim.x + threadIdx.x;
//generating new key objects and store the pointers into the array
keys[threadid] = new Key(keytable, keylength);
//initialize the keys
keys[threadid]->increment(threadid);
}
/**
* @brief This method frees graphics card memory from data which was generated in prepare_keys_kernel.
* @see prepare_keys_kernel
*/
template< class ALGORITHM >
__global__
void
clean_keys_kernel(Key** keys)
{
//get unique thread id
int threadid = (blockIdx.y * gridDim.x + blockIdx.x)*(blockDim.y * blockDim.x) + threadIdx.y * blockDim.x + threadIdx.x;
//removing objects
delete(keys[threadid]);
}
/**
* @brief Brute force kernel.
* @details This is the heart of the cracker, the brute force Cuda kernel. It takes some arguments:
* @param targets the targets to search for as array of the form: target1target2target3,
* using the number of the targets and the known target length one can split them apart
* @param number_of_targets the number of the targets to search for
* @param keys an array of keys, every thread gets exactly one key
* @param keylength the length of the keys
* @param ciphertexts this variable is needed to improve the performance. During every brute force kernel call
* every thread needs some space to store the temporary ciphertexts (or in case of a hash function for the current hash).
* Instead of malloc some memory and free it during every thread, we build one array, which is shared between all threads.
* @param threadcount we need the number of threads while the operation, therefore we take this variable,
* it is faster to share this instead of recalculating it
* @param results an array of results. For every target we need one result, therefore we take this array.
*
*/
template< class ALGORITHM >
__global__
void
brute_kernel(unsigned char* targets, unsigned long number_of_targets,
Key** keys, unsigned long number_of_keys, size_t keylength,
unsigned char* ciphertexts,
unsigned int threadcount, Result* results)
{
//get unique thread id
int threadid = (blockIdx.y * gridDim.x + blockIdx.x)*(blockDim.y * blockDim.x) + threadIdx.y * blockDim.x + threadIdx.x;
//break if we are at the end (may be dangerous)
// if(threadid >= number_of_keys) return;
//algorithm
ALGORITHM algorithm;
unsigned int number_of_keys_per_thread = (unsigned int)(number_of_keys / threadcount)+1l;
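//each thread handles number_of_keys_per_thread keys, stepping through the keyspace in strides of threadcount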
int targetlength = algorithm.get_target_size(keylength);
//init the current candidate
mack::core::candidate candidate;
memset(candidate.value, 0, sizeof(mack::core::candidate));
candidate.length = keylength;
//every thread has to calculate multiple keys per kernel call
for(int j = 0; j < number_of_keys_per_thread; ++j)
{
//copy the key as candidate and encipher (or hash) it
memcpy(candidate.value, keys[threadid]->get_key_string(), keylength);
algorithm.compute_target(
candidate,
(ciphertexts + threadid * targetlength)
);
//then search within the targets for a matching one
for(long i = 0; i < number_of_targets; ++i)
{
//if one target has the same value as the candidate, we found one!
if(cudaMemCmp(targets + (i * targetlength),
(ciphertexts + threadid * targetlength),
targetlength)){
//store the candidate in result field and set this target as solved,
//sometimes more than one thread finds a (different) result; in that case we simply overwrite it, because it does not matter
memcpy(results[i].key, candidate.value, keylength);
results[i].found = true;
}
}
//increment the own key to the next one for future work
keys[threadid]->increment(threadcount);
}
}
/**
* @brief The crack method.
* @see Cracker::crack
*/
template< class ALGORITHM >
void
Brute_Cracker<ALGORITHM>::crack(mack::callbacks::Callback* callback, mack::targetloaders::Target_Loader* target_loader) const
{
//init cuda device properties field
struct cudaDeviceProp prop;
//init device id
int devID = _devID;
//gets some device properties and selects the right device
cudaSetDevice(devID);
cudaGetDevice(&devID);
cudaGetDeviceProperties(&prop,devID);
// Init keylength
size_t keylength = _keylength;
// Init keytable filename
std::stringstream ss;
//ss << "../src/keys/char_tables/";
//ss << "../src/keys/char_tables/";
ss << _keytable;
// Init chartable
unsigned int num_chars = 0;
unsigned char* char_table = char_table_read(ss.str().c_str(), num_chars);
//init device chartable
unsigned char* device_char_table;
cudaMalloc(&device_char_table, num_chars * sizeof(unsigned char));
cudaMemcpy(device_char_table, char_table, num_chars * sizeof(unsigned char), cudaMemcpyHostToDevice);
  //calculate how many keys fit into the GPU's memory; this is the upper limit
unsigned int number_of_threads = BRUTE_BLOCKS * BRUTE_THREADS;
if(((float)prop.totalGlobalMem * 0.9) / sizeof(Keytable) < number_of_threads)
{
    //the GPU's memory is too small
std::cout << "ERROR: GPU Memory is too low, please decrease number of blocks or threads."<<std::endl;
return;
}
//init keytable
Keytable* keytable;
Keytable* device_keytable = new Keytable(device_char_table, num_chars);
cudaMalloc((void**)&keytable, sizeof(Keytable));
cudaMemcpy(keytable, device_keytable, sizeof(Keytable), cudaMemcpyHostToDevice);
//init keys
Key** keys;
cudaMalloc(&keys, number_of_threads * sizeof(Key*));
cudaMemset(keys, 0, number_of_threads * sizeof(Key*));
//init algorithm
ALGORITHM algorithm;
int targetlength = algorithm.get_target_size(keylength);
//initialize space for ciphertexts
unsigned char* ciphertexts;
cudaMalloc(&ciphertexts, number_of_threads * sizeof(unsigned char) * targetlength);
cudaMemset(ciphertexts, 0, number_of_threads * sizeof(unsigned char) * targetlength);
//prepares the keys
std::cout << "Prepare some information...";
prepare_keys_kernel < ALGORITHM > <<<BRUTE_BLOCKS, BRUTE_THREADS>>>(keys, keytable, keylength);
std::cout << "done." << std::endl;
//load targets
//init targetloader
target_loader->init(algorithm.get_target_size(keylength));
unsigned char* host_targets = target_loader->load_file_8(_target_file_path.c_str());
unsigned long target_count = target_loader->get_target_count();
//init device targets and copy the host targets to the device
unsigned char* device_targets;
cudaMalloc(&device_targets, sizeof(unsigned char) * target_count * targetlength);
cudaMemcpy(device_targets, host_targets, sizeof(unsigned char) * target_count * targetlength, cudaMemcpyHostToDevice);
  //strictly redundant: under normal conditions the cudaMemcpy above already synchronizes, but the explicit sync may prevent subtle errors
cudaDeviceSynchronize();
//calculates the total number of possible keys and the number of keys per percent for the output and the loop
unsigned long total_number_of_keys = pow(num_chars, keylength);
std::cout<<"Total number of keys: "<<total_number_of_keys<<std::endl;
  unsigned long number_of_keys_per_percent = ceil(total_number_of_keys / 100.0);
//prepare result array
Result* h_results = (Result*) malloc(target_count * sizeof(Result));
Result* d_results;
cudaMalloc(&d_results, target_count * sizeof(Result));
cudaMemset(d_results, 0, target_count * sizeof(Result));
  // prepare cuda time measurement; we only measure the cuda runtime,
  // because the cpu workload is negligible and including it would distort the numbers
cudaEvent_t start, stop;
float time = 0.0f;
float totaltime = 0.0f;
std::cout << "Start brute force attack!"<<std::endl;
std::cout << "Number of Keys per Percent: " << number_of_keys_per_percent << std::endl;
//the main loop, for every percent we search the keys
for(int percent = 0; percent < 100; ++percent)
{
//cuda time measurement
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
//calling the brute force kernel
brute_kernel< ALGORITHM > <<<BRUTE_BLOCKS, BRUTE_THREADS>>>(device_targets, target_count, keys, number_of_keys_per_percent, keylength, ciphertexts, number_of_threads, d_results);
//stop the time measurement...
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
    //...and sync the device to be sure that all threads are done
cudaDeviceSynchronize();
//calculate the runtime and print it on the console
cudaEventElapsedTime(&time, start, stop);
std::cout << (percent+1)<< "% done. "<<std::endl;
std::cout << (number_of_keys_per_percent/time/1000.f) << "M Hashes per Sec."<<std::endl;
totaltime += time;
    //output the estimated remaining time
    std::cout << "Remaining: "<< (total_number_of_keys - (percent*number_of_keys_per_percent))/(number_of_keys_per_percent/time*1000.f) << "s"<<std::endl;
//backcopy results
cudaMemcpy(h_results, d_results, target_count * sizeof(Result), cudaMemcpyDeviceToHost);
bool found_all = true;
for(int i = 0; i < target_count; ++i){
found_all &= h_results[i].found;
if(!found_all) break;
}
    //break the loop if all targets were found
if(found_all) break;
}
  // output all found targets to give an overview
for(int i = 0; i < target_count; ++i)
{
if(h_results[i].found){
// std::cout << "Found '"<< h_results[i].key<<"': ";
// printAsHex(host_targets + (targetlength * i), targetlength);
// std::cout << std::endl;
mack::core::candidate* cand = new mack::core::candidate();
cand->init(keylength);
memcpy(cand->value, h_results[i].key, keylength);
callback->call(host_targets + (targetlength * i), cand, targetlength);
delete cand;
}
}
// free the memory
cudaFree(device_char_table);
free(host_targets);
clean_keys_kernel < ALGORITHM > <<<BRUTE_BLOCKS, BRUTE_THREADS>>>(keys);
cudaFree(keys);
std::cout << "Done in "<<totaltime / 1000.f<<"s."<<std::endl;
}
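/*
 * Usage sketch (an assumption for illustration, not part of the original sources):
 * a host-side driver would typically instantiate the cracker with a concrete
 * algorithm type and hand it a callback plus a target loader, e.g.
 *
 *   mack::Brute_Cracker<some_algorithm> cracker(options);
 *   cracker.crack(&callback, &target_loader);
 *
 * The constructor arguments and the concrete algorithm types are hypothetical here.
 */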
template<class ALGORITHM>
Brute_Cracker<ALGORITHM>::~Brute_Cracker() {
}
}//close namespace mack
|
fc3d41712307bc3e8ed24c66c9b39e9eabd0e334.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <vector>
#include "kernel_launcher.h"
namespace kl = kernel_launcher;
void cuda_check(hipError_t code) {
if (code != hipSuccess) {
throw std::runtime_error(
std::string("CUDA error: ") + hipGetErrorString(code));
}
}
std::string kernel_directory() {
// Find kernel file
std::string this_file = __FILE__;
std::string this_directory = this_file.substr(0, this_file.rfind('/'));
return this_directory + "/";
}
kl::KernelBuilder build_vector_add() {
// Tunable parameters
kl::KernelBuilder builder("vector_add", kernel_directory() + "/kernel.cu");
auto threads_per_block =
builder.tune("threads_per_block", {32, 64, 128, 256, 512, 1024});
auto blocks_per_sm =
builder.tune("blocks_per_sm", {1, 2, 3, 4, 5, 6, 7, 8});
auto items_per_thread =
builder.tune("elements_per_thread", {1, 2, 3, 4, 5, 6, 7, 8});
auto strategy = builder.tune("tiling_strategy", {0, 1, 2});
auto threads_per_sm = threads_per_block * blocks_per_sm;
auto items_per_block = threads_per_block * items_per_thread;
builder.restriction(threads_per_block <= kl::DEVICE_MAX_THREADS_PER_BLOCK);
builder.restriction(
threads_per_block * blocks_per_sm
<= kl::DEVICE_MAX_THREADS_PER_MULTIPROCESSOR);
auto [n, C, A, B] = kl::args<4>();
// Set options
builder.template_args(threads_per_block, items_per_thread, strategy)
.block_size(threads_per_block)
.grid_divisors(items_per_block)
.problem_size(n)
.buffers(C[n], A[n], B[n]);
return builder;
}
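// Note: the vector_add kernel itself is not defined in this file; it lives in the
// kernel.cu file referenced above and is (presumably) compiled at runtime by
// kernel_launcher using the tunable/template arguments configured here. A hedged
// sketch of what such a kernel might look like accompanies the CUDA version of this
// example further below.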
int main(int argc, char* argv[]) {
// Parse the number of elements N
int n = 1'000'000;
if (argc > 1) {
char* end = nullptr;
n = strtol(argv[1], &end, 10);
if (strlen(end)) {
std::cerr << "usage: " << argv[0] << " n\n";
return 1;
}
}
// Initialize inputs
std::vector<float> A(n), B(n), C_answer(n), C_result(n);
for (int i = 0; i < n; i++) {
A[i] = static_cast<float>(i);
B[i] = 1.0f;
C_answer[i] = A[i] + B[i];
}
// Allocate GPU memory
float *A_dev, *B_dev, *C_dev;
cuda_check(hipSetDevice(0));
cuda_check(hipMalloc(&A_dev, sizeof(float) * n));
cuda_check(hipMalloc(&B_dev, sizeof(float) * n));
cuda_check(hipMalloc(&C_dev, sizeof(float) * n));
cuda_check(
hipMemcpy(A_dev, A.data(), sizeof(float) * n, hipMemcpyDefault));
cuda_check(
hipMemcpy(B_dev, B.data(), sizeof(float) * n, hipMemcpyDefault));
// Create wisdom kernel
kl::WisdomKernel vector_add(build_vector_add());
// Call kernel
vector_add(
n,
kl::cuda_span(C_dev, n),
kl::cuda_span<const float>(A_dev, n),
kl::cuda_span<const float>(B_dev, n));
// Copy results back
cuda_check(hipMemcpy(
C_result.data(),
C_dev,
sizeof(float) * n,
hipMemcpyDefault));
// Check results
for (int i = 0; i < n; i++) {
float result = C_result[i];
float answer = C_answer[i];
if (result != answer) {
std::cout << "error: index " << i << " is incorrect: " << result
<< " != " << answer << "\n";
return 1;
}
}
std::cout << "result correct\n";
return 0;
}
|
fc3d41712307bc3e8ed24c66c9b39e9eabd0e334.cu
|
#include <vector>
#include "kernel_launcher.h"
namespace kl = kernel_launcher;
void cuda_check(cudaError_t code) {
if (code != cudaSuccess) {
throw std::runtime_error(
std::string("CUDA error: ") + cudaGetErrorString(code));
}
}
std::string kernel_directory() {
// Find kernel file
std::string this_file = __FILE__;
std::string this_directory = this_file.substr(0, this_file.rfind('/'));
return this_directory + "/";
}
kl::KernelBuilder build_vector_add() {
// Tunable parameters
kl::KernelBuilder builder("vector_add", kernel_directory() + "/kernel.cu");
auto threads_per_block =
builder.tune("threads_per_block", {32, 64, 128, 256, 512, 1024});
auto blocks_per_sm =
builder.tune("blocks_per_sm", {1, 2, 3, 4, 5, 6, 7, 8});
auto items_per_thread =
builder.tune("elements_per_thread", {1, 2, 3, 4, 5, 6, 7, 8});
auto strategy = builder.tune("tiling_strategy", {0, 1, 2});
auto threads_per_sm = threads_per_block * blocks_per_sm;
auto items_per_block = threads_per_block * items_per_thread;
builder.restriction(threads_per_block <= kl::DEVICE_MAX_THREADS_PER_BLOCK);
builder.restriction(
threads_per_block * blocks_per_sm
<= kl::DEVICE_MAX_THREADS_PER_MULTIPROCESSOR);
auto [n, C, A, B] = kl::args<4>();
// Set options
builder.template_args(threads_per_block, items_per_thread, strategy)
.block_size(threads_per_block)
.grid_divisors(items_per_block)
.problem_size(n)
.buffers(C[n], A[n], B[n]);
return builder;
}
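/*
 * The vector_add kernel itself lives in kernel.cu and is not part of this file.
 * As a rough sketch only (the template parameters mirror the template_args()
 * configured above, but the real kernel in kernel.cu may well differ), a compatible
 * kernel could look like this hypothetical version:
 */
template<int threads_per_block, int items_per_thread, int tiling_strategy>
__global__ void vector_add_sketch(int n, float* C, const float* A, const float* B) {
    // each block covers threads_per_block * items_per_thread elements
    int block_start = blockIdx.x * threads_per_block * items_per_thread;
    for (int k = 0; k < items_per_thread; k++) {
        // tiling_strategy 0: contiguous per-thread chunks; otherwise strided accesses
        int i = (tiling_strategy == 0)
            ? block_start + threadIdx.x * items_per_thread + k
            : block_start + k * threads_per_block + threadIdx.x;
        if (i < n) {
            C[i] = A[i] + B[i];
        }
    }
}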
int main(int argc, char* argv[]) {
// Parse the number of elements N
int n = 1'000'000;
if (argc > 1) {
char* end = nullptr;
n = strtol(argv[1], &end, 10);
if (strlen(end)) {
std::cerr << "usage: " << argv[0] << " n\n";
return 1;
}
}
// Initialize inputs
std::vector<float> A(n), B(n), C_answer(n), C_result(n);
for (int i = 0; i < n; i++) {
A[i] = static_cast<float>(i);
B[i] = 1.0f;
C_answer[i] = A[i] + B[i];
}
// Allocate GPU memory
float *A_dev, *B_dev, *C_dev;
cuda_check(cudaSetDevice(0));
cuda_check(cudaMalloc(&A_dev, sizeof(float) * n));
cuda_check(cudaMalloc(&B_dev, sizeof(float) * n));
cuda_check(cudaMalloc(&C_dev, sizeof(float) * n));
cuda_check(
cudaMemcpy(A_dev, A.data(), sizeof(float) * n, cudaMemcpyDefault));
cuda_check(
cudaMemcpy(B_dev, B.data(), sizeof(float) * n, cudaMemcpyDefault));
// Create wisdom kernel
kl::WisdomKernel vector_add(build_vector_add());
// Call kernel
vector_add(
n,
kl::cuda_span(C_dev, n),
kl::cuda_span<const float>(A_dev, n),
kl::cuda_span<const float>(B_dev, n));
// Copy results back
cuda_check(cudaMemcpy(
C_result.data(),
C_dev,
sizeof(float) * n,
cudaMemcpyDefault));
// Check results
for (int i = 0; i < n; i++) {
float result = C_result[i];
float answer = C_answer[i];
if (result != answer) {
std::cout << "error: index " << i << " is incorrect: " << result
<< " != " << answer << "\n";
return 1;
}
}
std::cout << "result correct\n";
return 0;
}
|
a7f9e32026de7aac4e9944a3becba712c1408420.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_complex.h>
#include <hiprand/hiprand_kernel.h>
#include <inttypes.h>
#include <stdio.h>
#include <assert.h>
#include "mopsr_cuda.h"
#include "mopsr_delays_cuda_hires.h"
// maximum number of channels [320] * antennas [8] from 1 PFB
#define MOPSR_PFB_ANT_MAX 8
#define MOPSR_PFB_CHANANT_MAX 2560
#define MOPSR_MAX_ANT 352
#define WARP_SIZE 32
#define MEDIAN_FILTER 1
#define TWO_SIGMA
//#define ONE_SIGMA
//#define SK_FREQ_AVG
//#define SHOW_MASK // this puts the SK/TP masks into the output data!
//#define _GDEBUG 1
#ifdef USE_CONSTANT_MEMORY
__constant__ float d_ant_scales_delay [MOPSR_MAX_NANT_PER_AQ];
#endif
int hires_transpose_delay_alloc (transpose_delay_hires_t * ctx,
uint64_t block_size, unsigned nchan,
unsigned nant, unsigned ntap)
{
ctx->nchan = nchan;
ctx->nant = nant;
ctx->ntap = ntap;
ctx->half_ntap = ntap / 2;
const unsigned nchanant = nchan * nant;
const unsigned ndim = 2;
ctx->curr = (transpose_delay_hires_buf_t *) malloc (sizeof(transpose_delay_hires_buf_t));
ctx->next = (transpose_delay_hires_buf_t *) malloc (sizeof(transpose_delay_hires_buf_t));
ctx->buffer_size = block_size + (ndim * nchanant * ctx->half_ntap * 2);
size_t counter_size = ctx->nant * sizeof(unsigned);
if (hires_transpose_delay_buf_alloc (ctx->curr, ctx->buffer_size, counter_size) < 0)
{
fprintf (stderr, "hires_transpose_delay_alloc: hires_transpose_delay_buf_alloc failed\n");
return -1;
}
if (hires_transpose_delay_buf_alloc (ctx->next, ctx->buffer_size, counter_size) < 0)
{
fprintf (stderr, "hires_transpose_delay_alloc: hires_transpose_delay_buf_alloc failed\n");
return -1;
}
ctx->first_kernel = 1;
return 0;
}
int hires_transpose_delay_buf_alloc (transpose_delay_hires_buf_t * buf, size_t buffer_size, size_t counter_size)
{
hipError_t error;
// allocate the buffer for data
error = hipMalloc (&(buf->d_buffer), buffer_size);
if (error != hipSuccess)
{
fprintf (stderr, "hires_transpose_delay_buf_alloc: hipMalloc failed for %ld bytes\n", buffer_size);
return -1;
}
buf->counter_size = counter_size;
buf->counter_bytes = counter_size * 3;
#ifdef USE_CONSTANT_MEMORY
error = hipHostMalloc (&(buf->h_out_from), buf->counter_size);
if (error != hipSuccess)
{
fprintf (stderr, "hires_transpose_delay_buf_alloc: hipHostMalloc failed for %ld bytes\n", buf->counter_size);
return -1;
}
error = hipHostMalloc (&(buf->h_in_from), buf->counter_size);
if (error != hipSuccess)
{
fprintf (stderr, "hires_transpose_delay_buf_alloc: hipHostMalloc failed for %ld bytes\n", buf->counter_size);
return -1;
}
error = hipHostMalloc (&(buf->h_in_to), buf->counter_size);
if (error != hipSuccess)
{
fprintf (stderr, "hires_transpose_delay_buf_alloc: hipHostMalloc failed for %ld bytes\n", buf->counter_size);
return -1;
}
#else
// allocate host memory for counters
error = hipHostMalloc (&(buf->h_base), buf->counter_bytes);
// setup 3 pointers for host memory
buf->h_out_from = (unsigned *) (buf->h_base + 0 * counter_size);
buf->h_in_from = (unsigned *) (buf->h_base + 1 * counter_size);
buf->h_in_to = (unsigned *) (buf->h_base + 2 * counter_size);
error = hipMalloc (&(buf->d_base), buf->counter_bytes);
buf->d_out_from = (unsigned *) (buf->d_base + 0 * counter_size);
buf->d_in_from = (unsigned *) (buf->d_base + 1 * counter_size);
buf->d_in_to = (unsigned *) (buf->d_base + 2 * counter_size);
#endif
buf->h_off = (unsigned *) malloc(buf->counter_size);
buf->h_delays = (unsigned *) malloc(buf->counter_size);
return 0;
}
void hires_transpose_delay_reset (transpose_delay_hires_t * ctx)
{
ctx->first_kernel = 1;
}
int hires_transpose_delay_dealloc (transpose_delay_hires_t * ctx)
{
hires_transpose_delay_buf_dealloc (ctx->curr);
hires_transpose_delay_buf_dealloc (ctx->next);
free (ctx->curr);
free (ctx->next);
return 0;
}
int hires_transpose_delay_buf_dealloc (transpose_delay_hires_buf_t * ctx)
{
#ifdef USE_CONSTANT_MEMORY
if (ctx->h_out_from)
hipHostFree (ctx->h_out_from);
ctx->h_out_from = 0;
if (ctx->h_in_from)
hipHostFree (ctx->h_in_from);
ctx->h_in_from = 0;
if (ctx->h_in_to)
hipHostFree (ctx->h_in_to);
ctx->h_in_to = 0;
#else
if (ctx->h_base)
hipHostFree (ctx->h_base);
ctx->h_base = 0;
if (ctx->d_base)
hipFree (ctx->d_base);
ctx->d_base = 0;
#endif
if (ctx->h_off)
free(ctx->h_off);
ctx->h_off = 0;
if (ctx->h_delays)
free(ctx->h_delays);
ctx->h_delays = 0;
if (ctx->d_buffer)
hipFree(ctx->d_buffer);
ctx->d_buffer =0;
return 0;
}
#ifdef USE_CONSTANT_MEMORY
__constant__ unsigned curr_out_from[MOPSR_PFB_ANT_MAX];
__constant__ unsigned curr_in_from[MOPSR_PFB_ANT_MAX];
__constant__ unsigned curr_in_to[MOPSR_PFB_ANT_MAX];
__constant__ unsigned next_out_from[MOPSR_PFB_ANT_MAX];
__constant__ unsigned next_in_from[MOPSR_PFB_ANT_MAX];
__constant__ unsigned next_in_to[MOPSR_PFB_ANT_MAX];
#endif
// major transpose kernel
// each block will process 32 time samples for 16 channels for all antennas
#ifdef USE_CONSTANT_MEMORY
__global__ void hires_transpose_delay_kernel (
int16_t * in,
int16_t * curr,
int16_t * next,
const unsigned nchan, const unsigned nant,
const unsigned nval, const unsigned nval_per_thread,
const unsigned samp_stride, const unsigned chan_block_stride,
const unsigned out_chanant_stride)
#else
__global__ void hires_transpose_delay_kernel (
int16_t * in,
int16_t * curr,
int16_t * next,
unsigned * curr_counter,
unsigned * next_counter,
const unsigned nchan, const unsigned nant,
const unsigned nval, const unsigned nval_per_thread,
const unsigned samp_stride, const unsigned chan_block_stride,
const unsigned out_chanant_stride)
#endif
{
// for loaded data samples
extern __shared__ int16_t sdata[];
const int nsamp_per_block = 32;
const int nchan_per_block = 16;
const int nchanant_per_block = nant * nchan_per_block;
const int warp_num = threadIdx.x / 32;
const int warp_idx = threadIdx.x & 0x1F; // % 32
// each warp reads a time sample, with the warp threads each reading the antenna and channels required
  // input offset: time sample offset + channel block offset + chanant offset (warp_idx)
unsigned idx = (blockIdx.x * nsamp_per_block + warp_num) * samp_stride + (blockIdx.y * chan_block_stride) + warp_idx;
  // offset in shm for the right time sample; the chanant layout is the bank-conflict trick
unsigned sdx = (nchanant_per_block * warp_num) + warp_idx;// + (warp_num * 2);
// read the TFS input to TFS shared memory
for (unsigned i=0; i<nval_per_thread; i++)
{
if (idx < nval)
{
sdata[sdx] = in[idx];
idx += 32;
sdx += 32;
}
}
__syncthreads();
// each warp will write out 32 time samples for a single antenna, for a number of channels
const int ant = warp_num % nant;
int ichan = nval_per_thread * (warp_num / nant);
int ichanant = ichan * nant + ant;
#ifdef USE_CONSTANT_MEMORY
// TODO try removing these references
const int curr_from = curr_in_from[ant];
const int curr_to = curr_in_to[ant];
const int curr_out = curr_out_from[ant] - curr_from;
const int next_from = next_in_from[ant];
const int next_to = next_in_to[ant];
const int next_out = next_out_from[ant] - next_from;
#else
const int curr_to = curr_counter[2*nant + ant];
const int curr_from = curr_counter[nant + ant];
const int curr_out = curr_counter[ant] - curr_from;
const int next_to = next_counter[2*nant + ant];
const int next_from = next_counter[nant + ant];
const int next_out = next_counter[ant] - next_from;
#endif
// offset for this thread in shared memory
// sample * sample_stride_in_shm + chanant offset + shm bank trick
sdx = (warp_idx * nant * nchan_per_block) + ichanant;// + (warp_idx * 2);
// output chanant for this warp
const int ochanant = (blockIdx.y * nchan_per_block * nant) + ichanant;
int osamp = (blockIdx.x * nsamp_per_block) + warp_idx;
int64_t odx = ochanant * out_chanant_stride + osamp;
// loop over channels
for (unsigned i=0; i<nval_per_thread; i++)
{
if (curr_from <= osamp && osamp < curr_to)
curr[odx + curr_out] = sdata[sdx];
if (next_from <= osamp && osamp < next_to)
next[odx + next_out] = sdata[sdx];
sdx += nant;
odx += out_chanant_stride * nant;
}
}
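// In summary, this kernel re-orders TFS (time, frequency, antenna) input into FST
// (frequency, antenna, time) output, splitting each antenna's samples between the
// curr and next buffers according to the per-antenna sample delays set up on the host.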
void * hires_transpose_delay (hipStream_t stream, transpose_delay_hires_t * ctx, void * d_in, uint64_t nbytes, mopsr_delay_hires_t ** delays)
{
const unsigned ndim = 2;
unsigned nthread = 1024;
// process 32 samples and 16 channels in a block
const unsigned nsamp_per_block = 32;
const unsigned nchan_per_block = 16;
const unsigned nchanblocks = ctx->nchan / nchan_per_block;
const unsigned nval_per_block = nsamp_per_block * nchan_per_block * ctx->nant;
const uint64_t nsamp = nbytes / (ctx->nchan * ctx->nant * ndim);
unsigned iant;
int shift;
const unsigned ichan = 0;
for (iant=0; iant < ctx->nant; iant++)
{
if (delays[iant][ichan].samples < ctx->half_ntap)
{
fprintf (stderr, "ERROR: [%d] delay in samples[%u] is less than ntap/2[%u]\n", iant, delays[iant][ichan].samples, ctx->half_ntap);
return 0;
}
if (ctx->first_kernel)
{
ctx->curr->h_delays[iant] = delays[iant][ichan].samples;
ctx->next->h_delays[iant] = delays[iant][ichan].samples;
ctx->curr->h_out_from[iant] = 0;
ctx->curr->h_in_from[iant] = ctx->curr->h_delays[iant] - ctx->half_ntap;
ctx->curr->h_in_to[iant] = nsamp;
ctx->curr->h_off[iant] = ctx->curr->h_in_to[iant] - ctx->curr->h_in_from[iant];
// should never be used on first iteration
ctx->next->h_out_from[iant] = 0;
ctx->next->h_in_from[iant] = nsamp;
ctx->next->h_in_to[iant] = 2 * nsamp;
}
else
{
// curr always uses delays from previous iteration
ctx->curr->h_out_from[iant] = ctx->curr->h_off[iant];
ctx->curr->h_in_from[iant] = 0;
ctx->curr->h_in_to[iant] = nsamp + (2 * ctx->half_ntap) - ctx->curr->h_off[iant];
if (nsamp + (2 * ctx->half_ntap) < ctx->curr->h_off[iant])
ctx->curr->h_in_to[iant] = 0;
// next always uses new delays
ctx->next->h_out_from[iant] = 0;
ctx->next->h_in_from[iant] = ctx->curr->h_in_to[iant] - (2 * ctx->half_ntap);
ctx->next->h_in_to[iant] = nsamp;
// handle a change in sample level delay this should be right
shift = delays[iant][ichan].samples - ctx->curr->h_delays[iant];
ctx->next->h_in_from[iant] += shift;
ctx->next->h_delays[iant] = delays[iant][ichan].samples;
ctx->next->h_off[iant] = ctx->next->h_in_to[iant] - ctx->next->h_in_from[iant];
}
}
/*
*/
#ifdef USE_CONSTANT_MEMORY
hipMemcpyToSymbolAsync(curr_out_from, (void *) ctx->curr->h_out_from, ctx->curr->counter_size, 0, hipMemcpyHostToDevice, stream);
hipMemcpyToSymbolAsync(curr_in_from, (void *) ctx->curr->h_in_from, ctx->curr->counter_size, 0, hipMemcpyHostToDevice, stream);
hipMemcpyToSymbolAsync(curr_in_to, (void *) ctx->curr->h_in_to, ctx->curr->counter_size, 0, hipMemcpyHostToDevice, stream);
hipMemcpyToSymbolAsync(next_out_from, (void *) ctx->next->h_out_from, ctx->curr->counter_size, 0, hipMemcpyHostToDevice, stream);
hipMemcpyToSymbolAsync(next_in_from, (void *) ctx->next->h_in_from, ctx->curr->counter_size, 0, hipMemcpyHostToDevice, stream);
hipMemcpyToSymbolAsync(next_in_to, (void *) ctx->next->h_in_to, ctx->curr->counter_size, 0, hipMemcpyHostToDevice, stream);
hipStreamSynchronize(stream);
#else
hipMemcpyAsync (ctx->curr->d_base, ctx->curr->h_base, ctx->curr->counter_bytes, hipMemcpyHostToDevice, stream);
hipMemcpyAsync (ctx->next->d_base, ctx->next->h_base, ctx->next->counter_bytes, hipMemcpyHostToDevice, stream);
hipStreamSynchronize(stream);
#endif
// special case where not a clean multiple [TODO validate this!]
if (nval_per_block % nthread)
{
unsigned numerator = nval_per_block;
while ( numerator > nthread )
numerator /= 2;
nthread = numerator;
}
unsigned nval_per_thread = nval_per_block / nthread;
// the total number of values we have to process is
const uint64_t nval = nbytes / ndim;
// the total number of samples is
dim3 blocks = dim3 (nsamp / nsamp_per_block, nchanblocks);
if (nsamp % nsamp_per_block)
blocks.x++;
const size_t sdata_bytes = (nsamp_per_block * nchan_per_block * ctx->nant * ndim) + 256;
  // stride (in 16-bit samples) between consecutive input time samples
const unsigned samp_stride = ctx->nchan * ctx->nant;
const unsigned chan_block_stride = nchan_per_block * ctx->nant;
const unsigned out_chanant_stride = nsamp + (2 * ctx->half_ntap);
#ifdef _GDEBUG
fprintf (stderr, "transpose_delay: nval_per_block=%u, nval_per_thread=%u\n", nval_per_block, nval_per_thread);
fprintf (stderr, "transpose_delay: nbytes=%lu, nsamp=%lu, nval=%lu\n", nbytes, nsamp, nval);
fprintf (stderr, "transpose_delay: nthread=%d, blocks=(%d,%d,%d) sdata_bytes=%d\n", nthread, blocks.x, blocks.y, blocks.z, sdata_bytes);
fprintf (stderr, "transpose_delay: out_chanant_stride=%u\n", out_chanant_stride);
#endif
#ifdef USE_CONSTANT_MEMORY
hipLaunchKernelGGL(( hires_transpose_delay_kernel), dim3(blocks),dim3(nthread),sdata_bytes,stream, (int16_t *) d_in,
(int16_t *) ctx->curr->d_buffer, (int16_t *) ctx->next->d_buffer,
ctx->nchan, ctx->nant, nval, nval_per_thread, samp_stride, chan_block_stride, out_chanant_stride);
#else
hipLaunchKernelGGL(( hires_transpose_delay_kernel), dim3(blocks),dim3(nthread),sdata_bytes,stream, (int16_t *) d_in,
(int16_t *) ctx->curr->d_buffer, (int16_t *) ctx->next->d_buffer,
(unsigned *) ctx->curr->d_base, (unsigned *) ctx->next->d_base,
ctx->nchan, ctx->nant, nval, nval_per_thread, samp_stride, chan_block_stride, out_chanant_stride);
#endif
#if _GDEBUG
check_error_stream("hires_transpose_delay_kernel", stream);
#endif
if (ctx->first_kernel)
{
ctx->first_kernel = 0;
return 0;
}
else
{
transpose_delay_hires_buf_t * save = ctx->curr;
ctx->curr = ctx->next;
ctx->next = save;
return save->d_buffer;
}
}
#ifdef USE_CONSTANT_MEMORY
// fringe coefficients are kept in fast constant memory here
__constant__ float fringe_coeffs[MOPSR_PFB_CHANANT_MAX];
// apply the fringe rotation (phase correction) to each channel / antenna; every thread in a block works on the same channel / antenna
__global__ void hires_fringe_rotate_kernel (int16_t * input, uint64_t ndat)
#else
__global__ void hires_fringe_rotate_kernel (int16_t * input, uint64_t ndat,
const float * __restrict__ d_fringes,
const float * __restrict__ d_ant_scales_delay)
#endif
{
const unsigned isamp = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned iant = blockIdx.y;
const unsigned nant = gridDim.y;
const unsigned ichan = blockIdx.z;
const unsigned ichanant = (ichan * nant) + iant;
const uint64_t idx = ichanant * ndat + isamp;
if (isamp >= ndat)
return;
cuFloatComplex fringe_phasor;
#ifdef USE_CONSTANT_MEMORY
// using constant memory should result in broadcast for this block/half warp
sincosf (fringe_coeffs[ichanant], &(fringe_phasor.y), &(fringe_phasor.x));
#else
sincosf (d_fringes[ichanant], &(fringe_phasor.y), &(fringe_phasor.x));
#endif
int16_t val16 = input[idx];
int8_t * val8ptr = (int8_t *) &val16;
const float scale = d_ant_scales_delay[iant];
float re = ((float) (val8ptr[0]) + 0.38) * scale;
float im = ((float) (val8ptr[1]) + 0.38) * scale;
hipComplex val = make_cuComplex (re, im);
hipComplex rotated = cuCmulf(val, fringe_phasor);
// output from signal processing, should have 0 mean data
// i.e. we range from -128 to 127
val8ptr[0] = (int8_t) rintf (cuCrealf(rotated));
val8ptr[1] = (int8_t) rintf (cuCimagf(rotated));
input[idx] = val16;
}
//
// Perform fractional delay correction, out-of-place
//
void hires_fringe_rotate (hipStream_t stream, void * d_in,
#ifdef USE_CONSTANT_MEMORY
float * h_fringes, size_t fringes_size,
#else
void * d_fringes,
void * d_ant_scales,
#endif
uint64_t nbytes, unsigned nchan,
unsigned nant)
{
const unsigned ndim = 2;
const uint64_t ndat = nbytes / (nchan * nant * ndim);
// number of threads that actually load data
unsigned nthread = 1024;
dim3 blocks (ndat / nthread, nant, nchan);
if (ndat % nthread)
blocks.x++;
#ifdef USE_CONSTANT_MEMORY
hipMemcpyToSymbolAsync(fringe_coeffs, (void *) h_fringes, fringes_size, 0, hipMemcpyHostToDevice, stream);
hipStreamSynchronize(stream);
#endif
#if _GDEBUG
fprintf (stderr, "fringe_rotate: bytes=%lu ndat=%lu\n", nbytes, ndat);
fprintf (stderr, "fringe_rotate: nthread=%d, blocks.x=%d, blocks.y=%d, blocks.z=%d\n", nthread, blocks.x, blocks.y, blocks.z);
#endif
#ifdef USE_CONSTANT_MEMORY
hipLaunchKernelGGL(( hires_fringe_rotate_kernel), dim3(blocks), dim3(nthread), 0, stream, (int16_t *) d_in, ndat);
#else
hipLaunchKernelGGL(( hires_fringe_rotate_kernel), dim3(blocks), dim3(nthread), 0, stream, (int16_t *) d_in, ndat, (float *) d_fringes, (float *) d_ant_scales);
#endif
#if _GDEBUG
check_error_stream("hires_fringe_rotate_kernel", stream);
#endif
}
#ifdef USE_CONSTANT_MEMORY
void hires_delay_copy_scales (hipStream_t stream, float * h_ant_scales, size_t nbytes)
{
hipMemcpyToSymbolAsync (d_ant_scales_delay, (void *) h_ant_scales, nbytes, 0, hipMemcpyHostToDevice, stream);
hipStreamSynchronize(stream);
}
#endif
// apply a fractional delay correction to a channel / antenna; every thread in a block (and hence every warp) works on the same channel / antenna
__global__ void hires_delay_fractional_kernel (int16_t * input, int16_t * output,
const float * __restrict__ fir_coeffs,
#ifndef USE_CONSTANT_MEMORY
const float * __restrict__ d_fringes,
const float * __restrict__ d_ant_scales_delay,
#endif
unsigned nthread_run,
uint64_t nsamp_in,
const unsigned chan_stride,
const unsigned ant_stride,
const unsigned ntap)
{
// the input data for block are stored in blockDim.x values
extern __shared__ hipComplex fk_shared1[];
// the FIR filter stored in the final NTAP values
float * filter = (float *) (fk_shared1 + blockDim.x);
const unsigned half_ntap = (ntap / 2);
//const unsigned in_offset = 2 * half_ntap;
// iant blockIdx.y
// ichan blockIDx.z
const unsigned isamp = blockIdx.x * nthread_run + threadIdx.x;
const unsigned ichanant = blockIdx.z * gridDim.y + blockIdx.y;
const unsigned nsamp_out = nsamp_in - ( 2 * half_ntap);
hipComplex fringe_phasor;
#ifdef USE_CONSTANT_MEMORY
sincosf (fringe_coeffs[ichanant], &(fringe_phasor.y), &(fringe_phasor.x));
#else
sincosf (d_fringes[ichanant], &(fringe_phasor.y), &(fringe_phasor.x));
#endif
  // read in the FIR coefficients
if (threadIdx.x < ntap)
filter[threadIdx.x] = fir_coeffs[(ichanant * ntap) + threadIdx.x];
if (isamp >= nsamp_in)
{
return;
}
  // each thread also loads its own input value from main memory (indexed by chanant and sample)
const unsigned in_data_idx = (ichanant * nsamp_in) + isamp;
// const unsigned out_data_idx = ichanant * nsamp_out + isamp;
int16_t val16 = input[in_data_idx];
int8_t * val8ptr = (int8_t *) &val16;
{
const float scale = d_ant_scales_delay[blockIdx.y];
hipComplex val = make_cuComplex (((float) (val8ptr[0])) + 0.38, ((float) (val8ptr[1])) + 0.38);
val.x *= scale;
val.y *= scale;
fk_shared1[threadIdx.x] = cuCmulf(val, fringe_phasor);
}
__syncthreads();
  // there are 2 * half_ntap threads that don't calculate anything
if ((threadIdx.x < nthread_run) && (isamp < nsamp_out))
{
float re = 0;
float im = 0;
for (unsigned i=0; i<ntap; i++)
{
re += cuCrealf(fk_shared1[threadIdx.x + i]) * filter[i];
im += cuCimagf(fk_shared1[threadIdx.x + i]) * filter[i];
}
    // input is -127.5 to +127.5, output is -128 to 127
val8ptr[0] = (int8_t) rintf (re);
val8ptr[1] = (int8_t) rintf (im);
output[ichanant * nsamp_out + isamp] = val16;
}
}
// calculate the filter coefficients for each channel and antenna
__global__ void hires_calculate_fir_coeffs (float * delays, float * fir_coeffs, unsigned ntap)
{
const unsigned half_ntap = ntap / 2;
const unsigned ichanant = blockIdx.x;
const float itap = (float) threadIdx.x;
const float filter_order = ntap - 1;
float x = itap - delays[ichanant];
// Hamming window filter http://users.spa.aalto.fi/vpv/publications/vesan_vaitos/ch3_pt1_fir.pdf
float window = 0.54 - 0.46 * cos (2.0 * M_PI * x / filter_order);
float sinc = 1;
if (x != half_ntap)
{
x -= half_ntap;
x *= M_PI;
sinc = sinf(x) / x;
}
fir_coeffs[(ichanant * ntap) + threadIdx.x] = sinc * window;
}
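// Each coefficient above is a delay-shifted windowed sinc:
//   x           = tap_index - fractional_delay
//   window(x)   = 0.54 - 0.46 * cos(2*pi*x / (ntap-1))               (Hamming window)
//   sinc(x)     = sin(pi*(x - ntap/2)) / (pi*(x - ntap/2)),  taken as 1 at the centre tap
//   coefficient = window(x) * sinc(x)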
// apply a fractional delay correction to a channel / antenna; every thread in a block (and hence every warp) works on the same channel / antenna
__global__ void hires_delay_fractional_float_kernel (int16_t * input,
cuFloatComplex * output, float * fir_coeffs,
#ifndef USE_CONSTANT_MEMORY
float * fringe_coeffs,
#endif
unsigned nthread_run, uint64_t nsamp_in,
const unsigned chan_stride, const unsigned ant_stride,
const unsigned ntap)
{
extern __shared__ float fk_shared_filter[];
cuFloatComplex * in_shm = (cuFloatComplex *) (fk_shared_filter + ntap + 1);
const unsigned half_ntap = ntap / 2;
const unsigned in_offset = 2 * half_ntap;
const unsigned isamp = blockIdx.x * nthread_run + threadIdx.x;
const unsigned iant = blockIdx.y;
const unsigned nant = gridDim.y;
const unsigned ichan = blockIdx.z;
const unsigned ichanant = ichan * nant + iant;
const unsigned nsamp_out = nsamp_in - in_offset;
// compute the complex term required for fringe stopping
cuFloatComplex fringe_phasor;
sincosf (fringe_coeffs[ichanant], &(fringe_phasor.y), &(fringe_phasor.x));
  // read in the FIR coefficients
if (threadIdx.x < ntap)
{
fk_shared_filter[threadIdx.x] = fir_coeffs[(ichanant * ntap) + threadIdx.x];
}
// final block check for data input (not data output!)
if (isamp >= nsamp_in)
{
return;
}
  // each thread also loads its own input value from main memory (indexed by chanant and sample)
const unsigned in_data_idx = (ichanant * nsamp_in) + isamp;
int16_t val16 = input[in_data_idx];
int8_t * val8ptr = (int8_t *) &val16;
cuFloatComplex val = make_cuComplex ((float) (val8ptr[0]) + 0.33, (float) (val8ptr[1]) + 0.33);
in_shm[threadIdx.x] = cuCmulf (val, fringe_phasor);
__syncthreads();
const unsigned osamp = (blockIdx.x * nthread_run) + threadIdx.x;
  // there are 2 * half_ntap threads that don't calculate anything
if (threadIdx.x < nthread_run && osamp < nsamp_out)
{
cuFloatComplex sum = make_cuComplex(0,0);
for (unsigned i=0; i<ntap; i++)
{
val = in_shm[threadIdx.x + i];
val.x *= fk_shared_filter[i];
val.y *= fk_shared_filter[i];
sum = cuCaddf(sum, val);
}
unsigned ou_data_idx = (ichanant * nsamp_out) + osamp;
output[ou_data_idx] = sum;
}
}
//
// Perform fractional delay correction, out-of-place
//
void hires_delay_fractional (hipStream_t stream, void * d_in, void * d_out,
float * d_delays, float * d_fir_coeffs,
#ifdef USE_CONSTANT_MEMORY
float * h_fringes, size_t fringes_size,
#else
void * d_fringes, void * d_ant_scales,
#endif
uint64_t nbytes, unsigned nchan,
unsigned nant, unsigned ntap)
{
const unsigned ndim = 2;
const uint64_t ndat = nbytes / (nchan * nant * ndim);
const unsigned half_ntap = ntap / 2;
// number of threads that actually load data
unsigned nthread_load = 1024;
if (ndat < nthread_load)
nthread_load = ndat;
unsigned nthread_run = nthread_load - (2 * half_ntap);
// need shared memory to load the ntap coefficients + nthread_load data points
const size_t sdata_bytes = (nthread_load * ndim + ntap) * sizeof(float);
dim3 blocks (ndat / nthread_run, nant, nchan);
if (ndat % nthread_load)
blocks.x++;
#ifdef USE_CONSTANT_MEMORY
hipMemcpyToSymbolAsync (fringe_coeffs, (void *) h_fringes, fringes_size, 0, hipMemcpyHostToDevice, stream);
hipStreamSynchronize(stream);
#endif
  // calculate the FIR coefficients to be used in the fractional delay
unsigned nthread = ntap;
unsigned nblock = nchan * nant;
hipLaunchKernelGGL(( hires_calculate_fir_coeffs), dim3(nblock),dim3(nthread),0,stream, (float *) d_delays, (float *) d_fir_coeffs, ntap);
#if _GDEBUG
check_error_stream("hires_calculate_fir_coeffs", stream);
#endif
#if _GDEBUG
fprintf (stderr, "delay_fractional: bytes=%lu ndat=%lu sdata_bytes=%ld\n", nbytes, ndat, sdata_bytes);
fprintf (stderr, "delay_fractional: blocks.x=%d, blocks.y=%d, blocks.z=%d\n", blocks.x, blocks.y, blocks.z);
fprintf (stderr, "delay_fractional: nthread_load=%d nthread_run=%d ntap=%d\n", nthread_load, nthread_run, ntap);
#endif
const unsigned chan_stride = nant * ndat;
const unsigned ant_stride = ndat;
#ifdef USE_CONSTANT_MEMORY
hipLaunchKernelGGL(( hires_delay_fractional_kernel), dim3(blocks), dim3(nthread_load), sdata_bytes, stream, (int16_t *) d_in, (int16_t *) d_out,
(float *) d_fir_coeffs, nthread_run, ndat, chan_stride, ant_stride, ntap);
#else
hipLaunchKernelGGL(( hires_delay_fractional_kernel), dim3(blocks), dim3(nthread_load), sdata_bytes, stream, (int16_t *) d_in, (int16_t *) d_out,
(float *) d_fir_coeffs, (float *) d_fringes, (float *) d_ant_scales, nthread_run, ndat, chan_stride, ant_stride, ntap);
#endif
#if _GDEBUG
check_error_stream("hires_delay_fractional_kernel", stream);
#endif
}
#ifdef HAVE_SHFL
__inline__ __device__
float warpReduceSumF(float val) {
for (int offset = warpSize/2; offset > 0; offset /= 2)
val += __shfl_down(val, offset);
return val;
}
__inline__ __device__
float blockReduceSumF(float val)
{
static __shared__ float shared[32]; // Shared mem for 32 partial sums
int lane = threadIdx.x % warpSize;
int wid = threadIdx.x / warpSize;
val = warpReduceSumF(val); // Each warp performs partial reduction
if (lane==0) shared[wid] = val; // Write reduced value to shared memory
__syncthreads(); // Wait for all partial reductions
//read from shared memory only if that warp existed
val = (threadIdx.x < blockDim.x / warpSize) ? shared[lane] : 0;
if (wid==0) val = warpReduceSumF(val); //Final reduce within first warp
return val;
}
__inline__ __device__
float blockReduceSumFS(float * vals)
{
float val = vals[threadIdx.x];
int lane = threadIdx.x % warpSize;
int wid = threadIdx.x / warpSize;
val = warpReduceSumF(val); // Each warp performs partial reduction
if (lane==0) vals[wid] = val; // Write reduced value to shared memory
__syncthreads(); // Wait for all partial reductions
//read from shared memory only if that warp existed
val = (threadIdx.x < blockDim.x / warpSize) ? vals[lane] : 0;
if (wid==0) val = warpReduceSumF(val); //Final reduce within first warp
return val;
}
__inline__ __device__
int warpReduceSumI(int val) {
for (int offset = warpSize/2; offset > 0; offset /= 2)
val += __shfl_down(val, offset);
return val;
}
__inline__ __device__
int blockReduceSumI(int val) {
static __shared__ int shared[32]; // Shared mem for 32 partial sums
int lane = threadIdx.x % warpSize;
int wid = threadIdx.x / warpSize;
val = warpReduceSumI(val); // Each warp performs partial reduction
if (lane==0) shared[wid]=val; // Write reduced value to shared memory
__syncthreads(); // Wait for all partial reductions
//read from shared memory only if that warp existed
val = (threadIdx.x < blockDim.x / warpSize) ? shared[lane] : 0;
if (wid==0) val = warpReduceSumI(val); //Final reduce within first warp
return val;
}
#endif
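// These block-wide reductions assume blockDim.x is a multiple of warpSize and at most
// 1024 (i.e. at most 32 warps, matching the 32-entry shared arrays); the contribution
// of a trailing partial warp would be dropped by the read-back step.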
// Compute the mean of the real and imaginary components for each channel / antenna
__global__ void hires_measure_means_kernel (cuFloatComplex * in, cuFloatComplex * means, const unsigned nval_per_thread, const uint64_t ndat)
{
const unsigned iant = blockIdx.y;
const unsigned nant = gridDim.y;
const unsigned ichan = blockIdx.z;
const unsigned ichanant = (ichan * nant) + iant;
const uint64_t in_offset = ichanant * ndat;
cuFloatComplex * indat = in + in_offset;
unsigned idx = threadIdx.x * nval_per_thread;
cuFloatComplex val;
float sum_re = 0;
float sum_im = 0;
int count = 0;
for (unsigned ival=0; ival<nval_per_thread; ival++)
{
if (idx < ndat)
{
val = indat[idx];
sum_re += val.x;
sum_im += val.y;
count++;
}
idx += blockDim.x;
}
#ifdef HAVE_SHFL
// compute via block reduce sum
sum_re = blockReduceSumF(sum_re);
sum_im = blockReduceSumF(sum_im);
count = blockReduceSumI(count);
#endif
if (threadIdx.x == 0)
{
means[ichanant].x = sum_re / count;
means[ichanant].y = sum_im / count;
}
}
//
// Compute the S1 and S2 sums for blocks of input data, writing the S1 and S2 sums out to Gmem
//
__global__ void hires_skcompute_kernel (cuFloatComplex * in, float * s1s, float * s2s, const unsigned nval_per_thread, const uint64_t ndat)
{
extern __shared__ float skc_shm[];
const unsigned iant = blockIdx.y;
const unsigned nant = gridDim.y;
const unsigned ichan = blockIdx.z;
const unsigned ichanant = (ichan * nant) + iant;
const uint64_t in_offset = ichanant * ndat;
// offset into the block for the current channel and antenna
cuFloatComplex * indat = in + in_offset;
unsigned idx = (blockIdx.x * blockDim.x + threadIdx.x) * nval_per_thread;
cuFloatComplex val;
float s1_sum = 0;
float s2_sum = 0;
float power;
for (unsigned ival=0; ival<nval_per_thread; ival++)
{
if (idx < ndat)
{
val = indat[idx];
power = (val.x * val.x) + (val.y * val.y);
s1_sum += power;
s2_sum += (power * power);
}
idx += blockDim.x;
}
#ifdef HAVE_SHFL
const unsigned warp_idx = threadIdx.x % 32;
const unsigned warp_num = threadIdx.x / 32;
s1_sum += __shfl_down (s1_sum, 16);
s1_sum += __shfl_down (s1_sum, 8);
s1_sum += __shfl_down (s1_sum, 4);
s1_sum += __shfl_down (s1_sum, 2);
s1_sum += __shfl_down (s1_sum, 1);
s2_sum += __shfl_down (s2_sum, 16);
s2_sum += __shfl_down (s2_sum, 8);
s2_sum += __shfl_down (s2_sum, 4);
s2_sum += __shfl_down (s2_sum, 2);
s2_sum += __shfl_down (s2_sum, 1);
if (warp_idx == 0)
{
skc_shm [warp_num] = s1_sum;
skc_shm [32+warp_num] = s2_sum;
}
__syncthreads();
if (warp_num == 0)
{
s1_sum = skc_shm [warp_idx];
s2_sum = skc_shm [32 + warp_idx];
s1_sum += __shfl_down (s1_sum, 16);
s1_sum += __shfl_down (s1_sum, 8);
s1_sum += __shfl_down (s1_sum, 4);
s1_sum += __shfl_down (s1_sum, 2);
s1_sum += __shfl_down (s1_sum, 1);
s2_sum += __shfl_down (s2_sum, 16);
s2_sum += __shfl_down (s2_sum, 8);
s2_sum += __shfl_down (s2_sum, 4);
s2_sum += __shfl_down (s2_sum, 2);
s2_sum += __shfl_down (s2_sum, 1);
}
#endif
if (threadIdx.x == 0)
{
// FST ordered
const unsigned out_idx = (ichanant * gridDim.x) + blockIdx.x;
//if (iant == 0 && ichan == 168)
// printf ("s1s[%u]=%f\n", out_idx, s1_sum);
s1s[out_idx] = s1_sum;
s2s[out_idx] = s2_sum;
}
}
void hires_test_skcompute (hipStream_t stream, void * d_in, void * d_s1s_out, void * d_s2s_out, unsigned nchan, unsigned nant, unsigned nbytes)
{
const unsigned ndim = 2;
const uint64_t ndat = nbytes / (nchan * nant * ndim * sizeof(float));
const unsigned nthreads = 1024;
const unsigned nval_per_thread = 1;
size_t shm_bytes = 64 * sizeof(float);
dim3 blocks (ndat / nthreads, nant, nchan);
if (ndat % nthreads)
blocks.x++;
//#ifdef _GDEBUG
fprintf (stderr, "hires_skcompute_kernel: bytes=%lu ndat=%lu shm_bytes=%ld\n", nbytes, ndat, shm_bytes);
fprintf (stderr, "hires_skcompute_kernel: blocks.x=%d, blocks.y=%d, blocks.z=%d, nthreads=%u\n", blocks.x, blocks.y, blocks.z, nthreads);
fprintf (stderr, "hires_skcompute_kernel: d_in=%p d_s1s_out=%p, d_s2s_out=%p nval_per_thread=%u, ndat_sk=%lu\n", d_in, d_s1s_out, d_s2s_out, nval_per_thread, ndat);
//#endif
hipLaunchKernelGGL(( hires_skcompute_kernel), dim3(blocks), dim3(nthreads), shm_bytes, stream, (cuFloatComplex *) d_in, (float *) d_s1s_out, (float *) d_s2s_out, nval_per_thread, ndat);
check_error_stream("hires_skcompute_kernel", stream);
}
__device__ inline void Comparator(
float &valA,
float &valB,
uint dir
)
{
float k;
if ((valA > valB) == dir)
{
k = valA;
valA = valB;
valB = k;
}
}
__device__ inline void shm_merge_sort (unsigned length, float * keys)
{
const unsigned maxthread = length / 2;
for (uint size = 2; size <= length; size <<= 1)
{
uint stride = size / 2;
uint offset = threadIdx.x & (stride - 1);
{
__syncthreads();
if (threadIdx.x < maxthread)
{
uint pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
Comparator (keys[pos + 0], keys[pos + stride], 1);
}
stride >>= 1;
}
for (; stride > 0; stride >>= 1)
{
__syncthreads();
uint pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
if (threadIdx.x < maxthread)
{
if (offset >= stride)
{
Comparator( keys[pos - stride], keys[pos + 0], 1);
}
}
}
}
__syncthreads();
}
// simplistic block wide shared memory sum, 1 val per thread
__device__ inline float shm_sum_thread (unsigned length, float * keys)
{
for (unsigned size=length/2; size>0; size >>= 1)
{
if (threadIdx.x < size)
keys[threadIdx.x] += keys[threadIdx.x + size];
__syncthreads();
}
return keys[0];
}
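// Both shm_merge_sort and shm_sum_thread assume that length is a power of two, that
// all length values are already in shared memory, and that at least length / 2
// threads of the block participate.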
__global__ void shm_merge_sort_kernel2 (float *d_Dst,
float *d_Src,
unsigned arrayLength,
unsigned dir)
{
//Shared memory storage for one or more small vectors
__shared__ float keys[1024];
keys[threadIdx.x] = d_Src[threadIdx.x];
__syncthreads();
shm_merge_sort (arrayLength, keys);
__syncthreads();
d_Dst[threadIdx.x] = keys[threadIdx.x];
}
void test_merge_sort2 (hipStream_t stream, float * d_key_out, float * d_key_in, unsigned length, unsigned direction) {
unsigned nblocks = 1;
unsigned nthreads = length;
hipLaunchKernelGGL(( shm_merge_sort_kernel2), dim3(nblocks), dim3(nthreads), 0, 0, d_key_out, d_key_in, length, direction);
check_error_stream("shm_merge_sort_kernel2", stream);
return;
}
__global__ void hires_compute_sigmas_kernel (float * in, cuFloatComplex * thresholds,
float * voltage_sigmas, unsigned nsums)
{
extern __shared__ float csk_keys[];
// iant = blockIdx.y;
// nant = gridDim.y;
// ichan = blockIdx.z;
// nchan = gridDim.z;
const unsigned ichanant = (blockIdx.z * gridDim.y) + blockIdx.y;
float s1 = in[(ichanant * nsums) + threadIdx.x];
// read the 16 input values into shared memory
csk_keys[threadIdx.x] = s1;
__syncthreads();
// sort using shared memory
shm_merge_sort (nsums, csk_keys);
__syncthreads();
float median = csk_keys[nsums / 2];
__syncthreads();
// now subtract median from s1 value in key and take abs value
csk_keys[threadIdx.x] = fabsf(csk_keys[threadIdx.x] - median);
__syncthreads();
// now sort again
shm_merge_sort (nsums, csk_keys);
__syncthreads();
// convert median absolute deviation to standard deviation
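  // (for Gaussian data, sigma ~= 1.4826 * MAD, since 1 / (inverse normal CDF at 0.75) ~= 1.4826)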
float sigma = csk_keys[nsums / 2] * 1.4826;
// set the thresholds
if (threadIdx.x == 0)
{
thresholds[ichanant].x = median;
thresholds[ichanant].y = sigma;
}
csk_keys[threadIdx.x] = s1;
__syncthreads();
// simple sum whereby nsums == nthreads
s1 = shm_sum_thread (nsums, csk_keys);
if (threadIdx.x == 0)
{
voltage_sigmas[ichanant] = sqrtf (s1 / (nsums * 1024 * 2));
}
}
/*
*/
__global__ void hires_compute_power_limits_kernel (float * in, cuFloatComplex * thresholds,
float * voltage_sigmas, int8_t * mask, hiprandStatePhilox4_32_10_t * rstates, unsigned nsums, unsigned valid_memory,
unsigned nsigma, unsigned iblock)
{
// iant = blockIdx.y;
// nant = gridDim.y;
// ichan = blockIdx.z;
// nchan = gridDim.z;
//const unsigned n_elements = nsums * valid_memory;
// 1024 threads, 16 samples/block, 64 memory blocks, each thread does 1 samples
const unsigned mem_block = threadIdx.x / 16;
const unsigned mem_element = threadIdx.x % 16;
const unsigned ichanant = (blockIdx.z * gridDim.y) + blockIdx.y;
const unsigned nchanant = gridDim.z * gridDim.y;
// get the generator for this channel and antenna [gridDim.x == 1]
const unsigned id = ichanant * blockDim.x + threadIdx.x;
// a maximum of 32 * 32 keys [1024] will be handled by 1024 threads.
__shared__ float keys[1024];
// existing median and sigma for the S1s
float median = thresholds[ichanant].x;
float sigma = thresholds[ichanant].y;
float s1 = 0;
float s1_count = 0;
// S1 values stored as 64 sets of FST in blocks that are each 16 samples
// ichanant offset + offset into the memory [0-16]
if (mem_block < valid_memory)
{
s1 = in[(mem_block * nchanant * 16) + (ichanant * 16) + mem_element];
s1_count = 1;
// if skdetect has determined this sample is bad, generate something similar
if ((mem_block == iblock) && (mask[ichanant * 16 + mem_element] > 0))
{
s1 = median + (hiprand_normal (&(rstates[id])) * sigma);
in[(mem_block * nchanant * 16) + (ichanant * 16) + mem_element] = s1;
}
}
// now find the median and median absolute deviation (stddev)
keys[threadIdx.x] = s1;
__syncthreads();
// sort the nelements values using shared memory
shm_merge_sort (1024, keys);
__syncthreads();
unsigned centre = 1024 - ((valid_memory * 16) / 2);
median = keys[centre];
__syncthreads();
// now subtract median from s1 value in key and take abs value
if (s1 > 0)
keys[threadIdx.x] = fabsf(s1 - median);
else
keys[threadIdx.x] = 0;
__syncthreads();
// now sort again
shm_merge_sort (1024, keys);
__syncthreads();
// convert median absolute deviation to standard deviation
sigma = keys[centre] * 1.4826;
//if (blockIdx.z == 210 && blockIdx.y == 0 && iblock == 0 && threadIdx.x < 16)
// printf ("[%d] s1=%f centre=%u median=%f sigma=%f\n", threadIdx.x, s1, centre, median, sigma);
// now sum S1 across threads
#ifdef HAVE_SHFL
s1 += __shfl_down (s1, 16);
s1 += __shfl_down (s1, 8);
s1 += __shfl_down (s1, 4);
s1 += __shfl_down (s1, 2);
s1 += __shfl_down (s1, 1);
s1_count += __shfl_down (s1_count, 16);
s1_count += __shfl_down (s1_count, 8);
s1_count += __shfl_down (s1_count, 4);
s1_count += __shfl_down (s1_count, 2);
s1_count += __shfl_down (s1_count, 1);
#endif
unsigned warp_idx = threadIdx.x % 32;
unsigned warp_num = threadIdx.x / 32;
if (warp_idx == 0)
{
keys[warp_num] = s1;
keys[32+warp_num] = s1_count;
}
__syncthreads();
if (warp_num == 0)
{
s1 = keys[warp_idx];
s1_count = keys[32+warp_idx];
#ifdef HAVE_SHFL
s1 += __shfl_down (s1, 16);
s1 += __shfl_down (s1, 8);
s1 += __shfl_down (s1, 4);
s1 += __shfl_down (s1, 2);
s1 += __shfl_down (s1, 1);
s1_count += __shfl_down (s1_count, 16);
s1_count += __shfl_down (s1_count, 8);
s1_count += __shfl_down (s1_count, 4);
s1_count += __shfl_down (s1_count, 2);
s1_count += __shfl_down (s1_count, 1);
#endif
// this sigma is the stddev of the voltages (hence 1024 * 2)
if (warp_idx == 0)
{
//voltage_sigmas[ichanant] = sqrtf(s1 / (s1_count * 2048));
voltage_sigmas[ichanant] = sqrtf(median / 2048);
// now we have the median and sigma for the memory blocks of S1, compute the
// total power thresholds
thresholds[ichanant].x = median;
thresholds[ichanant].y = sigma;
}
}
}
void hires_test_compute_power_limits (hipStream_t stream, void * d_s1s, void * d_sigmas,
void * d_thresh, void * d_mask, unsigned nsums, unsigned nant, unsigned nchan, uint64_t ndat,
uint64_t s1_count, unsigned s1_memory, void * d_rstates)
{
dim3 blocks_skm (1, nant, nchan);
unsigned nthreads = 1024;
const unsigned nsigma = 4;
unsigned valid_memory = s1_memory;
if (s1_count < s1_memory)
valid_memory = (unsigned) s1_count;
#ifdef _DEBUG
fprintf (stderr, "test_compute_power_limits: d_s1s=%p d_thresh=%p\n", d_s1s, d_thresh);
fprintf (stderr, "test_compute_power_limits: nant=%u nchan=%u ndat=%lu\n", nant, nchan, ndat);
fprintf (stderr, "test_compute_power_limits: nsums=%u nmemory=%u nsigma=%u\n", nsums, valid_memory, nsigma);
#endif
hipLaunchKernelGGL(( hires_compute_power_limits_kernel), dim3(blocks_skm),dim3(nthreads),0,stream, (float *) d_s1s,
(cuFloatComplex *) d_thresh, (float *) d_sigmas, (int8_t *) d_mask, (hiprandStatePhilox4_32_10_t *) d_rstates, nsums, valid_memory, nsigma, 0);
check_error_stream("hires_compute_power_limits_kernel", stream);
}
//
// take the S1 and S2 values in sums.x and sums.y that were computed
// from M samples, and integrate of nsums blocks to
// compute a sk mask and zap
//
__global__ void hires_skdetect_kernel (float * s1s, float * s2s, cuFloatComplex * power_thresholds,
int8_t * mask, float * sigmas,
unsigned nchan_sum, unsigned sk_nsigma,
unsigned nsums, unsigned M, unsigned nval_per_thread)
{
// zap mask for each set of M samples
extern __shared__ int8_t smask_det[];
// maximum to be 16384 samples (20.97152 ms)
// unsigned sk_idx_max = 16;
// given the buffer sizes of 16384 samples, we shall not exceed 2^14
// 2^11 is an important one: 10.24us * 2048 samples == 20.97152 ms
// maximum to be 2048 samples (20.97152 ms)
unsigned sk_idx_max = 14;
// 3 sigma
const float sk_low[15] = { 0, 0, 0, 0, 0,
0.387702, 0.492078, 0.601904, 0.698159, 0.775046,
0.834186, 0.878879, 0.912209, 0.936770, 0.954684};
const float sk_high[15] = { 0, 0, 0, 0, 0,
2.731480, 2.166000, 1.762970, 1.495970, 1.325420,
1.216950, 1.146930, 1.100750, 1.069730, 1.048570};
const unsigned iant = blockIdx.y;
const unsigned nant = gridDim.y;
const unsigned ichan = blockIdx.z;
const unsigned nchan = gridDim.z;
const unsigned ichanant = (ichan * nant) + iant;
// ASSUME! nsums == nthreads
// initialize zap mask to 0 in shared memory [FT order]
unsigned idx = threadIdx.x;
for (unsigned i=0; i<nchan_sum; i++)
{
smask_det[idx] = 0;
idx += nsums;
}
__syncthreads();
// log2 of 1024
const unsigned log2_M = (unsigned) log2f (M);
// S1 and S2 sums are stored as FST
s1s += (ichanant * nsums);
s2s += (ichanant * nsums);
idx = threadIdx.x;
if (!((ichan == 54 || ichan == 105 || ichan == 155 || ichan == 204)))
nchan_sum = 1;
// for each different boxcar width
for (unsigned sk_idx = log2_M; sk_idx < sk_idx_max; sk_idx ++)
{
// the number of S1 (1024 powers) to add to this boxcar
const unsigned to_add = (unsigned) exp2f (sk_idx - log2_M);
// prevent running over the end of the array
if (idx + to_add <= nsums)
{
const float m = (float) (M * to_add);
const float m_fac = (m + 1) / (m - 1);
// the number of channels that are considered bad
// 2 sigma == 9 channels
// 1 sigma == 25 channels
unsigned nchan_bad_count = 0;
const unsigned nchan_bad_limit = 12;
float sk_avg = 0;
unsigned cdx = idx;
// loop over the channels in our sum
for (unsigned i=ichan; i<(ichan+nchan_sum); i++)
{
const unsigned ica = i * nant + iant;
const float median = power_thresholds[ica].x;
const float sigma = power_thresholds[ica].y; // / sqrtf(to_add);
const float chan_sum_limit = 2 * sigma;
const float power_limit = 3 * sigma;
// compute the SK estimate for this boxcar width and channel
float s1 = 1e-10;
float s2 = 1e-10;
for (unsigned ichunk=0; ichunk < to_add; ichunk++)
{
s1 += s1s[cdx + ichunk];
s2 += s2s[cdx + ichunk];
}
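          // generalised spectral kurtosis estimator (cf. Nita & Gary 2010):
          //   SK = ((M+1)/(M-1)) * (M * S2 / S1^2 - 1), which is ~1 for Gaussian noise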
float sk_estimate = m_fac * (m * (s2 / (s1 * s1)) - 1);
sk_avg += sk_estimate;
float s1_avg = s1 / to_add;
// test the SK estimate for only the current channel
if (i == ichan)
{
if ((sk_estimate < sk_low[sk_idx]) || (sk_estimate > sk_high[sk_idx]))
{
for (unsigned ichunk=0; ichunk < to_add; ichunk++)
{
smask_det[idx+ichunk] = (int8_t) 1;
}
}
// test if the average S1 power exceeds the 3sigma limits from the long running median/sigma
if ((s1_avg > (median + power_limit)) || (s1_avg < (median - power_limit)))
{
for (unsigned ichunk=0; ichunk < to_add; ichunk++)
{
smask_det[idx+ichunk] = 3;
}
}
}
// phone call detector
// test if the average S1 power exceeds the special limit for channel summation
if (s1_avg > (median + chan_sum_limit))
nchan_bad_count ++;
// increment by 1 channel
cdx += (nant * nsums);
}
      // if this is a phone-call band, check the limits on the SK average and nchan_bad
if (nchan_sum == 50)
{
#ifdef SKAVG_METHOD
float mu2 = (4 * m * m) / ((m-1) * (m + 2) * (m + 3));
float one_sigma_idat = sqrtf(mu2 / nchan_sum);
float upper = 1 + (sk_nsigma * one_sigma_idat);
float lower = 1 - (sk_nsigma * one_sigma_idat);
sk_avg /= nchan_sum;
if ((sk_avg < lower) || (sk_avg > upper) || (nchan_bad_count > nchan_bad_limit))
#else
if (nchan_bad_count > nchan_bad_limit)
#endif
{
cdx = idx;
for (unsigned i=0; i<nchan_sum; i++)
{
for (unsigned ichunk=0; ichunk < to_add; ichunk++)
{
smask_det[cdx+ichunk] = 2;
}
cdx += nsums;
}
}
}
}
}
// now write out the SK mask to gmem
for (unsigned i=0; i < nchan_sum; i++)
{
if ((ichan + i) < nchan)
{
unsigned odx = (((ichan + i) * nant) + iant) * nsums + threadIdx.x;
unsigned sdx = i * nsums + threadIdx.x;
if ((sdx < nchan_sum * nsums) && (smask_det[sdx] > 0))
{
mask[odx] = smask_det[sdx];
}
}
}
}
void hires_test_skdetect (hipStream_t stream, void * d_s1s, void * d_s2s, void * d_thresh,
void * d_mask, void * d_sigmas, unsigned nsums, unsigned nant,
unsigned nchan, uint64_t ndat)
{
unsigned M = 1024;
//////////////////////////////////////////////////////////
// mask the input data
dim3 blocks (1, nant, nchan);
unsigned nthreads = 1024;
unsigned nval_per_thread = 1;
if (nsums > nthreads)
{
nval_per_thread = nsums / nthreads;
if (nsums % nthreads)
nval_per_thread++;
}
else
nthreads = nsums;
unsigned nchan_sum = 50;
unsigned sk_nsigma = 4;
size_t shm_bytes = (nchan_sum + 1) * nsums * sizeof(uint8_t);
size_t mask_size = nsums * nchan * nant * sizeof(uint8_t);
hipMemsetAsync (d_mask, 0, mask_size, stream);
hipStreamSynchronize(stream);
fprintf (stderr, "hires_skdetect_kernel: blocks.x=%d, blocks.y=%d, blocks.z=%d\n", blocks.x, blocks.y, blocks.z);
fprintf (stderr, "hires_skdetect_kernel: nthreads=%u shm_bytes=%ld\n", nthreads, shm_bytes);
fprintf (stderr, "hires_skdetect_kernel: d_s1s=%p, d_s2s=%p, d_masks=%p, nsums=%u, M=%u, nval_per_thread=%u\n", d_s1s, d_s2s, d_mask, nsums, M, nval_per_thread);
hipLaunchKernelGGL(( hires_skdetect_kernel), dim3(blocks), dim3(nthreads), shm_bytes, stream, (float *) d_s1s, (float *) d_s2s, (cuFloatComplex *) d_thresh, (int8_t *) d_mask, (float *) d_sigmas, nchan_sum, sk_nsigma, nsums, M, nval_per_thread);
check_error_stream("hires_skdetect_kernel", stream);
}
//
// take the S1 and S2 values in sums.x and sums.y that were computed
// from M samples, and integrate of nsums blocks to
// compute a sk mask and zap
//
__global__ void hires_skmask_kernel (float * in, int8_t * out, int8_t * mask,
hiprandStatePhilox4_32_10_t * rstates, float * sigmas,
#ifndef USE_CONSTANT_MEMORY
const float * __restrict__ d_ant_scales_delay,
#endif
unsigned nsums, unsigned M, unsigned nval_per_thread,
unsigned nsamp_per_thread, uint64_t ndat,
char replace_noise)
{
const unsigned iant = blockIdx.y;
const unsigned nant = gridDim.y;
const unsigned ichan = blockIdx.z;
const unsigned ichanant = (ichan * nant) + iant;
const unsigned id = ichanant * blockDim.x + threadIdx.x;
hiprandStatePhilox4_32_10_t localState = rstates[id];
float sigma = sigmas[ichanant];
int8_t * chanant_mask = mask + (ichanant * nsums);
// Jenet & Anderson 1998, 6-bit (2-bits for RFI) spacing
const float spacing = 0.09925; // 6-bit
//const float spacing = 0.02957; // 8-bit
  // don't do antenna scaling here anymore for the moment, unless it is zero
const float ant_scale = d_ant_scales_delay[iant];
float data_factor = ant_scale / (sigma * spacing);
float rand_factor = ant_scale / spacing;
if (!replace_noise)
rand_factor = 0;
// now we want to zap all blocks of input that have an associated mask
  // note that this kernel uses only 1 block in x, with blockDim.x threads that may not match the number of values per sum
const unsigned ndim = 2;
const unsigned nval_per_sum = M * ndim;
unsigned block_offset = (ichanant * ndat * ndim);
float * indat = in + block_offset;
int8_t * outdat = out + block_offset;
// foreach block of M samples (i.e. 1 sum)
for (unsigned isum=0; isum<nsums; isum++)
{
// use the threads to write out the int8_t scaled value (or zapped value)
// back to global memory. There are 2 * M values to write each iteration
unsigned idx = threadIdx.x;
#ifdef SHOW_MASK
for (unsigned isamp=0; isamp<nsamp_per_thread; isamp++)
{
if (idx < nval_per_sum)
{
outdat[idx] = (int8_t) chanant_mask[isum];
}
idx += blockDim.x;
}
#else
if (chanant_mask[isum] > 0)
{
// it is more efficient to generate 4 floats at a time
for (unsigned isamp=0; isamp<nsamp_per_thread; isamp+=4)
{
const float4 inval = hiprand_normal4 (&localState);
if (idx < nval_per_sum)
outdat[idx] = (int8_t) rintf(inval.x * rand_factor);
idx += blockDim.x;
if (idx < nval_per_sum)
outdat[idx] = (int8_t) rintf(inval.y * rand_factor);
idx += blockDim.x;
if (idx < nval_per_sum)
outdat[idx] = (int8_t) rintf(inval.z * rand_factor);
idx += blockDim.x;
if (idx < nval_per_sum)
outdat[idx] = (int8_t) rintf(inval.w * rand_factor);
idx += blockDim.x;
}
}
else
{
for (unsigned isamp=0; isamp<nsamp_per_thread; isamp++)
{
if (idx < nval_per_sum)
{
outdat[idx] = (int8_t) rintf (indat[idx] * data_factor);
}
idx += blockDim.x;
}
}
#endif
outdat += ndim * M;
indat += ndim * M;
}
rstates[id] = localState;
}
void hires_test_skmask (hipStream_t stream, void * d_in, void * d_out, void * d_mask, void * d_rstates, void * d_sigmas,
#ifndef USE_CONSTANT_MEMORY
void * d_ant_scales_delay,
#endif
unsigned nsums, unsigned nchan, unsigned nant, uint64_t ndat, char replace_noise)
{
unsigned M = 1024;
unsigned ndim = 2;
//////////////////////////////////////////////////////////
// mask the input data
dim3 blocks (1, nant, nchan);
unsigned nthreads = 1024;
unsigned nval_per_thread = 1;
if (nsums > nthreads)
{
nval_per_thread = nsums / nthreads;
if (nsums % nthreads)
nval_per_thread++;
}
else
nthreads = nsums;
unsigned nsamp_per_thread = (M * ndim) / nthreads;
if (M % nthreads)
nsamp_per_thread++;
size_t shm_bytes = 0;
fprintf (stderr, "hires_skmask_kernel: blocks.x=%d, blocks.y=%d, blocks.z=%d\n", blocks.x, blocks.y, blocks.z);
fprintf (stderr, "hires_skmask_kernel: nthreads=%u shm_bytes=%ld\n", nthreads, shm_bytes);
fprintf (stderr, "hires_skmask_kernel: d_in=%p d_out=%p, d_mask=%p, nsums=%u M=%u, nval_per_thread=%u, nsamp_per_thread=%u ndat=%lu\n", d_in, d_out, d_in, nsums, M, nval_per_thread, nsamp_per_thread, ndat);
hipLaunchKernelGGL(( hires_skmask_kernel), dim3(blocks), dim3(nthreads), shm_bytes, stream, (float *) d_in, (int8_t *) d_out,
(int8_t *) d_mask, (hiprandStatePhilox4_32_10_t *) d_rstates, (float *) d_sigmas,
#ifndef USE_CONSTANT_MEMORY
(float *) d_ant_scales_delay,
#endif
nsums, M, nval_per_thread, nsamp_per_thread, ndat, replace_noise);
check_error_stream("hires_skmask_kernel", stream);
}
__global__ void hires_srand_setup_kernel_sparse (unsigned long long seed, unsigned pfb_idx, unsigned nrngs, unsigned nval_per_thread, hiprandStatePhilox4_32_10_t * rstates)
{
unsigned id = threadIdx.x;
unsigned long long sequence = (blockDim.x * pfb_idx) + threadIdx.x;
unsigned long long local_seed = seed;
unsigned long long offset = 0;
unsigned long long skip = nrngs;
hiprandStatePhilox4_32_10_t local_state;
hiprand_init (local_seed, sequence, offset, &local_state);
rstates[id] = local_state;
id += blockDim.x;
for (unsigned i=1; i<nval_per_thread; i++)
{
skipahead_sequence (skip, &local_state);
rstates[id] = local_state;
id += blockDim.x;
}
}
void hires_init_rng_sparse (hipStream_t stream, unsigned long long seed, unsigned nrngs, unsigned pfb_idx, unsigned npfb, void * states)
{
unsigned nthreads = 1024;
unsigned nval_per_thread = nrngs / nthreads;
#if _GDEBUG
fprintf (stderr, "rand_setup: nrngs=%u nval_per_thread=%u nthreads=%u pfb_idx=%u\n", nrngs, nval_per_thread, nthreads, pfb_idx);
#endif
hipLaunchKernelGGL(( hires_srand_setup_kernel_sparse), dim3(1), dim3(nthreads), 0, stream, seed, pfb_idx, nrngs, nval_per_thread, (hiprandStatePhilox4_32_10_t *) states);
#if _GDEBUG
check_error_stream("hires_srand_setup_kernel", stream);
#endif
}
__global__ void hires_srand_setup_kernel (unsigned long long seed, unsigned pfb_idx, unsigned npfb, hiprandStatePhilox4_32_10_t *states)
{
unsigned id = blockIdx.x * blockDim.x + threadIdx.x;
// local seed will be different for each of NCHAN * NANT * 1024 generators
// sequence 0, since each sequence increment involves 2^67 steps through the RNG sequence!
// offset is the step through the random sequence
unsigned long long sequence = 0;
unsigned long long offset = id;
unsigned long long local_seed = (seed << 20) + id;
// more efficient, since moving along the sequence of a seed is expensive
hiprand_init (local_seed, sequence, offset, &states[id]);
//hiprand_init( (seed << 20) + id, 0, 0, &states[id]);
}
void hires_init_rng (hipStream_t stream, unsigned long long seed, unsigned nrngs, unsigned pfb_idx, unsigned npfb, void * states)
{
unsigned nthreads = 1024;
unsigned nblocks = nrngs / nthreads;
#if _GDEBUG
fprintf (stderr, "rand_setup: nblocks=%u nthreads=%u\n", nblocks, nthreads);
#endif
hipLaunchKernelGGL(( hires_srand_setup_kernel), dim3(nblocks), dim3(nthreads), 0, stream, seed, pfb_idx, npfb, (hiprandStatePhilox4_32_10_t *) states);
#if _GDEBUG
check_error_stream("hires_srand_setup_kernel", stream);
#endif
}
// out-of-place
//
void hires_delay_fractional_sk_scale (hipStream_t stream,
void * d_in, void * d_out, void * d_fbuf, void * d_rstates,
void * d_sigmas, void * d_mask, float * d_delays, void * d_fir_coeffs,
#ifndef USE_CONSTANT_MEMORY
void * d_fringes, void * d_ant_scales,
#endif
void * d_s1s, void * d_s2s, void * d_thresh,
#ifdef USE_CONSTANT_MEMORY
float * h_fringes, size_t fringes_size,
#endif
uint64_t nbytes, unsigned nchan, unsigned nant, unsigned ntap,
unsigned s1_memory, uint64_t s1_count, char replace_noise)
{
const unsigned ndim = 2;
const uint64_t ndat = nbytes / (nchan * nant * ndim);
const unsigned half_ntap = ntap / 2;
#ifdef USE_CONSTANT_MEMORY
// copy the fringe coeffs and delays to GPU memory
hipMemcpyToSymbolAsync (fringe_coeffs, (void *) h_fringes, fringes_size, 0, hipMemcpyHostToDevice, stream);
hipStreamSynchronize(stream);
#endif
// calculate the FIR coefficients to be used in the fractional delay
unsigned nthread = ntap;
unsigned nblock = nchan * nant;
hipLaunchKernelGGL(( hires_calculate_fir_coeffs), dim3(nblock),dim3(nthread),0,stream, (float *) d_delays, (float *) d_fir_coeffs, ntap);
#if _GDEBUG
check_error_stream("hires_calculate_fir_coeffs", stream);
#endif
// number of threads that actually load data
unsigned nthread_load = 1024;
if (ndat < nthread_load)
nthread_load = ndat;
unsigned nthread_run = nthread_load - (2 * half_ntap);
// need shared memory to load the ntap coefficients + nthread_load data points
//const size_t sdata_bytes = (nthread_load * ndim + ntap) * sizeof(float);
const size_t sdata_bytes = (nthread_load * ndim + ntap + 1) * sizeof(float);
dim3 blocks (ndat / nthread_run, nant, nchan);
if (ndat % nthread_load)
blocks.x++;
#if _GDEBUG
fprintf (stderr, "hires_delay_fractional_float_kernel: bytes=%lu ndat=%lu sdata_bytes=%ld\n", nbytes, ndat, sdata_bytes);
fprintf (stderr, "hires_delay_fractional_float_kernel: blocks.x=%d, blocks.y=%d, blocks.z=%d\n", blocks.x, blocks.y, blocks.z);
fprintf (stderr, "hires_delay_fractional_float_kernel: nthread_load=%d nthread_run=%d ntap=%d\n", nthread_load, nthread_run, ntap);
#endif
const unsigned chan_stride = nant * ndat;
const unsigned ant_stride = ndat;
#ifdef USE_CONSTANT_MEMORY
hipLaunchKernelGGL(( hires_delay_fractional_float_kernel), dim3(blocks), dim3(nthread_load), sdata_bytes, stream, (int16_t *) d_in,
(cuFloatComplex *) d_fbuf, (float *) d_fir_coeffs, nthread_run,
ndat, chan_stride, ant_stride, ntap);
#else
hipLaunchKernelGGL(( hires_delay_fractional_float_kernel), dim3(blocks), dim3(nthread_load), sdata_bytes, stream, (int16_t *) d_in,
(cuFloatComplex *) d_fbuf, (float *) d_fir_coeffs, (float *) d_fringes,
nthread_run, ndat, chan_stride, ant_stride, ntap);
#endif
#if _GDEBUG
check_error_stream("hires_delay_fractional_float_kernel", stream);
#endif
/////////////////////////////////////////////////////////
// Calculate kurtosis sums
// TODO fix this configuration
unsigned M = 1024;
unsigned nthreads = 1024;
const uint64_t ndat_sk = ndat - (ntap - 1);
unsigned nval_per_thread = 1;
if (M > nthreads)
nval_per_thread = M / nthreads;
else
nthreads = M;
size_t shm_bytes;
// each block is a single integration
//shm_bytes = M * ndim * sizeof(float);
///////////////////////////////////////////////////////
// compute the means of each antenna / channel
//blocks.x = 1;
//shm_bytes = 0;
//unsigned nval_per_thread_mean = ndat_sk / 1024;
//hires_measure_means_kernel <<<blocks, nthreads, shm_bytes, stream>>>( (cuFloatComplex *) d_fbuf,
// (cuFloatComplex *) d_means, nval_per_thread_mean, ndat_sk);
///////////////////////////////////////////////////////
// compute the S1 and S2 values from the input
//
blocks.x = ndat_sk / M;
shm_bytes = 64 * sizeof(float);
#if _GDEBUG
fprintf (stderr, "hires_skcompute_kernel: bytes=%lu ndat=%lu shm_bytes=%ld\n", nbytes, ndat_sk, shm_bytes);
fprintf (stderr, "hires_skcompute_kernel: blocks.x=%d, blocks.y=%d, blocks.z=%d, nthreads=%u\n", blocks.x, blocks.y, blocks.z, nthreads);
fprintf (stderr, "hires_skcompute_kernel: d_fbuf=%p d_in=%p, nval_per_thread=%u, ndat_sk=%lu\n", d_fbuf, d_in, nval_per_thread, ndat_sk);
#endif
unsigned s1_idx = (unsigned) ((s1_count-1) % s1_memory);
float * d_s1s_curr = ((float *) d_s1s) + (s1_idx * blocks.x * nchan * nant);
// reuse d_in as a temporary work buffer for the S1 and S2 sums
hipLaunchKernelGGL(( hires_skcompute_kernel), dim3(blocks), dim3(nthreads), shm_bytes, stream, (cuFloatComplex *) d_fbuf, (float *) d_s1s_curr, (float *) d_s2s, nval_per_thread, ndat_sk);
#if _GDEBUG
check_error_stream("hires_skcompute_kernel", stream);
#endif
//
unsigned nsums = blocks.x;
dim3 blocks_skm (1, nant, nchan);
#ifdef MEDIAN_FILTER
/////////////////////////////////////////////////////////
// compute the power limits based on the S1 and S2 values
#ifdef _GDEBUG
fprintf (stderr, "ndat=%lu ndat_sk=%lu nsums=%u\n", ndat, ndat_sk, nsums);
fprintf (stderr, "s1_idx=%u s1_count=%u\n", s1_idx, s1_count);
#endif
const unsigned nsigma = 3;
unsigned valid_memory = s1_memory;
if (s1_count < s1_memory)
valid_memory = (unsigned) s1_count;
shm_bytes = 0;
// on first iteration, compute sigmas and thresholds
if (s1_count == 1)
{
nthreads = nsums;
shm_bytes = nthreads * sizeof(float);
hipLaunchKernelGGL(( hires_compute_sigmas_kernel), dim3(blocks_skm),dim3(nthreads),shm_bytes,stream, (float *) d_s1s, (cuFloatComplex *) d_thresh, (float *) d_sigmas, nsums);
#if _GDEBUG
check_error_stream("hires_compute_sigmas_kernel", stream);
#endif
}
#endif
//////////////////////////////////////////////////////////
// mask the input data
nthreads = 1024;
nval_per_thread = 1;
if (nsums > nthreads)
{
nval_per_thread = nsums / nthreads;
if (nsums % nthreads)
nval_per_thread++;
}
else
nthreads = nsums;
unsigned nsamp_per_thread = (M * ndim) / nthreads;
if (M % nthreads)
nsamp_per_thread++;
unsigned nchan_sum = 50;
unsigned sk_nsigma = 4;
shm_bytes = nchan_sum * nsums * sizeof(uint8_t);
size_t mask_size = nsums * nchan * nant * sizeof(uint8_t);
hipMemsetAsync (d_mask, 0, mask_size, stream);
hipStreamSynchronize(stream);
#if _GDEBUG
fprintf (stderr, "hires_skdetect_kernel: blocks_skm.x=%d, blocks_skm.y=%d, blocks_skm.z=%d\n", blocks_skm.x, blocks_skm.y, blocks_skm.z);
fprintf (stderr, "hires_skdetect_kernel: nthreads=%u shm_bytes=%ld\n", nthreads, shm_bytes);
fprintf (stderr, "hires_skdetect_kernel: d_fbuf=%p d_out=%p, d_in=%p, nsums=%u M=%u, nval_per_thread=%u\n", d_fbuf, d_out, d_thresh, nsums, M, nval_per_thread);
#endif
hipLaunchKernelGGL(( hires_skdetect_kernel), dim3(blocks_skm), dim3(nthreads), shm_bytes, stream, d_s1s_curr, (float *) d_s2s, (cuFloatComplex *) d_thresh, (int8_t *) d_mask, (float *) d_sigmas, nchan_sum, sk_nsigma, nsums, M, nval_per_thread);
#if _GDEBUG
check_error_stream("hires_skdetect_kernel", stream);
#endif
shm_bytes = nchan_sum * nsums;
#if _GDEBUG
fprintf (stderr, "hires_skmask_kernel: blocks_skm.x=%d, blocks_skm.y=%d, "
"blocks_skm.z=%d\n", blocks_skm.x, blocks_skm.y, blocks_skm.z);
fprintf (stderr, "hires_skmask_kernel: nthreads=%u shm_bytes=%ld\n",
nthreads, shm_bytes);
fprintf (stderr, "hires_skmask_kernel: d_fbuf=%p d_out=%p, d_in=%p, nsums=%u "
"M=%u, nval_per_thread=%u, nsamp_per_thread=%u ndat_sk=%lu\n",
d_fbuf, d_out, d_in, nsums, M, nval_per_thread, nsamp_per_thread,
ndat_sk);
#endif
// now compute the power limits for a kernel, taking the mask into account, updating the thresholds and sigmas
unsigned nthreads_cpl = 1024;
shm_bytes = 0;
hipLaunchKernelGGL(( hires_compute_power_limits_kernel), dim3(blocks_skm),dim3(nthreads_cpl),shm_bytes,stream, (float *) d_s1s, (cuFloatComplex *) d_thresh, (float *) d_sigmas, (int8_t *) d_mask, (hiprandStatePhilox4_32_10_t *) d_rstates, nsums, valid_memory, nsigma, s1_idx);
shm_bytes = 0;
hipLaunchKernelGGL(( hires_skmask_kernel), dim3(blocks_skm), dim3(nthreads), shm_bytes, stream, (float *) d_fbuf, (int8_t *) d_out,
(int8_t *) d_mask, (hiprandStatePhilox4_32_10_t *) d_rstates, (float *) d_sigmas,
#ifndef USE_CONSTANT_MEMORY
(float *) d_ant_scales,
#endif
nsums, M, nval_per_thread, nsamp_per_thread, ndat_sk, replace_noise);
#if _GDEBUG
check_error_stream("hires_skmask_kernel", stream);
#endif
}
// wrapper for getting curandStatePhilox4_32_10_t size
size_t hires_curandState_size()
{
return sizeof(hiprandStatePhilox4_32_10_t);
}
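// Illustrative sketch (not part of the original pipeline): one way a caller
// could size, allocate and seed the random-number states consumed by the
// masking kernels above, combining hires_curandState_size() with
// hires_init_rng(). The count of nchan * nant * 1024 generators mirrors the
// comment in hires_srand_setup_kernel; the count used by the real caller is
// an assumption here, as is the helper name.
void hires_example_alloc_and_init_rng (hipStream_t stream, unsigned long long seed,
                                       unsigned nchan, unsigned nant,
                                       unsigned pfb_idx, unsigned npfb,
                                       void ** d_rstates)
{
  // one generator per (channel, antenna, thread), as assumed by hires_skmask_kernel
  const unsigned nrngs = nchan * nant * 1024;
  const size_t rstates_bytes = nrngs * hires_curandState_size();
  hipError_t error = hipMalloc (d_rstates, rstates_bytes);
  if (error != hipSuccess)
  {
    fprintf (stderr, "hires_example_alloc_and_init_rng: hipMalloc failed for %ld bytes\n", rstates_bytes);
    *d_rstates = 0;
    return;
  }
  // seed the dense generator set used by hires_skmask_kernel
  hires_init_rng (stream, seed, nrngs, pfb_idx, npfb, *d_rstates);
}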
|
a7f9e32026de7aac4e9944a3becba712c1408420.cu
|
#include <cuda_runtime.h>
#include <cuComplex.h>
#include <curand_kernel.h>
#include <inttypes.h>
#include <stdio.h>
#include <assert.h>
#include "mopsr_cuda.h"
#include "mopsr_delays_cuda_hires.h"
// maximum number of channels [320] * antennas [8] from 1 PFB
#define MOPSR_PFB_ANT_MAX 8
#define MOPSR_PFB_CHANANT_MAX 2560
#define MOPSR_MAX_ANT 352
#define WARP_SIZE 32
#define MEDIAN_FILTER 1
#define TWO_SIGMA
//#define ONE_SIGMA
//#define SK_FREQ_AVG
//#define SHOW_MASK // this puts the SK/TP masks into the output data!
//#define _GDEBUG 1
#ifdef USE_CONSTANT_MEMORY
__constant__ float d_ant_scales_delay [MOPSR_MAX_NANT_PER_AQ];
#endif
int hires_transpose_delay_alloc (transpose_delay_hires_t * ctx,
uint64_t block_size, unsigned nchan,
unsigned nant, unsigned ntap)
{
ctx->nchan = nchan;
ctx->nant = nant;
ctx->ntap = ntap;
ctx->half_ntap = ntap / 2;
const unsigned nchanant = nchan * nant;
const unsigned ndim = 2;
ctx->curr = (transpose_delay_hires_buf_t *) malloc (sizeof(transpose_delay_hires_buf_t));
ctx->next = (transpose_delay_hires_buf_t *) malloc (sizeof(transpose_delay_hires_buf_t));
ctx->buffer_size = block_size + (ndim * nchanant * ctx->half_ntap * 2);
size_t counter_size = ctx->nant * sizeof(unsigned);
if (hires_transpose_delay_buf_alloc (ctx->curr, ctx->buffer_size, counter_size) < 0)
{
fprintf (stderr, "hires_transpose_delay_alloc: hires_transpose_delay_buf_alloc failed\n");
return -1;
}
if (hires_transpose_delay_buf_alloc (ctx->next, ctx->buffer_size, counter_size) < 0)
{
fprintf (stderr, "hires_transpose_delay_alloc: hires_transpose_delay_buf_alloc failed\n");
return -1;
}
ctx->first_kernel = 1;
return 0;
}
int hires_transpose_delay_buf_alloc (transpose_delay_hires_buf_t * buf, size_t buffer_size, size_t counter_size)
{
cudaError_t error;
// allocate the buffer for data
error = cudaMalloc (&(buf->d_buffer), buffer_size);
if (error != cudaSuccess)
{
fprintf (stderr, "hires_transpose_delay_buf_alloc: cudaMalloc failed for %ld bytes\n", buffer_size);
return -1;
}
buf->counter_size = counter_size;
buf->counter_bytes = counter_size * 3;
#ifdef USE_CONSTANT_MEMORY
error = cudaMallocHost (&(buf->h_out_from), buf->counter_size);
if (error != cudaSuccess)
{
fprintf (stderr, "hires_transpose_delay_buf_alloc: cudaMallocHost failed for %ld bytes\n", buf->counter_size);
return -1;
}
error = cudaMallocHost (&(buf->h_in_from), buf->counter_size);
if (error != cudaSuccess)
{
fprintf (stderr, "hires_transpose_delay_buf_alloc: cudaMallocHost failed for %ld bytes\n", buf->counter_size);
return -1;
}
error = cudaMallocHost (&(buf->h_in_to), buf->counter_size);
if (error != cudaSuccess)
{
fprintf (stderr, "hires_transpose_delay_buf_alloc: cudaMallocHost failed for %ld bytes\n", buf->counter_size);
return -1;
}
#else
// allocate host memory for counters
error = cudaMallocHost (&(buf->h_base), buf->counter_bytes);
if (error != cudaSuccess)
{
fprintf (stderr, "hires_transpose_delay_buf_alloc: cudaMallocHost failed for %ld bytes\n", buf->counter_bytes);
return -1;
}
// setup 3 pointers for host memory
buf->h_out_from = (unsigned *) (buf->h_base + 0 * counter_size);
buf->h_in_from = (unsigned *) (buf->h_base + 1 * counter_size);
buf->h_in_to = (unsigned *) (buf->h_base + 2 * counter_size);
error = cudaMalloc (&(buf->d_base), buf->counter_bytes);
if (error != cudaSuccess)
{
fprintf (stderr, "hires_transpose_delay_buf_alloc: cudaMalloc failed for %ld bytes\n", buf->counter_bytes);
return -1;
}
buf->d_out_from = (unsigned *) (buf->d_base + 0 * counter_size);
buf->d_in_from = (unsigned *) (buf->d_base + 1 * counter_size);
buf->d_in_to = (unsigned *) (buf->d_base + 2 * counter_size);
#endif
buf->h_off = (unsigned *) malloc(buf->counter_size);
buf->h_delays = (unsigned *) malloc(buf->counter_size);
return 0;
}
void hires_transpose_delay_reset (transpose_delay_hires_t * ctx)
{
ctx->first_kernel = 1;
}
int hires_transpose_delay_dealloc (transpose_delay_hires_t * ctx)
{
hires_transpose_delay_buf_dealloc (ctx->curr);
hires_transpose_delay_buf_dealloc (ctx->next);
free (ctx->curr);
free (ctx->next);
return 0;
}
int hires_transpose_delay_buf_dealloc (transpose_delay_hires_buf_t * ctx)
{
#ifdef USE_CONSTANT_MEMORY
if (ctx->h_out_from)
cudaFreeHost (ctx->h_out_from);
ctx->h_out_from = 0;
if (ctx->h_in_from)
cudaFreeHost (ctx->h_in_from);
ctx->h_in_from = 0;
if (ctx->h_in_to)
cudaFreeHost (ctx->h_in_to);
ctx->h_in_to = 0;
#else
if (ctx->h_base)
cudaFreeHost (ctx->h_base);
ctx->h_base = 0;
if (ctx->d_base)
cudaFree (ctx->d_base);
ctx->d_base = 0;
#endif
if (ctx->h_off)
free(ctx->h_off);
ctx->h_off = 0;
if (ctx->h_delays)
free(ctx->h_delays);
ctx->h_delays = 0;
if (ctx->d_buffer)
cudaFree(ctx->d_buffer);
ctx->d_buffer =0;
return 0;
}
#ifdef USE_CONSTANT_MEMORY
__constant__ unsigned curr_out_from[MOPSR_PFB_ANT_MAX];
__constant__ unsigned curr_in_from[MOPSR_PFB_ANT_MAX];
__constant__ unsigned curr_in_to[MOPSR_PFB_ANT_MAX];
__constant__ unsigned next_out_from[MOPSR_PFB_ANT_MAX];
__constant__ unsigned next_in_from[MOPSR_PFB_ANT_MAX];
__constant__ unsigned next_in_to[MOPSR_PFB_ANT_MAX];
#endif
// major transpose kernel
// each block will process 32 time samples for 16 channels for all antenna
#ifdef USE_CONSTANT_MEMORY
__global__ void hires_transpose_delay_kernel (
int16_t * in,
int16_t * curr,
int16_t * next,
const unsigned nchan, const unsigned nant,
const unsigned nval, const unsigned nval_per_thread,
const unsigned samp_stride, const unsigned chan_block_stride,
const unsigned out_chanant_stride)
#else
__global__ void hires_transpose_delay_kernel (
int16_t * in,
int16_t * curr,
int16_t * next,
unsigned * curr_counter,
unsigned * next_counter,
const unsigned nchan, const unsigned nant,
const unsigned nval, const unsigned nval_per_thread,
const unsigned samp_stride, const unsigned chan_block_stride,
const unsigned out_chanant_stride)
#endif
{
// for loaded data samples
extern __shared__ int16_t sdata[];
const int nsamp_per_block = 32;
const int nchan_per_block = 16;
const int nchanant_per_block = nant * nchan_per_block;
const int warp_num = threadIdx.x / 32;
const int warp_idx = threadIdx.x & 0x1F; // % 32
// each warp reads a time sample, with the warp threads each reading the antenna and channels required
// offsets time sample offset + channel block offset + the chanant
unsigned idx = (blockIdx.x * nsamp_per_block + warp_num) * samp_stride + (blockIdx.y * chan_block_stride) + warp_idx;
// shared memory offset for this warp's time sample and chanant (the bank conflict padding is commented out)
unsigned sdx = (nchanant_per_block * warp_num) + warp_idx;// + (warp_num * 2);
// read the TFS input to TFS shared memory
for (unsigned i=0; i<nval_per_thread; i++)
{
if (idx < nval)
{
sdata[sdx] = in[idx];
idx += 32;
sdx += 32;
}
}
__syncthreads();
// each warp will write out 32 time samples for a single antenna, for a number of channels
const int ant = warp_num % nant;
int ichan = nval_per_thread * (warp_num / nant);
int ichanant = ichan * nant + ant;
#ifdef USE_CONSTANT_MEMORY
// TODO try removing these references
const int curr_from = curr_in_from[ant];
const int curr_to = curr_in_to[ant];
const int curr_out = curr_out_from[ant] - curr_from;
const int next_from = next_in_from[ant];
const int next_to = next_in_to[ant];
const int next_out = next_out_from[ant] - next_from;
#else
const int curr_to = curr_counter[2*nant + ant];
const int curr_from = curr_counter[nant + ant];
const int curr_out = curr_counter[ant] - curr_from;
const int next_to = next_counter[2*nant + ant];
const int next_from = next_counter[nant + ant];
const int next_out = next_counter[ant] - next_from;
#endif
// offset for this thread in shared memory
// sample * sample_stride_in_shm + chanant offset + shm bank trick
sdx = (warp_idx * nant * nchan_per_block) + ichanant;// + (warp_idx * 2);
// output chanant for this warp
const int ochanant = (blockIdx.y * nchan_per_block * nant) + ichanant;
int osamp = (blockIdx.x * nsamp_per_block) + warp_idx;
int64_t odx = ochanant * out_chanant_stride + osamp;
// loop over channels
for (unsigned i=0; i<nval_per_thread; i++)
{
if (curr_from <= osamp && osamp < curr_to)
curr[odx + curr_out] = sdata[sdx];
if (next_from <= osamp && osamp < next_to)
next[odx + next_out] = sdata[sdx];
sdx += nant;
odx += out_chanant_stride * nant;
}
}
void * hires_transpose_delay (cudaStream_t stream, transpose_delay_hires_t * ctx, void * d_in, uint64_t nbytes, mopsr_delay_hires_t ** delays)
{
const unsigned ndim = 2;
unsigned nthread = 1024;
// process 32 samples and 16 channels in a block
const unsigned nsamp_per_block = 32;
const unsigned nchan_per_block = 16;
const unsigned nchanblocks = ctx->nchan / nchan_per_block;
const unsigned nval_per_block = nsamp_per_block * nchan_per_block * ctx->nant;
const uint64_t nsamp = nbytes / (ctx->nchan * ctx->nant * ndim);
unsigned iant;
int shift;
const unsigned ichan = 0;
for (iant=0; iant < ctx->nant; iant++)
{
if (delays[iant][ichan].samples < ctx->half_ntap)
{
fprintf (stderr, "ERROR: [%d] delay in samples[%u] is less than ntap/2[%u]\n", iant, delays[iant][ichan].samples, ctx->half_ntap);
return 0;
}
if (ctx->first_kernel)
{
ctx->curr->h_delays[iant] = delays[iant][ichan].samples;
ctx->next->h_delays[iant] = delays[iant][ichan].samples;
ctx->curr->h_out_from[iant] = 0;
ctx->curr->h_in_from[iant] = ctx->curr->h_delays[iant] - ctx->half_ntap;
ctx->curr->h_in_to[iant] = nsamp;
ctx->curr->h_off[iant] = ctx->curr->h_in_to[iant] - ctx->curr->h_in_from[iant];
// should never be used on first iteration
ctx->next->h_out_from[iant] = 0;
ctx->next->h_in_from[iant] = nsamp;
ctx->next->h_in_to[iant] = 2 * nsamp;
}
else
{
// curr always uses delays from previous iteration
ctx->curr->h_out_from[iant] = ctx->curr->h_off[iant];
ctx->curr->h_in_from[iant] = 0;
ctx->curr->h_in_to[iant] = nsamp + (2 * ctx->half_ntap) - ctx->curr->h_off[iant];
if (nsamp + (2 * ctx->half_ntap) < ctx->curr->h_off[iant])
ctx->curr->h_in_to[iant] = 0;
// next always uses new delays
ctx->next->h_out_from[iant] = 0;
ctx->next->h_in_from[iant] = ctx->curr->h_in_to[iant] - (2 * ctx->half_ntap);
ctx->next->h_in_to[iant] = nsamp;
// handle a change in sample level delay this should be right
shift = delays[iant][ichan].samples - ctx->curr->h_delays[iant];
ctx->next->h_in_from[iant] += shift;
ctx->next->h_delays[iant] = delays[iant][ichan].samples;
ctx->next->h_off[iant] = ctx->next->h_in_to[iant] - ctx->next->h_in_from[iant];
}
}
/*
*/
#ifdef USE_CONSTANT_MEMORY
cudaMemcpyToSymbolAsync(curr_out_from, (void *) ctx->curr->h_out_from, ctx->curr->counter_size, 0, cudaMemcpyHostToDevice, stream);
cudaMemcpyToSymbolAsync(curr_in_from, (void *) ctx->curr->h_in_from, ctx->curr->counter_size, 0, cudaMemcpyHostToDevice, stream);
cudaMemcpyToSymbolAsync(curr_in_to, (void *) ctx->curr->h_in_to, ctx->curr->counter_size, 0, cudaMemcpyHostToDevice, stream);
cudaMemcpyToSymbolAsync(next_out_from, (void *) ctx->next->h_out_from, ctx->curr->counter_size, 0, cudaMemcpyHostToDevice, stream);
cudaMemcpyToSymbolAsync(next_in_from, (void *) ctx->next->h_in_from, ctx->curr->counter_size, 0, cudaMemcpyHostToDevice, stream);
cudaMemcpyToSymbolAsync(next_in_to, (void *) ctx->next->h_in_to, ctx->curr->counter_size, 0, cudaMemcpyHostToDevice, stream);
cudaStreamSynchronize(stream);
#else
cudaMemcpyAsync (ctx->curr->d_base, ctx->curr->h_base, ctx->curr->counter_bytes, cudaMemcpyHostToDevice, stream);
cudaMemcpyAsync (ctx->next->d_base, ctx->next->h_base, ctx->next->counter_bytes, cudaMemcpyHostToDevice, stream);
cudaStreamSynchronize(stream);
#endif
// special case where not a clean multiple [TODO validate this!]
if (nval_per_block % nthread)
{
unsigned numerator = nval_per_block;
while ( numerator > nthread )
numerator /= 2;
nthread = numerator;
}
unsigned nval_per_thread = nval_per_block / nthread;
// the total number of values we have to process is
const uint64_t nval = nbytes / ndim;
// the total number of samples is
dim3 blocks = dim3 (nsamp / nsamp_per_block, nchanblocks);
if (nsamp % nsamp_per_block)
blocks.x++;
const size_t sdata_bytes = (nsamp_per_block * nchan_per_block * ctx->nant * ndim) + 256;
// strides (in 16-bit samples) between consecutive time samples and between channel blocks of the input
const unsigned samp_stride = ctx->nchan * ctx->nant;
const unsigned chan_block_stride = nchan_per_block * ctx->nant;
const unsigned out_chanant_stride = nsamp + (2 * ctx->half_ntap);
#ifdef _GDEBUG
fprintf (stderr, "transpose_delay: nval_per_block=%u, nval_per_thread=%u\n", nval_per_block, nval_per_thread);
fprintf (stderr, "transpose_delay: nbytes=%lu, nsamp=%lu, nval=%lu\n", nbytes, nsamp, nval);
fprintf (stderr, "transpose_delay: nthread=%d, blocks=(%d,%d,%d) sdata_bytes=%d\n", nthread, blocks.x, blocks.y, blocks.z, sdata_bytes);
fprintf (stderr, "transpose_delay: out_chanant_stride=%u\n", out_chanant_stride);
#endif
#ifdef USE_CONSTANT_MEMORY
hires_transpose_delay_kernel<<<blocks,nthread,sdata_bytes,stream>>>((int16_t *) d_in,
(int16_t *) ctx->curr->d_buffer, (int16_t *) ctx->next->d_buffer,
ctx->nchan, ctx->nant, nval, nval_per_thread, samp_stride, chan_block_stride, out_chanant_stride);
#else
hires_transpose_delay_kernel<<<blocks,nthread,sdata_bytes,stream>>>((int16_t *) d_in,
(int16_t *) ctx->curr->d_buffer, (int16_t *) ctx->next->d_buffer,
(unsigned *) ctx->curr->d_base, (unsigned *) ctx->next->d_base,
ctx->nchan, ctx->nant, nval, nval_per_thread, samp_stride, chan_block_stride, out_chanant_stride);
#endif
#if _GDEBUG
check_error_stream("hires_transpose_delay_kernel", stream);
#endif
if (ctx->first_kernel)
{
ctx->first_kernel = 0;
return 0;
}
else
{
transpose_delay_hires_buf_t * save = ctx->curr;
ctx->curr = ctx->next;
ctx->next = save;
return save->d_buffer;
}
}
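// Illustrative sketch (not part of the original code): the calling pattern
// implied by the curr/next double buffering above. The first call returns 0
// while the buffers are still filling; every later call returns the buffer
// completed on the previous iteration. The d_in_blocks array, nblocks and
// block_bytes are placeholders for however the real caller supplies
// successive input blocks, and delays is assumed to be maintained elsewhere.
void hires_example_transpose_delay_loop (cudaStream_t stream,
                                         transpose_delay_hires_t * ctx,
                                         void ** d_in_blocks, unsigned nblocks,
                                         uint64_t block_bytes,
                                         mopsr_delay_hires_t ** delays)
{
  for (unsigned i = 0; i < nblocks; i++)
  {
    void * d_transposed = hires_transpose_delay (stream, ctx, d_in_blocks[i], block_bytes, delays);
    if (d_transposed)
    {
      // downstream processing (e.g. the fractional delay / SK kernels) would
      // consume the FST-ordered data in d_transposed here
    }
  }
}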
#ifdef USE_CONSTANT_MEMORY
// fringe coefficients are kept in constant memory here for fast broadcast access
__constant__ float fringe_coeffs[MOPSR_PFB_CHANANT_MAX];
// apply a fractional delay correction to a channel / antenna, warps will always
__global__ void hires_fringe_rotate_kernel (int16_t * input, uint64_t ndat)
#else
__global__ void hires_fringe_rotate_kernel (int16_t * input, uint64_t ndat,
const float * __restrict__ d_fringes,
const float * __restrict__ d_ant_scales_delay)
#endif
{
const unsigned isamp = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned iant = blockIdx.y;
const unsigned nant = gridDim.y;
const unsigned ichan = blockIdx.z;
const unsigned ichanant = (ichan * nant) + iant;
const uint64_t idx = ichanant * ndat + isamp;
if (isamp >= ndat)
return;
cuFloatComplex fringe_phasor;
#ifdef USE_CONSTANT_MEMORY
// using constant memory should result in broadcast for this block/half warp
sincosf (fringe_coeffs[ichanant], &(fringe_phasor.y), &(fringe_phasor.x));
#else
sincosf (d_fringes[ichanant], &(fringe_phasor.y), &(fringe_phasor.x));
#endif
int16_t val16 = input[idx];
int8_t * val8ptr = (int8_t *) &val16;
const float scale = d_ant_scales_delay[iant];
float re = ((float) (val8ptr[0]) + 0.38) * scale;
float im = ((float) (val8ptr[1]) + 0.38) * scale;
cuComplex val = make_cuComplex (re, im);
cuComplex rotated = cuCmulf(val, fringe_phasor);
// output from signal processing, should have 0 mean data
// i.e. we range from -128 to 127
val8ptr[0] = (int8_t) rintf (cuCrealf(rotated));
val8ptr[1] = (int8_t) rintf (cuCimagf(rotated));
input[idx] = val16;
}
//
// Perform fractional delay correction, out-of-place
//
void hires_fringe_rotate (cudaStream_t stream, void * d_in,
#ifdef USE_CONSTANT_MEMORY
float * h_fringes, size_t fringes_size,
#else
void * d_fringes,
void * d_ant_scales,
#endif
uint64_t nbytes, unsigned nchan,
unsigned nant)
{
const unsigned ndim = 2;
const uint64_t ndat = nbytes / (nchan * nant * ndim);
// number of threads that actually load data
unsigned nthread = 1024;
dim3 blocks (ndat / nthread, nant, nchan);
if (ndat % nthread)
blocks.x++;
#ifdef USE_CONSTANT_MEMORY
cudaMemcpyToSymbolAsync(fringe_coeffs, (void *) h_fringes, fringes_size, 0, cudaMemcpyHostToDevice, stream);
cudaStreamSynchronize(stream);
#endif
#if _GDEBUG
fprintf (stderr, "fringe_rotate: bytes=%lu ndat=%lu\n", nbytes, ndat);
fprintf (stderr, "fringe_rotate: nthread=%d, blocks.x=%d, blocks.y=%d, blocks.z=%d\n", nthread, blocks.x, blocks.y, blocks.z);
#endif
#ifdef USE_CONSTANT_MEMORY
hires_fringe_rotate_kernel<<<blocks, nthread, 0, stream>>>((int16_t *) d_in, ndat);
#else
hires_fringe_rotate_kernel<<<blocks, nthread, 0, stream>>>((int16_t *) d_in, ndat, (float *) d_fringes, (float *) d_ant_scales);
#endif
#if _GDEBUG
check_error_stream("hires_fringe_rotate_kernel", stream);
#endif
}
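// Illustrative host-side reference (not part of the original code) for the
// per-sample arithmetic of hires_fringe_rotate_kernel: offset the 8-bit
// values by +0.38, apply the antenna scale, rotate by exp(i * fringe_coeff)
// and round back to 8 bits. Intended only as a sketch for spot-checking a
// few samples against the GPU output; the function name is hypothetical.
void hires_example_fringe_rotate_host (int8_t * re, int8_t * im,
                                       float fringe_coeff, float ant_scale)
{
  const float phasor_re = cosf (fringe_coeff);
  const float phasor_im = sinf (fringe_coeff);
  const float in_re = ((float) (*re) + 0.38) * ant_scale;
  const float in_im = ((float) (*im) + 0.38) * ant_scale;
  // complex multiply (in_re + i in_im) * (phasor_re + i phasor_im)
  const float out_re = in_re * phasor_re - in_im * phasor_im;
  const float out_im = in_re * phasor_im + in_im * phasor_re;
  *re = (int8_t) rintf (out_re);
  *im = (int8_t) rintf (out_im);
}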
#ifdef USE_CONSTANT_MEMORY
void hires_delay_copy_scales (cudaStream_t stream, float * h_ant_scales, size_t nbytes)
{
cudaMemcpyToSymbolAsync (d_ant_scales_delay, (void *) h_ant_scales, nbytes, 0, cudaMemcpyHostToDevice, stream);
cudaStreamSynchronize(stream);
}
#endif
// apply a fractional delay correction to a channel / antenna, warps will always
__global__ void hires_delay_fractional_kernel (int16_t * input, int16_t * output,
const float * __restrict__ fir_coeffs,
#ifndef USE_CONSTANT_MEMORY
const float * __restrict__ d_fringes,
const float * __restrict__ d_ant_scales_delay,
#endif
unsigned nthread_run,
uint64_t nsamp_in,
const unsigned chan_stride,
const unsigned ant_stride,
const unsigned ntap)
{
// the input data for block are stored in blockDim.x values
extern __shared__ cuComplex fk_shared1[];
// the FIR filter stored in the final NTAP values
float * filter = (float *) (fk_shared1 + blockDim.x);
const unsigned half_ntap = (ntap / 2);
//const unsigned in_offset = 2 * half_ntap;
// iant blockIdx.y
// ichan blockIDx.z
const unsigned isamp = blockIdx.x * nthread_run + threadIdx.x;
const unsigned ichanant = blockIdx.z * gridDim.y + blockIdx.y;
const unsigned nsamp_out = nsamp_in - ( 2 * half_ntap);
cuComplex fringe_phasor;
#ifdef USE_CONSTANT_MEMORY
sincosf (fringe_coeffs[ichanant], &(fringe_phasor.y), &(fringe_phasor.x));
#else
sincosf (d_fringes[ichanant], &(fringe_phasor.y), &(fringe_phasor.x));
#endif
// read in the FIR coefficients
if (threadIdx.x < ntap)
filter[threadIdx.x] = fir_coeffs[(ichanant * ntap) + threadIdx.x];
if (isamp >= nsamp_in)
{
return;
}
// each thread must also load its data from main memory here chan_stride + ant_stride
const unsigned in_data_idx = (ichanant * nsamp_in) + isamp;
// const unsigned out_data_idx = ichanant * nsamp_out + isamp;
int16_t val16 = input[in_data_idx];
int8_t * val8ptr = (int8_t *) &val16;
{
const float scale = d_ant_scales_delay[blockIdx.y];
cuComplex val = make_cuComplex (((float) (val8ptr[0])) + 0.38, ((float) (val8ptr[1])) + 0.38);
val.x *= scale;
val.y *= scale;
fk_shared1[threadIdx.x] = cuCmulf(val, fringe_phasor);
}
__syncthreads();
// there are 2 * half_ntap threads that don't calculate anything
if ((threadIdx.x < nthread_run) && (isamp < nsamp_out))
{
float re = 0;
float im = 0;
for (unsigned i=0; i<ntap; i++)
{
re += cuCrealf(fk_shared1[threadIdx.x + i]) * filter[i];
im += cuCimagf(fk_shared1[threadIdx.x + i]) * filter[i];
}
// input is -127.5 to +127.5, output is -128 to 127
val8ptr[0] = (int8_t) rintf (re);
val8ptr[1] = (int8_t) rintf (im);
output[ichanant * nsamp_out + isamp] = val16;
}
}
// calculate the filter coefficients for each channel and antenna
__global__ void hires_calculate_fir_coeffs (float * delays, float * fir_coeffs, unsigned ntap)
{
const unsigned half_ntap = ntap / 2;
const unsigned ichanant = blockIdx.x;
const float itap = (float) threadIdx.x;
const float filter_order = ntap - 1;
float x = itap - delays[ichanant];
// Hamming window filter http://users.spa.aalto.fi/vpv/publications/vesan_vaitos/ch3_pt1_fir.pdf
float window = 0.54 - 0.46 * cos (2.0 * M_PI * x / filter_order);
float sinc = 1;
if (x != half_ntap)
{
x -= half_ntap;
x *= M_PI;
sinc = sinf(x) / x;
}
fir_coeffs[(ichanant * ntap) + threadIdx.x] = sinc * window;
}
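// Illustrative host-side reference (not part of the original code) mirroring
// hires_calculate_fir_coeffs for a single channel/antenna: a sinc centred on
// the fractional delay, weighted by a Hamming window over the filter order.
// Intended only as a sketch for cross-checking one set of ntap device
// coefficients; the function name is hypothetical.
void hires_example_fir_coeffs_host (float delay, float * fir_coeffs, unsigned ntap)
{
  const unsigned half_ntap = ntap / 2;
  const float filter_order = ntap - 1;
  for (unsigned itap = 0; itap < ntap; itap++)
  {
    float x = (float) itap - delay;
    // Hamming window, same form as the kernel above
    const float window = 0.54 - 0.46 * cos (2.0 * M_PI * x / filter_order);
    float sinc = 1;
    if (x != half_ntap)
    {
      x -= half_ntap;
      x *= M_PI;
      sinc = sinf (x) / x;
    }
    fir_coeffs[itap] = sinc * window;
  }
}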
// apply a fractional delay correction to a channel / antenna, warps will always
__global__ void hires_delay_fractional_float_kernel (int16_t * input,
cuFloatComplex * output, float * fir_coeffs,
#ifndef USE_CONSTANT_MEMORY
float * fringe_coeffs,
#endif
unsigned nthread_run, uint64_t nsamp_in,
const unsigned chan_stride, const unsigned ant_stride,
const unsigned ntap)
{
extern __shared__ float fk_shared_filter[];
cuFloatComplex * in_shm = (cuFloatComplex *) (fk_shared_filter + ntap + 1);
const unsigned half_ntap = ntap / 2;
const unsigned in_offset = 2 * half_ntap;
const unsigned isamp = blockIdx.x * nthread_run + threadIdx.x;
const unsigned iant = blockIdx.y;
const unsigned nant = gridDim.y;
const unsigned ichan = blockIdx.z;
const unsigned ichanant = ichan * nant + iant;
const unsigned nsamp_out = nsamp_in - in_offset;
// compute the complex term required for fringe stopping
cuFloatComplex fringe_phasor;
sincosf (fringe_coeffs[ichanant], &(fringe_phasor.y), &(fringe_phasor.x));
// read in the FIR coefficients
if (threadIdx.x < ntap)
{
fk_shared_filter[threadIdx.x] = fir_coeffs[(ichanant * ntap) + threadIdx.x];
}
// final block check for data input (not data output!)
if (isamp >= nsamp_in)
{
return;
}
// each thread must also load its data from main memory here chan_stride + ant_stride
const unsigned in_data_idx = (ichanant * nsamp_in) + isamp;
int16_t val16 = input[in_data_idx];
int8_t * val8ptr = (int8_t *) &val16;
cuFloatComplex val = make_cuComplex ((float) (val8ptr[0]) + 0.33, (float) (val8ptr[1]) + 0.33);
in_shm[threadIdx.x] = cuCmulf (val, fringe_phasor);
__syncthreads();
const unsigned osamp = (blockIdx.x * nthread_run) + threadIdx.x;
// there are 2 * half_ntap threads that don't calculate anything
if (threadIdx.x < nthread_run && osamp < nsamp_out)
{
cuFloatComplex sum = make_cuComplex(0,0);
for (unsigned i=0; i<ntap; i++)
{
val = in_shm[threadIdx.x + i];
val.x *= fk_shared_filter[i];
val.y *= fk_shared_filter[i];
sum = cuCaddf(sum, val);
}
unsigned ou_data_idx = (ichanant * nsamp_out) + osamp;
output[ou_data_idx] = sum;
}
}
//
// Perform fractional delay correction, out-of-place
//
void hires_delay_fractional (cudaStream_t stream, void * d_in, void * d_out,
float * d_delays, float * d_fir_coeffs,
#ifdef USE_CONSTANT_MEMORY
float * h_fringes, size_t fringes_size,
#else
void * d_fringes, void * d_ant_scales,
#endif
uint64_t nbytes, unsigned nchan,
unsigned nant, unsigned ntap)
{
const unsigned ndim = 2;
const uint64_t ndat = nbytes / (nchan * nant * ndim);
const unsigned half_ntap = ntap / 2;
// number of threads that actually load data
unsigned nthread_load = 1024;
if (ndat < nthread_load)
nthread_load = ndat;
unsigned nthread_run = nthread_load - (2 * half_ntap);
// need shared memory to load the ntap coefficients + nthread_load data points
const size_t sdata_bytes = (nthread_load * ndim + ntap) * sizeof(float);
dim3 blocks (ndat / nthread_run, nant, nchan);
if (ndat % nthread_load)
blocks.x++;
#ifdef USE_CONSTANT_MEMORY
cudaMemcpyToSymbolAsync (fringe_coeffs, (void *) h_fringes, fringes_size, 0, cudaMemcpyHostToDevice, stream);
cudaStreamSynchronize(stream);
#endif
// calculate the FIR coefficients to be used in the fractional delay
unsigned nthread = ntap;
unsigned nblock = nchan * nant;
hires_calculate_fir_coeffs<<<nblock,nthread,0,stream>>>((float *) d_delays, (float *) d_fir_coeffs, ntap);
#if _GDEBUG
check_error_stream("hires_calculate_fir_coeffs", stream);
#endif
#if _GDEBUG
fprintf (stderr, "delay_fractional: bytes=%lu ndat=%lu sdata_bytes=%ld\n", nbytes, ndat, sdata_bytes);
fprintf (stderr, "delay_fractional: blocks.x=%d, blocks.y=%d, blocks.z=%d\n", blocks.x, blocks.y, blocks.z);
fprintf (stderr, "delay_fractional: nthread_load=%d nthread_run=%d ntap=%d\n", nthread_load, nthread_run, ntap);
#endif
const unsigned chan_stride = nant * ndat;
const unsigned ant_stride = ndat;
#ifdef USE_CONSTANT_MEMORY
hires_delay_fractional_kernel<<<blocks, nthread_load, sdata_bytes, stream>>>((int16_t *) d_in, (int16_t *) d_out,
(float *) d_fir_coeffs, nthread_run, ndat, chan_stride, ant_stride, ntap);
#else
hires_delay_fractional_kernel<<<blocks, nthread_load, sdata_bytes, stream>>>((int16_t *) d_in, (int16_t *) d_out,
(float *) d_fir_coeffs, (float *) d_fringes, (float *) d_ant_scales, nthread_run, ndat, chan_stride, ant_stride, ntap);
#endif
#if _GDEBUG
check_error_stream("hires_delay_fractional_kernel", stream);
#endif
}
#ifdef HAVE_SHFL
__inline__ __device__
float warpReduceSumF(float val) {
for (int offset = warpSize/2; offset > 0; offset /= 2)
val += __shfl_down(val, offset);
return val;
}
__inline__ __device__
float blockReduceSumF(float val)
{
static __shared__ float shared[32]; // Shared mem for 32 partial sums
int lane = threadIdx.x % warpSize;
int wid = threadIdx.x / warpSize;
val = warpReduceSumF(val); // Each warp performs partial reduction
if (lane==0) shared[wid] = val; // Write reduced value to shared memory
__syncthreads(); // Wait for all partial reductions
//read from shared memory only if that warp existed
val = (threadIdx.x < blockDim.x / warpSize) ? shared[lane] : 0;
if (wid==0) val = warpReduceSumF(val); //Final reduce within first warp
return val;
}
__inline__ __device__
float blockReduceSumFS(float * vals)
{
float val = vals[threadIdx.x];
int lane = threadIdx.x % warpSize;
int wid = threadIdx.x / warpSize;
val = warpReduceSumF(val); // Each warp performs partial reduction
if (lane==0) vals[wid] = val; // Write reduced value to shared memory
__syncthreads(); // Wait for all partial reductions
//read from shared memory only if that warp existed
val = (threadIdx.x < blockDim.x / warpSize) ? vals[lane] : 0;
if (wid==0) val = warpReduceSumF(val); //Final reduce within first warp
return val;
}
__inline__ __device__
int warpReduceSumI(int val) {
for (int offset = warpSize/2; offset > 0; offset /= 2)
val += __shfl_down(val, offset);
return val;
}
__inline__ __device__
int blockReduceSumI(int val) {
static __shared__ int shared[32]; // Shared mem for 32 partial sums
int lane = threadIdx.x % warpSize;
int wid = threadIdx.x / warpSize;
val = warpReduceSumI(val); // Each warp performs partial reduction
if (lane==0) shared[wid]=val; // Write reduced value to shared memory
__syncthreads(); // Wait for all partial reductions
//read from shared memory only if that warp existed
val = (threadIdx.x < blockDim.x / warpSize) ? shared[lane] : 0;
if (wid==0) val = warpReduceSumI(val); //Final reduce within first warp
return val;
}
#endif
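#ifdef HAVE_SHFL
// Illustrative example (not part of the original code) of how the block-wide
// reduction helpers above can be used: one block per chanant sums ndat floats
// and thread 0 writes the total. This sketch assumes blockDim.x is a multiple
// of warpSize (as the helpers require) and that the caller launches one block
// per channel/antenna pair; the kernel name is hypothetical.
__global__ void hires_example_block_sum_kernel (const float * in, float * block_sums, uint64_t ndat)
{
  const unsigned ichanant = blockIdx.x;
  const float * indat = in + (ichanant * ndat);
  float sum = 0;
  // strided loop over this chanant's samples
  for (uint64_t idx = threadIdx.x; idx < ndat; idx += blockDim.x)
    sum += indat[idx];
  // reduce the per-thread partial sums across the whole block
  sum = blockReduceSumF (sum);
  if (threadIdx.x == 0)
    block_sums[ichanant] = sum;
}
#endif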
// Compute the mean of the real and imaginary components for each channel and antenna
__global__ void hires_measure_means_kernel (cuFloatComplex * in, cuFloatComplex * means, const unsigned nval_per_thread, const uint64_t ndat)
{
const unsigned iant = blockIdx.y;
const unsigned nant = gridDim.y;
const unsigned ichan = blockIdx.z;
const unsigned ichanant = (ichan * nant) + iant;
const uint64_t in_offset = ichanant * ndat;
cuFloatComplex * indat = in + in_offset;
unsigned idx = threadIdx.x * nval_per_thread;
cuFloatComplex val;
float sum_re = 0;
float sum_im = 0;
int count = 0;
for (unsigned ival=0; ival<nval_per_thread; ival++)
{
if (idx < ndat)
{
val = indat[idx];
sum_re += val.x;
sum_im += val.y;
count++;
}
idx += blockDim.x;
}
#ifdef HAVE_SHFL
// compute via block reduce sum
sum_re = blockReduceSumF(sum_re);
sum_im = blockReduceSumF(sum_im);
count = blockReduceSumI(count);
#endif
if (threadIdx.x == 0)
{
means[ichanant].x = sum_re / count;
means[ichanant].y = sum_im / count;
}
}
//
// Compute the S1 and S2 sums for blocks of input data, writing the S1 and S2 sums out to Gmem
//
__global__ void hires_skcompute_kernel (cuFloatComplex * in, float * s1s, float * s2s, const unsigned nval_per_thread, const uint64_t ndat)
{
extern __shared__ float skc_shm[];
const unsigned iant = blockIdx.y;
const unsigned nant = gridDim.y;
const unsigned ichan = blockIdx.z;
const unsigned ichanant = (ichan * nant) + iant;
const uint64_t in_offset = ichanant * ndat;
// offset into the block for the current channel and antenna
cuFloatComplex * indat = in + in_offset;
unsigned idx = (blockIdx.x * blockDim.x + threadIdx.x) * nval_per_thread;
cuFloatComplex val;
float s1_sum = 0;
float s2_sum = 0;
float power;
for (unsigned ival=0; ival<nval_per_thread; ival++)
{
if (idx < ndat)
{
val = indat[idx];
power = (val.x * val.x) + (val.y * val.y);
s1_sum += power;
s2_sum += (power * power);
}
idx += blockDim.x;
}
#ifdef HAVE_SHFL
const unsigned warp_idx = threadIdx.x % 32;
const unsigned warp_num = threadIdx.x / 32;
s1_sum += __shfl_down (s1_sum, 16);
s1_sum += __shfl_down (s1_sum, 8);
s1_sum += __shfl_down (s1_sum, 4);
s1_sum += __shfl_down (s1_sum, 2);
s1_sum += __shfl_down (s1_sum, 1);
s2_sum += __shfl_down (s2_sum, 16);
s2_sum += __shfl_down (s2_sum, 8);
s2_sum += __shfl_down (s2_sum, 4);
s2_sum += __shfl_down (s2_sum, 2);
s2_sum += __shfl_down (s2_sum, 1);
if (warp_idx == 0)
{
skc_shm [warp_num] = s1_sum;
skc_shm [32+warp_num] = s2_sum;
}
__syncthreads();
if (warp_num == 0)
{
s1_sum = skc_shm [warp_idx];
s2_sum = skc_shm [32 + warp_idx];
s1_sum += __shfl_down (s1_sum, 16);
s1_sum += __shfl_down (s1_sum, 8);
s1_sum += __shfl_down (s1_sum, 4);
s1_sum += __shfl_down (s1_sum, 2);
s1_sum += __shfl_down (s1_sum, 1);
s2_sum += __shfl_down (s2_sum, 16);
s2_sum += __shfl_down (s2_sum, 8);
s2_sum += __shfl_down (s2_sum, 4);
s2_sum += __shfl_down (s2_sum, 2);
s2_sum += __shfl_down (s2_sum, 1);
}
#endif
if (threadIdx.x == 0)
{
// FST ordered
const unsigned out_idx = (ichanant * gridDim.x) + blockIdx.x;
//if (iant == 0 && ichan == 168)
// printf ("s1s[%u]=%f\n", out_idx, s1_sum);
s1s[out_idx] = s1_sum;
s2s[out_idx] = s2_sum;
}
}
void hires_test_skcompute (cudaStream_t stream, void * d_in, void * d_s1s_out, void * d_s2s_out, unsigned nchan, unsigned nant, unsigned nbytes)
{
const unsigned ndim = 2;
const uint64_t ndat = nbytes / (nchan * nant * ndim * sizeof(float));
const unsigned nthreads = 1024;
const unsigned nval_per_thread = 1;
size_t shm_bytes = 64 * sizeof(float);
dim3 blocks (ndat / nthreads, nant, nchan);
if (ndat % nthreads)
blocks.x++;
//#ifdef _GDEBUG
fprintf (stderr, "hires_skcompute_kernel: bytes=%lu ndat=%lu shm_bytes=%ld\n", nbytes, ndat, shm_bytes);
fprintf (stderr, "hires_skcompute_kernel: blocks.x=%d, blocks.y=%d, blocks.z=%d, nthreads=%u\n", blocks.x, blocks.y, blocks.z, nthreads);
fprintf (stderr, "hires_skcompute_kernel: d_in=%p d_s1s_out=%p, d_s2s_out=%p nval_per_thread=%u, ndat_sk=%lu\n", d_in, d_s1s_out, d_s2s_out, nval_per_thread, ndat);
//#endif
hires_skcompute_kernel<<<blocks, nthreads, shm_bytes, stream>>>( (cuFloatComplex *) d_in, (float *) d_s1s_out, (float *) d_s2s_out, nval_per_thread, ndat);
check_error_stream("hires_skcompute_kernel", stream);
}
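// Illustrative sketch (not part of the original code): sizing helper for the
// S1/S2 buffers written by hires_skcompute_kernel. With one block per M
// samples, each chanant produces ndat / M sums in FST order, so each buffer
// needs nchan * nant * (ndat / M) floats. M = 1024 in the callers above; the
// helper name is hypothetical.
size_t hires_example_sk_sums_bytes (unsigned nchan, unsigned nant, uint64_t ndat, unsigned M)
{
  const uint64_t nsums = ndat / M;
  return (size_t) nchan * nant * nsums * sizeof(float);
}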
__device__ inline void Comparator(
float &valA,
float &valB,
uint dir
)
{
float k;
if ((valA > valB) == dir)
{
k = valA;
valA = valB;
valB = k;
}
}
__device__ inline void shm_merge_sort (unsigned length, float * keys)
{
const unsigned maxthread = length / 2;
for (uint size = 2; size <= length; size <<= 1)
{
uint stride = size / 2;
uint offset = threadIdx.x & (stride - 1);
{
__syncthreads();
if (threadIdx.x < maxthread)
{
uint pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
Comparator (keys[pos + 0], keys[pos + stride], 1);
}
stride >>= 1;
}
for (; stride > 0; stride >>= 1)
{
__syncthreads();
uint pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
if (threadIdx.x < maxthread)
{
if (offset >= stride)
{
Comparator( keys[pos - stride], keys[pos + 0], 1);
}
}
}
}
__syncthreads();
}
// simplistic block wide shared memory sum, 1 val per thread
__device__ inline float shm_sum_thread (unsigned length, float * keys)
{
for (unsigned size=length/2; size>0; size >>= 1)
{
if (threadIdx.x < size)
keys[threadIdx.x] += keys[threadIdx.x + size];
__syncthreads();
}
return keys[0];
}
__global__ void shm_merge_sort_kernel2 (float *d_Dst,
float *d_Src,
unsigned arrayLength,
unsigned dir)
{
//Shared memory storage for one or more small vectors
__shared__ float keys[1024];
keys[threadIdx.x] = d_Src[threadIdx.x];
__syncthreads();
shm_merge_sort (arrayLength, keys);
__syncthreads();
d_Dst[threadIdx.x] = keys[threadIdx.x];
}
void test_merge_sort2 (cudaStream_t stream, float * d_key_out, float * d_key_in, unsigned length, unsigned direction) {
unsigned nblocks = 1;
unsigned nthreads = length;
shm_merge_sort_kernel2<<<nblocks, nthreads, 0, stream>>> (d_key_out, d_key_in, length, direction);
check_error_stream("shm_merge_sort_kernel2", stream);
return;
}
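// Illustrative sketch (not part of the original code) of driving the sort test
// above from the host. length is assumed to be a power of two and at most
// 1024, since shm_merge_sort_kernel2 runs a single block of `length` threads
// over a power-of-two network; shm_merge_sort itself always orders ascending,
// so the direction argument is passed through unchanged. Buffer and function
// names here are placeholders.
void hires_example_test_merge_sort2 (cudaStream_t stream, float * h_keys, unsigned length)
{
  float * d_in = 0;
  float * d_out = 0;
  const size_t bytes = length * sizeof(float);
  cudaMalloc ((void **) &d_in, bytes);
  cudaMalloc ((void **) &d_out, bytes);
  cudaMemcpyAsync (d_in, h_keys, bytes, cudaMemcpyHostToDevice, stream);
  test_merge_sort2 (stream, d_out, d_in, length, 1);
  cudaMemcpyAsync (h_keys, d_out, bytes, cudaMemcpyDeviceToHost, stream);
  cudaStreamSynchronize (stream);
  cudaFree (d_in);
  cudaFree (d_out);
}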
__global__ void hires_compute_sigmas_kernel (float * in, cuFloatComplex * thresholds,
float * voltage_sigmas, unsigned nsums)
{
extern __shared__ float csk_keys[];
// iant = blockIdx.y;
// nant = gridDim.y;
// ichan = blockIdx.z;
// nchan = gridDim.z;
const unsigned ichanant = (blockIdx.z * gridDim.y) + blockIdx.y;
float s1 = in[(ichanant * nsums) + threadIdx.x];
// read the 16 input values into shared memory
csk_keys[threadIdx.x] = s1;
__syncthreads();
// sort using shared memory
shm_merge_sort (nsums, csk_keys);
__syncthreads();
float median = csk_keys[nsums / 2];
__syncthreads();
// now subtract median from s1 value in key and take abs value
csk_keys[threadIdx.x] = fabsf(csk_keys[threadIdx.x] - median);
__syncthreads();
// now sort again
shm_merge_sort (nsums, csk_keys);
__syncthreads();
// convert median absolute deviation to standard deviation
float sigma = csk_keys[nsums / 2] * 1.4826;
// set the thresholds
if (threadIdx.x == 0)
{
thresholds[ichanant].x = median;
thresholds[ichanant].y = sigma;
}
csk_keys[threadIdx.x] = s1;
__syncthreads();
// simple sum whereby nsums == nthreads
s1 = shm_sum_thread (nsums, csk_keys);
if (threadIdx.x == 0)
{
voltage_sigmas[ichanant] = sqrtf (s1 / (nsums * 1024 * 2));
}
}
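// Illustrative host-side reference (not part of the original code) for the
// robust statistics used by hires_compute_sigmas_kernel: sigma is estimated
// as 1.4826 * median(|x - median(x)|), the Gaussian-consistent scaling of the
// median absolute deviation. A simple insertion sort keeps the sketch
// self-contained; n is assumed small (nsums-sized) and the work array is
// modified in place. Both function names are hypothetical.
void hires_example_insertion_sort (float * vals, unsigned n)
{
  for (unsigned i = 1; i < n; i++)
  {
    const float key = vals[i];
    unsigned j = i;
    while (j > 0 && vals[j-1] > key)
    {
      vals[j] = vals[j-1];
      j--;
    }
    vals[j] = key;
  }
}

void hires_example_median_sigma_host (float * work, unsigned n, float * median, float * sigma)
{
  hires_example_insertion_sort (work, n);
  *median = work[n / 2];
  for (unsigned i = 0; i < n; i++)
    work[i] = fabsf (work[i] - *median);
  hires_example_insertion_sort (work, n);
  *sigma = work[n / 2] * 1.4826;
}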
/*
*/
__global__ void hires_compute_power_limits_kernel (float * in, cuFloatComplex * thresholds,
float * voltage_sigmas, int8_t * mask, curandStatePhilox4_32_10_t * rstates, unsigned nsums, unsigned valid_memory,
unsigned nsigma, unsigned iblock)
{
// iant = blockIdx.y;
// nant = gridDim.y;
// ichan = blockIdx.z;
// nchan = gridDim.z;
//const unsigned n_elements = nsums * valid_memory;
// 1024 threads, 16 samples/block, 64 memory blocks, each thread does 1 sample
const unsigned mem_block = threadIdx.x / 16;
const unsigned mem_element = threadIdx.x % 16;
const unsigned ichanant = (blockIdx.z * gridDim.y) + blockIdx.y;
const unsigned nchanant = gridDim.z * gridDim.y;
// get the generator for this channel and antenna [gridDim.x == 1]
const unsigned id = ichanant * blockDim.x + threadIdx.x;
// a maximum of 32 * 32 keys [1024] will be handled by 1024 threads.
__shared__ float keys[1024];
// existing median and sigma for the S1s
float median = thresholds[ichanant].x;
float sigma = thresholds[ichanant].y;
float s1 = 0;
float s1_count = 0;
// S1 values stored as 64 sets of FST in blocks that are each 16 samples
// ichanant offset + offset into the memory [0-16]
if (mem_block < valid_memory)
{
s1 = in[(mem_block * nchanant * 16) + (ichanant * 16) + mem_element];
s1_count = 1;
// if skdetect has determined this sample is bad, generate something similar
if ((mem_block == iblock) && (mask[ichanant * 16 + mem_element] > 0))
{
s1 = median + (curand_normal (&(rstates[id])) * sigma);
in[(mem_block * nchanant * 16) + (ichanant * 16) + mem_element] = s1;
}
}
// now find the median and median absolute deviation (stddev)
keys[threadIdx.x] = s1;
__syncthreads();
// sort the nelements values using shared memory
shm_merge_sort (1024, keys);
__syncthreads();
unsigned centre = 1024 - ((valid_memory * 16) / 2);
median = keys[centre];
__syncthreads();
// now subtract median from s1 value in key and take abs value
if (s1 > 0)
keys[threadIdx.x] = fabsf(s1 - median);
else
keys[threadIdx.x] = 0;
__syncthreads();
// now sort again
shm_merge_sort (1024, keys);
__syncthreads();
// convert median absolute deviation to standard deviation
sigma = keys[centre] * 1.4826;
//if (blockIdx.z == 210 && blockIdx.y == 0 && iblock == 0 && threadIdx.x < 16)
// printf ("[%d] s1=%f centre=%u median=%f sigma=%f\n", threadIdx.x, s1, centre, median, sigma);
// now sum S1 across threads
#ifdef HAVE_SHFL
s1 += __shfl_down (s1, 16);
s1 += __shfl_down (s1, 8);
s1 += __shfl_down (s1, 4);
s1 += __shfl_down (s1, 2);
s1 += __shfl_down (s1, 1);
s1_count += __shfl_down (s1_count, 16);
s1_count += __shfl_down (s1_count, 8);
s1_count += __shfl_down (s1_count, 4);
s1_count += __shfl_down (s1_count, 2);
s1_count += __shfl_down (s1_count, 1);
#endif
unsigned warp_idx = threadIdx.x % 32;
unsigned warp_num = threadIdx.x / 32;
if (warp_idx == 0)
{
keys[warp_num] = s1;
keys[32+warp_num] = s1_count;
}
__syncthreads();
if (warp_num == 0)
{
s1 = keys[warp_idx];
s1_count = keys[32+warp_idx];
#ifdef HAVE_SHFL
s1 += __shfl_down (s1, 16);
s1 += __shfl_down (s1, 8);
s1 += __shfl_down (s1, 4);
s1 += __shfl_down (s1, 2);
s1 += __shfl_down (s1, 1);
s1_count += __shfl_down (s1_count, 16);
s1_count += __shfl_down (s1_count, 8);
s1_count += __shfl_down (s1_count, 4);
s1_count += __shfl_down (s1_count, 2);
s1_count += __shfl_down (s1_count, 1);
#endif
// this sigma is the stddev of the voltages (hence 1024 * 2)
if (warp_idx == 0)
{
//voltage_sigmas[ichanant] = sqrtf(s1 / (s1_count * 2048));
voltage_sigmas[ichanant] = sqrtf(median / 2048);
// now we have the median and sigma for the memory blocks of S1, compute the
// total power thresholds
thresholds[ichanant].x = median;
thresholds[ichanant].y = sigma;
}
}
}
void hires_test_compute_power_limits (cudaStream_t stream, void * d_s1s, void * d_sigmas,
void * d_thresh, void * d_mask, unsigned nsums, unsigned nant, unsigned nchan, uint64_t ndat,
uint64_t s1_count, unsigned s1_memory, void * d_rstates)
{
dim3 blocks_skm (1, nant, nchan);
unsigned nthreads = 1024;
const unsigned nsigma = 4;
unsigned valid_memory = s1_memory;
if (s1_count < s1_memory)
valid_memory = (unsigned) s1_count;
#ifdef _DEBUG
fprintf (stderr, "test_compute_power_limits: d_s1s=%p d_thresh=%p\n", d_s1s, d_thresh);
fprintf (stderr, "test_compute_power_limits: nant=%u nchan=%u ndat=%lu\n", nant, nchan, ndat);
fprintf (stderr, "test_compute_power_limits: nsums=%u nmemory=%u nsigma=%u\n", nsums, valid_memory, nsigma);
#endif
hires_compute_power_limits_kernel<<<blocks_skm,nthreads,0,stream>>>((float *) d_s1s,
(cuFloatComplex *) d_thresh, (float *) d_sigmas, (int8_t *) d_mask, (curandStatePhilox4_32_10_t *) d_rstates, nsums, valid_memory, nsigma, 0);
check_error_stream("hires_compute_power_limits_kernel", stream);
}
//
// take the S1 and S2 values in sums.x and sums.y that were computed
// from M samples, and integrate over nsums blocks to
// compute a sk mask and zap
//
__global__ void hires_skdetect_kernel (float * s1s, float * s2s, cuFloatComplex * power_thresholds,
int8_t * mask, float * sigmas,
unsigned nchan_sum, unsigned sk_nsigma,
unsigned nsums, unsigned M, unsigned nval_per_thread)
{
// zap mask for each set of M samples
extern __shared__ int8_t smask_det[];
// maximum to be 16384 samples (20.97152 ms)
// unsigned sk_idx_max = 16;
// given the buffer sizes of 16384 samples, we shall not exceed 2^14
// 2^11 is an important one: 10.24us * 2048 samples == 20.97152 ms
// maximum to be 2048 samples (20.97152 ms)
unsigned sk_idx_max = 14;
// 3 sigma
const float sk_low[15] = { 0, 0, 0, 0, 0,
0.387702, 0.492078, 0.601904, 0.698159, 0.775046,
0.834186, 0.878879, 0.912209, 0.936770, 0.954684};
const float sk_high[15] = { 0, 0, 0, 0, 0,
2.731480, 2.166000, 1.762970, 1.495970, 1.325420,
1.216950, 1.146930, 1.100750, 1.069730, 1.048570};
const unsigned iant = blockIdx.y;
const unsigned nant = gridDim.y;
const unsigned ichan = blockIdx.z;
const unsigned nchan = gridDim.z;
const unsigned ichanant = (ichan * nant) + iant;
// ASSUME! nsums == nthreads
// initialize zap mask to 0 in shared memory [FT order]
unsigned idx = threadIdx.x;
for (unsigned i=0; i<nchan_sum; i++)
{
smask_det[idx] = 0;
idx += nsums;
}
__syncthreads();
// log2 of 1024
const unsigned log2_M = (unsigned) log2f (M);
// S1 and S2 sums are stored as FST
s1s += (ichanant * nsums);
s2s += (ichanant * nsums);
idx = threadIdx.x;
if (!((ichan == 54 || ichan == 105 || ichan == 155 || ichan == 204)))
nchan_sum = 1;
// for each different boxcar width
for (unsigned sk_idx = log2_M; sk_idx < sk_idx_max; sk_idx ++)
{
// the number of S1 (1024 powers) to add to this boxcar
const unsigned to_add = (unsigned) exp2f (sk_idx - log2_M);
// prevent running over the end of the array
if (idx + to_add <= nsums)
{
const float m = (float) (M * to_add);
const float m_fac = (m + 1) / (m - 1);
// the number of channels that are considered bad
// 2 sigma == 9 channels
// 1 sigma == 25 channels
unsigned nchan_bad_count = 0;
const unsigned nchan_bad_limit = 12;
float sk_avg = 0;
unsigned cdx = idx;
// loop over the channels in our sum
for (unsigned i=ichan; i<(ichan+nchan_sum); i++)
{
const unsigned ica = i * nant + iant;
const float median = power_thresholds[ica].x;
const float sigma = power_thresholds[ica].y; // / sqrtf(to_add);
const float chan_sum_limit = 2 * sigma;
const float power_limit = 3 * sigma;
// compute the SK estimate for this boxcar width and channel
float s1 = 1e-10;
float s2 = 1e-10;
for (unsigned ichunk=0; ichunk < to_add; ichunk++)
{
s1 += s1s[cdx + ichunk];
s2 += s2s[cdx + ichunk];
}
float sk_estimate = m_fac * (m * (s2 / (s1 * s1)) - 1);
sk_avg += sk_estimate;
float s1_avg = s1 / to_add;
// test the SK estimate for only the current channel
if (i == ichan)
{
if ((sk_estimate < sk_low[sk_idx]) || (sk_estimate > sk_high[sk_idx]))
{
for (unsigned ichunk=0; ichunk < to_add; ichunk++)
{
smask_det[idx+ichunk] = (int8_t) 1;
}
}
// test if the average S1 power exceeds the 3sigma limits from the long running median/sigma
if ((s1_avg > (median + power_limit)) || (s1_avg < (median - power_limit)))
{
for (unsigned ichunk=0; ichunk < to_add; ichunk++)
{
smask_det[idx+ichunk] = 3;
}
}
}
// phone call detector
// test if the average S1 power exceeds the special limit for channel summation
if (s1_avg > (median + chan_sum_limit))
nchan_bad_count ++;
// increment by 1 channel
cdx += (nant * nsums);
}
// if this is a phone call band, check the limits on the SK average and nchan_bad
if (nchan_sum == 50)
{
#ifdef SKAVG_METHOD
float mu2 = (4 * m * m) / ((m-1) * (m + 2) * (m + 3));
float one_sigma_idat = sqrtf(mu2 / nchan_sum);
float upper = 1 + (sk_nsigma * one_sigma_idat);
float lower = 1 - (sk_nsigma * one_sigma_idat);
sk_avg /= nchan_sum;
if ((sk_avg < lower) || (sk_avg > upper) || (nchan_bad_count > nchan_bad_limit))
#else
if (nchan_bad_count > nchan_bad_limit)
#endif
{
cdx = idx;
for (unsigned i=0; i<nchan_sum; i++)
{
for (unsigned ichunk=0; ichunk < to_add; ichunk++)
{
smask_det[cdx+ichunk] = 2;
}
cdx += nsums;
}
}
}
}
}
// now write out the SK mask to gmem
for (unsigned i=0; i < nchan_sum; i++)
{
if ((ichan + i) < nchan)
{
unsigned odx = (((ichan + i) * nant) + iant) * nsums + threadIdx.x;
unsigned sdx = i * nsums + threadIdx.x;
if ((sdx < nchan_sum * nsums) && (smask_det[sdx] > 0))
{
mask[odx] = smask_det[sdx];
}
}
}
}
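// Illustrative host-side reference (not part of the original code) for the
// generalised spectral kurtosis estimator evaluated per boxcar in the kernel
// above: with m = M * to_add power samples, S1 = sum(p), S2 = sum(p^2) and
// SK = ((m + 1) / (m - 1)) * (m * S2 / S1^2 - 1), which is close to 1 for
// clean Gaussian noise and is compared against the sk_low / sk_high tables.
// The function name is hypothetical.
float hires_example_sk_estimate_host (const float * powers, unsigned m)
{
  float s1 = 1e-10;
  float s2 = 1e-10;
  for (unsigned i = 0; i < m; i++)
  {
    s1 += powers[i];
    s2 += powers[i] * powers[i];
  }
  const float m_fac = ((float) m + 1) / ((float) m - 1);
  return m_fac * ((float) m * (s2 / (s1 * s1)) - 1);
}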
void hires_test_skdetect (cudaStream_t stream, void * d_s1s, void * d_s2s, void * d_thresh,
void * d_mask, void * d_sigmas, unsigned nsums, unsigned nant,
unsigned nchan, uint64_t ndat)
{
unsigned M = 1024;
//////////////////////////////////////////////////////////
// detect SK outliers and build the mask
dim3 blocks (1, nant, nchan);
unsigned nthreads = 1024;
unsigned nval_per_thread = 1;
if (nsums > nthreads)
{
nval_per_thread = nsums / nthreads;
if (nsums % nthreads)
nval_per_thread++;
}
else
nthreads = nsums;
unsigned nchan_sum = 50;
unsigned sk_nsigma = 4;
size_t shm_bytes = (nchan_sum + 1) * nsums * sizeof(uint8_t);
size_t mask_size = nsums * nchan * nant * sizeof(uint8_t);
cudaMemsetAsync (d_mask, 0, mask_size, stream);
cudaStreamSynchronize(stream);
fprintf (stderr, "hires_skdetect_kernel: blocks.x=%d, blocks.y=%d, blocks.z=%d\n", blocks.x, blocks.y, blocks.z);
fprintf (stderr, "hires_skdetect_kernel: nthreads=%u shm_bytes=%ld\n", nthreads, shm_bytes);
fprintf (stderr, "hires_skdetect_kernel: d_s1s=%p, d_s2s=%p, d_masks=%p, nsums=%u, M=%u, nval_per_thread=%u\n", d_s1s, d_s2s, d_mask, nsums, M, nval_per_thread);
hires_skdetect_kernel<<<blocks, nthreads, shm_bytes, stream>>>((float *) d_s1s, (float *) d_s2s, (cuFloatComplex *) d_thresh, (int8_t *) d_mask, (float *) d_sigmas, nchan_sum, sk_nsigma, nsums, M, nval_per_thread);
check_error_stream("hires_skdetect_kernel", stream);
}
//
// take the S1 and S2 values in sums.x and sums.y that were computed
// from M samples, and integrate over nsums blocks to
// compute an SK mask and zap
//
__global__ void hires_skmask_kernel (float * in, int8_t * out, int8_t * mask,
curandStatePhilox4_32_10_t * rstates, float * sigmas,
#ifndef USE_CONSTANT_MEMORY
const float * __restrict__ d_ant_scales_delay,
#endif
unsigned nsums, unsigned M, unsigned nval_per_thread,
unsigned nsamp_per_thread, uint64_t ndat,
char replace_noise)
{
const unsigned iant = blockIdx.y;
const unsigned nant = gridDim.y;
const unsigned ichan = blockIdx.z;
const unsigned ichanant = (ichan * nant) + iant;
const unsigned id = ichanant * blockDim.x + threadIdx.x;
curandStatePhilox4_32_10_t localState = rstates[id];
float sigma = sigmas[ichanant];
int8_t * chanant_mask = mask + (ichanant * nsums);
// Jenet & Anderson 1998, 6-bit (2-bits for RFI) spacing
const float spacing = 0.09925; // 6-bit
//const float spacing = 0.02957; // 8-bit
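// spacing is assumed to be the optimal quantisation step, in units of the
// noise sigma, for the chosen output bit depth (Jenet & Anderson 1998)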
// don't do antenna scaling here any more for the moment, unless it is zero
const float ant_scale = d_ant_scales_delay[iant];
float data_factor = ant_scale / (sigma * spacing);
float rand_factor = ant_scale / spacing;
if (!replace_noise)
rand_factor = 0;
// now we want to zap all blocks of input that have an associated mask
// note that this kernel has only 1 block in x, with blockDim.x threads that may not match the number of values per sum
const unsigned ndim = 2;
const unsigned nval_per_sum = M * ndim;
unsigned block_offset = (ichanant * ndat * ndim);
float * indat = in + block_offset;
int8_t * outdat = out + block_offset;
// foreach block of M samples (i.e. 1 sum)
for (unsigned isum=0; isum<nsums; isum++)
{
// use the threads to write out the int8_t scaled value (or zapped value)
// back to global memory. There are 2 * M values to write each iteration
unsigned idx = threadIdx.x;
#ifdef SHOW_MASK
for (unsigned isamp=0; isamp<nsamp_per_thread; isamp++)
{
if (idx < nval_per_sum)
{
outdat[idx] = (int8_t) chanant_mask[isum];
}
idx += blockDim.x;
}
#else
if (chanant_mask[isum] > 0)
{
// it is more efficient to generate 4 floats at a time
for (unsigned isamp=0; isamp<nsamp_per_thread; isamp+=4)
{
const float4 inval = curand_normal4 (&localState);
if (idx < nval_per_sum)
outdat[idx] = (int8_t) rintf(inval.x * rand_factor);
idx += blockDim.x;
if (idx < nval_per_sum)
outdat[idx] = (int8_t) rintf(inval.y * rand_factor);
idx += blockDim.x;
if (idx < nval_per_sum)
outdat[idx] = (int8_t) rintf(inval.z * rand_factor);
idx += blockDim.x;
if (idx < nval_per_sum)
outdat[idx] = (int8_t) rintf(inval.w * rand_factor);
idx += blockDim.x;
}
}
else
{
for (unsigned isamp=0; isamp<nsamp_per_thread; isamp++)
{
if (idx < nval_per_sum)
{
outdat[idx] = (int8_t) rintf (indat[idx] * data_factor);
}
idx += blockDim.x;
}
}
#endif
outdat += ndim * M;
indat += ndim * M;
}
rstates[id] = localState;
}
void hires_test_skmask (cudaStream_t stream, void * d_in, void * d_out, void * d_mask, void * d_rstates, void * d_sigmas,
#ifndef USE_CONSTANT_MEMORY
void * d_ant_scales_delay,
#endif
unsigned nsums, unsigned nchan, unsigned nant, uint64_t ndat, char replace_noise)
{
unsigned M = 1024;
unsigned ndim = 2;
//////////////////////////////////////////////////////////
// mask the input data
dim3 blocks (1, nant, nchan);
unsigned nthreads = 1024;
unsigned nval_per_thread = 1;
if (nsums > nthreads)
{
nval_per_thread = nsums / nthreads;
if (nsums % nthreads)
nval_per_thread++;
}
else
nthreads = nsums;
unsigned nsamp_per_thread = (M * ndim) / nthreads;
if (M % nthreads)
nsamp_per_thread++;
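// each thread re-quantises nsamp_per_thread of the M * ndim values in each sum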
size_t shm_bytes = 0;
fprintf (stderr, "hires_skmask_kernel: blocks.x=%d, blocks.y=%d, blocks.z=%d\n", blocks.x, blocks.y, blocks.z);
fprintf (stderr, "hires_skmask_kernel: nthreads=%u shm_bytes=%ld\n", nthreads, shm_bytes);
fprintf (stderr, "hires_skmask_kernel: d_in=%p d_out=%p, d_mask=%p, nsums=%u M=%u, nval_per_thread=%u, nsamp_per_thread=%u ndat=%lu\n", d_in, d_out, d_in, nsums, M, nval_per_thread, nsamp_per_thread, ndat);
hires_skmask_kernel<<<blocks, nthreads, shm_bytes, stream>>>((float *) d_in, (int8_t *) d_out,
(int8_t *) d_mask, (curandStatePhilox4_32_10_t *) d_rstates, (float *) d_sigmas,
#ifndef USE_CONSTANT_MEMORY
(float *) d_ant_scales_delay,
#endif
nsums, M, nval_per_thread, nsamp_per_thread, ndat, replace_noise);
check_error_stream("hires_skmask_kernel", stream);
}
__global__ void hires_srand_setup_kernel_sparse (unsigned long long seed, unsigned pfb_idx, unsigned nrngs, unsigned nval_per_thread, curandStatePhilox4_32_10_t * rstates)
{
unsigned id = threadIdx.x;
unsigned long long sequence = (blockDim.x * pfb_idx) + threadIdx.x;
unsigned long long local_seed = seed;
unsigned long long offset = 0;
unsigned long long skip = nrngs;
curandStatePhilox4_32_10_t local_state;
curand_init (local_seed, sequence, offset, &local_state);
rstates[id] = local_state;
id += blockDim.x;
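// subsequent states for this thread are skipped ahead by nrngs sequences so
// that no two generators produce overlapping random streams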
for (unsigned i=1; i<nval_per_thread; i++)
{
skipahead_sequence (skip, &local_state);
rstates[id] = local_state;
id += blockDim.x;
}
}
void hires_init_rng_sparse (cudaStream_t stream, unsigned long long seed, unsigned nrngs, unsigned pfb_idx, unsigned npfb, void * states)
{
unsigned nthreads = 1024;
unsigned nval_per_thread = nrngs / nthreads;
#if _GDEBUG
fprintf (stderr, "rand_setup: nrngs=%u nval_per_thread=%u nthreads=%u pfb_idx=%u\n", nrngs, nval_per_thread, nthreads, pfb_idx);
#endif
hires_srand_setup_kernel_sparse<<<1, nthreads, 0, stream>>>(seed, pfb_idx, nrngs, nval_per_thread, (curandStatePhilox4_32_10_t *) states);
#if _GDEBUG
check_error_stream("hires_srand_setup_kernel", stream);
#endif
}
__global__ void hires_srand_setup_kernel (unsigned long long seed, unsigned pfb_idx, unsigned npfb, curandStatePhilox4_32_10_t *states)
{
unsigned id = blockIdx.x * blockDim.x + threadIdx.x;
// local seed will be different for each of NCHAN * NANT * 1024 generators
// sequence 0, since each sequence increment involves 2^67 steps through the RNG sequence!
// offset is the step through the random sequence
unsigned long long sequence = 0;
unsigned long long offset = id;
unsigned long long local_seed = (seed << 20) + id;
// more efficient, since moving along the sequence of a seed is expensive
curand_init (local_seed, sequence, offset, &states[id]);
//curand_init( (seed << 20) + id, 0, 0, &states[id]);
}
void hires_init_rng (cudaStream_t stream, unsigned long long seed, unsigned nrngs, unsigned pfb_idx, unsigned npfb, void * states)
{
unsigned nthreads = 1024;
unsigned nblocks = nrngs / nthreads;
#if _GDEBUG
fprintf (stderr, "rand_setup: nblocks=%u nthreads=%u\n", nblocks, nthreads);
#endif
hires_srand_setup_kernel<<<nblocks, nthreads, 0, stream>>>(seed, pfb_idx, npfb, (curandStatePhilox4_32_10_t *) states);
#if _GDEBUG
check_error_stream("hires_srand_setup_kernel", stream);
#endif
}
// out-of-place
//
void hires_delay_fractional_sk_scale (cudaStream_t stream,
void * d_in, void * d_out, void * d_fbuf, void * d_rstates,
void * d_sigmas, void * d_mask, float * d_delays, void * d_fir_coeffs,
#ifndef USE_CONSTANT_MEMORY
void * d_fringes, void * d_ant_scales,
#endif
void * d_s1s, void * d_s2s, void * d_thresh,
#ifdef USE_CONSTANT_MEMORY
float * h_fringes, size_t fringes_size,
#endif
uint64_t nbytes, unsigned nchan, unsigned nant, unsigned ntap,
unsigned s1_memory, uint64_t s1_count, char replace_noise)
{
const unsigned ndim = 2;
const uint64_t ndat = nbytes / (nchan * nant * ndim);
const unsigned half_ntap = ntap / 2;
#ifdef USE_CONSTANT_MEMORY
// copy the fringe coeffs and delays to GPU memory
cudaMemcpyToSymbolAsync (fringe_coeffs, (void *) h_fringes, fringes_size, 0, cudaMemcpyHostToDevice, stream);
cudaStreamSynchronize(stream);
#endif
// calculate the FIR coefficients to be used in the fractional delay
unsigned nthread = ntap;
unsigned nblock = nchan * nant;
hires_calculate_fir_coeffs<<<nblock,nthread,0,stream>>>((float *) d_delays, (float *) d_fir_coeffs, ntap);
#if _GDEBUG
check_error_stream("hires_calculate_fir_coeffs", stream);
#endif
// number of threads that actually load data
unsigned nthread_load = 1024;
if (ndat < nthread_load)
nthread_load = ndat;
unsigned nthread_run = nthread_load - (2 * half_ntap);
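// each block loads nthread_load complex samples but only nthread_run of them
// produce valid filtered output, since 2 * half_ntap edge samples are shared
// with neighbouring blocks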
// need shared memory to load the ntap coefficients + nthread_load data points
//const size_t sdata_bytes = (nthread_load * ndim + ntap) * sizeof(float);
const size_t sdata_bytes = (nthread_load * ndim + ntap + 1) * sizeof(float);
dim3 blocks (ndat / nthread_run, nant, nchan);
if (ndat % nthread_load)
blocks.x++;
#if _GDEBUG
fprintf (stderr, "hires_delay_fractional_float_kernel: bytes=%lu ndat=%lu sdata_bytes=%ld\n", nbytes, ndat, sdata_bytes);
fprintf (stderr, "hires_delay_fractional_float_kernel: blocks.x=%d, blocks.y=%d, blocks.z=%d\n", blocks.x, blocks.y, blocks.z);
fprintf (stderr, "hires_delay_fractional_float_kernel: nthread_load=%d nthread_run=%d ntap=%d\n", nthread_load, nthread_run, ntap);
#endif
const unsigned chan_stride = nant * ndat;
const unsigned ant_stride = ndat;
#ifdef USE_CONSTANT_MEMORY
hires_delay_fractional_float_kernel<<<blocks, nthread_load, sdata_bytes, stream>>>((int16_t *) d_in,
(cuFloatComplex *) d_fbuf, (float *) d_fir_coeffs, nthread_run,
ndat, chan_stride, ant_stride, ntap);
#else
hires_delay_fractional_float_kernel<<<blocks, nthread_load, sdata_bytes, stream>>>((int16_t *) d_in,
(cuFloatComplex *) d_fbuf, (float *) d_fir_coeffs, (float *) d_fringes,
nthread_run, ndat, chan_stride, ant_stride, ntap);
#endif
#if _GDEBUG
check_error_stream("hires_delay_fractional_float_kernel", stream);
#endif
/////////////////////////////////////////////////////////
// Calculate kurtosis sums
// TODO fix this configuration
unsigned M = 1024;
unsigned nthreads = 1024;
const uint64_t ndat_sk = ndat - (ntap - 1);
unsigned nval_per_thread = 1;
if (M > nthreads)
nval_per_thread = M / nthreads;
else
nthreads = M;
size_t shm_bytes;
// each block is a single integration
//shm_bytes = M * ndim * sizeof(float);
///////////////////////////////////////////////////////
// compute the means of each antenna / channel
//blocks.x = 1;
//shm_bytes = 0;
//unsigned nval_per_thread_mean = ndat_sk / 1024;
//hires_measure_means_kernel <<<blocks, nthreads, shm_bytes, stream>>>( (cuFloatComplex *) d_fbuf,
// (cuFloatComplex *) d_means, nval_per_thread_mean, ndat_sk);
///////////////////////////////////////////////////////
// compute the S1 and S2 values from the input
//
blocks.x = ndat_sk / M;
shm_bytes = 64 * sizeof(float);
#if _GDEBUG
fprintf (stderr, "hires_skcompute_kernel: bytes=%lu ndat=%lu shm_bytes=%ld\n", nbytes, ndat_sk, shm_bytes);
fprintf (stderr, "hires_skcompute_kernel: blocks.x=%d, blocks.y=%d, blocks.z=%d, nthreads=%u\n", blocks.x, blocks.y, blocks.z, nthreads);
fprintf (stderr, "hires_skcompute_kernel: d_fbuf=%p d_in=%p, nval_per_thread=%u, ndat_sk=%lu\n", d_fbuf, d_in, nval_per_thread, ndat_sk);
#endif
unsigned s1_idx = (unsigned) ((s1_count-1) % s1_memory);
float * d_s1s_curr = ((float *) d_s1s) + (s1_idx * blocks.x * nchan * nant);
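// d_s1s is treated as a circular buffer holding the most recent s1_memory
// blocks of S1 sums; the running median/sigma thresholds are derived from
// this history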
// reuse d_in as a temporary work buffer for the S1 and S2 sums
hires_skcompute_kernel<<<blocks, nthreads, shm_bytes, stream>>>( (cuFloatComplex *) d_fbuf, (float *) d_s1s_curr, (float *) d_s2s, nval_per_thread, ndat_sk);
#if _GDEBUG
check_error_stream("hires_skcompute_kernel", stream);
#endif
//
unsigned nsums = blocks.x;
dim3 blocks_skm (1, nant, nchan);
#ifdef MEDIAN_FILTER
/////////////////////////////////////////////////////////
// compute the power limits based on the S1 and S2 values
#ifdef _GDEBUG
fprintf (stderr, "ndat=%lu ndat_sk=%lu nsums=%u\n", ndat, ndat_sk, nsums);
fprintf (stderr, "s1_idx=%u s1_count=%u\n", s1_idx, s1_count);
#endif
const unsigned nsigma = 3;
unsigned valid_memory = s1_memory;
if (s1_count < s1_memory)
valid_memory = (unsigned) s1_count;
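// until the S1 history buffer has filled, only the s1_count blocks written
// so far contribute to the median/sigma estimate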
shm_bytes = 0;
// on first iteration, compute sigmas and thresholds
if (s1_count == 1)
{
nthreads = nsums;
shm_bytes = nthreads * sizeof(float);
hires_compute_sigmas_kernel<<<blocks_skm,nthreads,shm_bytes,stream>>>((float *) d_s1s, (cuFloatComplex *) d_thresh, (float *) d_sigmas, nsums);
#if _GDEBUG
check_error_stream("hires_compute_sigmas_kernel", stream);
#endif
}
#endif
//////////////////////////////////////////////////////////
// mask the input data
nthreads = 1024;
nval_per_thread = 1;
if (nsums > nthreads)
{
nval_per_thread = nsums / nthreads;
if (nsums % nthreads)
nval_per_thread++;
}
else
nthreads = nsums;
unsigned nsamp_per_thread = (M * ndim) / nthreads;
if (M % nthreads)
nsamp_per_thread++;
unsigned nchan_sum = 50;
unsigned sk_nsigma = 4;
shm_bytes = nchan_sum * nsums * sizeof(uint8_t);
size_t mask_size = nsums * nchan * nant * sizeof(uint8_t);
cudaMemsetAsync (d_mask, 0, mask_size, stream);
cudaStreamSynchronize(stream);
#if _GDEBUG
fprintf (stderr, "hires_skdetect_kernel: blocks_skm.x=%d, blocks_skm.y=%d, blocks_skm.z=%d\n", blocks_skm.x, blocks_skm.y, blocks_skm.z);
fprintf (stderr, "hires_skdetect_kernel: nthreads=%u shm_bytes=%ld\n", nthreads, shm_bytes);
fprintf (stderr, "hires_skdetect_kernel: d_fbuf=%p d_out=%p, d_in=%p, nsums=%u M=%u, nval_per_thread=%u\n", d_fbuf, d_out, d_thresh, nsums, M, nval_per_thread);
#endif
hires_skdetect_kernel<<<blocks_skm, nthreads, shm_bytes, stream>>>(d_s1s_curr, (float *) d_s2s, (cuFloatComplex *) d_thresh, (int8_t *) d_mask, (float *) d_sigmas, nchan_sum, sk_nsigma, nsums, M, nval_per_thread);
#if _GDEBUG
check_error_stream("hires_skdetect_kernel", stream);
#endif
shm_bytes = nchan_sum * nsums;
#if _GDEBUG
fprintf (stderr, "hires_skmask_kernel: blocks_skm.x=%d, blocks_skm.y=%d, "
"blocks_skm.z=%d\n", blocks_skm.x, blocks_skm.y, blocks_skm.z);
fprintf (stderr, "hires_skmask_kernel: nthreads=%u shm_bytes=%ld\n",
nthreads, shm_bytes);
fprintf (stderr, "hires_skmask_kernel: d_fbuf=%p d_out=%p, d_in=%p, nsums=%u "
"M=%u, nval_per_thread=%u, nsamp_per_thread=%u ndat_sk=%lu\n",
d_fbuf, d_out, d_in, nsums, M, nval_per_thread, nsamp_per_thread,
ndat_sk);
#endif
// now compute the power limits, taking the mask into account, updating the thresholds and sigmas
unsigned nthreads_cpl = 1024;
shm_bytes = 0;
hires_compute_power_limits_kernel<<<blocks_skm,nthreads_cpl,shm_bytes,stream>>>((float *) d_s1s, (cuFloatComplex *) d_thresh, (float *) d_sigmas, (int8_t *) d_mask, (curandStatePhilox4_32_10_t *) d_rstates, nsums, valid_memory, nsigma, s1_idx);
shm_bytes = 0;
hires_skmask_kernel<<<blocks_skm, nthreads, shm_bytes, stream>>>((float *) d_fbuf, (int8_t *) d_out,
(int8_t *) d_mask, (curandStatePhilox4_32_10_t *) d_rstates, (float *) d_sigmas,
#ifndef USE_CONSTANT_MEMORY
(float *) d_ant_scales,
#endif
nsums, M, nval_per_thread, nsamp_per_thread, ndat_sk, replace_noise);
#if _GDEBUG
check_error_stream("hires_skmask_kernel", stream);
#endif
}
// wrapper for getting curandStatePhilox4_32_10_t size
size_t hires_curandState_size()
{
return sizeof(curandStatePhilox4_32_10_t);
}
|
074e1bfd4b95f2c612e07ffa597cc142f8a54582.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/layers/lrn_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void LRNFillScale(const int nthreads, const Dtype* const in,
const int num, const int channels, const int height,
const int width, const int size, const Dtype alpha_over_size,
const Dtype k, Dtype* const scale) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local offset
const int w = index % width;
const int h = (index / width) % height;
const int n = index / width / height;
const int offset = (n * channels * height + h) * width + w;
const int step = height * width;
const Dtype* const in_off = in + offset;
Dtype* const scale_off = scale + offset;
int head = 0;
const int pre_pad = (size - 1) / 2;
const int post_pad = size - pre_pad - 1;
Dtype accum_scale = 0;
// fill the scale at [n, :, h, w]
// accumulate values
while (head < post_pad && head < channels) {
accum_scale += in_off[head * step] * in_off[head * step];
++head;
}
// both add and subtract
while (head < channels) {
accum_scale += in_off[head * step] * in_off[head * step];
if (head - size >= 0) {
accum_scale -= in_off[(head - size) * step]
* in_off[(head - size) * step];
}
scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;
++head;
}
// subtract only
while (head < channels + post_pad) {
if (head - size >= 0) {
accum_scale -= in_off[(head - size) * step]
* in_off[(head - size) * step];
}
scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;
++head;
}
}
}
template <typename Dtype>
void LRNLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
#ifdef LAYER_STATISTIC
//std::cout << "\tBottom/Top: " << bottom[0]->count() << " " << top[0]->count() << std::endl;
//std::cout << "\tCompute: "<< 2 * top[0]->count() << std::endl;
#endif
switch (this->layer_param_.lrn_param().norm_region()) {
case LRNParameter_NormRegion_ACROSS_CHANNELS:
CrossChannelForward_gpu(bottom, top);
break;
case LRNParameter_NormRegion_WITHIN_CHANNEL:
WithinChannelForward(bottom, top);
break;
default:
LOG(FATAL) << "Unknown normalization region.";
}
}
// TODO: check if it would be faster to just put it into the previous kernel.
template <typename Dtype>
__global__ void LRNComputeOutput(const int nthreads, const Dtype* const in,
const Dtype* const scale, const Dtype negative_beta, Dtype* const out) {
CUDA_KERNEL_LOOP(index, nthreads) {
out[index] = in[index] * pow(scale[index], negative_beta);
}
}
template <typename Dtype>
void LRNLayer<Dtype>::CrossChannelForward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
// First, compute scale
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
Dtype* scale_data = scale_.mutable_gpu_data();
// We will launch one kernel for each pixel location, and have the kernel
// go through all the channels.
int n_threads = num_ * height_ * width_;
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( LRNFillScale), dim3(CAFFE_GET_BLOCKS(n_threads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
n_threads, bottom_data, num_, channels_, height_, width_, size_,
alpha_ / size_, k_, scale_data);
CUDA_POST_KERNEL_CHECK;
n_threads = bottom[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( LRNComputeOutput), dim3(CAFFE_GET_BLOCKS(n_threads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
n_threads, bottom_data, scale_data, -beta_, top_data);
CUDA_POST_KERNEL_CHECK;
}
template void LRNLayer<float>::CrossChannelForward_gpu(
const vector<Blob<float>*>& bottom, const vector<Blob<float>*>& top);
template void LRNLayer<double>::CrossChannelForward_gpu(
const vector<Blob<double>*>& bottom, const vector<Blob<double>*>& top);
template <typename Dtype>
void LRNLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
switch (this->layer_param_.lrn_param().norm_region()) {
case LRNParameter_NormRegion_ACROSS_CHANNELS:
CrossChannelBackward_gpu(top, propagate_down, bottom);
break;
case LRNParameter_NormRegion_WITHIN_CHANNEL:
WithinChannelBackward(top, propagate_down, bottom);
break;
default:
LOG(FATAL) << "Unknown normalization region.";
}
}
template <typename Dtype>
__global__ void LRNComputeDiff(const int nthreads,
const Dtype* const bottom_data, const Dtype* const top_data,
const Dtype* const scale, const Dtype* const top_diff,
const int num, const int channels, const int height,
const int width, const int size, const Dtype negative_beta,
const Dtype cache_ratio, Dtype* const bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local offset
const int w = index % width;
const int h = (index / width) % height;
const int n = index / width / height;
const int offset = (n * channels * height + h) * width + w;
const int step = height * width;
const Dtype* const bottom_off = bottom_data + offset;
const Dtype* const top_off = top_data + offset;
const Dtype* const scale_off = scale + offset;
const Dtype* const top_diff_off = top_diff + offset;
Dtype* const bottom_diff_off = bottom_diff + offset;
int head = 0;
const int pre_pad = size - (size + 1) / 2;
const int post_pad = size - pre_pad - 1;
Dtype accum_ratio = 0;
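// accum_ratio keeps a running sum of top_diff * top / scale over the local
// window; it forms the second term of the LRN gradient, scaled by
// cache_ratio = 2 * alpha * beta / size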
// accumulate values
while (head < post_pad && head < channels) {
accum_ratio += top_diff_off[head * step] * top_off[head * step] /
scale_off[head * step];
++head;
}
// both add and subtract
while (head < channels) {
accum_ratio += top_diff_off[head * step] * top_off[head * step] /
scale_off[head * step];
if (head - size >= 0) {
accum_ratio -= top_diff_off[(head - size) * step] *
top_off[(head - size) * step] / scale_off[(head - size) * step];
}
bottom_diff_off[(head - post_pad) * step] =
top_diff_off[(head - post_pad) * step]
* pow(scale_off[(head - post_pad) * step], negative_beta)
- cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;
++head;
}
// subtract only
while (head < channels + post_pad) {
if (head - size >= 0) {
accum_ratio -= top_diff_off[(head - size) * step] *
top_off[(head - size) * step] / scale_off[(head - size) * step];
}
bottom_diff_off[(head - post_pad) * step] =
top_diff_off[(head - post_pad) * step]
* pow(scale_off[(head - post_pad) * step], negative_beta)
- cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;
++head;
}
}
}
template <typename Dtype>
void LRNLayer<Dtype>::CrossChannelBackward_gpu(
const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
int n_threads = num_ * height_ * width_;
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( LRNComputeDiff), dim3(CAFFE_GET_BLOCKS(n_threads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
n_threads, bottom[0]->gpu_data(), top[0]->gpu_data(),
scale_.gpu_data(), top[0]->gpu_diff(), num_, channels_, height_, width_,
size_, -beta_, Dtype(2. * alpha_ * beta_ / size_),
bottom[0]->mutable_gpu_diff());
}
template void LRNLayer<float>::CrossChannelBackward_gpu(
const vector<Blob<float>*>& top, const vector<bool>& propagate_down,
const vector<Blob<float>*>& bottom);
template void LRNLayer<double>::CrossChannelBackward_gpu(
const vector<Blob<double>*>& top, const vector<bool>& propagate_down,
const vector<Blob<double>*>& bottom);
INSTANTIATE_LAYER_GPU_FUNCS(LRNLayer);
} // namespace caffe
|
074e1bfd4b95f2c612e07ffa597cc142f8a54582.cu
|
#include <vector>
#include "caffe/layers/lrn_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void LRNFillScale(const int nthreads, const Dtype* const in,
const int num, const int channels, const int height,
const int width, const int size, const Dtype alpha_over_size,
const Dtype k, Dtype* const scale) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local offset
const int w = index % width;
const int h = (index / width) % height;
const int n = index / width / height;
const int offset = (n * channels * height + h) * width + w;
const int step = height * width;
const Dtype* const in_off = in + offset;
Dtype* const scale_off = scale + offset;
int head = 0;
const int pre_pad = (size - 1) / 2;
const int post_pad = size - pre_pad - 1;
Dtype accum_scale = 0;
// fill the scale at [n, :, h, w]
// accumulate values
while (head < post_pad && head < channels) {
accum_scale += in_off[head * step] * in_off[head * step];
++head;
}
// both add and subtract
while (head < channels) {
accum_scale += in_off[head * step] * in_off[head * step];
if (head - size >= 0) {
accum_scale -= in_off[(head - size) * step]
* in_off[(head - size) * step];
}
scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;
++head;
}
// subtract only
while (head < channels + post_pad) {
if (head - size >= 0) {
accum_scale -= in_off[(head - size) * step]
* in_off[(head - size) * step];
}
scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;
++head;
}
}
}
template <typename Dtype>
void LRNLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
#ifdef LAYER_STATISTIC
//std::cout << "\tBottom/Top: " << bottom[0]->count() << " " << top[0]->count() << std::endl;
//std::cout << "\tCompute: "<< 2 * top[0]->count() << std::endl;
#endif
switch (this->layer_param_.lrn_param().norm_region()) {
case LRNParameter_NormRegion_ACROSS_CHANNELS:
CrossChannelForward_gpu(bottom, top);
break;
case LRNParameter_NormRegion_WITHIN_CHANNEL:
WithinChannelForward(bottom, top);
break;
default:
LOG(FATAL) << "Unknown normalization region.";
}
}
// TODO: check if it would be faster to just put it into the previous kernel.
template <typename Dtype>
__global__ void LRNComputeOutput(const int nthreads, const Dtype* const in,
const Dtype* const scale, const Dtype negative_beta, Dtype* const out) {
CUDA_KERNEL_LOOP(index, nthreads) {
out[index] = in[index] * pow(scale[index], negative_beta);
}
}
template <typename Dtype>
void LRNLayer<Dtype>::CrossChannelForward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
// First, compute scale
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
Dtype* scale_data = scale_.mutable_gpu_data();
// We will launch one kernel for each pixel location, and have the kernel
// go through all the channels.
int n_threads = num_ * height_ * width_;
// NOLINT_NEXT_LINE(whitespace/operators)
LRNFillScale<<<CAFFE_GET_BLOCKS(n_threads), CAFFE_CUDA_NUM_THREADS>>>(
n_threads, bottom_data, num_, channels_, height_, width_, size_,
alpha_ / size_, k_, scale_data);
CUDA_POST_KERNEL_CHECK;
n_threads = bottom[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
LRNComputeOutput<<<CAFFE_GET_BLOCKS(n_threads), CAFFE_CUDA_NUM_THREADS>>>(
n_threads, bottom_data, scale_data, -beta_, top_data);
CUDA_POST_KERNEL_CHECK;
}
template void LRNLayer<float>::CrossChannelForward_gpu(
const vector<Blob<float>*>& bottom, const vector<Blob<float>*>& top);
template void LRNLayer<double>::CrossChannelForward_gpu(
const vector<Blob<double>*>& bottom, const vector<Blob<double>*>& top);
template <typename Dtype>
void LRNLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
switch (this->layer_param_.lrn_param().norm_region()) {
case LRNParameter_NormRegion_ACROSS_CHANNELS:
CrossChannelBackward_gpu(top, propagate_down, bottom);
break;
case LRNParameter_NormRegion_WITHIN_CHANNEL:
WithinChannelBackward(top, propagate_down, bottom);
break;
default:
LOG(FATAL) << "Unknown normalization region.";
}
}
template <typename Dtype>
__global__ void LRNComputeDiff(const int nthreads,
const Dtype* const bottom_data, const Dtype* const top_data,
const Dtype* const scale, const Dtype* const top_diff,
const int num, const int channels, const int height,
const int width, const int size, const Dtype negative_beta,
const Dtype cache_ratio, Dtype* const bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local offset
const int w = index % width;
const int h = (index / width) % height;
const int n = index / width / height;
const int offset = (n * channels * height + h) * width + w;
const int step = height * width;
const Dtype* const bottom_off = bottom_data + offset;
const Dtype* const top_off = top_data + offset;
const Dtype* const scale_off = scale + offset;
const Dtype* const top_diff_off = top_diff + offset;
Dtype* const bottom_diff_off = bottom_diff + offset;
int head = 0;
const int pre_pad = size - (size + 1) / 2;
const int post_pad = size - pre_pad - 1;
Dtype accum_ratio = 0;
// accumulate values
while (head < post_pad && head < channels) {
accum_ratio += top_diff_off[head * step] * top_off[head * step] /
scale_off[head * step];
++head;
}
// both add and subtract
while (head < channels) {
accum_ratio += top_diff_off[head * step] * top_off[head * step] /
scale_off[head * step];
if (head - size >= 0) {
accum_ratio -= top_diff_off[(head - size) * step] *
top_off[(head - size) * step] / scale_off[(head - size) * step];
}
bottom_diff_off[(head - post_pad) * step] =
top_diff_off[(head - post_pad) * step]
* pow(scale_off[(head - post_pad) * step], negative_beta)
- cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;
++head;
}
// subtract only
while (head < channels + post_pad) {
if (head - size >= 0) {
accum_ratio -= top_diff_off[(head - size) * step] *
top_off[(head - size) * step] / scale_off[(head - size) * step];
}
bottom_diff_off[(head - post_pad) * step] =
top_diff_off[(head - post_pad) * step]
* pow(scale_off[(head - post_pad) * step], negative_beta)
- cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;
++head;
}
}
}
template <typename Dtype>
void LRNLayer<Dtype>::CrossChannelBackward_gpu(
const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
int n_threads = num_ * height_ * width_;
// NOLINT_NEXT_LINE(whitespace/operators)
LRNComputeDiff<<<CAFFE_GET_BLOCKS(n_threads), CAFFE_CUDA_NUM_THREADS>>>(
n_threads, bottom[0]->gpu_data(), top[0]->gpu_data(),
scale_.gpu_data(), top[0]->gpu_diff(), num_, channels_, height_, width_,
size_, -beta_, Dtype(2. * alpha_ * beta_ / size_),
bottom[0]->mutable_gpu_diff());
}
template void LRNLayer<float>::CrossChannelBackward_gpu(
const vector<Blob<float>*>& top, const vector<bool>& propagate_down,
const vector<Blob<float>*>& bottom);
template void LRNLayer<double>::CrossChannelBackward_gpu(
const vector<Blob<double>*>& top, const vector<bool>& propagate_down,
const vector<Blob<double>*>& bottom);
INSTANTIATE_LAYER_GPU_FUNCS(LRNLayer);
} // namespace caffe
|
c2da91334fb799ebe612853abe95462ece9eae46.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_reset_field_kernel2;
int xdim0_reset_field_kernel2_h = -1;
__constant__ int ydim0_reset_field_kernel2;
int ydim0_reset_field_kernel2_h = -1;
__constant__ int xdim1_reset_field_kernel2;
int xdim1_reset_field_kernel2_h = -1;
__constant__ int ydim1_reset_field_kernel2;
int ydim1_reset_field_kernel2_h = -1;
__constant__ int xdim2_reset_field_kernel2;
int xdim2_reset_field_kernel2_h = -1;
__constant__ int ydim2_reset_field_kernel2;
int ydim2_reset_field_kernel2_h = -1;
__constant__ int xdim3_reset_field_kernel2;
int xdim3_reset_field_kernel2_h = -1;
__constant__ int ydim3_reset_field_kernel2;
int ydim3_reset_field_kernel2_h = -1;
__constant__ int xdim4_reset_field_kernel2;
int xdim4_reset_field_kernel2_h = -1;
__constant__ int ydim4_reset_field_kernel2;
int ydim4_reset_field_kernel2_h = -1;
__constant__ int xdim5_reset_field_kernel2;
int xdim5_reset_field_kernel2_h = -1;
__constant__ int ydim5_reset_field_kernel2;
int ydim5_reset_field_kernel2_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
#define OPS_ACC0(x,y,z) (x+xdim0_reset_field_kernel2*(y)+xdim0_reset_field_kernel2*ydim0_reset_field_kernel2*(z))
#define OPS_ACC1(x,y,z) (x+xdim1_reset_field_kernel2*(y)+xdim1_reset_field_kernel2*ydim1_reset_field_kernel2*(z))
#define OPS_ACC2(x,y,z) (x+xdim2_reset_field_kernel2*(y)+xdim2_reset_field_kernel2*ydim2_reset_field_kernel2*(z))
#define OPS_ACC3(x,y,z) (x+xdim3_reset_field_kernel2*(y)+xdim3_reset_field_kernel2*ydim3_reset_field_kernel2*(z))
#define OPS_ACC4(x,y,z) (x+xdim4_reset_field_kernel2*(y)+xdim4_reset_field_kernel2*ydim4_reset_field_kernel2*(z))
#define OPS_ACC5(x,y,z) (x+xdim5_reset_field_kernel2*(y)+xdim5_reset_field_kernel2*ydim5_reset_field_kernel2*(z))
//user function
__device__
void reset_field_kernel2_gpu( double *xvel0, const double *xvel1,
double *yvel0, const double *yvel1,
double *zvel0, const double *zvel1) {
xvel0[OPS_ACC0(0,0,0)] = xvel1[OPS_ACC1(0,0,0)] ;
yvel0[OPS_ACC2(0,0,0)] = yvel1[OPS_ACC3(0,0,0)] ;
zvel0[OPS_ACC4(0,0,0)] = zvel1[OPS_ACC5(0,0,0)] ;
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
__global__ void ops_reset_field_kernel2(
double* __restrict arg0,
const double* __restrict arg1,
double* __restrict arg2,
const double* __restrict arg3,
double* __restrict arg4,
const double* __restrict arg5,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_reset_field_kernel2 + idx_z * 1*1 * xdim0_reset_field_kernel2 * ydim0_reset_field_kernel2;
arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_reset_field_kernel2 + idx_z * 1*1 * xdim1_reset_field_kernel2 * ydim1_reset_field_kernel2;
arg2 += idx_x * 1*1 + idx_y * 1*1 * xdim2_reset_field_kernel2 + idx_z * 1*1 * xdim2_reset_field_kernel2 * ydim2_reset_field_kernel2;
arg3 += idx_x * 1*1 + idx_y * 1*1 * xdim3_reset_field_kernel2 + idx_z * 1*1 * xdim3_reset_field_kernel2 * ydim3_reset_field_kernel2;
arg4 += idx_x * 1*1 + idx_y * 1*1 * xdim4_reset_field_kernel2 + idx_z * 1*1 * xdim4_reset_field_kernel2 * ydim4_reset_field_kernel2;
arg5 += idx_x * 1*1 + idx_y * 1*1 * xdim5_reset_field_kernel2 + idx_z * 1*1 * xdim5_reset_field_kernel2 * ydim5_reset_field_kernel2;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
reset_field_kernel2_gpu(arg0, arg1, arg2, arg3,
arg4, arg5);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_reset_field_kernel2(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3,
ops_arg arg4, ops_arg arg5) {
#else
void ops_par_loop_reset_field_kernel2_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
ops_arg arg3 = desc->args[3];
ops_arg arg4 = desc->args[4];
ops_arg arg5 = desc->args[5];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[6] = { arg0, arg1, arg2, arg3, arg4, arg5};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,6,range,139)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(139,"reset_field_kernel2");
OPS_kernels[139].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<3; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0];
int ydim2 = args[2].dat->size[1];
int xdim3 = args[3].dat->size[0];
int ydim3 = args[3].dat->size[1];
int xdim4 = args[4].dat->size[0];
int ydim4 = args[4].dat->size[1];
int xdim5 = args[5].dat->size[0];
int ydim5 = args[5].dat->size[1];
if (xdim0 != xdim0_reset_field_kernel2_h || ydim0 != ydim0_reset_field_kernel2_h || xdim1 != xdim1_reset_field_kernel2_h || ydim1 != ydim1_reset_field_kernel2_h || xdim2 != xdim2_reset_field_kernel2_h || ydim2 != ydim2_reset_field_kernel2_h || xdim3 != xdim3_reset_field_kernel2_h || ydim3 != ydim3_reset_field_kernel2_h || xdim4 != xdim4_reset_field_kernel2_h || ydim4 != ydim4_reset_field_kernel2_h || xdim5 != xdim5_reset_field_kernel2_h || ydim5 != ydim5_reset_field_kernel2_h) {
hipMemcpyToSymbol( xdim0_reset_field_kernel2, &xdim0, sizeof(int) );
xdim0_reset_field_kernel2_h = xdim0;
hipMemcpyToSymbol( ydim0_reset_field_kernel2, &ydim0, sizeof(int) );
ydim0_reset_field_kernel2_h = ydim0;
hipMemcpyToSymbol( xdim1_reset_field_kernel2, &xdim1, sizeof(int) );
xdim1_reset_field_kernel2_h = xdim1;
hipMemcpyToSymbol( ydim1_reset_field_kernel2, &ydim1, sizeof(int) );
ydim1_reset_field_kernel2_h = ydim1;
hipMemcpyToSymbol( xdim2_reset_field_kernel2, &xdim2, sizeof(int) );
xdim2_reset_field_kernel2_h = xdim2;
hipMemcpyToSymbol( ydim2_reset_field_kernel2, &ydim2, sizeof(int) );
ydim2_reset_field_kernel2_h = ydim2;
hipMemcpyToSymbol( xdim3_reset_field_kernel2, &xdim3, sizeof(int) );
xdim3_reset_field_kernel2_h = xdim3;
hipMemcpyToSymbol( ydim3_reset_field_kernel2, &ydim3, sizeof(int) );
ydim3_reset_field_kernel2_h = ydim3;
hipMemcpyToSymbol( xdim4_reset_field_kernel2, &xdim4, sizeof(int) );
xdim4_reset_field_kernel2_h = xdim4;
hipMemcpyToSymbol( ydim4_reset_field_kernel2, &ydim4, sizeof(int) );
ydim4_reset_field_kernel2_h = ydim4;
hipMemcpyToSymbol( xdim5_reset_field_kernel2, &xdim5, sizeof(int) );
xdim5_reset_field_kernel2_h = xdim5;
hipMemcpyToSymbol( ydim5_reset_field_kernel2, &ydim5, sizeof(int) );
ydim5_reset_field_kernel2_h = ydim5;
}
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size);
int dat5 = (OPS_soa ? args[5].dat->type_size : args[5].dat->elem_size);
char *p_a[6];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
int base2 = args[2].dat->base_offset +
dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
(start[1] * args[2].stencil->stride[1]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
args[2].dat->size[1] *
(start[2] * args[2].stencil->stride[2]);
p_a[2] = (char *)args[2].data_d + base2;
int base3 = args[3].dat->base_offset +
dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
(start[1] * args[3].stencil->stride[1]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
args[3].dat->size[1] *
(start[2] * args[3].stencil->stride[2]);
p_a[3] = (char *)args[3].data_d + base3;
int base4 = args[4].dat->base_offset +
dat4 * 1 * (start[0] * args[4].stencil->stride[0]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
(start[1] * args[4].stencil->stride[1]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
args[4].dat->size[1] *
(start[2] * args[4].stencil->stride[2]);
p_a[4] = (char *)args[4].data_d + base4;
int base5 = args[5].dat->base_offset +
dat5 * 1 * (start[0] * args[5].stencil->stride[0]);
base5 = base5+ dat5 *
args[5].dat->size[0] *
(start[1] * args[5].stencil->stride[1]);
base5 = base5+ dat5 *
args[5].dat->size[0] *
args[5].dat->size[1] *
(start[2] * args[5].stencil->stride[2]);
p_a[5] = (char *)args[5].data_d + base5;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 6);
ops_halo_exchanges(args,6,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[139].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
hipLaunchKernelGGL(( ops_reset_field_kernel2), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],
(double *)p_a[2], (double *)p_a[3],
(double *)p_a[4], (double *)p_a[5],x_size, y_size, z_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[139].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 6);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[2],range);
ops_set_halo_dirtybit3(&args[4],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[139].mpi_time += t2-t1;
OPS_kernels[139].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[139].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[139].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[139].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[139].transfer += ops_compute_transfer(dim, start, end, &arg4);
OPS_kernels[139].transfer += ops_compute_transfer(dim, start, end, &arg5);
}
}
#ifdef OPS_LAZY
void ops_par_loop_reset_field_kernel2(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 139;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 139;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 6;
desc->args = (ops_arg*)malloc(6*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
desc->args[3] = arg3;
desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
desc->args[4] = arg4;
desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index;
desc->args[5] = arg5;
desc->hash = ((desc->hash << 5) + desc->hash) + arg5.dat->index;
desc->function = ops_par_loop_reset_field_kernel2_execute;
if (OPS_diags > 1) {
ops_timing_realloc(139,"reset_field_kernel2");
}
ops_enqueue_kernel(desc);
}
#endif
|
c2da91334fb799ebe612853abe95462ece9eae46.cu
|
//
// auto-generated by ops.py
//
__constant__ int xdim0_reset_field_kernel2;
int xdim0_reset_field_kernel2_h = -1;
__constant__ int ydim0_reset_field_kernel2;
int ydim0_reset_field_kernel2_h = -1;
__constant__ int xdim1_reset_field_kernel2;
int xdim1_reset_field_kernel2_h = -1;
__constant__ int ydim1_reset_field_kernel2;
int ydim1_reset_field_kernel2_h = -1;
__constant__ int xdim2_reset_field_kernel2;
int xdim2_reset_field_kernel2_h = -1;
__constant__ int ydim2_reset_field_kernel2;
int ydim2_reset_field_kernel2_h = -1;
__constant__ int xdim3_reset_field_kernel2;
int xdim3_reset_field_kernel2_h = -1;
__constant__ int ydim3_reset_field_kernel2;
int ydim3_reset_field_kernel2_h = -1;
__constant__ int xdim4_reset_field_kernel2;
int xdim4_reset_field_kernel2_h = -1;
__constant__ int ydim4_reset_field_kernel2;
int ydim4_reset_field_kernel2_h = -1;
__constant__ int xdim5_reset_field_kernel2;
int xdim5_reset_field_kernel2_h = -1;
__constant__ int ydim5_reset_field_kernel2;
int ydim5_reset_field_kernel2_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
#define OPS_ACC0(x,y,z) (x+xdim0_reset_field_kernel2*(y)+xdim0_reset_field_kernel2*ydim0_reset_field_kernel2*(z))
#define OPS_ACC1(x,y,z) (x+xdim1_reset_field_kernel2*(y)+xdim1_reset_field_kernel2*ydim1_reset_field_kernel2*(z))
#define OPS_ACC2(x,y,z) (x+xdim2_reset_field_kernel2*(y)+xdim2_reset_field_kernel2*ydim2_reset_field_kernel2*(z))
#define OPS_ACC3(x,y,z) (x+xdim3_reset_field_kernel2*(y)+xdim3_reset_field_kernel2*ydim3_reset_field_kernel2*(z))
#define OPS_ACC4(x,y,z) (x+xdim4_reset_field_kernel2*(y)+xdim4_reset_field_kernel2*ydim4_reset_field_kernel2*(z))
#define OPS_ACC5(x,y,z) (x+xdim5_reset_field_kernel2*(y)+xdim5_reset_field_kernel2*ydim5_reset_field_kernel2*(z))
//user function
__device__
void reset_field_kernel2_gpu( double *xvel0, const double *xvel1,
double *yvel0, const double *yvel1,
double *zvel0, const double *zvel1) {
xvel0[OPS_ACC0(0,0,0)] = xvel1[OPS_ACC1(0,0,0)] ;
yvel0[OPS_ACC2(0,0,0)] = yvel1[OPS_ACC3(0,0,0)] ;
zvel0[OPS_ACC4(0,0,0)] = zvel1[OPS_ACC5(0,0,0)] ;
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
__global__ void ops_reset_field_kernel2(
double* __restrict arg0,
const double* __restrict arg1,
double* __restrict arg2,
const double* __restrict arg3,
double* __restrict arg4,
const double* __restrict arg5,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_reset_field_kernel2 + idx_z * 1*1 * xdim0_reset_field_kernel2 * ydim0_reset_field_kernel2;
arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_reset_field_kernel2 + idx_z * 1*1 * xdim1_reset_field_kernel2 * ydim1_reset_field_kernel2;
arg2 += idx_x * 1*1 + idx_y * 1*1 * xdim2_reset_field_kernel2 + idx_z * 1*1 * xdim2_reset_field_kernel2 * ydim2_reset_field_kernel2;
arg3 += idx_x * 1*1 + idx_y * 1*1 * xdim3_reset_field_kernel2 + idx_z * 1*1 * xdim3_reset_field_kernel2 * ydim3_reset_field_kernel2;
arg4 += idx_x * 1*1 + idx_y * 1*1 * xdim4_reset_field_kernel2 + idx_z * 1*1 * xdim4_reset_field_kernel2 * ydim4_reset_field_kernel2;
arg5 += idx_x * 1*1 + idx_y * 1*1 * xdim5_reset_field_kernel2 + idx_z * 1*1 * xdim5_reset_field_kernel2 * ydim5_reset_field_kernel2;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
reset_field_kernel2_gpu(arg0, arg1, arg2, arg3,
arg4, arg5);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_reset_field_kernel2(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3,
ops_arg arg4, ops_arg arg5) {
#else
void ops_par_loop_reset_field_kernel2_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
ops_arg arg3 = desc->args[3];
ops_arg arg4 = desc->args[4];
ops_arg arg5 = desc->args[5];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[6] = { arg0, arg1, arg2, arg3, arg4, arg5};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,6,range,139)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(139,"reset_field_kernel2");
OPS_kernels[139].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<3; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0];
int ydim2 = args[2].dat->size[1];
int xdim3 = args[3].dat->size[0];
int ydim3 = args[3].dat->size[1];
int xdim4 = args[4].dat->size[0];
int ydim4 = args[4].dat->size[1];
int xdim5 = args[5].dat->size[0];
int ydim5 = args[5].dat->size[1];
if (xdim0 != xdim0_reset_field_kernel2_h || ydim0 != ydim0_reset_field_kernel2_h || xdim1 != xdim1_reset_field_kernel2_h || ydim1 != ydim1_reset_field_kernel2_h || xdim2 != xdim2_reset_field_kernel2_h || ydim2 != ydim2_reset_field_kernel2_h || xdim3 != xdim3_reset_field_kernel2_h || ydim3 != ydim3_reset_field_kernel2_h || xdim4 != xdim4_reset_field_kernel2_h || ydim4 != ydim4_reset_field_kernel2_h || xdim5 != xdim5_reset_field_kernel2_h || ydim5 != ydim5_reset_field_kernel2_h) {
cudaMemcpyToSymbol( xdim0_reset_field_kernel2, &xdim0, sizeof(int) );
xdim0_reset_field_kernel2_h = xdim0;
cudaMemcpyToSymbol( ydim0_reset_field_kernel2, &ydim0, sizeof(int) );
ydim0_reset_field_kernel2_h = ydim0;
cudaMemcpyToSymbol( xdim1_reset_field_kernel2, &xdim1, sizeof(int) );
xdim1_reset_field_kernel2_h = xdim1;
cudaMemcpyToSymbol( ydim1_reset_field_kernel2, &ydim1, sizeof(int) );
ydim1_reset_field_kernel2_h = ydim1;
cudaMemcpyToSymbol( xdim2_reset_field_kernel2, &xdim2, sizeof(int) );
xdim2_reset_field_kernel2_h = xdim2;
cudaMemcpyToSymbol( ydim2_reset_field_kernel2, &ydim2, sizeof(int) );
ydim2_reset_field_kernel2_h = ydim2;
cudaMemcpyToSymbol( xdim3_reset_field_kernel2, &xdim3, sizeof(int) );
xdim3_reset_field_kernel2_h = xdim3;
cudaMemcpyToSymbol( ydim3_reset_field_kernel2, &ydim3, sizeof(int) );
ydim3_reset_field_kernel2_h = ydim3;
cudaMemcpyToSymbol( xdim4_reset_field_kernel2, &xdim4, sizeof(int) );
xdim4_reset_field_kernel2_h = xdim4;
cudaMemcpyToSymbol( ydim4_reset_field_kernel2, &ydim4, sizeof(int) );
ydim4_reset_field_kernel2_h = ydim4;
cudaMemcpyToSymbol( xdim5_reset_field_kernel2, &xdim5, sizeof(int) );
xdim5_reset_field_kernel2_h = xdim5;
cudaMemcpyToSymbol( ydim5_reset_field_kernel2, &ydim5, sizeof(int) );
ydim5_reset_field_kernel2_h = ydim5;
}
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size);
int dat5 = (OPS_soa ? args[5].dat->type_size : args[5].dat->elem_size);
char *p_a[6];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
int base2 = args[2].dat->base_offset +
dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
(start[1] * args[2].stencil->stride[1]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
args[2].dat->size[1] *
(start[2] * args[2].stencil->stride[2]);
p_a[2] = (char *)args[2].data_d + base2;
int base3 = args[3].dat->base_offset +
dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
(start[1] * args[3].stencil->stride[1]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
args[3].dat->size[1] *
(start[2] * args[3].stencil->stride[2]);
p_a[3] = (char *)args[3].data_d + base3;
int base4 = args[4].dat->base_offset +
dat4 * 1 * (start[0] * args[4].stencil->stride[0]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
(start[1] * args[4].stencil->stride[1]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
args[4].dat->size[1] *
(start[2] * args[4].stencil->stride[2]);
p_a[4] = (char *)args[4].data_d + base4;
int base5 = args[5].dat->base_offset +
dat5 * 1 * (start[0] * args[5].stencil->stride[0]);
base5 = base5+ dat5 *
args[5].dat->size[0] *
(start[1] * args[5].stencil->stride[1]);
base5 = base5+ dat5 *
args[5].dat->size[0] *
args[5].dat->size[1] *
(start[2] * args[5].stencil->stride[2]);
p_a[5] = (char *)args[5].data_d + base5;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 6);
ops_halo_exchanges(args,6,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[139].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
ops_reset_field_kernel2<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],
(double *)p_a[2], (double *)p_a[3],
(double *)p_a[4], (double *)p_a[5],x_size, y_size, z_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[139].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 6);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[2],range);
ops_set_halo_dirtybit3(&args[4],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[139].mpi_time += t2-t1;
OPS_kernels[139].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[139].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[139].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[139].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[139].transfer += ops_compute_transfer(dim, start, end, &arg4);
OPS_kernels[139].transfer += ops_compute_transfer(dim, start, end, &arg5);
}
}
#ifdef OPS_LAZY
void ops_par_loop_reset_field_kernel2(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 139;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 139;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 6;
desc->args = (ops_arg*)malloc(6*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
desc->args[3] = arg3;
desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
desc->args[4] = arg4;
desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index;
desc->args[5] = arg5;
desc->hash = ((desc->hash << 5) + desc->hash) + arg5.dat->index;
desc->function = ops_par_loop_reset_field_kernel2_execute;
if (OPS_diags > 1) {
ops_timing_realloc(139,"reset_field_kernel2");
}
ops_enqueue_kernel(desc);
}
#endif
|
68b5e6f0a7918e6e5875b5a3cf32ab284cbaa76c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <cstdio>
#include <ctime>
#include "lcs.h"
#include "hip/hip_runtime.h"
#include "CUDAHostFunctions.h"
#include "CUDAKernels.h"
void LaunchGPUForInitialCellLocation(double minX, double maxX, double minY, double maxY, double minZ, double maxZ,
int xRes, int yRes, int zRes,
int *&initialCellLocations,
int *&gridCounts,
int *&d_cellLocations,
int *&d_gridCounts,
int globalNumOfCells,
double *d_vertexPositions,
int *d_tetrahedralConnectivities,
double epsilon) {
/// DEBUG ///
//printf("**********************************************epsilon = %lf\n", epsilon);
hipError_t err;
double dx = (maxX - minX) / xRes;
double dy = (maxY - minY) / yRes;
double dz = (maxZ - minZ) / zRes;
int numOfGridPoints = (xRes + 1) * (yRes + 1) * (zRes + 1);
initialCellLocations = new int [numOfGridPoints];
//memset(initialCellLocations, 255, sizeof(int) * numOfGridPoints);
for (int i = 0; i < numOfGridPoints; i++)
initialCellLocations[i] = -1;
gridCounts = new int [numOfGridPoints];
memset(gridCounts, 0, sizeof(int) * numOfGridPoints);
// Create CUDA C buffer pointing to the device cellLocations (output)
err = hipMalloc((void **)&d_cellLocations, sizeof(int) * numOfGridPoints);
if (err) lcs::Error("Fail to create a buffer for device cellLocations");
err = hipMemset(d_cellLocations, 255, sizeof(int) * numOfGridPoints);
if (err) lcs::Error("Fail to initialize d_cellLocations");
/// DEBUG ///
err = hipMalloc((void **)&d_gridCounts, sizeof(int) * numOfGridPoints);
if (err) lcs::Error("Fail to create a buffer for device gridCounts");
err = hipMemset(d_gridCounts, 0, sizeof(int) * numOfGridPoints);
if (err) lcs::Error("Fail to initialize d_gridCounts");
int threadBlockSize = BLOCK_SIZE;
dim3 dimGrid;
dimGrid.x = (globalNumOfCells - 1) / threadBlockSize + 1;
dimGrid.y = dimGrid.z = 1;
dim3 dimBlock(threadBlockSize, 1, 1);
hipLaunchKernelGGL(( InitialCellLocation), dim3(dimGrid), dim3(dimBlock), 0, 0, d_vertexPositions,
d_tetrahedralConnectivities,
d_cellLocations,
xRes, yRes, zRes,
minX, minY, minZ,
dx, dy, dz,
epsilon,
globalNumOfCells,
d_gridCounts);
err = hipDeviceSynchronize();
if (err) lcs::Error("Fail to launch the initial location kernel");
err = hipMemcpy(initialCellLocations, d_cellLocations, sizeof(int) * numOfGridPoints, hipMemcpyDeviceToHost);
if (err) lcs::Error("Fail to read device initialCellLocations");
err = hipMemcpy(gridCounts, d_gridCounts, sizeof(int) * numOfGridPoints, hipMemcpyDeviceToHost);
if (err) lcs::Error("Fail to read device gridCounts");
hipFree(d_cellLocations);
hipFree(d_gridCounts);
}
void LaunchGPUForNaiveTracing(double *globalVertexPositions,
double *globalStartVelocities,
double *globalEndVelocities,
int *globalTetrahedralConnectivities,
int *globalTetrahedralLinks,
int *stage,
double *lastPosition,
double *k1,
double *k2,
double *k3,
double *pastTimes,
int *cellLocations,
double startTime, double endTime, double timeStep,
double epsilon,
int *activeParticles,
int numOfActiveParticles
) {
int threadBlockSize = BLOCK_SIZE;
dim3 dimGrid;
dimGrid.x = (numOfActiveParticles - 1) / threadBlockSize + 1;
dimGrid.y = dimGrid.z = 1;
dim3 dimBlock(threadBlockSize, 1, 1);
hipError_t err;
hipFuncSetCacheConfig(NaiveTracing, hipFuncCachePreferShared);
clock_t tt = clock();
hipLaunchKernelGGL(( NaiveTracing), dim3(dimGrid), dim3(dimBlock), 0, 0, globalVertexPositions,
globalStartVelocities,
globalEndVelocities,
globalTetrahedralConnectivities,
globalTetrahedralLinks,
//stage,
lastPosition,
//k1, k2, k3,
pastTimes,
cellLocations,
startTime, endTime, timeStep, epsilon,
activeParticles,
numOfActiveParticles);
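// Note: the kernel launch returns immediately (execution is asynchronous), so the time printed below measures only launch overhead; hipDeviceSynchronize() afterwards waits for the kernel to finish.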
printf("time : %lf\n", (double)(clock() - tt) / CLOCKS_PER_SEC);
err = hipDeviceSynchronize();
printf("err string = %s\n", hipGetErrorString(err));
if (err) lcs::Error("Fail to launch the naive tracing kernel");
}
|
68b5e6f0a7918e6e5875b5a3cf32ab284cbaa76c.cu
|
#include <cstdio>
#include <ctime>
#include "lcs.h"
#include "cuda_runtime.h"
#include "CUDAHostFunctions.h"
#include "CUDAKernels.h"
void LaunchGPUForInitialCellLocation(double minX, double maxX, double minY, double maxY, double minZ, double maxZ,
int xRes, int yRes, int zRes,
int *&initialCellLocations,
int *&gridCounts,
int *&d_cellLocations,
int *&d_gridCounts,
int globalNumOfCells,
double *d_vertexPositions,
int *d_tetrahedralConnectivities,
double epsilon) {
/// DEBUG ///
//printf("**********************************************epsilon = %lf\n", epsilon);
cudaError_t err;
double dx = (maxX - minX) / xRes;
double dy = (maxY - minY) / yRes;
double dz = (maxZ - minZ) / zRes;
int numOfGridPoints = (xRes + 1) * (yRes + 1) * (zRes + 1);
initialCellLocations = new int [numOfGridPoints];
//memset(initialCellLocations, 255, sizeof(int) * numOfGridPoints);
for (int i = 0; i < numOfGridPoints; i++)
initialCellLocations[i] = -1;
gridCounts = new int [numOfGridPoints];
memset(gridCounts, 0, sizeof(int) * numOfGridPoints);
// Create CUDA C buffer pointing to the device cellLocations (output)
err = cudaMalloc((void **)&d_cellLocations, sizeof(int) * numOfGridPoints);
if (err) lcs::Error("Fail to create a buffer for device cellLocations");
err = cudaMemset(d_cellLocations, 255, sizeof(int) * numOfGridPoints);
if (err) lcs::Error("Fail to initialize d_cellLocations");
/// DEBUG ///
err = cudaMalloc((void **)&d_gridCounts, sizeof(int) * numOfGridPoints);
if (err) lcs::Error("Fail to create a buffer for device gridCounts");
err = cudaMemset(d_gridCounts, 0, sizeof(int) * numOfGridPoints);
if (err) lcs::Error("Fail to initialize d_gridCounts");
int threadBlockSize = BLOCK_SIZE;
dim3 dimGrid;
dimGrid.x = (globalNumOfCells - 1) / threadBlockSize + 1;
dimGrid.y = dimGrid.z = 1;
dim3 dimBlock(threadBlockSize, 1, 1);
InitialCellLocation<<<dimGrid, dimBlock>>>(d_vertexPositions,
d_tetrahedralConnectivities,
d_cellLocations,
xRes, yRes, zRes,
minX, minY, minZ,
dx, dy, dz,
epsilon,
globalNumOfCells,
d_gridCounts);
err = cudaDeviceSynchronize();
if (err) lcs::Error("Fail to launch the initial location kernel");
err = cudaMemcpy(initialCellLocations, d_cellLocations, sizeof(int) * numOfGridPoints, cudaMemcpyDeviceToHost);
if (err) lcs::Error("Fail to read device initialCellLocations");
err = cudaMemcpy(gridCounts, d_gridCounts, sizeof(int) * numOfGridPoints, cudaMemcpyDeviceToHost);
if (err) lcs::Error("Fail to read device gridCounts");
cudaFree(d_cellLocations);
cudaFree(d_gridCounts);
}
void LaunchGPUForNaiveTracing(double *globalVertexPositions,
double *globalStartVelocities,
double *globalEndVelocities,
int *globalTetrahedralConnectivities,
int *globalTetrahedralLinks,
int *stage,
double *lastPosition,
double *k1,
double *k2,
double *k3,
double *pastTimes,
int *cellLocations,
double startTime, double endTime, double timeStep,
double epsilon,
int *activeParticles,
int numOfActiveParticles
) {
int threadBlockSize = BLOCK_SIZE;
dim3 dimGrid;
dimGrid.x = (numOfActiveParticles - 1) / threadBlockSize + 1;
dimGrid.y = dimGrid.z = 1;
dim3 dimBlock(threadBlockSize, 1, 1);
cudaError_t err;
cudaFuncSetCacheConfig(NaiveTracing, cudaFuncCachePreferShared);
clock_t tt = clock();
NaiveTracing<<<dimGrid, dimBlock>>>(globalVertexPositions,
globalStartVelocities,
globalEndVelocities,
globalTetrahedralConnectivities,
globalTetrahedralLinks,
//stage,
lastPosition,
//k1, k2, k3,
pastTimes,
cellLocations,
startTime, endTime, timeStep, epsilon,
activeParticles,
numOfActiveParticles);
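// Note: the kernel launch returns immediately (execution is asynchronous), so the time printed below measures only launch overhead; cudaDeviceSynchronize() afterwards waits for the kernel to finish.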
printf("time : %lf\n", (double)(clock() - tt) / CLOCKS_PER_SEC);
err = cudaDeviceSynchronize();
printf("err string = %s\n", cudaGetErrorString(err));
if (err) lcs::Error("Fail to launch the naive tracing kernel");
}
|
3c604e135a5c0eafa1a3237019d216d64c937089.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "common.h"
#include "hip/hip_runtime.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <opencv2/opencv.hpp>
#include <iostream>
#include <string>
using namespace cv;
using namespace std;
__global__ void addKernel(uchar3 **pSrcImg, uchar3* pDstImg, int imgW, int imgH)
{
int x = threadIdx.x + blockDim.x * blockIdx.x;
int y = threadIdx.y + blockDim.y * blockIdx.y;
if (x < imgW && y < imgH)
{
// Compute the global pixel coordinate, i.e. flatten the 2D position into a 1D array offset
int offset = y * imgW + x;
// Fetch the RGB channel values of the corresponding pixel from each of the two images
uchar3 pixel1 = pSrcImg[0][offset];
uchar3 pixel2 = pSrcImg[1][offset];
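// Plain per-channel addition: the uchar cast wraps on overflow (no saturation).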
pDstImg[offset].x =uchar(pixel1.x + pixel2.x);
pDstImg[offset].y =uchar(pixel1.y + pixel2.y);
pDstImg[offset].z =uchar(pixel1.z + pixel2.z);
}
}
int main()
{
// Read the two input images with OpenCV
Mat img[2];
img[0]=imread("../data/test.jpg");
img[1]=imread("../data/NASA_Mars_Rover.jpg");
int imgH=img[0].rows;
int imgW=img[0].cols;
// Output image
Mat dstImg=Mat::zeros(imgH, imgW, CV_8UC3);
// Host pointers
uchar3 **pImg=(uchar3**)malloc(sizeof(uchar3*)*2); // input: host array of device image pointers
// Device pointers
uchar3 **pDevice;// input: device array of image pointers
uchar3 *pDeviceData;// input: device buffer holding both images
uchar3 *pDstImgData;// device buffer for the output image
// Allocate GPU memory
// GPU memory for the output image
hipMalloc(&pDstImgData, imgW*imgH*sizeof(uchar3));
// GPU memory for the device-side array of image pointers
hipMalloc(&pDevice, sizeof(uchar3*)*2);
// GPU memory for the image data itself (both images, back to back)
hipMalloc(&pDeviceData, sizeof(uchar3)*imgH*imgW*2);
// Key step: fill the host-side pointer array with device addresses, so that once it is copied over, the device-side array points at the device image data
for (int i=0; i<2; i++)
{
pImg[i]=pDeviceData+i*imgW*imgH;
}
// Copy data to the GPU
// Copy the host pointer array into the device-side array (its elements are device addresses of the image data)
hipMemcpy(pDevice, pImg, sizeof(uchar3*)*2, hipMemcpyHostToDevice);
// Copy the image data (host memory) into the device image buffers
hipMemcpy(pDeviceData, img[0].data, sizeof(uchar3)*imgH*imgW, hipMemcpyHostToDevice);
hipMemcpy(pDeviceData+imgH*imgW, img[1].data, sizeof(uchar3)*imgH*imgW, hipMemcpyHostToDevice);
// Kernel: simple per-pixel sum of the two images
dim3 block(8, 8);
dim3 grid( (imgW+block.x-1)/block.x, (imgH+block.y-1)/block.y);
hipLaunchKernelGGL(( addKernel), dim3(grid), dim3(block), 0, 0, pDevice, pDstImgData, imgW, imgH);
hipDeviceSynchronize();
// Copy the output image back to the host and write it to disk
hipMemcpy(dstImg.data, pDstImgData, imgW*imgH*sizeof(uchar3), hipMemcpyDeviceToHost);
imwrite("../Thsis.jpg", dstImg);
CHECK(hipDeviceReset());
return 0;
}
|
3c604e135a5c0eafa1a3237019d216d64c937089.cu
|
#include "common.h"
#include "cuda.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <opencv2/opencv.hpp>
#include <iostream>
#include <string>
using namespace cv;
using namespace std;
__global__ void addKernel(uchar3 **pSrcImg, uchar3* pDstImg, int imgW, int imgH)
{
int x = threadIdx.x + blockDim.x * blockIdx.x;
int y = threadIdx.y + blockDim.y * blockIdx.y;
if (x < imgW && y < imgH)
{
// Compute the global pixel coordinate, i.e. flatten the 2D position into a 1D array offset
int offset = y * imgW + x;
// Fetch the RGB channel values of the corresponding pixel from each of the two images
uchar3 pixel1 = pSrcImg[0][offset];
uchar3 pixel2 = pSrcImg[1][offset];
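// Plain per-channel addition: the uchar cast wraps on overflow (no saturation).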
pDstImg[offset].x =uchar(pixel1.x + pixel2.x);
pDstImg[offset].y =uchar(pixel1.y + pixel2.y);
pDstImg[offset].z =uchar(pixel1.z + pixel2.z);
}
}
int main()
{
// Read the two input images with OpenCV
Mat img[2];
img[0]=imread("../data/test.jpg");
img[1]=imread("../data/NASA_Mars_Rover.jpg");
int imgH=img[0].rows;
int imgW=img[0].cols;
// Output image
Mat dstImg=Mat::zeros(imgH, imgW, CV_8UC3);
// Host pointers
uchar3 **pImg=(uchar3**)malloc(sizeof(uchar3*)*2); // input: host array of device image pointers
// Device pointers
uchar3 **pDevice;// input: device array of image pointers
uchar3 *pDeviceData;// input: device buffer holding both images
uchar3 *pDstImgData;// device buffer for the output image
// Allocate GPU memory
// GPU memory for the output image
cudaMalloc(&pDstImgData, imgW*imgH*sizeof(uchar3));
// GPU memory for the device-side array of image pointers
cudaMalloc(&pDevice, sizeof(uchar3*)*2);
// GPU memory for the image data itself (both images, back to back)
cudaMalloc(&pDeviceData, sizeof(uchar3)*imgH*imgW*2);
// Key step: fill the host-side pointer array with device addresses, so that once it is copied over, the device-side array points at the device image data
for (int i=0; i<2; i++)
{
pImg[i]=pDeviceData+i*imgW*imgH;
}
// Copy data to the GPU
// Copy the host pointer array into the device-side array (its elements are device addresses of the image data)
cudaMemcpy(pDevice, pImg, sizeof(uchar3*)*2, cudaMemcpyHostToDevice);
// Copy the image data (host memory) into the device image buffers
cudaMemcpy(pDeviceData, img[0].data, sizeof(uchar3)*imgH*imgW, cudaMemcpyHostToDevice);
cudaMemcpy(pDeviceData+imgH*imgW, img[1].data, sizeof(uchar3)*imgH*imgW, cudaMemcpyHostToDevice);
// Kernel: simple per-pixel sum of the two images
dim3 block(8, 8);
dim3 grid( (imgW+block.x-1)/block.x, (imgH+block.y-1)/block.y);
addKernel<<<grid, block>>>(pDevice, pDstImgData, imgW, imgH);
cudaDeviceSynchronize();
// Copy the output image back to the host and write it to disk
cudaMemcpy(dstImg.data, pDstImgData, imgW*imgH*sizeof(uchar3), cudaMemcpyDeviceToHost);
imwrite("../Thsis.jpg", dstImg);
CHECK(cudaDeviceReset());
return 0;
}
|
f5b62d1ffdc1813c7a8a8ac81ab3ed0d3f51196a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "stdio.h"
#include <hip/hip_runtime.h>
#include "../../CudaHelper.h"
const unsigned int BLOCK_SIZE_X = 16; // block x size
const unsigned int BLOCK_SIZE_Y = 16; // block y size
const unsigned int GRID_SIZE_X = 4096/16; // grid x size
const unsigned int GRID_SIZE_Y = 4096/16; // grid y size
const unsigned int IMAGE_WIDTH = 4096; // image width size
const unsigned int IMAGE_HEIGHT = 4096; // image height size
const unsigned int KERNEL_SIZE = 5; // Gaussian Kernel Size
unsigned char* GenerateRandGrayPics(unsigned int width, unsigned int height)
{
unsigned char* pImg = (unsigned char*)malloc(width * height * sizeof(unsigned char));
if(NULL == pImg) { printf("malloc img buffer failed!!!\n"); return NULL;}
for(int i = 0; i < height; i++)
{
for(int j = 0; j < width; j++)
{
pImg[i * width + j] = rand()%256; // 0~255
}
}
return pImg;
}
bool CompareGrayImg(unsigned char* pImgA, unsigned char* pImgB, unsigned int width, unsigned height)
{
bool ret = true;
if((NULL == pImgA)||(NULL == pImgB))
{
printf("input img is empty!!!\n");
return false;
}
for(int i = 0; i < height; i++)
{
for(int j = 0; j < width; j++)
{
if(pImgA[i * width + j] != pImgB[i * width + j])
{
printf("img[%d][%d] gray value is not the same, pImgA is %d, pImgB is %d\n ",
i, j, pImgA[i * width + j], pImgB[i * width + j]);
ret = false;
}
}
}
if(ret)
{
printf("Compare 2D Gray Img Passed!!!\n");
}
else
{
printf("Compare 2D Gray Img Failed!!!\n");
}
return ret;
}
void GaussianSmooth_CPU(unsigned char* pInputImg, unsigned char *pOutputImg, unsigned int width, unsigned height)
{
int gs_kernel[KERNEL_SIZE][KERNEL_SIZE] =
{
{1, 4, 7, 4, 1},
{4, 16, 26, 16, 4},
{7, 26, 41, 26, 7},
{4, 16, 26, 16, 4},
{1, 4, 7, 4, 1}
}; // kernel sum is 273
// loop in every pixel(height * width)
for (int row = 0; row < height; row++)
{
for(int col = 0; col < width; col++)
{
int sum = 0;
int out_row = row;
int out_col = col;
// loop over the KERNEL_SIZE * KERNEL_SIZE neighbourhood centred on (row, col);
// samples that fall outside the image are clamped to the nearest edge pixel.
// Use a signed half-width (the unsigned expression -KERNEL_SIZE/2 never enters the loop)
// and keep the outer loop counters row/col untouched while sampling.
const int kHalf = (int)KERNEL_SIZE / 2;
for( int i = -kHalf; i <= kHalf; i++)
{
for( int j = -kHalf; j <= kHalf; j++)
{
int r = min(max(0, row + i), (int)height - 1);
int c = min(max(0, col + j), (int)width - 1);
unsigned char tmpPixel = *(pInputImg + (int)width * r + c);
sum += tmpPixel * gs_kernel[i+kHalf][j+kHalf];
}
}
int final_pixel = sum/273;
if(final_pixel < 0)
{
final_pixel = 0;
}
else if(final_pixel > 255)
{
final_pixel = 255;
}
*(pOutputImg + out_row * width + out_col) = final_pixel;
}
}
}
// Kernel function: apply the 5x5 Gaussian smoothing filter to a gray image (borders are clamp-padded)
__global__ void GaussianSmooth_Kernel(unsigned char *pInputImg, unsigned char *pOutputImg, int* width, int* height)
{
// Map threads to pixels: x indexes columns (width), y indexes rows (height), matching the grid sizing in main()
int col = threadIdx.x + blockDim.x * blockIdx.x;
int row = threadIdx.y + blockDim.y * blockIdx.y;
if (col >= *width || row >= *height)
return;
int gs_kernel[KERNEL_SIZE][KERNEL_SIZE] =
{
{1, 4, 7, 4, 1},
{4, 16, 26, 16, 4},
{7, 26, 41, 26, 7},
{4, 16, 26, 16, 4},
{1, 4, 7, 4, 1}
}; // kernel sum is 273
int sum = 0;
int out_row = row;
int out_col = col;
// loop over the KERNEL_SIZE * KERNEL_SIZE neighbourhood centred on (row, col);
// samples that fall outside the image are clamped to the nearest edge pixel.
// Use a signed half-width (the unsigned expression -KERNEL_SIZE/2 never enters the loop)
// and keep row/col untouched while sampling.
const int kHalf = (int)KERNEL_SIZE / 2;
for( int i = -kHalf; i <= kHalf; i++)
{
for( int j = -kHalf; j <= kHalf; j++)
{
int r = min(max(0, row + i), *height - 1);
int c = min(max(0, col + j), *width - 1);
unsigned char tmpPixel = *(pInputImg + (*width) * r + c);
sum += tmpPixel * gs_kernel[i+kHalf][j+kHalf];
}
}
int final_pixel = sum/273;
if(final_pixel < 0)
{
final_pixel = 0;
}
else if(final_pixel > 255)
{
final_pixel = 255;
}
*(pOutputImg + out_row * (*width) + out_col) = final_pixel;
}
int main(int argv, char* argc[])
{
// deal with input param
int blockSizeX = BLOCK_SIZE_X;
int blockSizeY = BLOCK_SIZE_Y;
int gridSizeX = GRID_SIZE_X;
int gridSizeY = GRID_SIZE_Y;
int width = IMAGE_WIDTH;
int height = IMAGE_HEIGHT;
if(argv > 1)
{
blockSizeX = atoi(argc[1]);
}
if(argv > 2)
{
blockSizeY = atoi(argc[2]);
}
if(argv > 3)
{
gridSizeX = atoi(argc[3]);
}
if(argv > 4)
{
gridSizeY = atoi(argc[4]);
}
if(argv > 5)
{
width = atoi(argc[5]);
}
if(argv > 6)
{
height = atoi(argc[6]);
}
printf("blockSizeX is %d\n", blockSizeX);
printf("blockSizeY is %d\n", blockSizeY);
printf("gridSizeX is %d\n", gridSizeX);
printf("gridSizeY is %d\n", gridSizeY);
printf("width is %d\n", width);
printf("height is %d\n", height);
// Get cuda device count
int iCount;
hipGetDeviceCount(&iCount);
if(0 == iCount)
{
printf("There is no cuda device\n");
return false;
}
// Find the first suitable device
int i;
for (i = 0; i < iCount; i++)
{
hipDeviceProp_t prop;
if(hipGetDeviceProperties(&prop, i) == hipSuccess)
{
// find a prop > CUDA 1.X device and break
if(prop.major >= 1)
{
break;
}
}
}
// can not find a prop > CUDA 1.X device and return false
if(i == iCount)
{
printf("There is no CUDA 1.X device\n");
return false;
}
// Set the suitable device to current
hipSetDevice(i);
// Malloc host data
unsigned char *pHostInputImg = GenerateRandGrayPics(width, height);
if(NULL == pHostInputImg) { printf("malloc host input img buffer failed!!!\n"); return -1;}
unsigned char* pHostOutputImg = (unsigned char*)malloc(width * height * sizeof(unsigned char));
if(NULL == pHostOutputImg) { printf("malloc host output img buffer failed!!!\n"); return -1;}
// Malloc device data
unsigned char *pDeviceInputImg = NULL;
unsigned char *pDeviceOutputImg = NULL;
int *pDeviceImageWidth = NULL;
int *pDeviceImageHeight = NULL;
HANDLE_CUDA_ERROR(hipMalloc((void**)&pDeviceInputImg, sizeof(unsigned char) * width * height));
HANDLE_CUDA_ERROR(hipMalloc((void**)&pDeviceOutputImg, sizeof(unsigned char) * width * height));
HANDLE_CUDA_ERROR(hipMalloc((void**)&pDeviceImageWidth, sizeof(int)));
HANDLE_CUDA_ERROR(hipMalloc((void**)&pDeviceImageHeight, sizeof(int)));
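// Note: width and height are passed to the kernel through device memory; passing them by value as ordinary kernel arguments would avoid these extra allocations and copies.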
printf("\nGPU COMPUTE BEGIN********************\n");
// Record total time elapsed via GPU
TIME_TRACE_CUDA_EVENT_START(TotalElpasedTimeViaGPU);
// Copy host data to device
TIME_TRACE_CUDA_EVENT_START(hipMemcpyHostToDevice);
HANDLE_CUDA_ERROR(hipMemcpy(pDeviceInputImg, pHostInputImg, sizeof(unsigned char) * width * height, hipMemcpyHostToDevice));
HANDLE_CUDA_ERROR(hipMemcpy(pDeviceImageWidth, &width, sizeof(int), hipMemcpyHostToDevice));
HANDLE_CUDA_ERROR(hipMemcpy(pDeviceImageHeight, &height, sizeof(int), hipMemcpyHostToDevice));
TIME_TRACE_CUDA_EVENT_STOP(hipMemcpyHostToDevice);
// Execute Kernel
TIME_TRACE_CUDA_EVENT_START(GaussianSmoothKernel);
// Set the minium of const GRID_SIZE and computation result of grid size based on image input size to the final grid size
int grid_size_x = min((width + blockSizeX-1)/blockSizeX, gridSizeX);
int grid_size_y = min((height + blockSizeY-1)/blockSizeY, gridSizeY);
dim3 block(blockSizeX, blockSizeY, 1);
dim3 grid(grid_size_x, grid_size_y, 1);
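// Note: the launch grid is clamped to gridSizeX/gridSizeY; since the kernel maps one thread to one pixel (no grid-stride loop), images larger than gridSize*blockSize in either dimension would be processed only partially.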
hipLaunchKernelGGL(( GaussianSmooth_Kernel), dim3(grid), dim3(block), 0, 0, pDeviceInputImg, pDeviceOutputImg, pDeviceImageWidth, pDeviceImageHeight);
hipError_t err = hipGetLastError();
if(err != hipSuccess)
{
printf("%s\n", hipGetErrorString(err));
}
TIME_TRACE_CUDA_EVENT_STOP(GaussianSmoothKernel);
// Copy result from device
TIME_TRACE_CUDA_EVENT_START(hipMemcpyDeviceToHost);
HANDLE_CUDA_ERROR(hipMemcpy(pHostOutputImg, pDeviceOutputImg, sizeof(unsigned char) * width * height, hipMemcpyDeviceToHost));
TIME_TRACE_CUDA_EVENT_STOP(hipMemcpyDeviceToHost);
TIME_TRACE_CUDA_EVENT_STOP(TotalElpasedTimeViaGPU);
// Free device memory
HANDLE_CUDA_ERROR(hipFree(pDeviceInputImg));
HANDLE_CUDA_ERROR(hipFree(pDeviceOutputImg));
HANDLE_CUDA_ERROR(hipFree(pDeviceImageWidth));
HANDLE_CUDA_ERROR(hipFree(pDeviceImageHeight));
// hipDeviceReset to ensure Visual Profile run correctly
HANDLE_CUDA_ERROR(hipDeviceReset());
printf("GPU COMPUTE END********************\n");
printf("\nCPU COMPUTE BEGIN********************\n");
// Compute in CPU for comparision
unsigned char* pHostOutputImg_CPU = (unsigned char*)malloc(width * height * sizeof(unsigned char));
if(NULL == pHostOutputImg_CPU) { printf("malloc host output img buffer failed!!!\n"); return -1;}
TIME_TRACE_CPU_START(TotalElpasedTimeViaCPU);
GaussianSmooth_CPU(pHostInputImg, pHostOutputImg_CPU, width, height);
TIME_TRACE_CPU_STOP(TotalElpasedTimeViaCPU);
printf("CPU COMPUTE END********************\n");
//Print Compare Compute Result
CompareGrayImg(pHostOutputImg,pHostOutputImg_CPU, width, height);
// Free host memory
free(pHostInputImg); pHostInputImg = NULL;
free(pHostOutputImg); pHostOutputImg = NULL;
free(pHostOutputImg_CPU); pHostOutputImg_CPU = NULL;
return 0;
}
|
f5b62d1ffdc1813c7a8a8ac81ab3ed0d3f51196a.cu
|
#include "stdio.h"
#include <cuda_runtime.h>
#include "../../CudaHelper.h"
const unsigned int BLOCK_SIZE_X = 16; // block x size
const unsigned int BLOCK_SIZE_Y = 16; // block y size
const unsigned int GRID_SIZE_X = 4096/16; // grid x size
const unsigned int GRID_SIZE_Y = 4096/16; // grid y size
const unsigned int IMAGE_WIDTH = 4096; // image width size
const unsigned int IMAGE_HEIGHT = 4096; // image height size
const unsigned int KERNEL_SIZE = 5; // Gaussian Kernel Size
unsigned char* GenerateRandGrayPics(unsigned int width, unsigned int height)
{
unsigned char* pImg = (unsigned char*)malloc(width * height * sizeof(unsigned char));
if(NULL == pImg) { printf("malloc img buffer failed!!!\n"); return NULL;}
for(int i = 0; i < height; i++)
{
for(int j = 0; j < width; j++)
{
pImg[i * width + j] = rand()%256; // 0~255
}
}
return pImg;
}
bool CompareGrayImg(unsigned char* pImgA, unsigned char* pImgB, unsigned int width, unsigned height)
{
bool ret = true;
if((NULL == pImgA)||(NULL == pImgB))
{
printf("input img is empty!!!\n");
return false;
}
for(int i = 0; i < height; i++)
{
for(int j = 0; j < width; j++)
{
if(pImgA[i * width + j] != pImgB[i * width + j])
{
printf("img[%d][%d] gray value is not the same, pImgA is %d, pImgB is %d\n ",
i, j, pImgA[i * width + j], pImgB[i * width + j]);
ret = false;
}
}
}
if(ret)
{
printf("Compare 2D Gray Img Passed!!!\n");
}
else
{
printf("Compare 2D Gray Img Failed!!!\n");
}
return ret;
}
void GaussianSmooth_CPU(unsigned char* pInputImg, unsigned char *pOutputImg, unsigned int width, unsigned height)
{
int gs_kernel[KERNEL_SIZE][KERNEL_SIZE] =
{
{1, 4, 7, 4, 1},
{4, 16, 26, 16, 4},
{7, 26, 41, 26, 7},
{4, 16, 26, 16, 4},
{1, 4, 7, 4, 1}
}; // kernel sum is 273
// loop in every pixel(height * width)
for (int row = 0; row < height; row++)
{
for(int col = 0; col < width; col++)
{
int sum = 0;
int out_row = row;
int out_col = col;
// loop over the KERNEL_SIZE * KERNEL_SIZE neighbourhood centred on (row, col);
// samples that fall outside the image are clamped to the nearest edge pixel.
// Use a signed half-width (the unsigned expression -KERNEL_SIZE/2 never enters the loop)
// and keep the outer loop counters row/col untouched while sampling.
const int kHalf = (int)KERNEL_SIZE / 2;
for( int i = -kHalf; i <= kHalf; i++)
{
for( int j = -kHalf; j <= kHalf; j++)
{
int r = min(max(0, row + i), (int)height - 1);
int c = min(max(0, col + j), (int)width - 1);
unsigned char tmpPixel = *(pInputImg + (int)width * r + c);
sum += tmpPixel * gs_kernel[i+kHalf][j+kHalf];
}
}
int final_pixel = sum/273;
if(final_pixel < 0)
{
final_pixel = 0;
}
else if(final_pixel > 255)
{
final_pixel = 255;
}
*(pOutputImg + out_row * width + out_col) = final_pixel;
}
}
}
// Kernel function: apply the 5x5 Gaussian smoothing filter to a gray image (borders are clamp-padded)
__global__ void GaussianSmooth_Kernel(unsigned char *pInputImg, unsigned char *pOutputImg, int* width, int* height)
{
// Map threads to pixels: x indexes columns (width), y indexes rows (height), matching the grid sizing in main()
int col = threadIdx.x + blockDim.x * blockIdx.x;
int row = threadIdx.y + blockDim.y * blockIdx.y;
if (col >= *width || row >= *height)
return;
int gs_kernel[KERNEL_SIZE][KERNEL_SIZE] =
{
{1, 4, 7, 4, 1},
{4, 16, 26, 16, 4},
{7, 26, 41, 26, 7},
{4, 16, 26, 16, 4},
{1, 4, 7, 4, 1}
}; // kernel sum is 273
int sum = 0;
int out_row = row;
int out_col = col;
// loop over the KERNEL_SIZE * KERNEL_SIZE neighbourhood centred on (row, col);
// samples that fall outside the image are clamped to the nearest edge pixel.
// Use a signed half-width (the unsigned expression -KERNEL_SIZE/2 never enters the loop)
// and keep row/col untouched while sampling.
const int kHalf = (int)KERNEL_SIZE / 2;
for( int i = -kHalf; i <= kHalf; i++)
{
for( int j = -kHalf; j <= kHalf; j++)
{
int r = min(max(0, row + i), *height - 1);
int c = min(max(0, col + j), *width - 1);
unsigned char tmpPixel = *(pInputImg + (*width) * r + c);
sum += tmpPixel * gs_kernel[i+kHalf][j+kHalf];
}
}
int final_pixel = sum/273;
if(final_pixel < 0)
{
final_pixel = 0;
}
else if(final_pixel > 255)
{
final_pixel = 255;
}
*(pOutputImg + out_row * (*width) + out_col) = final_pixel;
}
int main(int argv, char* argc[])
{
// deal with input param
int blockSizeX = BLOCK_SIZE_X;
int blockSizeY = BLOCK_SIZE_Y;
int gridSizeX = GRID_SIZE_X;
int gridSizeY = GRID_SIZE_Y;
int width = IMAGE_WIDTH;
int height = IMAGE_HEIGHT;
if(argv > 1)
{
blockSizeX = atoi(argc[1]);
}
if(argv > 2)
{
blockSizeY = atoi(argc[2]);
}
if(argv > 3)
{
gridSizeX = atoi(argc[3]);
}
if(argv > 4)
{
gridSizeY = atoi(argc[4]);
}
if(argv > 5)
{
width = atoi(argc[5]);
}
if(argv > 6)
{
height = atoi(argc[6]);
}
printf("blockSizeX is %d\n", blockSizeX);
printf("blockSizeY is %d\n", blockSizeY);
printf("gridSizeX is %d\n", gridSizeX);
printf("gridSizeY is %d\n", gridSizeY);
printf("width is %d\n", width);
printf("height is %d\n", height);
// Get cuda device count
int iCount;
cudaGetDeviceCount(&iCount);
if(0 == iCount)
{
printf("There is no cuda device\n");
return false;
}
// Find the first suitable device
int i;
for (i = 0; i < iCount; i++)
{
cudaDeviceProp prop;
if(cudaGetDeviceProperties(&prop, i) == cudaSuccess)
{
// find a prop > CUDA 1.X device and break
if(prop.major >= 1)
{
break;
}
}
}
// can not find a prop > CUDA 1.X device and return false
if(i == iCount)
{
printf("There is no CUDA 1.X device\n");
return false;
}
// Set the suitable device to current
cudaSetDevice(i);
// Malloc host data
unsigned char *pHostInputImg = GenerateRandGrayPics(width, height);
if(NULL == pHostInputImg) { printf("malloc host input img buffer failed!!!\n"); return -1;}
unsigned char* pHostOutputImg = (unsigned char*)malloc(width * height * sizeof(unsigned char));
if(NULL == pHostOutputImg) { printf("malloc host output img buffer failed!!!\n"); return -1;}
// Malloc device data
unsigned char *pDeviceInputImg = NULL;
unsigned char *pDeviceOutputImg = NULL;
int *pDeviceImageWidth = NULL;
int *pDeviceImageHeight = NULL;
HANDLE_CUDA_ERROR(cudaMalloc((void**)&pDeviceInputImg, sizeof(unsigned char) * width * height));
HANDLE_CUDA_ERROR(cudaMalloc((void**)&pDeviceOutputImg, sizeof(unsigned char) * width * height));
HANDLE_CUDA_ERROR(cudaMalloc((void**)&pDeviceImageWidth, sizeof(int)));
HANDLE_CUDA_ERROR(cudaMalloc((void**)&pDeviceImageHeight, sizeof(int)));
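// Note: width and height are passed to the kernel through device memory; passing them by value as ordinary kernel arguments would avoid these extra allocations and copies.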
printf("\nGPU COMPUTE BEGIN********************\n");
// Record total time elapsed via GPU
TIME_TRACE_CUDA_EVENT_START(TotalElpasedTimeViaGPU);
// Copy host data to device
TIME_TRACE_CUDA_EVENT_START(cudaMemcpyHostToDevice);
HANDLE_CUDA_ERROR(cudaMemcpy(pDeviceInputImg, pHostInputImg, sizeof(unsigned char) * width * height, cudaMemcpyHostToDevice));
HANDLE_CUDA_ERROR(cudaMemcpy(pDeviceImageWidth, &width, sizeof(int), cudaMemcpyHostToDevice));
HANDLE_CUDA_ERROR(cudaMemcpy(pDeviceImageHeight, &height, sizeof(int), cudaMemcpyHostToDevice));
TIME_TRACE_CUDA_EVENT_STOP(cudaMemcpyHostToDevice);
// Execute Kernel
TIME_TRACE_CUDA_EVENT_START(GaussianSmoothKernel);
// Set the minium of const GRID_SIZE and computation result of grid size based on image input size to the final grid size
int grid_size_x = min((width + blockSizeX-1)/blockSizeX, gridSizeX);
int grid_size_y = min((height + blockSizeY-1)/blockSizeY, gridSizeY);
dim3 block(blockSizeX, blockSizeY, 1);
dim3 grid(grid_size_x, grid_size_y, 1);
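// Note: the launch grid is clamped to gridSizeX/gridSizeY; since the kernel maps one thread to one pixel (no grid-stride loop), images larger than gridSize*blockSize in either dimension would be processed only partially.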
GaussianSmooth_Kernel<<<grid, block>>>(pDeviceInputImg, pDeviceOutputImg, pDeviceImageWidth, pDeviceImageHeight);
cudaError_t err = cudaGetLastError();
if(err != cudaSuccess)
{
printf("%s\n", cudaGetErrorString(err));
}
TIME_TRACE_CUDA_EVENT_STOP(GaussianSmoothKernel);
// Copy result from device
TIME_TRACE_CUDA_EVENT_START(cudaMemcpyDeviceToHost);
HANDLE_CUDA_ERROR(cudaMemcpy(pHostOutputImg, pDeviceOutputImg, sizeof(unsigned char) * width * height, cudaMemcpyDeviceToHost));
TIME_TRACE_CUDA_EVENT_STOP(cudaMemcpyDeviceToHost);
TIME_TRACE_CUDA_EVENT_STOP(TotalElpasedTimeViaGPU);
// Free device memory
HANDLE_CUDA_ERROR(cudaFree(pDeviceInputImg));
HANDLE_CUDA_ERROR(cudaFree(pDeviceOutputImg));
HANDLE_CUDA_ERROR(cudaFree(pDeviceImageWidth));
HANDLE_CUDA_ERROR(cudaFree(pDeviceImageHeight));
// cudaDeviceReset to ensure Visual Profile run correctly
HANDLE_CUDA_ERROR(cudaDeviceReset());
printf("GPU COMPUTE END********************\n");
printf("\nCPU COMPUTE BEGIN********************\n");
// Compute in CPU for comparision
unsigned char* pHostOutputImg_CPU = (unsigned char*)malloc(width * height * sizeof(unsigned char));
if(NULL == pHostOutputImg_CPU) { printf("malloc host output img buffer failed!!!\n"); return -1;}
TIME_TRACE_CPU_START(TotalElpasedTimeViaCPU);
GaussianSmooth_CPU(pHostInputImg, pHostOutputImg_CPU, width, height);
TIME_TRACE_CPU_STOP(TotalElpasedTimeViaCPU);
printf("CPU COMPUTE END********************\n");
//Print Compare Compute Result
CompareGrayImg(pHostOutputImg,pHostOutputImg_CPU, width, height);
// Free host memory
free(pHostInputImg); pHostInputImg = NULL;
free(pHostOutputImg); pHostOutputImg = NULL;
free(pHostOutputImg_CPU); pHostOutputImg_CPU = NULL;
return 0;
}
|
426e0b1976ee0be2a7bb7163838ff57dbe5162d6.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<stdlib.h>
#include<sys/time.h>
#include<math.h>
#define NUM 10000000
#define CUDA_ERROR_EXIT(str) do{\
hipError_t err = hipGetLastError();\
if( err != hipSuccess){\
printf("Cuda Error: '%s' for %s\n", hipGetErrorString(err), str);\
exit(-1);\
}\
}while(0);
#define TDIFF(start, end) ((end.tv_sec - start.tv_sec) * 1000000UL + (end.tv_usec - start.tv_usec))
__device__ unsigned long exor(unsigned long a,unsigned long b)
{
unsigned long res = 0; // must start at zero: bits are shifted in one at a time
for (int i = 63; i >= 0; i--)
{
// Find current bits in x and y (64-bit shift so bits 32..63 are reachable)
bool b1 = a & (1UL << i);
bool b2 = b & (1UL << i);
// If both are 1 then 0 else xor is same as OR
bool xoredBit = (b1 & b2) ? 0 : (b1 | b2);
// Update result
res <<= 1;
res |= xoredBit;
}
return res;
}
__global__ void calculate(unsigned long *mem,unsigned long num,int l,unsigned long space)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if(i >= num/2*(l+1))
return;
unsigned long tmp=i*2*space;
if(tmp+space<num)
mem[tmp]=exor(mem[tmp],mem[tmp+space]);
}
int main(int argc, char **argv){
struct timeval start, end, t_start, t_end;
int i,blocks=0;
unsigned long *p1,*g1;
unsigned long seed,num;
if(argc == 3){
num = atoi(argv[1]); /*Update after checking*/
if(num <= 0)
num = NUM;
seed=atoi(argv[2]);
} else {
/* defaults when no element count and seed are given (otherwise num and seed would be used uninitialized) */
num = NUM;
seed = 0;
}
p1 = (unsigned long *)malloc((num+1) *sizeof(unsigned long));
srand(seed);
for(i=0; i<num; ++i){
p1[i]=random();
}
p1[i]=0;
gettimeofday(&t_start, NULL);
hipMalloc(&g1, (num+1) * sizeof(unsigned long));
CUDA_ERROR_EXIT("hipMalloc");
hipMemcpy(g1, p1, (num+1) * sizeof(unsigned long) , hipMemcpyHostToDevice);
CUDA_ERROR_EXIT("hipMemcpy");
gettimeofday(&start, NULL);
blocks=num/1024;
if(num%1024)
++blocks;
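// XOR tree reduction: pass i combines each element at index k*2^(i+1) with the element 2^i positions away, so after ceil(log2(num)) passes p1[0] holds the XOR of all elements.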
for(i=0;i<log(num)/log(2);i++){
hipLaunchKernelGGL(( calculate), dim3(blocks),dim3(1024), 0, 0, g1,num,i,(unsigned long)pow(2,i));
}
CUDA_ERROR_EXIT("kernel invocation");
gettimeofday(&end, NULL);
/* Copy back result*/
hipMemcpy(p1, g1, (num+1) * sizeof(unsigned long), hipMemcpyDeviceToHost);
CUDA_ERROR_EXIT("memcpy");
gettimeofday(&t_end, NULL);
printf("The Final XOR Value is %lu\n",p1[0]);
printf("Total time = %ld microsecs Processsing =%ld microsecs\n", TDIFF(t_start, t_end), TDIFF(start, end));
hipFree(g1);
free(p1);
return 0;
}
|
426e0b1976ee0be2a7bb7163838ff57dbe5162d6.cu
|
#include<stdio.h>
#include<stdlib.h>
#include<sys/time.h>
#include<math.h>
#define NUM 10000000
#define CUDA_ERROR_EXIT(str) do{\
cudaError err = cudaGetLastError();\
if( err != cudaSuccess){\
printf("Cuda Error: '%s' for %s\n", cudaGetErrorString(err), str);\
exit(-1);\
}\
}while(0);
#define TDIFF(start, end) ((end.tv_sec - start.tv_sec) * 1000000UL + (end.tv_usec - start.tv_usec))
__device__ unsigned long exor(unsigned long a,unsigned long b)
{
unsigned long res = 0; // must start at zero: bits are shifted in one at a time
for (int i = 63; i >= 0; i--)
{
// Find current bits in x and y (64-bit shift so bits 32..63 are reachable)
bool b1 = a & (1UL << i);
bool b2 = b & (1UL << i);
// If both are 1 then 0 else xor is same as OR
bool xoredBit = (b1 & b2) ? 0 : (b1 | b2);
// Update result
res <<= 1;
res |= xoredBit;
}
return res;
}
__global__ void calculate(unsigned long *mem,unsigned long num,int l,unsigned long space)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if(i >= num/2*(l+1))
return;
unsigned long tmp=i*2*space;
if(tmp+space<num)
mem[tmp]=exor(mem[tmp],mem[tmp+space]);
}
int main(int argc, char **argv){
struct timeval start, end, t_start, t_end;
int i,blocks=0;
unsigned long *p1,*g1;
unsigned long seed,num;
if(argc == 3){
num = atoi(argv[1]); /*Update after checking*/
if(num <= 0)
num = NUM;
seed=atoi(argv[2]);
} else {
/* defaults when no element count and seed are given (otherwise num and seed would be used uninitialized) */
num = NUM;
seed = 0;
}
p1 = (unsigned long *)malloc((num+1) *sizeof(unsigned long));
srand(seed);
for(i=0; i<num; ++i){
p1[i]=random();
}
p1[i]=0;
gettimeofday(&t_start, NULL);
cudaMalloc(&g1, (num+1) * sizeof(unsigned long));
CUDA_ERROR_EXIT("cudaMalloc");
cudaMemcpy(g1, p1, (num+1) * sizeof(unsigned long) , cudaMemcpyHostToDevice);
CUDA_ERROR_EXIT("cudaMemcpy");
gettimeofday(&start, NULL);
blocks=num/1024;
if(num%1024)
++blocks;
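// XOR tree reduction: pass i combines each element at index k*2^(i+1) with the element 2^i positions away, so after ceil(log2(num)) passes p1[0] holds the XOR of all elements.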
for(i=0;i<log(num)/log(2);i++){
calculate<<<blocks,1024>>>(g1,num,i,(unsigned long)pow(2,i));
}
CUDA_ERROR_EXIT("kernel invocation");
gettimeofday(&end, NULL);
/* Copy back result*/
cudaMemcpy(p1, g1, (num+1) * sizeof(unsigned long), cudaMemcpyDeviceToHost);
CUDA_ERROR_EXIT("memcpy");
gettimeofday(&t_end, NULL);
printf("The Final XOR Value is %lu\n",p1[0]);
printf("Total time = %ld microsecs Processsing =%ld microsecs\n", TDIFF(t_start, t_end), TDIFF(start, end));
cudaFree(g1);
free(p1);
return 0;
}
|
45967fd43377afb6de5cb3925d88f57a6d7e7043.hip
|
// !!! This is a file automatically generated by hipify!!!
// ============ Matrix inversion using cuBLAS library ============ //
//
// To compile as a standalone test program:
//
// 1. Make sure libcublas.so is in the search path
// 2. cd to build/ directory
// 3. nvcc -o cuda_inverse -arch=sm_35 -lcublas -DCUDA_TEST_MAIN
// ../src/Numerics/CUDA/cuda_inverse.cu
//
// =============================================================== //
#include <cstdio>
#include <unistd.h>
#include <sstream>
#include <vector>
#include <iostream>
#include <hip/hip_runtime.h>
#include <hipblas.h>
#define CONVERT_BS 256
#define INVERSE_BS 16
void
checkCUDAError (hipError_t err, const char *funcName)
{
if (err != hipSuccess)
{
fprintf(stderr, "CUDA error in %s \n", funcName);
fprintf(stderr, "CUDA error message : %s \n", hipGetErrorString(err));
fflush(stderr);
abort();
}
}
void
checkCublasError(hipblasStatus_t status, const char *funcName)
{
if (status != HIPBLAS_STATUS_SUCCESS)
{
fprintf(stderr, "CUBLAS error in %s \n", funcName);
fprintf(stderr, "CUBLAS error message: ");
switch (status)
{
case HIPBLAS_STATUS_NOT_INITIALIZED:
fprintf(stderr, "HIPBLAS_STATUS_NOT_INITIALIZED\n");
break;
case HIPBLAS_STATUS_ALLOC_FAILED:
fprintf(stderr, "HIPBLAS_STATUS_ALLOC_FAILED\n");
break;
case HIPBLAS_STATUS_INVALID_VALUE:
fprintf(stderr, "HIPBLAS_STATUS_INVALID_VALUE\n");
break;
case HIPBLAS_STATUS_ARCH_MISMATCH:
fprintf(stderr, "HIPBLAS_STATUS_ARCH_MISMATCH\n");
break;
case HIPBLAS_STATUS_MAPPING_ERROR:
fprintf(stderr, "HIPBLAS_STATUS_MAPPING_ERROR\n");
break;
case HIPBLAS_STATUS_EXECUTION_FAILED:
fprintf(stderr, "HIPBLAS_STATUS_EXECUTION_FAILED\n");
break;
case HIPBLAS_STATUS_INTERNAL_ERROR:
fprintf(stderr, "HIPBLAS_STATUS_INTERNAL_ERROR\n");
break;
#if (TORCH_HIP_VERSION >= 6050)
case HIPBLAS_STATUS_NOT_SUPPORTED:
fprintf(stderr, "HIPBLAS_STATUS_NOT_SUPPORTED\n");
break;
case CUBLAS_STATUS_LICENSE_ERROR:
fprintf(stderr, "CUBLAS_STATUS_LICENSE_ERROR\n");
break;
#endif
default:
fprintf(stderr, "unknown\n");
}
fflush(stderr);
abort();
}
}
// Convert matrix elements from one type (Tsrc) in the source matrix to
// another type (Tdest) and put them in the destination matrix
// (assumed src and dest have the same dimensions)
template <typename Tdest, typename Tsrc>
__global__ void
convert (Tdest **dest_list, Tsrc **src_list, int len)
{
__shared__ Tsrc *mysrc;
__shared__ Tdest *mydest;
if (threadIdx.x == 0)
{
mysrc = src_list[blockIdx.y];
mydest = dest_list[blockIdx.y];
}
__syncthreads();
int i = blockIdx.x * CONVERT_BS + threadIdx.x;
if (i < len)
mydest[i] = (Tdest) mysrc[i];
}
// Two matrix inversion functions
// 1. for float matrices
// useHigherPrecision = false --> single precision operations
// useHigherPrecision = true --> double precision operations
void
cublas_inverse (hipblasHandle_t handle,
float *Alist_d[], float *Ainvlist_d[],
float *AWorklist_d[], float *AinvWorklist_d[],
int N, int rowStride, int numMats,
bool useHigherPrecision)
{
hipError_t err;
hipblasStatus_t status;
// Info array tells if a matrix inversion is successful
// = 0 : successful
// = k : U(k,k) = 0; inversion failed
int *infoArray;
err = hipMalloc((void**) &infoArray, numMats * sizeof(int));
checkCUDAError(err, "Failed to allocate memory for infoArray in cublas_inverse (hipMalloc)");
// If double precision operations are desired...
if (useHigherPrecision)
{
// (i) convert elements in Alist from float to double, put them in AWorklist
dim3 dimBlockConvert (CONVERT_BS);
dim3 dimGridConvert ((N*rowStride + (CONVERT_BS-1)) / CONVERT_BS, numMats);
hipLaunchKernelGGL(( convert) , dim3(dimGridConvert), dim3(dimBlockConvert) , 0, 0, (double**)AWorklist_d, Alist_d, N*rowStride);
// (ii) call cublas to do matrix inversion
// LU decomposition
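// (the NULL pivot-array argument requests batched LU factorization without partial pivoting)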
status = hipblasDgetrfBatched(handle, N, (double**)AWorklist_d, rowStride, NULL, infoArray, numMats);
checkCublasError(status, "Problem in LU factorization (hipblasDgetrfBatched)");
// Inversion
status = hipblasDgetriBatched(handle, N, (double**)AWorklist_d, rowStride, NULL,
(double**)AinvWorklist_d, rowStride, infoArray, numMats);
checkCublasError(status, "Problem in matrix inversion (hipblasDgetriBatched)");
// (iii) convert results back to single precision
hipLaunchKernelGGL(( convert) , dim3(dimGridConvert), dim3(dimBlockConvert) , 0, 0, Ainvlist_d, (double**)AinvWorklist_d, N*rowStride);
}
// else, carry out single precision operations
else
{
// Call cublas to do matrix inversion
// LU decomposition
status = hipblasSgetrfBatched(handle, N, Alist_d, rowStride, NULL, infoArray, numMats);
checkCublasError(status, "Problem in LU factorization (hipblasSgetrfBatched)");
// Inversion
status = hipblasSgetriBatched(handle, N, Alist_d, rowStride, NULL,
Ainvlist_d, rowStride, infoArray, numMats);
checkCublasError(status, "Problem in matrix inversion (hipblasSgetriBatched)");
}
hipDeviceSynchronize();
// Free resources
hipFree(infoArray);
}
// 2. for double matrices
void
cublas_inverse (hipblasHandle_t handle,
double *Alist_d[], double *Ainvlist_d[],
double *AWorklist_d[], double *AinvWorklist_d[],
int N, int rowStride, int numMats,
bool useHigherPrecision)
{
hipError_t err;
hipblasStatus_t status;
// Info array tells if a matrix inversion is successful
// = 0 : successful
// = k : U(k,k) = 0; inversion failed
int *infoArray;
err = hipMalloc((void**) &infoArray, numMats * sizeof(int));
checkCUDAError(err, "Failed to allocate memory for infoArray in cublas_inverse (hipMalloc)");
// Call cublas functions to do inversion
// LU decomposition
status = hipblasDgetrfBatched(handle, N, Alist_d, rowStride, NULL, infoArray, numMats);
checkCublasError(status, "Problem in LU factorization (hipblasDgetrfBatched)");
// Inversion
status = hipblasDgetriBatched(handle, N, Alist_d, rowStride, NULL,
Ainvlist_d, rowStride, infoArray, numMats);
checkCublasError(status, "Problem in matrix inversion (hipblasDgetriBatched)");
hipDeviceSynchronize();
hipFree(infoArray);
}
//////////////////////////////////////////////////////
// Test routines //
//////////////////////////////////////////////////////
#ifdef CUDA_TEST_MAIN
template<typename T>
void
test_cublas_inverse(int matSize, int numMats)
{
hipError_t err;
// Initialize cublas
hipblasHandle_t handle;
hipblasStatus_t status;
status = hipblasCreate(&handle);
checkCublasError(status, "Failed to create cublas handle (hipblasCreate)");
srand48((long) 12394);
int N = matSize;
int row_stride = (matSize+15) / 16 * 16;
T **Alist, **AWorklist;
T **Alist_d, **AWorklist_d;
T **Clist, **CWorklist;
T **Clist_d, **CWorklist_d;
// Allocate arrays of pointers (one set on host, one set on device)
// pointing to the starting address (on device) of each matrix and its buffer
// (similar to DiracDeterminantCUDA)
Alist = (T**) malloc(numMats * sizeof(T*));
err = hipMalloc((void**) &Alist_d, numMats * sizeof(T*));
checkCUDAError(err, "Failed to allocate memory for Alist_d in test_cublas_inverse (hipMalloc)");
AWorklist = (T**) malloc(numMats * sizeof(T*));
err = hipMalloc((void**) &AWorklist_d, numMats * sizeof(T*));
checkCUDAError(err, "Failed to allocate memory for AWorklist_d in test_cublas_inverse (hipMalloc)");
Clist = (T**) malloc(numMats * sizeof(T*));
err = hipMalloc((void**) &Clist_d, numMats * sizeof(T*));
checkCUDAError(err, "Failed to allocate memory for Clist_d in test_cublas_inverse (hipMalloc)");
CWorklist = (T**) malloc(numMats * sizeof(T*));
err = hipMalloc((void**) &CWorklist_d, numMats * sizeof(T*));
checkCUDAError(err, "Failed to allocate memory for CWorklist_d in test_cublas_inverse (hipMalloc)");
// Generate matrices filled with random numbers
T* A = (T*) malloc(sizeof(T) * numMats * N * row_stride);
for (int j=0; j<numMats; j++)
for (int i=0; i<N*row_stride; i++)
A[j*N*row_stride+i] = 1.0 * (drand48() - 0.5);
// Allocate memory on device for each matrix
for (int mat=0; mat<numMats; mat++)
{
err = hipMalloc((void**) &(Alist[mat]), N * row_stride * sizeof(T));
checkCUDAError(err, "Failed to allocate memory for Alist[mat] in test_cublas_inverse (hipMalloc)");
err = hipMemcpyAsync(Alist[mat], &A[mat*N*row_stride], N * row_stride * sizeof(T), hipMemcpyHostToDevice);
checkCUDAError(err, "Failed to copy from A to Alist[mat] (hipMemcpyAsync, HostToDevice)");
err = hipMalloc((void**) &(AWorklist[mat]), 2 * N * row_stride * sizeof(T));
checkCUDAError(err, "Failed to allocate memory for AWorklist[mat] in test_cublas_inverse (hipMalloc)");
err = hipMalloc((void**) &(Clist[mat]), N * row_stride * sizeof(T));
checkCUDAError(err, "Failed to allocate memory for Clist[mat] in test_cublas_inverse (hipMalloc)");
err = hipMalloc((void**) &(CWorklist[mat]), 2 * N * row_stride * sizeof(T));
checkCUDAError(err, "Failed to allocate memory for CWorklist[mat] in test_cublas_inverse (hipMalloc)");
}
// Copy the starting address of each matrix
err = hipMemcpyAsync (Alist_d, Alist, numMats * sizeof(T*), hipMemcpyHostToDevice);
checkCUDAError(err, "Failed to copy from Alist to Alist_d (hipMemcpyAsync, HostToDevice)");
err = hipMemcpyAsync (AWorklist_d, AWorklist, numMats * sizeof(T*), hipMemcpyHostToDevice);
checkCUDAError(err, "Failed to copy from AWorklist to AWorklist_d (hipMemcpyAsync, HostToDevice)");
err = hipMemcpyAsync (Clist_d, Clist, numMats * sizeof(T*), hipMemcpyHostToDevice);
checkCUDAError(err, "Failed to copy from Clist to Clist_d (hipMemcpyAsync, HostToDevice)");
err = hipMemcpyAsync (CWorklist_d, CWorklist, numMats * sizeof(T*), hipMemcpyHostToDevice);
checkCUDAError(err, "Failed to copy from CWorklist to CWorklist_d (hipMemcpyAsync, HostToDevice)");
hipDeviceSynchronize();
clock_t start = clock();
// Call cublas functions to do inversion
cublas_inverse (handle, Alist_d, Clist_d, AWorklist_d, CWorklist_d, N, row_stride, numMats, true);
hipDeviceSynchronize();
clock_t end = clock();
double t = double(end-start) / double(CLOCKS_PER_SEC) / double(numMats);
double rate = 1.0 / t;
fprintf (stderr, "Rate is %1.3f matrix inversions per second.\n",
rate);
// Copy A^(-1) back to host memory Ainv; one matrix at a time
// Calculate error of A^(-1)A from unit matrix I
for (int mat=0; mat<numMats; mat++)
{
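// Note: Ainv is a variable-length array (a compiler extension in C++); very large matrices could overflow the stack, in which case heap allocation would be safer.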
T Ainv[N*row_stride];
err = hipMemcpy(Ainv, Clist[mat], N * row_stride * sizeof(T), hipMemcpyDeviceToHost);
checkCUDAError(err, "Failed to copy from Clist[mat] to Ainv (hipMemcpy, DeviceToHost)");
double error = 0.0;
for (int i=0; i<N; i++)
for (int j=0; j<N; j++)
{
double val = 0.0;
for (int k=0; k<N; k++)
val += Ainv[i*row_stride+k] * A[mat*N*row_stride+k*row_stride+j];
double diff = (i==j) ? (1.0f - val) : val;
error += diff * diff;
}
fprintf (stderr, "error = %1.8e\n", sqrt(error/(double)(N*N)));
}
// Finalize cublas
status = hipblasDestroy(handle);
checkCublasError(status, "Failed to destroy cublas handle (hipblasDestroy)");
// Free resources on both host and device
for (int mat=0; mat<numMats; mat++)
{
hipFree(Alist[mat]);
hipFree(Clist[mat]);
hipFree(AWorklist[mat]);
hipFree(CWorklist[mat]);
}
hipFree(Alist_d);
hipFree(Clist_d);
hipFree(AWorklist_d);
hipFree(CWorklist_d);
free(Alist);
free(Clist);
free(AWorklist);
free(CWorklist);
free(A);
// Reset device. Required for memory leak debugging
hipDeviceReset();
}
int main(int argc, char** argv)
{
int matSize = 0;
int numMats = 0;
if (argc == 3) {
matSize = atoi(argv[1]);
numMats = atoi(argv[2]);
}
else {
printf("Usage: ./cuda_inverse [matrix size] [number of matrices]\n");
exit(1);
}
test_cublas_inverse<double>(matSize, numMats);
test_cublas_inverse<float>(matSize, numMats);
return 0;
}
#endif
|
45967fd43377afb6de5cb3925d88f57a6d7e7043.cu
|
// ============ Matrix inversion using cuBLAS library ============ //
//
// To compile as a standalone test program:
//
// 1. Make sure libcublas.so is in the search path
// 2. cd to build/ directory
// 3. nvcc -o cuda_inverse -arch=sm_35 -lcublas -DCUDA_TEST_MAIN
// ../src/Numerics/CUDA/cuda_inverse.cu
//
// =============================================================== //
#include <cstdio>
#include <unistd.h>
#include <sstream>
#include <vector>
#include <iostream>
#include <cuda.h>
#include <cublas_v2.h>
#define CONVERT_BS 256
#define INVERSE_BS 16
void
checkCUDAError (cudaError_t err, const char *funcName)
{
if (err != cudaSuccess)
{
fprintf(stderr, "CUDA error in %s \n", funcName);
fprintf(stderr, "CUDA error message : %s \n", cudaGetErrorString(err));
fflush(stderr);
abort();
}
}
void
checkCublasError(cublasStatus_t status, const char *funcName)
{
if (status != CUBLAS_STATUS_SUCCESS)
{
fprintf(stderr, "CUBLAS error in %s \n", funcName);
fprintf(stderr, "CUBLAS error message: ");
switch (status)
{
case CUBLAS_STATUS_NOT_INITIALIZED:
fprintf(stderr, "CUBLAS_STATUS_NOT_INITIALIZED\n");
break;
case CUBLAS_STATUS_ALLOC_FAILED:
fprintf(stderr, "CUBLAS_STATUS_ALLOC_FAILED\n");
break;
case CUBLAS_STATUS_INVALID_VALUE:
fprintf(stderr, "CUBLAS_STATUS_INVALID_VALUE\n");
break;
case CUBLAS_STATUS_ARCH_MISMATCH:
fprintf(stderr, "CUBLAS_STATUS_ARCH_MISMATCH\n");
break;
case CUBLAS_STATUS_MAPPING_ERROR:
fprintf(stderr, "CUBLAS_STATUS_MAPPING_ERROR\n");
break;
case CUBLAS_STATUS_EXECUTION_FAILED:
fprintf(stderr, "CUBLAS_STATUS_EXECUTION_FAILED\n");
break;
case CUBLAS_STATUS_INTERNAL_ERROR:
fprintf(stderr, "CUBLAS_STATUS_INTERNAL_ERROR\n");
break;
#if (CUDA_VERSION >= 6050)
case CUBLAS_STATUS_NOT_SUPPORTED:
fprintf(stderr, "CUBLAS_STATUS_NOT_SUPPORTED\n");
break;
case CUBLAS_STATUS_LICENSE_ERROR:
fprintf(stderr, "CUBLAS_STATUS_LICENSE_ERROR\n");
break;
#endif
default:
fprintf(stderr, "unknown\n");
}
fflush(stderr);
abort();
}
}
// Convert matrix elements from one type (Tsrc) in the source matrix to
// another type (Tdest) and put them in the destination matrix
// (assumed src and dest have the same dimensions)
template <typename Tdest, typename Tsrc>
__global__ void
convert (Tdest **dest_list, Tsrc **src_list, int len)
{
__shared__ Tsrc *mysrc;
__shared__ Tdest *mydest;
if (threadIdx.x == 0)
{
mysrc = src_list[blockIdx.y];
mydest = dest_list[blockIdx.y];
}
__syncthreads();
int i = blockIdx.x * CONVERT_BS + threadIdx.x;
if (i < len)
mydest[i] = (Tdest) mysrc[i];
}
// Two matrix inversion functions
// 1. for float matrices
// useHigherPrecision = false --> single precision operations
// useHigherPrecision = true --> double precision operations
void
cublas_inverse (cublasHandle_t handle,
float *Alist_d[], float *Ainvlist_d[],
float *AWorklist_d[], float *AinvWorklist_d[],
int N, int rowStride, int numMats,
bool useHigherPrecision)
{
cudaError_t err;
cublasStatus_t status;
// Info array tells if a matrix inversion is successful
// = 0 : successful
// = k : U(k,k) = 0; inversion failed
int *infoArray;
err = cudaMalloc((void**) &infoArray, numMats * sizeof(int));
checkCUDAError(err, "Failed to allocate memory for infoArray in cublas_inverse (cudaMalloc)");
// If double precision operations are desired...
if (useHigherPrecision)
{
// (i) convert elements in Alist from float to double, put them in AWorklist
dim3 dimBlockConvert (CONVERT_BS);
dim3 dimGridConvert ((N*rowStride + (CONVERT_BS-1)) / CONVERT_BS, numMats);
convert <<< dimGridConvert, dimBlockConvert >>> ((double**)AWorklist_d, Alist_d, N*rowStride);
// (ii) call cublas to do matrix inversion
// LU decomposition
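// (the NULL pivot-array argument requests batched LU factorization without partial pivoting)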
status = cublasDgetrfBatched(handle, N, (double**)AWorklist_d, rowStride, NULL, infoArray, numMats);
checkCublasError(status, "Problem in LU factorization (cublasDgetrfBatched)");
// Inversion
status = cublasDgetriBatched(handle, N, (double**)AWorklist_d, rowStride, NULL,
(double**)AinvWorklist_d, rowStride, infoArray, numMats);
checkCublasError(status, "Problem in matrix inversion (cublasDgetriBatched)");
// (iii) convert results back to single precision
convert <<< dimGridConvert, dimBlockConvert >>> (Ainvlist_d, (double**)AinvWorklist_d, N*rowStride);
}
// else, carry out single precision operations
else
{
// Call cublas to do matrix inversion
// LU decomposition
status = cublasSgetrfBatched(handle, N, Alist_d, rowStride, NULL, infoArray, numMats);
checkCublasError(status, "Problem in LU factorization (cublasSgetrfBatched)");
// Inversion
status = cublasSgetriBatched(handle, N, Alist_d, rowStride, NULL,
Ainvlist_d, rowStride, infoArray, numMats);
checkCublasError(status, "Problem in matrix inversion (cublasSgetriBatched)");
}
cudaDeviceSynchronize();
// Free resources
cudaFree(infoArray);
}
// 2. for double matrices
void
cublas_inverse (cublasHandle_t handle,
double *Alist_d[], double *Ainvlist_d[],
double *AWorklist_d[], double *AinvWorklist_d[],
int N, int rowStride, int numMats,
bool useHigherPrecision)
{
cudaError_t err;
cublasStatus_t status;
// Info array tells if a matrix inversion is successful
// = 0 : successful
// = k : U(k,k) = 0; inversion failed
int *infoArray;
err = cudaMalloc((void**) &infoArray, numMats * sizeof(int));
checkCUDAError(err, "Failed to allocate memory for infoArray in cublas_inverse (cudaMalloc)");
// Call cublas functions to do inversion
// LU decomposition
status = cublasDgetrfBatched(handle, N, Alist_d, rowStride, NULL, infoArray, numMats);
checkCublasError(status, "Problem in LU factorization (cublasDgetrfBatched)");
// Inversion
status = cublasDgetriBatched(handle, N, Alist_d, rowStride, NULL,
Ainvlist_d, rowStride, infoArray, numMats);
checkCublasError(status, "Problem in matrix inversion (cublasDgetriBatched)");
cudaDeviceSynchronize();
cudaFree(infoArray);
}
//////////////////////////////////////////////////////
// Test routines //
//////////////////////////////////////////////////////
#ifdef CUDA_TEST_MAIN
template<typename T>
void
test_cublas_inverse(int matSize, int numMats)
{
cudaError_t err;
// Initialize cublas
cublasHandle_t handle;
cublasStatus_t status;
status = cublasCreate(&handle);
checkCublasError(status, "Failed to create cublas handle (cublasCreate)");
srand48((long) 12394);
int N = matSize;
int row_stride = (matSize+15) / 16 * 16;
T **Alist, **AWorklist;
T **Alist_d, **AWorklist_d;
T **Clist, **CWorklist;
T **Clist_d, **CWorklist_d;
// Allocate arrays of pointers (one set on host, one set on device)
// pointing to the starting address (on device) of each matrix and its buffer
// (similar to DiracDeterminantCUDA)
Alist = (T**) malloc(numMats * sizeof(T*));
err = cudaMalloc((void**) &Alist_d, numMats * sizeof(T*));
checkCUDAError(err, "Failed to allocate memory for Alist_d in test_cublas_inverse (cudaMalloc)");
AWorklist = (T**) malloc(numMats * sizeof(T*));
err = cudaMalloc((void**) &AWorklist_d, numMats * sizeof(T*));
checkCUDAError(err, "Failed to allocate memory for AWorklist_d in test_cublas_inverse (cudaMalloc)");
Clist = (T**) malloc(numMats * sizeof(T*));
err = cudaMalloc((void**) &Clist_d, numMats * sizeof(T*));
checkCUDAError(err, "Failed to allocate memory for Clist_d in test_cublas_inverse (cudaMalloc)");
CWorklist = (T**) malloc(numMats * sizeof(T*));
err = cudaMalloc((void**) &CWorklist_d, numMats * sizeof(T*));
checkCUDAError(err, "Failed to allocate memory for CWorklist_d in test_cublas_inverse (cudaMalloc)");
// Generate matrices filled with random numbers
T* A = (T*) malloc(sizeof(T) * numMats * N * row_stride);
for (int j=0; j<numMats; j++)
for (int i=0; i<N*row_stride; i++)
A[j*N*row_stride+i] = 1.0 * (drand48() - 0.5);
// Allocate memory on device for each matrix
for (int mat=0; mat<numMats; mat++)
{
err = cudaMalloc((void**) &(Alist[mat]), N * row_stride * sizeof(T));
checkCUDAError(err, "Failed to allocate memory for Alist[mat] in test_cublas_inverse (cudaMalloc)");
err = cudaMemcpyAsync(Alist[mat], &A[mat*N*row_stride], N * row_stride * sizeof(T), cudaMemcpyHostToDevice);
checkCUDAError(err, "Failed to copy from A to Alist[mat] (cudaMemcpyAsync, HostToDevice)");
err = cudaMalloc((void**) &(AWorklist[mat]), 2 * N * row_stride * sizeof(T));
checkCUDAError(err, "Failed to allocate memory for AWorklist[mat] in test_cublas_inverse (cudaMalloc)");
err = cudaMalloc((void**) &(Clist[mat]), N * row_stride * sizeof(T));
checkCUDAError(err, "Failed to allocate memory for Clist[mat] in test_cublas_inverse (cudaMalloc)");
err = cudaMalloc((void**) &(CWorklist[mat]), 2 * N * row_stride * sizeof(T));
checkCUDAError(err, "Failed to allocate memory for CWorklist[mat] in test_cublas_inverse (cudaMalloc)");
}
// Copy the starting address of each matrix
err = cudaMemcpyAsync (Alist_d, Alist, numMats * sizeof(T*), cudaMemcpyHostToDevice);
checkCUDAError(err, "Failed to copy from Alist to Alist_d (cudaMemcpyAsync, HostToDevice)");
err = cudaMemcpyAsync (AWorklist_d, AWorklist, numMats * sizeof(T*), cudaMemcpyHostToDevice);
checkCUDAError(err, "Failed to copy from AWorklist to AWorklist_d (cudaMemcpyAsync, HostToDevice)");
err = cudaMemcpyAsync (Clist_d, Clist, numMats * sizeof(T*), cudaMemcpyHostToDevice);
checkCUDAError(err, "Failed to copy from Clist to Clist_d (cudaMemcpyAsync, HostToDevice)");
err = cudaMemcpyAsync (CWorklist_d, CWorklist, numMats * sizeof(T*), cudaMemcpyHostToDevice);
checkCUDAError(err, "Failed to copy from CWorklist to CWorklist_d (cudaMemcpyAsync, HostToDevice)");
cudaDeviceSynchronize();
clock_t start = clock();
// Call cublas functions to do inversion
cublas_inverse (handle, Alist_d, Clist_d, AWorklist_d, CWorklist_d, N, row_stride, numMats, true);
cudaDeviceSynchronize();
clock_t end = clock();
double t = double(end-start) / double(CLOCKS_PER_SEC) / double(numMats);
double rate = 1.0 / t;
fprintf (stderr, "Rate is %1.3f matrix inversions per second.\n",
rate);
// Copy A^(-1) back to host memory Ainv; one matrix at a time
// Calculate error of A^(-1)A from unit matrix I
for (int mat=0; mat<numMats; mat++)
{
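// Note: Ainv is a variable-length array (a compiler extension in C++); very large matrices could overflow the stack, in which case heap allocation would be safer.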
T Ainv[N*row_stride];
err = cudaMemcpy(Ainv, Clist[mat], N * row_stride * sizeof(T), cudaMemcpyDeviceToHost);
checkCUDAError(err, "Failed to copy from Clist[mat] to Ainv (cudaMemcpy, DeviceToHost)");
double error = 0.0;
for (int i=0; i<N; i++)
for (int j=0; j<N; j++)
{
double val = 0.0;
for (int k=0; k<N; k++)
val += Ainv[i*row_stride+k] * A[mat*N*row_stride+k*row_stride+j];
double diff = (i==j) ? (1.0f - val) : val;
error += diff * diff;
}
fprintf (stderr, "error = %1.8e\n", sqrt(error/(double)(N*N)));
}
// Finalize cublas
status = cublasDestroy(handle);
checkCublasError(status, "Failed to destroy cublas handle (cublasDestroy)");
// Free resources on both host and device
for (int mat=0; mat<numMats; mat++)
{
cudaFree(Alist[mat]);
cudaFree(Clist[mat]);
cudaFree(AWorklist[mat]);
cudaFree(CWorklist[mat]);
}
cudaFree(Alist_d);
cudaFree(Clist_d);
cudaFree(AWorklist_d);
cudaFree(CWorklist_d);
free(Alist);
free(Clist);
free(AWorklist);
free(CWorklist);
free(A);
// Reset device. Required for memory leak debugging
cudaDeviceReset();
}
int main(int argc, char** argv)
{
int matSize = 0;
int numMats = 0;
if (argc == 3) {
matSize = atoi(argv[1]);
numMats = atoi(argv[2]);
}
else {
printf("Usage: ./cuda_inverse [matrix size] [number of matrices]\n");
exit(1);
}
test_cublas_inverse<double>(matSize, numMats);
test_cublas_inverse<float>(matSize, numMats);
return 0;
}
#endif
|
59089e945b0fc615f836fedf46888e7b726f2fc6.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <iostream>
#include <vector>
#include "driver/nvidia_tensorrt/converter/plugin/yolo_box.h"
namespace nnadapter {
namespace nvidia_tensorrt {
YoloBoxPluginDynamic::YoloBoxPluginDynamic(const std::vector<int32_t>& anchors,
int class_num,
float conf_thresh,
int downsample_ratio,
bool clip_bbox,
float scale_x_y,
bool iou_aware,
float iou_aware_factor)
: anchors_(anchors),
class_num_(class_num),
conf_thresh_(conf_thresh),
downsample_ratio_(downsample_ratio),
clip_bbox_(clip_bbox),
scale_x_y_(scale_x_y),
iou_aware_(iou_aware),
iou_aware_factor_(iou_aware_factor) {}
YoloBoxPluginDynamic::YoloBoxPluginDynamic(const void* serial_data,
size_t serial_length) {
Deserialize(&serial_data, &serial_length, &anchors_);
Deserialize(&serial_data, &serial_length, &class_num_);
Deserialize(&serial_data, &serial_length, &conf_thresh_);
Deserialize(&serial_data, &serial_length, &downsample_ratio_);
Deserialize(&serial_data, &serial_length, &clip_bbox_);
Deserialize(&serial_data, &serial_length, &scale_x_y_);
Deserialize(&serial_data, &serial_length, &iou_aware_);
Deserialize(&serial_data, &serial_length, &iou_aware_factor_);
}
nvinfer1::IPluginV2DynamicExt* YoloBoxPluginDynamic::clone() const
TRT_NOEXCEPT {
return new YoloBoxPluginDynamic(anchors_,
class_num_,
conf_thresh_,
downsample_ratio_,
clip_bbox_,
scale_x_y_,
iou_aware_,
iou_aware_factor_);
}
nvinfer1::DimsExprs YoloBoxPluginDynamic::getOutputDimensions(
int32_t output_index,
const nvinfer1::DimsExprs* inputs,
int32_t nb_inputs,
nvinfer1::IExprBuilder& expr_builder) TRT_NOEXCEPT {
NNADAPTER_CHECK(inputs);
nvinfer1::DimsExprs outdims;
outdims.nbDims = 3;
outdims.d[0] = inputs[0].d[0];
int h = inputs[0].d[2]->getConstantValue();
int w = inputs[0].d[3]->getConstantValue();
int boxnum = h * w * anchors_.size() / 2;
outdims.d[1] = expr_builder.constant(boxnum);
if (output_index == 0) {
outdims.d[2] = expr_builder.constant(4);
} else if (output_index == 1) {
outdims.d[2] = expr_builder.constant(class_num_);
}
return outdims;
}
template <typename T>
inline __device__ T Sigmoid(T x) {
return (T)1. / ((T)1. + ::exp(-x));
}
template <typename T>
inline __device__ void GetYoloBox(T* box,
const T* x,
const int* anchors,
int i,
int j,
int an_idx,
int grid_size,
int input_size,
int index,
int stride,
int img_height,
int img_width,
T scale,
T bias) {
box[0] = (i + Sigmoid(x[index]) * scale + bias) * img_width / grid_size;
box[1] =
(j + Sigmoid(x[index + stride]) * scale + bias) * img_height / grid_size;
box[2] = expf(x[index + 2 * stride]) * anchors[2 * an_idx] * img_width /
input_size;
box[3] = expf(x[index + 3 * stride]) * anchors[2 * an_idx + 1] * img_height /
input_size;
}
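// Flattened offset into the [batch, an_num, 5 + class_num, h, w] input tensor:
// 'entry' selects the channel (0-3 box, 4 objectness, 5+ class scores) for the
// given anchor an_idx at spatial position hw_idx = y * w + x.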
inline __device__ int GetEntryIndex(int batch,
int an_idx,
int hw_idx,
int an_num,
int an_stride,
int stride,
int entry) {
return (batch * an_num + an_idx) * an_stride + entry * stride + hw_idx;
}
template <typename T>
inline __device__ void CalcDetectionBox(T* boxes,
T* box,
const int box_idx,
const int img_height,
const int img_width,
bool clip_bbox) {
boxes[box_idx] = box[0] - box[2] / 2.f;
boxes[box_idx + 1] = box[1] - box[3] / 2.f;
boxes[box_idx + 2] = box[0] + box[2] / 2.f;
boxes[box_idx + 3] = box[1] + box[3] / 2.f;
if (clip_bbox) {
boxes[box_idx] = boxes[box_idx] > 0 ? boxes[box_idx] : static_cast<T>(0);
boxes[box_idx + 1] =
boxes[box_idx + 1] > 0 ? boxes[box_idx + 1] : static_cast<T>(0);
boxes[box_idx + 2] = boxes[box_idx + 2] < img_width - 1
? boxes[box_idx + 2]
: static_cast<T>(img_width - 1);
boxes[box_idx + 3] = boxes[box_idx + 3] < img_height - 1
? boxes[box_idx + 3]
: static_cast<T>(img_height - 1);
}
}
template <typename T>
inline __device__ void CalcLabelScore(T* scores,
const T* input,
const int label_idx,
const int score_idx,
const int class_num,
const T conf,
const int stride) {
for (int i = 0; i < class_num; i++) {
scores[score_idx + i] = conf * Sigmoid(input[label_idx + i * stride]);
}
}
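// One thread per (batch, anchor, y, x) cell: zero its output slots, compute the
// sigmoid objectness score and exit early if it is below conf_thresh, otherwise
// decode the box to image coordinates and write the (optionally clipped) box
// plus the per-class scores.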
template <typename T, unsigned TPB>
__global__ void yolobox_kernel_value(int n,
int h,
int w,
const float* input_data,
const int* imgsize_data,
float* boxes_data,
float* scores_data,
const int* anchors_data,
int anchor_size,
int class_num,
float conf_thresh,
int downsample_ratio,
bool clip_bbox,
float scale_x_y,
bool iou_aware,
float iou_aware_factor) {
int idx = blockIdx.x * TPB + threadIdx.x;
T bias = static_cast<T>(-0.5 * (scale_x_y - 1.));
const int b_num = anchor_size / 2 * h * w;
const int an_num = anchor_size / 2;
int X_size = downsample_ratio * h;
const int stride = h * w;
const int an_stride = (class_num + 5) * stride;
if (idx < n * b_num) {
memset(&boxes_data[idx * 4], 0, 4 * sizeof(T));
memset(&scores_data[idx * class_num], 0, class_num * sizeof(T));
T box[4];
int i = idx / b_num; // batch id
int j = (idx % b_num) / (h * w); // anchor id
int k = ((idx % b_num) % (h * w)) / w; // h id
int l = ((idx % b_num) % (h * w)) % w; // w id
int img_height = imgsize_data[2 * i];
int img_width = imgsize_data[2 * i + 1];
int obj_idx = GetEntryIndex(i, j, k * w + l, an_num, an_stride, stride, 4);
T conf = Sigmoid(input_data[obj_idx]);
if (conf < conf_thresh) {
return;
}
int box_idx = GetEntryIndex(i, j, k * w + l, an_num, an_stride, stride, 0);
GetYoloBox(box,
input_data,
anchors_data,
l,
k,
j,
h,
X_size,
box_idx,
stride,
img_height,
img_width,
scale_x_y,
bias);
box_idx = (i * b_num + j * stride + k * w + l) * 4;
CalcDetectionBox(
boxes_data, box, box_idx, img_height, img_width, clip_bbox);
int label_idx =
GetEntryIndex(i, j, k * w + l, an_num, an_stride, stride, 5);
int score_idx = (i * b_num + j * stride + k * w + l) * class_num;
CalcLabelScore(
scores_data, input_data, label_idx, score_idx, class_num, conf, stride);
}
}
int32_t YoloBoxPluginDynamic::enqueue(
const nvinfer1::PluginTensorDesc* input_desc,
const nvinfer1::PluginTensorDesc* output_desc,
void const* const* inputs,
void* const* outputs,
void* workspace,
hipStream_t stream) TRT_NOEXCEPT {
const int n = input_desc[0].dims.d[0];
const int h = input_desc[0].dims.d[2];
const int w = input_desc[0].dims.d[3];
const int b_num = output_desc[0].dims.d[1];
const int block_size = 256;
const int grid_size = (n * b_num + block_size - 1) / block_size;
const float* input_data = static_cast<const float*>(inputs[0]);
const int* imgsize_data = static_cast<const int*>(inputs[1]);
float* boxes_data = static_cast<float*>(outputs[0]);
float* scores_data = static_cast<float*>(outputs[1]);
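  // Stage the host-side anchor list in a temporary device buffer for this launch
  // (allocated, copied synchronously, and freed on every enqueue call).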
int* dev_anchor_data;
hipMalloc(reinterpret_cast<void**>(&dev_anchor_data),
anchors_.size() * sizeof(int));
hipMemcpy(dev_anchor_data,
anchors_.data(),
anchors_.size() * sizeof(int),
hipMemcpyHostToDevice);
hipLaunchKernelGGL(( yolobox_kernel_value<float, block_size>), dim3(grid_size), dim3(block_size), 0, stream,
n,
h,
w,
input_data,
imgsize_data,
boxes_data,
scores_data,
dev_anchor_data,
anchors_.size(),
class_num_,
conf_thresh_,
downsample_ratio_,
clip_bbox_,
scale_x_y_,
iou_aware_,
iou_aware_factor_);
hipFree(dev_anchor_data);
return 0;
}
size_t YoloBoxPluginDynamic::getSerializationSize() const TRT_NOEXCEPT {
return SerializedSize(anchors_) + sizeof(class_num_) + sizeof(conf_thresh_) +
sizeof(downsample_ratio_) + sizeof(clip_bbox_) + sizeof(scale_x_y_) +
sizeof(iou_aware_) + sizeof(iou_aware_factor_);
}
void YoloBoxPluginDynamic::serialize(void* buffer) const TRT_NOEXCEPT {
Serialize(&buffer, anchors_);
Serialize(&buffer, class_num_);
Serialize(&buffer, conf_thresh_);
Serialize(&buffer, downsample_ratio_);
Serialize(&buffer, clip_bbox_);
Serialize(&buffer, scale_x_y_);
Serialize(&buffer, iou_aware_);
Serialize(&buffer, iou_aware_factor_);
}
int32_t YoloBoxPluginDynamic::getNbOutputs() const TRT_NOEXCEPT { return 2; }
nvinfer1::DataType YoloBoxPluginDynamic::getOutputDataType(
int32_t index,
const nvinfer1::DataType* input_types,
int32_t nb_inputs) const TRT_NOEXCEPT {
return input_types[0];
}
REGISTER_NNADAPTER_TENSORRT_PLUGIN(YoloBoxPluginDynamic,
YoloBoxPluginDynamicCreator,
"yolo_box_plugin_dynamic");
} // namespace nvidia_tensorrt
} // namespace nnadapter
|
59089e945b0fc615f836fedf46888e7b726f2fc6.cu
|
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <iostream>
#include <vector>
#include "driver/nvidia_tensorrt/converter/plugin/yolo_box.h"
namespace nnadapter {
namespace nvidia_tensorrt {
YoloBoxPluginDynamic::YoloBoxPluginDynamic(const std::vector<int32_t>& anchors,
int class_num,
float conf_thresh,
int downsample_ratio,
bool clip_bbox,
float scale_x_y,
bool iou_aware,
float iou_aware_factor)
: anchors_(anchors),
class_num_(class_num),
conf_thresh_(conf_thresh),
downsample_ratio_(downsample_ratio),
clip_bbox_(clip_bbox),
scale_x_y_(scale_x_y),
iou_aware_(iou_aware),
iou_aware_factor_(iou_aware_factor) {}
YoloBoxPluginDynamic::YoloBoxPluginDynamic(const void* serial_data,
size_t serial_length) {
Deserialize(&serial_data, &serial_length, &anchors_);
Deserialize(&serial_data, &serial_length, &class_num_);
Deserialize(&serial_data, &serial_length, &conf_thresh_);
Deserialize(&serial_data, &serial_length, &downsample_ratio_);
Deserialize(&serial_data, &serial_length, &clip_bbox_);
Deserialize(&serial_data, &serial_length, &scale_x_y_);
Deserialize(&serial_data, &serial_length, &iou_aware_);
Deserialize(&serial_data, &serial_length, &iou_aware_factor_);
}
nvinfer1::IPluginV2DynamicExt* YoloBoxPluginDynamic::clone() const
TRT_NOEXCEPT {
return new YoloBoxPluginDynamic(anchors_,
class_num_,
conf_thresh_,
downsample_ratio_,
clip_bbox_,
scale_x_y_,
iou_aware_,
iou_aware_factor_);
}
nvinfer1::DimsExprs YoloBoxPluginDynamic::getOutputDimensions(
int32_t output_index,
const nvinfer1::DimsExprs* inputs,
int32_t nb_inputs,
nvinfer1::IExprBuilder& expr_builder) TRT_NOEXCEPT {
NNADAPTER_CHECK(inputs);
nvinfer1::DimsExprs outdims;
outdims.nbDims = 3;
outdims.d[0] = inputs[0].d[0];
int h = inputs[0].d[2]->getConstantValue();
int w = inputs[0].d[3]->getConstantValue();
int boxnum = h * w * anchors_.size() / 2;
outdims.d[1] = expr_builder.constant(boxnum);
if (output_index == 0) {
outdims.d[2] = expr_builder.constant(4);
} else if (output_index == 1) {
outdims.d[2] = expr_builder.constant(class_num_);
}
return outdims;
}
template <typename T>
inline __device__ T Sigmoid(T x) {
return (T)1. / ((T)1. + std::exp(-x));
}
template <typename T>
inline __device__ void GetYoloBox(T* box,
const T* x,
const int* anchors,
int i,
int j,
int an_idx,
int grid_size,
int input_size,
int index,
int stride,
int img_height,
int img_width,
T scale,
T bias) {
box[0] = (i + Sigmoid(x[index]) * scale + bias) * img_width / grid_size;
box[1] =
(j + Sigmoid(x[index + stride]) * scale + bias) * img_height / grid_size;
box[2] = expf(x[index + 2 * stride]) * anchors[2 * an_idx] * img_width /
input_size;
box[3] = expf(x[index + 3 * stride]) * anchors[2 * an_idx + 1] * img_height /
input_size;
}
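// Flattened offset into the [batch, an_num, 5 + class_num, h, w] input tensor:
// 'entry' selects the channel (0-3 box, 4 objectness, 5+ class scores) for the
// given anchor an_idx at spatial position hw_idx = y * w + x.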
inline __device__ int GetEntryIndex(int batch,
int an_idx,
int hw_idx,
int an_num,
int an_stride,
int stride,
int entry) {
return (batch * an_num + an_idx) * an_stride + entry * stride + hw_idx;
}
template <typename T>
inline __device__ void CalcDetectionBox(T* boxes,
T* box,
const int box_idx,
const int img_height,
const int img_width,
bool clip_bbox) {
boxes[box_idx] = box[0] - box[2] / 2.f;
boxes[box_idx + 1] = box[1] - box[3] / 2.f;
boxes[box_idx + 2] = box[0] + box[2] / 2.f;
boxes[box_idx + 3] = box[1] + box[3] / 2.f;
if (clip_bbox) {
boxes[box_idx] = boxes[box_idx] > 0 ? boxes[box_idx] : static_cast<T>(0);
boxes[box_idx + 1] =
boxes[box_idx + 1] > 0 ? boxes[box_idx + 1] : static_cast<T>(0);
boxes[box_idx + 2] = boxes[box_idx + 2] < img_width - 1
? boxes[box_idx + 2]
: static_cast<T>(img_width - 1);
boxes[box_idx + 3] = boxes[box_idx + 3] < img_height - 1
? boxes[box_idx + 3]
: static_cast<T>(img_height - 1);
}
}
template <typename T>
inline __device__ void CalcLabelScore(T* scores,
const T* input,
const int label_idx,
const int score_idx,
const int class_num,
const T conf,
const int stride) {
for (int i = 0; i < class_num; i++) {
scores[score_idx + i] = conf * Sigmoid(input[label_idx + i * stride]);
}
}
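// One thread per (batch, anchor, y, x) cell: zero its output slots, compute the
// sigmoid objectness score and exit early if it is below conf_thresh, otherwise
// decode the box to image coordinates and write the (optionally clipped) box
// plus the per-class scores.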
template <typename T, unsigned TPB>
__global__ void yolobox_kernel_value(int n,
int h,
int w,
const float* input_data,
const int* imgsize_data,
float* boxes_data,
float* scores_data,
const int* anchors_data,
int anchor_size,
int class_num,
float conf_thresh,
int downsample_ratio,
bool clip_bbox,
float scale_x_y,
bool iou_aware,
float iou_aware_factor) {
int idx = blockIdx.x * TPB + threadIdx.x;
T bias = static_cast<T>(-0.5 * (scale_x_y - 1.));
const int b_num = anchor_size / 2 * h * w;
const int an_num = anchor_size / 2;
int X_size = downsample_ratio * h;
const int stride = h * w;
const int an_stride = (class_num + 5) * stride;
if (idx < n * b_num) {
memset(&boxes_data[idx * 4], 0, 4 * sizeof(T));
memset(&scores_data[idx * class_num], 0, class_num * sizeof(T));
T box[4];
int i = idx / b_num; // batch id
int j = (idx % b_num) / (h * w); // anchor id
int k = ((idx % b_num) % (h * w)) / w; // h id
int l = ((idx % b_num) % (h * w)) % w; // w id
int img_height = imgsize_data[2 * i];
int img_width = imgsize_data[2 * i + 1];
int obj_idx = GetEntryIndex(i, j, k * w + l, an_num, an_stride, stride, 4);
T conf = Sigmoid(input_data[obj_idx]);
if (conf < conf_thresh) {
return;
}
int box_idx = GetEntryIndex(i, j, k * w + l, an_num, an_stride, stride, 0);
GetYoloBox(box,
input_data,
anchors_data,
l,
k,
j,
h,
X_size,
box_idx,
stride,
img_height,
img_width,
scale_x_y,
bias);
box_idx = (i * b_num + j * stride + k * w + l) * 4;
CalcDetectionBox(
boxes_data, box, box_idx, img_height, img_width, clip_bbox);
int label_idx =
GetEntryIndex(i, j, k * w + l, an_num, an_stride, stride, 5);
int score_idx = (i * b_num + j * stride + k * w + l) * class_num;
CalcLabelScore(
scores_data, input_data, label_idx, score_idx, class_num, conf, stride);
}
}
int32_t YoloBoxPluginDynamic::enqueue(
const nvinfer1::PluginTensorDesc* input_desc,
const nvinfer1::PluginTensorDesc* output_desc,
void const* const* inputs,
void* const* outputs,
void* workspace,
cudaStream_t stream) TRT_NOEXCEPT {
const int n = input_desc[0].dims.d[0];
const int h = input_desc[0].dims.d[2];
const int w = input_desc[0].dims.d[3];
const int b_num = output_desc[0].dims.d[1];
const int block_size = 256;
const int grid_size = (n * b_num + block_size - 1) / block_size;
const float* input_data = static_cast<const float*>(inputs[0]);
const int* imgsize_data = static_cast<const int*>(inputs[1]);
float* boxes_data = static_cast<float*>(outputs[0]);
float* scores_data = static_cast<float*>(outputs[1]);
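  // Stage the host-side anchor list in a temporary device buffer for this launch
  // (allocated, copied synchronously, and freed on every enqueue call).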
int* dev_anchor_data;
cudaMalloc(reinterpret_cast<void**>(&dev_anchor_data),
anchors_.size() * sizeof(int));
cudaMemcpy(dev_anchor_data,
anchors_.data(),
anchors_.size() * sizeof(int),
cudaMemcpyHostToDevice);
yolobox_kernel_value<float, block_size><<<grid_size, block_size, 0, stream>>>(
n,
h,
w,
input_data,
imgsize_data,
boxes_data,
scores_data,
dev_anchor_data,
anchors_.size(),
class_num_,
conf_thresh_,
downsample_ratio_,
clip_bbox_,
scale_x_y_,
iou_aware_,
iou_aware_factor_);
cudaFree(dev_anchor_data);
return 0;
}
size_t YoloBoxPluginDynamic::getSerializationSize() const TRT_NOEXCEPT {
return SerializedSize(anchors_) + sizeof(class_num_) + sizeof(conf_thresh_) +
sizeof(downsample_ratio_) + sizeof(clip_bbox_) + sizeof(scale_x_y_) +
sizeof(iou_aware_) + sizeof(iou_aware_factor_);
}
void YoloBoxPluginDynamic::serialize(void* buffer) const TRT_NOEXCEPT {
Serialize(&buffer, anchors_);
Serialize(&buffer, class_num_);
Serialize(&buffer, conf_thresh_);
Serialize(&buffer, downsample_ratio_);
Serialize(&buffer, clip_bbox_);
Serialize(&buffer, scale_x_y_);
Serialize(&buffer, iou_aware_);
Serialize(&buffer, iou_aware_factor_);
}
int32_t YoloBoxPluginDynamic::getNbOutputs() const TRT_NOEXCEPT { return 2; }
nvinfer1::DataType YoloBoxPluginDynamic::getOutputDataType(
int32_t index,
const nvinfer1::DataType* input_types,
int32_t nb_inputs) const TRT_NOEXCEPT {
return input_types[0];
}
REGISTER_NNADAPTER_TENSORRT_PLUGIN(YoloBoxPluginDynamic,
YoloBoxPluginDynamicCreator,
"yolo_box_plugin_dynamic");
} // namespace nvidia_tensorrt
} // namespace nnadapter
|
d5c057f635fde364177a28cbe0cf30d49da83eed.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2020 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO LICENSEE:
*
* This source code and/or documentation ("Licensed Deliverables") are
* subject to NVIDIA intellectual property rights under U.S. and
* international Copyright laws.
*
* These Licensed Deliverables contained herein is PROPRIETARY and
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
* conditions of a form of NVIDIA software license agreement by and
* between NVIDIA and Licensee ("License Agreement") or electronically
* accepted by Licensee. Notwithstanding any terms or conditions to
* the contrary in the License Agreement, reproduction or disclosure
* of the Licensed Deliverables to any third party without the express
* written consent of NVIDIA is prohibited.
*
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
* LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
* SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
* PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
* NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
* DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
* NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
* LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
* SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
* DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
* WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
* OF THESE LICENSED DELIVERABLES.
*
* U.S. Government End Users. These Licensed Deliverables are a
* "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
* 1995), consisting of "commercial computer software" and "commercial
* computer software documentation" as such terms are used in 48
* C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
* only as a commercial end item. Consistent with 48 C.F.R.12.212 and
* 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
* U.S. Government End Users acquire the Licensed Deliverables with
* only those rights set forth herein.
*
* Any use of the Licensed Deliverables in individual and commercial
* software must include, in the user documentation and internal
* comments to the code, the above Disclaimer and U.S. Government End
* Users Notice.
*/
#include <cstdio>
#include <cstdlib>
#include <vector>
#include <rocblas.h>
#include <hip/hip_runtime.h>
#include "cublas_utils.h"
using data_type = hipDoubleComplex;
int main(int argc, char *argv[]) {
hipblasHandle_t cublasH = NULL;
hipStream_t stream = NULL;
/*
* A = | 1.1 + 1.2j | 2.3 + 2.4j | 3.5 + 3.6j | 4.7 + 4.8j |
* B = | 5.1 + 5.2j | 6.3 + 6.4j | 7.5 + 7.6j | 8.7 + 8.8j |
*/
const std::vector<data_type> A = {{1.1, 1.2}, {2.3, 2.4}, {3.5, 3.6}, {4.7, 4.8}};
const std::vector<data_type> B = {{5.1, 5.2}, {6.3, 6.4}, {7.5, 7.6}, {8.7, 8.8}};
const int incx = 1;
const int incy = 1;
data_type result = {0.0, 0.0};
data_type *d_A = nullptr;
data_type *d_B = nullptr;
printf("A\n");
print_vector(A.size(), A.data());
printf("=====\n");
printf("B\n");
print_vector(B.size(), B.data());
printf("=====\n");
/* step 1: create cublas handle, bind a stream */
CUBLAS_CHECK(hipblasCreate(&cublasH));
CUDA_CHECK(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking));
CUBLAS_CHECK(hipblasSetStream(cublasH, stream));
/* step 2: copy data to device */
CUDA_CHECK(hipMalloc(reinterpret_cast<void **>(&d_A), sizeof(data_type) * A.size()));
CUDA_CHECK(hipMalloc(reinterpret_cast<void **>(&d_B), sizeof(data_type) * B.size()));
CUDA_CHECK(hipMemcpyAsync(d_A, A.data(), sizeof(data_type) * A.size(), hipMemcpyHostToDevice,
stream));
CUDA_CHECK(hipMemcpyAsync(d_B, B.data(), sizeof(data_type) * B.size(), hipMemcpyHostToDevice,
stream));
/* step 3: compute */
CUBLAS_CHECK(hipblasDotcEx_v2(cublasH, A.size(), d_A, traits<data_type>::cuda_data_type, incx, d_B,
traits<data_type>::cuda_data_type, incy, &result,
traits<data_type>::cuda_data_type, traits<data_type>::cuda_data_type));
CUDA_CHECK(hipStreamSynchronize(stream));
/*
* result = 178.44+-1.60j
*/
printf("Result\n");
printf("%0.2f+%0.2fj\n", result.x, result.y);
printf("=====\n");
/* free resources */
CUDA_CHECK(hipFree(d_A));
CUDA_CHECK(hipFree(d_B));
CUBLAS_CHECK(hipblasDestroy(cublasH));
CUDA_CHECK(hipStreamDestroy(stream));
CUDA_CHECK(hipDeviceReset());
return EXIT_SUCCESS;
}
|
d5c057f635fde364177a28cbe0cf30d49da83eed.cu
|
/*
* Copyright 2020 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO LICENSEE:
*
* This source code and/or documentation ("Licensed Deliverables") are
* subject to NVIDIA intellectual property rights under U.S. and
* international Copyright laws.
*
* These Licensed Deliverables contained herein is PROPRIETARY and
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
* conditions of a form of NVIDIA software license agreement by and
* between NVIDIA and Licensee ("License Agreement") or electronically
* accepted by Licensee. Notwithstanding any terms or conditions to
* the contrary in the License Agreement, reproduction or disclosure
* of the Licensed Deliverables to any third party without the express
* written consent of NVIDIA is prohibited.
*
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
* LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
* SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
* PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
* NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
* DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
* NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
* LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
* SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
* DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
* WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
* OF THESE LICENSED DELIVERABLES.
*
* U.S. Government End Users. These Licensed Deliverables are a
* "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
* 1995), consisting of "commercial computer software" and "commercial
* computer software documentation" as such terms are used in 48
* C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
* only as a commercial end item. Consistent with 48 C.F.R.12.212 and
* 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
* U.S. Government End Users acquire the Licensed Deliverables with
* only those rights set forth herein.
*
* Any use of the Licensed Deliverables in individual and commercial
* software must include, in the user documentation and internal
* comments to the code, the above Disclaimer and U.S. Government End
* Users Notice.
*/
#include <cstdio>
#include <cstdlib>
#include <vector>
#include <cublas_v2.h>
#include <cuda_runtime.h>
#include "cublas_utils.h"
using data_type = cuDoubleComplex;
int main(int argc, char *argv[]) {
cublasHandle_t cublasH = NULL;
cudaStream_t stream = NULL;
/*
* A = | 1.1 + 1.2j | 2.3 + 2.4j | 3.5 + 3.6j | 4.7 + 4.8j |
* B = | 5.1 + 5.2j | 6.3 + 6.4j | 7.5 + 7.6j | 8.7 + 8.8j |
*/
const std::vector<data_type> A = {{1.1, 1.2}, {2.3, 2.4}, {3.5, 3.6}, {4.7, 4.8}};
const std::vector<data_type> B = {{5.1, 5.2}, {6.3, 6.4}, {7.5, 7.6}, {8.7, 8.8}};
const int incx = 1;
const int incy = 1;
data_type result = {0.0, 0.0};
data_type *d_A = nullptr;
data_type *d_B = nullptr;
printf("A\n");
print_vector(A.size(), A.data());
printf("=====\n");
printf("B\n");
print_vector(B.size(), B.data());
printf("=====\n");
/* step 1: create cublas handle, bind a stream */
CUBLAS_CHECK(cublasCreate(&cublasH));
CUDA_CHECK(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking));
CUBLAS_CHECK(cublasSetStream(cublasH, stream));
/* step 2: copy data to device */
CUDA_CHECK(cudaMalloc(reinterpret_cast<void **>(&d_A), sizeof(data_type) * A.size()));
CUDA_CHECK(cudaMalloc(reinterpret_cast<void **>(&d_B), sizeof(data_type) * B.size()));
CUDA_CHECK(cudaMemcpyAsync(d_A, A.data(), sizeof(data_type) * A.size(), cudaMemcpyHostToDevice,
stream));
CUDA_CHECK(cudaMemcpyAsync(d_B, B.data(), sizeof(data_type) * B.size(), cudaMemcpyHostToDevice,
stream));
/* step 3: compute */
CUBLAS_CHECK(cublasDotcEx(cublasH, A.size(), d_A, traits<data_type>::cuda_data_type, incx, d_B,
traits<data_type>::cuda_data_type, incy, &result,
traits<data_type>::cuda_data_type, traits<data_type>::cuda_data_type));
CUDA_CHECK(cudaStreamSynchronize(stream));
/*
* result = 178.44+-1.60j
*/
printf("Result\n");
printf("%0.2f+%0.2fj\n", result.x, result.y);
printf("=====\n");
/* free resources */
CUDA_CHECK(cudaFree(d_A));
CUDA_CHECK(cudaFree(d_B));
CUBLAS_CHECK(cublasDestroy(cublasH));
CUDA_CHECK(cudaStreamDestroy(stream));
CUDA_CHECK(cudaDeviceReset());
return EXIT_SUCCESS;
}
|
436d421e3f5609efc263018cbbcf4a054214a354.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
extern "C" {
#ifndef REAL
#define REAL float
#endif
#ifndef CAST
#define CAST(fun) fun ## f
#endif
#ifndef REAL2o3
#define REAL2o3 (REAL)0.6666666666666667
#endif
#ifndef REAL3o2
#define REAL3o2 (REAL)1.5
#endif
}
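// Element-wise modf over an sd x fd matrix stored column-major with leading
// dimension ld: c receives the fractional part of a, b receives the integral part.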
__global__ void ge_modf (const int sd, const int fd, const REAL* a, const int offset_a, const int ld_a, REAL* b, const int offset_b, const int ld_b, REAL* c, const int offset_c, const int ld_c) {
const int gid_0 = blockIdx.x * blockDim.x + threadIdx.x;
const int gid_1 = blockIdx.y * blockDim.y + threadIdx.y;
const bool valid = (gid_0 < sd) && (gid_1 < fd);
if (valid) {
c[offset_c + gid_0 + gid_1 * ld_c] =
CAST(modf)(a[offset_a + gid_0 + gid_1 * ld_a], &b[offset_b + gid_0 + gid_1 * ld_b]);
}
}
|
436d421e3f5609efc263018cbbcf4a054214a354.cu
|
#include "includes.h"
extern "C" {
#ifndef REAL
#define REAL float
#endif
#ifndef CAST
#define CAST(fun) fun ## f
#endif
#ifndef REAL2o3
#define REAL2o3 (REAL)0.6666666666666667
#endif
#ifndef REAL3o2
#define REAL3o2 (REAL)1.5
#endif
}
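// Element-wise modf over an sd x fd matrix stored column-major with leading
// dimension ld: c receives the fractional part of a, b receives the integral part.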
__global__ void ge_modf (const int sd, const int fd, const REAL* a, const int offset_a, const int ld_a, REAL* b, const int offset_b, const int ld_b, REAL* c, const int offset_c, const int ld_c) {
const int gid_0 = blockIdx.x * blockDim.x + threadIdx.x;
const int gid_1 = blockIdx.y * blockDim.y + threadIdx.y;
const bool valid = (gid_0 < sd) && (gid_1 < fd);
if (valid) {
c[offset_c + gid_0 + gid_1 * ld_c] =
CAST(modf)(a[offset_a + gid_0 + gid_1 * ld_a], &b[offset_b + gid_0 + gid_1 * ld_b]);
}
}
|
6c3e51a8f8aa51dd208753f340eef0d0dcd9f4aa.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 2008 BOROUJERDI Maxime. Tous droits reserves.
*/
#include <cstdlib>
#include <cstdio>
#include <cstring>
#include <cmath>
#include "makebmp.h"
/*#include <GL/glew.h>
#include <GL/glut.h>
#include <cuda_gl_interop.h>*/
#include <cutil.h>
typedef unsigned int uint;
typedef unsigned char uchar;
#define numObj 4
#define PI 3.141592654f
#define Angle(a) ((a*PI)/180.0)
//#define DEVICE_EMU
//#define DEBUG_RT_CUDA
#define FIXED_CONST_PARSE
#ifdef DEBUG_RT_CUDA
#define DEBUG_NUM 8
float4 *d_debug_float4;
uint *d_debug_uint;
float4 *h_debug_float4;
uint *h_debug_uint;
#endif
int g_verbose;
#include <rayTracing_kernel.cu>
unsigned width = 64; //640; //512; //16; //32; //512;
unsigned height = 64; //480; //512; //16;//512;
dim3 blockSize(16,8);
dim3 gridSize(width/blockSize.x, height/blockSize.y);
float3 viewRotation;
float3 viewTranslation = make_float3(0.0, 0.0, -4.0f);
float invViewMatrix[12];
//static int fpsCount = 0; // FPS count for averaging
//static int fpsLimit = 1; // FPS limit for sampling
unsigned int timer;
//GLuint pbo = 0; // Pixel buffer d'OpenGL
void initPixelBuffer();
class Observateur
{
private:
matrice3x4 M; // U, V, W
float df; // focal distance
public:
Observateur( );
Observateur(const float3 &, const float3 &, const float3 &, double );
inline const matrice3x4 & getMatrice( ) const { return M; }
inline float getDistance( ) const { return df; }
};
Observateur::Observateur()
{
M.m[0] = make_float4(0.0f,0.0f,1.0f,0.0f);
M.m[1] = make_float4(0.0f,1.0f,0.0f,0.0f);
M.m[2] = make_float4(1.0f,0.0f,0.0f,0.0f);
df = 1.0 / tan(Angle(65)/2.0);
}
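// Build an orthonormal camera basis (U, V, W) from the eye position p, the view
// direction u and the up vector v (Gram-Schmidt); df is the focal distance for
// a field of view of 'a' degrees.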
Observateur::Observateur(const float3 & p, const float3 & u, const float3 & v, double a )
{
float3 VP, U, V, W;
VP = normalize(v);
U = normalize(u);
V = normalize(VP - dot(U,VP)*U);
W = normalize(cross(U,V));
M.m[0] = make_float4(U.x,U.y,U.z,p.x);
M.m[1] = make_float4(V.x,V.y,V.z,p.y);
M.m[2] = make_float4(W.x,W.y,W.z,p.z);
df = 1.0 / tan(Angle(a)/2.0);
}
float anim = 0.0f, pas = 0.015f;
Observateur obs = Observateur(make_float3(0.0f,0.5f,2.0f),normalize(make_float3(0.0f,0.0f,0.0f)-make_float3(0.0f,0.5f,2.0f)),make_float3(0.0f,1.0f,0.0f),65.0f);
uint * values = NULL, * d_output, * d_temp, NUM;
uint * c_output;
Node node[numObj], * d_node;
Sphere s, s1, s2;
float phi;
uint * nObj;
float * prof;
Rayon * ray;
float3 * A, *u;
int t = 1;
void initObjet()
{
srand(47);
node->s.r = 1.0f;
node[0].s.C = make_float3(0.0f,-1.5f,-0.0f); node[0].s.r = 0.5f;
node[1].s.C = make_float3(-1.0f,0.0f,-1.0f); node[1].s.r = 0.5f;
node[2].s.C = make_float3(1.0f,-0.f,-1.0f); node[2].s.r = 0.5f;
node[3].s.C = make_float3(0.0f,-0.f,-2.0f); node[3].s.r = 0.75f;
for( int i(4); i < numObj; i++ ) {
float r,v,b;
float tmp1(5.0f*((r=(float(rand()%255)/255.0f)))-2.5f);
float tmp2(5.0f*((v=(float(rand()%255)/255.0f)))-2.5f);
float tmp3(-5.0f*((b=(float(rand()%255)/255.0f))));
float tmp4((rand()%100)/100.0f);
node[i].s.C = make_float3(tmp1,tmp2,tmp3); node[i].s.r = tmp4;
node[i].s.R = r; node[i].s.V = v; node[i].s.B = b; node[i].s.A = 1.0f;
node[i].fg = 0; node[i].fd = 0;
}
node[0].s.R = 0.0f; node[0].s.V = 1.0f; node[0].s.B = 1.0f; node[0].s.A = 1.0f;
node[1].s.R = 1.0f; node[1].s.V = 0.0f; node[1].s.B = 0.0f; node[1].s.A = 1.0f;
node[2].s.R = 0.0f; node[2].s.V = 0.0f; node[2].s.B = 1.0f; node[2].s.A = 1.0f;
node[3].s.R = 0.0f; node[3].s.V = 1.0f; node[3].s.B = 0.0f; node[3].s.A = 1.0f;
//createNode(&node[0], &node[1], &node[2], 1.0f);
node[0].fg = 1; node[0].fd = 2;
node[1].fg = 0; node[1].fd = 0;
node[2].fg = 0; node[2].fd = 0;
node[3].fg = 0; node[3].fd = 0;
#ifdef DEBUG_RT_CUDA
h_debug_float4 = (float4*) calloc(DEBUG_NUM, sizeof(float4));
h_debug_uint = (uint*) calloc(DEBUG_NUM, sizeof(uint));
CUDA_SAFE_CALL( hipMalloc( (void**)&d_debug_float4, DEBUG_NUM*sizeof(float4)));
CUDA_SAFE_CALL( hipMalloc( (void**)&d_debug_uint, DEBUG_NUM*sizeof(uint)));
CUDA_SAFE_CALL( hipMemcpy( d_debug_float4, h_debug_float4, DEBUG_NUM*sizeof(float4), hipMemcpyHostToDevice) );
CUDA_SAFE_CALL( hipMemcpy( d_debug_uint, h_debug_uint, DEBUG_NUM*sizeof(uint), hipMemcpyHostToDevice) );
#endif
c_output = (uint*) calloc(width*height, sizeof(uint));
CUDA_SAFE_CALL( hipMalloc( (void**)&d_output, width*height*sizeof(uint)));
CUDA_SAFE_CALL( hipMalloc( (void**)&d_node, numObj*sizeof(Node) ));
CUDA_SAFE_CALL( hipMemcpy( d_node, node, numObj*sizeof(Node), hipMemcpyHostToDevice) );
CUDA_SAFE_CALL( hipMemcpyToSymbol(cnode, node, numObj*sizeof(Node)) );
CUDA_SAFE_CALL( hipMemcpyToSymbol(MView, (void*)&obs, 3*sizeof(float4)) );
CUDA_SAFE_CALL( hipMalloc( (void**)&d_temp, width * height*sizeof(uint)));
CUDA_SAFE_CALL( hipMemset(d_temp, 0, width * height*sizeof(uint)) );
CUDA_SAFE_CALL( hipMalloc( (void**)&nObj, width * height*sizeof(uint)));
CUDA_SAFE_CALL( hipMalloc( (void**)&prof, width * height*sizeof(float)));
CUDA_SAFE_CALL( hipMalloc( (void**)&ray, width * height*sizeof(Rayon)));
CUDA_SAFE_CALL( hipMalloc( (void**)&A, width * height*sizeof(float3)));
CUDA_SAFE_CALL( hipMalloc( (void**)&u, width * height*sizeof(float3)));
}
#define PRINT_PIXELS
// Render the image with CUDA
void render()
{
// map PBO to get CUDA device pointer <GY: replace with memcpy?>
//CUDA_SAFE_CALL(hipGLMapBufferObject__((void**)&d_output, pbo));
//CUDA_SAFE_CALL( hipMemcpy( d_output, c_output, width*height*sizeof(uint), hipMemcpyHostToDevice) );
// call CUDA kernel, writing results to PBO
CUT_SAFE_CALL(cutStartTimer(timer));
#ifdef DEBUG_RT_CUDA
hipLaunchKernelGGL(( render), dim3(gridSize), dim3(blockSize), 0, 0, d_debug_float4, d_debug_uint, d_output, d_node, width, height, anim, obs.getDistance());
#else
hipLaunchKernelGGL(( render), dim3(gridSize), dim3(blockSize), 0, 0, d_output, d_node, width, height, anim, obs.getDistance());
#endif
CUDA_SAFE_CALL( hipDeviceSynchronize() );
CUT_SAFE_CALL(cutStopTimer(timer));
#ifdef DEBUG_RT_CUDA
CUDA_SAFE_CALL( hipMemcpy( h_debug_float4, d_debug_float4, DEBUG_NUM*sizeof(float4), hipMemcpyDeviceToHost) );
CUDA_SAFE_CALL( hipMemcpy( h_debug_uint, d_debug_uint, DEBUG_NUM*sizeof(uint), hipMemcpyDeviceToHost) );
printf("debug_float4\n");
for (int i=0; i< DEBUG_NUM; i++) {
printf("%e %e %e %e\n", h_debug_float4[i].x, h_debug_float4[i].y, h_debug_float4[i].z, h_debug_float4[i].w);
}
printf("debug_uint\n");
for (int i=0; i< DEBUG_NUM; i++) {
printf("0x%x\n", h_debug_uint[i]);
}
#endif
CUDA_SAFE_CALL( hipMemcpy( c_output, d_output, width*height*sizeof(uint), hipMemcpyDeviceToHost) );
unsigned long long int checksum = 0;
for (int y=(height-1); y >= 0; y--){
if (g_verbose) printf("\n");
for (int x=0; x< width; x++) {
if (g_verbose) printf("%010u ", (unsigned) c_output[x+y*width]);
checksum += c_output[x+y*width];
}
}
printf("\n");
printf("checksum=%llx\n", checksum);
CUT_CHECK_ERROR("Erreur kernel");
//CUDA_SAFE_CALL(hipGLUnmapBufferObject(pbo)); //<GY: replace with memcpy?>
}
// Display the result with OpenGL
void display()
{
// Display the result
//glClear(GL_COLOR_BUFFER_BIT);
//CUT_SAFE_CALL(cutStartTimer(timer));
render();
//CUT_SAFE_CALL(cutStopTimer(timer));
printf("Kernel Time: %f \n", cutGetTimerValue(timer));
/*fpsCount++;
if (fpsCount == fpsLimit) {
char fps[256];
float ifps = 1.f / (cutGetAverageTimerValue(timer) / 1000.f);
sprintf(fps, "Cuda Ray Tracing: %.1f fps", ifps);
glutSetWindowTitle(fps);
fpsCount = 0;
fpsLimit = (int)max(ifps, 1.f);
CUT_SAFE_CALL(cutResetTimer(timer));
}*/
if( anim >= 1.0f ) pas = -0.015f;
else if( anim <= -1.0f ) pas = 0.015f;
anim += pas;
// Draw the image from the PBO
/*glDisable(GL_DEPTH_TEST);
glRasterPos2i(0, 0);
glBindBufferARB(GL_PIXEL_UNPACK_BUFFER_ARB, pbo);
glDrawPixels(width, height, GL_RGBA, GL_UNSIGNED_BYTE, 0);
glBindBufferARB(GL_PIXEL_UNPACK_BUFFER_ARB, 0);
glutSwapBuffers();
glutReportErrors();*/
t--;
if (!t) {
return;
}
}
/*void idle()
{
glutPostRedisplay();
}
void keyboard(unsigned char , int , int )
{
//glutPostRedisplay();
}*/
int ox, oy;
int buttonState = 0;
/*void mouse(int , int , int , int )
{
if (state == GLUT_DOWN)
buttonState |= 1<<button;
else if (state == GLUT_UP)
buttonState = 0;
ox = x; oy = y;
glutPostRedisplay();
}
void motion(int , int )
{
float dx, dy;
dx = x - ox;
dy = y - oy;
if (buttonState == 3) {
// left+middle = zoom
viewTranslation.z += dy / 100.0;
}
else if (buttonState & 2) {
// middle = translate
viewTranslation.x += dx / 100.0;
viewTranslation.y -= dy / 100.0;
}
else if (buttonState & 1) {
// left = rotate
viewRotation.x += dy / 5.0;
viewRotation.y += dx / 5.0;
}
ox = x; oy = y;
glutPostRedisplay();
}
void reshape(int x, int y)
{
width = x; height = y;
initPixelBuffer();
glViewport(0, 0, x, y);
//glViewport(-x/2, -y/2, x/2, y/2);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glOrtho(0.0, 1.0, 0.0, 1.0, 0.0, 1.0);
}
void cleanup()
{
CUDA_SAFE_CALL(hipGLUnregisterBufferObject(pbo));
glDeleteBuffersARB(1, &pbo);
CUT_SAFE_CALL(cutDeleteTimer(timer));
}*/
int iDivUp(int a, int b)
{
return (a % b != 0) ? (a / b + 1) : (a / b);
}
void initPixelBuffer()
{
/*if (pbo) {
// delete old buffer
CUDA_SAFE_CALL(hipGLUnregisterBufferObject(pbo));
glDeleteBuffersARB(1, &pbo);
}*/
NUM = width * height;
phi = 2.0f/(float)min(width,height);
// create pixel buffer object for display
/* glGenBuffersARB(1, &pbo);
glBindBufferARB(GL_PIXEL_UNPACK_BUFFER_ARB, pbo);
glBufferDataARB(GL_PIXEL_UNPACK_BUFFER_ARB, width*height*sizeof(GLubyte)*4, 0, GL_STREAM_DRAW_ARB);
glBindBufferARB(GL_PIXEL_UNPACK_BUFFER_ARB, 0);
CUDA_SAFE_CALL(hipGLRegisterBufferObject(pbo));*/
// calculate new grid size
gridSize = dim3(iDivUp(width, blockSize.x), iDivUp(height, blockSize.y));
}
////////////////////////////////////////////////////////////////////////////////
// Main program
////////////////////////////////////////////////////////////////////////////////
int main( int argc, char** argv)
{
// initialise card and timer
int deviceCount;
CUDA_SAFE_CALL_NO_SYNC(hipGetDeviceCount(&deviceCount));
if (deviceCount == 0) {
fprintf(stderr, "There is no device.\n");
exit(EXIT_FAILURE);
}
int dev;
for (dev = 0; dev < deviceCount; ++dev) {
hipDeviceProp_t deviceProp;
CUDA_SAFE_CALL_NO_SYNC(hipGetDeviceProperties(&deviceProp, dev));
if (deviceProp.major >= 1)
break;
}
if (dev == deviceCount) {
fprintf(stderr, "There is no device supporting CUDA.\n");
exit(EXIT_FAILURE);
}
else
CUDA_SAFE_CALL(hipSetDevice(dev));
int i, commandline_error;
commandline_error = 0;
g_verbose = 0;
if (argc >= 3) {
width = atoi(argv[1]);
height = atoi(argv[2]);
for (i=3; i < argc;i++) {
if (argv[i][0] == '-') {
switch (argv[i][1]) {
case 'v': g_verbose = 1;
break;
default: commandline_error=1;
}
}
else commandline_error=1;
}
} else commandline_error=1;
if (commandline_error || !width || !height) {
printf("Usage: ./rayTracing <WIDTH> <HEIGHT> [-v]\n");
printf("where WIDTH and HEIGHT are the screen dimensions and -v is used to display an abstract representation of the output.\n");
return 1;
}
CUT_SAFE_CALL(cutCreateTimer(&timer));
CUT_SAFE_CALL(cutResetTimer(timer));
initialize_bmp(width,height,32);
// initialise the GLUT callback functions
/*glutInit(&argc, argv);
glutInitDisplayMode(GLUT_RGB | GLUT_DOUBLE);
glutInitWindowSize(width, height);
glutCreateWindow("CUDA Ray Tracing");
glutDisplayFunc(display);
glutKeyboardFunc(keyboard);
glutMouseFunc(mouse);
glutMotionFunc(motion);
glutReshapeFunc(reshape);
glutIdleFunc(idle);
glewInit();
if (!glewIsSupported("GL_VERSION_2_0 GL_ARB_pixel_buffer_object")) {
fprintf(stderr, "Les extensions minimales d'OpenGL sont absentes.");
exit(-1);
}
initPixelBuffer();
initObjet();
atexit(cleanup);
glutMainLoop();*/
initObjet();
initPixelBuffer();
display();
create_bmp(c_output);
CUT_SAFE_CALL(cutDeleteTimer(timer));
return 0;
}
|
6c3e51a8f8aa51dd208753f340eef0d0dcd9f4aa.cu
|
/*
* Copyright 2008 BOROUJERDI Maxime. Tous droits reserves.
*/
#include <cstdlib>
#include <cstdio>
#include <cstring>
#include <cmath>
#include "makebmp.h"
/*#include <GL/glew.h>
#include <GL/glut.h>
#include <cuda_gl_interop.h>*/
#include <cutil.h>
typedef unsigned int uint;
typedef unsigned char uchar;
#define numObj 4
#define PI 3.141592654f
#define Angle(a) ((a*PI)/180.0)
//#define DEVICE_EMU
//#define DEBUG_RT_CUDA
#define FIXED_CONST_PARSE
#ifdef DEBUG_RT_CUDA
#define DEBUG_NUM 8
float4 *d_debug_float4;
uint *d_debug_uint;
float4 *h_debug_float4;
uint *h_debug_uint;
#endif
int g_verbose;
#include <rayTracing_kernel.cu>
unsigned width = 64; //640; //512; //16; //32; //512;
unsigned height = 64; //480; //512; //16;//512;
dim3 blockSize(16,8);
dim3 gridSize(width/blockSize.x, height/blockSize.y);
float3 viewRotation;
float3 viewTranslation = make_float3(0.0, 0.0, -4.0f);
float invViewMatrix[12];
//static int fpsCount = 0; // FPS count for averaging
//static int fpsLimit = 1; // FPS limit for sampling
unsigned int timer;
//GLuint pbo = 0; // Pixel buffer d'OpenGL
void initPixelBuffer();
class Observateur
{
private:
matrice3x4 M; // U, V, W
float df; // focal distance
public:
Observateur( );
Observateur(const float3 &, const float3 &, const float3 &, double );
inline const matrice3x4 & getMatrice( ) const { return M; }
inline float getDistance( ) const { return df; }
};
Observateur::Observateur()
{
M.m[0] = make_float4(0.0f,0.0f,1.0f,0.0f);
M.m[1] = make_float4(0.0f,1.0f,0.0f,0.0f);
M.m[2] = make_float4(1.0f,0.0f,0.0f,0.0f);
df = 1.0 / tan(Angle(65)/2.0);
}
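// Build an orthonormal camera basis (U, V, W) from the eye position p, the view
// direction u and the up vector v (Gram-Schmidt); df is the focal distance for
// a field of view of 'a' degrees.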
Observateur::Observateur(const float3 & p, const float3 & u, const float3 & v, double a )
{
float3 VP, U, V, W;
VP = normalize(v);
U = normalize(u);
V = normalize(VP - dot(U,VP)*U);
W = normalize(cross(U,V));
M.m[0] = make_float4(U.x,U.y,U.z,p.x);
M.m[1] = make_float4(V.x,V.y,V.z,p.y);
M.m[2] = make_float4(W.x,W.y,W.z,p.z);
df = 1.0 / tan(Angle(a)/2.0);
}
float anim = 0.0f, pas = 0.015f;
Observateur obs = Observateur(make_float3(0.0f,0.5f,2.0f),normalize(make_float3(0.0f,0.0f,0.0f)-make_float3(0.0f,0.5f,2.0f)),make_float3(0.0f,1.0f,0.0f),65.0f);
uint * values = NULL, * d_output, * d_temp, NUM;
uint * c_output;
Node node[numObj], * d_node;
Sphere s, s1, s2;
float phi;
uint * nObj;
float * prof;
Rayon * ray;
float3 * A, *u;
int t = 1;
void initObjet()
{
srand(47);
node->s.r = 1.0f;
node[0].s.C = make_float3(0.0f,-1.5f,-0.0f); node[0].s.r = 0.5f;
node[1].s.C = make_float3(-1.0f,0.0f,-1.0f); node[1].s.r = 0.5f;
node[2].s.C = make_float3(1.0f,-0.f,-1.0f); node[2].s.r = 0.5f;
node[3].s.C = make_float3(0.0f,-0.f,-2.0f); node[3].s.r = 0.75f;
for( int i(4); i < numObj; i++ ) {
float r,v,b;
float tmp1(5.0f*((r=(float(rand()%255)/255.0f)))-2.5f);
float tmp2(5.0f*((v=(float(rand()%255)/255.0f)))-2.5f);
float tmp3(-5.0f*((b=(float(rand()%255)/255.0f))));
float tmp4((rand()%100)/100.0f);
node[i].s.C = make_float3(tmp1,tmp2,tmp3); node[i].s.r = tmp4;
node[i].s.R = r; node[i].s.V = v; node[i].s.B = b; node[i].s.A = 1.0f;
node[i].fg = 0; node[i].fd = 0;
}
node[0].s.R = 0.0f; node[0].s.V = 1.0f; node[0].s.B = 1.0f; node[0].s.A = 1.0f;
node[1].s.R = 1.0f; node[1].s.V = 0.0f; node[1].s.B = 0.0f; node[1].s.A = 1.0f;
node[2].s.R = 0.0f; node[2].s.V = 0.0f; node[2].s.B = 1.0f; node[2].s.A = 1.0f;
node[3].s.R = 0.0f; node[3].s.V = 1.0f; node[3].s.B = 0.0f; node[3].s.A = 1.0f;
//createNode(&node[0], &node[1], &node[2], 1.0f);
node[0].fg = 1; node[0].fd = 2;
node[1].fg = 0; node[1].fd = 0;
node[2].fg = 0; node[2].fd = 0;
node[3].fg = 0; node[3].fd = 0;
#ifdef DEBUG_RT_CUDA
h_debug_float4 = (float4*) calloc(DEBUG_NUM, sizeof(float4));
h_debug_uint = (uint*) calloc(DEBUG_NUM, sizeof(uint));
CUDA_SAFE_CALL( cudaMalloc( (void**)&d_debug_float4, DEBUG_NUM*sizeof(float4)));
CUDA_SAFE_CALL( cudaMalloc( (void**)&d_debug_uint, DEBUG_NUM*sizeof(uint)));
CUDA_SAFE_CALL( cudaMemcpy( d_debug_float4, h_debug_float4, DEBUG_NUM*sizeof(float4), cudaMemcpyHostToDevice) );
CUDA_SAFE_CALL( cudaMemcpy( d_debug_uint, h_debug_uint, DEBUG_NUM*sizeof(uint), cudaMemcpyHostToDevice) );
#endif
c_output = (uint*) calloc(width*height, sizeof(uint));
CUDA_SAFE_CALL( cudaMalloc( (void**)&d_output, width*height*sizeof(uint)));
CUDA_SAFE_CALL( cudaMalloc( (void**)&d_node, numObj*sizeof(Node) ));
CUDA_SAFE_CALL( cudaMemcpy( d_node, node, numObj*sizeof(Node), cudaMemcpyHostToDevice) );
CUDA_SAFE_CALL( cudaMemcpyToSymbol(cnode, node, numObj*sizeof(Node)) );
CUDA_SAFE_CALL( cudaMemcpyToSymbol(MView, (void*)&obs, 3*sizeof(float4)) );
CUDA_SAFE_CALL( cudaMalloc( (void**)&d_temp, width * height*sizeof(uint)));
CUDA_SAFE_CALL( cudaMemset(d_temp, 0, width * height*sizeof(uint)) );
CUDA_SAFE_CALL( cudaMalloc( (void**)&nObj, width * height*sizeof(uint)));
CUDA_SAFE_CALL( cudaMalloc( (void**)&prof, width * height*sizeof(float)));
CUDA_SAFE_CALL( cudaMalloc( (void**)&ray, width * height*sizeof(Rayon)));
CUDA_SAFE_CALL( cudaMalloc( (void**)&A, width * height*sizeof(float3)));
CUDA_SAFE_CALL( cudaMalloc( (void**)&u, width * height*sizeof(float3)));
}
#define PRINT_PIXELS
// Render the image with CUDA
void render()
{
// map PBO to get CUDA device pointer <GY: replace with memcpy?>
//CUDA_SAFE_CALL(cudaGLMapBufferObject((void**)&d_output, pbo));
//CUDA_SAFE_CALL( cudaMemcpy( d_output, c_output, width*height*sizeof(uint), cudaMemcpyHostToDevice) );
// call CUDA kernel, writing results to PBO
CUT_SAFE_CALL(cutStartTimer(timer));
#ifdef DEBUG_RT_CUDA
render<<<gridSize, blockSize>>>(d_debug_float4, d_debug_uint, d_output, d_node, width, height, anim, obs.getDistance());
#else
render<<<gridSize, blockSize>>>(d_output, d_node, width, height, anim, obs.getDistance());
#endif
CUDA_SAFE_CALL( cudaThreadSynchronize() );
CUT_SAFE_CALL(cutStopTimer(timer));
#ifdef DEBUG_RT_CUDA
CUDA_SAFE_CALL( cudaMemcpy( h_debug_float4, d_debug_float4, DEBUG_NUM*sizeof(float4), cudaMemcpyDeviceToHost) );
CUDA_SAFE_CALL( cudaMemcpy( h_debug_uint, d_debug_uint, DEBUG_NUM*sizeof(uint), cudaMemcpyDeviceToHost) );
printf("debug_float4\n");
for (int i=0; i< DEBUG_NUM; i++) {
printf("%e %e %e %e\n", h_debug_float4[i].x, h_debug_float4[i].y, h_debug_float4[i].z, h_debug_float4[i].w);
}
printf("debug_uint\n");
for (int i=0; i< DEBUG_NUM; i++) {
printf("0x%x\n", h_debug_uint[i]);
}
#endif
CUDA_SAFE_CALL( cudaMemcpy( c_output, d_output, width*height*sizeof(uint), cudaMemcpyDeviceToHost) );
unsigned long long int checksum = 0;
for (int y=(height-1); y >= 0; y--){
if (g_verbose) printf("\n");
for (int x=0; x< width; x++) {
if (g_verbose) printf("%010u ", (unsigned) c_output[x+y*width]);
checksum += c_output[x+y*width];
}
}
printf("\n");
printf("checksum=%llx\n", checksum);
CUT_CHECK_ERROR("Erreur kernel");
//CUDA_SAFE_CALL(cudaGLUnmapBufferObject(pbo)); //<GY: replace with memcpy?>
}
// Display the result with OpenGL
void display()
{
// Display the result
//glClear(GL_COLOR_BUFFER_BIT);
//CUT_SAFE_CALL(cutStartTimer(timer));
render();
//CUT_SAFE_CALL(cutStopTimer(timer));
printf("Kernel Time: %f \n", cutGetTimerValue(timer));
/*fpsCount++;
if (fpsCount == fpsLimit) {
char fps[256];
float ifps = 1.f / (cutGetAverageTimerValue(timer) / 1000.f);
sprintf(fps, "Cuda Ray Tracing: %.1f fps", ifps);
glutSetWindowTitle(fps);
fpsCount = 0;
fpsLimit = (int)max(ifps, 1.f);
CUT_SAFE_CALL(cutResetTimer(timer));
}*/
if( anim >= 1.0f ) pas = -0.015f;
else if( anim <= -1.0f ) pas = 0.015f;
anim += pas;
// Draw the image from the PBO
/*glDisable(GL_DEPTH_TEST);
glRasterPos2i(0, 0);
glBindBufferARB(GL_PIXEL_UNPACK_BUFFER_ARB, pbo);
glDrawPixels(width, height, GL_RGBA, GL_UNSIGNED_BYTE, 0);
glBindBufferARB(GL_PIXEL_UNPACK_BUFFER_ARB, 0);
glutSwapBuffers();
glutReportErrors();*/
t--;
if (!t) {
return;
}
}
/*void idle()
{
glutPostRedisplay();
}
void keyboard(unsigned char , int , int )
{
//glutPostRedisplay();
}*/
int ox, oy;
int buttonState = 0;
/*void mouse(int , int , int , int )
{
if (state == GLUT_DOWN)
buttonState |= 1<<button;
else if (state == GLUT_UP)
buttonState = 0;
ox = x; oy = y;
glutPostRedisplay();
}
void motion(int , int )
{
float dx, dy;
dx = x - ox;
dy = y - oy;
if (buttonState == 3) {
// left+middle = zoom
viewTranslation.z += dy / 100.0;
}
else if (buttonState & 2) {
// middle = translate
viewTranslation.x += dx / 100.0;
viewTranslation.y -= dy / 100.0;
}
else if (buttonState & 1) {
// left = rotate
viewRotation.x += dy / 5.0;
viewRotation.y += dx / 5.0;
}
ox = x; oy = y;
glutPostRedisplay();
}
void reshape(int x, int y)
{
width = x; height = y;
initPixelBuffer();
glViewport(0, 0, x, y);
//glViewport(-x/2, -y/2, x/2, y/2);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glOrtho(0.0, 1.0, 0.0, 1.0, 0.0, 1.0);
}
void cleanup()
{
CUDA_SAFE_CALL(cudaGLUnregisterBufferObject(pbo));
glDeleteBuffersARB(1, &pbo);
CUT_SAFE_CALL(cutDeleteTimer(timer));
}*/
int iDivUp(int a, int b)
{
return (a % b != 0) ? (a / b + 1) : (a / b);
}
void initPixelBuffer()
{
/*if (pbo) {
// delete old buffer
CUDA_SAFE_CALL(cudaGLUnregisterBufferObject(pbo));
glDeleteBuffersARB(1, &pbo);
}*/
NUM = width * height;
phi = 2.0f/(float)min(width,height);
// create pixel buffer object for display
/* glGenBuffersARB(1, &pbo);
glBindBufferARB(GL_PIXEL_UNPACK_BUFFER_ARB, pbo);
glBufferDataARB(GL_PIXEL_UNPACK_BUFFER_ARB, width*height*sizeof(GLubyte)*4, 0, GL_STREAM_DRAW_ARB);
glBindBufferARB(GL_PIXEL_UNPACK_BUFFER_ARB, 0);
CUDA_SAFE_CALL(cudaGLRegisterBufferObject(pbo));*/
// calculate new grid size
gridSize = dim3(iDivUp(width, blockSize.x), iDivUp(height, blockSize.y));
}
////////////////////////////////////////////////////////////////////////////////
// Main program
////////////////////////////////////////////////////////////////////////////////
int main( int argc, char** argv)
{
// initialise card and timer
int deviceCount;
CUDA_SAFE_CALL_NO_SYNC(cudaGetDeviceCount(&deviceCount));
if (deviceCount == 0) {
fprintf(stderr, "There is no device.\n");
exit(EXIT_FAILURE);
}
int dev;
for (dev = 0; dev < deviceCount; ++dev) {
cudaDeviceProp deviceProp;
CUDA_SAFE_CALL_NO_SYNC(cudaGetDeviceProperties(&deviceProp, dev));
if (deviceProp.major >= 1)
break;
}
if (dev == deviceCount) {
fprintf(stderr, "There is no device supporting CUDA.\n");
exit(EXIT_FAILURE);
}
else
CUDA_SAFE_CALL(cudaSetDevice(dev));
int i, commandline_error;
commandline_error = 0;
g_verbose = 0;
if (argc >= 3) {
width = atoi(argv[1]);
height = atoi(argv[2]);
for (i=3; i < argc;i++) {
if (argv[i][0] == '-') {
switch (argv[i][1]) {
case 'v': g_verbose = 1;
break;
default: commandline_error=1;
}
}
else commandline_error=1;
}
} else commandline_error=1;
if (commandline_error || !width || !height) {
printf("Usage: ./rayTracing <WIDTH> <HEIGHT> [-v]\n");
printf("where WIDTH and HEIGHT are the screen dimensions and -v is used to display an abstract representation of the output.\n");
return 1;
}
CUT_SAFE_CALL(cutCreateTimer(&timer));
CUT_SAFE_CALL(cutResetTimer(timer));
initialize_bmp(width,height,32);
// initialise the GLUT callback functions
/*glutInit(&argc, argv);
glutInitDisplayMode(GLUT_RGB | GLUT_DOUBLE);
glutInitWindowSize(width, height);
glutCreateWindow("CUDA Ray Tracing");
glutDisplayFunc(display);
glutKeyboardFunc(keyboard);
glutMouseFunc(mouse);
glutMotionFunc(motion);
glutReshapeFunc(reshape);
glutIdleFunc(idle);
glewInit();
if (!glewIsSupported("GL_VERSION_2_0 GL_ARB_pixel_buffer_object")) {
fprintf(stderr, "Les extensions minimales d'OpenGL sont absentes.");
exit(-1);
}
initPixelBuffer();
initObjet();
atexit(cleanup);
glutMainLoop();*/
initObjet();
initPixelBuffer();
display();
create_bmp(c_output);
CUT_SAFE_CALL(cutDeleteTimer(timer));
return 0;
}
|
72c2364f5026437febc49451d81d20555a5ec30c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/************************************************************************************\
* *
* Copyright 2014 Advanced Micro Devices, Inc. *
* All rights reserved. *
* *
* Redistribution and use in source and binary forms, with or without *
* modification, are permitted provided that the following are met: *
* *
* You must reproduce the above copyright notice. *
* *
* Neither the name of the copyright holder nor the names of its contributors *
* may be used to endorse or promote products derived from this software *
* without specific, prior, written permission from at least the copyright holder. *
* *
* You must include the following terms in your license and/or other materials *
* provided with the software. *
* *
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" *
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE *
* IMPLIED WARRANTIES OF MERCHANTABILITY, NON-INFRINGEMENT, AND FITNESS FOR A *
* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER *
* OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, *
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT *
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS *
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN *
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING *
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY *
* OF SUCH DAMAGE. *
* *
* Without limiting the foregoing, the software may implement third party *
* technologies for which you must obtain licenses from parties other than AMD. *
* You agree that AMD has not obtained or conveyed to you, and that you shall *
* be responsible for obtaining the rights to use and/or distribute the applicable *
* underlying intellectual property rights related to the third party technologies. *
* These third party technologies are not licensed hereunder. *
* *
* If you use the software (in whole or in part), you shall adhere to all *
* applicable U.S., European, and other export laws, including but not limited to *
* the U.S. Export Administration Regulations ("EAR") (15 C.F.R Sections 730-774), *
* and E.U. Council Regulation (EC) No 428/2009 of 5 May 2009. Further, pursuant *
* to Section 740.6 of the EAR, you hereby certify that, except pursuant to a *
* license granted by the United States Department of Commerce Bureau of Industry *
* and Security or as otherwise permitted pursuant to a License Exception under *
* the U.S. Export Administration Regulations ("EAR"), you will not (1) export, *
* re-export or release to a national of a country in Country Groups D:1, E:1 or *
* E:2 any restricted technology, software, or source code you receive hereunder, *
* or (2) export to Country Groups D:1, E:1 or E:2 the direct product of such *
* technology or software, if such foreign produced direct product is subject to *
* national security controls as identified on the Commerce Control List (currently *
* found in Supplement 1 to Part 774 of EAR). For the most current Country Group *
* listings, or for additional information about the EAR or your obligations under *
* those regulations, please refer to the U.S. Bureau of Industry and Security's *
* website at http://www.bis.doc.gov/. *
* *
\************************************************************************************/
//#define BIGNUM 99999999
#define EPSILON 0.0000001
#define NOT_PROCESSED -1
#define INACTIVE -2
#define INDEPENDENT 2
/**
* init kernel
* @param s_array set array
* @param c_array status array
* @param cu_array status update array
* @param num_nodes number of vertices
* @param num_edges number of edges
*/
__global__ void
init(int *s_array, int *c_array, int *cu_array, int num_nodes, int num_edges)
{
// Get my workitem id
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < num_nodes) {
// Set the status array: not processed
c_array[tid] = -1;
cu_array[tid] = -1;
s_array[tid] = 0;
}
}
/**
* mis1 kernel
* @param row csr pointer array
* @param col csr column index array
* @param node_value node value array
* @param s_array set array
* @param c_array node status array
* @param min_array node value array
* @param cont node value array
* @param num_nodes number of vertices
* @param num_edges number of edges
*/
__global__ void
mis1(int *row, int *col, int *node_value, int *s_array, int *c_array,
int *min_array, int *cont, int num_gpu_nodes, int num_edges
)
{
const int tx = threadIdx.x;
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
int my_task = tid;
int edge = 0;
bool local_cont = false;
/*
asm volatile
(
".reg .s32 mINDEPENDENT;\n\t" // Register for s_array addr
".reg .s32 mNOTPROCESSED;\n\t" // Register for s_array addr
".reg .s32 mINACTIVE;\n\t" // Register for s_array addr
"mov.s32 mINDEPENDENT, %0;\n\t"
"mov.s32 mNOTPROCESSED, %1;\n\t"
"mov.s32 mINACTIVE, %2;\n\t"
: // Outputs
: "r"(INDEPENDENT), "r"(NOT_PROCESSED), "r"(INACTIVE) // Inputs
);
*/
/*
if (tx == 0) {
__denovo_setAcquireRegion(SPECIAL_REGION);
__denovo_addAcquireRegion(READ_ONLY_REGION);
__denovo_addAcquireRegion(default_reg);
__denovo_addAcquireRegion(rel_reg);
}
__syncthreads();
*/
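    // Grid-stride loop over vertices: every still-unprocessed vertex pushes its
    // node value to its unprocessed neighbours, which keep the minimum they see.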
for (; my_task < num_gpu_nodes; my_task += blockDim.x * gridDim.x) {
// If the vertex is not processed
if (c_array[my_task] == NOT_PROCESSED) {
local_cont = true;
// Get the start and end pointers
const int row_start = row[my_task];
const int row_end = row[my_task + 1];
const int my_node_value = node_value[my_task];
/*
asm volatile
(
".reg .u64 m99;\n\t" // Register for c_array addr
".reg .u64 m100;\n\t" // Register for node_value addr
".reg .s32 m101;\n\t" // Register for my_node_value
"mov.u64 m99, %0;\n\t"
"mov.u64 m100, %1;\n\t"
"mov.s32 m101, %2;\n\t"
: // Outputs
: "l"(c_array), "l"(min_array), "r"(my_node_value) // Inputs
);
*/
// Navigate the neighbor list and find the min
for (int edge = row_start; edge < row_end; edge++) {
const int neighbor = col[edge];
if (c_array[neighbor] == NOT_PROCESSED) {
#ifdef SYNC
atomicMin(&min_array[neighbor], my_node_value);
#else
min_array[neighbor] = (min_array[neighbor] > my_node_value) ? my_node_value : min_array[neighbor];
#endif
}
}
/*
for (edge = row_start; edge <= (row_end - 8); edge += 8) {
int * const col_base_addr = &col[edge];
asm volatile
(
".reg .s32 m1;\n\t" // Register for nid loaded from col
".reg .s32 m2;\n\t" // Register for nid loaded from col
".reg .s32 m3;\n\t" // Register for nid loaded from col
".reg .s32 m4;\n\t" // Register for nid loaded from col
".reg .s32 m5;\n\t" // Register for nid loaded from col
".reg .s32 m6;\n\t" // Register for nid loaded from col
".reg .s32 m7;\n\t" // Register for nid loaded from col
".reg .s32 m8;\n\t" // Register for nid loaded from col
".reg .u64 m17;\n\t" // Register for multiplied nid value as address
".reg .u64 m18;\n\t" // Register for multiplied nid value as address
".reg .u64 m19;\n\t" // Register for multiplied nid value as address
".reg .u64 m20;\n\t" // Register for multiplied nid value as address
".reg .u64 m21;\n\t" // Register for multiplied nid value as address
".reg .u64 m22;\n\t" // Register for multiplied nid value as address
".reg .u64 m23;\n\t" // Register for multiplied nid value as address
".reg .u64 m24;\n\t" // Register for multiplied nid value as address
".reg .u64 m25;\n\t" // Register for final address to load from c
".reg .u64 m26;\n\t" // Register for final address to load from c
".reg .u64 m27;\n\t" // Register for final address to load from c
".reg .u64 m28;\n\t" // Register for final address to load from c
".reg .u64 m29;\n\t" // Register for final address to load from c
".reg .u64 m30;\n\t" // Register for final address to load from c
".reg .u64 m31;\n\t" // Register for final address to load from c
".reg .u64 m32;\n\t" // Register for final address to load from c
".reg .s32 m33;\n\t" // Register for c
".reg .s32 m34;\n\t" // Register for c
".reg .s32 m35;\n\t" // Register for c
".reg .s32 m36;\n\t" // Register for c
".reg .s32 m37;\n\t" // Register for c
".reg .s32 m38;\n\t" // Register for c
".reg .s32 m39;\n\t" // Register for c
".reg .s32 m40;\n\t" // Register for c
".reg .u64 m65;\n\t" // Register for final address to load from min_value
".reg .u64 m66;\n\t" // Register for final address to load from min_value
".reg .u64 m67;\n\t" // Register for final address to load from min_value
".reg .u64 m68;\n\t" // Register for final address to load from min_value
".reg .u64 m69;\n\t" // Register for final address to load from min_value
".reg .u64 m70;\n\t" // Register for final address to load from min_value
".reg .u64 m71;\n\t" // Register for final address to load from min_value
".reg .u64 m72;\n\t" // Register for final address to load from min_value
".reg .s32 m73;\n\t" // Register for min_value
".reg .s32 m74;\n\t" // Register for min_value
".reg .s32 m75;\n\t" // Register for min_value
".reg .s32 m76;\n\t" // Register for min_value
".reg .s32 m77;\n\t" // Register for min_value
".reg .s32 m78;\n\t" // Register for min_value
".reg .s32 m79;\n\t" // Register for min_value
".reg .s32 m80;\n\t" // Register for min_value
".reg .pred m49;\n\t" // Register for c predicate
".reg .pred m50;\n\t" // Register for c predicate
".reg .pred m51;\n\t" // Register for c predicate
".reg .pred m52;\n\t" // Register for c predicate
".reg .pred m53;\n\t" // Register for c predicate
".reg .pred m54;\n\t" // Register for c predicate
".reg .pred m55;\n\t" // Register for c predicate
".reg .pred m56;\n\t" // Register for c predicate
"ld.s32 m1, [%0+0];\n\t" // Load nid
"ld.s32 m2, [%0+4];\n\t" // Load nid
"ld.s32 m3, [%0+8];\n\t" // Load nid
"ld.s32 m4, [%0+12];\n\t" // Load nid
"ld.s32 m5, [%0+16];\n\t" // Load nid
"ld.s32 m6, [%0+20];\n\t" // Load nid
"ld.s32 m7, [%0+24];\n\t" // Load nid
"ld.s32 m8, [%0+28];\n\t" // Load nid
"mul.wide.s32 m17, m1, 4;\n\t" // Multiply nid for address calculation
"mul.wide.s32 m18, m2, 4;\n\t" // Multiply nid for address calculation
"mul.wide.s32 m19, m3, 4;\n\t" // Multiply nid for address calculation
"mul.wide.s32 m20, m4, 4;\n\t" // Multiply nid for address calculation
"mul.wide.s32 m21, m5, 4;\n\t" // Multiply nid for address calculation
"mul.wide.s32 m22, m6, 4;\n\t" // Multiply nid for address calculation
"mul.wide.s32 m23, m7, 4;\n\t" // Multiply nid for address calculation
"mul.wide.s32 m24, m8, 4;\n\t" // Multiply nid for address calculation
"add.u64 m25, m99, m17;\n\t" // Final address calculation for c
"add.u64 m26, m99, m18;\n\t" // Final address calculation for c
"add.u64 m27, m99, m19;\n\t" // Final address calculation for c
"add.u64 m28, m99, m20;\n\t" // Final address calculation for c
"add.u64 m29, m99, m21;\n\t" // Final address calculation for c
"add.u64 m30, m99, m22;\n\t" // Final address calculation for c
"add.u64 m31, m99, m23;\n\t" // Final address calculation for c
"add.u64 m32, m99, m24;\n\t" // Final address calculation for c
"add.u64 m65, m100, m17;\n\t" // Final address calculation for min_value
"add.u64 m66, m100, m18;\n\t" // Final address calculation for min_value
"add.u64 m67, m100, m19;\n\t" // Final address calculation for min_value
"add.u64 m68, m100, m20;\n\t" // Final address calculation for min_value
"add.u64 m69, m100, m21;\n\t" // Final address calculation for min_value
"add.u64 m70, m100, m22;\n\t" // Final address calculation for min_value
"add.u64 m71, m100, m23;\n\t" // Final address calculation for min_value
"add.u64 m72, m100, m24;\n\t" // Final address calculation for min_value
"ld.s32 m33, [m25];\n\t" // Load c
"ld.s32 m34, [m26];\n\t" // Load c
"ld.s32 m35, [m27];\n\t" // Load c
"ld.s32 m36, [m28];\n\t" // Load c
"ld.s32 m37, [m29];\n\t" // Load c
"ld.s32 m38, [m30];\n\t" // Load c
"ld.s32 m39, [m31];\n\t" // Load c
"ld.s32 m40, [m32];\n\t" // Load c
"setp.eq.s32 m49, m33, mNOTPROCESSED;\n\t" // Value for predicate
"setp.eq.s32 m50, m34, mNOTPROCESSED;\n\t" // Value for predicate
"setp.eq.s32 m51, m35, mNOTPROCESSED;\n\t" // Value for predicate
"setp.eq.s32 m52, m36, mNOTPROCESSED;\n\t" // Value for predicate
"setp.eq.s32 m53, m37, mNOTPROCESSED;\n\t" // Value for predicate
"setp.eq.s32 m54, m38, mNOTPROCESSED;\n\t" // Value for predicate
"setp.eq.s32 m55, m39, mNOTPROCESSED;\n\t" // Value for predicate
"setp.eq.s32 m56, m40, mNOTPROCESSED;\n\t" // Value for predicate
"MIS1_PRED_BODY_8_1:\n\t" // Predicate body
"@!m49 bra MIS1_PRED_BODY_8_2;\n\t" // Predicate on value of c
"atom.min.s32 m73, [m65], m101;\n\t" // Do min of node_value
"MIS1_PRED_BODY_8_2:\n\t" // Predicate body
"@!m50 bra MIS1_PRED_BODY_8_3;\n\t" // Predicate on value of c
"atom.min.s32 m74, [m66], m101;\n\t" // Do min of node_value
"MIS1_PRED_BODY_8_3:\n\t" // Predicate body
"@!m51 bra MIS1_PRED_BODY_8_4;\n\t" // Predicate on value of c
"atom.min.s32 m75, [m67], m101;\n\t" // Do min of node_value
"MIS1_PRED_BODY_8_4:\n\t" // Predicate body
"@!m52 bra MIS1_PRED_BODY_8_5;\n\t" // Predicate on value of c
"atom.min.s32 m76, [m68], m101;\n\t" // Do min of node_value
"MIS1_PRED_BODY_8_5:\n\t" // Predicate body
"@!m53 bra MIS1_PRED_BODY_8_6;\n\t" // Predicate on value of c
"atom.min.s32 m77, [m69], m101;\n\t" // Do min of node_value
"MIS1_PRED_BODY_8_6:\n\t" // Predicate body
"@!m54 bra MIS1_PRED_BODY_8_7;\n\t" // Predicate on value of c
"atom.min.s32 m78, [m70], m101;\n\t" // Do min of node_value
"MIS1_PRED_BODY_8_7:\n\t" // Predicate body
"@!m55 bra MIS1_PRED_BODY_8_8;\n\t" // Predicate on value of c
"atom.min.s32 m79, [m71], m101;\n\t" // Do min of node_value
"MIS1_PRED_BODY_8_8:\n\t" // Predicate body
"@!m56 bra MIS1_NEIGH_LOOP_8;\n\t" // Predicate on value of c
"atom.min.s32 m80, [m72], m101;\n\t" // Do min of node_value
"MIS1_NEIGH_LOOP_8:\n\t"
: // Outputs
: "l"(col_base_addr) // Inputs
);
}
for (; edge <= (row_end - 4); edge += 4) {
int * const col_base_addr = &col[edge];
asm volatile
(
".reg .s32 q1;\n\t" // Register for nid loaded from col
".reg .s32 q2;\n\t" // Register for nid loaded from col
".reg .s32 q3;\n\t" // Register for nid loaded from col
".reg .s32 q4;\n\t" // Register for nid loaded from col
".reg .u64 q17;\n\t" // Register for multiplied nid value as address
".reg .u64 q18;\n\t" // Register for multiplied nid value as address
".reg .u64 q19;\n\t" // Register for multiplied nid value as address
".reg .u64 q20;\n\t" // Register for multiplied nid value as address
".reg .u64 q25;\n\t" // Register for final address to load from c
".reg .u64 q26;\n\t" // Register for final address to load from c
".reg .u64 q27;\n\t" // Register for final address to load from c
".reg .u64 q28;\n\t" // Register for final address to load from c
".reg .s32 q33;\n\t" // Register for c
".reg .s32 q34;\n\t" // Register for c
".reg .s32 q35;\n\t" // Register for c
".reg .s32 q36;\n\t" // Register for c
".reg .u64 q65;\n\t" // Register for final address to load from min_value
".reg .u64 q66;\n\t" // Register for final address to load from min_value
".reg .u64 q67;\n\t" // Register for final address to load from min_value
".reg .u64 q68;\n\t" // Register for final address to load from min_value
".reg .s32 q73;\n\t" // Register for node_value
".reg .s32 q74;\n\t" // Register for node_value
".reg .s32 q75;\n\t" // Register for node_value
".reg .s32 q76;\n\t" // Register for node_value
".reg .pred q49;\n\t" // Register for c predicate
".reg .pred q50;\n\t" // Register for c predicate
".reg .pred q51;\n\t" // Register for c predicate
".reg .pred q52;\n\t" // Register for c predicate
"ld.s32 q1, [%0+0];\n\t" // Load nid
"ld.s32 q2, [%0+4];\n\t" // Load nid
"ld.s32 q3, [%0+8];\n\t" // Load nid
"ld.s32 q4, [%0+12];\n\t" // Load nid
"mul.wide.s32 q17, q1, 4;\n\t" // Multiply nid for address calculation
"mul.wide.s32 q18, q2, 4;\n\t" // Multiply nid for address calculation
"mul.wide.s32 q19, q3, 4;\n\t" // Multiply nid for address calculation
"mul.wide.s32 q20, q4, 4;\n\t" // Multiply nid for address calculation
"add.u64 q25, m99, q17;\n\t" // Final address calculation for c
"add.u64 q26, m99, q18;\n\t" // Final address calculation for c
"add.u64 q27, m99, q19;\n\t" // Final address calculation for c
"add.u64 q28, m99, q20;\n\t" // Final address calculation for c
"add.u64 q65, m100, q17;\n\t" // Final address calculation for node_value
"add.u64 q66, m100, q18;\n\t" // Final address calculation for node_value
"add.u64 q67, m100, q19;\n\t" // Final address calculation for node_value
"add.u64 q68, m100, q20;\n\t" // Final address calculation for node_value
"ld.s32 q33, [q25];\n\t" // Load c
"ld.s32 q34, [q26];\n\t" // Load c
"ld.s32 q35, [q27];\n\t" // Load c
"ld.s32 q36, [q28];\n\t" // Load c
"setp.eq.s32 q49, q33, mNOTPROCESSED;\n\t" // Value for predicate
"setp.eq.s32 q50, q34, mNOTPROCESSED;\n\t" // Value for predicate
"setp.eq.s32 q51, q35, mNOTPROCESSED;\n\t" // Value for predicate
"setp.eq.s32 q52, q36, mNOTPROCESSED;\n\t" // Value for predicate
"MIS1_PRED_BODY_4_1:\n\t" // Predicate body
"@!q49 bra MIS1_PRED_BODY_4_2;\n\t" // Predicate on value of c
"atom.min.s32 q73, [q65], m101;\n\t" // Do min of node_value
"MIS1_PRED_BODY_4_2:\n\t" // Predicate body
"@!q50 bra MIS1_PRED_BODY_4_3;\n\t" // Predicate on value of c
"atom.min.s32 q74, [q66], m101;\n\t" // Do min of node_value
"MIS1_PRED_BODY_4_3:\n\t" // Predicate body
"@!q51 bra MIS1_PRED_BODY_4_4;\n\t" // Predicate on value of c
"atom.min.s32 q75, [q67], m101;\n\t" // Do min of node_value
"MIS1_PRED_BODY_4_4:\n\t" // Predicate body
"@!q52 bra MIS1_NEIGH_LOOP_4;\n\t" // Predicate on value of c
"atom.min.s32 q76, [q68], m101;\n\t" // Do min of node_value
"MIS1_NEIGH_LOOP_4:\n\t"
: // Outputs
: "l"(col_base_addr) // Inputs
);
}
for (; edge <= (row_end - 2); edge += 2) {
int * const col_base_addr = &col[edge];
asm volatile
(
".reg .s32 t1;\n\t" // Register for nid loaded from col
".reg .s32 t2;\n\t" // Register for nid loaded from col
".reg .u64 t17;\n\t" // Register for multiplied nid value as address
".reg .u64 t18;\n\t" // Register for multiplied nid value as address
".reg .u64 t25;\n\t" // Register for final address to load from c
".reg .u64 t26;\n\t" // Register for final address to load from c
".reg .s32 t33;\n\t" // Register for c
".reg .s32 t34;\n\t" // Register for c
".reg .u64 t65;\n\t" // Register for final address to load from node_value
".reg .u64 t66;\n\t" // Register for final address to load from node_value
".reg .s32 t73;\n\t" // Register for node_value
".reg .s32 t74;\n\t" // Register for node_value
".reg .pred t49;\n\t" // Register for c predicate
".reg .pred t50;\n\t" // Register for c predicate
"ld.s32 t1, [%0+0];\n\t" // Load nid
"ld.s32 t2, [%0+4];\n\t" // Load nid
"mul.wide.s32 t17, t1, 4;\n\t" // Multiply nid for address calculation
"mul.wide.s32 t18, t2, 4;\n\t" // Multiply nid for address calculation
"add.u64 t25, m99, t17;\n\t" // Final address calculation for c
"add.u64 t26, m99, t18;\n\t" // Final address calculation for c
"add.u64 t65, m100, t17;\n\t" // Final address calculation for node_value
"add.u64 t66, m100, t18;\n\t" // Final address calculation for node_value
"ld.s32 t33, [t25];\n\t" // Load c
"ld.s32 t34, [t26];\n\t" // Load c
"setp.eq.s32 t49, t33, mNOTPROCESSED;\n\t" // Value for predicate
"setp.eq.s32 t50, t34, mNOTPROCESSED;\n\t" // Value for predicate
"MIS1_PRED_BODY_2_1:\n\t" // Predicate body
"@!t49 bra MIS1_PRED_BODY_2_2;\n\t" // Predicate on value of c
"atom.min.s32 t73, [t65], m101;\n\t" // Do min of node_value
"MIS1_PRED_BODY_2_2:\n\t" // Predicate body
"@!t50 bra MIS1_NEIGH_LOOP_2;\n\t" // Predicate on value of c
"atom.min.s32 t74, [t66], m101;\n\t" // Do min of node_value
"MIS1_NEIGH_LOOP_2:\n\t"
: // Outputs
: "l"(col_base_addr) // Inputs
);
}
for (; edge < row_end; edge++) {
int * const col_base_addr = &col[edge];
asm volatile
(
".reg .s32 a1;\n\t" // Register for nid loaded from col
".reg .u64 a17;\n\t" // Register for multiplied nid value as address
".reg .u64 a25;\n\t" // Register for final address to load from c
".reg .s32 a33;\n\t" // Register for c
".reg .u64 a65;\n\t" // Register for final address to load from node_value
".reg .s32 a73;\n\t" // Register for node_value
".reg .pred a49;\n\t" // Register for s predicate
"ld.s32 a1, [%0+0];\n\t" // Load nid
"mul.wide.s32 a17, a1, 4;\n\t" // Multiply nid for s address calculation
"add.u64 a25, m99, a17;\n\t" // Final address calculation for c
"add.u64 a65, m100, a17;\n\t" // Final address calculation for node_value
"ld.s32 a33, [a25];\n\t" // Load c
"setp.eq.s32 a49, a33, mNOTPROCESSED;\n\t" // Value for predicate
"MIS1_PRED_BODY_1_1:\n\t" // Predicate body
"@!a49 bra MIS1_NEIGH_LOOP_1;\n\t" // Predicate on value of c
"atom.min.s32 a73, [a65], m101;\n\t" // Do min of node_value
"MIS1_NEIGH_LOOP_1:\n\t"
: // Outputs
: "l"(col_base_addr) // Inputs
);
}
*/
}
}
/*
if (tx == 0) {
__denovo_gpuEpilogue(SPECIAL_REGION);
__denovo_gpuEpilogue(READ_ONLY_REGION);
__denovo_gpuEpilogue(default_reg);
__denovo_gpuEpilogue(rel_reg);
}
*/
cont[tid] = local_cont;
}
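// mis1 reports progress through cont: each launched thread stores whether it
// found at least one NOT_PROCESSED vertex. A minimal host-side OR-reduction
// over those flags is sketched below; the helper name any_thread_continues and
// the assumption that cont holds one int per launched thread are hypothetical.
static bool any_thread_continues(const int *d_cont, int num_threads)
{
    int *h_cont = new int[num_threads];
    cudaMemcpy(h_cont, d_cont, num_threads * sizeof(int),
               cudaMemcpyDeviceToHost);
    bool again = false;
    for (int i = 0; i < num_threads; i++) {
        if (h_cont[i]) {       // some thread still saw unprocessed work
            again = true;
            break;
        }
    }
    delete[] h_cont;
    return again;
}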
/**
 * mis2 kernel
 * @param row csr pointer array
 * @param col csr column index array
 * @param node_value node value array
 * @param s_array set array
 * @param c_array status array
 * @param cu_array status update array
 * @param min_array minimum neighbor value array
 * @param num_gpu_nodes number of vertices
 * @param num_edges number of edges
 */
__global__ void mis2(int *row, int *col, int *node_value, int *s_array, int *c_array,
int *cu_array, int *min_array, int num_gpu_nodes, int num_edges)
{
const int tx = threadIdx.x;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
int edge = 0;
/*
asm volatile
(
".reg .s32 mINDEPENDENT;\n\t" // Register for s_array addr
".reg .s32 mNOTPROCESSED;\n\t" // Register for s_array addr
".reg .s32 mINACTIVE;\n\t" // Register for s_array addr
"mov.s32 mINDEPENDENT, %0;\n\t"
"mov.s32 mNOTPROCESSED, %1;\n\t"
"mov.s32 mINACTIVE, %2;\n\t"
: // Outputs
: "r"(INDEPENDENT), "r"(NOT_PROCESSED), "r"(INACTIVE) // Inputs
);
*/
/*
if (tx == 0) {
__denovo_setAcquireRegion(SPECIAL_REGION);
__denovo_addAcquireRegion(READ_ONLY_REGION);
__denovo_addAcquireRegion(default_reg);
__denovo_addAcquireRegion(rel_reg);
}
__syncthreads();
*/
for (; tid < num_gpu_nodes; tid += blockDim.x * gridDim.x) {
#ifdef SYNC
const int my_min_value = atomicAdd(&min_array[tid], 0);
#else
const int my_min_value = min_array[tid];
#endif
if (node_value[tid] <= my_min_value && c_array[tid] == NOT_PROCESSED) {
// -1: Not processed -2: Inactive 2: Independent set
// Put the item into the independent set
s_array[tid] = INDEPENDENT;
// Get the start and end pointers
const int row_start = row[tid];
const int row_end = row[tid + 1];
// Set the status to inactive
//cu_array[tid] = INACTIVE;
#ifdef SYNC
atomicExch(&cu_array[tid], INACTIVE);
#else
cu_array[tid] = INACTIVE;
#endif
/*
asm volatile
(
".reg .u64 m99;\n\t" // Register for c_array addr
".reg .u64 m100;\n\t" // Register for cu_array addr
"mov.u64 m99, %0;\n\t"
"mov.u64 m100, %1;\n\t"
: // Outputs
: "l"(c_array), "l"(cu_array) // Inputs
);
*/
// Mark all the neighbors inactive
for (int edge = row_start; edge < row_end; edge++) {
const int neighbor = col[edge];
if (c_array[neighbor] == NOT_PROCESSED) {
//use status update array to avoid race
//cu_array[neighbor] = INACTIVE;
#ifdef SYNC
atomicExch(&cu_array[neighbor], INACTIVE);
#else
cu_array[neighbor] = INACTIVE;
#endif
}
}
/*
for (edge = row_start; edge <= (row_end - 8); edge += 8) {
int * const col_base_addr = &col[edge];
asm volatile
(
".reg .s32 m1;\n\t" // Register for nid loaded from col
".reg .s32 m2;\n\t" // Register for nid loaded from col
".reg .s32 m3;\n\t" // Register for nid loaded from col
".reg .s32 m4;\n\t" // Register for nid loaded from col
".reg .s32 m5;\n\t" // Register for nid loaded from col
".reg .s32 m6;\n\t" // Register for nid loaded from col
".reg .s32 m7;\n\t" // Register for nid loaded from col
".reg .s32 m8;\n\t" // Register for nid loaded from col
".reg .u64 m17;\n\t" // Register for multiplied nid value as address
".reg .u64 m18;\n\t" // Register for multiplied nid value as address
".reg .u64 m19;\n\t" // Register for multiplied nid value as address
".reg .u64 m20;\n\t" // Register for multiplied nid value as address
".reg .u64 m21;\n\t" // Register for multiplied nid value as address
".reg .u64 m22;\n\t" // Register for multiplied nid value as address
".reg .u64 m23;\n\t" // Register for multiplied nid value as address
".reg .u64 m24;\n\t" // Register for multiplied nid value as address
".reg .u64 m25;\n\t" // Register for final address to load from c
".reg .u64 m26;\n\t" // Register for final address to load from c
".reg .u64 m27;\n\t" // Register for final address to load from c
".reg .u64 m28;\n\t" // Register for final address to load from c
".reg .u64 m29;\n\t" // Register for final address to load from c
".reg .u64 m30;\n\t" // Register for final address to load from c
".reg .u64 m31;\n\t" // Register for final address to load from c
".reg .u64 m32;\n\t" // Register for final address to load from c
".reg .s32 m33;\n\t" // Register for c
".reg .s32 m34;\n\t" // Register for c
".reg .s32 m35;\n\t" // Register for c
".reg .s32 m36;\n\t" // Register for c
".reg .s32 m37;\n\t" // Register for c
".reg .s32 m38;\n\t" // Register for c
".reg .s32 m39;\n\t" // Register for c
".reg .s32 m40;\n\t" // Register for c
".reg .u64 m65;\n\t" // Register for final address to load from cu
".reg .u64 m66;\n\t" // Register for final address to load from cu
".reg .u64 m67;\n\t" // Register for final address to load from cu
".reg .u64 m68;\n\t" // Register for final address to load from cu
".reg .u64 m69;\n\t" // Register for final address to load from cu
".reg .u64 m70;\n\t" // Register for final address to load from cu
".reg .u64 m71;\n\t" // Register for final address to load from cu
".reg .u64 m72;\n\t" // Register for final address to load from cu
".reg .b32 m73;\n\t" // Register for cu
".reg .b32 m74;\n\t" // Register for cu
".reg .b32 m75;\n\t" // Register for cu
".reg .b32 m76;\n\t" // Register for cu
".reg .b32 m77;\n\t" // Register for cu
".reg .b32 m78;\n\t" // Register for cu
".reg .b32 m79;\n\t" // Register for cu
".reg .b32 m80;\n\t" // Register for cu
".reg .pred m49;\n\t" // Register for c predicate
".reg .pred m50;\n\t" // Register for c predicate
".reg .pred m51;\n\t" // Register for c predicate
".reg .pred m52;\n\t" // Register for c predicate
".reg .pred m53;\n\t" // Register for c predicate
".reg .pred m54;\n\t" // Register for c predicate
".reg .pred m55;\n\t" // Register for c predicate
".reg .pred m56;\n\t" // Register for c predicate
"ld.s32 m1, [%0+0];\n\t" // Load nid
"ld.s32 m2, [%0+4];\n\t" // Load nid
"ld.s32 m3, [%0+8];\n\t" // Load nid
"ld.s32 m4, [%0+12];\n\t" // Load nid
"ld.s32 m5, [%0+16];\n\t" // Load nid
"ld.s32 m6, [%0+20];\n\t" // Load nid
"ld.s32 m7, [%0+24];\n\t" // Load nid
"ld.s32 m8, [%0+28];\n\t" // Load nid
"mul.wide.s32 m17, m1, 4;\n\t" // Multiply nid for address calculation
"mul.wide.s32 m18, m2, 4;\n\t" // Multiply nid for address calculation
"mul.wide.s32 m19, m3, 4;\n\t" // Multiply nid for address calculation
"mul.wide.s32 m20, m4, 4;\n\t" // Multiply nid for address calculation
"mul.wide.s32 m21, m5, 4;\n\t" // Multiply nid for address calculation
"mul.wide.s32 m22, m6, 4;\n\t" // Multiply nid for address calculation
"mul.wide.s32 m23, m7, 4;\n\t" // Multiply nid for address calculation
"mul.wide.s32 m24, m8, 4;\n\t" // Multiply nid for address calculation
"add.u64 m25, m99, m17;\n\t" // Final address calculation for c
"add.u64 m26, m99, m18;\n\t" // Final address calculation for c
"add.u64 m27, m99, m19;\n\t" // Final address calculation for c
"add.u64 m28, m99, m20;\n\t" // Final address calculation for c
"add.u64 m29, m99, m21;\n\t" // Final address calculation for c
"add.u64 m30, m99, m22;\n\t" // Final address calculation for c
"add.u64 m31, m99, m23;\n\t" // Final address calculation for c
"add.u64 m32, m99, m24;\n\t" // Final address calculation for c
"add.u64 m65, m100, m17;\n\t" // Final address calculation for cu
"add.u64 m66, m100, m18;\n\t" // Final address calculation for cu
"add.u64 m67, m100, m19;\n\t" // Final address calculation for cu
"add.u64 m68, m100, m20;\n\t" // Final address calculation for cu
"add.u64 m69, m100, m21;\n\t" // Final address calculation for cu
"add.u64 m70, m100, m22;\n\t" // Final address calculation for cu
"add.u64 m71, m100, m23;\n\t" // Final address calculation for cu
"add.u64 m72, m100, m24;\n\t" // Final address calculation for cu
"ld.s32 m33, [m25];\n\t" // Load c
"ld.s32 m34, [m26];\n\t" // Load c
"ld.s32 m35, [m27];\n\t" // Load c
"ld.s32 m36, [m28];\n\t" // Load c
"ld.s32 m37, [m29];\n\t" // Load c
"ld.s32 m38, [m30];\n\t" // Load c
"ld.s32 m39, [m31];\n\t" // Load c
"ld.s32 m40, [m32];\n\t" // Load c
"setp.eq.s32 m49, m33, mNOTPROCESSED;\n\t" // Value for predicate
"setp.eq.s32 m50, m34, mNOTPROCESSED;\n\t" // Value for predicate
"setp.eq.s32 m51, m35, mNOTPROCESSED;\n\t" // Value for predicate
"setp.eq.s32 m52, m36, mNOTPROCESSED;\n\t" // Value for predicate
"setp.eq.s32 m53, m37, mNOTPROCESSED;\n\t" // Value for predicate
"setp.eq.s32 m54, m38, mNOTPROCESSED;\n\t" // Value for predicate
"setp.eq.s32 m55, m39, mNOTPROCESSED;\n\t" // Value for predicate
"setp.eq.s32 m56, m40, mNOTPROCESSED;\n\t" // Value for predicate
"MIS2_PRED_BODY_8_1:\n\t" // Predicate body
"@!m49 bra MIS2_PRED_BODY_8_2;\n\t" // Predicate on value of c
"atom.or.b32 m73, [m65], mINACTIVE;\n\t" // Set cu
"MIS2_PRED_BODY_8_2:\n\t" // Predicate body
"@!m50 bra MIS2_PRED_BODY_8_3;\n\t" // Predicate on value of c
"atom.or.b32 m74, [m66], mINACTIVE;\n\t" // Set cu
"MIS2_PRED_BODY_8_3:\n\t" // Predicate body
"@!m51 bra MIS2_PRED_BODY_8_4;\n\t" // Predicate on value of c
"atom.or.b32 m75, [m67], mINACTIVE;\n\t" // Set cu
"MIS2_PRED_BODY_8_4:\n\t" // Predicate body
"@!m52 bra MIS2_PRED_BODY_8_5;\n\t" // Predicate on value of c
"atom.or.b32 m76, [m68], mINACTIVE;\n\t" // Set cu
"MIS2_PRED_BODY_8_5:\n\t" // Predicate body
"@!m53 bra MIS2_PRED_BODY_8_6;\n\t" // Predicate on value of c
"atom.or.b32 m77, [m69], mINACTIVE;\n\t" // Set cu
"MIS2_PRED_BODY_8_6:\n\t" // Predicate body
"@!m54 bra MIS2_PRED_BODY_8_7;\n\t" // Predicate on value of c
"atom.or.b32 m78, [m70], mINACTIVE;\n\t" // Set cu
"MIS2_PRED_BODY_8_7:\n\t" // Predicate body
"@!m55 bra MIS2_PRED_BODY_8_8;\n\t" // Predicate on value of c
"atom.or.b32 m79, [m71], mINACTIVE;\n\t" // Set cu
"MIS2_PRED_BODY_8_8:\n\t" // Predicate body
"@!m56 bra MIS2_NEIGH_LOOP_8;\n\t" // Predicate on value of c
"atom.or.b32 m80, [m72], mINACTIVE;\n\t" // Set cu
"MIS2_NEIGH_LOOP_8:\n\t"
: // Outputs
: "l"(col_base_addr) // Inputs
);
}
for (; edge <= (row_end - 4); edge += 4) {
int * const col_base_addr = &col[edge];
asm volatile
(
".reg .s32 q1;\n\t" // Register for nid loaded from col
".reg .s32 q2;\n\t" // Register for nid loaded from col
".reg .s32 q3;\n\t" // Register for nid loaded from col
".reg .s32 q4;\n\t" // Register for nid loaded from col
".reg .u64 q17;\n\t" // Register for multiplied nid value as address
".reg .u64 q18;\n\t" // Register for multiplied nid value as address
".reg .u64 q19;\n\t" // Register for multiplied nid value as address
".reg .u64 q20;\n\t" // Register for multiplied nid value as address
".reg .u64 q25;\n\t" // Register for final address to load from c
".reg .u64 q26;\n\t" // Register for final address to load from c
".reg .u64 q27;\n\t" // Register for final address to load from c
".reg .u64 q28;\n\t" // Register for final address to load from c
".reg .s32 q33;\n\t" // Register for c
".reg .s32 q34;\n\t" // Register for c
".reg .s32 q35;\n\t" // Register for c
".reg .s32 q36;\n\t" // Register for c
".reg .u64 q65;\n\t" // Register for final address to load from cu
".reg .u64 q66;\n\t" // Register for final address to load from cu
".reg .u64 q67;\n\t" // Register for final address to load from cu
".reg .u64 q68;\n\t" // Register for final address to load from cu
".reg .b32 q73;\n\t" // Register for cu
".reg .b32 q74;\n\t" // Register for cu
".reg .b32 q75;\n\t" // Register for cu
".reg .b32 q76;\n\t" // Register for cu
".reg .pred q49;\n\t" // Register for c predicate
".reg .pred q50;\n\t" // Register for c predicate
".reg .pred q51;\n\t" // Register for c predicate
".reg .pred q52;\n\t" // Register for c predicate
"ld.s32 q1, [%0+0];\n\t" // Load nid
"ld.s32 q2, [%0+4];\n\t" // Load nid
"ld.s32 q3, [%0+8];\n\t" // Load nid
"ld.s32 q4, [%0+12];\n\t" // Load nid
"mul.wide.s32 q17, q1, 4;\n\t" // Multiply nid for address calculation
"mul.wide.s32 q18, q2, 4;\n\t" // Multiply nid for address calculation
"mul.wide.s32 q19, q3, 4;\n\t" // Multiply nid for address calculation
"mul.wide.s32 q20, q4, 4;\n\t" // Multiply nid for address calculation
"add.u64 q25, m99, q17;\n\t" // Final address calculation for c
"add.u64 q26, m99, q18;\n\t" // Final address calculation for c
"add.u64 q27, m99, q19;\n\t" // Final address calculation for c
"add.u64 q28, m99, q20;\n\t" // Final address calculation for c
"add.u64 q65, m100, q17;\n\t" // Final address calculation for cu
"add.u64 q66, m100, q18;\n\t" // Final address calculation for cu
"add.u64 q67, m100, q19;\n\t" // Final address calculation for cu
"add.u64 q68, m100, q20;\n\t" // Final address calculation for cu
"ld.s32 q33, [q25];\n\t" // Load c
"ld.s32 q34, [q26];\n\t" // Load c
"ld.s32 q35, [q27];\n\t" // Load c
"ld.s32 q36, [q28];\n\t" // Load c
"setp.eq.s32 q49, q33, mNOTPROCESSED;\n\t" // Value for predicate
"setp.eq.s32 q50, q34, mNOTPROCESSED;\n\t" // Value for predicate
"setp.eq.s32 q51, q35, mNOTPROCESSED;\n\t" // Value for predicate
"setp.eq.s32 q52, q36, mNOTPROCESSED;\n\t" // Value for predicate
"MIS2_PRED_BODY_4_1:\n\t" // Predicate body
"@!q49 bra MIS2_PRED_BODY_4_2;\n\t" // Predicate on value of c
"atom.or.b32 q73, [q65], mINACTIVE;\n\t" // Set cu
"MIS2_PRED_BODY_4_2:\n\t" // Predicate body
"@!q50 bra MIS2_PRED_BODY_4_3;\n\t" // Predicate on value of c
"atom.or.b32 q74, [q66], mINACTIVE;\n\t" // Set cu
"MIS2_PRED_BODY_4_3:\n\t" // Predicate body
"@!q51 bra MIS2_PRED_BODY_4_4;\n\t" // Predicate on value of c
"atom.or.b32 q75, [q67], mINACTIVE;\n\t" // Set cu
"MIS2_PRED_BODY_4_4:\n\t" // Predicate body
"@!q52 bra MIS2_NEIGH_LOOP_4;\n\t" // Predicate on value of c
"atom.or.b32 q76, [q68], mINACTIVE;\n\t" // Set cu
"MIS2_NEIGH_LOOP_4:\n\t"
: // Outputs
: "l"(col_base_addr) // Inputs
);
}
for (; edge <= (row_end - 2); edge += 2) {
int * const col_base_addr = &col[edge];
asm volatile
(
".reg .s32 t1;\n\t" // Register for nid loaded from col
".reg .s32 t2;\n\t" // Register for nid loaded from col
".reg .u64 t17;\n\t" // Register for multiplied nid value as address
".reg .u64 t18;\n\t" // Register for multiplied nid value as address
".reg .u64 t25;\n\t" // Register for final address to load from c
".reg .u64 t26;\n\t" // Register for final address to load from c
".reg .s32 t33;\n\t" // Register for c
".reg .s32 t34;\n\t" // Register for c
".reg .u64 t65;\n\t" // Register for final address to load from cu
".reg .u64 t66;\n\t" // Register for final address to load from cu
".reg .b32 t73;\n\t" // Register for cu
".reg .b32 t74;\n\t" // Register for cu
".reg .pred t49;\n\t" // Register for c predicate
".reg .pred t50;\n\t" // Register for c predicate
"ld.s32 t1, [%0+0];\n\t" // Load nid
"ld.s32 t2, [%0+4];\n\t" // Load nid
"mul.wide.s32 t17, t1, 4;\n\t" // Multiply nid for address calculation
"mul.wide.s32 t18, t2, 4;\n\t" // Multiply nid for address calculation
"add.u64 t25, m99, t17;\n\t" // Final address calculation for c
"add.u64 t26, m99, t18;\n\t" // Final address calculation for c
"add.u64 t65, m100, t17;\n\t" // Final address calculation for cu
"add.u64 t66, m100, t18;\n\t" // Final address calculation for cu
"ld.s32 t33, [t25];\n\t" // Load c
"ld.s32 t34, [t26];\n\t" // Load c
"setp.eq.s32 t49, t33, mNOTPROCESSED;\n\t" // Value for predicate
"setp.eq.s32 t50, t34, mNOTPROCESSED;\n\t" // Value for predicate
"MIS2_PRED_BODY_2_1:\n\t" // Predicate body
"@!t49 bra MIS2_PRED_BODY_2_2;\n\t" // Predicate on value of c
"atom.or.b32 t73, [t65], mINACTIVE;\n\t" // Set cu
"MIS2_PRED_BODY_2_2:\n\t" // Predicate body
"@!t50 bra MIS2_NEIGH_LOOP_2;\n\t" // Predicate on value of c
"atom.or.b32 t74, [t66], mINACTIVE;\n\t" // Set cu
"MIS2_NEIGH_LOOP_2:\n\t"
: // Outputs
: "l"(col_base_addr) // Inputs
);
}
for (; edge < row_end; edge++) {
int * const col_base_addr = &col[edge];
asm volatile
(
".reg .s32 a1;\n\t" // Register for nid loaded from col
".reg .u64 a17;\n\t" // Register for multiplied nid value as address
".reg .u64 a25;\n\t" // Register for final address to load from c
".reg .s32 a33;\n\t" // Register for c
".reg .u64 a65;\n\t" // Register for final address to load from cu
".reg .b32 a73;\n\t" // Register for cu
".reg .pred a49;\n\t" // Register for s predicate
"ld.s32 a1, [%0+0];\n\t" // Load nid
"mul.wide.s32 a17, a1, 4;\n\t" // Multiply nid for address calculation
"add.u64 a25, m99, a17;\n\t" // Final address calculation for c
"add.u64 a65, m100, a17;\n\t" // Final address calculation for cu
"ld.s32 a33, [a25];\n\t" // Load c
"setp.eq.s32 a49, a33, mNOTPROCESSED;\n\t" // Value for predicate
"MIS2_PRED_BODY_1_1:\n\t" // Predicate body
"@!a49 bra MIS2_NEIGH_LOOP_1;\n\t" // Predicate on value of c
"atom.or.b32 a73, [a65], mINACTIVE;\n\t" // Set cu
"MIS2_NEIGH_LOOP_1:\n\t"
: // Outputs
: "l"(col_base_addr) // Inputs
);
}
*/
}
}
/*
if (tx == 0) {
__denovo_gpuEpilogue(SPECIAL_REGION);
__denovo_gpuEpilogue(READ_ONLY_REGION);
__denovo_gpuEpilogue(default_reg);
__denovo_gpuEpilogue(rel_reg);
}
*/
}
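// Note on the two-phase status update used by mis2/mis3: mis2 records INACTIVE
// in cu_array instead of writing c_array directly, so every thread in the same
// pass keeps seeing a consistent c_array snapshot when it tests
// c_array[neighbor] == NOT_PROCESSED; mis3 below then publishes cu_array into
// c_array and resets min_array for the vertices that remain active, so the
// next mis1 pass starts from a fresh minimum.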
/**
 * mis3 kernel
 * @param cu_array status update array
 * @param c_array status array
 * @param min_array minimum neighbor value array
 * @param num_gpu_nodes number of vertices
 */
__global__ void
mis3(int *cu_array, int *c_array, int *min_array, int num_gpu_nodes)
{
const int tx = threadIdx.x;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
/*
if (tx == 0) {
__denovo_setAcquireRegion(SPECIAL_REGION);
__denovo_addAcquireRegion(READ_ONLY_REGION);
__denovo_addAcquireRegion(default_reg);
__denovo_addAcquireRegion(rel_reg);
}
__syncthreads();
*/
for (; tid < num_gpu_nodes; tid += blockDim.x * gridDim.x) {
//set the status array
#ifdef SYNC
const int status = atomicAdd(&cu_array[tid], 0);
#else
const int status = cu_array[tid];
#endif
if (status == INACTIVE) {
c_array[tid] = status;
} else {
#ifdef SYNC
atomicExch(&min_array[tid], INT_MAX);
#else
min_array[tid] = INT_MAX;
#endif
}
}
/*
if (tx == 0) {
__denovo_gpuEpilogue(SPECIAL_REGION);
__denovo_gpuEpilogue(READ_ONLY_REGION);
__denovo_gpuEpilogue(default_reg);
__denovo_gpuEpilogue(rel_reg);
}
*/
}
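// Illustrative end-to-end driver (a sketch under stated assumptions, not part
// of the original file): it shows the usual ordering of the kernels above,
// with init run once and mis1/mis2/mis3 repeated until no thread reports
// further work through cont. The helper name run_mis, the block size, and the
// expectation that min_array was pre-initialized to INT_MAX on the host (the
// init kernel above does not touch it) are assumptions for this example.
static void run_mis(int *row, int *col, int *node_value, int *s_array,
                    int *c_array, int *cu_array, int *min_array, int *cont,
                    int num_nodes, int num_edges)
{
    const int block_size = 256;                                      // assumed
    const int grid_size = (num_nodes + block_size - 1) / block_size;
    const int num_threads = grid_size * block_size;                  // cont is assumed this large
    launch_init(s_array, c_array, cu_array, num_nodes, num_edges);   // sketch defined above
    bool again = true;
    while (again) {
        mis1<<<grid_size, block_size>>>(row, col, node_value, s_array, c_array,
                                        min_array, cont, num_nodes, num_edges);
        mis2<<<grid_size, block_size>>>(row, col, node_value, s_array, c_array,
                                        cu_array, min_array, num_nodes, num_edges);
        mis3<<<grid_size, block_size>>>(cu_array, c_array, min_array, num_nodes);
        again = any_thread_continues(cont, num_threads);             // sketch defined above
    }
}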
|
72c2364f5026437febc49451d81d20555a5ec30c.cu
|
/************************************************************************************\
* *
 * Copyright © 2014 Advanced Micro Devices, Inc.                                     *
* All rights reserved. *
* *
* Redistribution and use in source and binary forms, with or without *
* modification, are permitted provided that the following are met: *
* *
* You must reproduce the above copyright notice. *
* *
* Neither the name of the copyright holder nor the names of its contributors *
* may be used to endorse or promote products derived from this software *
* without specific, prior, written permission from at least the copyright holder. *
* *
* You must include the following terms in your license and/or other materials *
* provided with the software. *
* *
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" *
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE *
* IMPLIED WARRANTIES OF MERCHANTABILITY, NON-INFRINGEMENT, AND FITNESS FOR A *
* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER *
* OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, *
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT *
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS *
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN *
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING *
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY *
* OF SUCH DAMAGE. *
* *
* Without limiting the foregoing, the software may implement third party *
* technologies for which you must obtain licenses from parties other than AMD. *
* You agree that AMD has not obtained or conveyed to you, and that you shall *
* be responsible for obtaining the rights to use and/or distribute the applicable *
* underlying intellectual property rights related to the third party technologies. *
* These third party technologies are not licensed hereunder. *
* *
* If you use the software (in whole or in part), you shall adhere to all *
* applicable U.S., European, and other export laws, including but not limited to *
* the U.S. Export Administration Regulations ("EAR") (15 C.F.R Sections 730-774), *
* and E.U. Council Regulation (EC) No 428/2009 of 5 May 2009. Further, pursuant *
* to Section 740.6 of the EAR, you hereby certify that, except pursuant to a *
* license granted by the United States Department of Commerce Bureau of Industry *
* and Security or as otherwise permitted pursuant to a License Exception under *
* the U.S. Export Administration Regulations ("EAR"), you will not (1) export, *
* re-export or release to a national of a country in Country Groups D:1, E:1 or *
* E:2 any restricted technology, software, or source code you receive hereunder, *
* or (2) export to Country Groups D:1, E:1 or E:2 the direct product of such *
* technology or software, if such foreign produced direct product is subject to *
* national security controls as identified on the Commerce Control List (currently *
* found in Supplement 1 to Part 774 of EAR). For the most current Country Group *
* listings, or for additional information about the EAR or your obligations under *
* those regulations, please refer to the U.S. Bureau of Industry and Security's *
* website at http://www.bis.doc.gov/. *
* *
\************************************************************************************/
//#define BIGNUM 99999999
#define EPSILON 0.0000001
#define NOT_PROCESSED -1
#define INACTIVE -2
#define INDEPENDENT 2
/**
* init kernel
* @param s_array set array
* @param c_array status array
* @param cu_array status update array
* @param num_nodes number of vertices
* @param num_edges number of edges
*/
__global__ void
init(int *s_array, int *c_array, int *cu_array, int num_nodes, int num_edges)
{
// Get my workitem id
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < num_nodes) {
// Set the status array: not processed
c_array[tid] = -1;
cu_array[tid] = -1;
s_array[tid] = 0;
}
}
/**
 * mis1 kernel
 * @param row csr pointer array
 * @param col csr column index array
 * @param node_value node value array
 * @param s_array set array
 * @param c_array node status array
 * @param min_array minimum neighbor value array
 * @param cont per-thread continuation flag array
 * @param num_gpu_nodes number of vertices
 * @param num_edges number of edges
 */
__global__ void
mis1(int *row, int *col, int *node_value, int *s_array, int *c_array,
int *min_array, int *cont, int num_gpu_nodes, int num_edges
)
{
const int tx = threadIdx.x;
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
int my_task = tid;
int edge = 0;
bool local_cont = false;
/*
asm volatile
(
".reg .s32 mINDEPENDENT;\n\t" // Register for s_array addr
".reg .s32 mNOTPROCESSED;\n\t" // Register for s_array addr
".reg .s32 mINACTIVE;\n\t" // Register for s_array addr
"mov.s32 mINDEPENDENT, %0;\n\t"
"mov.s32 mNOTPROCESSED, %1;\n\t"
"mov.s32 mINACTIVE, %2;\n\t"
: // Outputs
: "r"(INDEPENDENT), "r"(NOT_PROCESSED), "r"(INACTIVE) // Inputs
);
*/
/*
if (tx == 0) {
__denovo_setAcquireRegion(SPECIAL_REGION);
__denovo_addAcquireRegion(READ_ONLY_REGION);
__denovo_addAcquireRegion(default_reg);
__denovo_addAcquireRegion(rel_reg);
}
__syncthreads();
*/
for (; my_task < num_gpu_nodes; my_task += blockDim.x * gridDim.x) {
// If the vertex is not processed
if (c_array[my_task] == NOT_PROCESSED) {
local_cont = true;
// Get the start and end pointers
const int row_start = row[my_task];
const int row_end = row[my_task + 1];
const int my_node_value = node_value[my_task];
/*
asm volatile
(
".reg .u64 m99;\n\t" // Register for c_array addr
".reg .u64 m100;\n\t" // Register for node_value addr
".reg .s32 m101;\n\t" // Register for my_node_value
"mov.u64 m99, %0;\n\t"
"mov.u64 m100, %1;\n\t"
"mov.s32 m101, %2;\n\t"
: // Outputs
: "l"(c_array), "l"(min_array), "r"(my_node_value) // Inputs
);
*/
// Navigate the neighbor list and find the min
for (int edge = row_start; edge < row_end; edge++) {
const int neighbor = col[edge];
if (c_array[neighbor] == NOT_PROCESSED) {
#ifdef SYNC
atomicMin(&min_array[neighbor], my_node_value);
#else
min_array[neighbor] = (min_array[neighbor] > my_node_value) ? my_node_value : min_array[neighbor];
#endif
}
}
/*
for (edge = row_start; edge <= (row_end - 8); edge += 8) {
int * const col_base_addr = &col[edge];
asm volatile
(
".reg .s32 m1;\n\t" // Register for nid loaded from col
".reg .s32 m2;\n\t" // Register for nid loaded from col
".reg .s32 m3;\n\t" // Register for nid loaded from col
".reg .s32 m4;\n\t" // Register for nid loaded from col
".reg .s32 m5;\n\t" // Register for nid loaded from col
".reg .s32 m6;\n\t" // Register for nid loaded from col
".reg .s32 m7;\n\t" // Register for nid loaded from col
".reg .s32 m8;\n\t" // Register for nid loaded from col
".reg .u64 m17;\n\t" // Register for multiplied nid value as address
".reg .u64 m18;\n\t" // Register for multiplied nid value as address
".reg .u64 m19;\n\t" // Register for multiplied nid value as address
".reg .u64 m20;\n\t" // Register for multiplied nid value as address
".reg .u64 m21;\n\t" // Register for multiplied nid value as address
".reg .u64 m22;\n\t" // Register for multiplied nid value as address
".reg .u64 m23;\n\t" // Register for multiplied nid value as address
".reg .u64 m24;\n\t" // Register for multiplied nid value as address
".reg .u64 m25;\n\t" // Register for final address to load from c
".reg .u64 m26;\n\t" // Register for final address to load from c
".reg .u64 m27;\n\t" // Register for final address to load from c
".reg .u64 m28;\n\t" // Register for final address to load from c
".reg .u64 m29;\n\t" // Register for final address to load from c
".reg .u64 m30;\n\t" // Register for final address to load from c
".reg .u64 m31;\n\t" // Register for final address to load from c
".reg .u64 m32;\n\t" // Register for final address to load from c
".reg .s32 m33;\n\t" // Register for c
".reg .s32 m34;\n\t" // Register for c
".reg .s32 m35;\n\t" // Register for c
".reg .s32 m36;\n\t" // Register for c
".reg .s32 m37;\n\t" // Register for c
".reg .s32 m38;\n\t" // Register for c
".reg .s32 m39;\n\t" // Register for c
".reg .s32 m40;\n\t" // Register for c
".reg .u64 m65;\n\t" // Register for final address to load from min_value
".reg .u64 m66;\n\t" // Register for final address to load from min_value
".reg .u64 m67;\n\t" // Register for final address to load from min_value
".reg .u64 m68;\n\t" // Register for final address to load from min_value
".reg .u64 m69;\n\t" // Register for final address to load from min_value
".reg .u64 m70;\n\t" // Register for final address to load from min_value
".reg .u64 m71;\n\t" // Register for final address to load from min_value
".reg .u64 m72;\n\t" // Register for final address to load from min_value
".reg .s32 m73;\n\t" // Register for min_value
".reg .s32 m74;\n\t" // Register for min_value
".reg .s32 m75;\n\t" // Register for min_value
".reg .s32 m76;\n\t" // Register for min_value
".reg .s32 m77;\n\t" // Register for min_value
".reg .s32 m78;\n\t" // Register for min_value
".reg .s32 m79;\n\t" // Register for min_value
".reg .s32 m80;\n\t" // Register for min_value
".reg .pred m49;\n\t" // Register for c predicate
".reg .pred m50;\n\t" // Register for c predicate
".reg .pred m51;\n\t" // Register for c predicate
".reg .pred m52;\n\t" // Register for c predicate
".reg .pred m53;\n\t" // Register for c predicate
".reg .pred m54;\n\t" // Register for c predicate
".reg .pred m55;\n\t" // Register for c predicate
".reg .pred m56;\n\t" // Register for c predicate
"ld.s32 m1, [%0+0];\n\t" // Load nid
"ld.s32 m2, [%0+4];\n\t" // Load nid
"ld.s32 m3, [%0+8];\n\t" // Load nid
"ld.s32 m4, [%0+12];\n\t" // Load nid
"ld.s32 m5, [%0+16];\n\t" // Load nid
"ld.s32 m6, [%0+20];\n\t" // Load nid
"ld.s32 m7, [%0+24];\n\t" // Load nid
"ld.s32 m8, [%0+28];\n\t" // Load nid
"mul.wide.s32 m17, m1, 4;\n\t" // Multiply nid for address calculation
"mul.wide.s32 m18, m2, 4;\n\t" // Multiply nid for address calculation
"mul.wide.s32 m19, m3, 4;\n\t" // Multiply nid for address calculation
"mul.wide.s32 m20, m4, 4;\n\t" // Multiply nid for address calculation
"mul.wide.s32 m21, m5, 4;\n\t" // Multiply nid for address calculation
"mul.wide.s32 m22, m6, 4;\n\t" // Multiply nid for address calculation
"mul.wide.s32 m23, m7, 4;\n\t" // Multiply nid for address calculation
"mul.wide.s32 m24, m8, 4;\n\t" // Multiply nid for address calculation
"add.u64 m25, m99, m17;\n\t" // Final address calculation for c
"add.u64 m26, m99, m18;\n\t" // Final address calculation for c
"add.u64 m27, m99, m19;\n\t" // Final address calculation for c
"add.u64 m28, m99, m20;\n\t" // Final address calculation for c
"add.u64 m29, m99, m21;\n\t" // Final address calculation for c
"add.u64 m30, m99, m22;\n\t" // Final address calculation for c
"add.u64 m31, m99, m23;\n\t" // Final address calculation for c
"add.u64 m32, m99, m24;\n\t" // Final address calculation for c
"add.u64 m65, m100, m17;\n\t" // Final address calculation for min_value
"add.u64 m66, m100, m18;\n\t" // Final address calculation for min_value
"add.u64 m67, m100, m19;\n\t" // Final address calculation for min_value
"add.u64 m68, m100, m20;\n\t" // Final address calculation for min_value
"add.u64 m69, m100, m21;\n\t" // Final address calculation for min_value
"add.u64 m70, m100, m22;\n\t" // Final address calculation for min_value
"add.u64 m71, m100, m23;\n\t" // Final address calculation for min_value
"add.u64 m72, m100, m24;\n\t" // Final address calculation for min_value
"ld.s32 m33, [m25];\n\t" // Load c
"ld.s32 m34, [m26];\n\t" // Load c
"ld.s32 m35, [m27];\n\t" // Load c
"ld.s32 m36, [m28];\n\t" // Load c
"ld.s32 m37, [m29];\n\t" // Load c
"ld.s32 m38, [m30];\n\t" // Load c
"ld.s32 m39, [m31];\n\t" // Load c
"ld.s32 m40, [m32];\n\t" // Load c
"setp.eq.s32 m49, m33, mNOTPROCESSED;\n\t" // Value for predicate
"setp.eq.s32 m50, m34, mNOTPROCESSED;\n\t" // Value for predicate
"setp.eq.s32 m51, m35, mNOTPROCESSED;\n\t" // Value for predicate
"setp.eq.s32 m52, m36, mNOTPROCESSED;\n\t" // Value for predicate
"setp.eq.s32 m53, m37, mNOTPROCESSED;\n\t" // Value for predicate
"setp.eq.s32 m54, m38, mNOTPROCESSED;\n\t" // Value for predicate
"setp.eq.s32 m55, m39, mNOTPROCESSED;\n\t" // Value for predicate
"setp.eq.s32 m56, m40, mNOTPROCESSED;\n\t" // Value for predicate
"MIS1_PRED_BODY_8_1:\n\t" // Predicate body
"@!m49 bra MIS1_PRED_BODY_8_2;\n\t" // Predicate on value of c
"atom.min.s32 m73, [m65], m101;\n\t" // Do min of node_value
"MIS1_PRED_BODY_8_2:\n\t" // Predicate body
"@!m50 bra MIS1_PRED_BODY_8_3;\n\t" // Predicate on value of c
"atom.min.s32 m74, [m66], m101;\n\t" // Do min of node_value
"MIS1_PRED_BODY_8_3:\n\t" // Predicate body
"@!m51 bra MIS1_PRED_BODY_8_4;\n\t" // Predicate on value of c
"atom.min.s32 m75, [m67], m101;\n\t" // Do min of node_value
"MIS1_PRED_BODY_8_4:\n\t" // Predicate body
"@!m52 bra MIS1_PRED_BODY_8_5;\n\t" // Predicate on value of c
"atom.min.s32 m76, [m68], m101;\n\t" // Do min of node_value
"MIS1_PRED_BODY_8_5:\n\t" // Predicate body
"@!m53 bra MIS1_PRED_BODY_8_6;\n\t" // Predicate on value of c
"atom.min.s32 m77, [m69], m101;\n\t" // Do min of node_value
"MIS1_PRED_BODY_8_6:\n\t" // Predicate body
"@!m54 bra MIS1_PRED_BODY_8_7;\n\t" // Predicate on value of c
"atom.min.s32 m78, [m70], m101;\n\t" // Do min of node_value
"MIS1_PRED_BODY_8_7:\n\t" // Predicate body
"@!m55 bra MIS1_PRED_BODY_8_8;\n\t" // Predicate on value of c
"atom.min.s32 m79, [m71], m101;\n\t" // Do min of node_value
"MIS1_PRED_BODY_8_8:\n\t" // Predicate body
"@!m56 bra MIS1_NEIGH_LOOP_8;\n\t" // Predicate on value of c
"atom.min.s32 m80, [m72], m101;\n\t" // Do min of node_value
"MIS1_NEIGH_LOOP_8:\n\t"
: // Outputs
: "l"(col_base_addr) // Inputs
);
}
for (; edge <= (row_end - 4); edge += 4) {
int * const col_base_addr = &col[edge];
asm volatile
(
".reg .s32 q1;\n\t" // Register for nid loaded from col
".reg .s32 q2;\n\t" // Register for nid loaded from col
".reg .s32 q3;\n\t" // Register for nid loaded from col
".reg .s32 q4;\n\t" // Register for nid loaded from col
".reg .u64 q17;\n\t" // Register for multiplied nid value as address
".reg .u64 q18;\n\t" // Register for multiplied nid value as address
".reg .u64 q19;\n\t" // Register for multiplied nid value as address
".reg .u64 q20;\n\t" // Register for multiplied nid value as address
".reg .u64 q25;\n\t" // Register for final address to load from c
".reg .u64 q26;\n\t" // Register for final address to load from c
".reg .u64 q27;\n\t" // Register for final address to load from c
".reg .u64 q28;\n\t" // Register for final address to load from c
".reg .s32 q33;\n\t" // Register for c
".reg .s32 q34;\n\t" // Register for c
".reg .s32 q35;\n\t" // Register for c
".reg .s32 q36;\n\t" // Register for c
".reg .u64 q65;\n\t" // Register for final address to load from min_value
".reg .u64 q66;\n\t" // Register for final address to load from min_value
".reg .u64 q67;\n\t" // Register for final address to load from min_value
".reg .u64 q68;\n\t" // Register for final address to load from min_value
".reg .s32 q73;\n\t" // Register for node_value
".reg .s32 q74;\n\t" // Register for node_value
".reg .s32 q75;\n\t" // Register for node_value
".reg .s32 q76;\n\t" // Register for node_value
".reg .pred q49;\n\t" // Register for c predicate
".reg .pred q50;\n\t" // Register for c predicate
".reg .pred q51;\n\t" // Register for c predicate
".reg .pred q52;\n\t" // Register for c predicate
"ld.s32 q1, [%0+0];\n\t" // Load nid
"ld.s32 q2, [%0+4];\n\t" // Load nid
"ld.s32 q3, [%0+8];\n\t" // Load nid
"ld.s32 q4, [%0+12];\n\t" // Load nid
"mul.wide.s32 q17, q1, 4;\n\t" // Multiply nid for address calculation
"mul.wide.s32 q18, q2, 4;\n\t" // Multiply nid for address calculation
"mul.wide.s32 q19, q3, 4;\n\t" // Multiply nid for address calculation
"mul.wide.s32 q20, q4, 4;\n\t" // Multiply nid for address calculation
"add.u64 q25, m99, q17;\n\t" // Final address calculation for c
"add.u64 q26, m99, q18;\n\t" // Final address calculation for c
"add.u64 q27, m99, q19;\n\t" // Final address calculation for c
"add.u64 q28, m99, q20;\n\t" // Final address calculation for c
"add.u64 q65, m100, q17;\n\t" // Final address calculation for node_value
"add.u64 q66, m100, q18;\n\t" // Final address calculation for node_value
"add.u64 q67, m100, q19;\n\t" // Final address calculation for node_value
"add.u64 q68, m100, q20;\n\t" // Final address calculation for node_value
"ld.s32 q33, [q25];\n\t" // Load c
"ld.s32 q34, [q26];\n\t" // Load c
"ld.s32 q35, [q27];\n\t" // Load c
"ld.s32 q36, [q28];\n\t" // Load c
"setp.eq.s32 q49, q33, mNOTPROCESSED;\n\t" // Value for predicate
"setp.eq.s32 q50, q34, mNOTPROCESSED;\n\t" // Value for predicate
"setp.eq.s32 q51, q35, mNOTPROCESSED;\n\t" // Value for predicate
"setp.eq.s32 q52, q36, mNOTPROCESSED;\n\t" // Value for predicate
"MIS1_PRED_BODY_4_1:\n\t" // Predicate body
"@!q49 bra MIS1_PRED_BODY_4_2;\n\t" // Predicate on value of c
"atom.min.s32 q73, [q65], m101;\n\t" // Do min of node_value
"MIS1_PRED_BODY_4_2:\n\t" // Predicate body
"@!q50 bra MIS1_PRED_BODY_4_3;\n\t" // Predicate on value of c
"atom.min.s32 q74, [q66], m101;\n\t" // Do min of node_value
"MIS1_PRED_BODY_4_3:\n\t" // Predicate body
"@!q51 bra MIS1_PRED_BODY_4_4;\n\t" // Predicate on value of c
"atom.min.s32 q75, [q67], m101;\n\t" // Do min of node_value
"MIS1_PRED_BODY_4_4:\n\t" // Predicate body
"@!q52 bra MIS1_NEIGH_LOOP_4;\n\t" // Predicate on value of c
"atom.min.s32 q76, [q68], m101;\n\t" // Do min of node_value
"MIS1_NEIGH_LOOP_4:\n\t"
: // Outputs
: "l"(col_base_addr) // Inputs
);
}
for (; edge <= (row_end - 2); edge += 2) {
int * const col_base_addr = &col[edge];
asm volatile
(
".reg .s32 t1;\n\t" // Register for nid loaded from col
".reg .s32 t2;\n\t" // Register for nid loaded from col
".reg .u64 t17;\n\t" // Register for multiplied nid value as address
".reg .u64 t18;\n\t" // Register for multiplied nid value as address
".reg .u64 t25;\n\t" // Register for final address to load from c
".reg .u64 t26;\n\t" // Register for final address to load from c
".reg .s32 t33;\n\t" // Register for c
".reg .s32 t34;\n\t" // Register for c
".reg .u64 t65;\n\t" // Register for final address to load from node_value
".reg .u64 t66;\n\t" // Register for final address to load from node_value
".reg .s32 t73;\n\t" // Register for node_value
".reg .s32 t74;\n\t" // Register for node_value
".reg .pred t49;\n\t" // Register for c predicate
".reg .pred t50;\n\t" // Register for c predicate
"ld.s32 t1, [%0+0];\n\t" // Load nid
"ld.s32 t2, [%0+4];\n\t" // Load nid
"mul.wide.s32 t17, t1, 4;\n\t" // Multiply nid for address calculation
"mul.wide.s32 t18, t2, 4;\n\t" // Multiply nid for address calculation
"add.u64 t25, m99, t17;\n\t" // Final address calculation for c
"add.u64 t26, m99, t18;\n\t" // Final address calculation for c
"add.u64 t65, m100, t17;\n\t" // Final address calculation for node_value
"add.u64 t66, m100, t18;\n\t" // Final address calculation for node_value
"ld.s32 t33, [t25];\n\t" // Load c
"ld.s32 t34, [t26];\n\t" // Load c
"setp.eq.s32 t49, t33, mNOTPROCESSED;\n\t" // Value for predicate
"setp.eq.s32 t50, t34, mNOTPROCESSED;\n\t" // Value for predicate
"MIS1_PRED_BODY_2_1:\n\t" // Predicate body
"@!t49 bra MIS1_PRED_BODY_2_2;\n\t" // Predicate on value of c
"atom.min.s32 t73, [t65], m101;\n\t" // Do min of node_value
"MIS1_PRED_BODY_2_2:\n\t" // Predicate body
"@!t50 bra MIS1_NEIGH_LOOP_2;\n\t" // Predicate on value of c
"atom.min.s32 t74, [t66], m101;\n\t" // Do min of node_value
"MIS1_NEIGH_LOOP_2:\n\t"
: // Outputs
: "l"(col_base_addr) // Inputs
);
}
for (; edge < row_end; edge++) {
int * const col_base_addr = &col[edge];
asm volatile
(
".reg .s32 a1;\n\t" // Register for nid loaded from col
".reg .u64 a17;\n\t" // Register for multiplied nid value as address
".reg .u64 a25;\n\t" // Register for final address to load from c
".reg .s32 a33;\n\t" // Register for c
".reg .u64 a65;\n\t" // Register for final address to load from node_value
".reg .s32 a73;\n\t" // Register for node_value
".reg .pred a49;\n\t" // Register for s predicate
"ld.s32 a1, [%0+0];\n\t" // Load nid
"mul.wide.s32 a17, a1, 4;\n\t" // Multiply nid for s address calculation
"add.u64 a25, m99, a17;\n\t" // Final address calculation for c
"add.u64 a65, m100, a17;\n\t" // Final address calculation for node_value
"ld.s32 a33, [a25];\n\t" // Load c
"setp.eq.s32 a49, a33, mNOTPROCESSED;\n\t" // Value for predicate
"MIS1_PRED_BODY_1_1:\n\t" // Predicate body
"@!a49 bra MIS1_NEIGH_LOOP_1;\n\t" // Predicate on value of c
"atom.min.s32 a73, [a65], m101;\n\t" // Do min of node_value
"MIS1_NEIGH_LOOP_1:\n\t"
: // Outputs
: "l"(col_base_addr) // Inputs
);
}
*/
}
}
/*
if (tx == 0) {
__denovo_gpuEpilogue(SPECIAL_REGION);
__denovo_gpuEpilogue(READ_ONLY_REGION);
__denovo_gpuEpilogue(default_reg);
__denovo_gpuEpilogue(rel_reg);
}
*/
cont[tid] = local_cont;
}
/**
 * mis2 kernel
 * @param row csr pointer array
 * @param col csr column index array
 * @param node_value node value array
 * @param s_array set array
 * @param c_array status array
 * @param cu_array status update array
 * @param min_array minimum neighbor value array
 * @param num_gpu_nodes number of vertices
 * @param num_edges number of edges
 */
__global__ void mis2(int *row, int *col, int *node_value, int *s_array, int *c_array,
int *cu_array, int *min_array, int num_gpu_nodes, int num_edges)
{
const int tx = threadIdx.x;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
int edge = 0;
/*
asm volatile
(
".reg .s32 mINDEPENDENT;\n\t" // Register for s_array addr
".reg .s32 mNOTPROCESSED;\n\t" // Register for s_array addr
".reg .s32 mINACTIVE;\n\t" // Register for s_array addr
"mov.s32 mINDEPENDENT, %0;\n\t"
"mov.s32 mNOTPROCESSED, %1;\n\t"
"mov.s32 mINACTIVE, %2;\n\t"
: // Outputs
: "r"(INDEPENDENT), "r"(NOT_PROCESSED), "r"(INACTIVE) // Inputs
);
*/
/*
if (tx == 0) {
__denovo_setAcquireRegion(SPECIAL_REGION);
__denovo_addAcquireRegion(READ_ONLY_REGION);
__denovo_addAcquireRegion(default_reg);
__denovo_addAcquireRegion(rel_reg);
}
__syncthreads();
*/
for (; tid < num_gpu_nodes; tid += blockDim.x * gridDim.x) {
#ifdef SYNC
const int my_min_value = atomicAdd(&min_array[tid], 0);
#else
const int my_min_value = min_array[tid];
#endif
if (node_value[tid] <= my_min_value && c_array[tid] == NOT_PROCESSED) {
// -1: Not processed -2: Inactive 2: Independent set
// Put the item into the independent set
s_array[tid] = INDEPENDENT;
// Get the start and end pointers
const int row_start = row[tid];
const int row_end = row[tid + 1];
// Set the status to inactive
//cu_array[tid] = INACTIVE;
#ifdef SYNC
atomicExch(&cu_array[tid], INACTIVE);
#else
cu_array[tid] = INACTIVE;
#endif
/*
asm volatile
(
".reg .u64 m99;\n\t" // Register for c_array addr
".reg .u64 m100;\n\t" // Register for cu_array addr
"mov.u64 m99, %0;\n\t"
"mov.u64 m100, %1;\n\t"
: // Outputs
: "l"(c_array), "l"(cu_array) // Inputs
);
*/
// Mark all the neighbors inactive
for (int edge = row_start; edge < row_end; edge++) {
const int neighbor = col[edge];
if (c_array[neighbor] == NOT_PROCESSED) {
//use status update array to avoid race
//cu_array[neighbor] = INACTIVE;
#ifdef SYNC
atomicExch(&cu_array[neighbor], INACTIVE);
#else
cu_array[neighbor] = INACTIVE;
#endif
}
}
/*
for (edge = row_start; edge <= (row_end - 8); edge += 8) {
int * const col_base_addr = &col[edge];
asm volatile
(
".reg .s32 m1;\n\t" // Register for nid loaded from col
".reg .s32 m2;\n\t" // Register for nid loaded from col
".reg .s32 m3;\n\t" // Register for nid loaded from col
".reg .s32 m4;\n\t" // Register for nid loaded from col
".reg .s32 m5;\n\t" // Register for nid loaded from col
".reg .s32 m6;\n\t" // Register for nid loaded from col
".reg .s32 m7;\n\t" // Register for nid loaded from col
".reg .s32 m8;\n\t" // Register for nid loaded from col
".reg .u64 m17;\n\t" // Register for multiplied nid value as address
".reg .u64 m18;\n\t" // Register for multiplied nid value as address
".reg .u64 m19;\n\t" // Register for multiplied nid value as address
".reg .u64 m20;\n\t" // Register for multiplied nid value as address
".reg .u64 m21;\n\t" // Register for multiplied nid value as address
".reg .u64 m22;\n\t" // Register for multiplied nid value as address
".reg .u64 m23;\n\t" // Register for multiplied nid value as address
".reg .u64 m24;\n\t" // Register for multiplied nid value as address
".reg .u64 m25;\n\t" // Register for final address to load from c
".reg .u64 m26;\n\t" // Register for final address to load from c
".reg .u64 m27;\n\t" // Register for final address to load from c
".reg .u64 m28;\n\t" // Register for final address to load from c
".reg .u64 m29;\n\t" // Register for final address to load from c
".reg .u64 m30;\n\t" // Register for final address to load from c
".reg .u64 m31;\n\t" // Register for final address to load from c
".reg .u64 m32;\n\t" // Register for final address to load from c
".reg .s32 m33;\n\t" // Register for c
".reg .s32 m34;\n\t" // Register for c
".reg .s32 m35;\n\t" // Register for c
".reg .s32 m36;\n\t" // Register for c
".reg .s32 m37;\n\t" // Register for c
".reg .s32 m38;\n\t" // Register for c
".reg .s32 m39;\n\t" // Register for c
".reg .s32 m40;\n\t" // Register for c
".reg .u64 m65;\n\t" // Register for final address to load from cu
".reg .u64 m66;\n\t" // Register for final address to load from cu
".reg .u64 m67;\n\t" // Register for final address to load from cu
".reg .u64 m68;\n\t" // Register for final address to load from cu
".reg .u64 m69;\n\t" // Register for final address to load from cu
".reg .u64 m70;\n\t" // Register for final address to load from cu
".reg .u64 m71;\n\t" // Register for final address to load from cu
".reg .u64 m72;\n\t" // Register for final address to load from cu
".reg .b32 m73;\n\t" // Register for cu
".reg .b32 m74;\n\t" // Register for cu
".reg .b32 m75;\n\t" // Register for cu
".reg .b32 m76;\n\t" // Register for cu
".reg .b32 m77;\n\t" // Register for cu
".reg .b32 m78;\n\t" // Register for cu
".reg .b32 m79;\n\t" // Register for cu
".reg .b32 m80;\n\t" // Register for cu
".reg .pred m49;\n\t" // Register for c predicate
".reg .pred m50;\n\t" // Register for c predicate
".reg .pred m51;\n\t" // Register for c predicate
".reg .pred m52;\n\t" // Register for c predicate
".reg .pred m53;\n\t" // Register for c predicate
".reg .pred m54;\n\t" // Register for c predicate
".reg .pred m55;\n\t" // Register for c predicate
".reg .pred m56;\n\t" // Register for c predicate
"ld.s32 m1, [%0+0];\n\t" // Load nid
"ld.s32 m2, [%0+4];\n\t" // Load nid
"ld.s32 m3, [%0+8];\n\t" // Load nid
"ld.s32 m4, [%0+12];\n\t" // Load nid
"ld.s32 m5, [%0+16];\n\t" // Load nid
"ld.s32 m6, [%0+20];\n\t" // Load nid
"ld.s32 m7, [%0+24];\n\t" // Load nid
"ld.s32 m8, [%0+28];\n\t" // Load nid
"mul.wide.s32 m17, m1, 4;\n\t" // Multiply nid for address calculation
"mul.wide.s32 m18, m2, 4;\n\t" // Multiply nid for address calculation
"mul.wide.s32 m19, m3, 4;\n\t" // Multiply nid for address calculation
"mul.wide.s32 m20, m4, 4;\n\t" // Multiply nid for address calculation
"mul.wide.s32 m21, m5, 4;\n\t" // Multiply nid for address calculation
"mul.wide.s32 m22, m6, 4;\n\t" // Multiply nid for address calculation
"mul.wide.s32 m23, m7, 4;\n\t" // Multiply nid for address calculation
"mul.wide.s32 m24, m8, 4;\n\t" // Multiply nid for address calculation
"add.u64 m25, m99, m17;\n\t" // Final address calculation for c
"add.u64 m26, m99, m18;\n\t" // Final address calculation for c
"add.u64 m27, m99, m19;\n\t" // Final address calculation for c
"add.u64 m28, m99, m20;\n\t" // Final address calculation for c
"add.u64 m29, m99, m21;\n\t" // Final address calculation for c
"add.u64 m30, m99, m22;\n\t" // Final address calculation for c
"add.u64 m31, m99, m23;\n\t" // Final address calculation for c
"add.u64 m32, m99, m24;\n\t" // Final address calculation for c
"add.u64 m65, m100, m17;\n\t" // Final address calculation for cu
"add.u64 m66, m100, m18;\n\t" // Final address calculation for cu
"add.u64 m67, m100, m19;\n\t" // Final address calculation for cu
"add.u64 m68, m100, m20;\n\t" // Final address calculation for cu
"add.u64 m69, m100, m21;\n\t" // Final address calculation for cu
"add.u64 m70, m100, m22;\n\t" // Final address calculation for cu
"add.u64 m71, m100, m23;\n\t" // Final address calculation for cu
"add.u64 m72, m100, m24;\n\t" // Final address calculation for cu
"ld.s32 m33, [m25];\n\t" // Load c
"ld.s32 m34, [m26];\n\t" // Load c
"ld.s32 m35, [m27];\n\t" // Load c
"ld.s32 m36, [m28];\n\t" // Load c
"ld.s32 m37, [m29];\n\t" // Load c
"ld.s32 m38, [m30];\n\t" // Load c
"ld.s32 m39, [m31];\n\t" // Load c
"ld.s32 m40, [m32];\n\t" // Load c
"setp.eq.s32 m49, m33, mNOTPROCESSED;\n\t" // Value for predicate
"setp.eq.s32 m50, m34, mNOTPROCESSED;\n\t" // Value for predicate
"setp.eq.s32 m51, m35, mNOTPROCESSED;\n\t" // Value for predicate
"setp.eq.s32 m52, m36, mNOTPROCESSED;\n\t" // Value for predicate
"setp.eq.s32 m53, m37, mNOTPROCESSED;\n\t" // Value for predicate
"setp.eq.s32 m54, m38, mNOTPROCESSED;\n\t" // Value for predicate
"setp.eq.s32 m55, m39, mNOTPROCESSED;\n\t" // Value for predicate
"setp.eq.s32 m56, m40, mNOTPROCESSED;\n\t" // Value for predicate
"MIS2_PRED_BODY_8_1:\n\t" // Predicate body
"@!m49 bra MIS2_PRED_BODY_8_2;\n\t" // Predicate on value of c
"atom.or.b32 m73, [m65], mINACTIVE;\n\t" // Set cu
"MIS2_PRED_BODY_8_2:\n\t" // Predicate body
"@!m50 bra MIS2_PRED_BODY_8_3;\n\t" // Predicate on value of c
"atom.or.b32 m74, [m66], mINACTIVE;\n\t" // Set cu
"MIS2_PRED_BODY_8_3:\n\t" // Predicate body
"@!m51 bra MIS2_PRED_BODY_8_4;\n\t" // Predicate on value of c
"atom.or.b32 m75, [m67], mINACTIVE;\n\t" // Set cu
"MIS2_PRED_BODY_8_4:\n\t" // Predicate body
"@!m52 bra MIS2_PRED_BODY_8_5;\n\t" // Predicate on value of c
"atom.or.b32 m76, [m68], mINACTIVE;\n\t" // Set cu
"MIS2_PRED_BODY_8_5:\n\t" // Predicate body
"@!m53 bra MIS2_PRED_BODY_8_6;\n\t" // Predicate on value of c
"atom.or.b32 m77, [m69], mINACTIVE;\n\t" // Set cu
"MIS2_PRED_BODY_8_6:\n\t" // Predicate body
"@!m54 bra MIS2_PRED_BODY_8_7;\n\t" // Predicate on value of c
"atom.or.b32 m78, [m70], mINACTIVE;\n\t" // Set cu
"MIS2_PRED_BODY_8_7:\n\t" // Predicate body
"@!m55 bra MIS2_PRED_BODY_8_8;\n\t" // Predicate on value of c
"atom.or.b32 m79, [m71], mINACTIVE;\n\t" // Set cu
"MIS2_PRED_BODY_8_8:\n\t" // Predicate body
"@!m56 bra MIS2_NEIGH_LOOP_8;\n\t" // Predicate on value of c
"atom.or.b32 m80, [m72], mINACTIVE;\n\t" // Set cu
"MIS2_NEIGH_LOOP_8:\n\t"
: // Outputs
: "l"(col_base_addr) // Inputs
);
}
for (; edge <= (row_end - 4); edge += 4) {
int * const col_base_addr = &col[edge];
asm volatile
(
".reg .s32 q1;\n\t" // Register for nid loaded from col
".reg .s32 q2;\n\t" // Register for nid loaded from col
".reg .s32 q3;\n\t" // Register for nid loaded from col
".reg .s32 q4;\n\t" // Register for nid loaded from col
".reg .u64 q17;\n\t" // Register for multiplied nid value as address
".reg .u64 q18;\n\t" // Register for multiplied nid value as address
".reg .u64 q19;\n\t" // Register for multiplied nid value as address
".reg .u64 q20;\n\t" // Register for multiplied nid value as address
".reg .u64 q25;\n\t" // Register for final address to load from c
".reg .u64 q26;\n\t" // Register for final address to load from c
".reg .u64 q27;\n\t" // Register for final address to load from c
".reg .u64 q28;\n\t" // Register for final address to load from c
".reg .s32 q33;\n\t" // Register for c
".reg .s32 q34;\n\t" // Register for c
".reg .s32 q35;\n\t" // Register for c
".reg .s32 q36;\n\t" // Register for c
".reg .u64 q65;\n\t" // Register for final address to load from cu
".reg .u64 q66;\n\t" // Register for final address to load from cu
".reg .u64 q67;\n\t" // Register for final address to load from cu
".reg .u64 q68;\n\t" // Register for final address to load from cu
".reg .b32 q73;\n\t" // Register for cu
".reg .b32 q74;\n\t" // Register for cu
".reg .b32 q75;\n\t" // Register for cu
".reg .b32 q76;\n\t" // Register for cu
".reg .pred q49;\n\t" // Register for c predicate
".reg .pred q50;\n\t" // Register for c predicate
".reg .pred q51;\n\t" // Register for c predicate
".reg .pred q52;\n\t" // Register for c predicate
"ld.s32 q1, [%0+0];\n\t" // Load nid
"ld.s32 q2, [%0+4];\n\t" // Load nid
"ld.s32 q3, [%0+8];\n\t" // Load nid
"ld.s32 q4, [%0+12];\n\t" // Load nid
"mul.wide.s32 q17, q1, 4;\n\t" // Multiply nid for address calculation
"mul.wide.s32 q18, q2, 4;\n\t" // Multiply nid for address calculation
"mul.wide.s32 q19, q3, 4;\n\t" // Multiply nid for address calculation
"mul.wide.s32 q20, q4, 4;\n\t" // Multiply nid for address calculation
"add.u64 q25, m99, q17;\n\t" // Final address calculation for c
"add.u64 q26, m99, q18;\n\t" // Final address calculation for c
"add.u64 q27, m99, q19;\n\t" // Final address calculation for c
"add.u64 q28, m99, q20;\n\t" // Final address calculation for c
"add.u64 q65, m100, q17;\n\t" // Final address calculation for cu
"add.u64 q66, m100, q18;\n\t" // Final address calculation for cu
"add.u64 q67, m100, q19;\n\t" // Final address calculation for cu
"add.u64 q68, m100, q20;\n\t" // Final address calculation for cu
"ld.s32 q33, [q25];\n\t" // Load c
"ld.s32 q34, [q26];\n\t" // Load c
"ld.s32 q35, [q27];\n\t" // Load c
"ld.s32 q36, [q28];\n\t" // Load c
"setp.eq.s32 q49, q33, mNOTPROCESSED;\n\t" // Value for predicate
"setp.eq.s32 q50, q34, mNOTPROCESSED;\n\t" // Value for predicate
"setp.eq.s32 q51, q35, mNOTPROCESSED;\n\t" // Value for predicate
"setp.eq.s32 q52, q36, mNOTPROCESSED;\n\t" // Value for predicate
"MIS2_PRED_BODY_4_1:\n\t" // Predicate body
"@!q49 bra MIS2_PRED_BODY_4_2;\n\t" // Predicate on value of c
"atom.or.b32 q73, [q65], mINACTIVE;\n\t" // Set cu
"MIS2_PRED_BODY_4_2:\n\t" // Predicate body
"@!q50 bra MIS2_PRED_BODY_4_3;\n\t" // Predicate on value of c
"atom.or.b32 q74, [q66], mINACTIVE;\n\t" // Set cu
"MIS2_PRED_BODY_4_3:\n\t" // Predicate body
"@!q51 bra MIS2_PRED_BODY_4_4;\n\t" // Predicate on value of c
"atom.or.b32 q75, [q67], mINACTIVE;\n\t" // Set cu
"MIS2_PRED_BODY_4_4:\n\t" // Predicate body
"@!q52 bra MIS2_NEIGH_LOOP_4;\n\t" // Predicate on value of c
"atom.or.b32 q76, [q68], mINACTIVE;\n\t" // Set cu
"MIS2_NEIGH_LOOP_4:\n\t"
: // Outputs
: "l"(col_base_addr) // Inputs
);
}
for (; edge <= (row_end - 2); edge += 2) {
int * const col_base_addr = &col[edge];
asm volatile
(
".reg .s32 t1;\n\t" // Register for nid loaded from col
".reg .s32 t2;\n\t" // Register for nid loaded from col
".reg .u64 t17;\n\t" // Register for multiplied nid value as address
".reg .u64 t18;\n\t" // Register for multiplied nid value as address
".reg .u64 t25;\n\t" // Register for final address to load from c
".reg .u64 t26;\n\t" // Register for final address to load from c
".reg .s32 t33;\n\t" // Register for c
".reg .s32 t34;\n\t" // Register for c
".reg .u64 t65;\n\t" // Register for final address to load from cu
".reg .u64 t66;\n\t" // Register for final address to load from cu
".reg .b32 t73;\n\t" // Register for cu
".reg .b32 t74;\n\t" // Register for cu
".reg .pred t49;\n\t" // Register for c predicate
".reg .pred t50;\n\t" // Register for c predicate
"ld.s32 t1, [%0+0];\n\t" // Load nid
"ld.s32 t2, [%0+4];\n\t" // Load nid
"mul.wide.s32 t17, t1, 4;\n\t" // Multiply nid for address calculation
"mul.wide.s32 t18, t2, 4;\n\t" // Multiply nid for address calculation
"add.u64 t25, m99, t17;\n\t" // Final address calculation for c
"add.u64 t26, m99, t18;\n\t" // Final address calculation for c
"add.u64 t65, m100, t17;\n\t" // Final address calculation for cu
"add.u64 t66, m100, t18;\n\t" // Final address calculation for cu
"ld.s32 t33, [t25];\n\t" // Load c
"ld.s32 t34, [t26];\n\t" // Load c
"setp.eq.s32 t49, t33, mNOTPROCESSED;\n\t" // Value for predicate
"setp.eq.s32 t50, t34, mNOTPROCESSED;\n\t" // Value for predicate
"MIS2_PRED_BODY_2_1:\n\t" // Predicate body
"@!t49 bra MIS2_PRED_BODY_2_2;\n\t" // Predicate on value of c
"atom.or.b32 t73, [t65], mINACTIVE;\n\t" // Set cu
"MIS2_PRED_BODY_2_2:\n\t" // Predicate body
"@!t50 bra MIS2_NEIGH_LOOP_2;\n\t" // Predicate on value of c
"atom.or.b32 t74, [t66], mINACTIVE;\n\t" // Set cu
"MIS2_NEIGH_LOOP_2:\n\t"
: // Outputs
: "l"(col_base_addr) // Inputs
);
}
for (; edge < row_end; edge++) {
int * const col_base_addr = &col[edge];
asm volatile
(
".reg .s32 a1;\n\t" // Register for nid loaded from col
".reg .u64 a17;\n\t" // Register for multiplied nid value as address
".reg .u64 a25;\n\t" // Register for final address to load from c
".reg .s32 a33;\n\t" // Register for c
".reg .u64 a65;\n\t" // Register for final address to load from cu
".reg .b32 a73;\n\t" // Register for cu
".reg .pred a49;\n\t" // Register for s predicate
"ld.s32 a1, [%0+0];\n\t" // Load nid
"mul.wide.s32 a17, a1, 4;\n\t" // Multiply nid for address calculation
"add.u64 a25, m99, a17;\n\t" // Final address calculation for c
"add.u64 a65, m100, a17;\n\t" // Final address calculation for cu
"ld.s32 a33, [a25];\n\t" // Load c
"setp.eq.s32 a49, a33, mNOTPROCESSED;\n\t" // Value for predicate
"MIS2_PRED_BODY_1_1:\n\t" // Predicate body
"@!a49 bra MIS2_NEIGH_LOOP_1;\n\t" // Predicate on value of c
"atom.or.b32 a73, [a65], mINACTIVE;\n\t" // Set cu
"MIS2_NEIGH_LOOP_1:\n\t"
: // Outputs
: "l"(col_base_addr) // Inputs
);
}
*/
}
}
/*
if (tx == 0) {
__denovo_gpuEpilogue(SPECIAL_REGION);
__denovo_gpuEpilogue(READ_ONLY_REGION);
__denovo_gpuEpilogue(default_reg);
__denovo_gpuEpilogue(rel_reg);
}
*/
}
/**
 * mis3 kernel
 * @param cu_array status update array
 * @param c_array status array
 * @param min_array per-vertex minimum value array (reset to INT_MAX for vertices that remain active)
 * @param num_gpu_nodes number of vertices
 */
__global__ void
mis3(int *cu_array, int *c_array, int *min_array, int num_gpu_nodes)
{
const int tx = threadIdx.x;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
/*
if (tx == 0) {
__denovo_setAcquireRegion(SPECIAL_REGION);
__denovo_addAcquireRegion(READ_ONLY_REGION);
__denovo_addAcquireRegion(default_reg);
__denovo_addAcquireRegion(rel_reg);
}
__syncthreads();
*/
for (; tid < num_gpu_nodes; tid += blockDim.x * gridDim.x) {
//set the status array
#ifdef SYNC
const int status = atomicAdd(&cu_array[tid], 0);
#else
const int status = cu_array[tid];
#endif
if (status == INACTIVE) {
c_array[tid] = status;
} else {
#ifdef SYNC
atomicExch(&min_array[tid], INT_MAX);
#else
min_array[tid] = INT_MAX;
#endif
}
}
/*
if (tx == 0) {
__denovo_gpuEpilogue(SPECIAL_REGION);
__denovo_gpuEpilogue(READ_ONLY_REGION);
__denovo_gpuEpilogue(default_reg);
__denovo_gpuEpilogue(rel_reg);
}
*/
}
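// ----------------------------------------------------------------------------
// Editor's note, not part of the original file: a minimal host-side sketch of
// how mis3 could be launched. The 256-thread block size and the variable names
// are illustrative assumptions; the arrays are presumed to be device pointers.
//
//   const int threads = 256;
//   const int blocks = (num_gpu_nodes + threads - 1) / threads;
//   mis3<<<blocks, threads>>>(cu_array, c_array, min_array, num_gpu_nodes);
//   cudaDeviceSynchronize();
//
// Because the kernel uses a grid-stride loop, any block/grid shape is correct;
// the launch configuration only affects occupancy.
// ----------------------------------------------------------------------------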
|
8c947b8d5da024e5512b8f2724c9626c22141510.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <vector>
//#include <cstdio> - uncomment for printf in kernels
//#include <hip/hip_runtime.h>
//cuda is included automatically when compiling with nvcc
typedef double REAL_T;
//-----------------------------------------------------------------------------
class CUDAEventTimer {
public:
CUDAEventTimer() {
hipEventCreate(&start_);
hipEventCreate(&stop_);
}
~CUDAEventTimer() {
hipEventDestroy(start_);
hipEventDestroy(stop_);
}
void start(hipStream_t stream = 0) {
stream_ = stream;
hipEventRecord(start_, stream_);
}
void stop() {
hipEventRecord(stop_, stream_);
hipEventSynchronize(stop_);
}
float elapsed() {
float elapsed = 0;
hipEventElapsedTime(&elapsed, start_, stop_);
return elapsed;
}
private:
hipEvent_t start_, stop_;
hipStream_t stream_;
};
int compute_blocks(int length, int threads_per_block) {
    //ceiling integer division:
    //if length is evenly divisible by threads_per_block, the result
    //is equivalent to length / threads_per_block; if not, it is
    //equivalent to length / threads_per_block + 1
return (length + threads_per_block - 1) / threads_per_block;
}
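// Editor's note: a worked example of the ceiling division above, with purely
// illustrative values: length = 100, threads_per_block = 32 gives
// (100 + 32 - 1) / 32 = 131 / 32 = 4 blocks, enough to cover all 100 elements.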
dim3 compute_blocks(int xsize, int ysize, int zsize,
int threads_per_block_x,
int threads_per_block_y,
int threads_per_block_z) {
return dim3(compute_blocks(xsize, threads_per_block_x),
compute_blocks(ysize, threads_per_block_y),
compute_blocks(zsize, threads_per_block_z));
}
//-----------------------------------------------------------------------------
__device__ REAL_T cell_op( REAL_T v) {
return cos(v) * exp(v);
}
//-----------------------------------------------------------------------------
__global__ void cuda_kernel(REAL_T* grid,
dim3 size,
int x_offset,
int y_offset,
int z_offset) {
const int i = blockIdx.x * blockDim.x + threadIdx.x;// + x_offset;
const int j = blockIdx.y * blockDim.y + threadIdx.y;// + y_offset;
const int k = blockIdx.z * blockDim.z + threadIdx.z;// + z_offset;
if( i >= size.x || j >= size.y || k >= size.z ) return;
const int index = i + size.x * (j + size.y * k);
grid[ index ] = cell_op( grid[ index ] );
}
typedef long long int CYCLES;
__global__ void cuda_kernel_cycles(CYCLES cycles){
const CYCLES start = clock64();
while( (clock64() - start) < cycles );
}
//-----------------------------------------------------------------------------
int main(int argc, char** argv) {
if(argc < 5) {
std::cout << "usage: " << argv[0]
<< " xsize ysize zsize threads_per_block [kernel duration(ms)]\n";
return 1;
}
const int XSIZE = atoi(argv[1]);
const int YSIZE = atoi(argv[2]);
const int ZSIZE = atoi(argv[3]);
const int CUDA_THREADS_PER_BLOCK = atoi(argv[4]);
const size_t TOTAL_SIZE = XSIZE * YSIZE * ZSIZE;
const size_t TOTAL_BYTE_SIZE = TOTAL_SIZE * sizeof(REAL_T);
bool use_cycles = false;
int time_ms = 0;
CYCLES cycles = 0;
if( argc > 5 ) {
time_ms = atoi(argv[5]);
use_cycles = true;
}
    // get clock rate (props.clockRate reports kHz; converted to Hz below)
hipDeviceProp_t props;
if( hipGetDeviceProperties(&props, 0) != hipSuccess ) return -1;
const unsigned int CLOCK_RATE_Hz = props.clockRate * 1000;
std::cout << "Clock rate (GHz): "
<< CLOCK_RATE_Hz / double(1024 * 1024 * 1024)
<< std::endl;
cycles = CLOCK_RATE_Hz * (time_ms / 1000.0);
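    // Editor's note, illustrative values only: a 1,400,000 kHz device clock gives
    // CLOCK_RATE_Hz = 1.4e9, so a requested duration of time_ms = 10 translates
    // to cycles = 1.4e9 * (10 / 1000.0) = 1.4e7 busy-wait cycles.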
// 3D grid setup
std::vector< REAL_T > h_grid(TOTAL_SIZE, REAL_T(0));
REAL_T* d_grid = 0;
if( hipMalloc(&d_grid, 2*TOTAL_BYTE_SIZE) != hipSuccess ) return -2;
if( hipMemcpy(d_grid, &h_grid[0], TOTAL_BYTE_SIZE, hipMemcpyHostToDevice)
!= hipSuccess ) return -3;
// launch configuration
const dim3 CUDA_THREADS = dim3(CUDA_THREADS_PER_BLOCK,
CUDA_THREADS_PER_BLOCK,
CUDA_THREADS_PER_BLOCK);
const dim3 CUDA_BLOCKS = compute_blocks(XSIZE, YSIZE, ZSIZE,
CUDA_THREADS_PER_BLOCK,
CUDA_THREADS_PER_BLOCK,
CUDA_THREADS_PER_BLOCK);
int x_offset = 0;
int y_offset = 0;
int z_offset = 0;
const dim3 GRID_SIZE = dim3(XSIZE, YSIZE, ZSIZE);
hipDeviceSynchronize();
// launch one kernel encompassing the entire grid...
std::cout << "Launching kernel:\n"
<< " Grid size: "
<< GRID_SIZE.x << ", " << GRID_SIZE.y << ", " << GRID_SIZE.z << std::endl
<< " Block size: "
<< CUDA_BLOCKS.x << ", " << CUDA_BLOCKS.y << ", " << CUDA_BLOCKS.z << std::endl;
CUDAEventTimer timer;
timer.start();
if( use_cycles ) {
hipLaunchKernelGGL(( cuda_kernel_cycles), dim3(CUDA_BLOCKS), dim3(CUDA_THREADS) , 0, 0, cycles);
} else {
hipLaunchKernelGGL(( cuda_kernel), dim3(CUDA_BLOCKS), dim3(CUDA_THREADS) , 0, 0, d_grid,
GRID_SIZE,
x_offset,
y_offset,
z_offset);
}
timer.stop();
const float single_elapsed = timer.elapsed();
std::cout << "Single kernel launch: " << single_elapsed << std::endl;
    // ...and then launch the same kernel multiple times over the same grid
std::cout << "Launching kernel:\n"
<< " Grid size: "
<< GRID_SIZE.x << ", " << GRID_SIZE.y << ", " << GRID_SIZE.z << std::endl
<< " Block size: 1, 1, 1" << std::endl;
hipDeviceSynchronize();
timer.start();
for( int k = 0; k != CUDA_BLOCKS.z; ++k ) {
z_offset = k * CUDA_THREADS.z;
for( int j = 0; j != CUDA_BLOCKS.y; ++j ) {
y_offset = j * CUDA_THREADS.y;
for( int i = 0; i != CUDA_BLOCKS.x; ++i ) {
x_offset = i * CUDA_THREADS.x;
if( use_cycles ) {
hipLaunchKernelGGL(( cuda_kernel_cycles), dim3(1), dim3(CUDA_THREADS) , 0, 0, cycles);
} else {
hipLaunchKernelGGL(( cuda_kernel), dim3(1), dim3(CUDA_THREADS) , 0, 0, d_grid, GRID_SIZE,
x_offset, y_offset, z_offset);
}
}
}
}
timer.stop();
const float multiple_elapsed = timer.elapsed();
std::cout << "Multiple kernel launches: " << multiple_elapsed << std::endl;
std::cout << "Multiple/Single %: " << 100 * multiple_elapsed / single_elapsed << std::endl;
// cleanup
hipFree(d_grid);
return 0;
}
|
8c947b8d5da024e5512b8f2724c9626c22141510.cu
|
#include <iostream>
#include <vector>
//#include <cstdio> - uncomment for printf in kernels
//#include <cuda_runtime.h>
//cuda is included automatically when compiling with nvcc
typedef double REAL_T;
//-----------------------------------------------------------------------------
class CUDAEventTimer {
public:
CUDAEventTimer() {
cudaEventCreate(&start_);
cudaEventCreate(&stop_);
}
~CUDAEventTimer() {
cudaEventDestroy(start_);
cudaEventDestroy(stop_);
}
void start(cudaStream_t stream = 0) {
stream_ = stream;
cudaEventRecord(start_, stream_);
}
void stop() {
cudaEventRecord(stop_, stream_);
cudaEventSynchronize(stop_);
}
float elapsed() {
float elapsed = 0;
cudaEventElapsedTime(&elapsed, start_, stop_);
return elapsed;
}
private:
cudaEvent_t start_, stop_;
cudaStream_t stream_;
};
int compute_blocks(int length, int threads_per_block) {
    //ceiling integer division:
    //if length is evenly divisible by threads_per_block, the result
    //is equivalent to length / threads_per_block; if not, it is
    //equivalent to length / threads_per_block + 1
return (length + threads_per_block - 1) / threads_per_block;
}
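// Editor's note: a worked example of the ceiling division above, with purely
// illustrative values: length = 100, threads_per_block = 32 gives
// (100 + 32 - 1) / 32 = 131 / 32 = 4 blocks, enough to cover all 100 elements.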
dim3 compute_blocks(int xsize, int ysize, int zsize,
int threads_per_block_x,
int threads_per_block_y,
int threads_per_block_z) {
return dim3(compute_blocks(xsize, threads_per_block_x),
compute_blocks(ysize, threads_per_block_y),
compute_blocks(zsize, threads_per_block_z));
}
//-----------------------------------------------------------------------------
__device__ REAL_T cell_op( REAL_T v) {
return cos(v) * exp(v);
}
//-----------------------------------------------------------------------------
__global__ void cuda_kernel(REAL_T* grid,
dim3 size,
int x_offset,
int y_offset,
int z_offset) {
const int i = blockIdx.x * blockDim.x + threadIdx.x;// + x_offset;
const int j = blockIdx.y * blockDim.y + threadIdx.y;// + y_offset;
const int k = blockIdx.z * blockDim.z + threadIdx.z;// + z_offset;
if( i >= size.x || j >= size.y || k >= size.z ) return;
const int index = i + size.x * (j + size.y * k);
grid[ index ] = cell_op( grid[ index ] );
}
typedef long long int CYCLES;
__global__ void cuda_kernel_cycles(CYCLES cycles){
const CYCLES start = clock64();
while( (clock64() - start) < cycles );
}
//-----------------------------------------------------------------------------
int main(int argc, char** argv) {
if(argc < 5) {
std::cout << "usage: " << argv[0]
<< " xsize ysize zsize threads_per_block [kernel duration(ms)]\n";
return 1;
}
const int XSIZE = atoi(argv[1]);
const int YSIZE = atoi(argv[2]);
const int ZSIZE = atoi(argv[3]);
const int CUDA_THREADS_PER_BLOCK = atoi(argv[4]);
const size_t TOTAL_SIZE = XSIZE * YSIZE * ZSIZE;
const size_t TOTAL_BYTE_SIZE = TOTAL_SIZE * sizeof(REAL_T);
bool use_cycles = false;
int time_ms = 0;
CYCLES cycles = 0;
if( argc > 5 ) {
time_ms = atoi(argv[5]);
use_cycles = true;
}
    // get clock rate (props.clockRate reports kHz; converted to Hz below)
cudaDeviceProp props;
if( cudaGetDeviceProperties(&props, 0) != cudaSuccess ) return -1;
const unsigned int CLOCK_RATE_Hz = props.clockRate * 1000;
std::cout << "Clock rate (GHz): "
<< CLOCK_RATE_Hz / double(1024 * 1024 * 1024)
<< std::endl;
cycles = CLOCK_RATE_Hz * (time_ms / 1000.0);
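    // Editor's note, illustrative values only: a 1,400,000 kHz device clock gives
    // CLOCK_RATE_Hz = 1.4e9, so a requested duration of time_ms = 10 translates
    // to cycles = 1.4e9 * (10 / 1000.0) = 1.4e7 busy-wait cycles.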
// 3D grid setup
std::vector< REAL_T > h_grid(TOTAL_SIZE, REAL_T(0));
REAL_T* d_grid = 0;
if( cudaMalloc(&d_grid, 2*TOTAL_BYTE_SIZE) != cudaSuccess ) return -2;
if( cudaMemcpy(d_grid, &h_grid[0], TOTAL_BYTE_SIZE, cudaMemcpyHostToDevice)
!= cudaSuccess ) return -3;
// launch configuration
const dim3 CUDA_THREADS = dim3(CUDA_THREADS_PER_BLOCK,
CUDA_THREADS_PER_BLOCK,
CUDA_THREADS_PER_BLOCK);
const dim3 CUDA_BLOCKS = compute_blocks(XSIZE, YSIZE, ZSIZE,
CUDA_THREADS_PER_BLOCK,
CUDA_THREADS_PER_BLOCK,
CUDA_THREADS_PER_BLOCK);
int x_offset = 0;
int y_offset = 0;
int z_offset = 0;
const dim3 GRID_SIZE = dim3(XSIZE, YSIZE, ZSIZE);
cudaDeviceSynchronize();
// launch one kernel encompassing the entire grid...
std::cout << "Launching kernel:\n"
<< " Grid size: "
<< GRID_SIZE.x << ", " << GRID_SIZE.y << ", " << GRID_SIZE.z << std::endl
<< " Block size: "
<< CUDA_BLOCKS.x << ", " << CUDA_BLOCKS.y << ", " << CUDA_BLOCKS.z << std::endl;
CUDAEventTimer timer;
timer.start();
if( use_cycles ) {
cuda_kernel_cycles<<< CUDA_BLOCKS, CUDA_THREADS >>>(cycles);
} else {
cuda_kernel<<< CUDA_BLOCKS, CUDA_THREADS >>>(d_grid,
GRID_SIZE,
x_offset,
y_offset,
z_offset);
}
timer.stop();
const float single_elapsed = timer.elapsed();
std::cout << "Single kernel launch: " << single_elapsed << std::endl;
    // ...and then launch the same kernel multiple times over the same grid
std::cout << "Launching kernel:\n"
<< " Grid size: "
<< GRID_SIZE.x << ", " << GRID_SIZE.y << ", " << GRID_SIZE.z << std::endl
<< " Block size: 1, 1, 1" << std::endl;
cudaDeviceSynchronize();
timer.start();
for( int k = 0; k != CUDA_BLOCKS.z; ++k ) {
z_offset = k * CUDA_THREADS.z;
for( int j = 0; j != CUDA_BLOCKS.y; ++j ) {
y_offset = j * CUDA_THREADS.y;
for( int i = 0; i != CUDA_BLOCKS.x; ++i ) {
x_offset = i * CUDA_THREADS.x;
if( use_cycles ) {
cuda_kernel_cycles<<< 1, CUDA_THREADS >>>(cycles);
} else {
cuda_kernel<<< 1, CUDA_THREADS >>>(d_grid, GRID_SIZE,
x_offset, y_offset, z_offset);
}
}
}
}
timer.stop();
const float multiple_elapsed = timer.elapsed();
std::cout << "Multiple kernel launches: " << multiple_elapsed << std::endl;
std::cout << "Multiple/Single %: " << 100 * multiple_elapsed / single_elapsed << std::endl;
// cleanup
cudaFree(d_grid);
return 0;
}
|
323de27847d3237cb03f5405cb938388e9b9dbf4.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void fill_lower_left_gpu(int *iRow, int *jCol, unsigned int *rind_L, unsigned int *cind_L, const int nnz_L) {
int i = threadIdx.x;
if (i < nnz_L) {
iRow[i] = rind_L[i];
jCol[i] = cind_L[i];
}
}
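// Editor's note, not part of the original file: the kernel indexes with
// threadIdx.x only, so a single block must cover all nnz_L entries. A
// hypothetical launch, assuming nnz_L does not exceed the device block limit:
//   hipLaunchKernelGGL(fill_lower_left_gpu, dim3(1), dim3(nnz_L), 0, 0,
//                      iRow, jCol, rind_L, cind_L, nnz_L);
// Larger nnz_L would require a blockIdx.x * blockDim.x + threadIdx.x index and
// a multi-block grid.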
|
323de27847d3237cb03f5405cb938388e9b9dbf4.cu
|
#include "includes.h"
__global__ void fill_lower_left_gpu(int *iRow, int *jCol, unsigned int *rind_L, unsigned int *cind_L, const int nnz_L) {
int i = threadIdx.x;
if (i < nnz_L) {
iRow[i] = rind_L[i];
jCol[i] = cind_L[i];
}
}
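// Editor's note, not part of the original file: the kernel indexes with
// threadIdx.x only, so a single block must cover all nnz_L entries. A
// hypothetical launch, assuming nnz_L does not exceed the device block limit:
//   fill_lower_left_gpu<<<1, nnz_L>>>(iRow, jCol, rind_L, cind_L, nnz_L);
// Larger nnz_L would require a blockIdx.x * blockDim.x + threadIdx.x index and
// a multi-block grid.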
|