hip_filename | hip_content | cuda_filename | cuda_content
---|---|---|---|
b53d23d1dde8488a83ef4f3d556133d1f1f7154b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "hiprand/hiprand.h"
#include "rocblas.h"
//extern "C" {
#include "convolutional_layer.h"
#include "deconvolutional_layer.h"
#include "batchnorm_layer.h"
#include "gemm.h"
#include "blas.h"
#include "im2col.h"
#include "col2im.h"
#include "utils.h"
#include "hip/hip_runtime.h"
//}
//extern "C" void forward_deconvolutional_layer_gpu(layer l, network_state state)
void forward_deconvolutional_layer_gpu(layer l, network_state state)
{
int i;
int out_h = l.out_h;
int out_w = l.out_w;
int size = out_h*out_w;
int m = l.size*l.size*l.n;
int n = l.h*l.w;
int k = l.c;
fill_ongpu(l.outputs*l.batch, 0, l.output_gpu, 1);
for(i = 0; i < l.batch; ++i){
float *a = l.weights_gpu;
float *b = state.input + i*l.c*l.h*l.w;
float *c = state.workspace;
gemm_ongpu(1,0,m,n,k,1,a,m,b,n,0,c,n);
col2im_ongpu(c, l.n, out_h, out_w, l.size, l.stride, l.pad, l.output_gpu+i*l.n*size);
}
if (l.batch_normalize) {
forward_batchnorm_layer_gpu(l, state);
} else {
add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w*l.out_h);
}
activate_array_ongpu(l.output_gpu, l.batch*l.n*size, l.activation);
}
//extern "C" void backward_deconvolutional_layer_gpu(layer l, network_state state)
void backward_deconvolutional_layer_gpu(layer l, network_state state)
{
int out_h = l.out_h;
int out_w = l.out_w;
int size = out_h*out_w;
int i;
gradient_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation, l.delta_gpu);
if(l.batch_normalize){
backward_batchnorm_layer_gpu(l, state);
} else {
backward_bias_gpu(l.bias_updates_gpu, l.delta_gpu, l.batch, l.n, l.out_w*l.out_h);
}
//if(state.delta) memset(state.delta, 0, l.batch*l.h*l.w*l.c*sizeof(float));
for(i = 0; i < l.batch; ++i){
int m = l.c;
int n = l.size*l.size*l.n;
int k = l.h*l.w;
float *a = state.input + i*m*n;
float *b = state.workspace;
float *c = l.weight_updates_gpu;
im2col_ongpu(l.delta_gpu + i*l.n*size, l.n, out_h, out_w,
l.size, l.stride, l.pad, b);
gemm_ongpu(0,1,m,n,k,1,a,k,b,k,1,c,n);
if(state.delta){
int m = l.c;
int n = l.h*l.w;
int k = l.size*l.size*l.n;
float *a = l.weights_gpu;
float *b = state.workspace;
float *c = state.delta + i*n*m;
gemm_ongpu(0,0,m,n,k,1,a,k,b,n,1,c,n);
}
}
}
//extern "C" void pull_deconvolutional_layer(layer l)
void pull_deconvolutional_layer(layer l)
{
cuda_pull_array(l.weights_gpu, l.weights, l.c*l.n*l.size*l.size);
cuda_pull_array(l.biases_gpu, l.biases, l.n);
cuda_pull_array(l.weight_updates_gpu, l.weight_updates, l.c*l.n*l.size*l.size);
cuda_pull_array(l.bias_updates_gpu, l.bias_updates, l.n);
if (l.batch_normalize){
cuda_pull_array(l.scales_gpu, l.scales, l.n);
cuda_pull_array(l.rolling_mean_gpu, l.rolling_mean, l.n);
cuda_pull_array(l.rolling_variance_gpu, l.rolling_variance, l.n);
}
}
//extern "C" void push_deconvolutional_layer(layer l)
void push_deconvolutional_layer(layer l)
{
cuda_push_array(l.weights_gpu, l.weights, l.c*l.n*l.size*l.size);
cuda_push_array(l.biases_gpu, l.biases, l.n);
cuda_push_array(l.weight_updates_gpu, l.weight_updates, l.c*l.n*l.size*l.size);
cuda_push_array(l.bias_updates_gpu, l.bias_updates, l.n);
if (l.batch_normalize){
cuda_push_array(l.scales_gpu, l.scales, l.n);
cuda_push_array(l.rolling_mean_gpu, l.rolling_mean, l.n);
cuda_push_array(l.rolling_variance_gpu, l.rolling_variance, l.n);
}
}
void update_deconvolutional_layer_gpu(layer l, int batch, float learning_rate, float momentum, float decay)
{
int size = l.size*l.size*l.c*l.n;
axpy_ongpu(l.n, learning_rate/batch, l.bias_updates_gpu, 1, l.biases_gpu, 1);
scal_ongpu(l.n, momentum, l.bias_updates_gpu, 1);
if(l.scales_gpu){
axpy_ongpu(l.n, learning_rate/batch, l.scale_updates_gpu, 1, l.scales_gpu, 1);
scal_ongpu(l.n, momentum, l.scale_updates_gpu, 1);
}
axpy_ongpu(size, -decay*batch, l.weights_gpu, 1, l.weight_updates_gpu, 1);
axpy_ongpu(size, learning_rate/batch, l.weight_updates_gpu, 1, l.weights_gpu, 1);
scal_ongpu(size, momentum, l.weight_updates_gpu, 1);
}
|
b53d23d1dde8488a83ef4f3d556133d1f1f7154b.cu
|
#include "cuda_runtime.h"
#include "curand.h"
#include "cublas_v2.h"
//extern "C" {
#include "convolutional_layer.h"
#include "deconvolutional_layer.h"
#include "batchnorm_layer.h"
#include "gemm.h"
#include "blas.h"
#include "im2col.h"
#include "col2im.h"
#include "utils.h"
#include "cuda.h"
//}
//extern "C" void forward_deconvolutional_layer_gpu(layer l, network_state state)
void forward_deconvolutional_layer_gpu(layer l, network_state state)
{
int i;
int out_h = l.out_h;
int out_w = l.out_w;
int size = out_h*out_w;
int m = l.size*l.size*l.n;
int n = l.h*l.w;
int k = l.c;
fill_ongpu(l.outputs*l.batch, 0, l.output_gpu, 1);
for(i = 0; i < l.batch; ++i){
float *a = l.weights_gpu;
float *b = state.input + i*l.c*l.h*l.w;
float *c = state.workspace;
gemm_ongpu(1,0,m,n,k,1,a,m,b,n,0,c,n);
col2im_ongpu(c, l.n, out_h, out_w, l.size, l.stride, l.pad, l.output_gpu+i*l.n*size);
}
if (l.batch_normalize) {
forward_batchnorm_layer_gpu(l, state);
} else {
add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w*l.out_h);
}
activate_array_ongpu(l.output_gpu, l.batch*l.n*size, l.activation);
}
//extern "C" void backward_deconvolutional_layer_gpu(layer l, network_state state)
void backward_deconvolutional_layer_gpu(layer l, network_state state)
{
int out_h = l.out_h;
int out_w = l.out_w;
int size = out_h*out_w;
int i;
gradient_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation, l.delta_gpu);
if(l.batch_normalize){
backward_batchnorm_layer_gpu(l, state);
} else {
backward_bias_gpu(l.bias_updates_gpu, l.delta_gpu, l.batch, l.n, l.out_w*l.out_h);
}
//if(state.delta) memset(state.delta, 0, l.batch*l.h*l.w*l.c*sizeof(float));
for(i = 0; i < l.batch; ++i){
int m = l.c;
int n = l.size*l.size*l.n;
int k = l.h*l.w;
float *a = state.input + i*m*n;
float *b = state.workspace;
float *c = l.weight_updates_gpu;
im2col_ongpu(l.delta_gpu + i*l.n*size, l.n, out_h, out_w,
l.size, l.stride, l.pad, b);
gemm_ongpu(0,1,m,n,k,1,a,k,b,k,1,c,n);
if(state.delta){
int m = l.c;
int n = l.h*l.w;
int k = l.size*l.size*l.n;
float *a = l.weights_gpu;
float *b = state.workspace;
float *c = state.delta + i*n*m;
gemm_ongpu(0,0,m,n,k,1,a,k,b,n,1,c,n);
}
}
}
//extern "C" void pull_deconvolutional_layer(layer l)
void pull_deconvolutional_layer(layer l)
{
cuda_pull_array(l.weights_gpu, l.weights, l.c*l.n*l.size*l.size);
cuda_pull_array(l.biases_gpu, l.biases, l.n);
cuda_pull_array(l.weight_updates_gpu, l.weight_updates, l.c*l.n*l.size*l.size);
cuda_pull_array(l.bias_updates_gpu, l.bias_updates, l.n);
if (l.batch_normalize){
cuda_pull_array(l.scales_gpu, l.scales, l.n);
cuda_pull_array(l.rolling_mean_gpu, l.rolling_mean, l.n);
cuda_pull_array(l.rolling_variance_gpu, l.rolling_variance, l.n);
}
}
//extern "C" void push_deconvolutional_layer(layer l)
void push_deconvolutional_layer(layer l)
{
cuda_push_array(l.weights_gpu, l.weights, l.c*l.n*l.size*l.size);
cuda_push_array(l.biases_gpu, l.biases, l.n);
cuda_push_array(l.weight_updates_gpu, l.weight_updates, l.c*l.n*l.size*l.size);
cuda_push_array(l.bias_updates_gpu, l.bias_updates, l.n);
if (l.batch_normalize){
cuda_push_array(l.scales_gpu, l.scales, l.n);
cuda_push_array(l.rolling_mean_gpu, l.rolling_mean, l.n);
cuda_push_array(l.rolling_variance_gpu, l.rolling_variance, l.n);
}
}
void update_deconvolutional_layer_gpu(layer l, int batch, float learning_rate, float momentum, float decay)
{
int size = l.size*l.size*l.c*l.n;
axpy_ongpu(l.n, learning_rate/batch, l.bias_updates_gpu, 1, l.biases_gpu, 1);
scal_ongpu(l.n, momentum, l.bias_updates_gpu, 1);
if(l.scales_gpu){
axpy_ongpu(l.n, learning_rate/batch, l.scale_updates_gpu, 1, l.scales_gpu, 1);
scal_ongpu(l.n, momentum, l.scale_updates_gpu, 1);
}
axpy_ongpu(size, -decay*batch, l.weights_gpu, 1, l.weight_updates_gpu, 1);
axpy_ongpu(size, learning_rate/batch, l.weight_updates_gpu, 1, l.weights_gpu, 1);
scal_ongpu(size, momentum, l.weight_updates_gpu, 1);
}
|
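Editorial note on the pair above: hipify's translation here is a one-for-one header and API rename (cuda_runtime.h → hip/hip_runtime.h, curand.h → hiprand/hiprand.h, cublas_v2.h → rocblas.h) while the layer logic itself is untouched. A minimal sketch of the same mapping on a standalone allocation helper; the helper names are hypothetical and not part of the dataset:
// CUDA original (hypothetical helper, illustration only)
#include "cuda_runtime.h"
float *alloc_zeroed(size_t n)
{
    float *ptr = NULL;
    cudaMalloc((void **)&ptr, n * sizeof(float));  // allocate device memory
    cudaMemset(ptr, 0, n * sizeof(float));         // zero-initialize the buffer
    return ptr;
}
// HIP version as hipify would emit it: same structure, renamed API
#include "hip/hip_runtime.h"
float *alloc_zeroed_hip(size_t n)
{
    float *ptr = NULL;
    hipMalloc((void **)&ptr, n * sizeof(float));
    hipMemset(ptr, 0, n * sizeof(float));
    return ptr;
}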
fdeefc92a1e36282d7b34b266f06d266ce043c66.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime_api.h>
#include "cuda/Runtime.h"
#include "cuda/CudaErrCategory.h"
#include "support/bad_alloc.h"
namespace cuda {
/* getDeviceCount() */
int getDeviceCount() {
int count;
checkCall(hipGetDeviceCount(&count), "getDeviceCount");
return count;
}
/* setDevice() */
void setDevice(int rank) {
checkCall(hipSetDevice(rank), "setDevice");
}
/* synchronize() */
void synchronize(const char* message) {
#if CUDART_VERSION >= 4000
hipError_t error = hipDeviceSynchronize();
#else
hipError_t error = hipDeviceSynchronize();
#endif /* CUDART_VERSION */
checkCall(error, std::string("synchronize: ") + message);
}
/* malloc() */
void* malloc(const size_t n, const char* message) {
void* ret = 0;
hipError_t error = hipMalloc(reinterpret_cast<void**>(&ret), n);
if (error) {
throw support::bad_alloc(
(support::getErrorCategory<CudaErrCategory>().message(error) +
message).c_str());
}
return ret;
}
/* free() */
void free(void* ptr, const char* message) {
hipError_t error = hipFree(ptr);
if (error) {
throw support::Exception(error,
support::getErrorCategory<CudaErrCategory>(),
message);
}
}
/* memcpy() */
void memcpyH2D(void* dst, const void* src, const size_t count,
const char* message) {
hipError_t error = hipMemcpy(dst, src, count, hipMemcpyHostToDevice);
checkCall(error, std::string("memcpyH2D: ") + message);
}
void memcpyD2H(void* dst, const void* src, const size_t count,
const char* message) {
hipError_t error = hipMemcpy(dst, src, count, hipMemcpyDeviceToHost);
checkCall(error, std::string("memcpyD2H: ") + message);
}
void memcpyD2D(void* dst, const void* src, const size_t count,
const char* message) {
hipError_t error = hipMemcpy(dst, src, count, hipMemcpyDeviceToDevice);
checkCall(error, std::string("memcpyD2D: ") + message);
}
void memcpyH2H(void* dst, const void* src, const size_t count,
const char* message) {
hipError_t error = hipMemcpy(dst, src, count, hipMemcpyHostToHost);
checkCall(error, std::string("memcpyH2H: ") + message);
}
} /* namespace cuda */
|
fdeefc92a1e36282d7b34b266f06d266ce043c66.cu
|
#include <cuda_runtime_api.h>
#include "cuda/Runtime.h"
#include "cuda/CudaErrCategory.h"
#include "support/bad_alloc.h"
namespace cuda {
/* getDeviceCount() */
int getDeviceCount() {
int count;
checkCall(cudaGetDeviceCount(&count), "getDeviceCount");
return count;
}
/* setDevice() */
void setDevice(int rank) {
checkCall(cudaSetDevice(rank), "setDevice");
}
/* synchronize() */
void synchronize(const char* message) {
#if CUDART_VERSION >= 4000
cudaError_t error = cudaDeviceSynchronize();
#else
cudaError_t error = cudaThreadSynchronize();
#endif /* CUDART_VERSION */
checkCall(error, std::string("synchronize: ") + message);
}
/* malloc() */
void* malloc(const size_t n, const char* message) {
void* ret = 0;
cudaError_t error = cudaMalloc(reinterpret_cast<void**>(&ret), n);
if (error) {
throw support::bad_alloc(
(support::getErrorCategory<CudaErrCategory>().message(error) +
message).c_str());
}
return ret;
}
/* free() */
void free(void* ptr, const char* message) {
cudaError_t error = cudaFree(ptr);
if (error) {
throw support::Exception(error,
support::getErrorCategory<CudaErrCategory>(),
message);
}
}
/* memcpy() */
void memcpyH2D(void* dst, const void* src, const size_t count,
const char* message) {
cudaError_t error = cudaMemcpy(dst, src, count, cudaMemcpyHostToDevice);
checkCall(error, std::string("memcpyH2D: ") + message);
}
void memcpyD2H(void* dst, const void* src, const size_t count,
const char* message) {
cudaError_t error = cudaMemcpy(dst, src, count, cudaMemcpyDeviceToHost);
checkCall(error, std::string("memcpyD2H: ") + message);
}
void memcpyD2D(void* dst, const void* src, const size_t count,
const char* message) {
cudaError_t error = cudaMemcpy(dst, src, count, cudaMemcpyDeviceToDevice);
checkCall(error, std::string("memcpyD2D: ") + message);
}
void memcpyH2H(void* dst, const void* src, const size_t count,
const char* message) {
cudaError_t error = cudaMemcpy(dst, src, count, cudaMemcpyHostToHost);
checkCall(error, std::string("memcpyH2H: ") + message);
}
} /* namespace cuda */
|
0511d85361b582f96db40ffb58ee64a8c71c610f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include "cuda_fft.cuh"
#include "../thirdparty/multicuda/src/cuda_helper.cuh"
#include "../image_exception.h"
namespace
{
__global__ void copyFromImageCuda( const uint8_t * in, hipfftComplex * out, uint32_t width, uint32_t height )
{
const uint32_t x = blockDim.x * blockIdx.x + threadIdx.x;
const uint32_t y = blockDim.y * blockIdx.y + threadIdx.y;
if( x < width && y < height ) {
const uint32_t id = y * width + x;
out[id].x = in[id];
out[id].y = 0;
}
}
__global__ void copyFromFloatCuda( const float * in, hipfftComplex * out, uint32_t width, uint32_t height )
{
const uint32_t x = blockDim.x * blockIdx.x + threadIdx.x;
const uint32_t y = blockDim.y * blockIdx.y + threadIdx.y;
if( x < width && y < height ) {
const uint32_t id = y * width + x;
out[id].x = in[id];
out[id].y = 0;
}
}
__global__ void copyToImageCuda( const hipfftComplex * in, uint8_t * out, float size, uint32_t width, uint32_t height )
{
const uint32_t inX = blockDim.x * blockIdx.x + threadIdx.x;
const uint32_t inY = blockDim.y * blockIdx.y + threadIdx.y;
if( inX < width && inY < height ) {
const uint32_t id = inY * width + inX;
const uint32_t middleX = width / 2;
const uint32_t middleY = height / 2;
const uint32_t outX = (inX < middleX) ? middleX + inX : inX - middleX;
const uint32_t outY = (inY < middleY) ? middleY + inY : inY - middleY;
out[outY * width + outX] = static_cast<uint8_t>(in[id].x / size + 0.5f);
}
}
__global__ void complexMultiplicationCuda( const hipfftComplex * in1, const hipfftComplex * in2, hipfftComplex * out, uint32_t width, uint32_t height )
{
// in1 = A + iB
// in2 = C + iD
// out = in1 * in2 = (A + iB) * (C + iD) = (A*C - B*D) + i(A*D + B*C)
const uint32_t x = blockDim.x * blockIdx.x + threadIdx.x;
const uint32_t y = blockDim.y * blockIdx.y + threadIdx.y;
if( x < width && y < height ) {
const uint32_t id = y * width + x;
out[id].x = in1[id].x * in2[id].x - in1[id].y * in2[id].y;
out[id].y = in1[id].x * in2[id].y + in1[id].y * in2[id].x;
}
}
}
namespace FFT_Cuda
{
ComplexData::ComplexData()
: _data ( NULL )
, _width ( 0 )
, _height( 0 )
{
}
ComplexData::ComplexData( const PenguinV_Image::Image & image )
: _data ( NULL )
, _width ( 0 )
, _height( 0 )
{
set( image );
}
ComplexData::ComplexData( const ComplexData & data )
: _data ( NULL )
, _width ( 0 )
, _height( 0 )
{
_copy( data );
}
ComplexData::ComplexData( ComplexData && data )
: _data ( NULL )
, _width ( 0 )
, _height( 0 )
{
_swap( data );
}
ComplexData & ComplexData::operator=( const ComplexData & data )
{
_copy( data );
return *this;
}
ComplexData & ComplexData::operator=( ComplexData && data )
{
_swap( data );
return *this;
}
ComplexData::~ComplexData()
{
_clean();
}
void ComplexData::set( const PenguinV_Image::Image & image )
{
if( image.empty() || image.colorCount() != 1u )
throw imageException( "Failed to allocate complex data for empty or coloured image" );
_clean();
multiCuda::cudaCheck( hipMalloc( &_data, (image.width() * image.height()) * sizeof( hipfftComplex ) ) );
_width = image.width();
_height = image.height();
launchKernel2D( copyFromImageCuda, _width, _height,
image.data(), _data, _width, _height );
}
void ComplexData::set( const multiCuda::Array<float> & data )
{
if( data.empty() || _width == 0 || _height == 0 || data.size() != _width * _height )
throw imageException( "Failed to allocate complex data for empty or coloured image" );
launchKernel2D( copyFromFloatCuda, _width, _height,
data.data(), _data, _width, _height );
}
PenguinV_Image::Image ComplexData::get() const
{
if( empty() )
return PenguinV_Image::Image();
PenguinV_Image::Image image( _width, _height );
const float size = static_cast<float>(image.width() * image.height());
launchKernel2D( copyToImageCuda, _width, _height,
_data, image.data(), size, _width, _height );
return image;
}
void ComplexData::resize( uint32_t width_, uint32_t height_ )
{
if( (width_ != _width || height_ != _height) && width_ != 0 && height_ != 0 ) {
_clean();
multiCuda::cudaCheck( hipMalloc( &_data, (width_ * height_) * sizeof( hipfftComplex ) ) );
_width = width_;
_height = height_;
}
}
hipfftComplex * ComplexData::data()
{
return _data;
}
const hipfftComplex * ComplexData::data() const
{
return _data;
}
uint32_t ComplexData::width() const
{
return _width;
}
uint32_t ComplexData::height() const
{
return _height;
}
bool ComplexData::empty() const
{
return _data == NULL;
}
void ComplexData::_clean()
{
if( _data != NULL ) {
hipFree( _data );
_data = NULL;
}
_width = 0;
_height = 0;
}
void ComplexData::_copy( const ComplexData & data )
{
_clean();
resize( data._width, data._height );
if( !empty() ) {
if( !multiCuda::cudaSafeCheck( hipMemcpy( _data, data._data, _width * _height * sizeof( hipfftComplex ), hipMemcpyDeviceToDevice ) ) )
throw imageException( "Cannot copy a memory to CUDA device" );
}
}
void ComplexData::_swap( ComplexData & data )
{
std::swap( _data, data._data );
std::swap( _width, data._width );
std::swap( _height, data._height );
}
FFTExecutor::FFTExecutor()
: _plan ( 0 )
, _width ( 0 )
, _height( 0 )
{
}
FFTExecutor::FFTExecutor( uint32_t width_, uint32_t height_ )
: _plan ( 0 )
, _width ( 0 )
, _height( 0 )
{
initialize( width_, height_ );
}
FFTExecutor::~FFTExecutor()
{
_clean();
}
void FFTExecutor::initialize( uint32_t width_, uint32_t height_ )
{
if( width_ == 0 || height_ == 0 )
throw imageException( "Invalid parameters for FFTExecutor" );
_clean();
if( hipfftPlan2d( &_plan, width_, height_, HIPFFT_C2C ) != HIPFFT_SUCCESS )
throw imageException( "Cannot create FFT plan on CUDA device" );
_width = width_;
_height = height_;
}
uint32_t FFTExecutor::width() const
{
return _width;
}
uint32_t FFTExecutor::height() const
{
return _height;
}
void FFTExecutor::directTransform( ComplexData & data )
{
directTransform( data, data );
}
void FFTExecutor::directTransform( ComplexData & in, ComplexData & out )
{
if( _plan == 0 || _width != in.width() || _height != in.height() || _width != out.width() || _height != out.height() )
throw imageException( "Invalid parameters for FFTExecutor" );
if( hipfftExecC2C( _plan, in.data(), out.data(), HIPFFT_FORWARD ) != HIPFFT_SUCCESS )
throw imageException( "Cannot execute direct FFT transform on CUDA device" );
}
void FFTExecutor::inverseTransform( ComplexData & data )
{
inverseTransform( data, data );
}
void FFTExecutor::inverseTransform( ComplexData & in, ComplexData & out )
{
if( _plan == 0 || _width != in.width() || _height != in.height() || _width != out.width() || _height != out.height() )
throw imageException( "Invalid parameters for FFTExecutor" );
if( hipfftExecC2C( _plan, in.data(), out.data(), HIPFFT_BACKWARD ) != HIPFFT_SUCCESS )
throw imageException( "Cannot execute inverse FFT transform on CUDA device" );
}
void FFTExecutor::complexMultiplication( ComplexData & in1, ComplexData & in2, ComplexData & out ) const
{
if( in1.width() != in2.width() || in1.height() != in2.height() || in1.width() != out.width() || in1.height() != out.height() ||
in1.width() == 0 || in1.height() == 0 )
throw imageException( "Invalid parameters for FFTExecutor" );
launchKernel2D( complexMultiplicationCuda, _width, _height,
in1.data(), in2.data(), out.data(), _width, _height );
}
void FFTExecutor::_clean()
{
if( _plan != 0 ) {
hipfftDestroy( _plan );
_plan = 0;
}
_width = 0;
_height = 0;
}
}
|
0511d85361b582f96db40ffb58ee64a8c71c610f.cu
|
#include <cuda_runtime.h>
#include "cuda_fft.cuh"
#include "../thirdparty/multicuda/src/cuda_helper.cuh"
#include "../image_exception.h"
namespace
{
__global__ void copyFromImageCuda( const uint8_t * in, cufftComplex * out, uint32_t width, uint32_t height )
{
const uint32_t x = blockDim.x * blockIdx.x + threadIdx.x;
const uint32_t y = blockDim.y * blockIdx.y + threadIdx.y;
if( x < width && y < height ) {
const uint32_t id = y * width + x;
out[id].x = in[id];
out[id].y = 0;
}
}
__global__ void copyFromFloatCuda( const float * in, cufftComplex * out, uint32_t width, uint32_t height )
{
const uint32_t x = blockDim.x * blockIdx.x + threadIdx.x;
const uint32_t y = blockDim.y * blockIdx.y + threadIdx.y;
if( x < width && y < height ) {
const uint32_t id = y * width + x;
out[id].x = in[id];
out[id].y = 0;
}
}
__global__ void copyToImageCuda( const cufftComplex * in, uint8_t * out, float size, uint32_t width, uint32_t height )
{
const uint32_t inX = blockDim.x * blockIdx.x + threadIdx.x;
const uint32_t inY = blockDim.y * blockIdx.y + threadIdx.y;
if( inX < width && inY < height ) {
const uint32_t id = inY * width + inX;
const uint32_t middleX = width / 2;
const uint32_t middleY = height / 2;
const uint32_t outX = (inX < middleX) ? middleX + inX : inX - middleX;
const uint32_t outY = (inY < middleY) ? middleY + inY : inY - middleY;
out[outY * width + outX] = static_cast<uint8_t>(in[id].x / size + 0.5f);
}
}
__global__ void complexMultiplicationCuda( const cufftComplex * in1, const cufftComplex * in2, cufftComplex * out, uint32_t width, uint32_t height )
{
// in1 = A + iB
// in2 = C + iD
// out = in1 * in2 = (A + iB) * (C + iD) = (A*C - B*D) + i(A*D + B*C)
const uint32_t x = blockDim.x * blockIdx.x + threadIdx.x;
const uint32_t y = blockDim.y * blockIdx.y + threadIdx.y;
if( x < width && y < height ) {
const uint32_t id = y * width + x;
out[id].x = in1[id].x * in2[id].x - in1[id].y * in2[id].y;
out[id].y = in1[id].x * in2[id].y + in1[id].y * in2[id].x;
}
}
}
namespace FFT_Cuda
{
ComplexData::ComplexData()
: _data ( NULL )
, _width ( 0 )
, _height( 0 )
{
}
ComplexData::ComplexData( const PenguinV_Image::Image & image )
: _data ( NULL )
, _width ( 0 )
, _height( 0 )
{
set( image );
}
ComplexData::ComplexData( const ComplexData & data )
: _data ( NULL )
, _width ( 0 )
, _height( 0 )
{
_copy( data );
}
ComplexData::ComplexData( ComplexData && data )
: _data ( NULL )
, _width ( 0 )
, _height( 0 )
{
_swap( data );
}
ComplexData & ComplexData::operator=( const ComplexData & data )
{
_copy( data );
return *this;
}
ComplexData & ComplexData::operator=( ComplexData && data )
{
_swap( data );
return *this;
}
ComplexData::~ComplexData()
{
_clean();
}
void ComplexData::set( const PenguinV_Image::Image & image )
{
if( image.empty() || image.colorCount() != 1u )
throw imageException( "Failed to allocate complex data for empty or coloured image" );
_clean();
multiCuda::cudaCheck( cudaMalloc( &_data, (image.width() * image.height()) * sizeof( cufftComplex ) ) );
_width = image.width();
_height = image.height();
launchKernel2D( copyFromImageCuda, _width, _height,
image.data(), _data, _width, _height );
}
void ComplexData::set( const multiCuda::Array<float> & data )
{
if( data.empty() || _width == 0 || _height == 0 || data.size() != _width * _height )
throw imageException( "Failed to allocate complex data for empty or coloured image" );
launchKernel2D( copyFromFloatCuda, _width, _height,
data.data(), _data, _width, _height );
}
PenguinV_Image::Image ComplexData::get() const
{
if( empty() )
return PenguinV_Image::Image();
PenguinV_Image::Image image( _width, _height );
const float size = static_cast<float>(image.width() * image.height());
launchKernel2D( copyToImageCuda, _width, _height,
_data, image.data(), size, _width, _height );
return image;
}
void ComplexData::resize( uint32_t width_, uint32_t height_ )
{
if( (width_ != _width || height_ != _height) && width_ != 0 && height_ != 0 ) {
_clean();
multiCuda::cudaCheck( cudaMalloc( &_data, (width_ * height_) * sizeof( cufftComplex ) ) );
_width = width_;
_height = height_;
}
}
cufftComplex * ComplexData::data()
{
return _data;
}
const cufftComplex * ComplexData::data() const
{
return _data;
}
uint32_t ComplexData::width() const
{
return _width;
}
uint32_t ComplexData::height() const
{
return _height;
}
bool ComplexData::empty() const
{
return _data == NULL;
}
void ComplexData::_clean()
{
if( _data != NULL ) {
cudaFree( _data );
_data = NULL;
}
_width = 0;
_height = 0;
}
void ComplexData::_copy( const ComplexData & data )
{
_clean();
resize( data._width, data._height );
if( !empty() ) {
if( !multiCuda::cudaSafeCheck( cudaMemcpy( _data, data._data, _width * _height * sizeof( cufftComplex ), cudaMemcpyDeviceToDevice ) ) )
throw imageException( "Cannot copy a memory to CUDA device" );
}
}
void ComplexData::_swap( ComplexData & data )
{
std::swap( _data, data._data );
std::swap( _width, data._width );
std::swap( _height, data._height );
}
FFTExecutor::FFTExecutor()
: _plan ( 0 )
, _width ( 0 )
, _height( 0 )
{
}
FFTExecutor::FFTExecutor( uint32_t width_, uint32_t height_ )
: _plan ( 0 )
, _width ( 0 )
, _height( 0 )
{
initialize( width_, height_ );
}
FFTExecutor::~FFTExecutor()
{
_clean();
}
void FFTExecutor::initialize( uint32_t width_, uint32_t height_ )
{
if( width_ == 0 || height_ == 0 )
throw imageException( "Invalid parameters for FFTExecutor" );
_clean();
if( cufftPlan2d( &_plan, width_, height_, CUFFT_C2C ) != CUFFT_SUCCESS )
throw imageException( "Cannot create FFT plan on CUDA device" );
_width = width_;
_height = height_;
}
uint32_t FFTExecutor::width() const
{
return _width;
}
uint32_t FFTExecutor::height() const
{
return _height;
}
void FFTExecutor::directTransform( ComplexData & data )
{
directTransform( data, data );
}
void FFTExecutor::directTransform( ComplexData & in, ComplexData & out )
{
if( _plan == 0 || _width != in.width() || _height != in.height() || _width != out.width() || _height != out.height() )
throw imageException( "Invalid parameters for FFTExecutor" );
if( cufftExecC2C( _plan, in.data(), out.data(), CUFFT_FORWARD ) != CUFFT_SUCCESS )
throw imageException( "Cannot execute direct FFT transform on CUDA device" );
}
void FFTExecutor::inverseTransform( ComplexData & data )
{
inverseTransform( data, data );
}
void FFTExecutor::inverseTransform( ComplexData & in, ComplexData & out )
{
if( _plan == 0 || _width != in.width() || _height != in.height() || _width != out.width() || _height != out.height() )
throw imageException( "Invalid parameters for FFTExecutor" );
if( cufftExecC2C( _plan, in.data(), out.data(), CUFFT_INVERSE ) != CUFFT_SUCCESS )
throw imageException( "Cannot execute inverse FFT transform on CUDA device" );
}
void FFTExecutor::complexMultiplication( ComplexData & in1, ComplexData & in2, ComplexData & out ) const
{
if( in1.width() != in2.width() || in1.height() != in2.height() || in1.width() != out.width() || in1.height() != out.height() ||
in1.width() == 0 || in1.height() == 0 )
throw imageException( "Invalid parameters for FFTExecutor" );
launchKernel2D( complexMultiplicationCuda, _width, _height,
in1.data(), in2.data(), out.data(), _width, _height );
}
void FFTExecutor::_clean()
{
if( _plan != 0 ) {
cufftDestroy( _plan );
_plan = 0;
}
_width = 0;
_height = 0;
}
}
|
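Editorial note on the FFT pair above: the cuFFT API maps onto hipFFT name-for-name (cufftPlan2d → hipfftPlan2d, CUFFT_C2C → HIPFFT_C2C, cufftExecC2C → hipfftExecC2C, CUFFT_FORWARD/CUFFT_INVERSE → HIPFFT_FORWARD/HIPFFT_BACKWARD, cufftDestroy → hipfftDestroy). A minimal standalone sketch of that plan/execute/destroy sequence, with error handling omitted; the function name and the in-place usage are illustrative assumptions:
#include <cufft.h>
// forward then inverse 2D complex-to-complex transform, in place (sketch only)
void fft_roundtrip(cufftComplex *data, int width, int height)
{
    cufftHandle plan;                                   // hipfftHandle in the HIP version
    cufftPlan2d(&plan, width, height, CUFFT_C2C);       // hipfftPlan2d(&plan, width, height, HIPFFT_C2C)
    cufftExecC2C(plan, data, data, CUFFT_FORWARD);      // hipfftExecC2C(..., HIPFFT_FORWARD)
    cufftExecC2C(plan, data, data, CUFFT_INVERSE);      // hipfftExecC2C(..., HIPFFT_BACKWARD)
    cufftDestroy(plan);                                 // hipfftDestroy(plan)
}
The inverse transform is unnormalized in both libraries, which is why copyToImageCuda above divides each element by the image size.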
c92c5a8acb6f352fd76c6f5125eb6f9448ace393.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* .cuda.cu - Copyright 2019 Utrecht University
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include ".cuda.h"
namespace lh2core
{
// path tracing buffers and global variables
__constant__ CoreInstanceDesc* instanceDescriptors;
__constant__ CoreMaterial* materials;
__constant__ CoreLightTri* areaLights;
__constant__ CorePointLight* pointLights;
__constant__ CoreSpotLight* spotLights;
__constant__ CoreDirectionalLight* directionalLights;
__constant__ int4 lightCounts; // area, point, spot, directional
__constant__ uchar4* argb32;
__constant__ float4* argb128;
__constant__ uchar4* nrm32;
__constant__ float3* skyPixels;
__constant__ int skywidth;
__constant__ int skyheight;
__constant__ float4* debugData;
// path tracer settings
__constant__ __device__ float geometryEpsilon;
__constant__ __device__ float clampValue;
// access
__host__ void SetInstanceDescriptors( CoreInstanceDesc* p ) { hipMemcpyToSymbol( instanceDescriptors, &p, sizeof( void* ) ); }
__host__ void SetMaterialList( CoreMaterial* p ) { hipMemcpyToSymbol( materials, &p, sizeof( void* ) ); }
__host__ void SetAreaLights( CoreLightTri* p ) { hipMemcpyToSymbol( areaLights, &p, sizeof( void* ) ); }
__host__ void SetPointLights( CorePointLight* p ) { hipMemcpyToSymbol( pointLights, &p, sizeof( void* ) ); }
__host__ void SetSpotLights( CoreSpotLight* p ) { hipMemcpyToSymbol( spotLights, &p, sizeof( void* ) ); }
__host__ void SetDirectionalLights( CoreDirectionalLight* p ) { hipMemcpyToSymbol( directionalLights, &p, sizeof( void* ) ); }
__host__ void SetLightCounts( int area, int point, int spot, int directional )
{
const int4 counts = make_int4( area, point, spot, directional );
hipMemcpyToSymbol( lightCounts, &counts, sizeof( int4 ) );
}
__host__ void SetARGB32Pixels( uint* p ) { hipMemcpyToSymbol( argb32, &p, sizeof( void* ) ); }
__host__ void SetARGB128Pixels( float4* p ) { hipMemcpyToSymbol( argb128, &p, sizeof( void* ) ); }
__host__ void SetNRM32Pixels( uint* p ) { hipMemcpyToSymbol( nrm32, &p, sizeof( void* ) ); }
__host__ void SetSkyPixels( float3* p ) { hipMemcpyToSymbol( skyPixels, &p, sizeof( void* ) ); }
__host__ void SetSkySize( int w, int h ) { hipMemcpyToSymbol( skywidth, &w, sizeof( int ) ); hipMemcpyToSymbol( skyheight, &h, sizeof( int ) ); }
__host__ void SetDebugData( float4* p ) { hipMemcpyToSymbol( debugData, &p, sizeof( void* ) ); }
// access
__host__ void SetGeometryEpsilon( float e ) { hipMemcpyToSymbol( geometryEpsilon, &e, sizeof( float ) ); }
__host__ void SetClampValue( float c ) { hipMemcpyToSymbol( clampValue, &c, sizeof( float ) ); }
// counters for persistent threads
static __device__ Counters* counters;
__global__ void InitCountersForExtend_Kernel( int pathCount )
{
if (threadIdx.x != 0) return;
counters->activePaths = pathCount; // remaining active paths
counters->extensionRays = 0; // compaction counter for extension rays
counters->shadowRays = 0; // compaction counter for connections
counters->totalExtensionRays = pathCount;
counters->totalShadowRays = 0;
}
__host__ void InitCountersForExtend( int pathCount ) { InitCountersForExtend_Kernel << <1, 32 >> > (pathCount); }
__global__ void InitCountersSubsequent_Kernel()
{
if (threadIdx.x != 0) return;
counters->totalExtensionRays += counters->extensionRays;
counters->activePaths = counters->extensionRays; // remaining active paths
counters->extensionRays = 0; // compaction counter for extension rays
}
__host__ void InitCountersSubsequent() { InitCountersSubsequent_Kernel << <1, 32 >> > (); }
__host__ void SetCounters( Counters* p ) { hipMemcpyToSymbol( counters, &p, sizeof( void* ) ); }
// functional blocks
#include "tools_shared.h"
#include "sampling_shared.h"
#include "material_shared.h"
#include "lights_shared.h"
#include "bsdf.h"
#include "pathtracer.h"
#include "finalize_shared.h"
#include "camera.h"
#include "connections.h"
} // namespace lh2core
// EOF
|
c92c5a8acb6f352fd76c6f5125eb6f9448ace393.cu
|
/* .cuda.cu - Copyright 2019 Utrecht University
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include ".cuda.h"
namespace lh2core
{
// path tracing buffers and global variables
__constant__ CoreInstanceDesc* instanceDescriptors;
__constant__ CoreMaterial* materials;
__constant__ CoreLightTri* areaLights;
__constant__ CorePointLight* pointLights;
__constant__ CoreSpotLight* spotLights;
__constant__ CoreDirectionalLight* directionalLights;
__constant__ int4 lightCounts; // area, point, spot, directional
__constant__ uchar4* argb32;
__constant__ float4* argb128;
__constant__ uchar4* nrm32;
__constant__ float3* skyPixels;
__constant__ int skywidth;
__constant__ int skyheight;
__constant__ float4* debugData;
// path tracer settings
__constant__ __device__ float geometryEpsilon;
__constant__ __device__ float clampValue;
// access
__host__ void SetInstanceDescriptors( CoreInstanceDesc* p ) { cudaMemcpyToSymbol( instanceDescriptors, &p, sizeof( void* ) ); }
__host__ void SetMaterialList( CoreMaterial* p ) { cudaMemcpyToSymbol( materials, &p, sizeof( void* ) ); }
__host__ void SetAreaLights( CoreLightTri* p ) { cudaMemcpyToSymbol( areaLights, &p, sizeof( void* ) ); }
__host__ void SetPointLights( CorePointLight* p ) { cudaMemcpyToSymbol( pointLights, &p, sizeof( void* ) ); }
__host__ void SetSpotLights( CoreSpotLight* p ) { cudaMemcpyToSymbol( spotLights, &p, sizeof( void* ) ); }
__host__ void SetDirectionalLights( CoreDirectionalLight* p ) { cudaMemcpyToSymbol( directionalLights, &p, sizeof( void* ) ); }
__host__ void SetLightCounts( int area, int point, int spot, int directional )
{
const int4 counts = make_int4( area, point, spot, directional );
cudaMemcpyToSymbol( lightCounts, &counts, sizeof( int4 ) );
}
__host__ void SetARGB32Pixels( uint* p ) { cudaMemcpyToSymbol( argb32, &p, sizeof( void* ) ); }
__host__ void SetARGB128Pixels( float4* p ) { cudaMemcpyToSymbol( argb128, &p, sizeof( void* ) ); }
__host__ void SetNRM32Pixels( uint* p ) { cudaMemcpyToSymbol( nrm32, &p, sizeof( void* ) ); }
__host__ void SetSkyPixels( float3* p ) { cudaMemcpyToSymbol( skyPixels, &p, sizeof( void* ) ); }
__host__ void SetSkySize( int w, int h ) { cudaMemcpyToSymbol( skywidth, &w, sizeof( int ) ); cudaMemcpyToSymbol( skyheight, &h, sizeof( int ) ); }
__host__ void SetDebugData( float4* p ) { cudaMemcpyToSymbol( debugData, &p, sizeof( void* ) ); }
// access
__host__ void SetGeometryEpsilon( float e ) { cudaMemcpyToSymbol( geometryEpsilon, &e, sizeof( float ) ); }
__host__ void SetClampValue( float c ) { cudaMemcpyToSymbol( clampValue, &c, sizeof( float ) ); }
// counters for persistent threads
static __device__ Counters* counters;
__global__ void InitCountersForExtend_Kernel( int pathCount )
{
if (threadIdx.x != 0) return;
counters->activePaths = pathCount; // remaining active paths
counters->extensionRays = 0; // compaction counter for extension rays
counters->shadowRays = 0; // compaction counter for connections
counters->totalExtensionRays = pathCount;
counters->totalShadowRays = 0;
}
__host__ void InitCountersForExtend( int pathCount ) { InitCountersForExtend_Kernel << <1, 32 >> > (pathCount); }
__global__ void InitCountersSubsequent_Kernel()
{
if (threadIdx.x != 0) return;
counters->totalExtensionRays += counters->extensionRays;
counters->activePaths = counters->extensionRays; // remaining active paths
counters->extensionRays = 0; // compaction counter for extension rays
}
__host__ void InitCountersSubsequent() { InitCountersSubsequent_Kernel << <1, 32 >> > (); }
__host__ void SetCounters( Counters* p ) { cudaMemcpyToSymbol( counters, &p, sizeof( void* ) ); }
// functional blocks
#include "tools_shared.h"
#include "sampling_shared.h"
#include "material_shared.h"
#include "lights_shared.h"
#include "bsdf.h"
#include "pathtracer.h"
#include "finalize_shared.h"
#include "camera.h"
#include "connections.h"
} // namespace lh2core
// EOF
|
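Editorial note on the pair above: every setter pushes a host-side pointer value into a __constant__ device symbol, and hipify simply rewrites cudaMemcpyToSymbol to hipMemcpyToSymbol with the same arguments. A minimal sketch of one such setter outside the original namespace; the symbol and function names are hypothetical:
__constant__ float4* exampleBuffer;     // hypothetical __constant__ pointer, illustration only
__host__ void SetExampleBuffer( float4* p )
{
    // copy the pointer value itself into constant memory; kernels then
    // dereference exampleBuffer to reach the global-memory buffer that p points to
    cudaMemcpyToSymbol( exampleBuffer, &p, sizeof( void* ) );
    // HIP output from hipify: hipMemcpyToSymbol( exampleBuffer, &p, sizeof( void* ) );
}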
7722516a69649ec618bffc6c7c9c8f56732b56d1.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void ForwardSoftmax(float *Z, int nColsZ, float *sumExp, float *A)
{
int row = threadIdx.x;
int col = blockIdx.x;
atomicAdd(&sumExp[col], exp(Z[row * nColsZ + col]));
__syncthreads();
A[row * nColsZ + col] = exp(Z[row * nColsZ + col]) / sumExp[col];
}
|
7722516a69649ec618bffc6c7c9c8f56732b56d1.cu
|
#include "includes.h"
__global__ void ForwardSoftmax(float *Z, int nColsZ, float *sumExp, float *A)
{
int row = threadIdx.x;
int col = blockIdx.x;
atomicAdd(&sumExp[col], exp(Z[row * nColsZ + col]));
__syncthreads();
A[row * nColsZ + col] = exp(Z[row * nColsZ + col]) / sumExp[col];
}
|
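Editorial note on the softmax pair above: the kernel assigns one block per column (col = blockIdx.x) and one thread per row (row = threadIdx.x), accumulates the column's sum of exponentials with atomicAdd, synchronizes the block, and then normalizes. A hedged host-side launch sketch; the buffer names and dimensions are hypothetical, and it assumes the row count fits in a single thread block and that sumExp is zeroed before launch:
int nRows = 128, nCols = 64;                         // example sizes, illustration only
size_t bytes = (size_t)nRows * nCols * sizeof(float);
float *d_Z, *d_A, *d_sumExp;
cudaMalloc(&d_Z, bytes);
cudaMalloc(&d_A, bytes);
cudaMalloc(&d_sumExp, nCols * sizeof(float));
cudaMemset(d_sumExp, 0, nCols * sizeof(float));      // the kernel only accumulates, so the sums must start at zero
ForwardSoftmax<<<nCols, nRows>>>(d_Z, nCols, d_sumExp, d_A);   // one block per column, one thread per row
cudaDeviceSynchronize();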
4294c0a0fdad379501047868888999030c0eaa51.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "filter2.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int width = XSIZE;
int height = YSIZE;
unsigned char *src = NULL;
hipMalloc(&src, XSIZE*YSIZE);
unsigned char *dest = NULL;
hipMalloc(&dest, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
filter2), dim3(gridBlock),dim3(threadBlock), 0, 0, width,height,src,dest);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
filter2), dim3(gridBlock),dim3(threadBlock), 0, 0, width,height,src,dest);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
filter2), dim3(gridBlock),dim3(threadBlock), 0, 0, width,height,src,dest);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
4294c0a0fdad379501047868888999030c0eaa51.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "filter2.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int width = XSIZE;
int height = YSIZE;
unsigned char *src = NULL;
cudaMalloc(&src, XSIZE*YSIZE);
unsigned char *dest = NULL;
cudaMalloc(&dest, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
filter2<<<gridBlock,threadBlock>>>(width,height,src,dest);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
filter2<<<gridBlock,threadBlock>>>(width,height,src,dest);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
filter2<<<gridBlock,threadBlock>>>(width,height,src,dest);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
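Editorial note on the benchmark pair above: the only substantive difference between the two files is the kernel-launch syntax, where hipify rewrites the triple-chevron launch into hipLaunchKernelGGL with explicit dynamic-shared-memory and stream arguments (both 0 here):
// CUDA launch, as in the .cu file
filter2<<<gridBlock, threadBlock>>>(width, height, src, dest);
// HIP launch emitted by hipify: grid, block, shared-memory bytes, stream, then the kernel arguments
hipLaunchKernelGGL((filter2), dim3(gridBlock), dim3(threadBlock), 0, 0, width, height, src, dest);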
84844b13c768c97608752ccaa3b9cd5d7e367a8a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "../THCTensorSort.cuh"
#include "THHTensor.hpp"
#include "THHStream.hpp"
#include "../generic/THCTensorSort.cu"
#include "../THCGenerateByteType.h"
|
84844b13c768c97608752ccaa3b9cd5d7e367a8a.cu
|
#include "../THCTensorSort.cuh"
#include "THCTensor.hpp"
#include "THCStream.hpp"
#include "../generic/THCTensorSort.cu"
#include "../THCGenerateByteType.h"
|
c60b30b65a6bd82cb54145edbde0fde94d7f6bd4.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <stdio.h>
#define CHECK_CUDA_ERROR(exp) { \
hipError_t ret = (exp); \
if (ret != hipSuccess) { \
fprintf(stderr, "[error] %s:%d: %s (%s)\n", \
__FILE__, __LINE__, \
hipGetErrorName(ret), \
hipGetErrorString(ret)); \
exit(EXIT_FAILURE); \
} \
}
//
// A macro that accesses the element on the i'th row and the j'th column of a
// matrix A.
//
// The matrix A (m rows, n columns) is stored in column-major format, i.e., the
// columns are stored continuously in the memory. The leading dimension (ldA)
// defines how many words (double-precision floating point numbers in this case)
// are allocated for each column. That is, A[j*ldA+i] is the element on the i'th
// row and the j'th column of the matrix.
//
#define _A(i, j) (A[(size_t)(j)*ldA+(i)])
// fix thread block dimensions so that blockDim.x = blockDim.y = warp size
#define THREAD_BLOCK_SIZE 32
// a kernel that performs a matrix-vector multiplication y = A * x, where the
// matrix A has m rows and n columns
__global__ void gemv_kernel(
int m, int n, int ldA, double const *A, double const *x, double *y)
{
// statically allocated shared memory array for the per-thread partial sums
__shared__ double tmp[THREAD_BLOCK_SIZE][THREAD_BLOCK_SIZE];
// we are assuming that each row of the vector y gets its own thread in
// the x dimension
int thread_id = blockIdx.x * THREAD_BLOCK_SIZE + threadIdx.x;
double v = 0.0;
if (thread_id < m) {
//
// loop over the corresponding row of the matrix A and the vector x
//
// |y_0| |A_00 A_01 A_02 .... | |x_0|
// |y_1| |A_10 A_11 A_12 .... | |x_1|
// |y_2| = |A_20 A_21 A_22 .... | * |x_2|
// |...| |.... .... .... .... | |...|
// |...| |.... .... .... .... | |...|
//
// y_k = A_k0 * x_0 + A_k1 * x_1 + A_k2 * x_2 ...
//
for (int i = threadIdx.y; i < n; i += THREAD_BLOCK_SIZE)
v += _A(thread_id, i) * x[i];
}
// each thread stores its partial sum to the shared memory array
tmp[threadIdx.x][threadIdx.y] = v;
// wait until all threads are ready
__syncthreads();
// sum together the partial sums (note the swapped x-y dimensions)
int active = THREAD_BLOCK_SIZE/2;
while (0 < active) {
if (threadIdx.x < active)
tmp[threadIdx.y][threadIdx.x] +=
tmp[threadIdx.y][threadIdx.x + active];
active /= 2;
__syncthreads();
// The above __syncthreads() call could be replaced with a __syncwarp()
// call. The __syncwarp() function synchronizes the **warp**. This could
// potentially improve the performance as the warps do not have to wait for
// each other.
// __syncwarp();
}
// __syncthreads(); // needed with __syncwarp
if (thread_id < m && threadIdx.y == 0)
y[thread_id] = tmp[threadIdx.x][0];
}
int main(int argc, char **argv)
{
// read and validate the command line arguments
if (argc < 2) {
fprintf(stderr, "[error] No matrix height was supplied.\n");
return EXIT_FAILURE;
}
if (argc < 3) {
fprintf(stderr, "[error] No matrix width was supplied.\n");
return EXIT_FAILURE;
}
int m = atof(argv[1]);
if (m < 1) {
fprintf(stderr, "[error] The matrix height was invalid.\n");
return EXIT_FAILURE;
}
int n = atof(argv[2]);
if (n < 1) {
fprintf(stderr, "[error] The matrix width was invalid.\n");
return EXIT_FAILURE;
}
srand(time(NULL));
// allocate host memory for the matrix A and the vectors y and x
double *A; int ldA = m; // for illustration purposes only
if ((A = (double *) malloc(n*ldA*sizeof(double))) == NULL) {
fprintf(stderr,
"[error] Failed to allocate host memory for matrix A.\n");
return EXIT_FAILURE;
}
double *y, *x;
if ((y = (double *) malloc(m*sizeof(double))) == NULL) {
fprintf(stderr,
"[error] Failed to allocate host memory for vector y.\n");
return EXIT_FAILURE;
}
if ((x = (double *) malloc(n*sizeof(double))) == NULL) {
fprintf(stderr,
"[error] Failed to allocate host memory for vector x.\n");
return EXIT_FAILURE;
}
// initialize host memory
for (int i = 0; i < n; i++) {
x[i] = 2.0*rand()/RAND_MAX - 1.0;
for (int j = 0; j < m; j++)
_A(j, i) = 2.0*rand()/RAND_MAX - 1.0;
}
// allocate device memory
double *d_A, *d_y, *d_x; int ld_dA;
{
size_t pitch;
CHECK_CUDA_ERROR(hipMallocPitch(&d_A, &pitch, m*sizeof(double), n));
ld_dA = pitch/sizeof(double);
}
CHECK_CUDA_ERROR(hipMalloc(&d_y, m*sizeof(double)));
CHECK_CUDA_ERROR(hipMalloc(&d_x, n*sizeof(double)));
// copy the matrix A and the vector x from the host memory to the device
// memory
CHECK_CUDA_ERROR(
hipMemcpy2D(d_A, ld_dA*sizeof(double), A, ldA*sizeof(double),
m*sizeof(double), n, hipMemcpyHostToDevice));
CHECK_CUDA_ERROR(
hipMemcpy(d_x, x, n*sizeof(double), hipMemcpyHostToDevice));
// start timer
struct timespec start, stop;
clock_gettime(CLOCK_REALTIME, &start);
// launch the kernel
dim3 threads(THREAD_BLOCK_SIZE, THREAD_BLOCK_SIZE);
dim3 blocks((m+THREAD_BLOCK_SIZE-1)/THREAD_BLOCK_SIZE, 1);
hipLaunchKernelGGL(( gemv_kernel), dim3(blocks), dim3(threads), 0, 0, m, n, ld_dA, d_A, d_x, d_y);
CHECK_CUDA_ERROR(hipDeviceSynchronize());
// stop timer
clock_gettime(CLOCK_REALTIME, &stop);
double time =
(stop.tv_sec - start.tv_sec) + (stop.tv_nsec - start.tv_nsec)*1E-9;
printf("Runtime was %f seconds.\n", time);
// copy the vector y from the device memory to the host memory
CHECK_CUDA_ERROR(
hipMemcpy(y, d_y, m*sizeof(double), hipMemcpyDeviceToHost));
// validate the result by computing sqrt((A*x - y)^2)
double res = 0.0, nor = 0.0;
for (int i = 0; i < m; i++) {
double v = 0.0;
for (int j = 0; j < n; j++)
v += _A(i, j) * x[j];
res += (v - y[i]) * (v - y[i]);
nor += v*v;
}
printf("Residual = %e\n", sqrt(res)/sqrt(nor));
// free the allocated memory
free(A); free(y); free(x);
CHECK_CUDA_ERROR(hipFree(d_A));
CHECK_CUDA_ERROR(hipFree(d_y));
CHECK_CUDA_ERROR(hipFree(d_x));
return EXIT_SUCCESS;
}
|
c60b30b65a6bd82cb54145edbde0fde94d7f6bd4.cu
|
#include <stdlib.h>
#include <stdio.h>
#define CHECK_CUDA_ERROR(exp) { \
cudaError_t ret = (exp); \
if (ret != cudaSuccess) { \
fprintf(stderr, "[error] %s:%d: %s (%s)\n", \
__FILE__, __LINE__, \
cudaGetErrorName(ret), \
cudaGetErrorString(ret)); \
exit(EXIT_FAILURE); \
} \
}
//
// A macro that accesses the element on the i'th row and the j'th column of a
// matrix A.
//
// The matrix A (m rows, n columns) is stored in column-major format, i.e., the
// columns are stored continuously in the memory. The leading dimension (ldA)
// defines how many words (double-precision floating point numbers in this case)
// are allocated for each column. That is, A[j*ldA+i] is the element on the i'th
// row and the j'th column of the matrix.
//
#define _A(i, j) (A[(size_t)(j)*ldA+(i)])
// fix thread block dimensions so that blockDim.x = blockDim.y = warp size
#define THREAD_BLOCK_SIZE 32
// a kernel that performs a matrix-vector multiplication y = A * x, where the
// matrix A has m rows and n columns
__global__ void gemv_kernel(
int m, int n, int ldA, double const *A, double const *x, double *y)
{
// statically allocated shared memory array for the per-thread partial sums
__shared__ double tmp[THREAD_BLOCK_SIZE][THREAD_BLOCK_SIZE];
// we are assuming that each row of the vector y gets its own thread in
// the x dimension
int thread_id = blockIdx.x * THREAD_BLOCK_SIZE + threadIdx.x;
double v = 0.0;
if (thread_id < m) {
//
// loop over the corresponding row of the matrix A and the vector x
//
// |y_0| |A_00 A_01 A_02 .... | |x_0|
// |y_1| |A_10 A_11 A_12 .... | |x_1|
// |y_2| = |A_20 A_21 A_22 .... | * |x_2|
// |...| |.... .... .... .... | |...|
// |...| |.... .... .... .... | |...|
//
// y_k = A_k0 * x_0 + A_k1 * x_1 + A_k2 * x_2 ...
//
for (int i = threadIdx.y; i < n; i += THREAD_BLOCK_SIZE)
v += _A(thread_id, i) * x[i];
}
// each thread stores its partial sum to the shared memory array
tmp[threadIdx.x][threadIdx.y] = v;
// wait until all threads are ready
__syncthreads();
// sum together the partial sums (note the swapped x-y dimensions)
int active = THREAD_BLOCK_SIZE/2;
while (0 < active) {
if (threadIdx.x < active)
tmp[threadIdx.y][threadIdx.x] +=
tmp[threadIdx.y][threadIdx.x + active];
active /= 2;
__syncthreads();
// The above __syncthreads() call could be replaced with a __syncwarp()
// call. The __syncwarp() function synchronizes the **warp**. This could
// potentially improve the performance as the warps do not have to wait for
// each other.
// __syncwarp();
}
// __syncthreads(); // needed with __syncwarp
if (thread_id < m && threadIdx.y == 0)
y[thread_id] = tmp[threadIdx.x][0];
}
int main(int argc, char **argv)
{
// read and validate the command line arguments
if (argc < 2) {
fprintf(stderr, "[error] No matrix height was supplied.\n");
return EXIT_FAILURE;
}
if (argc < 3) {
fprintf(stderr, "[error] No matrix width was supplied.\n");
return EXIT_FAILURE;
}
int m = atof(argv[1]);
if (m < 1) {
fprintf(stderr, "[error] The matrix height was invalid.\n");
return EXIT_FAILURE;
}
int n = atof(argv[2]);
if (n < 1) {
fprintf(stderr, "[error] The matrix width was invalid.\n");
return EXIT_FAILURE;
}
srand(time(NULL));
// allocate host memory for the matrix A and the vectors y and x
double *A; int ldA = m; // for illustration purposes only
if ((A = (double *) malloc(n*ldA*sizeof(double))) == NULL) {
fprintf(stderr,
"[error] Failed to allocate host memory for matrix A.\n");
return EXIT_FAILURE;
}
double *y, *x;
if ((y = (double *) malloc(m*sizeof(double))) == NULL) {
fprintf(stderr,
"[error] Failed to allocate host memory for vector y.\n");
return EXIT_FAILURE;
}
if ((x = (double *) malloc(n*sizeof(double))) == NULL) {
fprintf(stderr,
"[error] Failed to allocate host memory for vector x.\n");
return EXIT_FAILURE;
}
// initialize host memory
for (int i = 0; i < n; i++) {
x[i] = 2.0*rand()/RAND_MAX - 1.0;
for (int j = 0; j < m; j++)
_A(j, i) = 2.0*rand()/RAND_MAX - 1.0;
}
// allocate device memory
double *d_A, *d_y, *d_x; int ld_dA;
{
size_t pitch;
CHECK_CUDA_ERROR(cudaMallocPitch(&d_A, &pitch, m*sizeof(double), n));
ld_dA = pitch/sizeof(double);
}
CHECK_CUDA_ERROR(cudaMalloc(&d_y, m*sizeof(double)));
CHECK_CUDA_ERROR(cudaMalloc(&d_x, n*sizeof(double)));
// copy the matrix A and the vector x from the host memory to the device
// memory
CHECK_CUDA_ERROR(
cudaMemcpy2D(d_A, ld_dA*sizeof(double), A, ldA*sizeof(double),
m*sizeof(double), n, cudaMemcpyHostToDevice));
CHECK_CUDA_ERROR(
cudaMemcpy(d_x, x, n*sizeof(double), cudaMemcpyHostToDevice));
// start timer
struct timespec start, stop;
clock_gettime(CLOCK_REALTIME, &start);
// launch the kernel
dim3 threads(THREAD_BLOCK_SIZE, THREAD_BLOCK_SIZE);
dim3 blocks((m+THREAD_BLOCK_SIZE-1)/THREAD_BLOCK_SIZE, 1);
gemv_kernel<<<blocks, threads>>>(m, n, ld_dA, d_A, d_x, d_y);
CHECK_CUDA_ERROR(cudaDeviceSynchronize());
// stop timer
clock_gettime(CLOCK_REALTIME, &stop);
double time =
(stop.tv_sec - start.tv_sec) + (stop.tv_nsec - start.tv_nsec)*1E-9;
printf("Runtime was %f seconds.\n", time);
// copy the vector y from the device memory to the host memory
CHECK_CUDA_ERROR(
cudaMemcpy(y, d_y, m*sizeof(double), cudaMemcpyDeviceToHost));
// validate the result by computing sqrt((A*x - y)^2)
double res = 0.0, nor = 0.0;
for (int i = 0; i < m; i++) {
double v = 0.0;
for (int j = 0; j < n; j++)
v += _A(i, j) * x[j];
res += (v - y[i]) * (v - y[i]);
nor += v*v;
}
printf("Residual = %e\n", sqrt(res)/sqrt(nor));
// free the allocated memory
free(A); free(y); free(x);
CHECK_CUDA_ERROR(cudaFree(d_A));
CHECK_CUDA_ERROR(cudaFree(d_y));
CHECK_CUDA_ERROR(cudaFree(d_x));
return EXIT_SUCCESS;
}
|
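Editorial note on the gemv pair above: the _A(i, j) macro encodes column-major storage with leading dimension ldA, and on the device the leading dimension is recovered from the pitched allocation as ld_dA = pitch / sizeof(double). A minimal host-side reference of the same indexing, mirroring what gemv_kernel computes per output row; the macro and function names below are illustrative, not part of the original files:
#include <stddef.h>
#define A_ELEM(A, ldA, i, j) ((A)[(size_t)(j) * (ldA) + (i)])  // column-major element (i, j)
// y = A * x for an m-by-n column-major matrix with leading dimension ldA (sketch only)
static void gemv_reference(int m, int n, int ldA, const double *A, const double *x, double *y)
{
    for (int i = 0; i < m; i++) {
        double v = 0.0;
        for (int j = 0; j < n; j++)
            v += A_ELEM(A, ldA, i, j) * x[j];
        y[i] = v;
    }
}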
7decad10784b791bb779d1151952abe98c5618d5.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
#include <stdint.h>
#include "../../common/para.h"
#include "../../common/para.cuh"
#include <stdarg.h>
#include "runtime.cuh"
__device__ int syncID;
__device__ int threadNum;
int *done, *doneDev;
int *totalScheTasks, *totalScheTasksDev;
cTaskStruct *ccTaskPool;
gTaskStruct *ggTaskPool;
static int taskId = 0;
static int lastEmptyTask = 0;
static int round_count = 0;
static int taskIndex = 0;
static int barrierCount = 0;
hipStream_t master_kernel_stream;
hipStream_t runtime_stream;
__global__ void masterKernel(volatile int *done, volatile int *totalScheTasks, volatile gTaskStruct *gTaskPool);
void runtime_init(){
int i;
setenv("CUDA_DEVICE_MAX_CONNECTIONS", "32", 1);
checkCudaErrors(hipStreamCreate(&runtime_stream));
checkCudaErrors(hipStreamCreate(&master_kernel_stream));
// done flag to interrupt runtime
checkCudaErrors(hipHostMalloc(&done, sizeof(int), hipHostMallocDefault));
checkCudaErrors(hipMalloc(&doneDev, sizeof(int)));
// host task buffer
checkCudaErrors(hipHostMalloc(&ccTaskPool, (BK_NUM*BP_NUM)*sizeof(cTaskStruct), hipHostMallocDefault));
// device task buffer
checkCudaErrors(hipMalloc(&ggTaskPool, (BK_NUM*BP_NUM)*sizeof(gTaskStruct)));
// totalScheTasks:
checkCudaErrors(hipHostMalloc(&totalScheTasks, sizeof(int), hipHostMallocDefault));
checkCudaErrors(hipMalloc(&totalScheTasksDev, sizeof(int)));
for(i = 0; i < (BK_NUM*BP_NUM); i++) {
ccTaskPool[i].ready = 0;
ccTaskPool[i].done = -1;
ccTaskPool[i].taskId = 0;
ccTaskPool[i].readyId = -1;
}
// runtime variables copy
*done = 0;
*totalScheTasks = 0;
checkCudaErrors(hipMemcpyAsync(doneDev, done, sizeof(int), hipMemcpyHostToDevice, runtime_stream));
checkCudaErrors(hipMemcpyAsync(totalScheTasksDev, totalScheTasks, sizeof(int), hipMemcpyHostToDevice, runtime_stream));
checkCudaErrors(hipMemcpyAsync(ggTaskPool, ccTaskPool, (BK_NUM*BP_NUM)*sizeof(gTaskStruct), hipMemcpyHostToDevice, runtime_stream));
checkCudaErrors(hipStreamSynchronize(runtime_stream));
//MasterKernel
hipLaunchKernelGGL(( masterKernel), dim3(BK_NUM), dim3(TD_NUM), SH_MEM_SIZE, master_kernel_stream, doneDev, totalScheTasksDev, ggTaskPool);
}
int taskLaunch(int paraN, ...){
int j, k;
int terminate = 1;
va_list ap;
va_start(ap,paraN);
while(taskIndex < (BK_NUM*BP_NUM) && terminate == 1){
if(ccTaskPool[taskIndex].ready == 0 && ccTaskPool[taskIndex].readyId == -1)
{
// **Add here**: renew task table, set the bit of task ID on
// **Add here**: get_ID()
ccTaskPool[taskIndex].ready = 1;
ccTaskPool[taskIndex].taskId = taskIndex+1;
ccTaskPool[taskIndex].done = 1;
if(round_count > 0) {
ccTaskPool[taskIndex].readyId = taskId;
}else{
lastEmptyTask = taskIndex;
}
round_count ++;
taskId = taskIndex;
for(j = 0; j < paraN; j++){ // set parameters
int type = va_arg(ap, enum mytypes);
switch(type){
case INT:
if(j == 0) ccTaskPool[taskIndex].thread = va_arg(ap, int);
if(j == 1) ccTaskPool[taskIndex].block = va_arg(ap, int);
if(j == 2) ccTaskPool[taskIndex].sharemem = va_arg(ap, int);
if(j == 3) ccTaskPool[taskIndex].sync = va_arg(ap, int);
if(j == 4) ccTaskPool[taskIndex].funcId = va_arg(ap, int);
if(j > 4) ccTaskPool[taskIndex].para[j-inParaNum] = va_arg(ap, int*);
break;
case FLOAT:
ccTaskPool[taskIndex].para[j-inParaNum] = va_arg(ap, float*);
break;
case DOUBLE:
ccTaskPool[taskIndex].para[j-inParaNum] = va_arg(ap, double*);
break;
case LONG:
ccTaskPool[taskIndex].para[j-inParaNum] = va_arg(ap, long*);
break;
default:
break;
} // End switch
} // End for paraN
checkCudaErrors(hipMemcpyAsync(ggTaskPool+taskIndex, ccTaskPool+taskIndex,
sizeof(gTaskStruct), hipMemcpyHostToDevice, runtime_stream));
terminate = 0;
} // end if cTaskPool
taskIndex++;
if(taskIndex == (BK_NUM*BP_NUM) && round_count > 0){
ccTaskPool[lastEmptyTask].readyId = taskId;
checkCudaErrors(hipMemcpyAsync((int*)&ggTaskPool[lastEmptyTask].readyId, (int*)&ccTaskPool[lastEmptyTask].readyId,sizeof(int), hipMemcpyHostToDevice, runtime_stream));
checkCudaErrors(hipStreamSynchronize(runtime_stream));
barrierCount ++;
round_count = 0;
}
if(taskIndex == (BK_NUM*BP_NUM)){
checkCudaErrors(hipMemcpyAsync(ccTaskPool, ggTaskPool, (BK_NUM*BP_NUM)*sizeof(gTaskStruct),
hipMemcpyDeviceToHost, runtime_stream));
checkCudaErrors(hipStreamSynchronize(runtime_stream));
taskIndex = 0;
}
} // end while i < BK_NUM*BP_NUM
va_end(ap);
return taskId;
}
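/* Usage sketch (hypothetical caller; assumes inParaNum == 5 and that funcId 0 selects a
   device function registered with the master kernel's dispatch table):
     int id = taskLaunch(7,
                         INT, 128,      // threads per task
                         INT, 1,        // blocks
                         INT, 0,        // shared memory bytes
                         INT, 0,        // sync flag
                         INT, 0,        // funcId
                         FLOAT, d_in,   // first kernel argument
                         FLOAT, d_out); // second kernel argument
     waitAll(1);
   The returned id is the slot index of the task inside ccTaskPool/ggTaskPool. */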
void waitAll(int num_tasks){
*totalScheTasks = 0;
ccTaskPool[lastEmptyTask].readyId = taskId;
checkCudaErrors(hipMemcpyAsync((int*)&ggTaskPool[lastEmptyTask].readyId, (int*)&ccTaskPool[lastEmptyTask].readyId,
sizeof(int), hipMemcpyHostToDevice, runtime_stream));
checkCudaErrors(hipStreamSynchronize(runtime_stream));
round_count = 0;
int i;
while(*totalScheTasks < num_tasks){
checkCudaErrors(hipMemcpyAsync(totalScheTasks, totalScheTasksDev, sizeof(int), hipMemcpyDeviceToHost, runtime_stream));
checkCudaErrors(hipStreamSynchronize(runtime_stream));
}
*totalScheTasks = 0;
checkCudaErrors(hipMemcpyAsync(totalScheTasksDev, totalScheTasks, sizeof(int), hipMemcpyHostToDevice, runtime_stream));
checkCudaErrors(hipStreamSynchronize(runtime_stream));
taskIndex = 0;
taskId = 0;
lastEmptyTask = 0;
}
void runtime_destroy(){
*done = 1;
checkCudaErrors(hipMemcpyAsync(doneDev, done, sizeof(int), hipMemcpyHostToDevice, runtime_stream));
checkCudaErrors(hipStreamSynchronize(runtime_stream));
}
void runtime_free(){
checkCudaErrors(hipStreamDestroy(master_kernel_stream));
checkCudaErrors(hipStreamDestroy(runtime_stream));
checkCudaErrors(hipHostFree(done));
checkCudaErrors(hipHostFree(ccTaskPool));
checkCudaErrors(hipHostFree(totalScheTasks));
checkCudaErrors(hipFree(doneDev));
checkCudaErrors(hipFree(ggTaskPool));
checkCudaErrors(hipFree(totalScheTasksDev));
}
|
7decad10784b791bb779d1151952abe98c5618d5.cu
|
#include <cuda_runtime.h>
#include <helper_cuda.h>
#include <stdint.h>
#include "../../common/para.h"
#include "../../common/para.cuh"
#include <stdarg.h>
#include "runtime.cuh"
__device__ int syncID;
__device__ int threadNum;
int *done, *doneDev;
int *totalScheTasks, *totalScheTasksDev;
cTaskStruct *ccTaskPool;
gTaskStruct *ggTaskPool;
static int taskId = 0;
static int lastEmptyTask = 0;
static int round_count = 0;
static int taskIndex = 0;
static int barrierCount = 0;
cudaStream_t master_kernel_stream;
cudaStream_t runtime_stream;
__global__ void masterKernel(volatile int *done, volatile int *totalScheTasks, volatile gTaskStruct *gTaskPool);
void runtime_init(){
int i;
setenv("CUDA_DEVICE_MAX_CONNECTIONS", "32", 1);
checkCudaErrors(cudaStreamCreate(&runtime_stream));
checkCudaErrors(cudaStreamCreate(&master_kernel_stream));
// done flag to interrupt runtime
checkCudaErrors(cudaHostAlloc(&done, sizeof(int), cudaHostAllocDefault));
checkCudaErrors(cudaMalloc(&doneDev, sizeof(int)));
// host task buffer
checkCudaErrors(cudaHostAlloc(&ccTaskPool, (BK_NUM*BP_NUM)*sizeof(cTaskStruct), cudaHostAllocDefault));
// device task buffer
checkCudaErrors(cudaMalloc(&ggTaskPool, (BK_NUM*BP_NUM)*sizeof(gTaskStruct)));
// totalScheTasks:
checkCudaErrors(cudaHostAlloc(&totalScheTasks, sizeof(int), cudaHostAllocDefault));
checkCudaErrors(cudaMalloc(&totalScheTasksDev, sizeof(int)));
for(i = 0; i < (BK_NUM*BP_NUM); i++) {
ccTaskPool[i].ready = 0;
ccTaskPool[i].done = -1;
ccTaskPool[i].taskId = 0;
ccTaskPool[i].readyId = -1;
}
// runtime variables copy
*done = 0;
*totalScheTasks = 0;
checkCudaErrors(cudaMemcpyAsync(doneDev, done, sizeof(int), cudaMemcpyHostToDevice, runtime_stream));
checkCudaErrors(cudaMemcpyAsync(totalScheTasksDev, totalScheTasks, sizeof(int), cudaMemcpyHostToDevice, runtime_stream));
checkCudaErrors(cudaMemcpyAsync(ggTaskPool, ccTaskPool, (BK_NUM*BP_NUM)*sizeof(gTaskStruct), cudaMemcpyHostToDevice, runtime_stream));
checkCudaErrors(cudaStreamSynchronize(runtime_stream));
//MasterKernel
masterKernel<<<BK_NUM, TD_NUM, SH_MEM_SIZE, master_kernel_stream>>>(doneDev, totalScheTasksDev, ggTaskPool);
}
int taskLaunch(int paraN, ...){
int j, k;
int terminate = 1;
va_list ap;
va_start(ap,paraN);
while(taskIndex < (BK_NUM*BP_NUM) && terminate == 1){
if(ccTaskPool[taskIndex].ready == 0 && ccTaskPool[taskIndex].readyId == -1)
{
// **Add here**: renew task table, set the bit of task ID on
// **Add here**: get_ID()
ccTaskPool[taskIndex].ready = 1;
ccTaskPool[taskIndex].taskId = taskIndex+1;
ccTaskPool[taskIndex].done = 1;
if(round_count > 0) {
ccTaskPool[taskIndex].readyId = taskId;
}else{
lastEmptyTask = taskIndex;
}
round_count ++;
taskId = taskIndex;
for(j = 0; j < paraN; j++){ // set parameters
int type = va_arg(ap, enum mytypes);
switch(type){
case INT:
if(j == 0) ccTaskPool[taskIndex].thread = va_arg(ap, int);
if(j == 1) ccTaskPool[taskIndex].block = va_arg(ap, int);
if(j == 2) ccTaskPool[taskIndex].sharemem = va_arg(ap, int);
if(j == 3) ccTaskPool[taskIndex].sync = va_arg(ap, int);
if(j == 4) ccTaskPool[taskIndex].funcId = va_arg(ap, int);
if(j > 4) ccTaskPool[taskIndex].para[j-inParaNum] = va_arg(ap, int*);
break;
case FLOAT:
ccTaskPool[taskIndex].para[j-inParaNum] = va_arg(ap, float*);
break;
case DOUBLE:
ccTaskPool[taskIndex].para[j-inParaNum] = va_arg(ap, double*);
break;
case LONG:
ccTaskPool[taskIndex].para[j-inParaNum] = va_arg(ap, long*);
break;
default:
break;
} // End switch
} // End for paraN
checkCudaErrors(cudaMemcpyAsync(ggTaskPool+taskIndex, ccTaskPool+taskIndex,
sizeof(gTaskStruct), cudaMemcpyHostToDevice, runtime_stream));
terminate = 0;
} // end if cTaskPool
taskIndex++;
if(taskIndex == (BK_NUM*BP_NUM) && round_count > 0){
ccTaskPool[lastEmptyTask].readyId = taskId;
checkCudaErrors(cudaMemcpyAsync((int*)&ggTaskPool[lastEmptyTask].readyId, (int*)&ccTaskPool[lastEmptyTask].readyId,sizeof(int), cudaMemcpyHostToDevice, runtime_stream));
checkCudaErrors(cudaStreamSynchronize(runtime_stream));
barrierCount ++;
round_count = 0;
}
if(taskIndex == (BK_NUM*BP_NUM)){
checkCudaErrors(cudaMemcpyAsync(ccTaskPool, ggTaskPool, (BK_NUM*BP_NUM)*sizeof(gTaskStruct),
cudaMemcpyDeviceToHost, runtime_stream));
checkCudaErrors(cudaStreamSynchronize(runtime_stream));
taskIndex = 0;
}
} // end while i < BK_NUM*BP_NUM
va_end(ap);
return taskId;
}
void waitAll(int num_tasks){
*totalScheTasks = 0;
ccTaskPool[lastEmptyTask].readyId = taskId;
checkCudaErrors(cudaMemcpyAsync((int*)&ggTaskPool[lastEmptyTask].readyId, (int*)&ccTaskPool[lastEmptyTask].readyId,
sizeof(int), cudaMemcpyHostToDevice, runtime_stream));
checkCudaErrors(cudaStreamSynchronize(runtime_stream));
round_count = 0;
int i;
while(*totalScheTasks < num_tasks){
checkCudaErrors(cudaMemcpyAsync(totalScheTasks, totalScheTasksDev, sizeof(int), cudaMemcpyDeviceToHost, runtime_stream));
checkCudaErrors(cudaStreamSynchronize(runtime_stream));
}
*totalScheTasks = 0;
checkCudaErrors(cudaMemcpyAsync(totalScheTasksDev, totalScheTasks, sizeof(int), cudaMemcpyHostToDevice, runtime_stream));
checkCudaErrors(cudaStreamSynchronize(runtime_stream));
taskIndex = 0;
taskId = 0;
lastEmptyTask = 0;
}
void runtime_destroy(){
*done = 1;
checkCudaErrors(cudaMemcpyAsync(doneDev, done, sizeof(int), cudaMemcpyHostToDevice, runtime_stream));
checkCudaErrors(cudaStreamSynchronize(runtime_stream));
}
void runtime_free(){
checkCudaErrors(cudaStreamDestroy(master_kernel_stream));
checkCudaErrors(cudaStreamDestroy(runtime_stream));
checkCudaErrors(cudaFreeHost(done));
checkCudaErrors(cudaFreeHost(ccTaskPool));
checkCudaErrors(cudaFreeHost(totalScheTasks));
checkCudaErrors(cudaFree(doneDev));
checkCudaErrors(cudaFree(ggTaskPool));
checkCudaErrors(cudaFree(totalScheTasksDev));
}
|
724c94f67cb16f0860532e433b5df0227c05240a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
using namespace std;
#define THREADS_PER_BLOCK 32
#define NUM_BLOCKS 32
typedef double HighlyPrecise;
const int GENOME_LENGTH = 14;
const int GENE_MAX = 1;
const float MUTATION_FACTOR = 0.2;
const float CROSSOVER_RATE = 0.6;
const int NUM_EPOCHS = 1000;
struct Chromosome {
HighlyPrecise genes[GENOME_LENGTH];
HighlyPrecise fitnessValue;
};
__global__ void setupRandomStream(unsigned int seed, hiprandState_t* states) {
int threadIndex = blockDim.x * blockIdx.x + threadIdx.x;
hiprand_init(seed, threadIndex, 0, &states[threadIndex]);
}
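/* Launch sketch for the kernel above (hypothetical host code; assumes one hiprandState_t
   per thread and that <time.h> is available for the seed):
     hiprandState_t *d_states;
     hipMalloc(&d_states, NUM_BLOCKS * THREADS_PER_BLOCK * sizeof(hiprandState_t));
     hipLaunchKernelGGL(setupRandomStream, dim3(NUM_BLOCKS), dim3(THREADS_PER_BLOCK), 0, 0,
                        (unsigned int)time(NULL), d_states);
   Each thread can then draw its own sequence with hiprand_uniform(&states[threadIndex]). */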
|
724c94f67cb16f0860532e433b5df0227c05240a.cu
|
#include "includes.h"
using namespace std;
#define THREADS_PER_BLOCK 32
#define NUM_BLOCKS 32
typedef double HighlyPrecise;
const int GENOME_LENGTH = 14;
const int GENE_MAX = 1;
const float MUTATION_FACTOR = 0.2;
const float CROSSOVER_RATE = 0.6;
const int NUM_EPOCHS = 1000;
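// A candidate solution for the genetic algorithm: GENOME_LENGTH gene values plus its cached fitness value.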
struct Chromosome {
HighlyPrecise genes[GENOME_LENGTH];
HighlyPrecise fitnessValue;
};
__global__ void setupRandomStream(unsigned int seed, curandState* states) {
int threadIndex = blockDim.x * blockIdx.x + threadIdx.x;
curand_init(seed, threadIndex, 0, &states[threadIndex]);
}
|
fd94acf558feea4a9e25bfdef7167146a6dfa730.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<cuda.h>
#include "wb.h"
#define wbCheck(stmt) \
do { \
hipError_t err = stmt; \
if (err != hipSuccess) { \
wbLog(ERROR, "Failed to run stmt ", #stmt); \
wbLog(ERROR, "Got CUDA error ... ", hipGetErrorString(err)); \
return -1; \
} \
} while (0)
#define Clamp(a, start, end) (max(min(a, end), start))
#define value(arry, i, j, k) (arry[((i)*width + (j)) * depth + (k)])
#define output(i, j, k) value(deviceOutputData, i, j, k)
#define input(i, j, k) value(deviceInputData, i, j, k)
#define shared_data(i, j, k) shared_data[(i)*33 + (j)*3 + (k)]
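// Shared tile layout: 11 x 11 x 3 floats per block, i.e. the 9x9 block of output pixels plus a
// one-pixel halo on each side, stored for the three depth slices k-1, k and k+1.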
__global__ void stencil(float *deviceOutputData, float *deviceInputData, int width, int height,
int depth, int k) {
//@@ INSERT CODE HERE
int i = blockDim.x * blockIdx.x + threadIdx.x;
int j = blockDim.y * blockIdx.y + threadIdx.y;
int x = threadIdx.x;
int y = threadIdx.y;
__shared__ float shared_data[11*11*3];
if (i < height && j < width) {
shared_data(x+1, y+1, 0) = input(i, j, k - 1);
shared_data(x+1, y+1, 1) = input(i, j, k);
shared_data(x+1, y+1, 2) = input(i, j, k + 1);
if (x == 0 && i - 1 >= 0) {
shared_data(x, y+1, 0) = input(i-1, j, k - 1);
shared_data(x, y+1, 1) = input(i-1, j, k);
shared_data(x, y+1, 2) = input(i-1, j, k + 1);
}
if (x == 8 && i + 1 < height) {
shared_data(10, y+1, 0) = input(i+1, j, k - 1);
shared_data(10, y+1, 1) = input(i+1, j, k);
shared_data(10, y+1, 2) = input(i+1, j, k + 1);
}
if (y == 0 && j - 1 >= 0) {
shared_data(x+1, y, 0) = input(i, j-1, k - 1);
shared_data(x+1, y, 1) = input(i, j-1, k);
shared_data(x+1, y, 2) = input(i, j-1, k + 1);
}
if (y == 8 && j + 1 < width) {
shared_data(x+1, 10, 0) = input(i, j+1, k - 1);
shared_data(x+1, 10, 1) = input(i, j+1, k);
shared_data(x+1, 10, 2) = input(i, j+1, k + 1);
}
}
__syncthreads();
if(i < 1 || i >= height -1 || j < 1 || j >= width -1) {
return;
}
float res = shared_data(x+1, y+1, 0) + shared_data(x+1, y+1, 2) + shared_data(x+1, y+2, 1) + shared_data(x+1, y, 1) + shared_data(x+2, y+1, 1) + shared_data(x, y+1, 1) - 6 * shared_data(x+1, y+1, 1);
res = Clamp(res, 0.0, 1.0);
output(i, j, k) = res;
}
static void launch_stencil(float *deviceOutputData, float *deviceInputData,
int width, int height, int depth) {
//@@ INSERT CODE HERE
dim3 dimGrid(ceil(height/9.0), ceil(width/9.0), 1);
dim3 dimBlock(9, 9, 1);
for (int i = 1; i <= depth - 2; i++) {
hipLaunchKernelGGL(( stencil), dim3(dimGrid), dim3(dimBlock), 0, 0, deviceOutputData, deviceInputData, width, height, depth, i);
}
}
// bool checkSoln(float* a, float* b, int h, int w, int d, float *in)
// {
// const float tolerance = 1.5f;
// for(int i=0; i< w; i++){
// for (int j = 0; j < h; j++) {
// for (int k = 0; k < d; k++) {
// int dex = (j * w + i) * d + k;
// int error = a[dex] - b[dex];
// if (error > (1.0f / wbInternal::kImageColorLimit * tolerance)) {
// if (error != 0) {
// printf("(%d, %d, %d): %f %f\n", i, j, k, a[i], b[i]);
// // return false;
// }
// }
// }
// }
// }
// return true;
// }
int main(int argc, char *argv[]) {
wbArg_t args;
int width;
int height;
int depth;
char *inputFile;
wbImage_t input;
wbImage_t output;
float *hostInputData;
float *hostOutputData;
float *deviceInputData;
float *deviceOutputData;
args = wbArg_read(argc, argv);
inputFile = wbArg_getInputFile(args, 0);
input = wbImport(inputFile);
width = wbImage_getWidth(input);
height = wbImage_getHeight(input);
depth = wbImage_getChannels(input);
output = wbImage_new(width, height, depth);
hostInputData = wbImage_getData(input);
hostOutputData = (float*)malloc(height*width*depth*sizeof(float));
wbTime_start(GPU, "Doing GPU memory allocation");
hipMalloc((void **)&deviceInputData, width * height * depth * sizeof(float));
hipMalloc((void **)&deviceOutputData, width * height * depth * sizeof(float));
wbTime_stop(GPU, "Doing GPU memory allocation");
wbTime_start(Copy, "Copying data to the GPU");
hipMemcpy(deviceInputData, hostInputData, width * height * depth * sizeof(float),
hipMemcpyHostToDevice);
wbTime_stop(Copy, "Copying data to the GPU");
wbTime_start(Compute, "Doing the computation on the GPU");
launch_stencil(deviceOutputData, deviceInputData, width, height, depth);
wbTime_stop(Compute, "Doing the computation on the GPU");
wbTime_start(Copy, "Copying data from the GPU");
hipMemcpy(output.data, deviceOutputData, width * height * depth * sizeof(float),
hipMemcpyDeviceToHost);
hipMemcpy(hostOutputData, deviceOutputData, width * height * depth * sizeof(float),
hipMemcpyDeviceToHost);
wbTime_stop(Copy, "Copying data from the GPU");
wbSolution(args, output);
char * eOutputFile = wbArg_getInputFile(args, 1);
wbImage_t eOutput = wbImport(eOutputFile);
float * eOutputData = wbImage_getData(eOutput);
// if (checkSoln(hostOutputData, eOutputData, height, width, depth, hostInputData)) {
// printf("YESS\n");
// }
hipFree(deviceInputData);
hipFree(deviceOutputData);
wbImage_delete(output);
wbImage_delete(input);
return 0;
}
|
fd94acf558feea4a9e25bfdef7167146a6dfa730.cu
|
#include<stdio.h>
#include<cuda.h>
#include "wb.h"
#define wbCheck(stmt) \
do { \
cudaError_t err = stmt; \
if (err != cudaSuccess) { \
wbLog(ERROR, "Failed to run stmt ", #stmt); \
wbLog(ERROR, "Got CUDA error ... ", cudaGetErrorString(err)); \
return -1; \
} \
} while (0)
#define Clamp(a, start, end) (max(min(a, end), start))
#define value(arry, i, j, k) (arry[((i)*width + (j)) * depth + (k)])
#define output(i, j, k) value(deviceOutputData, i, j, k)
#define input(i, j, k) value(deviceInputData, i, j, k)
#define shared_data(i, j, k) shared_data[(i)*33 + (j)*3 + (k)]
__global__ void stencil(float *deviceOutputData, float *deviceInputData, int width, int height,
int depth, int k) {
//@@ INSERT CODE HERE
int i = blockDim.x * blockIdx.x + threadIdx.x;
int j = blockDim.y * blockIdx.y + threadIdx.y;
int x = threadIdx.x;
int y = threadIdx.y;
__shared__ float shared_data[11*11*3];
if (i < height && j < width) {
shared_data(x+1, y+1, 0) = input(i, j, k - 1);
shared_data(x+1, y+1, 1) = input(i, j, k);
shared_data(x+1, y+1, 2) = input(i, j, k + 1);
if (x == 0 && i - 1 >= 0) {
shared_data(x, y+1, 0) = input(i-1, j, k - 1);
shared_data(x, y+1, 1) = input(i-1, j, k);
shared_data(x, y+1, 2) = input(i-1, j, k + 1);
}
if (x == 8 && i + 1 < height) {
shared_data(10, y+1, 0) = input(i+1, j, k - 1);
shared_data(10, y+1, 1) = input(i+1, j, k);
shared_data(10, y+1, 2) = input(i+1, j, k + 1);
}
if (y == 0 && j - 1 >= 0) {
shared_data(x+1, y, 0) = input(i, j-1, k - 1);
shared_data(x+1, y, 1) = input(i, j-1, k);
shared_data(x+1, y, 2) = input(i, j-1, k + 1);
}
if (y == 8 && j + 1 < width) {
shared_data(x+1, 10, 0) = input(i, j+1, k - 1);
shared_data(x+1, 10, 1) = input(i, j+1, k);
shared_data(x+1, 10, 2) = input(i, j+1, k + 1);
}
}
__syncthreads();
if(i < 1 || i >= height -1 || j < 1 || j >= width -1) {
return;
}
float res = shared_data(x+1, y+1, 0) + shared_data(x+1, y+1, 2) + shared_data(x+1, y+2, 1) + shared_data(x+1, y, 1) + shared_data(x+2, y+1, 1) + shared_data(x, y+1, 1) - 6 * shared_data(x+1, y+1, 1);
res = Clamp(res, 0.0, 1.0);
output(i, j, k) = res;
}
static void launch_stencil(float *deviceOutputData, float *deviceInputData,
int width, int height, int depth) {
//@@ INSERT CODE HERE
dim3 dimGrid(ceil(height/9.0), ceil(width/9.0), 1);
dim3 dimBlock(9, 9, 1);
for (int i = 1; i <= depth - 2; i++) {
stencil<<<dimGrid, dimBlock>>>(deviceOutputData, deviceInputData, width, height, depth, i);
}
}
// bool checkSoln(float* a, float* b, int h, int w, int d, float *in)
// {
// const float tolerance = 1.5f;
// for(int i=0; i< w; i++){
// for (int j = 0; j < h; j++) {
// for (int k = 0; k < d; k++) {
// int dex = (j * w + i) * d + k;
// int error = a[dex] - b[dex];
// if (error > (1.0f / wbInternal::kImageColorLimit * tolerance)) {
// if (error != 0) {
// printf("(%d, %d, %d): %f %f\n", i, j, k, a[i], b[i]);
// // return false;
// }
// }
// }
// }
// }
// return true;
// }
int main(int argc, char *argv[]) {
wbArg_t args;
int width;
int height;
int depth;
char *inputFile;
wbImage_t input;
wbImage_t output;
float *hostInputData;
float *hostOutputData;
float *deviceInputData;
float *deviceOutputData;
args = wbArg_read(argc, argv);
inputFile = wbArg_getInputFile(args, 0);
input = wbImport(inputFile);
width = wbImage_getWidth(input);
height = wbImage_getHeight(input);
depth = wbImage_getChannels(input);
output = wbImage_new(width, height, depth);
hostInputData = wbImage_getData(input);
hostOutputData = (float*)malloc(height*width*depth*sizeof(float));
wbTime_start(GPU, "Doing GPU memory allocation");
cudaMalloc((void **)&deviceInputData, width * height * depth * sizeof(float));
cudaMalloc((void **)&deviceOutputData, width * height * depth * sizeof(float));
wbTime_stop(GPU, "Doing GPU memory allocation");
wbTime_start(Copy, "Copying data to the GPU");
cudaMemcpy(deviceInputData, hostInputData, width * height * depth * sizeof(float),
cudaMemcpyHostToDevice);
wbTime_stop(Copy, "Copying data to the GPU");
wbTime_start(Compute, "Doing the computation on the GPU");
launch_stencil(deviceOutputData, deviceInputData, width, height, depth);
wbTime_stop(Compute, "Doing the computation on the GPU");
wbTime_start(Copy, "Copying data from the GPU");
cudaMemcpy(output.data, deviceOutputData, width * height * depth * sizeof(float),
cudaMemcpyDeviceToHost);
cudaMemcpy(hostOutputData, deviceOutputData, width * height * depth * sizeof(float),
cudaMemcpyDeviceToHost);
wbTime_stop(Copy, "Copying data from the GPU");
wbSolution(args, output);
char * eOutputFile = wbArg_getInputFile(args, 1);
wbImage_t eOutput = wbImport(eOutputFile);
float * eOutputData = wbImage_getData(eOutput);
// if (checkSoln(hostOutputData, eOutputData, height, width, depth, hostInputData)) {
// printf("YESS\n");
// }
cudaFree(deviceInputData);
cudaFree(deviceOutputData);
wbImage_delete(output);
wbImage_delete(input);
return 0;
}
|
8146f64e3a9290d7148b065b3f3b7d09a30cc884.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
 Random numbers from the GPU
 See: https://docs.nvidia.com/cuda/hiprand/index.html
 double* randgpu(unsigned int N)
 Generates N uniformly distributed random numbers in (0,1] on the GPU and
 copies them to host memory. The host memory is allocated and managed
 internally. The return value is the pointer to the host memory.
*/
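/* Usage sketch (hypothetical caller; assumes randgpu() is declared in randgpu.h):
     double *r = randgpu(1000);                 // host pointer, managed internally
     double s = 0.0;
     for (unsigned int i = 0; i < 1000; i++) s += r[i];
     printf("mean = %f\n", s / 1000.0);         // expected to be close to 0.5
   Do not free r; the buffer is reused or reallocated by later calls. */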
#include <stdio.h>
#include <stdlib.h>
#include <hiprand/hiprand_kernel.h> // CURAND library header file
#include "common.h"
#include "randgpu.h"
#define BLOCKSIZE 256
//#define DEBUG
static hiprandState_t *d_states=NULL;
static double *d_rnd=NULL;
static double *h_rnd=NULL;
static unsigned int gridsize=0;
__global__ void init_curand(unsigned int seed, hiprandState_t *states)
{
unsigned int tid = threadIdx.x + blockDim.x*blockIdx.x;
// Initialize the random number generator
// The 'state' is stored independently for each thread
hiprand_init(seed, tid, 0, &states[tid]);
}
__global__ void rnd_gpu(double *d_rnd, hiprandState_t *states)
{
unsigned int tid = threadIdx.x + blockDim.x*blockIdx.x;
d_rnd[tid]=hiprand_uniform_double(&states[tid]);
}
static void alloc_random(unsigned int grd)
{
int nth;
nth=grd*BLOCKSIZE;
h_rnd=(double*)malloc(nth*sizeof(double));
CHECK(hipMalloc((void**)&d_rnd,nth*sizeof(double)));
CHECK(hipMalloc((void**)&d_states,nth*sizeof(hiprandState_t)));
#ifdef DEBUG
printf("Initialize random number generator.\n");
#endif
hipLaunchKernelGGL(( init_curand), dim3(grd),dim3(BLOCKSIZE), 0, 0, (unsigned int)seconds(),d_states);
CHECK(hipDeviceSynchronize());
CHECK(hipGetLastError());
gridsize=grd;
}
double* randgpu(unsigned int N)
{
unsigned int grd;
grd=(N+BLOCKSIZE-1)/BLOCKSIZE;
if (d_states==NULL)
alloc_random(grd);
if (gridsize<grd)
{
#ifdef DEBUG
printf("reallocate buffers, max. grid: %d, block: %d, N: %d, grid: %d\n",gridsize,BLOCKSIZE,N,grd);
#endif
free(h_rnd);
CHECK(hipFree(d_rnd));
CHECK(hipFree(d_states));
alloc_random(grd);
}
#ifdef DEBUG
printf("max. grid: %d, block: %d, N: %d, grid: %d\n",gridsize,BLOCKSIZE,N,grd);
#endif
hipLaunchKernelGGL(( rnd_gpu), dim3(grd),dim3(BLOCKSIZE), 0, 0, d_rnd,d_states);
CHECK(hipDeviceSynchronize());
CHECK(hipMemcpy(h_rnd, d_rnd, N*sizeof(double), hipMemcpyDeviceToHost));
CHECK(hipGetLastError());
return h_rnd;
}
double* randgpu_device_ptr(unsigned int N)
{
unsigned int grd;
grd=(N+BLOCKSIZE-1)/BLOCKSIZE;
if (d_states==NULL)
alloc_random(grd);
if (gridsize<grd)
{
#ifdef DEBUG
printf("reallocate buffers, max. grid: %d, block: %d, N: %d, grid: %d\n",gridsize,BLOCKSIZE,N,grd);
#endif
free(h_rnd);
CHECK(hipFree(d_rnd));
CHECK(hipFree(d_states));
alloc_random(grd);
}
#ifdef DEBUG
printf("max. grid: %d, block: %d, N: %d, grid: %d\n",gridsize,BLOCKSIZE,N,grd);
#endif
hipLaunchKernelGGL(( rnd_gpu), dim3(grd),dim3(BLOCKSIZE), 0, 0, d_rnd,d_states);
CHECK(hipDeviceSynchronize());
return d_rnd;
}
|
8146f64e3a9290d7148b065b3f3b7d09a30cc884.cu
|
/*
 Random numbers from the GPU
 See: https://docs.nvidia.com/cuda/curand/index.html
 double* randgpu(unsigned int N)
 Generates N uniformly distributed random numbers in (0,1] on the GPU and
 copies them to host memory. The host memory is allocated and managed
 internally. The return value is the pointer to the host memory.
*/
#include <stdio.h>
#include <stdlib.h>
#include <curand_kernel.h> // CURAND library header file
#include "common.h"
#include "randgpu.h"
#define BLOCKSIZE 256
//#define DEBUG
static curandState *d_states=NULL;
static double *d_rnd=NULL;
static double *h_rnd=NULL;
static unsigned int gridsize=0;
__global__ void init_curand(unsigned int seed, curandState *states)
{
unsigned int tid = threadIdx.x + blockDim.x*blockIdx.x;
// Initialize the random number generator
// The 'state' is stored independently for each thread
curand_init(seed, tid, 0, &states[tid]);
}
__global__ void rnd_gpu(double *d_rnd, curandState *states)
{
unsigned int tid = threadIdx.x + blockDim.x*blockIdx.x;
d_rnd[tid]=curand_uniform_double(&states[tid]);
}
static void alloc_random(unsigned int grd)
{
int nth;
nth=grd*BLOCKSIZE;
h_rnd=(double*)malloc(nth*sizeof(double));
CHECK(cudaMalloc((void**)&d_rnd,nth*sizeof(double)));
CHECK(cudaMalloc((void**)&d_states,nth*sizeof(curandState)));
#ifdef DEBUG
printf("Initialize random number generator.\n");
#endif
init_curand<<<grd,BLOCKSIZE>>>((unsigned int)seconds(),d_states);
CHECK(cudaDeviceSynchronize());
CHECK(cudaGetLastError());
gridsize=grd;
}
double* randgpu(unsigned int N)
{
unsigned int grd;
grd=(N+BLOCKSIZE-1)/BLOCKSIZE;
if (d_states==NULL)
alloc_random(grd);
if (gridsize<grd)
{
#ifdef DEBUG
printf("reallocate buffers, max. grid: %d, block: %d, N: %d, grid: %d\n",gridsize,BLOCKSIZE,N,grd);
#endif
free(h_rnd);
CHECK(cudaFree(d_rnd));
CHECK(cudaFree(d_states));
alloc_random(grd);
}
#ifdef DEBUG
printf("max. grid: %d, block: %d, N: %d, grid: %d\n",gridsize,BLOCKSIZE,N,grd);
#endif
rnd_gpu<<<grd,BLOCKSIZE>>>(d_rnd,d_states);
CHECK(cudaDeviceSynchronize());
CHECK(cudaMemcpy(h_rnd, d_rnd, N*sizeof(double), cudaMemcpyDeviceToHost));
CHECK(cudaGetLastError());
return h_rnd;
}
double* randgpu_device_ptr(unsigned int N)
{
unsigned int grd;
grd=(N+BLOCKSIZE-1)/BLOCKSIZE;
if (d_states==NULL)
alloc_random(grd);
if (gridsize<grd)
{
#ifdef DEBUG
printf("reallocate buffers, max. grid: %d, block: %d, N: %d, grid: %d\n",gridsize,BLOCKSIZE,N,grd);
#endif
free(h_rnd);
CHECK(cudaFree(d_rnd));
CHECK(cudaFree(d_states));
alloc_random(grd);
}
#ifdef DEBUG
printf("max. grid: %d, block: %d, N: %d, grid: %d\n",gridsize,BLOCKSIZE,N,grd);
#endif
rnd_gpu<<<grd,BLOCKSIZE>>>(d_rnd,d_states);
CHECK(cudaDeviceSynchronize());
return d_rnd;
}
|
ea3d9e54cc31d3498c9a06218cdac900c0132787.hip
|
// !!! This is a file automatically generated by hipify!!!
/**
* File: NetworkUtilities.cpp
* Author: akirby
*
* Created on April 23, 2020, 12:37 PM
*/
/* header files */
#include "Network.h"
#include <unistd.h>
#define MIN(x,y) (x)<(y) ? (x):(y)
#define MAX(x,y) (x)>(y) ? (x):(y)
#define GLOBAL_LOOP(level) \
for (int i = 0; i < globalLayers[level].size(); ++i)
#define BLOCK_LOOP(level) \
for (int i = 0; i < blocks[level].size(); ++i)
#define REVERSE_BLOCK_LOOP(level) \
for (int i = blocks[level].size() - 1; i >= 0; --i)
#define ROOT if(global_rank == 0)
void Network::parametersInitializeHost(std::mt19937 &gen){
/* initialize all global layers for model parallelism consistency */
GLOBAL_LOOP(0) {
globalLayers[0][i]->parametersInitializeHost(gen);
}
}
void Network::parametersHostToDeviceAsync(int level){
/* copy only */
BLOCK_LOOP(level) {
blocks[level][i]->parametersHostToDeviceAsync();
}
}
void Network::setFwdTensors(int nsamples,int level){
cudnnTensorDescriptor_t *tensor = &dataTensor;
GLOBAL_LOOP(level){
globalLayers[level][i]->setFwdTensors(tensor,nsamples);
tensor = globalLayers[level][i]->getTensorDesc();
}
}
void Network::setBwdTensors(int level){
cudnnTensorDescriptor_t *tensor = &dataTensor;
/* ====================== */
/* Build Backward Tensors */
/* ====================== */
/* forward pass through layers */
GLOBAL_LOOP(level){
globalLayers[level][i]->setBwdTensors(tensor);
tensor = globalLayers[level][i]->getTensorDesc();
}
}
void Network::setTensors(int nsamples,int level){
setFwdTensors(nsamples,level);
setBwdTensors(level);
}
void Network::setNetworkBuffers(int nsamples,int level){
/* set tensors and calculate workspaceSize */
setTensors(nsamples,level);
/* set CUDNN workspace and unit vector pointers */
BLOCK_LOOP(level) {
blocks[level][i]->setOneVector(d_onevec);
}
}
Real* Network::getOutHost(int level){
return this->h_out[level];
}
Real* Network::getAdjointHost(int level){
return this->h_adjoint[level];
}
Real* Network::getAdjointDevice(int level){
LayerBlock *first_block = blocks[level].front();
Layer *first_layer = first_block->layers.front();
return first_layer->getAdjoint();
}
int Network::getInSize(int level){
LayerBlock *first_block = blocks[level].front();
Layer *first_layer = first_block->layers.front();
return first_layer->getInSize();
}
int Network::getOutSize(int level){
LayerBlock *last_block = blocks[level].back();
Layer *last_layer = last_block->layers.back();
return last_layer->getOutSize();
}
data_t* Network::getOutDevice(int level){
LayerBlock *last_block = blocks[level].back();
Layer *last_layer = last_block->layers.back();
return last_layer->getOutDevice();
}
cudnnTensorDescriptor_t* Network::getOutTensorDesc(int level){
LayerBlock *last_block = blocks[level].back();
Layer *last_layer = last_block->layers.back();
return last_layer->getTensorDesc();
}
void Network::synchronizeNetwork(int level){
int sind = 0;
BLOCK_LOOP(level) {
std::vector<Layer *> &layers = blocks[level][i]->layers;
for (int l = 0; l < layers.size(); ++l) {
hipStream_t *streamID = layers[l]->getCudaStreamAddress();
asyncCudaStreams[sind++] = streamID;
/* reset block CUDA stream to synchronous version */
layers[l]->setCudaStream(&syncCudaStream);
}
}
}
void Network::asynchronizeNetwork(int level){
int sind = 0;
BLOCK_LOOP(level) {
std::vector<Layer *> &layers = blocks[level][i]->layers;
for (int l = 0; l < layers.size(); ++l) {
layers[l]->setCudaStream(asyncCudaStreams[sind++]);
}
}
}
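/* display_mnist prints a 28x28 image to the terminal; pixels above zero are rendered in
   bright cyan using ANSI escape sequences ("\033[1;96m" ... "\033[0m"). */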
void Network::display_mnist(Real *image){
Real val;
for (int j = 0; j < 28; j++) {
for (int i = 0; i < 28; i++) {
val = image[28*j + i];
if (val > 0.0) {
printf("\033[1;96m");
printf("%1.1f ", image[28*j + i]);
printf("\033[0m");
} else {
printf(" ");
}
}
printf("\n");
}
}
void Network::display_layer(Real *image,int nsamples){
int wrap = nsamples/28;
Real val;
for (int j = 0; j < wrap; j++) {
for (int i = 0; i < 28; i++) {
val = image[28*j + i];
if (val > 0.0001) {
printf("\033[1;96m");
printf("%1.1f ", image[28*j + i]);
printf("\033[0m");
} else if(val < -0.0001) {
printf("\033[1;93m");
printf("%1.1f ", image[28*j + i]);
printf("\033[0m");
} else {
printf("\033[1;91m");
printf("%1.1f ", image[28*j + i]);
printf("\033[0m");
}
}
printf("\n");
}
}
void Network::displayLayerOutput(Real *d_output,int layer_id,int nsamples){
Real *host_output = (Real *) malloc(nsamples*sizeof(Real));
checkCudaErrors(hipMemcpy(host_output,
d_output,
sizeof(Real)*nsamples,
hipMemcpyDeviceToHost));
printf("Rank[%d]: Layer[%d]: \n",model_rank,layer_id);
display_layer(host_output,nsamples);
free(host_output);
}
void Network::displayFwdOutput(int nsamples,int level){
if(model_rank == model_nranks - 1) {
data_t *output = getOutDevice(level);
checkCudaErrors(hipMemcpy(h_out[level],
output->ptr,
sizeof(Real)*getOutSize(level)*batch_size,
hipMemcpyDeviceToHost));
printf("Rank[%d]: Network Out: \n",model_rank);
Real *out = h_out[level];
for(int k = getOutSize(level)*batch_size - nsamples; k < getOutSize(level)*batch_size; ++k){
printf("out[%d]: %.15f\n",k,out[k]);
}
}
}
void Network::displayBwdOutput(int nsamples,int level){
if(model_rank == 0) {
Real *adjoint = getAdjointDevice(level);
checkCudaErrors(hipMemcpy(h_adjoint[level],
adjoint,
sizeof(Real)*getInSize(level)*batch_size,
hipMemcpyDeviceToHost));
printf("\nRank[%d]: Network Adjoint: \n",model_rank);
Real *adj = h_adjoint[level];
for(int k = 0; k < nsamples; ++k){
printf("adjoint[%d]: ",k);
if(adj[k] > 0.0) printf(" ");
printf(" %.15e\n",adj[k]);
}
}
}
void Network::barrier(const char *str, int sleep_sec){
printf("Rank[%d]: %s\n",model_rank,str);
sleep(sleep_sec);
MPI_Barrier(MPI_COMM_WORLD);
}
|
ea3d9e54cc31d3498c9a06218cdac900c0132787.cu
|
/**
* File: NetworkUtilities.cpp
* Author: akirby
*
* Created on April 23, 2020, 12:37 PM
*/
/* header files */
#include "Network.h"
#include <unistd.h>
#define MIN(x,y) (x)<(y) ? (x):(y)
#define MAX(x,y) (x)>(y) ? (x):(y)
#define GLOBAL_LOOP(level) \
for (int i = 0; i < globalLayers[level].size(); ++i)
#define BLOCK_LOOP(level) \
for (int i = 0; i < blocks[level].size(); ++i)
#define REVERSE_BLOCK_LOOP(level) \
for (int i = blocks[level].size() - 1; i >= 0; --i)
#define ROOT if(global_rank == 0)
void Network::parametersInitializeHost(std::mt19937 &gen){
/* initialize all global layers for model parallelism consistency */
GLOBAL_LOOP(0) {
globalLayers[0][i]->parametersInitializeHost(gen);
}
}
void Network::parametersHostToDeviceAsync(int level){
/* copy only */
BLOCK_LOOP(level) {
blocks[level][i]->parametersHostToDeviceAsync();
}
}
void Network::setFwdTensors(int nsamples,int level){
cudnnTensorDescriptor_t *tensor = &dataTensor;
GLOBAL_LOOP(level){
globalLayers[level][i]->setFwdTensors(tensor,nsamples);
tensor = globalLayers[level][i]->getTensorDesc();
}
}
void Network::setBwdTensors(int level){
cudnnTensorDescriptor_t *tensor = &dataTensor;
/* ====================== */
/* Build Backward Tensors */
/* ====================== */
/* forward pass through layers */
GLOBAL_LOOP(level){
globalLayers[level][i]->setBwdTensors(tensor);
tensor = globalLayers[level][i]->getTensorDesc();
}
}
void Network::setTensors(int nsamples,int level){
setFwdTensors(nsamples,level);
setBwdTensors(level);
}
void Network::setNetworkBuffers(int nsamples,int level){
/* set tensors and calculate workspaceSize */
setTensors(nsamples,level);
/* set CUDNN workspace and unit vector pointers */
BLOCK_LOOP(level) {
blocks[level][i]->setOneVector(d_onevec);
}
}
Real* Network::getOutHost(int level){
return this->h_out[level];
}
Real* Network::getAdjointHost(int level){
return this->h_adjoint[level];
}
Real* Network::getAdjointDevice(int level){
LayerBlock *first_block = blocks[level].front();
Layer *first_layer = first_block->layers.front();
return first_layer->getAdjoint();
}
int Network::getInSize(int level){
LayerBlock *first_block = blocks[level].front();
Layer *first_layer = first_block->layers.front();
return first_layer->getInSize();
}
int Network::getOutSize(int level){
LayerBlock *last_block = blocks[level].back();
Layer *last_layer = last_block->layers.back();
return last_layer->getOutSize();
}
data_t* Network::getOutDevice(int level){
LayerBlock *last_block = blocks[level].back();
Layer *last_layer = last_block->layers.back();
return last_layer->getOutDevice();
}
cudnnTensorDescriptor_t* Network::getOutTensorDesc(int level){
LayerBlock *last_block = blocks[level].back();
Layer *last_layer = last_block->layers.back();
return last_layer->getTensorDesc();
}
void Network::synchronizeNetwork(int level){
int sind = 0;
BLOCK_LOOP(level) {
std::vector<Layer *> &layers = blocks[level][i]->layers;
for (int l = 0; l < layers.size(); ++l) {
cudaStream_t *streamID = layers[l]->getCudaStreamAddress();
asyncCudaStreams[sind++] = streamID;
/* reset block CUDA stream to synchronous version */
layers[l]->setCudaStream(&syncCudaStream);
}
}
}
void Network::asynchronizeNetwork(int level){
int sind = 0;
BLOCK_LOOP(level) {
std::vector<Layer *> &layers = blocks[level][i]->layers;
for (int l = 0; l < layers.size(); ++l) {
layers[l]->setCudaStream(asyncCudaStreams[sind++]);
}
}
}
void Network::display_mnist(Real *image){
Real val;
for (int j = 0; j < 28; j++) {
for (int i = 0; i < 28; i++) {
val = image[28*j + i];
if (val > 0.0) {
printf("\033[1;96m");
printf("%1.1f ", image[28*j + i]);
printf("\033[0m");
} else {
printf(" ");
}
}
printf("\n");
}
}
void Network::display_layer(Real *image,int nsamples){
int wrap = nsamples/28;
Real val;
for (int j = 0; j < wrap; j++) {
for (int i = 0; i < 28; i++) {
val = image[28*j + i];
if (val > 0.0001) {
printf("\033[1;96m");
printf("%1.1f ", image[28*j + i]);
printf("\033[0m");
} else if(val < -0.0001) {
printf("\033[1;93m");
printf("%1.1f ", image[28*j + i]);
printf("\033[0m");
} else {
printf("\033[1;91m");
printf("%1.1f ", image[28*j + i]);
printf("\033[0m");
}
}
printf("\n");
}
}
void Network::displayLayerOutput(Real *d_output,int layer_id,int nsamples){
Real *host_output = (Real *) malloc(nsamples*sizeof(Real));
checkCudaErrors(cudaMemcpy(host_output,
d_output,
sizeof(Real)*nsamples,
cudaMemcpyDeviceToHost));
printf("Rank[%d]: Layer[%d]: \n",model_rank,layer_id);
display_layer(host_output,nsamples);
free(host_output);
}
void Network::displayFwdOutput(int nsamples,int level){
if(model_rank == model_nranks - 1) {
data_t *output = getOutDevice(level);
checkCudaErrors(cudaMemcpy(h_out[level],
output->ptr,
sizeof(Real)*getOutSize(level)*batch_size,
cudaMemcpyDeviceToHost));
printf("Rank[%d]: Network Out: \n",model_rank);
Real *out = h_out[level];
for(int k = getOutSize(level)*batch_size - nsamples; k < getOutSize(level)*batch_size; ++k){
printf("out[%d]: %.15f\n",k,out[k]);
}
}
}
void Network::displayBwdOutput(int nsamples,int level){
if(model_rank == 0) {
Real *adjoint = getAdjointDevice(level);
checkCudaErrors(cudaMemcpy(h_adjoint[level],
adjoint,
sizeof(Real)*getInSize(level)*batch_size,
cudaMemcpyDeviceToHost));
printf("\nRank[%d]: Network Adjoint: \n",model_rank);
Real *adj = h_adjoint[level];
for(int k = 0; k < nsamples; ++k){
printf("adjoint[%d]: ",k);
if(adj[k] > 0.0) printf(" ");
printf(" %.15e\n",adj[k]);
}
}
}
void Network::barrier(const char *str, int sleep_sec){
printf("Rank[%d]: %s\n",model_rank,str);
sleep(sleep_sec);
MPI_Barrier(MPI_COMM_WORLD);
}
|
07ee811663024152e60f5475b5f98e5b05a01a01.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "matrix.hpp"
#define BLOCK_SIZE 16
// Device methods
// CUDA Kernel matvec
__host__ __device__ int iDivUp(int a, int b) { return ((a % b) != 0) ? (a / b + 1) : (a / b); }
__forceinline__ __device__ int dev_index(int m, int n, int rows, int cols) { return m + n*rows; }
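// dev_index uses column-major storage: element (m,n) lives at m + n*rows.
// Each dev_matmul thread computes one output element c[m,n] as the dot product of row m of a
// with column n of b; the inner loop runs over rows, which is only consistent when both
// matrices are square and of equal size.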
__global__ void dev_matmul(const double* a, const double* b, double* c, int rows, int cols)
{
int m = blockDim.x*blockIdx.x + threadIdx.x;
int n = blockDim.y*blockIdx.y + threadIdx.y;
if (m >= rows || n >= cols) return; // skip threads launched past the matrix edge
int k = dev_index(m, n, rows, cols);
double s = 0;
for(int i = 0; i < rows; ++i)
{
int ka = dev_index(m, i, rows, cols);
int kb = dev_index(i, n, rows, cols);
s = s + a[ka] * b[kb];
}
c[k] = s;
}
/*
__global__ void dev_matmul_opt(const double* a, const double* b, double* c, int rows, int cols)
{
int m = blockDim.x*blockIdx.x + threadIdx.x;
int n = blockDim.y*blockIdx.y + threadIdx.y;
int k = dev_index(m, n, rows, cols);
double s = 0;
for(int i = 0; i < rows; ++i)
{
int ka = dev_index(m, i, rows, cols);
int kb = dev_index(i, n, rows, cols);
s = s + a[ka] * b[kb];
}
c[k] = s;
}
*/
Matrix Matrix::device_multiply(const Matrix& a) const
{
Matrix b(_rows, _cols, 0, this->_name + "(device)*" + a._name);
const double *raw_ptr_this = thrust::raw_pointer_cast(this->_dev_vals.data());
const double *raw_ptr_a = thrust::raw_pointer_cast(a._dev_vals.data());
double *raw_ptr_b = thrust::raw_pointer_cast(b._dev_vals.data());
dim3 DimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 DimGrid(iDivUp(_rows, BLOCK_SIZE), iDivUp(_cols, BLOCK_SIZE));
hipLaunchKernelGGL(( dev_matmul), dim3(DimGrid), dim3(DimBlock), 0, 0, raw_ptr_this, raw_ptr_a, raw_ptr_b, _rows, _cols);
//thrust::plus<double> op;
//thrust::transform(_dev_vals.begin(), _dev_vals.end(), a._dev_vals.begin(), b._dev_vals.begin(), op);
return b;
}
|
07ee811663024152e60f5475b5f98e5b05a01a01.cu
|
#include "matrix.hpp"
#define BLOCK_SIZE 16
// Device methods
// CUDA Kernel matvec
__host__ __device__ int iDivUp(int a, int b) { return ((a % b) != 0) ? (a / b + 1) : (a / b); }
__forceinline__ __device__ int dev_index(int m, int n, int rows, int cols) { return m + n*rows; }
__global__ void dev_matmul(const double* a, const double* b, double* c, int rows, int cols)
{
int m = blockDim.x*blockIdx.x + threadIdx.x;
int n = blockDim.y*blockIdx.y + threadIdx.y;
if (m >= rows || n >= cols) return; // skip threads launched past the matrix edge
int k = dev_index(m, n, rows, cols);
double s = 0;
for(int i = 0; i < rows; ++i)
{
int ka = dev_index(m, i, rows, cols);
int kb = dev_index(i, n, rows, cols);
s = s + a[ka] * b[kb];
}
c[k] = s;
}
/*
__global__ void dev_matmul_opt(const double* a, const double* b, double* c, int rows, int cols)
{
int m = blockDim.x*blockIdx.x + threadIdx.x;
int n = blockDim.y*blockIdx.y + threadIdx.y;
int k = dev_index(m, n, rows, cols);
double s = 0;
for(int i = 0; i < rows; ++i)
{
int ka = dev_index(m, i, rows, cols);
int kb = dev_index(i, n, rows, cols);
s = s + a[ka] * b[kb];
}
c[k] = s;
}
*/
Matrix Matrix::device_multiply(const Matrix& a) const
{
Matrix b(_rows, _cols, 0, this->_name + "(device)*" + a._name);
const double *raw_ptr_this = thrust::raw_pointer_cast(this->_dev_vals.data());
const double *raw_ptr_a = thrust::raw_pointer_cast(a._dev_vals.data());
double *raw_ptr_b = thrust::raw_pointer_cast(b._dev_vals.data());
dim3 DimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 DimGrid(iDivUp(_rows, BLOCK_SIZE), iDivUp(_cols, BLOCK_SIZE));
dev_matmul<<<DimGrid, DimBlock>>>(raw_ptr_this, raw_ptr_a, raw_ptr_b, _rows, _cols);
//thrust::plus<double> op;
//thrust::transform(_dev_vals.begin(), _dev_vals.end(), a._dev_vals.begin(), b._dev_vals.begin(), op);
return b;
}
|
ce9bf0f144f72ed85b33b2afca544d7ec10714c8.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
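/* ScaleDownDense halves the image resolution: each block loads a (SCALEDOWN_W+4) x (SCALEDOWN_H+4)
   input tile, filters it with the symmetric 5-tap kernel held in d_ScaleDownKernel (k0, k1, k2)
   first along rows into brows and then along columns, and writes the half-resolution result. */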
__global__ void ScaleDownDense(float *d_Result, float *d_Data, int width, int pitch, int height, int newpitch)
{
#define BW (SCALEDOWN_W+4)
#define BH (SCALEDOWN_H+4)
#define W2 (SCALEDOWN_W/2)
#define H2 (SCALEDOWN_H/2)
__shared__ float irows[BH*BW];
__shared__ float brows[BH*W2];
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int xp = blockIdx.x*SCALEDOWN_W + tx;
const int yp = blockIdx.y*SCALEDOWN_H + ty;
const int xl = min(width-1, max(0, xp-2));
const int yl = min(height-1, max(0, yp-2));
const float k0 = d_ScaleDownKernel[0];
const float k1 = d_ScaleDownKernel[1];
const float k2 = d_ScaleDownKernel[2];
if (xp<(width+4) && yp<(height+4))
irows[BW*ty + tx] = d_Data[yl*pitch + xl];
__syncthreads();
if (yp<(height+4) && tx<W2) {
float *ptr = &irows[BW*ty + 2*tx];
brows[W2*ty + tx] = k0*(ptr[0] + ptr[4]) + k1*(ptr[1] + ptr[3]) + k2*ptr[2];
}
__syncthreads();
const int xs = blockIdx.x*W2 + tx;
const int ys = blockIdx.y*H2 + ty;
if (tx<W2 && ty<H2 && xs<(width/2) && ys<(height/2)) {
float *ptr = &brows[W2*(ty*2) + tx];
d_Result[ys*newpitch + xs] = k0*(ptr[0] + ptr[4*W2]) + k1*(ptr[1*W2] + ptr[3*W2]) + k2*ptr[2*W2];
}
}
|
ce9bf0f144f72ed85b33b2afca544d7ec10714c8.cu
|
#include "includes.h"
__global__ void ScaleDownDense(float *d_Result, float *d_Data, int width, int pitch, int height, int newpitch)
{
#define BW (SCALEDOWN_W+4)
#define BH (SCALEDOWN_H+4)
#define W2 (SCALEDOWN_W/2)
#define H2 (SCALEDOWN_H/2)
__shared__ float irows[BH*BW];
__shared__ float brows[BH*W2];
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int xp = blockIdx.x*SCALEDOWN_W + tx;
const int yp = blockIdx.y*SCALEDOWN_H + ty;
const int xl = min(width-1, max(0, xp-2));
const int yl = min(height-1, max(0, yp-2));
const float k0 = d_ScaleDownKernel[0];
const float k1 = d_ScaleDownKernel[1];
const float k2 = d_ScaleDownKernel[2];
if (xp<(width+4) && yp<(height+4))
irows[BW*ty + tx] = d_Data[yl*pitch + xl];
__syncthreads();
if (yp<(height+4) && tx<W2) {
float *ptr = &irows[BW*ty + 2*tx];
brows[W2*ty + tx] = k0*(ptr[0] + ptr[4]) + k1*(ptr[1] + ptr[3]) + k2*ptr[2];
}
__syncthreads();
const int xs = blockIdx.x*W2 + tx;
const int ys = blockIdx.y*H2 + ty;
if (tx<W2 && ty<H2 && xs<(width/2) && ys<(height/2)) {
float *ptr = &brows[W2*(ty*2) + tx];
d_Result[ys*newpitch + xs] = k0*(ptr[0] + ptr[4*W2]) + k1*(ptr[1*W2] + ptr[3*W2]) + k2*ptr[2*W2];
}
}
|
bf125d0a44fadbcad22cb0af0206430bef23632e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <opencv2/opencv.hpp>
#define BLOCK_SIZE 16
texture<unsigned char, 2, hipReadModeElementType> inTexture;
__global__ void gpuCalculation(unsigned char* output, int width, int height) {
int txIndex = threadIdx.x + blockIdx.x * blockDim.x;
int tyIndex = threadIdx.y + blockIdx.y * blockDim.y;
if ((txIndex < width) && (tyIndex < height)) {
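// Normalized texture coordinates: sample the input at the texel centre that corresponds to this
// output pixel (the texture is bound with normalized = true in resize()).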
float txnorm = (txIndex + 0.5f) / width;
float tynorm = (tyIndex + 0.5f) / height;
output[tyIndex*width + txIndex] = tex2D(inTexture, txnorm, tynorm);
}
}
void resize(const cv::Mat & input, cv::Mat & output) {
hipEvent_t start, stop;
float time;
hipEventCreate(&start);
hipEventCreate(&stop);
int gray_size = output.step*output.rows;
size_t pitch;
unsigned char *d_input = NULL;
unsigned char *d_output;
hipMallocPitch(&d_input, &pitch, sizeof(unsigned char)*input.step, input.rows);
hipMemcpy2D(d_input, pitch, input.ptr(), sizeof(unsigned char)*input.step, sizeof(unsigned char)*input.step, input.rows, hipMemcpyHostToDevice);
hipChannelFormatDesc desc = hipCreateChannelDesc<unsigned char>();
inTexture.addressMode[0] = hipAddressModeClamp;
inTexture.addressMode[1] = hipAddressModeClamp;
inTexture.filterMode = hipFilterModeLinear;
inTexture.normalized = true;
hipBindTexture2D(0, inTexture, d_input, desc, input.step, input.rows, pitch);
hipMalloc<unsigned char>(&d_output, gray_size);
dim3 block(BLOCK_SIZE, BLOCK_SIZE);
dim3 grid((output.cols + BLOCK_SIZE - 1) / BLOCK_SIZE, (output.rows + BLOCK_SIZE - 1) / BLOCK_SIZE);
hipEventRecord(start, 0);
gpuCalculation << <grid, block >> > (d_output, output.cols, output.rows);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipMemcpy(output.ptr(), d_output, gray_size, hipMemcpyDeviceToHost);
hipUnbindTexture(inTexture);
hipFree(d_input);
hipFree(d_output);
hipEventElapsedTime(&time, start, stop);
std::cout << "Time for the GPU: " << time << " ms" << std::endl;
}
|
bf125d0a44fadbcad22cb0af0206430bef23632e.cu
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <opencv2/opencv.hpp>
#define BLOCK_SIZE 16
texture<unsigned char, 2, cudaReadModeElementType> inTexture;
__global__ void gpuCalculation(unsigned char* output, int width, int height) {
int txIndex = threadIdx.x + blockIdx.x * blockDim.x;
int tyIndex = threadIdx.y + blockIdx.y * blockDim.y;
if ((txIndex < width) && (tyIndex < height)) {
float txnorm = (txIndex + 0.5f) / width;
float tynorm = (tyIndex + 0.5f) / height;
output[tyIndex*width + txIndex] = tex2D(inTexture, txnorm, tynorm);
}
}
void resize(const cv::Mat & input, cv::Mat & output) {
cudaEvent_t start, stop;
float time;
cudaEventCreate(&start);
cudaEventCreate(&stop);
int gray_size = output.step*output.rows;
size_t pitch;
unsigned char *d_input = NULL;
unsigned char *d_output;
cudaMallocPitch(&d_input, &pitch, sizeof(unsigned char)*input.step, input.rows);
cudaMemcpy2D(d_input, pitch, input.ptr(), sizeof(unsigned char)*input.step, sizeof(unsigned char)*input.step, input.rows, cudaMemcpyHostToDevice);
cudaChannelFormatDesc desc = cudaCreateChannelDesc<unsigned char>();
inTexture.addressMode[0] = cudaAddressModeClamp;
inTexture.addressMode[1] = cudaAddressModeClamp;
inTexture.filterMode = cudaFilterModeLinear;
inTexture.normalized = true;
cudaBindTexture2D(0, inTexture, d_input, desc, input.step, input.rows, pitch);
cudaMalloc<unsigned char>(&d_output, gray_size);
dim3 block(BLOCK_SIZE, BLOCK_SIZE);
dim3 grid((output.cols + BLOCK_SIZE - 1) / BLOCK_SIZE, (output.rows + BLOCK_SIZE - 1) / BLOCK_SIZE);
cudaEventRecord(start, 0);
gpuCalculation << <grid, block >> > (d_output, output.cols, output.rows);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaMemcpy(output.ptr(), d_output, gray_size, cudaMemcpyDeviceToHost);
cudaUnbindTexture(inTexture);
cudaFree(d_input);
cudaFree(d_output);
cudaEventElapsedTime(&time, start, stop);
std::cout << "Time for the GPU: " << time << " ms" << std::endl;
}
|
2b680be358cc769a31cc3d11b2ff215dcdce338b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "exclusiveScan.h"
#include "stdio.h"
__global__ void excScan(int* device_start, int* device_result, int n)
{
//Blelloch implementation of exclusive scan
extern __shared__ int temp[];
int thid = blockDim.x * blockIdx.x + threadIdx.x;
int tid = threadIdx.x;
temp[2*tid] = device_start[2*thid];
temp[2*tid + 1] = device_start[2*thid + 1];
//Up Sweeping
int offset = 1;
for (int d = n>>1; d > 0; d >>= 1)
{
__syncthreads();
if (tid < d)
{
int a = offset * (2*tid + 1) - 1;
int b = offset * (2*tid + 2) - 1;
temp[b] += temp[a];
}
offset *= 2;
}
if (tid == 0) temp[n-1] = 0;
//Down Sweeping Algorithms
offset = n;
for (int d = 1; d < n; d *= 2)
{
__syncthreads();
if (tid < d)
{
//int a = offset * (2*tid + 1) - 1;
//int b = offset * (2*tid + 2) - 1;
int a = n - tid * offset - 1;
int b = n - tid * offset - 1 - offset / 2;
int tmp = temp[a];
temp[a] += temp[b];
temp[b] = tmp;
}
offset >>= 1;
}
__syncthreads();
device_result[2*thid] = temp[2*tid];
device_result[2*thid + 1] = temp[2*tid + 1];
}
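/* Worked example for one 8-element block (sketch):
     input              3  1  7  0  4  1  6  3
     after up-sweep     3  4  7 11  4  5  6 25   (partial sums at power-of-two strides)
     after down-sweep   0  3  4 11 11 15 16 22   (exclusive prefix sums)
   preSum then collects each block's total, exclusive_scan is applied recursively to those totals,
   and postSum adds the scanned total back to every element of its block, so the result is an
   exclusive scan over the whole array. */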
__global__ void postSum(int* device_result, int* scanBlocks, int length)
{
int thid = blockDim.x * blockIdx.x + threadIdx.x;
int bid = blockIdx.x;
device_result[2*thid] += scanBlocks[bid];
device_result[2*thid + 1] += scanBlocks[bid];
}
__global__ void preSum(int* device_start, int* device_result, int* sumBlocks, int n)
{
int tid = threadIdx.x;
int bid = blockIdx.x; // Recursive
//if (tid == 0) sumBlocks[tid] = device_start[n*(tid+1) - 1] + device_result[n*(tid+1) - 1];
if (tid == 0) sumBlocks[bid] = device_start[n*(bid+1) - 1] + device_result[n*(bid+1) - 1];
}
void exclusive_scan(int* device_start, int length, int* device_result)
{
/* Fill in this function with your exclusive scan implementation.
* You are passed the locations of the input and output in device memory,
* but this is host code -- you will need to declare one or more CUDA
* kernels (with the __global__ decorator) in order to actually run code
* in parallel on the GPU.
* Note you are given the real length of the array, but may assume that
* both the input and the output arrays are sized to accommodate the next
* power of 2 larger than the input.
*/
int blocksize = 512; //512;
while (length < 2 * blocksize)
{
blocksize /= 2;
}
//length = nextPow2(length);
int nblocks = (length/2 + blocksize - 1) / blocksize;
int bytesShared = 0;
int* sumBlocks;
int* scanBlocks;
int sizeNext = nextPow2(nblocks);
hipMalloc((void**)&sumBlocks, sizeof(int) * sizeNext);
hipMalloc((void**)&scanBlocks, sizeof(int) * sizeNext);
hipMemset(sumBlocks, 0, sizeof(int) * nblocks);
hipMemset(scanBlocks, 0, sizeof(int) * nblocks);
bytesShared = sizeof(int) * 2 * blocksize;
hipLaunchKernelGGL(( excScan), dim3(nblocks), dim3(blocksize), bytesShared, 0, device_start, device_result, 2*blocksize);
if (nblocks > 1)
{
hipLaunchKernelGGL(( preSum), dim3(nblocks), dim3(blocksize), 0, 0, device_start, device_result, sumBlocks, 2*blocksize);
exclusive_scan(sumBlocks, nblocks, scanBlocks);
hipLaunchKernelGGL(( postSum), dim3(nblocks), dim3(blocksize), 0, 0, device_result, scanBlocks, 2*blocksize);
}
hipFree(sumBlocks);
hipFree(scanBlocks);
}
void cudaScan(int* inarray, int length, int* resultarray)
{
int* device_result;
int* device_input;
int rounded = nextPow2(length);
hipMalloc((void**)&device_result, sizeof(int) * rounded);
hipMalloc((void**)&device_input, sizeof(int) * rounded);
hipMemcpy(device_input, inarray, sizeof(int) * length, hipMemcpyHostToDevice);
hipMemcpy(device_result, inarray, sizeof(int) * length, hipMemcpyHostToDevice);
exclusive_scan(device_input, length, device_result);
hipMemcpy(resultarray, device_result, length * sizeof(int), hipMemcpyDeviceToHost);
}
|
2b680be358cc769a31cc3d11b2ff215dcdce338b.cu
|
#include "cuda.h"
#include "exclusiveScan.h"
#include "stdio.h"
__global__ void excScan(int* device_start, int* device_result, int n)
{
//Blelloch implementation of exclusive scan
extern __shared__ int temp[];
int thid = blockDim.x * blockIdx.x + threadIdx.x;
int tid = threadIdx.x;
temp[2*tid] = device_start[2*thid];
temp[2*tid + 1] = device_start[2*thid + 1];
//Up Sweeping
int offset = 1;
for (int d = n>>1; d > 0; d >>= 1)
{
__syncthreads();
if (tid < d)
{
int a = offset * (2*tid + 1) - 1;
int b = offset * (2*tid + 2) - 1;
temp[b] += temp[a];
}
offset *= 2;
}
if (tid == 0) temp[n-1] = 0;
//Down Sweeping Algorithms
offset = n;
for (int d = 1; d < n; d *= 2)
{
__syncthreads();
if (tid < d)
{
//int a = offset * (2*tid + 1) - 1;
//int b = offset * (2*tid + 2) - 1;
int a = n - tid * offset - 1;
int b = n - tid * offset - 1 - offset / 2;
int tmp = temp[a];
temp[a] += temp[b];
temp[b] = tmp;
}
offset >>= 1;
}
__syncthreads();
device_result[2*thid] = temp[2*tid];
device_result[2*thid + 1] = temp[2*tid + 1];
}
__global__ void postSum(int* device_result, int* scanBlocks, int length)
{
int thid = blockDim.x * blockIdx.x + threadIdx.x;
int bid = blockIdx.x;
device_result[2*thid] += scanBlocks[bid];
device_result[2*thid + 1] += scanBlocks[bid];
}
__global__ void preSum(int* device_start, int* device_result, int* sumBlocks, int n)
{
int tid = threadIdx.x;
int bid = blockIdx.x; // Recursive
//if (tid == 0) sumBlocks[tid] = device_start[n*(tid+1) - 1] + device_result[n*(tid+1) - 1];
if (tid == 0) sumBlocks[bid] = device_start[n*(bid+1) - 1] + device_result[n*(bid+1) - 1];
}
void exclusive_scan(int* device_start, int length, int* device_result)
{
/* Fill in this function with your exclusive scan implementation.
* You are passed the locations of the input and output in device memory,
* but this is host code -- you will need to declare one or more CUDA
* kernels (with the __global__ decorator) in order to actually run code
* in parallel on the GPU.
* Note you are given the real length of the array, but may assume that
* both the input and the output arrays are sized to accommodate the next
* power of 2 larger than the input.
*/
int blocksize = 512; //512;
while (length < 2 * blocksize)
{
blocksize /= 2;
}
//length = nextPow2(length);
int nblocks = (length/2 + blocksize - 1) / blocksize;
int bytesShared = 0;
int* sumBlocks;
int* scanBlocks;
int sizeNext = nextPow2(nblocks);
cudaMalloc((void**)&sumBlocks, sizeof(int) * sizeNext);
cudaMalloc((void**)&scanBlocks, sizeof(int) * sizeNext);
cudaMemset(sumBlocks, 0, sizeof(int) * nblocks);
cudaMemset(scanBlocks, 0, sizeof(int) * nblocks);
bytesShared = sizeof(int) * 2 * blocksize;
excScan<<<nblocks, blocksize, bytesShared>>>(device_start, device_result, 2*blocksize);
if (nblocks > 1)
{
preSum<<<nblocks, blocksize>>>(device_start, device_result, sumBlocks, 2*blocksize);
exclusive_scan(sumBlocks, nblocks, scanBlocks);
postSum<<<nblocks, blocksize>>>(device_result, scanBlocks, 2*blocksize);
}
cudaFree(sumBlocks);
cudaFree(scanBlocks);
}
void cudaScan(int* inarray, int length, int* resultarray)
{
int* device_result;
int* device_input;
int rounded = nextPow2(length);
cudaMalloc((void**)&device_result, sizeof(int) * rounded);
cudaMalloc((void**)&device_input, sizeof(int) * rounded);
cudaMemcpy(device_input, inarray, sizeof(int) * length, cudaMemcpyHostToDevice);
cudaMemcpy(device_result, inarray, sizeof(int) * length, cudaMemcpyHostToDevice);
exclusive_scan(device_input, length, device_result);
cudaMemcpy(resultarray, device_result, length * sizeof(int), cudaMemcpyDeviceToHost);
}
|
28baebe6677721477ec5400679850d5b72db29c2.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* 6_MainLoop.cu
*
* Created on: November 23, 2017
* Author: Gabriel Y Sirat */
/** Computes the next simulation of microimages over the full measured surface,
 * optionally with a number of laser positions below NIMAGESPARALLEL (16).
* **/
#include "0_Mainparameters.h"
#define VERBOSELOOP 1
#define SPARSEDATA 1
#include "0_include.tst"
__managed__ float *new_simus, *Data, *Rfactor, *distribvalidGPU;
__managed__ double MaxNewSimus = 0.0f, EnergyGlobal = 0.0f;
__global__ void BigLoop(devicedata DD) {
extern __shared__ int shared[]; /***************semi-global variables stored in shared memory ***************/
int *image_to_scratchpad_offset_tile = (int *) shared; // Offset of each image in NIMAGESPARALLEL block
float *Scratchpad = (float *) &image_to_scratchpad_offset_tile[NIMAGESPARALLEL]; // ASCRATCH floats for Scratchpad
float *shared_distrib = (float*) &Scratchpad[ASCRATCH]; // ASCRATCH floats for distrib
/*****************constant values & auxiliary variables stored in registers *****************/
register float PSFDISVAL[MAXTHRRATIO] = { 0.0f }; // multiplication of pPSF and distribution
register int tmpi[MAXTHRRATIO], ipixel[MAXTHRRATIO], jpixel[MAXTHRRATIO], valid_pixel[MAXTHRRATIO],
distribpos0[MAXTHRRATIO], distribpos[MAXTHRRATIO];
int tilexdevice, tileydevice, tileXY, tileXYD;
float * scrglobal;
timer = clock64();
/***** INITIALIZATION *****************/
int ithreads = threadIdx.x;
int distrib_number = blockIdx.z;
int itb = blockIdx.x + blockIdx.y * gridDim.x + blockIdx.z * gridDim.x * gridDim.y; // Block position
int itc = ithreads + itb;
int iprint = !VERBOSELOOP + itc;
int center_distrib = ((YDistrib / 2) * XDistrib) + XDistrib / 2;
int center_microimage = (PixZoomo2) * PixZoom + PixZoomo2;
DD.step = 1;
time_init = clock64(); time_start = clock64(); timer = clock64();
#include "8_startdevice.cu"
/*************************Threads and pixels related parameters *******************************/
for (int apix = 0; apix < THreadsRatio; apix++) {
tmpi[apix] = (ithreads + apix * THREADSVAL);
ipixel[apix] = tmpi[apix] % PixZoom - PixZoomo2; // centered on the center of the zoomed microimage
jpixel[apix] = tmpi[apix] / PixZoom - PixZoomo2; // centered on the center of the zoomed microimage
valid_pixel[apix] = tmpi[apix] < PixZoomSquare;
distribpos0[apix] = center_distrib + ipixel[apix] - PSFZoomo2 + (jpixel[apix] - PSFZoomo2) * XDistrib;
}
#include "8_testthreads.cu"
/*************************************************************************************************/
/**O. Initialize zoomed distrib as calculated by the preprocessing **/
/************************************************************************************************/
#pragma unroll
for (int idistrub = ithreads; idistrub < ADistrib; idistrub += THREADSVAL)
*(shared_distrib + idistrub) = *(original_distrib + idistrub + distrib_number * ADistrib);
#include "8_testdistrib.cu" // validated
/********************* ***********/
/**A Outer Loop on aggregates **/
/********************* ***********/
for (int aggregx = 0; aggregx < DD.NbAggregx; aggregx++)
for (int aggregy = 0; aggregy < DD.NbAggregy; aggregy++) {
tilexdevice = blockIdx.x + aggregx * DD.tileperaggregatex;
tileydevice = blockIdx.y + aggregy * DD.tileperaggregatey;
tileXY = tilexdevice + DD.NbTilex * tileydevice;
tileXYD = tileXY + distrib_number * DD.NbTileXY;
int MemoryOffset = ASCRATCH * tileXY;
scrglobal = scratchpad_matrix + MemoryOffset;
#include "8_testaggreg.cu"
/**B. Initialize Scratchpad to previous reconstruction in float : OPTIMIZED, also with aggregates **/
/************************************************************************************************/
#pragma unroll
for (int iscratch = ithreads; iscratch < ASCRATCH; iscratch += THREADSVAL)
*(Scratchpad + iscratch) = *(scrglobal + iscratch);
#include "8_testscratch.cu"
/** END of B *******************/
/**C Intermediate Loop on images blocks of NIMAGESPARALLEL ********************/
/*********************************************************************************/
/** preparation of intermediate data for each block of NIMAGESPARALLEL************/
register float *pscratch_0[NIMAGESPARALLEL], *pscratch_1[NIMAGESPARALLEL],
*pscratch_2[NIMAGESPARALLEL], *pscratch_3[NIMAGESPARALLEL];
/****************Larger segmented areas to be stored in registers, initialized to zero *************************/
register float NSIF_0[NIMAGESPARALLEL] = { 0.0f }, NSIF_1[NIMAGESPARALLEL] = { 0.0f }, // NSIF New Simulation In Float
NSIF_2[NIMAGESPARALLEL] = { 0.0f }, NSIF_3[NIMAGESPARALLEL] = { 0.0f };
for (int iglobal = 0; iglobal < DD.maxLaserintile; iglobal += NIMAGESPARALLEL) { // image number in global tile list
int zero_posimages = ithreads + (iglobal + tileXYD * DD.maxLaserintile) * NThreads;
for (int apix = 0; apix < THreadsRatio; apix++) distribpos[apix] = distribpos0[apix];
#include "8_distribpos.cu"
//Thread, for each SM, for each image, on several pixels separated by THREADSVAL of the small block
// C_1. Transfer from global to shared memory the relative position of the beginning of the scratchpad for each image
// C.2 initialize the scratch position for each image for each pixel of the group dealt in this thread
for (int iblockima = 0; iblockima < NIMAGESPARALLEL; iblockima++) {
*(image_to_scratchpad_offset_tile + iblockima) = *(image_to_scratchpad_offset+ iglobal + iblockima + tileXYD*DD.maxLaserintile);
#include "8_offset.cu"
int pos_0 = image_to_scratchpad_offset_tile[iblockima] + ipixel[0] + jpixel[0] * XSCRATCH;
int pos_1 = image_to_scratchpad_offset_tile[iblockima] + ipixel[1] + jpixel[1] * XSCRATCH;
int pos_2 = image_to_scratchpad_offset_tile[iblockima] + ipixel[2] + jpixel[2] * XSCRATCH;
int pos_3 = image_to_scratchpad_offset_tile[iblockima] + ipixel[3] + jpixel[3] * XSCRATCH;
pscratch_0[iblockima] = (Scratchpad + pos_0); // Change (simplify) in CUDA 9.0
pscratch_1[iblockima] = (Scratchpad + pos_1);
pscratch_2[iblockima] = (Scratchpad + pos_2);
pscratch_3[iblockima] = (Scratchpad + pos_3);
#include "8_pscratchtest.cu"
} // end of blockima small loop
/**************************************/
/******D. SIMUS CALCULATION************/
/**************************************/
for (int jPSF = 0; jPSF < PSFZoom; jPSF++) { // loop on jPSF on y axis - 0 to PSFZoom
# pragma unroll
for (int iPSF = 0; iPSF < PSFZoom; iPSF++) { // loop on iPSF on x axis - ... 0 to PSFZoom
int PSFpos = iPSF + jPSF * PSFZoom; // PSFpos from 0 to PSFZoom*PSFZoom
for (int apix = 0; apix < THreadsRatio; apix++)
PSFDISVAL[apix] = valid_pixel[apix] * *(original_PSF + PSFpos) * *(original_distrib + distribpos[apix]);
/** D_3 Inner loops on THreadsRatio pixels block and on block of NIMAGESPARALLEL images
* require best optimization in assembler **/
# pragma unroll
for (int iblockima = 0; iblockima < NIMAGESPARALLEL; iblockima++) {
float tmp_0 = *(pscratch_0[iblockima]);
NSIF_0[iblockima] += PSFDISVAL[0] * tmp_0;
float tmp_1 = *(pscratch_1[iblockima]);
NSIF_1[iblockima] += PSFDISVAL[1] * tmp_1;
float tmp_2 = *(pscratch_2[iblockima]);
NSIF_2[iblockima] += PSFDISVAL[2] * tmp_2;
float tmp_3 = *(pscratch_3[iblockima]);
NSIF_3[iblockima] += PSFDISVAL[3] * tmp_3;
#include "8_testdisval.cu"
pscratch_0[iblockima]++;pscratch_1[iblockima]++;
pscratch_2[iblockima]++;pscratch_3[iblockima]++;
} // iblockima most inner loop
#include "8_testdistribvalA.cu"
for (int apix = 0; apix < THreadsRatio; apix++) distribpos[apix]++; // update intermediate value of distrib
} // iPSF loop
#include "8_testdistribvalB.cu"
for (int apix = 0; apix < THreadsRatio; apix++) distribpos[apix] += XDistrib - PSFZoom; // update intermediate value of distrib for a full line
for (int iblockima = 0; iblockima < NIMAGESPARALLEL; iblockima++) {
pscratch_0[iblockima] += XSCRATCH - PSFZoom; pscratch_1[iblockima] += XSCRATCH - PSFZoom;
pscratch_2[iblockima] += XSCRATCH - PSFZoom; pscratch_3[iblockima] += XSCRATCH - PSFZoom;
} // small loop on iblockima
} // loop on jPSF
int it = zero_posimages;
# pragma unroll
for (int iblockima = 0; iblockima < NIMAGESPARALLEL; iblockima++) {
new_simus[it ] = valid_image[iblockima] * NSIF_0[iblockima];
new_simus[it + 1 * THREADSVAL] = valid_image[iblockima] * NSIF_1[iblockima];
new_simus[it + 2 * THREADSVAL] = valid_image[iblockima] * NSIF_2[iblockima];
new_simus[it + 3 * THREADSVAL] = valid_image[iblockima] * NSIF_3[iblockima];
it += NThreads;
} // end intermediate loop on iblockima
} // end of iglobal loop
} // end of Aggregates loop
if(!iprint && VERBOSE) printf("Energy %8.6f absolute difference %8.6f\n\n", EnergyGlobal, absdiff);
} // end of 6_MainLoop
|
28baebe6677721477ec5400679850d5b72db29c2.cu
|
/*
* 6_MainLoop.cu
*
* Created on: November 23, 2017
* Author: Gabriel Y Sirat */
/** Contains the next simulation of microimages, in the full measured surface
* with optionally number of laser positions below 16 the value of NIMAGESPARALLEL!!
* **/
#include "0_Mainparameters.h"
#define VERBOSELOOP 1
#define SPARSEDATA 1
#include "0_include.tst"
__managed__ float *new_simus, *Data, *Rfactor, *distribvalidGPU;
__managed__ double MaxNewSimus = 0.0f, EnergyGlobal = 0.0f;
__global__ void BigLoop(devicedata DD) {
extern __shared__ int shared[]; /***************semi-global variables stored in shared memory ***************/
int *image_to_scratchpad_offset_tile = (int *) shared; // Offset of each image in NIMAGESPARALLEL block
float *Scratchpad = (float *) &image_to_scratchpad_offset_tile[NIMAGESPARALLEL]; // ASCRATCH floats for Scratchpad
float *shared_distrib = (float*) &Scratchpad[ASCRATCH]; // ASCRATCH floats for distrib
/*****************constant values & auxiliary variables stored in registers *****************/
register float PSFDISVAL[MAXTHRRATIO] = { 0.0f }; // multiplication of pPSF and distribution
register int tmpi[MAXTHRRATIO], ipixel[MAXTHRRATIO], jpixel[MAXTHRRATIO], valid_pixel[MAXTHRRATIO],
distribpos0[MAXTHRRATIO], distribpos[MAXTHRRATIO];
int tilexdevice, tileydevice, tileXY, tileXYD;
float * scrglobal;
timer = clock64();
/***** INITIALIZATION *****************/
int ithreads = threadIdx.x;
int distrib_number = blockIdx.z;
int itb = blockIdx.x + blockIdx.y * gridDim.x + blockIdx.z * gridDim.x * gridDim.y; // Block position
int itc = ithreads + itb;
int iprint = !VERBOSELOOP + itc;
int center_distrib = ((YDistrib / 2) * XDistrib) + XDistrib / 2;
int center_microimage = (PixZoomo2) * PixZoom + PixZoomo2;
DD.step = 1;
time_init = clock64(); time_start = clock64(); timer = clock64();
#include "8_startdevice.cu"
/*************************Threads and pixels related parameters *******************************/
for (int apix = 0; apix < THreadsRatio; apix++) {
tmpi[apix] = (ithreads + apix * THREADSVAL);
ipixel[apix] = tmpi[apix] % PixZoom - PixZoomo2; // centered on the center of the zoomed microimage
jpixel[apix] = tmpi[apix] / PixZoom - PixZoomo2; // centered on the center of the zoomed microimage
valid_pixel[apix] = tmpi[apix] < PixZoomSquare;
distribpos0[apix] = center_distrib + ipixel[apix] - PSFZoomo2 + (jpixel[apix] - PSFZoomo2) * XDistrib;
}
#include "8_testthreads.cu"
/*************************************************************************************************/
/**O. Initialize zoomed distrib as calculated by the preprocessing **/
/************************************************************************************************/
#pragma unroll
for (int idistrib = ithreads; idistrib < ADistrib; idistrib += THREADSVAL)
*(shared_distrib + idistrib) = *(original_distrib + idistrib + distrib_number * ADistrib);
#include "8_testdistrib.cu" // validated
/********************* ***********/
/**A Outer Loop on aggregates **/
/********************* ***********/
for (int aggregx = 0; aggregx < DD.NbAggregx; aggregx++)
for (int aggregy = 0; aggregy < DD.NbAggregy; aggregy++) {
tilexdevice = blockIdx.x + aggregx * DD.tileperaggregatex;
tileydevice = blockIdx.y + aggregy * DD.tileperaggregatey;
tileXY = tilexdevice + DD.NbTilex * tileydevice;
tileXYD = tileXY + distrib_number * DD.NbTileXY;
int MemoryOffset = ASCRATCH * tileXY;
scrglobal = scratchpad_matrix + MemoryOffset;
#include "8_testaggreg.cu"
/**B. Initialize Scratchpad to previous reconstruction in float : OPTIMIZED, also with aggregates **/
/************************************************************************************************/
#pragma unroll
for (int iscratch = ithreads; iscratch < ASCRATCH; iscratch += THREADSVAL)
*(Scratchpad + iscratch) = *(scrglobal + iscratch);
#include "8_testscratch.cu"
/** END of B *******************/
/**C Intermediate Loop on images blocks of NIMAGESPARALLEL ********************/
/*********************************************************************************/
/** preparation of intermediate data for each block of NIMAGESPARALLEL************/
register float *pscratch_0[NIMAGESPARALLEL], *pscratch_1[NIMAGESPARALLEL],
*pscratch_2[NIMAGESPARALLEL], *pscratch_3[NIMAGESPARALLEL];
/****************Larger segmented areas to be stored in registers, initialized to zero *************************/
register float NSIF_0[NIMAGESPARALLEL] = { 0.0f }, NSIF_1[NIMAGESPARALLEL] = { 0.0f }, // NSIF New Simulation In Float
NSIF_2[NIMAGESPARALLEL] = { 0.0f }, NSIF_3[NIMAGESPARALLEL] = { 0.0f };
for (int iglobal = 0; iglobal < DD.maxLaserintile; iglobal += NIMAGESPARALLEL) { // image number in global tile list
int zero_posimages = ithreads + (iglobal + tileXYD * DD.maxLaserintile) * NThreads;
for (int apix = 0; apix < THreadsRatio; apix++) distribpos[apix] = distribpos0[apix];
#include "8_distribpos.cu"
//Thread, for each SM, for each image, on several pixels separated by THREADSVAL of the small block
// C_1. Transfer from global to shared memory the relative position of the beginning of the scratchpad for each image
// C.2 initialize the scratch position for each image for each pixel of the group dealt in this thread
for (int iblockima = 0; iblockima < NIMAGESPARALLEL; iblockima++) {
*(image_to_scratchpad_offset_tile + iblockima) = *(image_to_scratchpad_offset+ iglobal + iblockima + tileXYD*DD.maxLaserintile);
#include "8_offset.cu"
int pos_0 = image_to_scratchpad_offset_tile[iblockima] + ipixel[0] + jpixel[0] * XSCRATCH;
int pos_1 = image_to_scratchpad_offset_tile[iblockima] + ipixel[1] + jpixel[1] * XSCRATCH;
int pos_2 = image_to_scratchpad_offset_tile[iblockima] + ipixel[2] + jpixel[2] * XSCRATCH;
int pos_3 = image_to_scratchpad_offset_tile[iblockima] + ipixel[3] + jpixel[3] * XSCRATCH;
pscratch_0[iblockima] = (Scratchpad + pos_0); // Change (simplify) in CUDA 9.0
pscratch_1[iblockima] = (Scratchpad + pos_1);
pscratch_2[iblockima] = (Scratchpad + pos_2);
pscratch_3[iblockima] = (Scratchpad + pos_3);
#include "8_pscratchtest.cu"
} // end of blockima small loop
/**************************************/
/******D. SIMUS CALCULATION************/
/**************************************/
for (int jPSF = 0; jPSF < PSFZoom; jPSF++) { // loop on jPSF on y axis - 0 to PSFZoom
# pragma unroll
for (int iPSF = 0; iPSF < PSFZoom; iPSF++) { // loop on iPSF on x axis - ... 0 to PSFZoom
int PSFpos = iPSF + jPSF * PSFZoom; // PSFpos from 0 to PSFZoom*PSFZoom
for (int apix = 0; apix < THreadsRatio; apix++)
PSFDISVAL[apix] = valid_pixel[apix] * *(original_PSF + PSFpos) * *(original_distrib + distribpos[apix]);
/** D_3 Inner loops on THreadsRatio pixels block and on block of NIMAGESPARALLEL images
* require best optimization in assembler **/
# pragma unroll
for (int iblockima = 0; iblockima < NIMAGESPARALLEL; iblockima++) {
float tmp_0 = *(pscratch_0[iblockima]);
NSIF_0[iblockima] += PSFDISVAL[0] * tmp_0;
float tmp_1 = *(pscratch_1[iblockima]);
NSIF_1[iblockima] += PSFDISVAL[1] * tmp_1;
float tmp_2 = *(pscratch_2[iblockima]);
NSIF_2[iblockima] += PSFDISVAL[2] * tmp_2;
float tmp_3 = *(pscratch_3[iblockima]);
NSIF_3[iblockima] += PSFDISVAL[3] * tmp_3;
#include "8_testdisval.cu"
pscratch_0[iblockima]++;pscratch_1[iblockima]++;
pscratch_2[iblockima]++;pscratch_3[iblockima]++;
} // iblockima most inner loop
#include "8_testdistribvalA.cu"
for (int apix = 0; apix < THreadsRatio; apix++) distribpos[apix]++; // update intermediate value of distrib
} // iPSF loop
#include "8_testdistribvalB.cu"
for (int apix = 0; apix < THreadsRatio; apix++) distribpos[apix] += XDistrib - PSFZoom; // update intermediate value of distrib for a full line
for (int iblockima = 0; iblockima < NIMAGESPARALLEL; iblockima++) {
pscratch_0[iblockima] += XSCRATCH - PSFZoom; pscratch_1[iblockima] += XSCRATCH - PSFZoom;
pscratch_2[iblockima] += XSCRATCH - PSFZoom; pscratch_3[iblockima] += XSCRATCH - PSFZoom;
} // small loop on iblockima
} // loop on jPSF
int it = zero_posimages;
# pragma unroll
for (int iblockima = 0; iblockima < NIMAGESPARALLEL; iblockima++) {
new_simus[it ] = valid_image[iblockima] * NSIF_0[iblockima];
new_simus[it + 1 * THREADSVAL] = valid_image[iblockima] * NSIF_1[iblockima];
new_simus[it + 2 * THREADSVAL] = valid_image[iblockima] * NSIF_2[iblockima];
new_simus[it + 3 * THREADSVAL] = valid_image[iblockima] * NSIF_3[iblockima];
it += NThreads;
} // end intermediate loop on iblockima
} // end of iglobal loop
} // end of Aggregates loop
if(!iprint && VERBOSE) printf("Energy %8.6f absolute difference %8.6f\n\n", EnergyGlobal, absdiff);
} // end of 6_MainLoop
|
f2d61dc9fe49eedd1ff28160e84af4bbfa30c839.hip
|
// !!! This is a file automatically generated by hipify!!!
/**
* @brief Utilities for creating FSAs.
*
* Note that serializations are done in Python.
*
* @copyright
* Copyright (c) 2020 Mobvoi Inc. (authors: Fangjun Kuang)
* Guoguo Chen
*
* @copyright
* See LICENSE for clarification regarding multiple authors
*/
#include <algorithm>
#include <sstream>
#include <utility>
#include <vector>
#include "k2/csrc/context.h"
#include "k2/csrc/fsa_utils.h"
namespace k2 {
// field separator within a line for a text form FSA
static constexpr const char *kDelim = " \t";
// Convert a string to an integer. Abort the program on failure.
static int32_t StringToInt(const std::string &s) {
K2_CHECK(!s.empty());
bool ok = false;
char *p = nullptr;
// std::strtol requires a `long` type
long n = std::strtol(s.c_str(), &p, 10); // NOLINT
if (*p == '\0') ok = true;
auto res = static_cast<int32_t>(n);
if (n != res) ok = false; // out of range
K2_CHECK(ok) << "Failed to convert " << s << " to an integer";
return res;
}
// Convert a string to a float. Abort the program on failure.
// TODO(guoguo): We may run into locale problems, with comma vs. period for
// decimals. We have to test if the C code will behave the same
// w.r.t. locale as Python does.
static float StringToFloat(const std::string &s) {
K2_CHECK(!s.empty());
char *p = nullptr;
float f = std::strtof(s.c_str(), &p);
if (*p != '\0') K2_LOG(FATAL) << "Failed to convert " << s << " to a float";
return f;
}
// Trim leading and trailing spaces of a string.
static void TrimString(std::string *s) {
K2_CHECK_NE(s, nullptr);
auto not_space = [](int32_t c) -> bool { return std::isspace(c) == 0; };
s->erase(s->begin(), std::find_if(s->begin(), s->end(), not_space));
s->erase(std::find_if(s->rbegin(), s->rend(), not_space).base(), s->end());
}
/* Split a string to a vector of strings using a set of delimiters.
Example usage:
@code
std::string in = "1 2 3";
const char *delim = " \t";
std::vector<std::string> out;
SplitStringToVector(in, delim, &out);
@endcode
@param [in] in The input string to be split.
@param [in] delim A string of delimiters.
@param [out] out It saves the split result.
*/
static void SplitStringToVector(const std::string &in, const char *delim,
std::vector<std::string> *out) {
K2_CHECK_NE(delim, nullptr);
K2_CHECK_NE(out, nullptr);
out->clear();
std::size_t start = 0;
while (true) {
auto pos = in.find_first_of(delim, start);
if (pos == std::string::npos) break;
auto sub = in.substr(start, pos - start);
start = pos + 1;
TrimString(&sub);
if (!sub.empty()) out->emplace_back(std::move(sub));
}
if (start < in.size()) {
auto sub = in.substr(start);
TrimString(&sub);
if (!sub.empty()) out->emplace_back(std::move(sub));
}
}
/* Create an acceptor from a stream, assuming the acceptor is in the k2 format:
src_state1 dest_state1 label1 score1
src_state2 dest_state2 label2 score2
... ...
final_state
The source states will be in non-descending order, and the final state does
not bear a cost/score -- we put the cost/score on the arc that connects to
the final state and set its label to -1.
@param [in] is The input stream that contains the acceptor.
@return It returns an Fsa on CPU.
*/
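/* Illustrative example only (not from the original sources): a k2 acceptor in this
   text form could look like
       0 1 5 0.1
       1 2 -1 0.2
       2
   i.e. two arcs followed by a single final-state line, with label -1 on the arc that
   enters the final state. */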
static Fsa K2AcceptorFromStream(std::istringstream &is) {
std::vector<Arc> arcs;
std::vector<std::string> splits;
std::string line;
bool finished = false; // when the final state is read, set it to true.
while (std::getline(is, line)) {
SplitStringToVector(line, kDelim,
&splits); // splits is cleared in the function
if (splits.empty()) continue; // this is an empty line
K2_CHECK_EQ(finished, false);
auto num_fields = splits.size();
if (num_fields == 4u) {
// 0 1 2 3
// src_state dest_state label score
int32_t src_state = StringToInt(splits[0]);
int32_t dest_state = StringToInt(splits[1]);
int32_t symbol = StringToInt(splits[2]);
float score = StringToFloat(splits[3]);
arcs.emplace_back(src_state, dest_state, symbol, score);
} else if (num_fields == 1u) {
// 0
// final_state
(void)StringToInt(splits[0]); // this is a final state
finished = true; // set finish
} else {
K2_LOG(FATAL) << "Invalid line: " << line
<< "\nk2 acceptor expects a line with 1 (final_state) or "
"4 (src_state dest_state label score) fields";
}
}
K2_CHECK_EQ(finished, true) << "The last line should be the final state";
bool error = true;
Array1<Arc> array(GetCpuContext(), arcs);
auto fsa = FsaFromArray1(array, &error);
K2_CHECK_EQ(error, false);
return fsa;
}
/* Create a transducer from a stream, assuming the transducer is in the K2
format:
src_state1 dest_state1 label1 aux_label1 score1
src_state2 dest_state2 label2 aux_label2 score2
... ...
final_state
The source states will be in non-descending order, and the final state does
not bear a cost/score -- we put the cost/score on the arc that connects to
the final state and set its label to -1.
@param [in] is The input stream that contains the transducer.
@return It returns an Fsa on CPU.
*/
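/* Illustrative example only (not from the original sources): a k2 transducer in this
   text form could look like
       0 1 5 7 0.1
       1 2 -1 -1 0.2
       2
   i.e. arcs carrying an extra aux_label column, followed by a single final-state line. */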
static Fsa K2TransducerFromStream(std::istringstream &is,
Array1<int32_t> *aux_labels) {
K2_CHECK(aux_labels != nullptr);
std::vector<int32_t> aux_labels_internal;
std::vector<Arc> arcs;
std::vector<std::string> splits;
std::string line;
bool finished = false; // when the final state is read, set it to true.
while (std::getline(is, line)) {
SplitStringToVector(line, kDelim,
&splits); // splits is cleared in the function
if (splits.empty()) continue; // this is an empty line
K2_CHECK_EQ(finished, false);
auto num_fields = splits.size();
if (num_fields == 5u) {
// 0 1 2 3 4
// src_state dest_state label aux_label score
int32_t src_state = StringToInt(splits[0]);
int32_t dest_state = StringToInt(splits[1]);
int32_t symbol = StringToInt(splits[2]);
int32_t aux_label = StringToInt(splits[3]);
float score = StringToFloat(splits[4]);
arcs.emplace_back(src_state, dest_state, symbol, score);
aux_labels_internal.push_back(aux_label);
} else if (num_fields == 1u) {
// 0
// final_state
(void)StringToInt(splits[0]);
finished = true; // set finish
} else {
K2_LOG(FATAL) << "Invalid line: " << line
<< "\nk2 transducer expects a line with 1 (final_state) or "
"5 (src_state dest_state label aux_label score) fields";
}
}
K2_CHECK_EQ(finished, true) << "The last line should be the final state";
auto cpu_context = GetCpuContext();
*aux_labels = Array1<int32_t>(cpu_context, aux_labels_internal);
Array1<Arc> array(cpu_context, arcs);
bool error = true;
auto fsa = FsaFromArray1(array, &error);
K2_CHECK_EQ(error, false);
return fsa;
}
/* Create an acceptor from a stream, assuming the acceptor is in the OpenFST
format:
src_state1 dest_state1 label1 score1
src_state2 dest_state2 label2 score2
... ...
final_state final_score
We will negate the cost/score when we read them in. Also note, OpenFST may
omit the cost/score if it is 0.0.
We always create the super final state. If there are final state(s) in the
original FSA, then we add arc(s) from the original final state(s) to the
super final state, with the (negated) old final state cost/score as its
cost/score, and -1 as its label.
@param [in] is The input stream that contains the acceptor.
@return It returns an Fsa on CPU.
*/
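/* Illustrative example only (not from the original sources): an OpenFST acceptor in
   this text form could look like
       0 1 5 0.5
       1 2 3
       2 0.25
   where the score on the second arc is omitted (treated as 0.0) and the last line is a
   final state with its final score; scores are negated when read in. */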
static Fsa OpenFstAcceptorFromStream(std::istringstream &is) {
std::vector<Arc> arcs;
std::vector<std::vector<Arc>> state_to_arcs; // indexed by states
std::vector<std::string> splits;
std::string line;
int32_t max_state = -1;
int32_t num_arcs = 0;
std::vector<int32_t> original_final_states;
std::vector<float> original_final_weights;
while (std::getline(is, line)) {
SplitStringToVector(line, kDelim,
&splits); // splits is cleared in the function
if (splits.empty()) continue; // this is an empty line
auto num_fields = splits.size();
if (num_fields == 3u || num_fields == 4u) {
// 0 1 2
// src_state dest_state label
//
// or
//
// 0 1 2 3
// src_state dest_state label score
int32_t src_state = StringToInt(splits[0]);
int32_t dest_state = StringToInt(splits[1]);
int32_t symbol = StringToInt(splits[2]);
float score = 0.0f;
if (num_fields == 4u)
score = -1.0f * StringToFloat(splits[3]);
// Add the arc to "state_to_arcs".
++num_arcs;
max_state = ::max(max_state, ::max(src_state, dest_state));
if (static_cast<int32_t>(state_to_arcs.size()) <= src_state)
state_to_arcs.resize(src_state + 1);
state_to_arcs[src_state].emplace_back(src_state, dest_state, symbol,
score);
} else if (num_fields == 1u || num_fields == 2u) {
// 0 1
// final_state score
float score = 0.0f;
if (num_fields == 2u)
score = -1.0f * StringToFloat(splits[1]);
original_final_states.push_back(StringToInt(splits[0]));
original_final_weights.push_back(score);
max_state = ::max(max_state, original_final_states.back());
} else {
K2_LOG(FATAL) << "Invalid line: " << line
<< "\nOpenFST acceptor expects a line with 1 (final_state),"
" 2 (final_state score), 3 (src_state dest_state label) "
"or 4 (src_state dest_state label score) fields.";
}
}
K2_CHECK(is.eof());
// Post processing on final states. If there are final state(s) in the
// original FSA, we add the super final state as well as arc(s) from original
// final state(s) to the super final state. Otherwise, the super final state
// will be added by FsaFromArray1 (since there's no arc with label
// kFinalSymbol).
if (original_final_states.size() > 0) {
K2_CHECK_EQ(original_final_states.size(), original_final_weights.size());
int32_t super_final_state = max_state + 1;
state_to_arcs.resize(super_final_state);
for (std::size_t i = 0; i != original_final_states.size(); ++i) {
state_to_arcs[original_final_states[i]].emplace_back(
original_final_states[i], super_final_state,
-1, // kFinalSymbol
original_final_weights[i]);
++num_arcs;
}
}
// Move arcs from "state_to_arcs" to "arcs".
int32_t arc_index = 0;
arcs.resize(num_arcs);
for (std::size_t s = 0; s < state_to_arcs.size(); ++s) {
for (std::size_t a = 0; a < state_to_arcs[s].size(); ++a) {
K2_CHECK_GT(num_arcs, arc_index);
arcs[arc_index] = state_to_arcs[s][a];
++arc_index;
}
}
K2_CHECK_EQ(num_arcs, arc_index);
bool error = true;
Array1<Arc> array(GetCpuContext(), arcs);
// FsaFromArray1 will add a super final state if the original FSA doesn't have
// a final state.
auto fsa = FsaFromArray1(array, &error);
K2_CHECK_EQ(error, false);
return fsa;
}
/* Create a transducer from a stream, assuming the transducer is in the OpenFST
format:
src_state1 dest_state1 label1 aux_label1 score1
src_state2 dest_state2 label2 aux_label2 score2
... ...
final_state final_score
We will negate the cost/score when we read them in. Also note, OpenFST may
omit the cost/score if it is 0.0.
We always create the super final state. If there are final state(s) in the
original FST, then we add arc(s) from the original final state(s) to the
super final state, with the (negated) old final state cost/score as its
cost/score, -1 as its label and 0 as its aux_label.
@param [in] is The input stream that contains the transducer.
@return It returns an Fsa on CPU.
*/
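/* Illustrative example only (not from the original sources): an OpenFST transducer in
   this text form could look like
       0 1 5 7 0.5
       1 2 3 4
       2 0.25
   with an aux_label column on each arc, an omitted (0.0) score on the second arc, and
   a final state with its final score on the last line. */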
static Fsa OpenFstTransducerFromStream(std::istringstream &is,
Array1<int32_t> *aux_labels) {
K2_CHECK(aux_labels != nullptr);
std::vector<std::vector<int32_t>> state_to_aux_labels; // indexed by states
std::vector<std::vector<Arc>> state_to_arcs; // indexed by states
std::vector<int32_t> aux_labels_internal;
std::vector<Arc> arcs;
std::vector<std::string> splits;
std::string line;
int32_t max_state = -1;
int32_t num_arcs = 0;
std::vector<int32_t> original_final_states;
std::vector<float> original_final_weights;
while (std::getline(is, line)) {
SplitStringToVector(line, kDelim,
&splits); // splits is cleared in the function
if (splits.empty()) continue; // this is an empty line
auto num_fields = splits.size();
if (num_fields == 4u || num_fields == 5u) {
// 0 1 2 3
// src_state dest_state label aux_label
//
// or
//
// 0 1 2 3 4
// src_state dest_state label aux_label score
int32_t src_state = StringToInt(splits[0]);
int32_t dest_state = StringToInt(splits[1]);
int32_t symbol = StringToInt(splits[2]);
int32_t aux_label = StringToInt(splits[3]);
float score = 0.0f;
if (num_fields == 5u)
score = -1.0f * StringToFloat(splits[4]);
// Add the arc to "state_to_arcs", and aux_label to "state_to_aux_labels"
++num_arcs;
max_state = ::max(max_state, ::max(src_state, dest_state));
if (static_cast<int32_t>(state_to_arcs.size()) <= src_state) {
state_to_arcs.resize(src_state + 1);
state_to_aux_labels.resize(src_state + 1);
}
state_to_arcs[src_state].emplace_back(src_state, dest_state, symbol,
score);
state_to_aux_labels[src_state].push_back(aux_label);
} else if (num_fields == 1u || num_fields == 2u) {
// 0
// final_state
//
// or
//
// 0 1
// final_state score
// There could be multiple final states, so we first have to collect all
// the final states, and then work out the super final state.
float score = 0.0f;
if (num_fields == 2u)
score = -1.0f * StringToFloat(splits[1]);
original_final_states.push_back(StringToInt(splits[0]));
original_final_weights.push_back(score);
max_state = ::max(max_state, original_final_states.back());
} else {
K2_LOG(FATAL) << "Invalid line: " << line
<< "\nOpenFST transducer expects a line with "
"1 (final_state), 2 (final_state score), "
"4 (src_state dest_state label aux_label) or "
"5 (src_state dest_state label aux_label score) fields.";
}
}
K2_CHECK(is.eof());
// Post processing on final states. If there are final state(s) in the
// original FST, we add the super final state as well as arc(s) from original
// final state(s) to the super final state. Otherwise, the super final state
// will be added by FsaFromArray1 (since there's no arc with label
// kFinalSymbol).
if (original_final_states.size() > 0) {
K2_CHECK_EQ(original_final_states.size(), original_final_weights.size());
int32_t super_final_state = max_state + 1;
state_to_arcs.resize(super_final_state);
state_to_aux_labels.resize(super_final_state);
for (std::size_t i = 0; i != original_final_states.size(); ++i) {
state_to_arcs[original_final_states[i]].emplace_back(
original_final_states[i], super_final_state,
-1, // kFinalSymbol
original_final_weights[i]);
// TODO(guoguo) We are not sure yet what to put as the auxiliary label for
// arcs entering the super final state. The only real choices
// are kEpsilon or kFinalSymbol. We are using kEpsilon for
// now.
state_to_aux_labels[original_final_states[i]].push_back(0); // kEpsilon
++num_arcs;
}
}
// Move arcs from "state_to_arcs" to "arcs", and aux_labels from
// "state_to_aux_labels" to "aux_labels_internal"
int32_t arc_index = 0;
arcs.resize(num_arcs);
aux_labels_internal.resize(num_arcs);
K2_CHECK_EQ(state_to_arcs.size(), state_to_aux_labels.size());
for (std::size_t s = 0; s < state_to_arcs.size(); ++s) {
K2_CHECK_EQ(state_to_arcs[s].size(), state_to_aux_labels[s].size());
for (std::size_t a = 0; a < state_to_arcs[s].size(); ++a) {
K2_CHECK_GT(num_arcs, arc_index);
arcs[arc_index] = state_to_arcs[s][a];
aux_labels_internal[arc_index] = state_to_aux_labels[s][a];
++arc_index;
}
}
K2_CHECK_EQ(num_arcs, arc_index);
auto cpu_context = GetCpuContext();
*aux_labels = Array1<int32_t>(cpu_context, aux_labels_internal);
Array1<Arc> array(cpu_context, arcs);
bool error = true;
// FsaFromArray1 will add a super final state if the original FSA doesn't have
// a final state.
auto fsa = FsaFromArray1(array, &error);
K2_CHECK_EQ(error, false);
return fsa;
}
Fsa FsaFromString(const std::string &s, bool openfst /*= false*/,
Array1<int32_t> *aux_labels /*= nullptr*/) {
std::istringstream is(s);
K2_CHECK(is);
if (openfst == false && aux_labels == nullptr)
return K2AcceptorFromStream(is);
else if (openfst == false && aux_labels != nullptr)
return K2TransducerFromStream(is, aux_labels);
else if (openfst == true && aux_labels == nullptr)
return OpenFstAcceptorFromStream(is);
else if (openfst == true && aux_labels != nullptr)
return OpenFstTransducerFromStream(is, aux_labels);
return Fsa(); // unreachable code
}
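/* Illustrative usage sketch only (the variable names below are hypothetical and not
   part of this file): a host-side caller could build FSAs from the text forms above
   roughly like this:

     std::string s = "0 1 5 0.1\n1 2 -1 0.2\n2";
     Fsa acceptor = FsaFromString(s);                  // k2 acceptor form
     Array1<int32_t> aux;
     Fsa transducer = FsaFromString(s2, false, &aux);  // k2 transducer form, s2 with aux_labels

   The real call sites live elsewhere in k2 and are not shown here. */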
std::string FsaToString(const Fsa &fsa, bool openfst /*= false*/,
const Array1<int32_t> *aux_labels /*= nullptr*/) {
K2_CHECK_EQ(fsa.NumAxes(), 2);
K2_CHECK_EQ(fsa.Context()->GetDeviceType(), kCpu);
const Array1<int32_t> &row_splits = fsa.shape.RowSplits(1);
const Array1<Arc> &arcs = fsa.values;
const int32_t *p = nullptr;
if (aux_labels != nullptr) {
K2_CHECK(IsCompatible(fsa, *aux_labels));
K2_CHECK_EQ(aux_labels->Dim(), arcs.Dim());
p = aux_labels->Data();
}
float scale = 1;
if (openfst) scale = -1;
std::ostringstream os;
int32_t n = arcs.Dim();
char sep = ' ';
char line_sep = '\n';
for (int32_t i = 0; i != n; ++i) {
const auto &arc = arcs[i];
os << arc.src_state << sep << arc.dest_state << sep << arc.symbol << sep;
if (p != nullptr) os << p[i] << sep;
os << (scale * arc.score) << line_sep;
}
os << (fsa.shape.Dim0() - 1) << line_sep;
return os.str();
}
} // namespace k2
|
f2d61dc9fe49eedd1ff28160e84af4bbfa30c839.cu
|
/**
* @brief Utilities for creating FSAs.
*
* Note that serializations are done in Python.
*
* @copyright
* Copyright (c) 2020 Mobvoi Inc. (authors: Fangjun Kuang)
* Guoguo Chen
*
* @copyright
* See LICENSE for clarification regarding multiple authors
*/
#include <algorithm>
#include <sstream>
#include <utility>
#include <vector>
#include "k2/csrc/context.h"
#include "k2/csrc/fsa_utils.h"
namespace k2 {
// field separator within a line for a text form FSA
static constexpr const char *kDelim = " \t";
// Convert a string to an integer. Abort the program on failure.
static int32_t StringToInt(const std::string &s) {
K2_CHECK(!s.empty());
bool ok = false;
char *p = nullptr;
// std::strtol requires a `long` type
long n = std::strtol(s.c_str(), &p, 10); // NOLINT
if (*p == '\0') ok = true;
auto res = static_cast<int32_t>(n);
if (n != res) ok = false; // out of range
K2_CHECK(ok) << "Failed to convert " << s << " to an integer";
return res;
}
// Convert a string to a float. Abort the program on failure.
// TODO(guoguo): We may run into locale problems, with comma vs. period for
// decimals. We have to test if the C code will behave the same
// w.r.t. locale as Python does.
static float StringToFloat(const std::string &s) {
K2_CHECK(!s.empty());
char *p = nullptr;
float f = std::strtof(s.c_str(), &p);
if (*p != '\0') K2_LOG(FATAL) << "Failed to convert " << s << " to a float";
return f;
}
// Trim leading and trailing spaces of a string.
static void TrimString(std::string *s) {
K2_CHECK_NE(s, nullptr);
auto not_space = [](int32_t c) -> bool { return std::isspace(c) == 0; };
s->erase(s->begin(), std::find_if(s->begin(), s->end(), not_space));
s->erase(std::find_if(s->rbegin(), s->rend(), not_space).base(), s->end());
}
/* Split a string to a vector of strings using a set of delimiters.
Example usage:
@code
std::string in = "1 2 3";
const char *delim = " \t";
std::vector<std::string> out;
SplitStringToVector(in, delim, &out);
@endcode
@param [in] in The input string to be split.
@param [in] delim A string of delimiters.
@param [out] out It saves the split result.
*/
static void SplitStringToVector(const std::string &in, const char *delim,
std::vector<std::string> *out) {
K2_CHECK_NE(delim, nullptr);
K2_CHECK_NE(out, nullptr);
out->clear();
std::size_t start = 0;
while (true) {
auto pos = in.find_first_of(delim, start);
if (pos == std::string::npos) break;
auto sub = in.substr(start, pos - start);
start = pos + 1;
TrimString(&sub);
if (!sub.empty()) out->emplace_back(std::move(sub));
}
if (start < in.size()) {
auto sub = in.substr(start);
TrimString(&sub);
if (!sub.empty()) out->emplace_back(std::move(sub));
}
}
/* Create an acceptor from a stream, assuming the acceptor is in the k2 format:
src_state1 dest_state1 label1 score1
src_state2 dest_state2 label2 score2
... ...
final_state
The source states will be in non-descending order, and the final state does
not bear a cost/score -- we put the cost/score on the arc that connects to
the final state and set its label to -1.
@param [in] is The input stream that contains the acceptor.
@return It returns an Fsa on CPU.
*/
static Fsa K2AcceptorFromStream(std::istringstream &is) {
std::vector<Arc> arcs;
std::vector<std::string> splits;
std::string line;
bool finished = false; // when the final state is read, set it to true.
while (std::getline(is, line)) {
SplitStringToVector(line, kDelim,
&splits); // splits is cleared in the function
if (splits.empty()) continue; // this is an empty line
K2_CHECK_EQ(finished, false);
auto num_fields = splits.size();
if (num_fields == 4u) {
// 0 1 2 3
// src_state dest_state label score
int32_t src_state = StringToInt(splits[0]);
int32_t dest_state = StringToInt(splits[1]);
int32_t symbol = StringToInt(splits[2]);
float score = StringToFloat(splits[3]);
arcs.emplace_back(src_state, dest_state, symbol, score);
} else if (num_fields == 1u) {
// 0
// final_state
(void)StringToInt(splits[0]); // this is a final state
finished = true; // set finish
} else {
K2_LOG(FATAL) << "Invalid line: " << line
<< "\nk2 acceptor expects a line with 1 (final_state) or "
"4 (src_state dest_state label score) fields";
}
}
K2_CHECK_EQ(finished, true) << "The last line should be the final state";
bool error = true;
Array1<Arc> array(GetCpuContext(), arcs);
auto fsa = FsaFromArray1(array, &error);
K2_CHECK_EQ(error, false);
return fsa;
}
/* Create a transducer from a stream, assuming the transducer is in the K2
format:
src_state1 dest_state1 label1 aux_label1 score1
src_state2 dest_state2 label2 aux_label2 score2
... ...
final_state
The source states will be in non-descending order, and the final state does
not bear a cost/score -- we put the cost/score on the arc that connects to
the final state and set its label to -1.
@param [in] is The input stream that contains the transducer.
@return It returns an Fsa on CPU.
*/
static Fsa K2TransducerFromStream(std::istringstream &is,
Array1<int32_t> *aux_labels) {
K2_CHECK(aux_labels != nullptr);
std::vector<int32_t> aux_labels_internal;
std::vector<Arc> arcs;
std::vector<std::string> splits;
std::string line;
bool finished = false; // when the final state is read, set it to true.
while (std::getline(is, line)) {
SplitStringToVector(line, kDelim,
&splits); // splits is cleared in the function
if (splits.empty()) continue; // this is an empty line
K2_CHECK_EQ(finished, false);
auto num_fields = splits.size();
if (num_fields == 5u) {
// 0 1 2 3 4
// src_state dest_state label aux_label score
int32_t src_state = StringToInt(splits[0]);
int32_t dest_state = StringToInt(splits[1]);
int32_t symbol = StringToInt(splits[2]);
int32_t aux_label = StringToInt(splits[3]);
float score = StringToFloat(splits[4]);
arcs.emplace_back(src_state, dest_state, symbol, score);
aux_labels_internal.push_back(aux_label);
} else if (num_fields == 1u) {
// 0
// final_state
(void)StringToInt(splits[0]);
finished = true; // set finish
} else {
K2_LOG(FATAL) << "Invalid line: " << line
<< "\nk2 transducer expects a line with 1 (final_state) or "
"5 (src_state dest_state label aux_label score) fields";
}
}
K2_CHECK_EQ(finished, true) << "The last line should be the final state";
auto cpu_context = GetCpuContext();
*aux_labels = Array1<int32_t>(cpu_context, aux_labels_internal);
Array1<Arc> array(cpu_context, arcs);
bool error = true;
auto fsa = FsaFromArray1(array, &error);
K2_CHECK_EQ(error, false);
return fsa;
}
/* Create an acceptor from a stream, assuming the acceptor is in the OpenFST
format:
src_state1 dest_state1 label1 score1
src_state2 dest_state2 label2 score2
... ...
final_state final_score
We will negate the cost/score when we read them in. Also note, OpenFST may
omit the cost/score if it is 0.0.
We always create the super final state. If there are final state(s) in the
original FSA, then we add arc(s) from the original final state(s) to the
super final state, with the (negated) old final state cost/score as its
cost/score, and -1 as its label.
@param [in] is The input stream that contains the acceptor.
@return It returns an Fsa on CPU.
*/
static Fsa OpenFstAcceptorFromStream(std::istringstream &is) {
std::vector<Arc> arcs;
std::vector<std::vector<Arc>> state_to_arcs; // indexed by states
std::vector<std::string> splits;
std::string line;
int32_t max_state = -1;
int32_t num_arcs = 0;
std::vector<int32_t> original_final_states;
std::vector<float> original_final_weights;
while (std::getline(is, line)) {
SplitStringToVector(line, kDelim,
&splits); // splits is cleared in the function
if (splits.empty()) continue; // this is an empty line
auto num_fields = splits.size();
if (num_fields == 3u || num_fields == 4u) {
// 0 1 2
// src_state dest_state label
//
// or
//
// 0 1 2 3
// src_state dest_state label score
int32_t src_state = StringToInt(splits[0]);
int32_t dest_state = StringToInt(splits[1]);
int32_t symbol = StringToInt(splits[2]);
float score = 0.0f;
if (num_fields == 4u)
score = -1.0f * StringToFloat(splits[3]);
// Add the arc to "state_to_arcs".
++num_arcs;
max_state = std::max(max_state, std::max(src_state, dest_state));
if (static_cast<int32_t>(state_to_arcs.size()) <= src_state)
state_to_arcs.resize(src_state + 1);
state_to_arcs[src_state].emplace_back(src_state, dest_state, symbol,
score);
} else if (num_fields == 1u || num_fields == 2u) {
// 0 1
// final_state score
float score = 0.0f;
if (num_fields == 2u)
score = -1.0f * StringToFloat(splits[1]);
original_final_states.push_back(StringToInt(splits[0]));
original_final_weights.push_back(score);
max_state = std::max(max_state, original_final_states.back());
} else {
K2_LOG(FATAL) << "Invalid line: " << line
<< "\nOpenFST acceptor expects a line with 1 (final_state),"
" 2 (final_state score), 3 (src_state dest_state label) "
"or 4 (src_state dest_state label score) fields.";
}
}
K2_CHECK(is.eof());
// Post processing on final states. If there are final state(s) in the
// original FSA, we add the super final state as well as arc(s) from original
// final state(s) to the super final state. Otherwise, the super final state
// will be added by FsaFromArray1 (since there's no arc with label
// kFinalSymbol).
if (original_final_states.size() > 0) {
K2_CHECK_EQ(original_final_states.size(), original_final_weights.size());
int32_t super_final_state = max_state + 1;
state_to_arcs.resize(super_final_state);
for (std::size_t i = 0; i != original_final_states.size(); ++i) {
state_to_arcs[original_final_states[i]].emplace_back(
original_final_states[i], super_final_state,
-1, // kFinalSymbol
original_final_weights[i]);
++num_arcs;
}
}
// Move arcs from "state_to_arcs" to "arcs".
int32_t arc_index = 0;
arcs.resize(num_arcs);
for (std::size_t s = 0; s < state_to_arcs.size(); ++s) {
for (std::size_t a = 0; a < state_to_arcs[s].size(); ++a) {
K2_CHECK_GT(num_arcs, arc_index);
arcs[arc_index] = state_to_arcs[s][a];
++arc_index;
}
}
K2_CHECK_EQ(num_arcs, arc_index);
bool error = true;
Array1<Arc> array(GetCpuContext(), arcs);
// FsaFromArray1 will add a super final state if the original FSA doesn't have
// a final state.
auto fsa = FsaFromArray1(array, &error);
K2_CHECK_EQ(error, false);
return fsa;
}
/* Create a transducer from a stream, assuming the transducer is in the OpenFST
format:
src_state1 dest_state1 label1 aux_label1 score1
src_state2 dest_state2 label2 aux_label2 score2
... ...
final_state final_score
We will negate the cost/score when we read them in. Also note, OpenFST may
omit the cost/score if it is 0.0.
We always create the super final state. If there are final state(s) in the
original FST, then we add arc(s) from the original final state(s) to the
super final state, with the (negated) old final state cost/score as its
cost/score, -1 as its label and 0 as its aux_label.
@param [in] is The input stream that contains the transducer.
@return It returns an Fsa on CPU.
*/
static Fsa OpenFstTransducerFromStream(std::istringstream &is,
Array1<int32_t> *aux_labels) {
K2_CHECK(aux_labels != nullptr);
std::vector<std::vector<int32_t>> state_to_aux_labels; // indexed by states
std::vector<std::vector<Arc>> state_to_arcs; // indexed by states
std::vector<int32_t> aux_labels_internal;
std::vector<Arc> arcs;
std::vector<std::string> splits;
std::string line;
int32_t max_state = -1;
int32_t num_arcs = 0;
std::vector<int32_t> original_final_states;
std::vector<float> original_final_weights;
while (std::getline(is, line)) {
SplitStringToVector(line, kDelim,
&splits); // splits is cleared in the function
if (splits.empty()) continue; // this is an empty line
auto num_fields = splits.size();
if (num_fields == 4u || num_fields == 5u) {
// 0 1 2 3
// src_state dest_state label aux_label
//
// or
//
// 0 1 2 3 4
// src_state dest_state label aux_label score
int32_t src_state = StringToInt(splits[0]);
int32_t dest_state = StringToInt(splits[1]);
int32_t symbol = StringToInt(splits[2]);
int32_t aux_label = StringToInt(splits[3]);
float score = 0.0f;
if (num_fields == 5u)
score = -1.0f * StringToFloat(splits[4]);
// Add the arc to "state_to_arcs", and aux_label to "state_to_aux_labels"
++num_arcs;
max_state = std::max(max_state, std::max(src_state, dest_state));
if (static_cast<int32_t>(state_to_arcs.size()) <= src_state) {
state_to_arcs.resize(src_state + 1);
state_to_aux_labels.resize(src_state + 1);
}
state_to_arcs[src_state].emplace_back(src_state, dest_state, symbol,
score);
state_to_aux_labels[src_state].push_back(aux_label);
} else if (num_fields == 1u || num_fields == 2u) {
// 0
// final_state
//
// or
//
// 0 1
// final_state score
// There could be multiple final states, so we first have to collect all
// the final states, and then work out the super final state.
float score = 0.0f;
if (num_fields == 2u)
score = -1.0f * StringToFloat(splits[1]);
original_final_states.push_back(StringToInt(splits[0]));
original_final_weights.push_back(score);
max_state = std::max(max_state, original_final_states.back());
} else {
K2_LOG(FATAL) << "Invalid line: " << line
<< "\nOpenFST transducer expects a line with "
"1 (final_state), 2 (final_state score), "
"4 (src_state dest_state label aux_label) or "
"5 (src_state dest_state label aux_label score) fields.";
}
}
K2_CHECK(is.eof());
// Post processing on final states. If there are final state(s) in the
// original FST, we add the super final state as well as arc(s) from original
// final state(s) to the super final state. Otherwise, the super final state
// will be added by FsaFromArray1 (since there's no arc with label
// kFinalSymbol).
if (original_final_states.size() > 0) {
K2_CHECK_EQ(original_final_states.size(), original_final_weights.size());
int32_t super_final_state = max_state + 1;
state_to_arcs.resize(super_final_state);
state_to_aux_labels.resize(super_final_state);
for (std::size_t i = 0; i != original_final_states.size(); ++i) {
state_to_arcs[original_final_states[i]].emplace_back(
original_final_states[i], super_final_state,
-1, // kFinalSymbol
original_final_weights[i]);
// TODO(guoguo) We are not sure yet what to put as the auxiliary label for
// arcs entering the super final state. The only real choices
// are kEpsilon or kFinalSymbol. We are using kEpsilon for
// now.
state_to_aux_labels[original_final_states[i]].push_back(0); // kEpsilon
++num_arcs;
}
}
// Move arcs from "state_to_arcs" to "arcs", and aux_labels from
// "state_to_aux_labels" to "aux_labels_internal"
int32_t arc_index = 0;
arcs.resize(num_arcs);
aux_labels_internal.resize(num_arcs);
K2_CHECK_EQ(state_to_arcs.size(), state_to_aux_labels.size());
for (std::size_t s = 0; s < state_to_arcs.size(); ++s) {
K2_CHECK_EQ(state_to_arcs[s].size(), state_to_aux_labels[s].size());
for (std::size_t a = 0; a < state_to_arcs[s].size(); ++a) {
K2_CHECK_GT(num_arcs, arc_index);
arcs[arc_index] = state_to_arcs[s][a];
aux_labels_internal[arc_index] = state_to_aux_labels[s][a];
++arc_index;
}
}
K2_CHECK_EQ(num_arcs, arc_index);
auto cpu_context = GetCpuContext();
*aux_labels = Array1<int32_t>(cpu_context, aux_labels_internal);
Array1<Arc> array(cpu_context, arcs);
bool error = true;
// FsaFromArray1 will add a super final state if the original FSA doesn't have
// a final state.
auto fsa = FsaFromArray1(array, &error);
K2_CHECK_EQ(error, false);
return fsa;
}
Fsa FsaFromString(const std::string &s, bool openfst /*= false*/,
Array1<int32_t> *aux_labels /*= nullptr*/) {
std::istringstream is(s);
K2_CHECK(is);
if (openfst == false && aux_labels == nullptr)
return K2AcceptorFromStream(is);
else if (openfst == false && aux_labels != nullptr)
return K2TransducerFromStream(is, aux_labels);
else if (openfst == true && aux_labels == nullptr)
return OpenFstAcceptorFromStream(is);
else if (openfst == true && aux_labels != nullptr)
return OpenFstTransducerFromStream(is, aux_labels);
return Fsa(); // unreachable code
}
std::string FsaToString(const Fsa &fsa, bool openfst /*= false*/,
const Array1<int32_t> *aux_labels /*= nullptr*/) {
K2_CHECK_EQ(fsa.NumAxes(), 2);
K2_CHECK_EQ(fsa.Context()->GetDeviceType(), kCpu);
const Array1<int32_t> &row_splits = fsa.shape.RowSplits(1);
const Array1<Arc> &arcs = fsa.values;
const int32_t *p = nullptr;
if (aux_labels != nullptr) {
K2_CHECK(IsCompatible(fsa, *aux_labels));
K2_CHECK_EQ(aux_labels->Dim(), arcs.Dim());
p = aux_labels->Data();
}
float scale = 1;
if (openfst) scale = -1;
std::ostringstream os;
int32_t n = arcs.Dim();
char sep = ' ';
char line_sep = '\n';
for (int32_t i = 0; i != n; ++i) {
const auto &arc = arcs[i];
os << arc.src_state << sep << arc.dest_state << sep << arc.symbol << sep;
if (p != nullptr) os << p[i] << sep;
os << (scale * arc.score) << line_sep;
}
os << (fsa.shape.Dim0() - 1) << line_sep;
return os.str();
}
} // namespace k2
|
db4d49d6084bccdcbe357f5d96efe1f2dd149ab4.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "userFitness.h"
#include "hip/hip_runtime_api.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <hiprand/hiprand_kernel.h>
#include <stdio.h>
__device__ float fitnessOfChromosome_TSPGA(int size, short* chromosome, int startPoint, float* coord) {
float nilai = 0;
int i_d = startPoint;
for (int j = 1; j < size; j++) {
int xDist = coord[chromosome[i_d + j] * 2] - coord[chromosome[i_d + (j - 1)] * 2];
int yDist = coord[(chromosome[i_d + j] * 2) + 1] - coord[(chromosome[i_d + (j - 1)] * 2) + 1];
nilai += (sqrtf((xDist*xDist) + (yDist*yDist)));
}
int xDist = coord[chromosome[i_d] * 2] - coord[chromosome[i_d + (size - 1)] * 2];
int yDist = coord[(chromosome[i_d] * 2) + 1] - coord[(chromosome[i_d + (size - 1)] * 2) + 1];
nilai += (sqrtf((xDist*xDist) + (yDist*yDist)));
return 99999.0f - nilai;
};
__global__ void fitnessCheckGPU_TSPGA(int size, short* chromosome, float* fitness, float* coord) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
fitness[i] = fitnessOfChromosome_TSPGA(size, chromosome, i * size, coord);
}
void callFitnessCheckGPU_TSPGA(int size, short* chromosome, float* fitness, long chromosomeAmount, float* coord) {
fitnessCheckGPU_TSPGA <<< 1, chromosomeAmount >>> (size, chromosome, fitness, coord);
}
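/* Illustrative host-side usage only (all names below are hypothetical, not from the
   original project): with n chromosomes of `size` cities each, device buffers for the
   chromosomes (n * size shorts), the fitness values (n floats) and the city coordinates
   (size * 2 floats), one might call
       callFitnessCheckGPU_TSPGA(size, d_chromosomes, d_fitness, n, d_coord);
   Note that the kernel is launched as a single block of `chromosomeAmount` threads, so
   n must stay within the device's threads-per-block limit. */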
|
db4d49d6084bccdcbe357f5d96efe1f2dd149ab4.cu
|
#include "userFitness.h"
#include "cuda_runtime_api.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <curand_kernel.h>
#include <stdio.h>
__device__ float fitnessOfChromosome_TSPGA(int size, short* chromosome, int startPoint, float* coord) {
float nilai = 0;
int i_d = startPoint;
for (int j = 1; j < size; j++) {
int xDist = coord[chromosome[i_d + j] * 2] - coord[chromosome[i_d + (j - 1)] * 2];
int yDist = coord[(chromosome[i_d + j] * 2) + 1] - coord[(chromosome[i_d + (j - 1)] * 2) + 1];
nilai += (sqrtf((xDist*xDist) + (yDist*yDist)));
}
int xDist = coord[chromosome[i_d] * 2] - coord[chromosome[i_d + (size - 1)] * 2];
int yDist = coord[(chromosome[i_d] * 2) + 1] - coord[(chromosome[i_d + (size - 1)] * 2) + 1];
nilai += (sqrtf((xDist*xDist) + (yDist*yDist)));
return 99999.0f - nilai;
};
__global__ void fitnessCheckGPU_TSPGA(int size, short* chromosome, float* fitness, float* coord) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
fitness[i] = fitnessOfChromosome_TSPGA(size, chromosome, i * size, coord);
}
void callFitnessCheckGPU_TSPGA(int size, short* chromosome, float* fitness, long chromosomeAmount, float* coord) {
fitnessCheckGPU_TSPGA <<< 1, chromosomeAmount >>> (size, chromosome, fitness, coord);
}
|
e515d14a64f3f95679b16421df1b405d3809fab0.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// ************************************************
// asian_option_kernel.cu
// authors: Lee Howes and David B. Thomas
//
// Initialises, executes, and cleans up an
// Asian option performance test timing
// and outputting timing results for various
// aspects of the execution including memory
// allocation, initialisation and generation.
// ************************************************
#ifndef _ASIAN_OPTIONS_CU_
#define _ASIAN_OPTIONS_CU_
float *asian_A_0, *asian_B_0, *asian_MU_A, *asian_SIG_AA, *asian_MU_B, *asian_SIG_AB, *asian_SIG_BB;
float *device_asian_A_0, *device_asian_B_0, *device_asian_MU_A, *device_asian_SIG_AA, *device_asian_MU_B, *device_asian_SIG_AB, *device_asian_SIG_BB;
float *asianSimulationResultsMean, *asianSimulationResultsVariance;
float *device_asianSimulationResultsMean, *device_asianSimulationResultsVariance;
float *asianChi2Corrections = 0;
float *deviceAsianChi2Corrections = 0;
unsigned int timer_asian_tw = 0;
unsigned int timer_asian_wallace = 0;
unsigned int timer_asian_init = 0;
unsigned int timer_asian_upload = 0;
unsigned int timer_asian_download = 0;
unsigned int timer_asian_malloc = 0;
unsigned int timer_asian_cuda_malloc = 0;
unsigned int timer_asian_free = 0;
unsigned int timer_asian_cuda_free = 0;
void init_asian_options()
{
CUT_SAFE_CALL(cutCreateTimer(&timer_asian_tw));
CUT_SAFE_CALL(cutCreateTimer(&timer_asian_wallace));
CUT_SAFE_CALL(cutCreateTimer(&timer_asian_init));
CUT_SAFE_CALL(cutCreateTimer(&timer_asian_upload));
CUT_SAFE_CALL(cutCreateTimer(&timer_asian_download));
CUT_SAFE_CALL(cutCreateTimer(&timer_asian_malloc));
CUT_SAFE_CALL(cutCreateTimer(&timer_asian_cuda_malloc));
CUT_SAFE_CALL(cutCreateTimer(&timer_asian_free));
CUT_SAFE_CALL(cutCreateTimer(&timer_asian_cuda_free));
CUT_SAFE_CALL(cutStartTimer(timer_asian_malloc));
// Asian option memory allocations
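// (The literal 4 used in the allocation sizes below is sizeof(float) in bytes.)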
asian_A_0 = (float *) malloc(4 * ASIAN_NUM_PARAMETER_VALUES);
asian_B_0 = (float *) malloc(4 * ASIAN_NUM_PARAMETER_VALUES);
asian_MU_A = (float *) malloc(4 * ASIAN_NUM_PARAMETER_VALUES);
asian_SIG_AA = (float *) malloc(4 * ASIAN_NUM_PARAMETER_VALUES);
asian_MU_B = (float *) malloc(4 * ASIAN_NUM_PARAMETER_VALUES);
asian_SIG_AB = (float *) malloc(4 * ASIAN_NUM_PARAMETER_VALUES);
asian_SIG_BB = (float *) malloc(4 * ASIAN_NUM_PARAMETER_VALUES);
asianChi2Corrections = (float *) malloc(4 * ASIAN_WALLACE_CHI2_COUNT);
asianSimulationResultsMean = (float *) malloc(4 * ASIAN_NUM_PARAMETER_VALUES);
asianSimulationResultsVariance = (float *) malloc(4 * ASIAN_NUM_PARAMETER_VALUES);
CUT_SAFE_CALL(cutStopTimer(timer_asian_malloc));
CUT_SAFE_CALL(cutStartTimer(timer_asian_cuda_malloc));
// Asian option memory allocations
CUDA_SAFE_CALL(hipMalloc((void **) &device_asian_A_0, 4 * ASIAN_NUM_PARAMETER_VALUES));
CUDA_SAFE_CALL(hipMalloc((void **) &device_asian_B_0, 4 * ASIAN_NUM_PARAMETER_VALUES));
CUDA_SAFE_CALL(hipMalloc((void **) &device_asian_MU_A, 4 * ASIAN_NUM_PARAMETER_VALUES));
CUDA_SAFE_CALL(hipMalloc((void **) &device_asian_SIG_AA, 4 * ASIAN_NUM_PARAMETER_VALUES));
CUDA_SAFE_CALL(hipMalloc((void **) &device_asian_MU_B, 4 * ASIAN_NUM_PARAMETER_VALUES));
CUDA_SAFE_CALL(hipMalloc((void **) &device_asian_SIG_AB, 4 * ASIAN_NUM_PARAMETER_VALUES));
CUDA_SAFE_CALL(hipMalloc((void **) &device_asian_SIG_BB, 4 * ASIAN_NUM_PARAMETER_VALUES));
CUDA_SAFE_CALL(hipMalloc((void **) &deviceAsianChi2Corrections, 4 * ASIAN_WALLACE_CHI2_COUNT));
CUDA_SAFE_CALL(hipMalloc((void **) &device_asianSimulationResultsMean, 4 * ASIAN_NUM_PARAMETER_VALUES));
CUDA_SAFE_CALL(hipMalloc((void **) &device_asianSimulationResultsVariance, 4 * ASIAN_NUM_PARAMETER_VALUES));
CUT_SAFE_CALL(cutStopTimer(timer_asian_cuda_malloc));
CUT_SAFE_CALL(cutStartTimer(timer_asian_init));
// Initialise asian option parameters, random guesses at this point...
for (unsigned i = 0; i < ASIAN_NUM_PARAMETER_VALUES; i++)
{
asian_A_0[i] = Rand();
asian_B_0[i] = Rand();
asian_MU_A[i] = Rand();
asian_MU_B[i] = Rand();
asian_SIG_AA[i] = Rand();
asian_SIG_AB[i] = Rand();
asian_SIG_BB[i] = Rand();
}
for (int i = 0; i < ASIAN_WALLACE_CHI2_COUNT; i++)
{
asianChi2Corrections[i] = MakeChi2Scale(WALLACE_TOTAL_POOL_SIZE);
}
CUT_SAFE_CALL(cutStopTimer(timer_asian_init));
CUT_SAFE_CALL(cutStartTimer(timer_asian_upload));
CUDA_SAFE_CALL(hipMemcpy(device_asian_A_0, asian_A_0, 4 * ASIAN_NUM_PARAMETER_VALUES, hipMemcpyHostToDevice));
CUDA_SAFE_CALL(hipMemcpy(device_asian_B_0, asian_B_0, 4 * ASIAN_NUM_PARAMETER_VALUES, hipMemcpyHostToDevice));
CUDA_SAFE_CALL(hipMemcpy(device_asian_MU_A, asian_MU_A, 4 * ASIAN_NUM_PARAMETER_VALUES, hipMemcpyHostToDevice));
CUDA_SAFE_CALL(hipMemcpy(device_asian_MU_B, asian_MU_B, 4 * ASIAN_NUM_PARAMETER_VALUES, hipMemcpyHostToDevice));
CUDA_SAFE_CALL(hipMemcpy(device_asian_SIG_AA, asian_SIG_AA, 4 * ASIAN_NUM_PARAMETER_VALUES, hipMemcpyHostToDevice));
CUDA_SAFE_CALL(hipMemcpy(device_asian_SIG_AB, asian_SIG_AB, 4 * ASIAN_NUM_PARAMETER_VALUES, hipMemcpyHostToDevice));
CUDA_SAFE_CALL(hipMemcpy(device_asian_SIG_BB, asian_SIG_BB, 4 * ASIAN_NUM_PARAMETER_VALUES, hipMemcpyHostToDevice));
CUDA_SAFE_CALL(hipMemcpy(deviceAsianChi2Corrections, asianChi2Corrections, 4 * ASIAN_WALLACE_CHI2_COUNT, hipMemcpyHostToDevice));
CUT_SAFE_CALL(cutStopTimer(timer_asian_upload));
}
void cleanup_asian_options()
{
CUT_SAFE_CALL(cutStartTimer(timer_asian_free));
// Asian option memory allocations
free(asian_A_0);
free(asian_B_0);
free(asian_MU_A);
free(asian_SIG_AA);
free(asian_MU_B);
free(asian_SIG_AB);
free(asian_SIG_BB);
free(asianChi2Corrections);
free(asianSimulationResultsMean);
free(asianSimulationResultsVariance);
CUT_SAFE_CALL(cutStopTimer(timer_asian_free));
CUT_SAFE_CALL(cutStartTimer(timer_asian_cuda_free));
// Asian option memory allocations
CUDA_SAFE_CALL(hipFree(device_asian_A_0));
CUDA_SAFE_CALL(hipFree(device_asian_B_0));
CUDA_SAFE_CALL(hipFree(device_asian_MU_A));
CUDA_SAFE_CALL(hipFree(device_asian_SIG_AA));
CUDA_SAFE_CALL(hipFree(device_asian_MU_B));
CUDA_SAFE_CALL(hipFree(device_asian_SIG_AB));
CUDA_SAFE_CALL(hipFree(device_asian_SIG_BB));
CUDA_SAFE_CALL(hipFree(deviceAsianChi2Corrections));
CUDA_SAFE_CALL(hipFree(device_asianSimulationResultsMean));
CUDA_SAFE_CALL(hipFree(device_asianSimulationResultsVariance));
CUT_SAFE_CALL(cutStopTimer(timer_asian_cuda_free));
CUT_SAFE_CALL(cutDeleteTimer(timer_asian_tw));
CUT_SAFE_CALL(cutDeleteTimer(timer_asian_wallace));
CUT_SAFE_CALL(cutDeleteTimer(timer_asian_init));
CUT_SAFE_CALL(cutDeleteTimer(timer_asian_upload));
CUT_SAFE_CALL(cutDeleteTimer(timer_asian_download));
CUT_SAFE_CALL(cutDeleteTimer(timer_asian_malloc));
CUT_SAFE_CALL(cutDeleteTimer(timer_asian_cuda_malloc));
CUT_SAFE_CALL(cutDeleteTimer(timer_asian_free));
CUT_SAFE_CALL(cutDeleteTimer(timer_asian_cuda_free));
}
__device__ float
wallace_asian_basket_sim(unsigned seed,
unsigned &loop, float *chi2Corrections, float A_0, float B_0, float MU_A, float SIG_AA, float MU_B, float SIG_AB, float SIG_BB)
{
float a = A_0, b = B_0, s = 0, sum = 0;
// Timesteps for a single simulation
for (unsigned t = 0; t < (ASIAN_TIME_STEPS / 4); t++)
{
float ra, rb;
// Read in the chi2Correction value only in a single thread and use shared memory to broadcast to other threads
if (threadIdx.x == 0)
pool[WALLACE_CHI2_OFFSET_ASIAN] = chi2Corrections[__mul24(blockIdx.x, ASIAN_WALLACE_CHI2_VALUES_PER_BLOCK) + loop];
__syncthreads();
float chi2CorrAndScale = pool[WALLACE_CHI2_OFFSET_ASIAN];
for (int i = 0; i < 8; i += 2)
{
seed = (1664525U * seed + 1013904223U) & 0xFFFFFFFF;
ra = getRandomValue(i, loop, chi2CorrAndScale);
rb = getRandomValue(i + 1, loop, chi2CorrAndScale);
a *= exp(MU_A + ra * SIG_AA);
b *= exp(MU_B + ra * SIG_AB + rb * SIG_BB);
s = max(a, b);
sum += s;
}
// Count up and temporarily store the loop value to ease register pressure during the transform
loop++;
// Transform the pool
transform_pool(seed);
}
return max((sum / ASIAN_TIME_STEPS) - s, (float) 0.0);
}
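// Reading aid (not part of the original benchmark): a plain host-side restatement of the
// payoff the routine above accumulates. Given per-step normal draws ra[t], rb[t] it walks
// the same log-Euler recursion and returns max(average_of_max - final_max, 0). The function
// name and the array-based inputs are illustrative only, and it assumes the usual <math.h>
// helpers are visible in the including translation unit.
static float asian_basket_payoff_reference(const float *ra, const float *rb, int steps,
                                           float A_0, float B_0, float MU_A, float SIG_AA,
                                           float MU_B, float SIG_AB, float SIG_BB)
{
    float a = A_0, b = B_0, s = 0.0f, sum = 0.0f;
    for (int t = 0; t < steps; t++)
    {
        a *= expf(MU_A + ra[t] * SIG_AA);                   // log-Euler step for asset A
        b *= expf(MU_B + ra[t] * SIG_AB + rb[t] * SIG_BB);  // asset B shares ra, so SIG_AB carries the correlation
        s = fmaxf(a, b);                                    // current value of the better asset
        sum += s;                                           // accumulate for the arithmetic average
    }
    float payoff = sum / steps - s;                         // average of the running max minus its final value
    return payoff > 0.0f ? payoff : 0.0f;                   // option payoff cannot be negative
}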
__global__ void wallace_asian_basket(unsigned seed,
float *chi2Corrections,
float *globalPool,
float *simulationResultsMean,
float *simulationResultsVariance,
float *g_A_0, float *g_B_0, float *g_MU_A, float *g_SIG_AA, float *g_MU_B, float *g_SIG_AB, float *g_SIG_BB)
{
// Initialise loop and temporary loop storage
unsigned loop = 0;
// Define and load parameters for simulation
float A_0, B_0, MU_A, SIG_AA, MU_B, SIG_AB, SIG_BB;
unsigned address = (blockIdx.x * WALLACE_NUM_THREADS + threadIdx.x);
A_0 = g_A_0[address];
B_0 = g_B_0[address];
MU_A = g_MU_A[address];
MU_B = g_MU_B[address];
SIG_AA = g_SIG_AA[address];
SIG_AB = g_SIG_AB[address];
SIG_BB = g_SIG_BB[address];
// Initialise generator
initialise_wallace(seed, globalPool);
__syncthreads();
float mean = 0, varAcc = 0;
for (float i = 1; i <= ASIAN_PATHS_PER_SIM; i++)
{
float res = wallace_asian_basket_sim(seed, loop, chi2Corrections, A_0, B_0,
MU_A, SIG_AA, MU_B, SIG_AB, SIG_BB);
// update mean and variance in a numerically stable way
float delta = res - mean;
mean += delta / i;
varAcc += delta * (res - mean);
}
simulationResultsMean[address] = mean;
float variance = varAcc / (ASIAN_PATHS_PER_SIM - 1);
simulationResultsVariance[address] = variance;
}
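// Reading aid (not in the original benchmark): the per-thread mean/variance update in the
// loop above is Welford's online algorithm. A host-side reference over an array of
// already-computed payoffs, with an illustrative name, looks like this:
static void welford_reference(const float *x, int n, float *mean_out, float *variance_out)
{
    float mean = 0.0f, varAcc = 0.0f;
    for (int i = 1; i <= n; i++)
    {
        float delta = x[i - 1] - mean;        // distance of the new sample from the running mean
        mean += delta / i;                    // running mean after i samples
        varAcc += delta * (x[i - 1] - mean);  // accumulates the sum of squared deviations
    }
    *mean_out = mean;
    *variance_out = (n > 1) ? varAcc / (n - 1) : 0.0f;  // unbiased sample variance
}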
__device__ float tausworthe_asian_basket_sim(float A_0, float B_0, float MU_A, float SIG_AA, float MU_B, float SIG_AB, float SIG_BB, unsigned &z1, unsigned &z2,
unsigned &z3, unsigned &z4)
{
float a = A_0, b = B_0, s = 0, sum = 0;
float temp_random_value;
// Timesteps for a single simulation
// Divide by 4 because we then do an internal loop 4 times
for (unsigned t = 0; t < (ASIAN_TIME_STEPS / 4); t++)
{
float ra, rb;
for (int i = 0; i < 8; i += 2)
{
ra = getRandomValueTausworthe(z1, z2, z3, z4, temp_random_value, 0);
rb = getRandomValueTausworthe(z1, z2, z3, z4, temp_random_value, 1);
a *= exp(MU_A + ra * SIG_AA);
b *= exp(MU_B + ra * SIG_AB + rb * SIG_BB);
s = max(a, b);
sum += s;
}
}
return max((sum / ASIAN_TIME_STEPS) - s, (float) 0.0);
}
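// Reading aid: getRandomValueTausworthe() is defined elsewhere in this benchmark. The
// textbook combined Tausworthe/LCG uniform generator that such kernels are usually built
// from is sketched below with the commonly published shift constants; the benchmark's own
// implementation may differ in its details, so treat this purely as an illustration.
__host__ __device__ inline unsigned taus_step_ref(unsigned &z, int S1, int S2, int S3, unsigned M)
{
    unsigned b = ((z << S1) ^ z) >> S2;
    z = ((z & M) << S3) ^ b;                  // one Tausworthe component step
    return z;
}
__host__ __device__ inline unsigned lcg_step_ref(unsigned &z)
{
    z = 1664525u * z + 1013904223u;           // same LCG constants as the Wallace kernel above
    return z;
}
__host__ __device__ inline float hybrid_taus_uniform_ref(unsigned &z1, unsigned &z2, unsigned &z3, unsigned &z4)
{
    // XOR of three Tausworthe streams and one LCG, scaled to a float in (0, 1)
    return 2.3283064365387e-10f * (taus_step_ref(z1, 13, 19, 12, 4294967294u) ^
                                   taus_step_ref(z2, 2, 25, 4, 4294967288u) ^
                                   taus_step_ref(z3, 3, 11, 17, 4294967280u) ^
                                   lcg_step_ref(z4));
}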
__global__ void tausworthe_asian_basket(unsigned int *seedValues,
float *simulationResultsMean,
float *simulationResultsVariance,
float *g_A_0, float *g_B_0, float *g_MU_A, float *g_SIG_AA, float *g_MU_B, float *g_SIG_AB, float *g_SIG_BB)
{
// RNG state
unsigned z1, z2, z3, z4;
// Initialise the Tausworthe generator state with seeds
z1 = seedValues[threadIdx.x];
z2 = seedValues[TAUSWORTHE_TOTAL_NUM_THREADS + threadIdx.x];
z3 = seedValues[2 * TAUSWORTHE_TOTAL_NUM_THREADS + threadIdx.x];
z4 = seedValues[3 * TAUSWORTHE_TOTAL_NUM_THREADS + threadIdx.x];
// Define and load parameters for simulation
float A_0, B_0, MU_A, SIG_AA, MU_B, SIG_AB, SIG_BB;
unsigned address = (blockIdx.x * TAUSWORTHE_NUM_THREADS + threadIdx.x);
A_0 = g_A_0[address];
B_0 = g_B_0[address];
MU_A = g_MU_A[address];
MU_B = g_MU_B[address];
SIG_AA = g_SIG_AA[address];
SIG_AB = g_SIG_AB[address];
SIG_BB = g_SIG_BB[address];
float mean = 0, varAcc = 0;
for (float i = 1; i <= ASIAN_PATHS_PER_SIM; i++)
{
float res = tausworthe_asian_basket_sim(A_0, B_0, MU_A, SIG_AA, MU_B, SIG_AB,
SIG_BB, z1, z2, z3, z4);
// update mean and variance in a numerically stable way
float delta = res - mean;
mean += delta / i;
varAcc += delta * (res - mean);
}
simulationResultsMean[address] = mean;
float variance = varAcc / (ASIAN_PATHS_PER_SIM - 1);
simulationResultsVariance[address] = variance;
}
void computeAsianOptions()
{
init_asian_options();
// setup execution parameters and execute
dim3 asian_tausworth_grid(TAUSWORTHE_NUM_BLOCKS, 1, 1);
dim3 asian_tausworth_threads(TAUSWORTHE_NUM_THREADS, 1, 1);
dim3 asian_wallace_grid(WALLACE_NUM_BLOCKS, 1, 1);
dim3 asian_wallace_threads(WALLACE_NUM_THREADS, 1, 1);
// Execute the Tausworthe version of the code, timing as we go
CUT_SAFE_CALL(cutStartTimer(timer_asian_tw));
hipLaunchKernelGGL(( tausworthe_asian_basket) , dim3(asian_tausworth_grid), dim3(asian_tausworth_threads),
0 , 0, deviceTauswortheSeeds, device_asianSimulationResultsMean,
device_asianSimulationResultsVariance, device_asian_A_0,
device_asian_B_0, device_asian_MU_A, device_asian_SIG_AA, device_asian_MU_B, device_asian_SIG_AB, device_asian_SIG_BB);
CUT_SAFE_CALL(cutStopTimer(timer_asian_tw));
CUT_CHECK_ERROR("Kernel execution failed: asian tausworthe");
unsigned seed = 1;
CUT_SAFE_CALL(cutStartTimer(timer_asian_wallace));
// Execute the Wallace version of the code, timing as we go
// Extra shared memory space to store loop counter temporarily to ease register pressure
hipLaunchKernelGGL(( wallace_asian_basket) , dim3(asian_wallace_grid), dim3(asian_wallace_threads),
WALLACE_POOL_SIZE * 4 + WALLACE_NUM_THREADS * 4 +
WALLACE_CHI2_SHARED_SIZE * 4 , 0, seed, deviceAsianChi2Corrections,
devPool,
device_asianSimulationResultsMean,
device_asianSimulationResultsVariance,
device_asian_A_0, device_asian_B_0,
device_asian_MU_A, device_asian_SIG_AA, device_asian_MU_B, device_asian_SIG_AB, device_asian_SIG_BB);
CUT_SAFE_CALL(cutStopTimer(timer_asian_wallace));
// check if kernel execution generated an error
CUT_CHECK_ERROR("Kernel execution failed: asian wallace");
CUT_SAFE_CALL(cutStartTimer(timer_asian_download));
CUDA_SAFE_CALL(hipMemcpy(asianSimulationResultsMean, device_asianSimulationResultsMean, 4 * ASIAN_NUM_PARAMETER_VALUES, hipMemcpyDeviceToHost));
CUDA_SAFE_CALL(hipMemcpy(asianSimulationResultsVariance, device_asianSimulationResultsVariance, 4 * ASIAN_NUM_PARAMETER_VALUES, hipMemcpyDeviceToHost));
CUT_SAFE_CALL(cutStopTimer(timer_asian_download));
printf("\n\nAsian option results:\n");
printf
("Processing time for asian initialisation code: %f (ms) for %d Simulations, %f MSimulations/sec\n",
cutGetTimerValue(timer_asian_init), ASIAN_NUM_PARAMETER_VALUES * ASIAN_PATHS_PER_SIM,
ASIAN_NUM_PARAMETER_VALUES * ASIAN_PATHS_PER_SIM / (cutGetTimerValue(timer_asian_init) / 1000.0) / 1000000.0);
printf
("Processing time for asian tausworthe: %f (ms) for %d Steps, %f MSteps/sec\n",
cutGetTimerValue(timer_asian_tw), ASIAN_NUM_PARAMETER_VALUES * ASIAN_PATHS_PER_SIM * ASIAN_TIME_STEPS,
ASIAN_NUM_PARAMETER_VALUES * ASIAN_PATHS_PER_SIM * ASIAN_TIME_STEPS / (cutGetTimerValue(timer_asian_tw) / 1000.0) / 1000000.0);
printf("Processing time for asian wallace: %f (ms) for %d Simulations, %f Simulations/sec\n", cutGetTimerValue(timer_asian_wallace),
ASIAN_NUM_PARAMETER_VALUES, ASIAN_NUM_PARAMETER_VALUES / (cutGetTimerValue(timer_asian_wallace) / 1000.0) / 1000000.0);
printf("Upload time for asian options: %f (ms) for %d bytes, %f MB/sec\n", cutGetTimerValue(timer_asian_upload),
4 * ASIAN_WALLACE_CHI2_COUNT + 7 * 4 * ASIAN_NUM_PARAMETER_VALUES,
(4 * ASIAN_WALLACE_CHI2_COUNT + 9 * 4 * ASIAN_NUM_PARAMETER_VALUES / (cutGetTimerValue(timer_asian_upload))) / 1000.0);
printf("Download time for asian options: %f (ms) for %d bytes, %f MB/sec\n", cutGetTimerValue(timer_asian_download),
4 * ASIAN_WALLACE_CHI2_COUNT + 4 * ASIAN_NUM_PARAMETER_VALUES,
(4 * ASIAN_WALLACE_CHI2_COUNT + 9 * 4 * ASIAN_NUM_PARAMETER_VALUES / (cutGetTimerValue(timer_asian_download))) / 1000.0);
printf("Malloc time for asian options: %f (ms) for %d bytes, %f MB/sec\n", cutGetTimerValue(timer_asian_malloc),
4 * ASIAN_WALLACE_CHI2_COUNT + 9 * 4 * ASIAN_NUM_PARAMETER_VALUES,
(4 * ASIAN_WALLACE_CHI2_COUNT + 9 * 4 * ASIAN_NUM_PARAMETER_VALUES / (cutGetTimerValue(timer_asian_malloc))) / 1000.0);
printf("hipMalloc time for asian options: %f (ms) for %d bytes, %f MB/sec\n", cutGetTimerValue(timer_asian_cuda_malloc),
4 * ASIAN_WALLACE_CHI2_COUNT + 9 * 4 * ASIAN_NUM_PARAMETER_VALUES,
(4 * ASIAN_WALLACE_CHI2_COUNT + 7 * 4 * ASIAN_NUM_PARAMETER_VALUES / (cutGetTimerValue(timer_asian_cuda_malloc))) / 1000.0);
printf("free time for asian options: %f (ms) for %d bytes, %f MB/sec\n", cutGetTimerValue(timer_asian_free),
4 * ASIAN_WALLACE_CHI2_COUNT + 9 * 4 * ASIAN_NUM_PARAMETER_VALUES,
(4 * ASIAN_WALLACE_CHI2_COUNT + 9 * 4 * ASIAN_NUM_PARAMETER_VALUES / (cutGetTimerValue(timer_asian_free))) / 1000.0);
printf("hipFree time for asian options: %f (ms) for %d bytes, %f MB/sec\n", cutGetTimerValue(timer_asian_cuda_free),
4 * ASIAN_WALLACE_CHI2_COUNT + 9 * 4 * ASIAN_NUM_PARAMETER_VALUES,
(4 * ASIAN_WALLACE_CHI2_COUNT + 9 * 4 * ASIAN_NUM_PARAMETER_VALUES / (cutGetTimerValue(timer_asian_cuda_free))) / 1000.0);
cleanup_asian_options();
}
#endif // #ifndef _TEMPLATE_KERNEL_H_
|
e515d14a64f3f95679b16421df1b405d3809fab0.cu
|
// ************************************************
// asian_option_kernel.cu
// authors: Lee Howes and David B. Thomas
//
// Initialises, executes, and cleans up an
// Asian option performance test timing
// and outputting timing results for various
// aspects of the execution including memory
// allocation, initialisation and generation.
// ************************************************
#ifndef _ASIAN_OPTIONS_CU_
#define _ASIAN_OPTIONS_CU_
float *asian_A_0, *asian_B_0, *asian_MU_A, *asian_SIG_AA, *asian_MU_B, *asian_SIG_AB, *asian_SIG_BB;
float *device_asian_A_0, *device_asian_B_0, *device_asian_MU_A, *device_asian_SIG_AA, *device_asian_MU_B, *device_asian_SIG_AB, *device_asian_SIG_BB;
float *asianSimulationResultsMean, *asianSimulationResultsVariance;
float *device_asianSimulationResultsMean, *device_asianSimulationResultsVariance;
float *asianChi2Corrections = 0;
float *deviceAsianChi2Corrections = 0;
unsigned int timer_asian_tw = 0;
unsigned int timer_asian_wallace = 0;
unsigned int timer_asian_init = 0;
unsigned int timer_asian_upload = 0;
unsigned int timer_asian_download = 0;
unsigned int timer_asian_malloc = 0;
unsigned int timer_asian_cuda_malloc = 0;
unsigned int timer_asian_free = 0;
unsigned int timer_asian_cuda_free = 0;
void init_asian_options()
{
CUT_SAFE_CALL(cutCreateTimer(&timer_asian_tw));
CUT_SAFE_CALL(cutCreateTimer(&timer_asian_wallace));
CUT_SAFE_CALL(cutCreateTimer(&timer_asian_init));
CUT_SAFE_CALL(cutCreateTimer(&timer_asian_upload));
CUT_SAFE_CALL(cutCreateTimer(&timer_asian_download));
CUT_SAFE_CALL(cutCreateTimer(&timer_asian_malloc));
CUT_SAFE_CALL(cutCreateTimer(&timer_asian_cuda_malloc));
CUT_SAFE_CALL(cutCreateTimer(&timer_asian_free));
CUT_SAFE_CALL(cutCreateTimer(&timer_asian_cuda_free));
CUT_SAFE_CALL(cutStartTimer(timer_asian_malloc));
// Asian option memory allocations
asian_A_0 = (float *) malloc(4 * ASIAN_NUM_PARAMETER_VALUES);
asian_B_0 = (float *) malloc(4 * ASIAN_NUM_PARAMETER_VALUES);
asian_MU_A = (float *) malloc(4 * ASIAN_NUM_PARAMETER_VALUES);
asian_SIG_AA = (float *) malloc(4 * ASIAN_NUM_PARAMETER_VALUES);
asian_MU_B = (float *) malloc(4 * ASIAN_NUM_PARAMETER_VALUES);
asian_SIG_AB = (float *) malloc(4 * ASIAN_NUM_PARAMETER_VALUES);
asian_SIG_BB = (float *) malloc(4 * ASIAN_NUM_PARAMETER_VALUES);
asianChi2Corrections = (float *) malloc(4 * ASIAN_WALLACE_CHI2_COUNT);
asianSimulationResultsMean = (float *) malloc(4 * ASIAN_NUM_PARAMETER_VALUES);
asianSimulationResultsVariance = (float *) malloc(4 * ASIAN_NUM_PARAMETER_VALUES);
CUT_SAFE_CALL(cutStopTimer(timer_asian_malloc));
CUT_SAFE_CALL(cutStartTimer(timer_asian_cuda_malloc));
// Asian option memory allocations
CUDA_SAFE_CALL(cudaMalloc((void **) &device_asian_A_0, 4 * ASIAN_NUM_PARAMETER_VALUES));
CUDA_SAFE_CALL(cudaMalloc((void **) &device_asian_B_0, 4 * ASIAN_NUM_PARAMETER_VALUES));
CUDA_SAFE_CALL(cudaMalloc((void **) &device_asian_MU_A, 4 * ASIAN_NUM_PARAMETER_VALUES));
CUDA_SAFE_CALL(cudaMalloc((void **) &device_asian_SIG_AA, 4 * ASIAN_NUM_PARAMETER_VALUES));
CUDA_SAFE_CALL(cudaMalloc((void **) &device_asian_MU_B, 4 * ASIAN_NUM_PARAMETER_VALUES));
CUDA_SAFE_CALL(cudaMalloc((void **) &device_asian_SIG_AB, 4 * ASIAN_NUM_PARAMETER_VALUES));
CUDA_SAFE_CALL(cudaMalloc((void **) &device_asian_SIG_BB, 4 * ASIAN_NUM_PARAMETER_VALUES));
CUDA_SAFE_CALL(cudaMalloc((void **) &deviceAsianChi2Corrections, 4 * ASIAN_WALLACE_CHI2_COUNT));
CUDA_SAFE_CALL(cudaMalloc((void **) &device_asianSimulationResultsMean, 4 * ASIAN_NUM_PARAMETER_VALUES));
CUDA_SAFE_CALL(cudaMalloc((void **) &device_asianSimulationResultsVariance, 4 * ASIAN_NUM_PARAMETER_VALUES));
CUT_SAFE_CALL(cutStopTimer(timer_asian_cuda_malloc));
CUT_SAFE_CALL(cutStartTimer(timer_asian_init));
// Initialise asian option parameters, random guesses at this point...
for (unsigned i = 0; i < ASIAN_NUM_PARAMETER_VALUES; i++)
{
asian_A_0[i] = Rand();
asian_B_0[i] = Rand();
asian_MU_A[i] = Rand();
asian_MU_B[i] = Rand();
asian_SIG_AA[i] = Rand();
asian_SIG_AB[i] = Rand();
asian_SIG_BB[i] = Rand();
}
for (int i = 0; i < ASIAN_WALLACE_CHI2_COUNT; i++)
{
asianChi2Corrections[i] = MakeChi2Scale(WALLACE_TOTAL_POOL_SIZE);
}
CUT_SAFE_CALL(cutStopTimer(timer_asian_init));
CUT_SAFE_CALL(cutStartTimer(timer_asian_upload));
CUDA_SAFE_CALL(cudaMemcpy(device_asian_A_0, asian_A_0, 4 * ASIAN_NUM_PARAMETER_VALUES, cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemcpy(device_asian_B_0, asian_B_0, 4 * ASIAN_NUM_PARAMETER_VALUES, cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemcpy(device_asian_MU_A, asian_MU_A, 4 * ASIAN_NUM_PARAMETER_VALUES, cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemcpy(device_asian_MU_B, asian_MU_B, 4 * ASIAN_NUM_PARAMETER_VALUES, cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemcpy(device_asian_SIG_AA, asian_SIG_AA, 4 * ASIAN_NUM_PARAMETER_VALUES, cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemcpy(device_asian_SIG_AB, asian_SIG_AB, 4 * ASIAN_NUM_PARAMETER_VALUES, cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemcpy(device_asian_SIG_BB, asian_SIG_BB, 4 * ASIAN_NUM_PARAMETER_VALUES, cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemcpy(deviceAsianChi2Corrections, asianChi2Corrections, 4 * ASIAN_WALLACE_CHI2_COUNT, cudaMemcpyHostToDevice));
CUT_SAFE_CALL(cutStopTimer(timer_asian_upload));
}
void cleanup_asian_options()
{
CUT_SAFE_CALL(cutStartTimer(timer_asian_free));
// Asian option memory allocations
free(asian_A_0);
free(asian_B_0);
free(asian_MU_A);
free(asian_SIG_AA);
free(asian_MU_B);
free(asian_SIG_AB);
free(asian_SIG_BB);
free(asianChi2Corrections);
free(asianSimulationResultsMean);
free(asianSimulationResultsVariance);
CUT_SAFE_CALL(cutStopTimer(timer_asian_free));
CUT_SAFE_CALL(cutStartTimer(timer_asian_cuda_free));
// Asian option memory allocations
CUDA_SAFE_CALL(cudaFree(device_asian_A_0));
CUDA_SAFE_CALL(cudaFree(device_asian_B_0));
CUDA_SAFE_CALL(cudaFree(device_asian_MU_A));
CUDA_SAFE_CALL(cudaFree(device_asian_SIG_AA));
CUDA_SAFE_CALL(cudaFree(device_asian_MU_B));
CUDA_SAFE_CALL(cudaFree(device_asian_SIG_AB));
CUDA_SAFE_CALL(cudaFree(device_asian_SIG_BB));
CUDA_SAFE_CALL(cudaFree(deviceAsianChi2Corrections));
CUDA_SAFE_CALL(cudaFree(device_asianSimulationResultsMean));
CUDA_SAFE_CALL(cudaFree(device_asianSimulationResultsVariance));
CUT_SAFE_CALL(cutStopTimer(timer_asian_cuda_free));
CUT_SAFE_CALL(cutDeleteTimer(timer_asian_tw));
CUT_SAFE_CALL(cutDeleteTimer(timer_asian_wallace));
CUT_SAFE_CALL(cutDeleteTimer(timer_asian_init));
CUT_SAFE_CALL(cutDeleteTimer(timer_asian_upload));
CUT_SAFE_CALL(cutDeleteTimer(timer_asian_download));
CUT_SAFE_CALL(cutDeleteTimer(timer_asian_malloc));
CUT_SAFE_CALL(cutDeleteTimer(timer_asian_cuda_malloc));
CUT_SAFE_CALL(cutDeleteTimer(timer_asian_free));
CUT_SAFE_CALL(cutDeleteTimer(timer_asian_cuda_free));
}
__device__ float
wallace_asian_basket_sim(unsigned seed,
unsigned &loop, float *chi2Corrections, float A_0, float B_0, float MU_A, float SIG_AA, float MU_B, float SIG_AB, float SIG_BB)
{
float a = A_0, b = B_0, s = 0, sum = 0;
// Timesteps for a single simulation
for (unsigned t = 0; t < (ASIAN_TIME_STEPS / 4); t++)
{
float ra, rb;
// Read in the chi2Correction value only in a single thread and use shared memory to broadcast to other threads
if (threadIdx.x == 0)
pool[WALLACE_CHI2_OFFSET_ASIAN] = chi2Corrections[__mul24(blockIdx.x, ASIAN_WALLACE_CHI2_VALUES_PER_BLOCK) + loop];
__syncthreads();
float chi2CorrAndScale = pool[WALLACE_CHI2_OFFSET_ASIAN];
for (int i = 0; i < 8; i += 2)
{
seed = (1664525U * seed + 1013904223U) & 0xFFFFFFFF;
ra = getRandomValue(i, loop, chi2CorrAndScale);
rb = getRandomValue(i + 1, loop, chi2CorrAndScale);
a *= exp(MU_A + ra * SIG_AA);
b *= exp(MU_B + ra * SIG_AB + rb * SIG_BB);
s = max(a, b);
sum += s;
}
// Count up and temporarily store the loop value to ease register pressure during the transform
loop++;
// Transform the pool
transform_pool(seed);
}
return max((sum / ASIAN_TIME_STEPS) - s, (float) 0.0);
}
__global__ void wallace_asian_basket(unsigned seed,
float *chi2Corrections,
float *globalPool,
float *simulationResultsMean,
float *simulationResultsVariance,
float *g_A_0, float *g_B_0, float *g_MU_A, float *g_SIG_AA, float *g_MU_B, float *g_SIG_AB, float *g_SIG_BB)
{
// Initialise loop and temporary loop storage
unsigned loop = 0;
// Define and load parameters for simulation
float A_0, B_0, MU_A, SIG_AA, MU_B, SIG_AB, SIG_BB;
unsigned address = (blockIdx.x * WALLACE_NUM_THREADS + threadIdx.x);
A_0 = g_A_0[address];
B_0 = g_B_0[address];
MU_A = g_MU_A[address];
MU_B = g_MU_B[address];
SIG_AA = g_SIG_AA[address];
SIG_AB = g_SIG_AB[address];
SIG_BB = g_SIG_BB[address];
// Initialise generator
initialise_wallace(seed, globalPool);
__syncthreads();
float mean = 0, varAcc = 0;
for (float i = 1; i <= ASIAN_PATHS_PER_SIM; i++)
{
float res = wallace_asian_basket_sim(seed, loop, chi2Corrections, A_0, B_0,
MU_A, SIG_AA, MU_B, SIG_AB, SIG_BB);
// update mean and variance in a numerically stable way
float delta = res - mean;
mean += delta / i;
varAcc += delta * (res - mean);
}
simulationResultsMean[address] = mean;
float variance = varAcc / (ASIAN_PATHS_PER_SIM - 1);
simulationResultsVariance[address] = variance;
}
__device__ float tausworthe_asian_basket_sim(float A_0, float B_0, float MU_A, float SIG_AA, float MU_B, float SIG_AB, float SIG_BB, unsigned &z1, unsigned &z2,
unsigned &z3, unsigned &z4)
{
float a = A_0, b = B_0, s = 0, sum = 0;
float temp_random_value;
// Timesteps for a single simulation
// Divide by 4 because we then do an internal loop 4 times
for (unsigned t = 0; t < (ASIAN_TIME_STEPS / 4); t++)
{
float ra, rb;
for (int i = 0; i < 8; i += 2)
{
ra = getRandomValueTausworthe(z1, z2, z3, z4, temp_random_value, 0);
rb = getRandomValueTausworthe(z1, z2, z3, z4, temp_random_value, 1);
a *= exp(MU_A + ra * SIG_AA);
b *= exp(MU_B + ra * SIG_AB + rb * SIG_BB);
s = max(a, b);
sum += s;
}
}
return max((sum / ASIAN_TIME_STEPS) - s, (float) 0.0);
}
__global__ void tausworthe_asian_basket(unsigned int *seedValues,
float *simulationResultsMean,
float *simulationResultsVariance,
float *g_A_0, float *g_B_0, float *g_MU_A, float *g_SIG_AA, float *g_MU_B, float *g_SIG_AB, float *g_SIG_BB)
{
// RNG state
unsigned z1, z2, z3, z4;
// Initialise the Tausworthe generator state with seeds
z1 = seedValues[threadIdx.x];
z2 = seedValues[TAUSWORTHE_TOTAL_NUM_THREADS + threadIdx.x];
z3 = seedValues[2 * TAUSWORTHE_TOTAL_NUM_THREADS + threadIdx.x];
z4 = seedValues[3 * TAUSWORTHE_TOTAL_NUM_THREADS + threadIdx.x];
// Define and load parameters for simulation
float A_0, B_0, MU_A, SIG_AA, MU_B, SIG_AB, SIG_BB;
unsigned address = (blockIdx.x * TAUSWORTHE_NUM_THREADS + threadIdx.x);
A_0 = g_A_0[address];
B_0 = g_B_0[address];
MU_A = g_MU_A[address];
MU_B = g_MU_B[address];
SIG_AA = g_SIG_AA[address];
SIG_AB = g_SIG_AB[address];
SIG_BB = g_SIG_BB[address];
float mean = 0, varAcc = 0;
for (float i = 1; i <= ASIAN_PATHS_PER_SIM; i++)
{
float res = tausworthe_asian_basket_sim(A_0, B_0, MU_A, SIG_AA, MU_B, SIG_AB,
SIG_BB, z1, z2, z3, z4);
// update mean and variance in a numerically stable way
float delta = res - mean;
mean += delta / i;
varAcc += delta * (res - mean);
}
simulationResultsMean[address] = mean;
float variance = varAcc / (ASIAN_PATHS_PER_SIM - 1);
simulationResultsVariance[address] = variance;
}
void computeAsianOptions()
{
init_asian_options();
// setup execution parameters and execute
dim3 asian_tausworth_grid(TAUSWORTHE_NUM_BLOCKS, 1, 1);
dim3 asian_tausworth_threads(TAUSWORTHE_NUM_THREADS, 1, 1);
dim3 asian_wallace_grid(WALLACE_NUM_BLOCKS, 1, 1);
dim3 asian_wallace_threads(WALLACE_NUM_THREADS, 1, 1);
// Execute the Tausworthe version of the code, timing as we go
CUT_SAFE_CALL(cutStartTimer(timer_asian_tw));
tausworthe_asian_basket <<< asian_tausworth_grid, asian_tausworth_threads,
0 >>> (deviceTauswortheSeeds, device_asianSimulationResultsMean,
device_asianSimulationResultsVariance, device_asian_A_0,
device_asian_B_0, device_asian_MU_A, device_asian_SIG_AA, device_asian_MU_B, device_asian_SIG_AB, device_asian_SIG_BB);
CUT_SAFE_CALL(cutStopTimer(timer_asian_tw));
CUT_CHECK_ERROR("Kernel execution failed: asian tausworthe");
unsigned seed = 1;
CUT_SAFE_CALL(cutStartTimer(timer_asian_wallace));
// Execute the Wallace version of the code, timing as we go
// Extra shared memory space to store loop counter temporarily to ease register pressure
wallace_asian_basket <<< asian_wallace_grid, asian_wallace_threads,
WALLACE_POOL_SIZE * 4 + WALLACE_NUM_THREADS * 4 +
WALLACE_CHI2_SHARED_SIZE * 4 >>> (seed, deviceAsianChi2Corrections,
devPool,
device_asianSimulationResultsMean,
device_asianSimulationResultsVariance,
device_asian_A_0, device_asian_B_0,
device_asian_MU_A, device_asian_SIG_AA, device_asian_MU_B, device_asian_SIG_AB, device_asian_SIG_BB);
CUT_SAFE_CALL(cutStopTimer(timer_asian_wallace));
// check if kernel execution generated an error
CUT_CHECK_ERROR("Kernel execution failed: asian wallace");
CUT_SAFE_CALL(cutStartTimer(timer_asian_download));
CUDA_SAFE_CALL(cudaMemcpy(asianSimulationResultsMean, device_asianSimulationResultsMean, 4 * ASIAN_NUM_PARAMETER_VALUES, cudaMemcpyDeviceToHost));
CUDA_SAFE_CALL(cudaMemcpy(asianSimulationResultsVariance, device_asianSimulationResultsVariance, 4 * ASIAN_NUM_PARAMETER_VALUES, cudaMemcpyDeviceToHost));
CUT_SAFE_CALL(cutStopTimer(timer_asian_download));
printf("\n\nAsian option results:\n");
printf
("Processing time for asian initialisation code: %f (ms) for %d Simulations, %f MSimulations/sec\n",
cutGetTimerValue(timer_asian_init), ASIAN_NUM_PARAMETER_VALUES * ASIAN_PATHS_PER_SIM,
ASIAN_NUM_PARAMETER_VALUES * ASIAN_PATHS_PER_SIM / (cutGetTimerValue(timer_asian_init) / 1000.0) / 1000000.0);
printf
("Processing time for asian tausworthe: %f (ms) for %d Steps, %f MSteps/sec\n",
cutGetTimerValue(timer_asian_tw), ASIAN_NUM_PARAMETER_VALUES * ASIAN_PATHS_PER_SIM * ASIAN_TIME_STEPS,
ASIAN_NUM_PARAMETER_VALUES * ASIAN_PATHS_PER_SIM * ASIAN_TIME_STEPS / (cutGetTimerValue(timer_asian_tw) / 1000.0) / 1000000.0);
printf("Processing time for asian wallace: %f (ms) for %d Simulations, %f Simulations/sec\n", cutGetTimerValue(timer_asian_wallace),
ASIAN_NUM_PARAMETER_VALUES, ASIAN_NUM_PARAMETER_VALUES / (cutGetTimerValue(timer_asian_wallace) / 1000.0) / 1000000.0);
printf("Upload time for asian options: %f (ms) for %d bytes, %f MB/sec\n", cutGetTimerValue(timer_asian_upload),
4 * ASIAN_WALLACE_CHI2_COUNT + 7 * 4 * ASIAN_NUM_PARAMETER_VALUES,
(4 * ASIAN_WALLACE_CHI2_COUNT + 9 * 4 * ASIAN_NUM_PARAMETER_VALUES / (cutGetTimerValue(timer_asian_upload))) / 1000.0);
printf("Download time for asian options: %f (ms) for %d bytes, %f MB/sec\n", cutGetTimerValue(timer_asian_download),
4 * ASIAN_WALLACE_CHI2_COUNT + 4 * ASIAN_NUM_PARAMETER_VALUES,
(4 * ASIAN_WALLACE_CHI2_COUNT + 9 * 4 * ASIAN_NUM_PARAMETER_VALUES / (cutGetTimerValue(timer_asian_download))) / 1000.0);
printf("Malloc time for asian options: %f (ms) for %d bytes, %f MB/sec\n", cutGetTimerValue(timer_asian_malloc),
4 * ASIAN_WALLACE_CHI2_COUNT + 9 * 4 * ASIAN_NUM_PARAMETER_VALUES,
(4 * ASIAN_WALLACE_CHI2_COUNT + 9 * 4 * ASIAN_NUM_PARAMETER_VALUES / (cutGetTimerValue(timer_asian_malloc))) / 1000.0);
printf("cudaMalloc time for asian options: %f (ms) for %d bytes, %f MB/sec\n", cutGetTimerValue(timer_asian_cuda_malloc),
4 * ASIAN_WALLACE_CHI2_COUNT + 9 * 4 * ASIAN_NUM_PARAMETER_VALUES,
(4 * ASIAN_WALLACE_CHI2_COUNT + 7 * 4 * ASIAN_NUM_PARAMETER_VALUES / (cutGetTimerValue(timer_asian_cuda_malloc))) / 1000.0);
printf("free time for asian options: %f (ms) for %d bytes, %f MB/sec\n", cutGetTimerValue(timer_asian_free),
4 * ASIAN_WALLACE_CHI2_COUNT + 9 * 4 * ASIAN_NUM_PARAMETER_VALUES,
(4 * ASIAN_WALLACE_CHI2_COUNT + 9 * 4 * ASIAN_NUM_PARAMETER_VALUES / (cutGetTimerValue(timer_asian_free))) / 1000.0);
printf("cudaFree time for asian options: %f (ms) for %d bytes, %f MB/sec\n", cutGetTimerValue(timer_asian_cuda_free),
4 * ASIAN_WALLACE_CHI2_COUNT + 9 * 4 * ASIAN_NUM_PARAMETER_VALUES,
(4 * ASIAN_WALLACE_CHI2_COUNT + 9 * 4 * ASIAN_NUM_PARAMETER_VALUES / (cutGetTimerValue(timer_asian_cuda_free))) / 1000.0);
cleanup_asian_options();
}
#endif // #ifndef _TEMPLATE_KERNEL_H_
|
d59f02f77627c10dad577994873aa423ccec341a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "SDL_image.h"
#include <stdio.h>
#include <time.h>
#include <stdint.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include "cuStopwatch.cu"
#include <iterator>
#include <iostream>
enum conv_t{
CONV_IDENTITY,
CONV_EDGE,
CONV_SHARP,
CONV_GAUSS
};
SDL_Window *screen;
SDL_Window *screen_res;
SDL_Renderer *ren;
SDL_Renderer *ren_res;
SDL_Texture *tex;
SDL_Texture *tex_res;
SDL_Surface *surf;
int32_t width, height;
float filter[9];
float __constant__ filter_device[9];
// Check if the index on the row is reachable
__device__ int checkRowPixels(int row, int currentIdx, int width, int height) {
if(row < 0 || row == height) {
return 0;
}
return width * 3 * currentIdx;
}
// Check if the index on the column is reachable
__device__ int checkColumnPixels(int column, int currentIdy, int width) {
if(column < 0 || column == width) {
return 0;
}
return currentIdy * 3;
}
// Compute and apply on the picture the given filter
__device__ char compute_filter(const unsigned char* src, int tidx, int tidy, int w, int h, int32_t color) {
int result = 0;
int row = tidx / w; // representation of 2D coordinate x
int column = tidy / h; // representation of 2D coordinate y
for(int i = -1; i <= 1 ; i++){
row = checkRowPixels(row + i, i, w, h); // check if row case we want to retrieve is outside the table
for(int j = -1; j <= 1; j++) {
column = checkColumnPixels(column + j, j, w); // check if column case we want to retrieve is outside the table
result += filter_device[(i+1)*3+(j+1)] * src[tidx*3 + tidy*3 + row + column + color]; // tids * 3 / three colors represented
// operation to get the good index (0, 1, 2...)
}
}
return (result < 0 ? 0 : (result > 255 ? 255 : result)); // the value of a color of a pixel can range from 0 to 255
// so if we want a nice image we have to truncate the value
}
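// Reading aid (not part of the exercise): what a 3x3 convolution on a tightly packed
// RGB24 image computes, written plainly for comparison with the device code above.
// Out-of-image neighbours are treated as zero here, which is one common convention and
// not necessarily the one the assignment expects; the name is illustrative.
static unsigned char convolve3x3_reference(const unsigned char *src, int w, int h,
                                           int x, int y, int color, const float kernel[9])
{
    float acc = 0.0f;
    for (int dy = -1; dy <= 1; dy++) {
        for (int dx = -1; dx <= 1; dx++) {
            int nx = x + dx, ny = y + dy;
            if (nx < 0 || nx >= w || ny < 0 || ny >= h) continue;       // zero contribution outside the image
            acc += kernel[(dy + 1) * 3 + (dx + 1)] * src[(ny * w + nx) * 3 + color];
        }
    }
    int v = (int)acc;
    return (unsigned char)(v < 0 ? 0 : (v > 255 ? 255 : v));            // clamp to the 0..255 range
}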
/**
Apply the given filter on the given image stored in the global memory
*/
__global__ void conv_global(const unsigned char* src, unsigned char* dest, int32_t w, int32_t h){
int tIdx = blockDim.x * blockIdx.x + threadIdx.x; // identifier of thread along x
int tIdy = blockDim.y * blockIdx.y + threadIdx.y; // identifier of thread along y
if(tIdx < w*h) { // one thread per pixel: make sure this thread maps to a pixel inside the image
for(int color = 0; color < 3; color++) { // loop on the three colors
dest[tIdx*3 + color] = compute_filter(src, tIdx, tIdy, w, h, color); // compute and store in the dest the value of each pixel after passed the filter
}
}
}
// Allocate device memory, copy the image over, run the kernel, and copy the filtered result back to the host
float conv_global_gpu(unsigned char* pixels, int32_t w, int32_t h){
// todo: write the code that manages memory (global memory) and invokes the kernel conv_global, it should return the running time
int size = w * h * 3;
unsigned char *pixels_dev, *output_dev, *host_pixels;
cuStopwatch timer;
// start the timer
timer.start();
hipHostMalloc((void **)&host_pixels, size * sizeof(unsigned char), hipHostMallocDefault); // Allocate Host memory of the image from the device / get the image from the host and transfer to the gpu
hipMalloc((void **)&pixels_dev, size * sizeof(unsigned char)); // allocate the original image for the GPU
hipMalloc((void **)&output_dev, size * sizeof(unsigned char)); // allocate image filtered for the GPU
memcpy(host_pixels, pixels, size * sizeof(unsigned char)); // copy the content picture on the host
hipMemcpy(pixels_dev, host_pixels, size * sizeof(unsigned char), hipMemcpyHostToDevice); // copy the content of the host on the allocated array
// allow to transfer the picture from the host memory to the device memory
int block_number = (w * h + 1023) / 1024; // number of blocks needed so that every pixel is covered at 1024 threads per block
hipLaunchKernelGGL(( conv_global), dim3(block_number), dim3(1024), 0, 0, pixels_dev, output_dev, w, h); // applying the filter
hipMemcpy(host_pixels, output_dev, size * sizeof(unsigned char), hipMemcpyDeviceToHost); // transfer the new picture filtered from the device to the host memory
memcpy(pixels, host_pixels, size);
// Free Memory allocated
hipHostFree(host_pixels);
hipFree(pixels_dev);
hipFree(output_dev);
return timer.stop(); // stop the timer
}
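// Aside (not part of the exercise): when mapping one thread per pixel the block count
// must be rounded up, otherwise the last partial block of pixels is never processed.
// The usual integer idiom, wrapped in an illustrative helper:
static inline int blocks_for(int work_items, int threads_per_block)
{
    return (work_items + threads_per_block - 1) / threads_per_block;  // ceiling division
}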
__global__ void conv_texture(hipTextureObject_t src, unsigned char* dest, int32_t w, int32_t h){
// todo: write a kernel to apply the given filter on the given image stored as a texture
}
float conv_texture_gpu(unsigned char* pixels, int32_t w, int32_t h){
// todo: write the code that manages memory (texture memory) and invokes the kernel conv_global, it should return the running time
return 0.0;
}
int main(int argc, char** argv){
SDL_Event event;
bool withtex = false;
// Initialize SDL
if( SDL_Init(SDL_INIT_VIDEO) < 0 ) {
fprintf(stderr, "Couldn't initialize SDL: %s\n", SDL_GetError());
exit(1);
}
atexit(SDL_Quit);
if(argc == 1){
exit(1);
}
// Read image and option
IMG_Init(IMG_INIT_PNG);
surf = IMG_Load(argv[1]);
if(surf == NULL){
fprintf(stderr, "Error loading image.\n");
exit(1);
}
width = surf->w;
height = surf->h;
SDL_SetSurfaceRLE(surf, 1);
// Initialize convolution kernel
conv_t conv_type;
if(argc >= 3){
if (strcmp(argv[2], "identity") == 0) conv_type = CONV_IDENTITY;
else if (strcmp(argv[2], "edge") == 0) conv_type= CONV_EDGE;
else if (strcmp(argv[2], "sharp") == 0) conv_type= CONV_SHARP;
else if (strcmp(argv[2], "gauss") == 0) conv_type = CONV_GAUSS;
else conv_type = CONV_IDENTITY;
}
switch(conv_type){
case CONV_EDGE:
filter[0] = -1; filter[1] = -1; filter[2] = -1;
filter[3] = -1; filter[4] = 8; filter[5] = -1;
filter[6] = -1; filter[7] = -1; filter[8] = -1;
break;
case CONV_SHARP:
filter[0] = 0; filter[1] = -1; filter[2] = 0;
filter[3] = -1; filter[4] = 5; filter[5] = -1;
filter[6] = 0; filter[7] = -1; filter[8] = 0;
break;
case CONV_GAUSS:
filter[0] = 1.0f/16; filter[1] = 1.0f/8; filter[2] = 1.0f/16;
filter[3] = 1.0f/8; filter[4] = 1.0f/4; filter[5] = 1.0f/8;
filter[6] = 1.0f/16; filter[7] = 1.0f/8; filter[8] = 1.0f/16;
break;
default:
filter[0] = 0; filter[1] = 0; filter[2] = 0;
filter[3] = 0; filter[4] = 1; filter[5] = 0;
filter[6] = 0; filter[7] = 0; filter[8] = 0;
break;
}
hipMemcpyToSymbolAsync(filter_device, filter, sizeof(float)*9, 0, hipMemcpyHostToDevice);
if(argc >= 4){
if(strcmp(argv[3], "texture") == 0) withtex = true;
}
// Create window
screen = SDL_CreateWindow("Original",
100,
100,
width, height, SDL_WINDOW_SHOWN);
if ( screen == NULL ) {
fprintf(stderr, "Couldn't set up window: %s\n", SDL_GetError());
exit(1);
}
screen_res = SDL_CreateWindow("Filtered",
300,
300,
width, height, SDL_WINDOW_SHOWN);
if ( screen_res == NULL ) {
fprintf(stderr, "Couldn't set up window: %s\n", SDL_GetError());
exit(1);
}
// Initialize CUDA
hipSetDeviceFlags(hipDeviceScheduleBlockingSync);
// Create renderer and texture
ren = SDL_CreateRenderer(screen, -1, SDL_RENDERER_ACCELERATED | SDL_RENDERER_PRESENTVSYNC);
tex = SDL_CreateTextureFromSurface(ren, surf);
// Show image
SDL_RenderCopy(ren, tex, NULL, NULL);
SDL_RenderPresent(ren);
// Compute
SDL_LockSurface(surf);
float elapsed;
if(withtex){
elapsed = conv_texture_gpu((unsigned char*)surf->pixels, width, height);
}else{
elapsed = conv_global_gpu((unsigned char*)surf->pixels, width, height);
}
SDL_UnlockSurface(surf);
// Show computed image
ren_res = SDL_CreateRenderer(screen_res, -1, SDL_RENDERER_ACCELERATED | SDL_RENDERER_PRESENTVSYNC);
tex_res = SDL_CreateTextureFromSurface(ren_res, surf);
SDL_RenderCopy(ren_res, tex_res, NULL, NULL);
SDL_RenderPresent(ren_res);
SDL_FreeSurface(surf);
while (1) {
SDL_WaitEvent(&event);
if ((event.type == SDL_QUIT) || ((event.type == SDL_WINDOWEVENT) && (event.window.event == SDL_WINDOWEVENT_CLOSE))) break;
}
char s[100];
sprintf(s, "Kernel execution time: %.4fms", elapsed);
SDL_ShowSimpleMessageBox(SDL_MESSAGEBOX_INFORMATION, "Timing", s, screen);
SDL_DestroyTexture(tex);
SDL_DestroyRenderer(ren);
SDL_DestroyWindow(screen);
SDL_DestroyWindow(screen_res);
exit(0);
}
|
d59f02f77627c10dad577994873aa423ccec341a.cu
|
#include "SDL_image.h"
#include <stdio.h>
#include <time.h>
#include <stdint.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include "cuStopwatch.cu"
#include <iterator>
#include <iostream>
enum conv_t{
CONV_IDENTITY,
CONV_EDGE,
CONV_SHARP,
CONV_GAUSS
};
SDL_Window *screen;
SDL_Window *screen_res;
SDL_Renderer *ren;
SDL_Renderer *ren_res;
SDL_Texture *tex;
SDL_Texture *tex_res;
SDL_Surface *surf;
int32_t width, height;
float filter[9];
float __constant__ filter_device[9];
// Check if the index on the row is reachable
__device__ int checkRowPixels(int row, int currentIdx, int width, int height) {
if(row < 0 || row == height) {
return 0;
}
return width * 3 * currentIdx;
}
// Check if the index on the column is reachable
__device__ int checkColumnPixels(int column, int currentIdy, int width) {
if(column < 0 || column == width) {
return 0;
}
return currentIdy * 3;
}
// Compute and apply on the picture the given filter
__device__ char compute_filter(const unsigned char* src, int tidx, int tidy, int w, int h, int32_t color) {
int result = 0;
int row = tidx / w; // representation of 2D coordinate x
int column = tidy / h; // representation of 2D coordinate y
for(int i = -1; i <= 1 ; i++){
row = checkRowPixels(row + i, i, w, h); // check if row case we want to retrieve is outside the table
for(int j = -1; j <= 1; j++) {
column = checkColumnPixels(column + j, j, w); // check if column case we want to retrieve is outside the table
result += filter_device[(i+1)*3+(j+1)] * src[tidx*3 + tidy*3 + row + column + color]; // tids * 3 / three colors represented
// operation to get the good index (0, 1, 2...)
}
}
return (result < 0 ? 0 : (result > 255 ? 255 : result)); // the value of a color of a pixel can range from 0 to 255
// so if we want a nice image we have to truncate the value
}
/**
Apply the given filter on the given image stored in the global memory
*/
__global__ void conv_global(const unsigned char* src, unsigned char* dest, int32_t w, int32_t h){
int tIdx = blockDim.x * blockIdx.x + threadIdx.x; // identifier of thread along x
int tIdy = blockDim.y * blockIdx.y + threadIdx.y; // identifier of thread along y
if(tIdx < w*h) { // one thread per pixel: make sure this thread maps to a pixel inside the image
for(int color = 0; color < 3; color++) { // loop on the three colors
dest[tIdx*3 + color] = compute_filter(src, tIdx, tIdy, w, h, color); // compute and store in the dest the value of each pixel after passed the filter
}
}
}
// Allocate device memory, copy the image over, run the kernel, and copy the filtered result back to the host
float conv_global_gpu(unsigned char* pixels, int32_t w, int32_t h){
// todo: write the code that manages memory (global memory) and invokes the kernel conv_global, it should return the running time
int size = w * h * 3;
unsigned char *pixels_dev, *output_dev, *host_pixels;
cuStopwatch timer;
// start the timer
timer.start();
cudaHostAlloc((void **)&host_pixels, size * sizeof(unsigned char), cudaHostAllocDefault); // Allocate Host memory of the image from the device / get the image from the host and transfer to the gpu
cudaMalloc((void **)&pixels_dev, size * sizeof(unsigned char)); // allocate the original image for the GPU
cudaMalloc((void **)&output_dev, size * sizeof(unsigned char)); // allocate image filtered for the GPU
memcpy(host_pixels, pixels, size * sizeof(unsigned char)); // copy the content picture on the host
cudaMemcpy(pixels_dev, host_pixels, size * sizeof(unsigned char), cudaMemcpyHostToDevice); // copy the content of the host on the allocated array
// allow to transfer the picture from the host memory to the device memory
int block_number = (w * h + 1023) / 1024; // number of blocks needed so that every pixel is covered at 1024 threads per block
conv_global<<<block_number, 1024>>>(pixels_dev, output_dev, w, h); // applying the filter
cudaMemcpy(host_pixels, output_dev, size * sizeof(unsigned char), cudaMemcpyDeviceToHost); // transfer the new picture filtered from the device to the host memory
memcpy(pixels, host_pixels, size);
// Free Memory allocated
cudaFreeHost(host_pixels);
cudaFree(pixels_dev);
cudaFree(output_dev);
return timer.stop(); // stop the timer
}
__global__ void conv_texture(cudaTextureObject_t src, unsigned char* dest, int32_t w, int32_t h){
// todo: write a kernel to apply the given filter on the given image stored as a texture
}
float conv_texture_gpu(unsigned char* pixels, int32_t w, int32_t h){
// todo: write the code that manages memory (texture memory) and invokes the kernel conv_global, it should return the running time
return 0.0;
}
int main(int argc, char** argv){
SDL_Event event;
bool withtex = false;
// Initialize SDL
if( SDL_Init(SDL_INIT_VIDEO) < 0 ) {
fprintf(stderr, "Couldn't initialize SDL: %s\n", SDL_GetError());
exit(1);
}
atexit(SDL_Quit);
if(argc == 1){
exit(1);
}
// Read image and option
IMG_Init(IMG_INIT_PNG);
surf = IMG_Load(argv[1]);
if(surf == NULL){
fprintf(stderr, "Error loading image.\n");
exit(1);
}
width = surf->w;
height = surf->h;
SDL_SetSurfaceRLE(surf, 1);
// Initialize convolution kernel
conv_t conv_type;
if(argc >= 3){
if (strcmp(argv[2], "identity") == 0) conv_type = CONV_IDENTITY;
else if (strcmp(argv[2], "edge") == 0) conv_type= CONV_EDGE;
else if (strcmp(argv[2], "sharp") == 0) conv_type= CONV_SHARP;
else if (strcmp(argv[2], "gauss") == 0) conv_type = CONV_GAUSS;
else conv_type = CONV_IDENTITY;
}
switch(conv_type){
case CONV_EDGE:
filter[0] = -1; filter[1] = -1; filter[2] = -1;
filter[3] = -1; filter[4] = 8; filter[5] = -1;
filter[6] = -1; filter[7] = -1; filter[8] = -1;
break;
case CONV_SHARP:
filter[0] = 0; filter[1] = -1; filter[2] = 0;
filter[3] = -1; filter[4] = 5; filter[5] = -1;
filter[6] = 0; filter[7] = -1; filter[8] = 0;
break;
case CONV_GAUSS:
filter[0] = 1.0f/16; filter[1] = 1.0f/8; filter[2] = 1.0f/16;
filter[3] = 1.0f/8; filter[4] = 1.0f/4; filter[5] = 1.0f/8;
filter[6] = 1.0f/16; filter[7] = 1.0f/8; filter[8] = 1.0f/16;
break;
default:
filter[0] = 0; filter[1] = 0; filter[2] = 0;
filter[3] = 0; filter[4] = 1; filter[5] = 0;
filter[6] = 0; filter[7] = 0; filter[8] = 0;
break;
}
cudaMemcpyToSymbolAsync(filter_device, filter, sizeof(float)*9, 0, cudaMemcpyHostToDevice);
if(argc >= 4){
if(strcmp(argv[3], "texture") == 0) withtex = true;
}
// Create window
screen = SDL_CreateWindow("Original",
100,
100,
width, height, SDL_WINDOW_SHOWN);
if ( screen == NULL ) {
fprintf(stderr, "Couldn't set up window: %s\n", SDL_GetError());
exit(1);
}
screen_res = SDL_CreateWindow("Filtered",
300,
300,
width, height, SDL_WINDOW_SHOWN);
if ( screen_res == NULL ) {
fprintf(stderr, "Couldn't set up window: %s\n", SDL_GetError());
exit(1);
}
// Initialize CUDA
cudaSetDeviceFlags(cudaDeviceScheduleBlockingSync);
// Create renderer and texture
ren = SDL_CreateRenderer(screen, -1, SDL_RENDERER_ACCELERATED | SDL_RENDERER_PRESENTVSYNC);
tex = SDL_CreateTextureFromSurface(ren, surf);
// Show image
SDL_RenderCopy(ren, tex, NULL, NULL);
SDL_RenderPresent(ren);
// Compute
SDL_LockSurface(surf);
float elapsed;
if(withtex){
elapsed = conv_texture_gpu((unsigned char*)surf->pixels, width, height);
}else{
elapsed = conv_global_gpu((unsigned char*)surf->pixels, width, height);
}
SDL_UnlockSurface(surf);
// Show computed image
ren_res = SDL_CreateRenderer(screen_res, -1, SDL_RENDERER_ACCELERATED | SDL_RENDERER_PRESENTVSYNC);
tex_res = SDL_CreateTextureFromSurface(ren_res, surf);
SDL_RenderCopy(ren_res, tex_res, NULL, NULL);
SDL_RenderPresent(ren_res);
SDL_FreeSurface(surf);
while (1) {
SDL_WaitEvent(&event);
if ((event.type == SDL_QUIT) || ((event.type == SDL_WINDOWEVENT) && (event.window.event == SDL_WINDOWEVENT_CLOSE))) break;
}
char s[100];
sprintf(s, "Kernel execution time: %.4fms", elapsed);
SDL_ShowSimpleMessageBox(SDL_MESSAGEBOX_INFORMATION, "Timing", s, screen);
SDL_DestroyTexture(tex);
SDL_DestroyRenderer(ren);
SDL_DestroyWindow(screen);
SDL_DestroyWindow(screen_res);
exit(0);
}
|
f07a23274b849af26993bdbe7630d7df1e1f7b94.hip
|
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/native/UnaryOps.h>
#include <limits>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/Math.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/hip/JitLoops.cuh>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/hip/Math.cuh>
#include <ATen/native/hip/jit_utils.h>
#include <ATen/NumericUtils.h>
#include <c10/core/Scalar.h>
#include <c10/hip/HIPMathCompat.h>
#include <c10/util/complex.h>
namespace at {
namespace native {
namespace {
const char scaled_modified_bessel_k1_name[] = "scaled_modified_bessel_k1_forward";
void scaled_modified_bessel_k1_kernel_cuda(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "scaled_modified_bessel_k1_cuda", [&]() {
jitted_gpu_kernel<scaled_modified_bessel_k1_name, scalar_t, scalar_t, 1>(iterator, scaled_modified_bessel_k1_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "scaled_modified_bessel_k1_cuda", [&]() {
gpu_kernel(iterator, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return scaled_modified_bessel_k1_forward(a);
});
});
#endif // AT_USE_JITERATOR()
}
}
REGISTER_DISPATCH(special_scaled_modified_bessel_k1_stub, &scaled_modified_bessel_k1_kernel_cuda);
} // namespace native
} // namespace at
|
f07a23274b849af26993bdbe7630d7df1e1f7b94.cu
|
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/native/UnaryOps.h>
#include <limits>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/Math.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/Math.cuh>
#include <ATen/native/cuda/jit_utils.h>
#include <ATen/NumericUtils.h>
#include <c10/core/Scalar.h>
#include <c10/cuda/CUDAMathCompat.h>
#include <c10/util/complex.h>
namespace at {
namespace native {
namespace {
const char scaled_modified_bessel_k1_name[] = "scaled_modified_bessel_k1_forward";
void scaled_modified_bessel_k1_kernel_cuda(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "scaled_modified_bessel_k1_cuda", [&]() {
jitted_gpu_kernel<scaled_modified_bessel_k1_name, scalar_t, scalar_t, 1>(iterator, scaled_modified_bessel_k1_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "scaled_modified_bessel_k1_cuda", [&]() {
gpu_kernel(iterator, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return scaled_modified_bessel_k1_forward(a);
});
});
#endif // AT_USE_JITERATOR()
}
}
REGISTER_DISPATCH(special_scaled_modified_bessel_k1_stub, &scaled_modified_bessel_k1_kernel_cuda);
} // namespace native
} // namespace at
|
e399b373b1783e8b0c7d38c98c09379a126a7a01.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "utils.h"
#include <stdlib.h>
#include "life_kernel.cu"
void init_data(int * domain, int domain_x, int domain_y) {
for(int i = 0; i != domain_y; ++i) {
for(int j = 0; j != domain_x; ++j) {
domain[i * domain_x + j] = rand() % 3;
}
}
}
// Color display code contributed by Louis Beziaud, Simon Bihel and Rémi Hutin, PPAR 2016/2017
void print_domain(int* domain, int domain_x, int domain_y, int* red, int* blue) {
if (red != NULL) *red = 0;
if (blue != NULL) *blue = 0;
for(int y = 0; y < domain_y; y++) {
for(int x = 0; x < domain_x; x++) {
int cell = domain[y * domain_x + x];
switch(cell) {
case 0:
printf("\033[40m \033[0m");
break;
case 1:
printf("\033[41m \033[0m");
break;
case 2:
printf("\033[44m \033[0m");
break;
default:
break;
}
if(red != NULL && cell == 1) {
(*red)++;
} else if(blue != NULL && cell == 2) {
(*blue)++;
}
}
printf("\n");
}
}
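// Reading aid: main() below advances the automaton by ping-ponging between two device
// buffers - step i reads domain_gpu[i%2] and writes domain_gpu[(i+1)%2], so after 'steps'
// iterations the result lives in domain_gpu[steps%2]. A trivial restatement of that indexing:
static inline int life_read_buffer(int step) { return step % 2; }
static inline int life_write_buffer(int step) { return (step + 1) % 2; }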
int main(int argc, char ** argv)
{
// Definition of parameters
int domain_x = 64; // Multiple of threads_per_block * cells_per_word
int domain_y = 64;
int cells_per_word = 1;
int steps = 10;
int threads_per_block = 64;
int blocks_x = domain_x / (threads_per_block * cells_per_word);
int blocks_y = domain_y;
dim3 grid(blocks_x, blocks_y); // CUDA grid dimensions
dim3 threads(threads_per_block); // CUDA block dimensions
// Allocation of arrays
int * domain_gpu[2] = {NULL, NULL};
// Arrays of dimensions domain.x * domain.y
size_t domain_size = domain_x * domain_y / cells_per_word * sizeof(int);
CUDA_SAFE_CALL(hipMalloc((void**)&domain_gpu[0], domain_size));
CUDA_SAFE_CALL(hipMalloc((void**)&domain_gpu[1], domain_size));
int * domain_cpu = (int*)malloc(domain_size);
// Arrays of dimensions pitch * domain.y
init_data(domain_cpu, domain_x, domain_y);
CUDA_SAFE_CALL(hipMemcpy(domain_gpu[0], domain_cpu, domain_size, hipMemcpyHostToDevice));
// Timer initialization
hipEvent_t start, stop;
CUDA_SAFE_CALL(hipEventCreate(&start));
CUDA_SAFE_CALL(hipEventCreate(&stop));
// Start timer
CUDA_SAFE_CALL(hipEventRecord(start, 0));
// Kernel execution
int shared_mem_size = threads_per_block * sizeof(int);
for(int i = 0; i < steps; i++) {
hipLaunchKernelGGL(( life_kernel), dim3(grid), dim3(threads), shared_mem_size , 0, domain_gpu[i%2], domain_gpu[(i+1)%2], domain_x, domain_y);
}
// Stop timer
CUDA_SAFE_CALL(hipEventRecord(stop, 0));
CUDA_SAFE_CALL(hipEventSynchronize(stop));
float elapsedTime;
CUDA_SAFE_CALL(hipEventElapsedTime(&elapsedTime, start, stop)); // In ms
printf("GPU time: %f ms\n", elapsedTime);
CUDA_SAFE_CALL(hipEventDestroy(start));
CUDA_SAFE_CALL(hipEventDestroy(stop));
// Get results back
CUDA_SAFE_CALL(hipMemcpy(domain_cpu, domain_gpu[steps%2], domain_size, hipMemcpyDeviceToHost));
CUDA_SAFE_CALL(hipFree(domain_gpu[0]));
CUDA_SAFE_CALL(hipFree(domain_gpu[1]));
// Count colors
int red = 0;
int blue = 0;
print_domain(domain_cpu, domain_x, domain_y, &red, &blue);
printf("Step: %d Red/Blue cells: %d/%d\n", steps, red, blue);
free(domain_cpu);
return 0;
}
|
e399b373b1783e8b0c7d38c98c09379a126a7a01.cu
|
#include "utils.h"
#include <stdlib.h>
#include "life_kernel.cu"
void init_data(int * domain, int domain_x, int domain_y) {
for(int i = 0; i != domain_y; ++i) {
for(int j = 0; j != domain_x; ++j) {
domain[i * domain_x + j] = rand() % 3;
}
}
}
// Color display code contributed by Louis Beziaud, Simon Bihel and Rémi Hutin, PPAR 2016/2017
void print_domain(int* domain, int domain_x, int domain_y, int* red, int* blue) {
if (red != NULL) *red = 0;
if (blue != NULL) *blue = 0;
for(int y = 0; y < domain_y; y++) {
for(int x = 0; x < domain_x; x++) {
int cell = domain[y * domain_x + x];
switch(cell) {
case 0:
printf("\033[40m \033[0m");
break;
case 1:
printf("\033[41m \033[0m");
break;
case 2:
printf("\033[44m \033[0m");
break;
default:
break;
}
if(red != NULL && cell == 1) {
(*red)++;
} else if(blue != NULL && cell == 2) {
(*blue)++;
}
}
printf("\n");
}
}
int main(int argc, char ** argv)
{
// Definition of parameters
int domain_x = 64; // Multiple of threads_per_block * cells_per_word
int domain_y = 64;
int cells_per_word = 1;
int steps = 10;
int threads_per_block = 64;
int blocks_x = domain_x / (threads_per_block * cells_per_word);
int blocks_y = domain_y;
dim3 grid(blocks_x, blocks_y); // CUDA grid dimensions
dim3 threads(threads_per_block); // CUDA block dimensions
// Allocation of arrays
int * domain_gpu[2] = {NULL, NULL};
// Arrays of dimensions domain.x * domain.y
size_t domain_size = domain_x * domain_y / cells_per_word * sizeof(int);
CUDA_SAFE_CALL(cudaMalloc((void**)&domain_gpu[0], domain_size));
CUDA_SAFE_CALL(cudaMalloc((void**)&domain_gpu[1], domain_size));
int * domain_cpu = (int*)malloc(domain_size);
// Arrays of dimensions pitch * domain.y
init_data(domain_cpu, domain_x, domain_y);
CUDA_SAFE_CALL(cudaMemcpy(domain_gpu[0], domain_cpu, domain_size, cudaMemcpyHostToDevice));
// Timer initialization
cudaEvent_t start, stop;
CUDA_SAFE_CALL(cudaEventCreate(&start));
CUDA_SAFE_CALL(cudaEventCreate(&stop));
// Start timer
CUDA_SAFE_CALL(cudaEventRecord(start, 0));
// Kernel execution
int shared_mem_size = threads_per_block * sizeof(int);
for(int i = 0; i < steps; i++) {
life_kernel<<< grid, threads, shared_mem_size >>>(domain_gpu[i%2], domain_gpu[(i+1)%2], domain_x, domain_y);
}
// Stop timer
CUDA_SAFE_CALL(cudaEventRecord(stop, 0));
CUDA_SAFE_CALL(cudaEventSynchronize(stop));
float elapsedTime;
CUDA_SAFE_CALL(cudaEventElapsedTime(&elapsedTime, start, stop)); // In ms
printf("GPU time: %f ms\n", elapsedTime);
CUDA_SAFE_CALL(cudaEventDestroy(start));
CUDA_SAFE_CALL(cudaEventDestroy(stop));
// Get results back
CUDA_SAFE_CALL(cudaMemcpy(domain_cpu, domain_gpu[steps%2], domain_size, cudaMemcpyDeviceToHost));
CUDA_SAFE_CALL(cudaFree(domain_gpu[0]));
CUDA_SAFE_CALL(cudaFree(domain_gpu[1]));
// Count colors
int red = 0;
int blue = 0;
print_domain(domain_cpu, domain_x, domain_y, &red, &blue);
printf("Step: %d Red/Blue cells: %d/%d\n", steps, red, blue);
free(domain_cpu);
return 0;
}
|
6a73745ae2c00203dfb2efe054c8c59d7803fa91.hip
|
// !!! This is a file automatically generated by hipify!!!
//pass
//--blockDim=32 --gridDim=1
#include <hip/hip_runtime.h>
__global__ void test_Prog(int *A, int N) {
const int tid = blockIdx.x*blockDim.x + threadIdx.x;
for(int d = N/2; d > 0; d = d / 2)
{
int tmp=A[tid + d];
for (int i = 0; i < N; ++i)
{
int tmp2=A[tid];
int t2=tmp2;
int t32=t2;
if (tid < d) {
A[tid] = tmp + t32;
}
}
}
}
|
6a73745ae2c00203dfb2efe054c8c59d7803fa91.cu
|
//pass
//--blockDim=32 --gridDim=1
#include <cuda.h>
__global__ void test_Prog(int *A, int N) {
const int tid = blockIdx.x*blockDim.x + threadIdx.x;
for(int d = N/2; d > 0; d = d / 2)
{
int tmp=A[tid + d];
for (int i = 0; i < N; ++i)
{
int tmp2=A[tid];
int t2=tmp2;
int t32=t2;
if (tid < d) {
A[tid] = tmp + t32;
}
}
}
}
|
882e3da18d6b0855b39d17650dd55c13fbc134d6.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<acocuda.h>
__device__ void eval(int* solptr,float* phobjpnt, joints* grp, int npts, int ncfg)// objective function, to be customized
{
float phinc=0;
float nrm1[6]={0,0,0,0,0,0};
float nrm2=0;
for(int e=0;e<npts-1;e++)
{
nrm1[0] = (*(grp+e*ncfg+solptr[e+threadIdx.x*npts])).jointsval[0]-(*(grp+(e+1)*ncfg+solptr[e+(threadIdx.x*npts)+1])).jointsval[0];
nrm1[1] = (*(grp+e*ncfg+solptr[e+threadIdx.x*npts])).jointsval[1]-(*(grp+(e+1)*ncfg+solptr[e+(threadIdx.x*npts)+1])).jointsval[1];
nrm1[2] = (*(grp+e*ncfg+solptr[e+threadIdx.x*npts])).jointsval[2]-(*(grp+(e+1)*ncfg+solptr[e+(threadIdx.x*npts)+1])).jointsval[2];
nrm1[3] = (*(grp+e*ncfg+solptr[e+threadIdx.x*npts])).jointsval[3]-(*(grp+(e+1)*ncfg+solptr[e+(threadIdx.x*npts)+1])).jointsval[3];
nrm1[4] = (*(grp+e*ncfg+solptr[e+threadIdx.x*npts])).jointsval[4]-(*(grp+(e+1)*ncfg+solptr[e+(threadIdx.x*npts)+1])).jointsval[4];
nrm1[5] = (*(grp+e*ncfg+solptr[e+threadIdx.x*npts])).jointsval[5]-(*(grp+(e+1)*ncfg+solptr[e+(threadIdx.x*npts)+1])).jointsval[5];
nrm2 = sqrt(nrm1[0]*nrm1[0]+nrm1[1]*nrm1[1]+nrm1[2]*nrm1[2]+nrm1[3]*nrm1[3]+nrm1[4]*nrm1[4]+nrm1[5]*nrm1[5]) ;
phinc = phinc + __fdividef(1, nrm2);
}
*phobjpnt=1000*__fdividef(phinc,npts-1);
}
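// Reading aid (not used by the solver): a host-side restatement of the objective above.
// For one candidate solution the score is 1000 times the mean, over consecutive path
// points, of 1 / ||q_next - q_curr|| in 6-D joint space, so solutions whose chosen
// configurations stay close together receive more pheromone. The name is illustrative.
static float eval_reference(const joints *graph, const int *sol, int npts, int ncfg)
{
    float acc = 0.0f;
    for (int e = 0; e < npts - 1; e++)
    {
        const float *q0 = graph[e * ncfg + sol[e]].jointsval;
        const float *q1 = graph[(e + 1) * ncfg + sol[e + 1]].jointsval;
        float sq = 0.0f;
        for (int k = 0; k < 6; k++)           // squared Euclidean distance over the 6 joints
        {
            float d = q0[k] - q1[k];
            sq += d * d;
        }
        acc += 1.0f / sqrtf(sq);              // closer consecutive configurations score higher
    }
    return 1000.0f * acc / (npts - 1);
}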
__global__ void Cycle(int n_pnt,int n_conf,int n_threads,joints* dev_graph_ptr,unsigned int seed,int n_cycles,float phmin,float phmax,float phdec)
{
float rnd_sel,prev_ph,tot_ph;
hiprandState_t state;
extern __shared__ int shmem[];
int *sol = (int *)&shmem;
float *phobj = (float *)&shmem[n_pnt*n_threads];
hiprand_init(clock64() ,threadIdx.x, 0, &state);
__syncthreads();
for(int cyc=0;cyc<n_cycles;cyc++) //CYCLE NUMBER
{
for (int pnt=0 ; pnt<n_pnt ; pnt++) //PROBABILISTIC SELECTION IMPLEMENTATION
{
prev_ph=0;
tot_ph =0;
rnd_sel=hiprand_uniform(&state);
for(int cht=0;cht<n_conf;cht++) // sum of all pheromone over this point's configurations
{
tot_ph=tot_ph+(*(dev_graph_ptr+pnt*n_conf+cht)).ph;
}
rnd_sel = rnd_sel * tot_ph;
for(int conf=0;conf<n_conf;conf++)
{
prev_ph=prev_ph+(*(dev_graph_ptr+pnt*n_conf+conf)).ph;
if(rnd_sel<=prev_ph)
{
sol[threadIdx.x*n_pnt + pnt]=conf;
break;
}
if(rnd_sel>prev_ph && conf==(n_conf-1)){
printf("BIG ERROR\n");
return;
}
}
}
__syncthreads();
eval(sol,&phobj[threadIdx.x],dev_graph_ptr,n_pnt,n_conf); //calcolo ph per ogni soluzione totale
// printf("ph obj : %f\n",phobj[threadIdx.x]);
if(threadIdx.x<n_pnt)
{
for(int mm=0;mm<n_conf;mm++)
{
if((*(dev_graph_ptr+threadIdx.x*n_conf+mm)).ph > phmin && (*(dev_graph_ptr+threadIdx.x*n_conf+mm)).ch)
{
atomicMul(&(*(dev_graph_ptr+threadIdx.x*n_conf+mm)).ph,phdec); //FIX
//atomicAdd(&(*(dev_graph_ptr+threadIdx.x*n_conf+mm)).ph,0.01); //FIX
}
}
}
__syncthreads();
if(threadIdx.x<n_pnt)
{
for(int q=0;q<n_threads;q++)
{
if((*(dev_graph_ptr+threadIdx.x*n_conf+sol[q*n_pnt+threadIdx.x])).ph < phmax )
{
atomicAdd(&(*(dev_graph_ptr+threadIdx.x*n_conf+sol[q*n_pnt+threadIdx.x])).ph,phobj[q]);
}
}
}
__syncthreads();
}
}
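// Reading aid: the configuration choice inside Cycle() is standard roulette-wheel
// (pheromone-proportional) selection over one path point. A plain host-side sketch,
// with rnd01 standing in for the device-side uniform draw; the name is illustrative.
static int roulette_select_reference(const float *ph, int ncfg, float rnd01)
{
    float total = 0.0f;
    for (int c = 0; c < ncfg; c++) total += ph[c];   // total pheromone deposited on this point
    float threshold = rnd01 * total;                 // uniform draw rescaled onto the total
    float running = 0.0f;
    for (int c = 0; c < ncfg; c++)
    {
        running += ph[c];
        if (threshold <= running) return c;          // first configuration whose cumulative sum covers the draw
    }
    return ncfg - 1;                                 // numerical safety net
}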
///////////CLASS METHODS
AcoCuda::AcoCuda(int n_pointsex, int n_confex,int ncyc,float phminex,float phmaxex,float phdecex)
{
n_conf=n_confex;
n_points=n_pointsex;
n_cycles=ncyc;
if(n_points<=96){
n_threads=static_cast<int>(ceil(static_cast<float>(n_points)/32)*32); //32 or 16??
}
else
{
n_threads=n_points;
}
n_blocks=1;
phmin=phminex;
phmax=phmaxex;
phdec=phdecex;
printf("points: %d\n",n_points);
printf("config: %d\n",n_conf);
printf("threads: %d\n",n_threads);
printf("blocks: %d\n",n_blocks);
printf("cycles: %d\n",n_cycles);
printf("ph min: %f\n",phmin);
printf("ph max: %f\n",phmax);
printf("ph evaporation: %f\n",phdec);
this->shrbytes =(n_points*n_threads)*sizeof(int)+n_threads*sizeof(float);
printf("shared bytes: %lu \n",shrbytes);
thrust::host_vector<joints> tmp(n_pointsex*n_confex);
host_graph=tmp;
}
void AcoCuda::LoadGraph()
{
srand(time(NULL));
for(thrust::host_vector<joints>::iterator j = host_graph.begin(); j != host_graph.end(); j++)
{
if(rand()<(RAND_MAX*0.6))
{
(*j).ch=true;
(*j).jointsval[0]=-20+static_cast <float> (rand()) / static_cast <float> (RAND_MAX/40);
(*j).jointsval[1]=-30+static_cast <float> (rand()) / static_cast <float> (RAND_MAX/60);
(*j).jointsval[2]=-30+static_cast <float> (rand()) / static_cast <float> (RAND_MAX/60);
(*j).jointsval[3]=-180+static_cast <float> (rand()) / static_cast <float> (RAND_MAX/360);
(*j).jointsval[4]=-180+static_cast <float> (rand()) / static_cast <float> (RAND_MAX/360);
(*j).jointsval[5]=-180+static_cast <float> (rand()) / static_cast <float> (RAND_MAX/360);
}
else
{
(*j).ch=false;
(*j).jointsval[0]=0;
(*j).jointsval[1]=0;
(*j).jointsval[2]=0;
(*j).jointsval[3]=0;
(*j).jointsval[4]=0;
(*j).jointsval[5]=0;
}
}
}
void AcoCuda::PhInit()
{
float n_act;
int ind;
ind=0;
std::vector<float> ph_ind;
ph_ind.clear();
for(thrust::host_vector<joints>::iterator j = host_graph.begin(); j != host_graph.end();){
n_act=0;
for (int u=0;u<n_conf;u++)
{
n_act=n_act+(*j).ch;
j++;
}
n_act = 1/n_act;
ph_ind.push_back(n_act);
}
for(thrust::host_vector<joints>::iterator z = host_graph.begin(); z != host_graph.end();){
for (int uu=0;uu<n_conf;uu++)
{
if ((*z).ch){
(*z).ph=ph_ind[ind];
}
else{
(*z).ph=0;
}
z++;
}
ind++;
}
device_graph=host_graph;
device_graph_ptr = thrust::raw_pointer_cast((device_graph.data()));
host_graph_ptr = thrust::raw_pointer_cast((host_graph.data()));
}
/////////////METHODS
void AcoCuda::RunCycle() //launch cuda kernel
{
hipLaunchKernelGGL(( Cycle), dim3(n_blocks),dim3(n_threads),shrbytes, 0, n_points,n_conf,n_threads,device_graph_ptr,time(NULL),1,phmin,phmax,phdec);//<<<blocks,thread>>>
if (hipSuccess != hipDeviceSynchronize()) {
printf("ERROR in Cycle\n");
exit(-2);
}
}
void AcoCuda::print_file(bool jnts) //log data to external file
{
FILE *fp;
std::ostringstream name;
name << "log/" << "pnt" << n_points << "cnf" << n_conf << "cyc" << n_cycles << "phmin" << phmin << "phmax" << phmax << "phdec" << phdec;
fp = fopen(name.str().c_str(),"a");
joints* ptr = this->host_graph_ptr;
if(0){ //fix this
for(int i=0; i < n_points; i++)
{
for(int k=0;k<6;k++)
{
for(int j=0; j<n_conf; j++)
{
if ((*(ptr+j+n_conf*i)).jointsval[k]>0){
if ((*(ptr+j+n_conf*i)).jointsval[k] < 1000 && (*(ptr+j+n_conf*i)).jointsval[k] > 100) fprintf(fp," ");
if ((*(ptr+j+n_conf*i)).jointsval[k] < 100 && (*(ptr+j+n_conf*i)).jointsval[k] > 10) fprintf(fp," ");
if ((*(ptr+j+n_conf*i)).jointsval[k] < 10) fprintf(fp," ");
}
if ((*(ptr+j+n_conf*i)).jointsval[k]<0){
if ((*(ptr+j+n_conf*i)).jointsval[k] > -1000 && (*(ptr+j+n_conf*i)).jointsval[k] < -100) fprintf(fp," ");
if ((*(ptr+j+n_conf*i)).jointsval[k] > -100 && (*(ptr+j+n_conf*i)).jointsval[k] < -10) fprintf(fp," ");
if ((*(ptr+j+n_conf*i)).jointsval[k] > -10) fprintf(fp," ");
}
if ((*(ptr+j+n_conf*i)).jointsval[k]==0) fprintf(fp," ");
fprintf(fp," %.2f",(*(ptr+j+n_conf*i)).jointsval[k]);
}
fprintf(fp,"\n");
}
fprintf(fp,"\n\n\n");
}
}
for(int z=0; z < n_points*n_conf; z++){
if (ptr->ph < 1000 && ptr->ph > 100) fprintf(fp," ");
if (ptr->ph < 100 && ptr->ph > 10) fprintf(fp," ");
if (ptr->ph < 10) fprintf(fp," ");
fprintf(fp," %.2f",ptr->ph);
if (z%n_conf==(n_conf-1)) fprintf(fp,"\n");
ptr++;
}
fprintf(fp,"\n");
fclose(fp);
}
void AcoCuda::copytohost() //copy results from gpu to host
{
thrust::copy(this->device_graph.begin(),this->device_graph.end(),this->host_graph.begin());
hipDeviceSynchronize();
}
////////////MAIN
int main(int argc, char *argv[]){
float pointsnumber;
int configurations=10;
pointsnumber=atof(argv[1]);
// int ncyc=atoi(argv[2]);;
int ncyc = 500;
for(float gg=0.1;gg<=0.95;gg=gg+0.05){
AcoCuda test(pointsnumber,configurations,ncyc,0.15,2000,gg);//points,conf,cycles,phmin,phmax,phdec
test.LoadGraph();
test.PhInit();
for (int t=0;t<ncyc;t++){
test.RunCycle();
test.copytohost();
test.print_file(t==0);
}
// test is destroyed automatically at the end of each loop iteration
}
return 0;
}
|
882e3da18d6b0855b39d17650dd55c13fbc134d6.cu
|
#include<acocuda.h>
__device__ void eval(int* solptr,float* phobjpnt, joints* grp, int npts, int ncfg) // objective function (customize as needed): rewards solutions with short joint-space paths between consecutive points
{
float phinc=0;
float nrm1[6]={0,0,0,0,0,0};
float nrm2=0;
for(int e=0;e<npts-1;e++)
{
nrm1[0] = (*(grp+e*ncfg+solptr[e+threadIdx.x*npts])).jointsval[0]-(*(grp+(e+1)*ncfg+solptr[e+(threadIdx.x*npts)+1])).jointsval[0];
nrm1[1] = (*(grp+e*ncfg+solptr[e+threadIdx.x*npts])).jointsval[1]-(*(grp+(e+1)*ncfg+solptr[e+(threadIdx.x*npts)+1])).jointsval[1];
nrm1[2] = (*(grp+e*ncfg+solptr[e+threadIdx.x*npts])).jointsval[2]-(*(grp+(e+1)*ncfg+solptr[e+(threadIdx.x*npts)+1])).jointsval[2];
nrm1[3] = (*(grp+e*ncfg+solptr[e+threadIdx.x*npts])).jointsval[3]-(*(grp+(e+1)*ncfg+solptr[e+(threadIdx.x*npts)+1])).jointsval[3];
nrm1[4] = (*(grp+e*ncfg+solptr[e+threadIdx.x*npts])).jointsval[4]-(*(grp+(e+1)*ncfg+solptr[e+(threadIdx.x*npts)+1])).jointsval[4];
nrm1[5] = (*(grp+e*ncfg+solptr[e+threadIdx.x*npts])).jointsval[5]-(*(grp+(e+1)*ncfg+solptr[e+(threadIdx.x*npts)+1])).jointsval[5];
nrm2 = sqrt(nrm1[0]*nrm1[0]+nrm1[1]*nrm1[1]+nrm1[2]*nrm1[2]+nrm1[3]*nrm1[3]+nrm1[4]*nrm1[4]+nrm1[5]*nrm1[5]) ;
phinc = phinc + __fdividef(1, nrm2);
}
*phobjpnt=1000*__fdividef(phinc,npts-1);
}
__global__ void Cycle(int n_pnt,int n_conf,int n_threads,joints* dev_graph_ptr,unsigned int seed,int n_cycles,float phmin,float phmax,float phdec)
{
float rnd_sel,prev_ph,tot_ph;
curandState_t state;
extern __shared__ int shmem[];
int *sol = (int *)&shmem;
float *phobj = (float *)&shmem[n_pnt*n_threads];
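// dynamic shared memory layout: the first n_pnt*n_threads ints hold one candidate
// solution per thread (one configuration index per point), followed by n_threads
// floats with each thread's objective value; this matches shrbytes as computed in
// the AcoCuda constructor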
curand_init(clock64() ,threadIdx.x, 0, &state);
__syncthreads();
for(int cyc=0;cyc<n_cycles;cyc++) //CYCLE NUMBER
{
for (int pnt=0 ; pnt<n_pnt ; pnt++) //PROBABILISTIC SELECTION IMPLEMENTATION
{
prev_ph=0;
tot_ph =0;
rnd_sel=curand_uniform(&state);
for(int cht=0;cht<n_conf;cht++) // sum the pheromone over all configurations of this point
{
tot_ph=tot_ph+(*(dev_graph_ptr+pnt*n_conf+cht)).ph;
}
rnd_sel = rnd_sel * tot_ph;
for(int conf=0;conf<n_conf;conf++)
{
prev_ph=prev_ph+(*(dev_graph_ptr+pnt*n_conf+conf)).ph;
if(rnd_sel<=prev_ph)
{
sol[threadIdx.x*n_pnt + pnt]=conf;
break;
}
if(rnd_sel>prev_ph && conf==(n_conf-1)){
printf("BIG ERROR\n");
return;
}
}
}
__syncthreads();
eval(sol,&phobj[threadIdx.x],dev_graph_ptr,n_pnt,n_conf); // compute the pheromone contribution of this thread's complete solution
// printf("ph obj : %f\n",phobj[threadIdx.x]);
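// pheromone evaporation: each of the first n_pnt threads scales the pheromone of
// every configuration of its point by phdec while it stays above phmin; note that
// atomicMul on a float is not a standard CUDA intrinsic, so it is presumably
// provided by acocuda.h (the original marks this line with FIX)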
if(threadIdx.x<n_pnt)
{
for(int mm=0;mm<n_conf;mm++)
{
if((*(dev_graph_ptr+threadIdx.x*n_conf+mm)).ph > phmin && (*(dev_graph_ptr+threadIdx.x*n_conf+mm)).ch)
{
atomicMul(&(*(dev_graph_ptr+threadIdx.x*n_conf+mm)).ph,phdec); //FIX
//atomicAdd(&(*(dev_graph_ptr+threadIdx.x*n_conf+mm)).ph,0.01); //FIX
}
}
}
__syncthreads();
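// pheromone deposit: each of the first n_pnt threads adds, for its point, the
// objective value phobj[q] of every ant q to the configuration that ant selected,
// skipping configurations already at or above phmax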
if(threadIdx.x<n_pnt)
{
for(int q=0;q<n_threads;q++)
{
if((*(dev_graph_ptr+threadIdx.x*n_conf+sol[q*n_pnt+threadIdx.x])).ph < phmax )
{
atomicAdd(&(*(dev_graph_ptr+threadIdx.x*n_conf+sol[q*n_pnt+threadIdx.x])).ph,phobj[q]);
}
}
}
__syncthreads();
}
}
///////////CLASS METHODS
AcoCuda::AcoCuda(int n_pointsex, int n_confex,int ncyc,float phminex,float phmaxex,float phdecex)
{
n_conf=n_confex;
n_points=n_pointsex;
n_cycles=ncyc;
if(n_points<=96){
n_threads=static_cast<int>(ceil(static_cast<float>(n_points)/32)*32); // round up to a multiple of the warp size (32); original note: 32 or 16??
}
else
{
n_threads=n_points;
}
n_blocks=1;
phmin=phminex;
phmax=phmaxex;
phdec=phdecex;
printf("points: %d\n",n_points);
printf("config: %d\n",n_conf);
printf("threads: %d\n",n_threads);
printf("blocks: %d\n",n_blocks);
printf("cycles: %d\n",n_cycles);
printf("ph min: %f\n",phmin);
printf("ph max: %f\n",phmax);
printf("ph evaporation: %f\n",phdec);
this->shrbytes =(n_points*n_threads)*sizeof(int)+n_threads*sizeof(float);
printf("shared bytes: %lu \n",shrbytes);
thrust::host_vector<joints> tmp(n_pointsex*n_confex);
host_graph=tmp;
}
void AcoCuda::LoadGraph()
{
srand(time(NULL));
for(thrust::host_vector<joints>::iterator j = host_graph.begin(); j != host_graph.end(); j++)
{
if(rand()<(RAND_MAX*0.6)) // roughly 60% of graph nodes get a valid random configuration
{
(*j).ch=true;
(*j).jointsval[0]=-20+static_cast <float> (rand()) / static_cast <float> (RAND_MAX/40);
(*j).jointsval[1]=-30+static_cast <float> (rand()) / static_cast <float> (RAND_MAX/60);
(*j).jointsval[2]=-30+static_cast <float> (rand()) / static_cast <float> (RAND_MAX/60);
(*j).jointsval[3]=-180+static_cast <float> (rand()) / static_cast <float> (RAND_MAX/360);
(*j).jointsval[4]=-180+static_cast <float> (rand()) / static_cast <float> (RAND_MAX/360);
(*j).jointsval[5]=-180+static_cast <float> (rand()) / static_cast <float> (RAND_MAX/360);
}
else
{
(*j).ch=false;
(*j).jointsval[0]=0;
(*j).jointsval[1]=0;
(*j).jointsval[2]=0;
(*j).jointsval[3]=0;
(*j).jointsval[4]=0;
(*j).jointsval[5]=0;
}
}
}
void AcoCuda::PhInit()
{
float n_act;
int ind;
ind=0;
std::vector<float> ph_ind;
ph_ind.clear();
for(thrust::host_vector<joints>::iterator j = host_graph.begin(); j != host_graph.end();){
n_act=0;
for (int u=0;u<n_conf;u++)
{
n_act=n_act+(*j).ch;
j++;
}
n_act = 1/n_act; // initial pheromone for this point: 1 / (number of active configurations)
ph_ind.push_back(n_act);
}
for(thrust::host_vector<joints>::iterator z = host_graph.begin(); z != host_graph.end();){
for (int uu=0;uu<n_conf;uu++)
{
if ((*z).ch){
(*z).ph=ph_ind[ind];
}
else{
(*z).ph=0;
}
z++;
}
ind++;
}
device_graph=host_graph;
device_graph_ptr = thrust::raw_pointer_cast((device_graph.data()));
host_graph_ptr = thrust::raw_pointer_cast((host_graph.data()));
}
/////////////METHODS
void AcoCuda::RunCycle() //launch cuda kernel
{
Cycle<<<n_blocks,n_threads,shrbytes>>>(n_points,n_conf,n_threads,device_graph_ptr,time(NULL),1,phmin,phmax,phdec);//<<<blocks,thread>>>
if (cudaSuccess != cudaDeviceSynchronize()) {
printf("ERROR in Cycle\n");
exit(-2);
}
}
void AcoCuda::print_file(bool jnts) //log data to external file
{
FILE *fp;
std::ostringstream name;
name << "log/" << "pnt" << n_points << "cnf" << n_conf << "cyc" << n_cycles << "phmin" << phmin << "phmax" << phmax << "phdec" << phdec;
fp = fopen(name.str().c_str(),"a");
joints* ptr = this->host_graph_ptr;
if(0){ // joint-value dump, currently disabled (original note: "fix this")
for(int i=0; i < n_points; i++)
{
for(int k=0;k<6;k++)
{
for(int j=0; j<n_conf; j++)
{
if ((*(ptr+j+n_conf*i)).jointsval[k]>0){
if ((*(ptr+j+n_conf*i)).jointsval[k] < 1000 && (*(ptr+j+n_conf*i)).jointsval[k] > 100) fprintf(fp," ");
if ((*(ptr+j+n_conf*i)).jointsval[k] < 100 && (*(ptr+j+n_conf*i)).jointsval[k] > 10) fprintf(fp," ");
if ((*(ptr+j+n_conf*i)).jointsval[k] < 10) fprintf(fp," ");
}
if ((*(ptr+j+n_conf*i)).jointsval[k]<0){
if ((*(ptr+j+n_conf*i)).jointsval[k] > -1000 && (*(ptr+j+n_conf*i)).jointsval[k] < -100) fprintf(fp," ");
if ((*(ptr+j+n_conf*i)).jointsval[k] > -100 && (*(ptr+j+n_conf*i)).jointsval[k] < -10) fprintf(fp," ");
if ((*(ptr+j+n_conf*i)).jointsval[k] > -10) fprintf(fp," ");
}
if ((*(ptr+j+n_conf*i)).jointsval[k]==0) fprintf(fp," ");
fprintf(fp," %.2f",(*(ptr+j+n_conf*i)).jointsval[k]);
}
fprintf(fp,"\n");
}
fprintf(fp,"\n\n\n");
}
}
for(int z=0; z < n_points*n_conf; z++){
if (ptr->ph < 1000 && ptr->ph > 100) fprintf(fp," ");
if (ptr->ph < 100 && ptr->ph > 10) fprintf(fp," ");
if (ptr->ph < 10) fprintf(fp," ");
fprintf(fp," %.2f",ptr->ph);
if (z%n_conf==(n_conf-1)) fprintf(fp,"\n");
ptr++;
}
fprintf(fp,"\n");
fclose(fp);
}
void AcoCuda::copytohost() //copy results from gpu to host
{
thrust::copy(this->device_graph.begin(),this->device_graph.end(),this->host_graph.begin());
cudaDeviceSynchronize();
}
////////////MAIN
int main(int argc, char *argv[]){
float pointsnumber;
int configurations=10;
pointsnumber=atof(argv[1]);
// int ncyc=atoi(argv[2]);;
int ncyc = 500;
for(float gg=0.1;gg<=0.95;gg=gg+0.05){
AcoCuda test(pointsnumber,configurations,ncyc,0.15,2000,gg);//points,conf,cycles,phmin,phmax,phdec
test.LoadGraph();
test.PhInit();
for (int t=0;t<ncyc;t++){
test.RunCycle();
test.copytohost();
test.print_file(t==0);
}
// test is destroyed automatically at the end of each loop iteration
}
return 0;
}
|
5da2dbac4a8e965e035fd4a9739e82bca283649d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef _MSC_VER
#define _USE_MATH_DEFINES // For M_PI
#endif // _MSC_VER
#include <cmath>
#include "caffe2/operators/roi_align_rotated_gradient_op.h"
#include <stdio.h>
#include <cfloat>
#include "caffe2/core/context_gpu.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
namespace {
template <typename T>
inline __device__ T gpu_atomic_add(const T val, T* address);
template <>
inline __device__ float gpu_atomic_add(const float val, float* address) {
return atomicAdd(address, val);
}
template <typename T>
__device__ void bilinear_interpolate_gradient(
const int height,
const int width,
T y,
T x,
T& w1,
T& w2,
T& w3,
T& w4,
int& x_low,
int& x_high,
int& y_low,
int& y_high,
const int index /* index for debug only*/) {
// deal with cases that inverse elements are out of feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width) {
// empty
w1 = w2 = w3 = w4 = 0.;
x_low = x_high = y_low = y_high = -1;
return;
}
if (y <= 0) {
y = 0;
}
if (x <= 0) {
x = 0;
}
y_low = (int)y;
x_low = (int)x;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = (T)y_low;
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = (T)x_low;
} else {
x_high = x_low + 1;
}
T ly = y - y_low;
T lx = x - x_low;
T hy = 1. - ly, hx = 1. - lx;
w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
return;
}
template <typename T>
__global__ void RoIAlignRotatedBackward(
const int nthreads,
const T* top_diff,
const int num_rois,
const T spatial_scale,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
const int sampling_ratio,
T* bottom_diff,
const T* bottom_rois,
bool continuous_coordinate) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const T* offset_bottom_rois = bottom_rois + n * 6;
int roi_batch_ind = offset_bottom_rois[0];
// Do not round
T roi_offset = continuous_coordinate ? T(0.5) : 0;
T roi_center_w = offset_bottom_rois[1] * spatial_scale - roi_offset;
T roi_center_h = offset_bottom_rois[2] * spatial_scale - roi_offset;
T roi_width = offset_bottom_rois[3] * spatial_scale;
T roi_height = offset_bottom_rois[4] * spatial_scale;
T theta = offset_bottom_rois[5] * M_PI / 180.0;
if (!continuous_coordinate) { // backward compatibility
// Force malformed ROIs to be 1x1
roi_width = c10::hip::compat::max(roi_width, (T)1.);
roi_height = c10::hip::compat::max(roi_height, (T)1.);
}
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
T* offset_bottom_diff =
bottom_diff + (roi_batch_ind * channels + c) * height * width;
int top_offset = (n * channels + c) * pooled_height * pooled_width;
const T* offset_top_diff = top_diff + top_offset;
const T top_diff_this_bin = offset_top_diff[ph * pooled_width + pw];
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio > 0)
? sampling_ratio
: ceil(roi_height / pooled_height); // e.g., = 2
int roi_bin_grid_w =
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
// roi_start_h and roi_start_w are computed wrt the center of RoI (x, y).
// Appropriate translation needs to be applied after.
T roi_start_h = -roi_height / 2.0;
T roi_start_w = -roi_width / 2.0;
T cosTheta = cos(theta);
T sinTheta = sin(theta);
// We do average (integral) pooling inside a bin
const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4
for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g., iy = 0, 1
{
const T yy = roi_start_h + ph * bin_size_h +
static_cast<T>(iy + .5f) * bin_size_h /
static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
const T xx = roi_start_w + pw * bin_size_w +
static_cast<T>(ix + .5f) * bin_size_w /
static_cast<T>(roi_bin_grid_w);
// Rotate by theta around the center and translate
T x = xx * cosTheta + yy * sinTheta + roi_center_w;
T y = yy * cosTheta - xx * sinTheta + roi_center_h;
T w1, w2, w3, w4;
int x_low, x_high, y_low, y_high;
bilinear_interpolate_gradient(
height,
width,
y,
x,
w1,
w2,
w3,
w4,
x_low,
x_high,
y_low,
y_high,
index);
T g1 = top_diff_this_bin * w1 / count;
T g2 = top_diff_this_bin * w2 / count;
T g3 = top_diff_this_bin * w3 / count;
T g4 = top_diff_this_bin * w4 / count;
if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) {
gpu_atomic_add(
static_cast<T>(g1), offset_bottom_diff + y_low * width + x_low);
gpu_atomic_add(
static_cast<T>(g2), offset_bottom_diff + y_low * width + x_high);
gpu_atomic_add(
static_cast<T>(g3), offset_bottom_diff + y_high * width + x_low);
gpu_atomic_add(
static_cast<T>(g4), offset_bottom_diff + y_high * width + x_high);
} // if
} // ix
} // iy
} // CUDA_1D_KERNEL_LOOP
} // RoIAlignRotatedBackward
} // namespace
template <>
bool RoIAlignRotatedGradientOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0); // Input data to pool
auto& R = Input(1); // RoIs
auto& dY = Input(2); // Gradient of net w.r.t. output of "forward" op
// (aka "gradOutput")
auto* dX = Output(
0, X.sizes(), at::dtype<float>()); // Gradient of net w.r.t. input to
// "forward" op (aka "gradInput")
// Must zero-out dX before accumulating gradients
math::Set<float, CUDAContext>(
dX->numel(), 0.f, dX->mutable_data<float>(), &context_);
if (dY.numel() > 0) { // Handle possibly empty gradient if there were no rois
hipLaunchKernelGGL(( RoIAlignRotatedBackward<float>)
, dim3(CAFFE_GET_BLOCKS(dY.numel())),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
dY.numel(),
dY.data<float>(),
R.dim32(0),
spatial_scale_,
X.dim32(1),
X.dim32(2),
X.dim32(3),
pooled_height_,
pooled_width_,
sampling_ratio_,
dX->mutable_data<float>(),
R.data<float>(),
aligned_);
}
return true;
}
REGISTER_CUDA_OPERATOR(
RoIAlignRotatedGradient,
RoIAlignRotatedGradientOp<float, CUDAContext>);
} // namespace caffe2
|
5da2dbac4a8e965e035fd4a9739e82bca283649d.cu
|
#ifdef _MSC_VER
#define _USE_MATH_DEFINES // For M_PI
#endif // _MSC_VER
#include <cmath>
#include "caffe2/operators/roi_align_rotated_gradient_op.h"
#include <stdio.h>
#include <cfloat>
#include "caffe2/core/context_gpu.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
namespace {
template <typename T>
inline __device__ T gpu_atomic_add(const T val, T* address);
template <>
inline __device__ float gpu_atomic_add(const float val, float* address) {
return atomicAdd(address, val);
}
template <typename T>
__device__ void bilinear_interpolate_gradient(
const int height,
const int width,
T y,
T x,
T& w1,
T& w2,
T& w3,
T& w4,
int& x_low,
int& x_high,
int& y_low,
int& y_high,
const int index /* index for debug only*/) {
// deal with cases that inverse elements are out of feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width) {
// empty
w1 = w2 = w3 = w4 = 0.;
x_low = x_high = y_low = y_high = -1;
return;
}
if (y <= 0) {
y = 0;
}
if (x <= 0) {
x = 0;
}
y_low = (int)y;
x_low = (int)x;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = (T)y_low;
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = (T)x_low;
} else {
x_high = x_low + 1;
}
T ly = y - y_low;
T lx = x - x_low;
T hy = 1. - ly, hx = 1. - lx;
w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
return;
}
template <typename T>
__global__ void RoIAlignRotatedBackward(
const int nthreads,
const T* top_diff,
const int num_rois,
const T spatial_scale,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
const int sampling_ratio,
T* bottom_diff,
const T* bottom_rois,
bool continuous_coordinate) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const T* offset_bottom_rois = bottom_rois + n * 6;
int roi_batch_ind = offset_bottom_rois[0];
// Do not round
T roi_offset = continuous_coordinate ? T(0.5) : 0;
T roi_center_w = offset_bottom_rois[1] * spatial_scale - roi_offset;
T roi_center_h = offset_bottom_rois[2] * spatial_scale - roi_offset;
T roi_width = offset_bottom_rois[3] * spatial_scale;
T roi_height = offset_bottom_rois[4] * spatial_scale;
T theta = offset_bottom_rois[5] * M_PI / 180.0;
if (!continuous_coordinate) { // backward compatibility
// Force malformed ROIs to be 1x1
roi_width = c10::cuda::compat::max(roi_width, (T)1.);
roi_height = c10::cuda::compat::max(roi_height, (T)1.);
}
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
T* offset_bottom_diff =
bottom_diff + (roi_batch_ind * channels + c) * height * width;
int top_offset = (n * channels + c) * pooled_height * pooled_width;
const T* offset_top_diff = top_diff + top_offset;
const T top_diff_this_bin = offset_top_diff[ph * pooled_width + pw];
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio > 0)
? sampling_ratio
: ceil(roi_height / pooled_height); // e.g., = 2
int roi_bin_grid_w =
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
// roi_start_h and roi_start_w are computed wrt the center of RoI (x, y).
// Appropriate translation needs to be applied after.
T roi_start_h = -roi_height / 2.0;
T roi_start_w = -roi_width / 2.0;
T cosTheta = cos(theta);
T sinTheta = sin(theta);
// We do average (integral) pooling inside a bin
const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4
for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g., iy = 0, 1
{
const T yy = roi_start_h + ph * bin_size_h +
static_cast<T>(iy + .5f) * bin_size_h /
static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
const T xx = roi_start_w + pw * bin_size_w +
static_cast<T>(ix + .5f) * bin_size_w /
static_cast<T>(roi_bin_grid_w);
// Rotate by theta around the center and translate
T x = xx * cosTheta + yy * sinTheta + roi_center_w;
T y = yy * cosTheta - xx * sinTheta + roi_center_h;
T w1, w2, w3, w4;
int x_low, x_high, y_low, y_high;
bilinear_interpolate_gradient(
height,
width,
y,
x,
w1,
w2,
w3,
w4,
x_low,
x_high,
y_low,
y_high,
index);
T g1 = top_diff_this_bin * w1 / count;
T g2 = top_diff_this_bin * w2 / count;
T g3 = top_diff_this_bin * w3 / count;
T g4 = top_diff_this_bin * w4 / count;
if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) {
gpu_atomic_add(
static_cast<T>(g1), offset_bottom_diff + y_low * width + x_low);
gpu_atomic_add(
static_cast<T>(g2), offset_bottom_diff + y_low * width + x_high);
gpu_atomic_add(
static_cast<T>(g3), offset_bottom_diff + y_high * width + x_low);
gpu_atomic_add(
static_cast<T>(g4), offset_bottom_diff + y_high * width + x_high);
} // if
} // ix
} // iy
} // CUDA_1D_KERNEL_LOOP
} // RoIAlignRotatedBackward
} // namespace
template <>
bool RoIAlignRotatedGradientOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0); // Input data to pool
auto& R = Input(1); // RoIs
auto& dY = Input(2); // Gradient of net w.r.t. output of "forward" op
// (aka "gradOutput")
auto* dX = Output(
0, X.sizes(), at::dtype<float>()); // Gradient of net w.r.t. input to
// "forward" op (aka "gradInput")
// Must zero-out dX before accumulating gradients
math::Set<float, CUDAContext>(
dX->numel(), 0.f, dX->mutable_data<float>(), &context_);
if (dY.numel() > 0) { // Handle possibly empty gradient if there were no rois
RoIAlignRotatedBackward<float>
<<<CAFFE_GET_BLOCKS(dY.numel()),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
dY.numel(),
dY.data<float>(),
R.dim32(0),
spatial_scale_,
X.dim32(1),
X.dim32(2),
X.dim32(3),
pooled_height_,
pooled_width_,
sampling_ratio_,
dX->mutable_data<float>(),
R.data<float>(),
aligned_);
}
return true;
}
REGISTER_CUDA_OPERATOR(
RoIAlignRotatedGradient,
RoIAlignRotatedGradientOp<float, CUDAContext>);
} // namespace caffe2
|
4eab9f5666b2d5101d22d79c88ce82bd48d527f3.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "triad.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *A = NULL;
hipMalloc(&A, XSIZE*YSIZE);
float *B = NULL;
hipMalloc(&B, XSIZE*YSIZE);
float *C = NULL;
hipMalloc(&C, XSIZE*YSIZE);
float s = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
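// iXSIZE and iYSIZE were rounded up to multiples of BLOCKX and BLOCKY above, so the
// grid below always covers at least XSIZE x YSIZE elements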
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(( triad), dim3(gridBlock),dim3(threadBlock), 0, 0, A,B,C,s);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(( triad), dim3(gridBlock),dim3(threadBlock), 0, 0, A,B,C,s);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(( triad), dim3(gridBlock),dim3(threadBlock), 0, 0, A,B,C,s);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
4eab9f5666b2d5101d22d79c88ce82bd48d527f3.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "triad.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *A = NULL;
cudaMalloc(&A, XSIZE*YSIZE);
float *B = NULL;
cudaMalloc(&B, XSIZE*YSIZE);
float *C = NULL;
cudaMalloc(&C, XSIZE*YSIZE);
float s = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
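// iXSIZE and iYSIZE were rounded up to multiples of BLOCKX and BLOCKY above, so the
// grid below always covers at least XSIZE x YSIZE elements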
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
triad<<<gridBlock,threadBlock>>>(A,B,C,s);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
triad<<<gridBlock,threadBlock>>>(A,B,C,s);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
triad<<<gridBlock,threadBlock>>>(A,B,C,s);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
6dbde4f201ffc2f09c36e5e8e23a59f95a386e5e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.3.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
November 2012
@generated c Wed Nov 14 22:53:46 2012
*/
#include "common_magma.h"
/*
Matrix is divided into 64 x n block rows.
Each block has 64 threads.
Each thread copies one row, iterating across all columns.
The bottom block of rows may be partially outside the matrix;
if so, rows outside the matrix (row >= m) are disabled.
@author Mark Gates
*/
__global__ void
clacpy_kernel( int m, int n,
const cuFloatComplex *A, int lda,
cuFloatComplex *B, int ldb )
{
int row = blockIdx.x*64 + threadIdx.x;
if ( row < m ) {
A += row;
B += row;
const cuFloatComplex *Aend = A + lda*n;
while( A < Aend ) {
*B = *A;
A += lda;
B += ldb;
}
}
}
extern "C" void
magmablas_clacpy( char uplo, magma_int_t m, magma_int_t n,
const cuFloatComplex *A, magma_int_t lda,
cuFloatComplex *B, magma_int_t ldb )
{
/*
Note
========
- UPLO Parameter is disabled
- Do we want to provide a generic function to the user with all the options?
Purpose
=======
CLACPY copies all or part of a two-dimensional matrix A to another
matrix B.
Arguments
=========
UPLO (input) CHARACTER*1
Specifies the part of the matrix A to be copied to B.
= 'U': Upper triangular part
= 'L': Lower triangular part
Otherwise: All of the matrix A
M (input) INTEGER
The number of rows of the matrix A. M >= 0.
N (input) INTEGER
The number of columns of the matrix A. N >= 0.
A (input) COMPLEX DOUBLE PRECISION array, dimension (LDA,N)
The m by n matrix A. If UPLO = 'U', only the upper triangle
or trapezoid is accessed; if UPLO = 'L', only the lower
triangle or trapezoid is accessed.
LDA (input) INTEGER
The leading dimension of the array A. LDA >= max(1,M).
B (output) COMPLEX DOUBLE PRECISION array, dimension (LDB,N)
On exit, B = A in the locations specified by UPLO.
LDB (input) INTEGER
The leading dimension of the array B. LDB >= max(1,M).
===================================================================== */
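/* Illustrative call (dA, ldda, dB, lddb are placeholder names): any uplo value
   other than 'U'/'L' selects the full-matrix copy implemented below, e.g.
       magmablas_clacpy( 'A', m, n, dA, ldda, dB, lddb );
   the 'U' and 'L' cases are not implemented in this file. */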
dim3 threads( 64 );
dim3 grid( m/64 + (m%64 != 0) );
//printf( "m %d, n %d, grid %d, threads %d\n", m, n, grid.x, threads.x );
if ( m == 0 || n == 0 )
return;
if ( (uplo == 'U') || (uplo == 'u') ) {
fprintf(stderr, "lacpy upper is not implemented\n");
}
else if ( (uplo == 'L') || (uplo == 'l') ) {
fprintf(stderr, "lacpy lower is not implemented\n");
}
else {
hipLaunchKernelGGL(( clacpy_kernel), dim3(grid), dim3(threads), 0, magma_stream , m, n, A, lda, B, ldb );
}
}
|
6dbde4f201ffc2f09c36e5e8e23a59f95a386e5e.cu
|
/*
-- MAGMA (version 1.3.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
November 2012
@generated c Wed Nov 14 22:53:46 2012
*/
#include "common_magma.h"
/*
Matrix is divided into 64 x n block rows.
Each block has 64 threads.
Each thread copies one row, iterating across all columns.
The bottom block of rows may be partially outside the matrix;
if so, rows outside the matrix (row >= m) are disabled.
@author Mark Gates
*/
__global__ void
clacpy_kernel( int m, int n,
const cuFloatComplex *A, int lda,
cuFloatComplex *B, int ldb )
{
int row = blockIdx.x*64 + threadIdx.x;
if ( row < m ) {
A += row;
B += row;
const cuFloatComplex *Aend = A + lda*n;
while( A < Aend ) {
*B = *A;
A += lda;
B += ldb;
}
}
}
extern "C" void
magmablas_clacpy( char uplo, magma_int_t m, magma_int_t n,
const cuFloatComplex *A, magma_int_t lda,
cuFloatComplex *B, magma_int_t ldb )
{
/*
Note
========
- UPLO Parameter is disabled
- Do we want to provide a generic function to the user with all the options?
Purpose
=======
CLACPY copies all or part of a two-dimensional matrix A to another
matrix B.
Arguments
=========
UPLO (input) CHARACTER*1
Specifies the part of the matrix A to be copied to B.
= 'U': Upper triangular part
= 'L': Lower triangular part
Otherwise: All of the matrix A
M (input) INTEGER
The number of rows of the matrix A. M >= 0.
N (input) INTEGER
The number of columns of the matrix A. N >= 0.
A (input) COMPLEX DOUBLE PRECISION array, dimension (LDA,N)
The m by n matrix A. If UPLO = 'U', only the upper triangle
or trapezoid is accessed; if UPLO = 'L', only the lower
triangle or trapezoid is accessed.
LDA (input) INTEGER
The leading dimension of the array A. LDA >= max(1,M).
B (output) COMPLEX DOUBLE PRECISION array, dimension (LDB,N)
On exit, B = A in the locations specified by UPLO.
LDB (input) INTEGER
The leading dimension of the array B. LDB >= max(1,M).
===================================================================== */
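/* Illustrative call (dA, ldda, dB, lddb are placeholder names): any uplo value
   other than 'U'/'L' selects the full-matrix copy implemented below, e.g.
       magmablas_clacpy( 'A', m, n, dA, ldda, dB, lddb );
   the 'U' and 'L' cases are not implemented in this file. */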
dim3 threads( 64 );
dim3 grid( m/64 + (m%64 != 0) );
//printf( "m %d, n %d, grid %d, threads %d\n", m, n, grid.x, threads.x );
if ( m == 0 || n == 0 )
return;
if ( (uplo == 'U') || (uplo == 'u') ) {
fprintf(stderr, "lacpy upper is not implemented\n");
}
else if ( (uplo == 'L') || (uplo == 'l') ) {
fprintf(stderr, "lacpy lower is not implemented\n");
}
else {
clacpy_kernel<<< grid, threads, 0, magma_stream >>> ( m, n, A, lda, B, ldb );
}
}
|
deee6be62261f38729d8d6e1f674db1b7e545249.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "GradientIncrementKernel.cuh"
#include "Constants.hpp"
#include "../Types.cuh"
#include <hip/hip_runtime.h>
#include <cassert>
using namespace rnn;
using namespace rnn::cuda;
__global__
void gradientIncrementKernel(LayerBatchDeltas layerDeltas, ConnectionActivation connection,
CuMatrix outGradient, unsigned spitch) {
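// Each thread accumulates one element of outGradient:
// outGradient[row][col] += sum over the batch of delta[b][row] * activation[b][col],
// i.e. an increment by delta^T * activation, staged through shared-memory tiles with
// row pitch spitch (TPB_X + 1, presumably padded to avoid bank conflicts).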
extern __shared__ float buf[]; // shared memory buffer
const unsigned row = blockDim.y * blockIdx.y + threadIdx.y;
const unsigned col = blockDim.x * blockIdx.x + threadIdx.x;
// buffer for holding the layer weight matrix chunk
float *ldChunk = (float *) buf;
// buffer for holding the prev outputs matrix chunk
float *connChunk = (float *) &buf[spitch * blockDim.y];
const int dCol = blockDim.y * blockIdx.y + threadIdx.x;
const int cCol = col;
const int numChunks = (layerDeltas.batchSize + blockDim.y - 1) / blockDim.y;
const int chunkIndex = threadIdx.x + threadIdx.y * spitch;
const int lim = numChunks * blockDim.y;
float sum = 0.0f;
for (int chunkOffset = 0; chunkOffset < lim; chunkOffset += blockDim.y) {
const int dRow = chunkOffset + threadIdx.y;
if (dRow < layerDeltas.batchSize && dCol < layerDeltas.delta.cols) {
ldChunk[chunkIndex] = *Elem(layerDeltas.delta, dRow, dCol);
}
const int cRow = dRow;
if (cRow < connection.batchSize && cCol < connection.activation.cols) {
connChunk[chunkIndex] = *Elem(connection.activation, cRow, cCol);
}
__syncthreads();
int chunkLim = min(blockDim.x, layerDeltas.batchSize - chunkOffset);
for (int j = 0; j < chunkLim; j++) {
sum += ldChunk[threadIdx.y + j * spitch] * connChunk[threadIdx.x + j * spitch];
}
__syncthreads();
}
if (row < outGradient.rows && col < outGradient.cols) {
*Elem(outGradient, row, col) += sum;
}
}
void GradientIncrementKernel::Apply(LayerBatchDeltas layerDeltas, ConnectionActivation connection,
CuMatrix outGradient, hipStream_t stream) {
assert(layerDeltas.batchSize == connection.batchSize);
assert(layerDeltas.delta.cols == outGradient.rows);
assert(connection.activation.cols == outGradient.cols);
int bpgX = (outGradient.cols + TPB_X - 1) / TPB_X;
int bpgY = (outGradient.rows + TPB_Y - 1) / TPB_Y;
unsigned spitch = (TPB_X + 1);
size_t sharedMemSize = 2 * spitch * TPB_Y * sizeof(float);
hipLaunchKernelGGL(( gradientIncrementKernel), dim3(dim3(bpgX, bpgY, 1)), dim3(dim3(TPB_X, TPB_Y, 1)), sharedMemSize, stream,
layerDeltas, connection, outGradient, spitch);
}
|
deee6be62261f38729d8d6e1f674db1b7e545249.cu
|
#include "GradientIncrementKernel.cuh"
#include "Constants.hpp"
#include "../Types.cuh"
#include <cuda_runtime.h>
#include <cassert>
using namespace rnn;
using namespace rnn::cuda;
__global__
void gradientIncrementKernel(LayerBatchDeltas layerDeltas, ConnectionActivation connection,
CuMatrix outGradient, unsigned spitch) {
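// Each thread accumulates one element of outGradient:
// outGradient[row][col] += sum over the batch of delta[b][row] * activation[b][col],
// i.e. an increment by delta^T * activation, staged through shared-memory tiles with
// row pitch spitch (TPB_X + 1, presumably padded to avoid bank conflicts).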
extern __shared__ float buf[]; // shared memory buffer
const unsigned row = blockDim.y * blockIdx.y + threadIdx.y;
const unsigned col = blockDim.x * blockIdx.x + threadIdx.x;
// buffer for holding the layer weight matrix chunk
float *ldChunk = (float *) buf;
// buffer for holding the prev outputs matrix chunk
float *connChunk = (float *) &buf[spitch * blockDim.y];
const int dCol = blockDim.y * blockIdx.y + threadIdx.x;
const int cCol = col;
const int numChunks = (layerDeltas.batchSize + blockDim.y - 1) / blockDim.y;
const int chunkIndex = threadIdx.x + threadIdx.y * spitch;
const int lim = numChunks * blockDim.y;
float sum = 0.0f;
for (int chunkOffset = 0; chunkOffset < lim; chunkOffset += blockDim.y) {
const int dRow = chunkOffset + threadIdx.y;
if (dRow < layerDeltas.batchSize && dCol < layerDeltas.delta.cols) {
ldChunk[chunkIndex] = *Elem(layerDeltas.delta, dRow, dCol);
}
const int cRow = dRow;
if (cRow < connection.batchSize && cCol < connection.activation.cols) {
connChunk[chunkIndex] = *Elem(connection.activation, cRow, cCol);
}
__syncthreads();
int chunkLim = min(blockDim.x, layerDeltas.batchSize - chunkOffset);
for (int j = 0; j < chunkLim; j++) {
sum += ldChunk[threadIdx.y + j * spitch] * connChunk[threadIdx.x + j * spitch];
}
__syncthreads();
}
if (row < outGradient.rows && col < outGradient.cols) {
*Elem(outGradient, row, col) += sum;
}
}
void GradientIncrementKernel::Apply(LayerBatchDeltas layerDeltas, ConnectionActivation connection,
CuMatrix outGradient, cudaStream_t stream) {
assert(layerDeltas.batchSize == connection.batchSize);
assert(layerDeltas.delta.cols == outGradient.rows);
assert(connection.activation.cols == outGradient.cols);
int bpgX = (outGradient.cols + TPB_X - 1) / TPB_X;
int bpgY = (outGradient.rows + TPB_Y - 1) / TPB_Y;
unsigned spitch = (TPB_X + 1);
size_t sharedMemSize = 2 * spitch * TPB_Y * sizeof(float);
gradientIncrementKernel<<<dim3(bpgX, bpgY, 1), dim3(TPB_X, TPB_Y, 1), sharedMemSize, stream>>>(
layerDeltas, connection, outGradient, spitch);
}
|
b9b7e36de8ff03afafd0085ec4d086ac7e6997ed.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <iostream>
// CUDA utilities and system includes
#include <hip/hip_runtime.h>
// Helper functions
#include <helper_functions.h> // CUDA SDK Helper functions
#include <helper_cuda.h> // CUDA device initialization helper functions
#include <helper_image.h>
#include <hip/hip_runtime_api.h>
char *image_filename = "./data/lena.pgm";
unsigned int width, height;
unsigned char *h_img = NULL;
unsigned char *d_img = NULL;
#define BLOCK_WIDTH 32
#define BLOCK_HEIGHT 32
// Uncomment the line below to compile
// the kernel using CUDA Texture Object
// It works only on graphics cards with Compute Capability >= 3.0
//#define USE_TEXTURE_OBJECT
//////////////////////////////////////
/// Radial blur using global memory
//////////////////////////////////////
template<unsigned short RADIUS >
__global__ void kRadialBlur( unsigned char* img, unsigned width, unsigned height, size_t pitch)
{
__shared__ unsigned char sh[BLOCK_HEIGHT + 2*RADIUS][BLOCK_WIDTH + 2*RADIUS];
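// sh caches a (BLOCK_HEIGHT + 2*RADIUS) x (BLOCK_WIDTH + 2*RADIUS) tile: the block's
// own pixels plus a RADIUS-wide halo on every side, gathered below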
int g_x = blockDim.x*blockIdx.x + threadIdx.x;
int g_y = blockDim.y*blockIdx.y + threadIdx.y;
int pid_x = threadIdx.x + RADIUS;
int pid_y = threadIdx.y + RADIUS;
///////////////////////
// gather into shared memory
///////////////////////
sh[pid_y][pid_x] = img[ g_y*pitch + g_x];
// halo
if ( ( threadIdx.x < RADIUS ) && ( g_x >= RADIUS ) )
{
sh[pid_y][pid_x - RADIUS] = img[ g_y*pitch + g_x - RADIUS];
if ( ( threadIdx.y < RADIUS ) && ( g_y >= RADIUS ) )
{
sh[pid_y - RADIUS][pid_x - RADIUS] = img[ (g_y - RADIUS)*pitch + g_x - RADIUS];
}
if ( ( threadIdx.y > (BLOCK_HEIGHT -1 - RADIUS) ) )
{
sh[pid_y + RADIUS][pid_x - RADIUS] = img[ (g_y + RADIUS)*pitch + g_x - RADIUS];
}
}
if ( ( threadIdx.x > ( BLOCK_WIDTH -1 - RADIUS ) ) && ( g_x < ( width - RADIUS ) ) )
{
sh[pid_y][pid_x + RADIUS ] = img[ g_y*pitch + g_x + RADIUS];
if ( ( threadIdx.y < RADIUS ) && ( g_y > RADIUS ) )
{
sh[pid_y - RADIUS][pid_x + RADIUS] = img[ (g_y - RADIUS)*pitch + g_x + RADIUS];
}
if ( (threadIdx.y > (BLOCK_HEIGHT -1 - RADIUS ) ) && ( g_y < ( height - RADIUS ) ) )
{
sh[pid_y + RADIUS][pid_x + RADIUS] = img[ (g_y + RADIUS)*pitch + g_x + RADIUS];
}
}
if ( ( threadIdx.y < RADIUS ) && ( g_y >= RADIUS ) )
{
sh[pid_y - RADIUS][pid_x] = img[ (g_y - RADIUS)*pitch + g_x];
}
if ( ( threadIdx.y > ( BLOCK_HEIGHT -1 - RADIUS ) ) && ( g_y < ( height - RADIUS ) ) )
{
sh[pid_y + RADIUS][pid_x] = img[ ( g_y + RADIUS)*pitch + g_x ];
}
__syncthreads();
//////////////////////
// compute the blurred value
//////////////////////
unsigned val = 0;
unsigned k = 0;
for (int i=-RADIUS; i<= RADIUS; i++ )
for ( int j=-RADIUS; j<=RADIUS ; j++ )
{
if ( ( ( g_x + j ) < 0 ) || ( ( g_x + j ) > ( width - 1) ) )
continue;
if ( ( ( g_y + i ) < 0 ) || ( ( g_y + i ) > ( height - 1) ) )
continue;
val += sh[pid_y + i][pid_x + j];
k++;
}
val /= k;
////////////////////
// write into global memory
///////////////
img[ g_y*pitch + g_x ] = (unsigned char) val;
}
#ifdef USE_TEXTURE_OBJECT
//////////////////////////////////////
/// Radial blur using texture memory
//////////////////////////////////////
template<unsigned short RADIUS>
__global__ void kRadialBlur( unsigned char* img, hipTextureObject_t tex,
unsigned width, unsigned height, size_t pitch)
{
__shared__ unsigned char sh[BLOCK_HEIGHT + 2*RADIUS][BLOCK_WIDTH + 2*RADIUS];
int g_x = blockDim.x*blockIdx.x + threadIdx.x;
int g_y = blockDim.y*blockIdx.y + threadIdx.y;
int pid_x = threadIdx.x + RADIUS;
int pid_y = threadIdx.y + RADIUS;
///////////////////////
// gather into shared memory
///////////////////////
sh[pid_y][pid_x] = tex2D<unsigned char>(tex, g_x, g_y);
// halo
if ( ( threadIdx.x < RADIUS ) && ( g_x >= RADIUS ) )
{
sh[pid_y][pid_x - RADIUS] = tex2D<unsigned char>(tex, g_x - RADIUS , g_y);
if ( ( threadIdx.y < RADIUS ) && ( g_y >= RADIUS ) )
{
sh[pid_y - RADIUS][pid_x - RADIUS] = tex2D<unsigned char>(tex, g_x , g_y - RADIUS);
}
if ( ( threadIdx.y > (BLOCK_HEIGHT -1 - RADIUS) ) )
{
sh[pid_y + RADIUS][pid_x - RADIUS] = tex2D<unsigned char>(tex, g_x - RADIUS, g_y - RADIUS);
}
}
if ( ( threadIdx.x > ( BLOCK_WIDTH -1 - RADIUS ) ) && ( g_x < ( width - RADIUS ) ) )
{
sh[pid_y][pid_x + RADIUS ] = tex2D<unsigned char>(tex, g_x + RADIUS, g_y );
if ( ( threadIdx.y < RADIUS ) && ( g_y > RADIUS ) )
{
sh[pid_y - RADIUS][pid_x + RADIUS] = tex2D<unsigned char>(tex, g_x + RADIUS, g_y - RADIUS);
}
if ( (threadIdx.y > (BLOCK_HEIGHT -1 - RADIUS ) ) && ( g_y < ( height - RADIUS ) ) )
{
sh[pid_y + RADIUS][pid_x + RADIUS] = tex2D<unsigned char>(tex, g_x + RADIUS, g_y + RADIUS);
}
}
if ( ( threadIdx.y < RADIUS ) && ( g_y >= RADIUS ) )
{
sh[pid_y - RADIUS][pid_x] = tex2D<unsigned char>(tex, g_x , g_y - RADIUS);
}
if ( ( threadIdx.y > ( BLOCK_HEIGHT -1 - RADIUS ) ) && ( g_y < ( height - RADIUS ) ) )
{
sh[pid_y + RADIUS][pid_x] = tex2D<unsigned char>(tex, g_x , g_y + RADIUS);
}
__syncthreads();
//////////////////////
// compute the blurred value
//////////////////////
unsigned val = 0;
unsigned k = 0;
for (int i=-RADIUS; i<= RADIUS; i++ )
for ( int j=-RADIUS; j<=RADIUS ; j++ )
{
if ( ( ( g_x + j ) < 0 ) || ( ( g_x + j ) > ( width - 1) ) )
continue;
if ( ( ( g_y + i ) < 0 ) || ( ( g_y + i ) > ( height - 1) ) )
continue;
val += sh[pid_y + i][pid_x + j];
k++;
}
val /= k;
////////////////////
// write into global memory
///////////////
img[ g_y*pitch + g_x ] = (unsigned char) val;
}
#endif // USE_TEXTURE_OBJECT
int main(int argc, char* argv[])
{
hipSetDevice(0);
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, 0);
hipProfilerStart();
hipError_t err;
// load image (needed so we can get the width and height before we create the window
sdkLoadPGM(image_filename, (unsigned char **) &h_img, &width, &height);
printf("width: %d \t height: %d \n", width, height);
// fill GPU memory
unsigned char* d_img = NULL;
size_t pitch;
hipMallocPitch( (void**) &d_img, &pitch, width*sizeof(unsigned char), height );
hipMemcpy2D( d_img, pitch*sizeof(unsigned char),
h_img, width*sizeof(unsigned char), width*sizeof(unsigned char), height,
hipMemcpyHostToDevice );
#ifdef USE_TEXTURE_OBJECT
hipChannelFormatDesc channelDesc = hipCreateChannelDesc<unsigned char>();
hipArray* cuArray;
hipMallocArray(&cuArray, &channelDesc, width, height);
hipMemcpyToArray(cuArray, 0, 0, h_img,
width*height*sizeof(unsigned char),
hipMemcpyHostToDevice );
hipResourceDesc resDesc;
memset( &resDesc, 0, sizeof(resDesc) );
resDesc.resType = hipResourceTypeArray;
resDesc.res.array.array = cuArray;
hipTextureDesc texDesc;
memset( &texDesc, 0, sizeof( texDesc ) );
texDesc.addressMode[0] = hipAddressModeClamp;
texDesc.addressMode[1] = hipAddressModeClamp;
texDesc.filterMode = hipFilterModePoint;
texDesc.readMode = hipReadModeElementType;
texDesc.normalizedCoords = false;
hipTextureObject_t texObj = 0;
hipCreateTextureObject( &texObj, &resDesc, &texDesc, NULL );
#endif // USE_TEXTURE_OBJECT
// create vars for timing
hipEvent_t startEvent, stopEvent;
err = hipEventCreate(&startEvent, 0);
assert( err == hipSuccess );
err = hipEventCreate(&stopEvent, 0);
assert( err == hipSuccess );
float elapsedTime;
// process image
dim3 dGrid(width / BLOCK_WIDTH, height / BLOCK_HEIGHT);
dim3 dBlock(BLOCK_WIDTH, BLOCK_HEIGHT);
// execution of the version using global memory
hipEventRecord(startEvent);
hipLaunchKernelGGL(( kRadialBlur<4>) , dim3(dGrid), dim3(dBlock) , 0, 0, d_img, width, height, pitch );
hipDeviceSynchronize();
hipEventRecord(stopEvent);
hipEventSynchronize( stopEvent );
hipEventElapsedTime( &elapsedTime, startEvent, stopEvent);
printf("elapsed time of version using global memory: %f\n", elapsedTime );
#ifdef USE_TEXTURE_OBJECT
// execution of the version using texture memory
if ( deviceProp.major >= 3 ) // Texture objects are supported from arch 3.X
{
hipEventRecord(startEvent);
hipLaunchKernelGGL(( kRadialBlur<4>) , dim3(dGrid), dim3(dBlock) , 0, 0, d_img, texObj, width, height, pitch );
hipDeviceSynchronize();
hipEventRecord(stopEvent);
hipEventSynchronize( stopEvent );
hipEventElapsedTime( &elapsedTime, startEvent, stopEvent);
printf("elapsed time of version using texture memory: %f\n", elapsedTime );
}
else
{
printf("CUDA Texture Object requires a GPU with compute capability "
"3.0 or later\n");
}
#endif
// save image
hipMemcpy2D( h_img, width*sizeof(unsigned char),
d_img, pitch*sizeof(unsigned char), width*sizeof(unsigned char), height,
hipMemcpyDeviceToHost );
sdkSavePGM("./data/blurred_tex.ppm", h_img, width, height );
// free memory
#ifdef USE_TEXTURE_OBJECT
hipDestroyTextureObject(texObj);
hipFreeArray(cuArray);
#endif
hipFree(d_img);
hipProfilerStop();
hipDeviceReset();
free(h_img);
return 0;
}
|
b9b7e36de8ff03afafd0085ec4d086ac7e6997ed.cu
|
#include <iostream>
// CUDA utilities and system includes
#include <cuda_runtime.h>
// Helper functions
#include <helper_functions.h> // CUDA SDK Helper functions
#include <helper_cuda.h> // CUDA device initialization helper functions
#include <helper_image.h>
#include <cuda_profiler_api.h>
char *image_filename = "./data/lena.pgm";
unsigned int width, height;
unsigned char *h_img = NULL;
unsigned char *d_img = NULL;
#define BLOCK_WIDTH 32
#define BLOCK_HEIGHT 32
// Uncomment the line below to compile
// the kernel using CUDA Texture Object
// It works only on graphics cards with Compute Capability >= 3.0
//#define USE_TEXTURE_OBJECT
//////////////////////////////////////
/// Radial blur using global memory
//////////////////////////////////////
template<unsigned short RADIUS >
__global__ void kRadialBlur( unsigned char* img, unsigned width, unsigned height, size_t pitch)
{
__shared__ unsigned char sh[BLOCK_HEIGHT + 2*RADIUS][BLOCK_WIDTH + 2*RADIUS];
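// sh caches a (BLOCK_HEIGHT + 2*RADIUS) x (BLOCK_WIDTH + 2*RADIUS) tile: the block's
// own pixels plus a RADIUS-wide halo on every side, gathered below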
int g_x = blockDim.x*blockIdx.x + threadIdx.x;
int g_y = blockDim.y*blockIdx.y + threadIdx.y;
int pid_x = threadIdx.x + RADIUS;
int pid_y = threadIdx.y + RADIUS;
///////////////////////
// gather into shared memory
///////////////////////
sh[pid_y][pid_x] = img[ g_y*pitch + g_x];
// halo
if ( ( threadIdx.x < RADIUS ) && ( g_x >= RADIUS ) )
{
sh[pid_y][pid_x - RADIUS] = img[ g_y*pitch + g_x - RADIUS];
if ( ( threadIdx.y < RADIUS ) && ( g_y >= RADIUS ) )
{
sh[pid_y - RADIUS][pid_x - RADIUS] = img[ (g_y - RADIUS)*pitch + g_x - RADIUS];
}
if ( ( threadIdx.y > (BLOCK_HEIGHT -1 - RADIUS) ) )
{
sh[pid_y + RADIUS][pid_x - RADIUS] = img[ (g_y + RADIUS)*pitch + g_x - RADIUS];
}
}
if ( ( threadIdx.x > ( BLOCK_WIDTH -1 - RADIUS ) ) && ( g_x < ( width - RADIUS ) ) )
{
sh[pid_y][pid_x + RADIUS ] = img[ g_y*pitch + g_x + RADIUS];
if ( ( threadIdx.y < RADIUS ) && ( g_y > RADIUS ) )
{
sh[pid_y - RADIUS][pid_x + RADIUS] = img[ (g_y - RADIUS)*pitch + g_x + RADIUS];
}
if ( (threadIdx.y > (BLOCK_HEIGHT -1 - RADIUS ) ) && ( g_y < ( height - RADIUS ) ) )
{
sh[pid_y + RADIUS][pid_x + RADIUS] = img[ (g_y + RADIUS)*pitch + g_x + RADIUS];
}
}
if ( ( threadIdx.y < RADIUS ) && ( g_y >= RADIUS ) )
{
sh[pid_y - RADIUS][pid_x] = img[ (g_y - RADIUS)*pitch + g_x];
}
if ( ( threadIdx.y > ( BLOCK_HEIGHT -1 - RADIUS ) ) && ( g_y < ( height - RADIUS ) ) )
{
sh[pid_y + RADIUS][pid_x] = img[ ( g_y + RADIUS)*pitch + g_x ];
}
__syncthreads();
//////////////////////
// compute the blurred value
//////////////////////
unsigned val = 0;
unsigned k = 0;
for (int i=-RADIUS; i<= RADIUS; i++ )
for ( int j=-RADIUS; j<=RADIUS ; j++ )
{
if ( ( ( g_x + j ) < 0 ) || ( ( g_x + j ) > ( width - 1) ) )
continue;
if ( ( ( g_y + i ) < 0 ) || ( ( g_y + i ) > ( height - 1) ) )
continue;
val += sh[pid_y + i][pid_x + j];
k++;
}
val /= k;
////////////////////
// write into global memory
///////////////
img[ g_y*pitch + g_x ] = (unsigned char) val;
}
#ifdef USE_TEXTURE_OBJECT
//////////////////////////////////////
/// Radial blur using texture memory
//////////////////////////////////////
template<unsigned short RADIUS>
__global__ void kRadialBlur( unsigned char* img, cudaTextureObject_t tex,
unsigned width, unsigned height, size_t pitch)
{
__shared__ unsigned char sh[BLOCK_HEIGHT + 2*RADIUS][BLOCK_WIDTH + 2*RADIUS];
int g_x = blockDim.x*blockIdx.x + threadIdx.x;
int g_y = blockDim.y*blockIdx.y + threadIdx.y;
int pid_x = threadIdx.x + RADIUS;
int pid_y = threadIdx.y + RADIUS;
///////////////////////
// gather into shared memory
///////////////////////
sh[pid_y][pid_x] = tex2D<unsigned char>(tex, g_x, g_y);
// halo
if ( ( threadIdx.x < RADIUS ) && ( g_x >= RADIUS ) )
{
sh[pid_y][pid_x - RADIUS] = tex2D<unsigned char>(tex, g_x - RADIUS , g_y);
if ( ( threadIdx.y < RADIUS ) && ( g_y >= RADIUS ) )
{
sh[pid_y - RADIUS][pid_x - RADIUS] = tex2D<unsigned char>(tex, g_x , g_y - RADIUS);
}
if ( ( threadIdx.y > (BLOCK_HEIGHT -1 - RADIUS) ) )
{
sh[pid_y + RADIUS][pid_x - RADIUS] = tex2D<unsigned char>(tex, g_x - RADIUS, g_y - RADIUS);
}
}
if ( ( threadIdx.x > ( BLOCK_WIDTH -1 - RADIUS ) ) && ( g_x < ( width - RADIUS ) ) )
{
sh[pid_y][pid_x + RADIUS ] = tex2D<unsigned char>(tex, g_x + RADIUS, g_y );
if ( ( threadIdx.y < RADIUS ) && ( g_y > RADIUS ) )
{
sh[pid_y - RADIUS][pid_x + RADIUS] = tex2D<unsigned char>(tex, g_x + RADIUS, g_y - RADIUS);
}
if ( (threadIdx.y > (BLOCK_HEIGHT -1 - RADIUS ) ) && ( g_y < ( height - RADIUS ) ) )
{
sh[pid_y + RADIUS][pid_x + RADIUS] = tex2D<unsigned char>(tex, g_x + RADIUS, g_y + RADIUS);
}
}
if ( ( threadIdx.y < RADIUS ) && ( g_y >= RADIUS ) )
{
sh[pid_y - RADIUS][pid_x] = tex2D<unsigned char>(tex, g_x , g_y - RADIUS);
}
if ( ( threadIdx.y > ( BLOCK_HEIGHT -1 - RADIUS ) ) && ( g_y < ( height - RADIUS ) ) )
{
sh[pid_y + RADIUS][pid_x] = tex2D<unsigned char>(tex, g_x , g_y + RADIUS);
}
__syncthreads();
//////////////////////
// compute the blurred value
//////////////////////
unsigned val = 0;
unsigned k = 0;
for (int i=-RADIUS; i<= RADIUS; i++ )
for ( int j=-RADIUS; j<=RADIUS ; j++ )
{
if ( ( ( g_x + j ) < 0 ) || ( ( g_x + j ) > ( width - 1) ) )
continue;
if ( ( ( g_y + i ) < 0 ) || ( ( g_y + i ) > ( height - 1) ) )
continue;
val += sh[pid_y + i][pid_x + j];
k++;
}
val /= k;
////////////////////
// write into global memory
///////////////
img[ g_y*pitch + g_x ] = (unsigned char) val;
}
#endif // USE_TEXTURE_OBJECT
int main(int argc, char* argv[])
{
cudaSetDevice(0);
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, 0);
cudaProfilerStart();
cudaError_t err;
// load image (needed so we can get the width and height before we create the window
sdkLoadPGM(image_filename, (unsigned char **) &h_img, &width, &height);
printf("width: %d \t height: %d \n", width, height);
// fill GPU memory
unsigned char* d_img = NULL;
size_t pitch;
cudaMallocPitch( (void**) &d_img, &pitch, width*sizeof(unsigned char), height );
cudaMemcpy2D( d_img, pitch*sizeof(unsigned char),
h_img, width*sizeof(unsigned char), width*sizeof(unsigned char), height,
cudaMemcpyHostToDevice );
#ifdef USE_TEXTURE_OBJECT
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<unsigned char>();
cudaArray* cuArray;
cudaMallocArray(&cuArray, &channelDesc, width, height);
cudaMemcpyToArray(cuArray, 0, 0, h_img,
width*height*sizeof(unsigned char),
cudaMemcpyHostToDevice );
cudaResourceDesc resDesc;
memset( &resDesc, 0, sizeof(resDesc) );
resDesc.resType = cudaResourceTypeArray;
resDesc.res.array.array = cuArray;
cudaTextureDesc texDesc;
memset( &texDesc, 0, sizeof( texDesc ) );
texDesc.addressMode[0] = cudaAddressModeClamp;
texDesc.addressMode[1] = cudaAddressModeClamp;
texDesc.filterMode = cudaFilterModePoint;
texDesc.readMode = cudaReadModeElementType;
texDesc.normalizedCoords = false;
cudaTextureObject_t texObj = 0;
cudaCreateTextureObject( &texObj, &resDesc, &texDesc, NULL );
#endif // USE_TEXTURE_OBJECT
// create vars for timing
cudaEvent_t startEvent, stopEvent;
err = cudaEventCreate(&startEvent, 0);
assert( err == cudaSuccess );
err = cudaEventCreate(&stopEvent, 0);
assert( err == cudaSuccess );
float elapsedTime;
// process image
dim3 dGrid(width / BLOCK_WIDTH, height / BLOCK_HEIGHT);
dim3 dBlock(BLOCK_WIDTH, BLOCK_HEIGHT);
// execution of the version using global memory
cudaEventRecord(startEvent);
kRadialBlur<4> <<< dGrid, dBlock >>> (d_img, width, height, pitch );
cudaThreadSynchronize();
cudaEventRecord(stopEvent);
cudaEventSynchronize( stopEvent );
cudaEventElapsedTime( &elapsedTime, startEvent, stopEvent);
printf("elapsed time of version using global memory: %f\n", elapsedTime );
#ifdef USE_TEXTURE_OBJECT
// execution of the version using texture memory
if ( deviceProp.major >= 3 ) // Texture objects are supported from arch 3.X
{
cudaEventRecord(startEvent);
kRadialBlur<4> <<< dGrid, dBlock >>> (d_img, texObj, width, height, pitch );
cudaThreadSynchronize();
cudaEventRecord(stopEvent);
cudaEventSynchronize( stopEvent );
cudaEventElapsedTime( &elapsedTime, startEvent, stopEvent);
printf("elapsed time of version using texture memory: %f\n", elapsedTime );
}
else
{
printf("CUDA Texture Object requires a GPU with compute capability "
"3.0 or later\n");
}
#endif
// save image
cudaMemcpy2D( h_img, width*sizeof(unsigned char),
d_img, pitch*sizeof(unsigned char), width*sizeof(unsigned char), height,
cudaMemcpyDeviceToHost );
sdkSavePGM("./data/blurred_tex.ppm", h_img, width, height );
// free memory
#ifdef USE_TEXTURE_OBJECT
cudaDestroyTextureObject(texObj);
cudaFreeArray(cuArray);
#endif
cudaFree(d_img);
cudaProfilerStop();
cudaDeviceReset();
free(h_img);
return 0;
}
|
6918c6600752d6a0ce46391bd67873f2a9a98e52.hip
|
// !!! This is a file automatically generated by hipify!!!
/******************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
******************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include "kernel.hip"
#include "support.h"
int main (int argc, char *argv[])
{
Timer timer;
hipError_t cuda_ret;
time_t t;
// Initialize host variables ----------------------------------------------
printf("\nSetting up the problem...");
fflush(stdout);
startTime(&timer);
float *A_h, *B_h, *C_h;
float *A_d, *B_d, *C_d;
size_t A_sz, B_sz, C_sz;
unsigned matArow, matAcol;
unsigned matBrow, matBcol;
dim3 dim_grid, dim_block;
if (argc == 1) {
matArow = 1000;
matAcol = matBrow = 1000;
matBcol = 1000;
} else if (argc == 2) {
matArow = atoi(argv[1]);
matAcol = matBrow = atoi(argv[1]);
matBcol = atoi(argv[1]);
} else if (argc == 4) {
matArow = atoi(argv[1]);
matAcol = matBrow = atoi(argv[2]);
matBcol = atoi(argv[3]);
} else {
printf("\n Invalid input parameters!"
"\n Usage: ./sgemm # All matrices are 1000 x 1000"
"\n Usage: ./sgemm <m> # All matrices are m x m"
"\n Usage: ./sgemm <m> <k> <n> # A: m x k, B: k x n, C: m x n"
"\n");
exit(0);
}
A_sz = matArow*matAcol;
B_sz = matBrow*matBcol;
C_sz = matArow*matBcol;
/* Intializes random number generator */
srand((unsigned) time(&t));
A_h = (float*) malloc( sizeof(float)*A_sz );
for (unsigned int i=0; i < A_sz; i++) { A_h[i] = (rand()%100)/100.00; }
B_h = (float*) malloc( sizeof(float)*B_sz );
for (unsigned int i=0; i < B_sz; i++) { B_h[i] = (rand()%100)/100.00; }
C_h = (float*) malloc( sizeof(float)*C_sz );
stopTime(&timer);
printf("%f s\n", elapsedTime(timer));
printf(" A: %u x %u\n B: %u x %u\n C: %u x %u\n", matArow, matAcol,
matBrow, matBcol, matArow, matBcol);
// Allocate device variables ----------------------------------------------
printf("Allocating device variables...");
fflush(stdout);
startTime(&timer);
//INSERT CODE HERE
cuda_ret = hipMalloc((void**) &A_d, sizeof(float)*A_sz);
if(cuda_ret != hipSuccess) FATAL("Unable to allocate device memory");
cuda_ret = hipMalloc((void**) &B_d, sizeof(float)*B_sz);
if(cuda_ret != hipSuccess) FATAL("Unable to allocate device memory");
//printf("C_sz: %lu\n", C_sz);
cuda_ret = hipMalloc((void**) &C_d, sizeof(float)*C_sz);
if(cuda_ret != hipSuccess) FATAL("Unable to allocate device memory");
hipDeviceSynchronize();
stopTime(&timer);
printf("%f s\n", elapsedTime(timer));
// Copy host variables to device ------------------------------------------
printf("Copying data from host to device..."); fflush(stdout);
startTime(&timer);
//INSERT CODE HERE
cuda_ret = hipMemcpy(A_d, A_h, sizeof(float)*A_sz, hipMemcpyHostToDevice);
if(cuda_ret != hipSuccess) FATAL("Unable to copy memory to device");
cuda_ret = hipMemcpy(B_d, B_h, sizeof(float)*B_sz, hipMemcpyHostToDevice);
if(cuda_ret != hipSuccess) FATAL("Unable to copy memory to device");
hipDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Launch kernel using standard sgemm interface ---------------------------
printf("Launching kernel...");
fflush(stdout);
startTime(&timer);
basicSgemm('N', 'N', matArow, matBcol, matBrow, 1.0f, \
A_d, matArow, B_d, matBrow, 0.0f, C_d, matBrow);
cuda_ret = hipDeviceSynchronize();
if(cuda_ret != hipSuccess) FATAL("Unable to launch kernel");
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Copy device variables from host ----------------------------------------
printf("Copying data from device to host..."); fflush(stdout);
startTime(&timer);
//INSERT CODE HERE
cuda_ret = hipMemcpy(C_h, C_d, sizeof(float)*C_sz, hipMemcpyDeviceToHost);
if(cuda_ret != hipSuccess) FATAL("Unable to copy memory from device");
hipDeviceSynchronize();
stopTime(&timer);
printf("%f s\n", elapsedTime(timer));
// Verify correctness -----------------------------------------------------
printf("Verifying results..."); fflush(stdout);
verify(A_h, B_h, C_h, matArow, matAcol, matBcol);
// Free memory ------------------------------------------------------------
free(A_h);
free(B_h);
free(C_h);
//INSERT CODE HERE
cuda_ret = hipFree(A_d);
if(cuda_ret != hipSuccess) FATAL("Unable to free CUDA memory");
cuda_ret = hipFree(B_d);
if(cuda_ret != hipSuccess) FATAL("Unable to free CUDA memory");
cuda_ret = hipFree(C_d);
if(cuda_ret != hipSuccess) FATAL("Unable to free CUDA memory");
return 0;
}
|
6918c6600752d6a0ce46391bd67873f2a9a98e52.cu
|
/******************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
******************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include "kernel.cu"
#include "support.h"
int main (int argc, char *argv[])
{
Timer timer;
cudaError_t cuda_ret;
time_t t;
// Initialize host variables ----------------------------------------------
printf("\nSetting up the problem...");
fflush(stdout);
startTime(&timer);
float *A_h, *B_h, *C_h;
float *A_d, *B_d, *C_d;
size_t A_sz, B_sz, C_sz;
unsigned matArow, matAcol;
unsigned matBrow, matBcol;
dim3 dim_grid, dim_block;
if (argc == 1) {
matArow = 1000;
matAcol = matBrow = 1000;
matBcol = 1000;
} else if (argc == 2) {
matArow = atoi(argv[1]);
matAcol = matBrow = atoi(argv[1]);
matBcol = atoi(argv[1]);
} else if (argc == 4) {
matArow = atoi(argv[1]);
matAcol = matBrow = atoi(argv[2]);
matBcol = atoi(argv[3]);
} else {
printf("\n Invalid input parameters!"
"\n Usage: ./sgemm # All matrices are 1000 x 1000"
"\n Usage: ./sgemm <m> # All matrices are m x m"
"\n Usage: ./sgemm <m> <k> <n> # A: m x k, B: k x n, C: m x n"
"\n");
exit(0);
}
A_sz = matArow*matAcol;
B_sz = matBrow*matBcol;
C_sz = matArow*matBcol;
/* Initializes the random number generator */
srand((unsigned) time(&t));
A_h = (float*) malloc( sizeof(float)*A_sz );
for (unsigned int i=0; i < A_sz; i++) { A_h[i] = (rand()%100)/100.00; }
B_h = (float*) malloc( sizeof(float)*B_sz );
for (unsigned int i=0; i < B_sz; i++) { B_h[i] = (rand()%100)/100.00; }
C_h = (float*) malloc( sizeof(float)*C_sz );
stopTime(&timer);
printf("%f s\n", elapsedTime(timer));
printf(" A: %u x %u\n B: %u x %u\n C: %u x %u\n", matArow, matAcol,
matBrow, matBcol, matArow, matBcol);
// Allocate device variables ----------------------------------------------
printf("Allocating device variables...");
fflush(stdout);
startTime(&timer);
//INSERT CODE HERE
cuda_ret = cudaMalloc((void**) &A_d, sizeof(float)*A_sz);
if(cuda_ret != cudaSuccess) FATAL("Unable to allocate device memory");
cuda_ret = cudaMalloc((void**) &B_d, sizeof(float)*B_sz);
if(cuda_ret != cudaSuccess) FATAL("Unable to allocate device memory");
//printf("C_sz: %lu\n", C_sz);
cuda_ret = cudaMalloc((void**) &C_d, sizeof(float)*C_sz);
if(cuda_ret != cudaSuccess) FATAL("Unable to allocate device memory");
cudaDeviceSynchronize();
stopTime(&timer);
printf("%f s\n", elapsedTime(timer));
// Copy host variables to device ------------------------------------------
printf("Copying data from host to device..."); fflush(stdout);
startTime(&timer);
//INSERT CODE HERE
cuda_ret = cudaMemcpy(A_d, A_h, sizeof(float)*A_sz, cudaMemcpyHostToDevice);
if(cuda_ret != cudaSuccess) FATAL("Unable to copy memory to device");
cuda_ret = cudaMemcpy(B_d, B_h, sizeof(float)*B_sz, cudaMemcpyHostToDevice);
if(cuda_ret != cudaSuccess) FATAL("Unable to copy memory to device");
cudaDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Launch kernel using standard sgemm interface ---------------------------
printf("Launching kernel...");
fflush(stdout);
startTime(&timer);
basicSgemm('N', 'N', matArow, matBcol, matBrow, 1.0f, \
A_d, matArow, B_d, matBrow, 0.0f, C_d, matBrow);
cuda_ret = cudaDeviceSynchronize();
if(cuda_ret != cudaSuccess) FATAL("Unable to launch kernel");
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Copy device variables back to host --------------------------------------
printf("Copying data from device to host..."); fflush(stdout);
startTime(&timer);
//INSERT CODE HERE
cuda_ret = cudaMemcpy(C_h, C_d, sizeof(float)*C_sz, cudaMemcpyDeviceToHost);
if(cuda_ret != cudaSuccess) FATAL("Unable to copy memory from device");
cudaDeviceSynchronize();
stopTime(&timer);
printf("%f s\n", elapsedTime(timer));
// Verify correctness -----------------------------------------------------
printf("Verifying results..."); fflush(stdout);
verify(A_h, B_h, C_h, matArow, matAcol, matBcol);
// Free memory ------------------------------------------------------------
free(A_h);
free(B_h);
free(C_h);
//INSERT CODE HERE
cuda_ret = cudaFree(A_d);
if(cuda_ret != cudaSuccess) FATAL("Unable to free CUDA memory");
cuda_ret = cudaFree(B_d);
if(cuda_ret != cudaSuccess) FATAL("Unable to free CUDA memory");
cuda_ret = cudaFree(C_d);
if(cuda_ret != cudaSuccess) FATAL("Unable to free CUDA memory");
return 0;
}
|
62d29c016c854acf30de992bdd6a9c70d140e67c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "Predictor.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
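// Benchmark sweep: candidate thread-block shapes and square matrix sizes to try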
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const double TIME = 1;
double4 *p_pred = NULL;
hipMalloc(&p_pred, XSIZE*YSIZE);
float4 *v_pred = NULL;
hipMalloc(&v_pred, XSIZE*YSIZE);
float4 *a_pred = NULL;
hipMalloc(&a_pred, XSIZE*YSIZE);
double4 *p_corr = NULL;
hipMalloc(&p_corr, XSIZE*YSIZE);
double4 *v_corr = NULL;
hipMalloc(&v_corr, XSIZE*YSIZE);
double *loc_time = NULL;
hipMalloc(&loc_time, XSIZE*YSIZE);
double4 *acc = NULL;
hipMalloc(&acc, XSIZE*YSIZE);
double4 *acc1 = NULL;
hipMalloc(&acc1, XSIZE*YSIZE);
double4 *acc2 = NULL;
hipMalloc(&acc2, XSIZE*YSIZE);
double4 *acc3 = NULL;
hipMalloc(&acc3, XSIZE*YSIZE);
int istart = 1;
int *nvec = NULL;
hipMalloc(&nvec, XSIZE*YSIZE);
int ppgpus = 1;
unsigned int N = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
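// Round the launch extents up to the next multiple of the block dimensions so the grid covers the whole matrix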
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
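// hipFree(0) is commonly used to force lazy runtime/context initialization before the timed launches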
hipFree(0);
hipLaunchKernelGGL((Predictor), dim3(gridBlock), dim3(threadBlock), 0, 0, TIME,p_pred,v_pred,a_pred,p_corr,v_corr,loc_time,acc,acc1,acc2,acc3,istart,nvec,ppgpus,N);
hipDeviceSynchronize();
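// Warm-up launches so the timed loop below excludes one-time startup costs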
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((Predictor), dim3(gridBlock), dim3(threadBlock), 0, 0, TIME,p_pred,v_pred,a_pred,p_corr,v_corr,loc_time,acc,acc1,acc2,acc3,istart,nvec,ppgpus,N);
}
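// Time 1000 back-to-back launches; note there is no device synchronize before reading the clock, so this mostly measures launch/enqueue overhead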
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((Predictor), dim3(gridBlock), dim3(threadBlock), 0, 0, TIME,p_pred,v_pred,a_pred,p_corr,v_corr,loc_time,acc,acc1,acc2,acc3,istart,nvec,ppgpus,N);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
62d29c016c854acf30de992bdd6a9c70d140e67c.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "Predictor.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
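// Benchmark sweep: candidate thread-block shapes and square matrix sizes to try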
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const double TIME = 1;
double4 *p_pred = NULL;
cudaMalloc(&p_pred, XSIZE*YSIZE);
float4 *v_pred = NULL;
cudaMalloc(&v_pred, XSIZE*YSIZE);
float4 *a_pred = NULL;
cudaMalloc(&a_pred, XSIZE*YSIZE);
double4 *p_corr = NULL;
cudaMalloc(&p_corr, XSIZE*YSIZE);
double4 *v_corr = NULL;
cudaMalloc(&v_corr, XSIZE*YSIZE);
double *loc_time = NULL;
cudaMalloc(&loc_time, XSIZE*YSIZE);
double4 *acc = NULL;
cudaMalloc(&acc, XSIZE*YSIZE);
double4 *acc1 = NULL;
cudaMalloc(&acc1, XSIZE*YSIZE);
double4 *acc2 = NULL;
cudaMalloc(&acc2, XSIZE*YSIZE);
double4 *acc3 = NULL;
cudaMalloc(&acc3, XSIZE*YSIZE);
int istart = 1;
int *nvec = NULL;
cudaMalloc(&nvec, XSIZE*YSIZE);
int ppgpus = 1;
unsigned int N = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
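// Round the launch extents up to the next multiple of the block dimensions so the grid covers the whole matrix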
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
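// cudaFree(0) is commonly used to force lazy context initialization before the timed launches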
cudaFree(0);
Predictor<<<gridBlock,threadBlock>>>(TIME,p_pred,v_pred,a_pred,p_corr,v_corr,loc_time,acc,acc1,acc2,acc3,istart,nvec,ppgpus,N);
cudaDeviceSynchronize();
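// Warm-up launches so the timed loop below excludes one-time startup costs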
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
Predictor<<<gridBlock,threadBlock>>>(TIME,p_pred,v_pred,a_pred,p_corr,v_corr,loc_time,acc,acc1,acc2,acc3,istart,nvec,ppgpus,N);
}
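// Time 1000 back-to-back launches; note there is no device synchronize before reading the clock, so this mostly measures launch/enqueue overhead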
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
Predictor<<<gridBlock,threadBlock>>>(TIME,p_pred,v_pred,a_pred,p_corr,v_corr,loc_time,acc,acc1,acc2,acc3,istart,nvec,ppgpus,N);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
b11be9f81e6e25efa7777e44d1bca5436e95d9ec.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "assignment.h"
#include <math.h>
#include <stdio.h>
#include <vector>
#include <stdint.h>
bool enabledVerification = false;
bool disablePopulateRandomData = false;
struct StreamTest
{
// Total number of elements (due to streams, this can be HUGE)
unsigned int totalElements;
// Number of elements that stream processes at a time
unsigned int streamSize;
// The number of asynchronous streams
unsigned int numberOfStreams;
// the block size
unsigned int blockSize;
};
struct StreamData
{
HostAndDeviceMemory<float> position, velocity, acceleration, output;
StreamData(size_t num_elements)
: position(num_elements), velocity(num_elements), acceleration(num_elements), output(num_elements) {
// 0 out host memory. If not all streams run, zeroed out memory is correct for validation
position.clearValues();
velocity.clearValues();
acceleration.clearValues();
output.clearValues();
}
};
__global__ void calculationPosition(float* finalPosition, const float* initialPosition, const float* velocity, const float* acceleration)
{
int tid = blockDim.x * blockIdx.x + threadIdx.x;
float initPos = initialPosition[tid];
float veloc = velocity[tid];
float accel = acceleration[tid];
//float time = tid; // integer can overflow on the squared value
int time = tid % 1000; // large values introduce error
float finalPos = initPos + veloc * time + 0.5 * accel * (time * time);
finalPosition[tid] = finalPos;
}
void verifyOutput(const StreamData& data, int streamIndex) {
if (!enabledVerification)
return;
for (int i = 0; i < data.output.size(); i++) {
float initPos = data.position.host()[i];
float veloc = data.velocity.host()[i];
float accel = data.acceleration.host()[i];
float calculated = data.output.host()[i];
int time = i % 1000;
float expectedAnswer = initPos + veloc * time + (0.5) * accel * pow(time, 2);
if (calculated != expectedAnswer) {
printf("ERROR (%d, %d): pos: %f vel: %f acel: %f calc: %f, expected: %f\n", streamIndex, i, initPos, veloc, accel, calculated, expectedAnswer);
}
}
}
void populateData(StreamData& data) {
if (disablePopulateRandomData)
return;
for (size_t i = 0; i < data.output.size(); i++) {
data.position.host()[i] = (float(rand() % 20099)) / 10 - 100; //[-100.00, 100.00]
data.velocity.host()[i] = (float(rand() % 4099)) / 10 - 20; //[-20.00, 20.00]
data.acceleration.host()[i] = (float(rand() % 1099)) / 10 - 5; //[-5.00, 5.00]
}
}
void runStreamTests(const StreamTest& test) {
std::vector<CudaStreamWrapper> streams(test.numberOfStreams);
//std::vector<HostAndDeviceMemory<float>> dataSets;
std::vector<StreamData> dataSets;
std::vector<CudaEventWrapper> events(test.numberOfStreams);
const size_t blocksPerStream = (test.streamSize + test.blockSize - 1) / test.blockSize;
// allocate enough space to not require bounds checking in kernel.
const size_t adjustedNumElementsPerStream = blocksPerStream * test.blockSize;
for (unsigned int i = 0; i < test.numberOfStreams; i++) {
// Create memory blocks of size 'streamSize'
//dataSets.push_back(std::move(HostAndDeviceMemory<float>(adjustedStreamSize)));
dataSets.emplace_back(adjustedNumElementsPerStream);
gpuErrchk(hipEventRecord(events[i].event, streams[i].stream));
}
int currentStreamIndex = 0;
bool streamHasOutputData = false;
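// Round-robin over the streams: before a stream's buffers are reused, the event sync at the top of the loop ensures its previous async work has completed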
for (size_t elementsProcessed = 0; elementsProcessed < test.totalElements; elementsProcessed += test.streamSize) {
// make sure stream is ready
gpuErrchk(hipEventSynchronize(events[currentStreamIndex].event));
//gpuErrchk(hipStreamSynchronize(streams[currentStreamIndex].stream));
StreamData& data = dataSets[currentStreamIndex];
hipStream_t& stream = streams[currentStreamIndex].stream;
// verify results of previous run
if (streamHasOutputData) {
verifyOutput(data, currentStreamIndex);
}
// populate new data
populateData(data);
// send data
data.position.transferToDeviceAsync(stream);
data.velocity.transferToDeviceAsync(stream);
data.acceleration.transferToDeviceAsync(stream);
// process data
hipLaunchKernelGGL(( calculationPosition), dim3(blocksPerStream), dim3(test.blockSize), 0, stream,
data.output.device(), data.position.device(), data.velocity.device(), data.acceleration.device());
// retrieve data back
data.output.transferToHostAsync(stream);
gpuErrchk(hipEventRecord(events[currentStreamIndex].event, stream));
currentStreamIndex++;
if (currentStreamIndex == test.numberOfStreams) {
currentStreamIndex = 0;
streamHasOutputData = true; // all streams have output data now
}
}
// synchronize the streams
//for (auto& cudaEvent : events) {
for (size_t i = 0; i < events.size(); i++) {
gpuErrchk(hipEventSynchronize(events[i].event));
verifyOutput(dataSets[i], i);
}
}
int main(int argc, char* argv[])
{
StreamTest testValues;
testValues.totalElements = 1024; // Total number of elements (due to streams, this can be HUGE)
testValues.streamSize = 128; // Number of elements that stream processes at a time
testValues.numberOfStreams = 4; // The number of asynchronous streams
testValues.blockSize = 32; // the block size
for (int i = 0; i < argc; i++) {
std::string arg = argv[i];
if (arg == "--elements") {
testValues.totalElements = atoi(argv[++i]);
}
else if (arg == "--streamSize") {
testValues.streamSize = atoi(argv[++i]);
}
else if (arg == "--streams") {
testValues.numberOfStreams = atoi(argv[++i]);
}
else if (arg == "--blocksize") {
testValues.blockSize = atoi(argv[++i]);
}
else if (arg == "--enableVerify") {
enabledVerification = true;
}
else if (arg == "--disablePopulateData") {
disablePopulateRandomData = true;
}
}
printf("Elements: %u StreamSize: %u Streams: %u BlockSize: %u\n",
testValues.totalElements, testValues.streamSize, testValues.numberOfStreams, testValues.blockSize);
printf("Verify Output: %d Disable Data Generation: %d\n", (int) enabledVerification, (int) disablePopulateRandomData);
{
TimeCodeBlockCuda kernelRun("Total processing time");
runStreamTests(testValues);
}
// hipDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
gpuErrchk(hipDeviceReset());
return 0;
}
|
b11be9f81e6e25efa7777e44d1bca5436e95d9ec.cu
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "assignment.h"
#include <math.h>
#include <stdio.h>
#include <vector>
#include <stdint.h>
bool enabledVerification = false;
bool disablePopulateRandomData = false;
struct StreamTest
{
// Total number of elements (due to streams, this can be HUGE)
unsigned int totalElements;
// Number of elements that stream processes at a time
unsigned int streamSize;
// The number of asynchronous streams
unsigned int numberOfStreams;
// the block size
unsigned int blockSize;
};
struct StreamData
{
HostAndDeviceMemory<float> position, velocity, acceleration, output;
StreamData(size_t num_elements)
: position(num_elements), velocity(num_elements), acceleration(num_elements), output(num_elements) {
// 0 out host memory. If not all streams run, zeroed out memory is correct for validation
position.clearValues();
velocity.clearValues();
acceleration.clearValues();
output.clearValues();
}
};
__global__ void calculationPosition(float* finalPosition, const float* initialPosition, const float* velocity, const float* acceleration)
{
int tid = blockDim.x * blockIdx.x + threadIdx.x;
float initPos = initialPosition[tid];
float veloc = velocity[tid];
float accel = acceleration[tid];
//float time = tid; // integer can overflow on the squared value
int time = tid % 1000; // large values introduce error
float finalPos = initPos + veloc * time + 0.5 * accel * (time * time);
finalPosition[tid] = finalPos;
}
void verifyOutput(const StreamData& data, int streamIndex) {
if (!enabledVerification)
return;
for (int i = 0; i < data.output.size(); i++) {
float initPos = data.position.host()[i];
float veloc = data.velocity.host()[i];
float accel = data.acceleration.host()[i];
float calculated = data.output.host()[i];
int time = i % 1000;
float expectedAnswer = initPos + veloc * time + (0.5) * accel * pow(time, 2);
if (calculated != expectedAnswer) {
printf("ERROR (%d, %d): pos: %f vel: %f acel: %f calc: %f, expected: %f\n", streamIndex, i, initPos, veloc, accel, calculated, expectedAnswer);
}
}
}
void populateData(StreamData& data) {
if (disablePopulateRandomData)
return;
for (size_t i = 0; i < data.output.size(); i++) {
data.position.host()[i] = (float(rand() % 20099)) / 10 - 100; //[-100.00, 100.00]
data.velocity.host()[i] = (float(rand() % 4099)) / 10 - 20; //[-20.00, 20.00]
data.acceleration.host()[i] = (float(rand() % 1099)) / 10 - 5; //[-5.00, 5.00]
}
}
void runStreamTests(const StreamTest& test) {
std::vector<CudaStreamWrapper> streams(test.numberOfStreams);
//std::vector<HostAndDeviceMemory<float>> dataSets;
std::vector<StreamData> dataSets;
std::vector<CudaEventWrapper> events(test.numberOfStreams);
const size_t blocksPerStream = (test.streamSize + test.blockSize - 1) / test.blockSize;
// allocate enough space to not require bounds checking in kernel.
const size_t adjustedNumElementsPerStream = blocksPerStream * test.blockSize;
for (unsigned int i = 0; i < test.numberOfStreams; i++) {
// Create memory blocks of size 'streamSize'
//dataSets.push_back(std::move(HostAndDeviceMemory<float>(adjustedStreamSize)));
dataSets.emplace_back(adjustedNumElementsPerStream);
gpuErrchk(cudaEventRecord(events[i].event, streams[i].stream));
}
int currentStreamIndex = 0;
bool streamHasOutputData = false;
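// Round-robin over the streams: before a stream's buffers are reused, the event sync at the top of the loop ensures its previous async work has completed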
for (size_t elementsProcessed = 0; elementsProcessed < test.totalElements; elementsProcessed += test.streamSize) {
// make sure stream is ready
gpuErrchk(cudaEventSynchronize(events[currentStreamIndex].event));
//gpuErrchk(cudaStreamSynchronize(streams[currentStreamIndex].stream));
StreamData& data = dataSets[currentStreamIndex];
cudaStream_t& stream = streams[currentStreamIndex].stream;
// verify results of previous run
if (streamHasOutputData) {
verifyOutput(data, currentStreamIndex);
}
// populate new data
populateData(data);
// send data
data.position.transferToDeviceAsync(stream);
data.velocity.transferToDeviceAsync(stream);
data.acceleration.transferToDeviceAsync(stream);
// process data
calculationPosition<<<blocksPerStream, test.blockSize, 0, stream>>>(
data.output.device(), data.position.device(), data.velocity.device(), data.acceleration.device());
// retrieve data back
data.output.transferToHostAsync(stream);
gpuErrchk(cudaEventRecord(events[currentStreamIndex].event, stream));
currentStreamIndex++;
if (currentStreamIndex == test.numberOfStreams) {
currentStreamIndex = 0;
streamHasOutputData = true; // all streams have output data now
}
}
// synchronize the streams
//for (auto& cudaEvent : events) {
for (size_t i = 0; i < events.size(); i++) {
gpuErrchk(cudaEventSynchronize(events[i].event));
verifyOutput(dataSets[i], i);
}
}
int main(int argc, char* argv[])
{
StreamTest testValues;
testValues.totalElements = 1024; // Total number of elements (due to streams, this can be HUGE)
testValues.streamSize = 128; // Number of elements that stream processes at a time
testValues.numberOfStreams = 4; // The number of asynchronous streams
testValues.blockSize = 32; // the block size
for (int i = 0; i < argc; i++) {
std::string arg = argv[i];
if (arg == "--elements") {
testValues.totalElements = atoi(argv[++i]);
}
else if (arg == "--streamSize") {
testValues.streamSize = atoi(argv[++i]);
}
else if (arg == "--streams") {
testValues.numberOfStreams = atoi(argv[++i]);
}
else if (arg == "--blocksize") {
testValues.blockSize = atoi(argv[++i]);
}
else if (arg == "--enableVerify") {
enabledVerification = true;
}
else if (arg == "--disablePopulateData") {
disablePopulateRandomData = true;
}
}
printf("Elements: %u StreamSize: %u Streams: %u BlockSize: %u\n",
testValues.totalElements, testValues.streamSize, testValues.numberOfStreams, testValues.blockSize);
printf("Verify Output: %d Disable Data Generation: %d\n", (int) enabledVerification, (int) disablePopulateRandomData);
{
TimeCodeBlockCuda kernelRun("Total processing time");
runStreamTests(testValues);
}
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
gpuErrchk(cudaDeviceReset());
return 0;
}
|
cfa3978fad07eeef47ae280dd2024fb1c045697a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Matrix multiplication: C = A * B.
* Host code.
* Author: Naga Kandasamy
* Date modified: 02/14/2017
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <sys/time.h>
/* Include the kernel code here. */
#include "matrixmul_kernel.hip"
extern "C" void computeGold(float*, const float*, const float*, unsigned int, unsigned int, unsigned int);
Matrix AllocateDeviceMatrix(const Matrix M);
Matrix AllocateMatrix(int height, int width, int init);
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost);
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice);
void FreeDeviceMatrix(Matrix* M);
void FreeMatrix(Matrix* M);
void MatrixMulOnDevice(const Matrix M, const Matrix N, Matrix P);
void checkCUDAError(const char *msg);
int checkResults(float *, float *, int, float);
int
main(int argc, char** argv) {
Matrix M, N, P;
srand(time(NULL));
// Allocate and initialize the matrices
M = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 1);
N = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 1);
P = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 0);
// M * N on the device
MatrixMulOnDevice(M, N, P);
printf("GPU computation complete\n");
printf("Multiplying matrices on the CPU. \n");
struct timeval start, stop;
gettimeofday(&start, NULL);
Matrix reference = AllocateMatrix(P.height, P.width, 0); /* Compute M * N on the CPU. */
computeGold(reference.elements, M.elements, N.elements, M.height, M.width, N.width);
gettimeofday(&stop, NULL);
printf("Execution time = %fs. \n", (float)(stop.tv_sec - start.tv_sec +\\
(stop.tv_usec - start.tv_usec)/(float)1000000));
/* Check if the device result is equivalent to the expected solution. */
int num_elements = M.height*M.width;
int status = checkResults(reference.elements, P.elements, num_elements, 0.001f);
printf("Test %s\n", (1 == status) ? "PASSED" : "FAILED");
// Free matrices
FreeMatrix(&M);
FreeMatrix(&N);
FreeMatrix(&P);
return 0;
}
void
MatrixMulOnDevice(const Matrix M, const Matrix N, Matrix P)
{
// Load M and N to the device
Matrix Md = AllocateDeviceMatrix(M);
CopyToDeviceMatrix(Md, M);
Matrix Nd = AllocateDeviceMatrix(N);
CopyToDeviceMatrix(Nd, N);
// Allocate P on the device
Matrix Pd = AllocateDeviceMatrix(P);
CopyToDeviceMatrix(Pd, P); // Clear memory
// Setup the execution configuration
dim3 dimBlock, dimGrid;
dimBlock.x = dimBlock.y = TILE_SIZE;
dimBlock.z = 1;
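// Ceiling division so partial tiles at the matrix edges still get a thread block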
dimGrid.x = (P.width / dimBlock.x) + ((P.width % dimBlock.x) ? 1:0 );
dimGrid.y = (P.height / dimBlock.y) + ((P.height % dimBlock.y) ? 1:0 );
dimGrid.z = 1;
printf("Setting up a %d x %d grid of thread blocks. \n", dimGrid.x, dimGrid.y);
struct timeval start, stop;
gettimeofday(&start, NULL);
// Launch the device computation threads!
hipLaunchKernelGGL(( MatrixMulKernel), dim3(dimGrid),dim3(dimBlock), 0, 0, Md,Nd,Pd);
hipDeviceSynchronize();
gettimeofday(&stop, NULL);
printf("Execution time = %fs. \n", (float)(stop.tv_sec - start.tv_sec + (stop.tv_usec - start.tv_usec)/(float)1000000));
checkCUDAError("Error in kernel");
// Read P from the device
CopyFromDeviceMatrix(P, Pd);
// Free device matrices
FreeDeviceMatrix(&Md);
FreeDeviceMatrix(&Nd);
FreeDeviceMatrix(&Pd);
}
// Allocate a device matrix of same size as M.
Matrix
AllocateDeviceMatrix(const Matrix M)
{
Matrix Mdevice = M;
int size = M.width * M.height * sizeof(float);
hipMalloc((void**)&Mdevice.elements, size);
return Mdevice;
}
// Allocate a device matrix of dimensions height*width
// If init == 0, initialize to all zeroes.
// If init == 1, perform random initialization.
// If init == 2, initialize matrix parameters, but do not allocate memory
Matrix
AllocateMatrix(int height, int width, int init)
{
Matrix M;
M.width = M.pitch = width;
M.height = height;
int size = M.width * M.height;
M.elements = NULL;
// don't allocate memory on option 2
if(init == 2)
return M;
M.elements = (float*) malloc(size*sizeof(float));
for(unsigned int i = 0; i < M.height * M.width; i++){
M.elements[i] = (init == 0) ? (0.0f) : ((rand()*3 / (float)RAND_MAX));
}
return M;
}
// Copy a host matrix to a device matrix.
void
CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost)
{
int size = Mhost.width * Mhost.height * sizeof(float);
Mdevice.height = Mhost.height;
Mdevice.width = Mhost.width;
Mdevice.pitch = Mhost.pitch;
hipMemcpy(Mdevice.elements, Mhost.elements, size, hipMemcpyHostToDevice);
}
// Copy a device matrix to a host matrix.
void
CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice)
{
int size = Mdevice.width * Mdevice.height * sizeof(float);
hipMemcpy(Mhost.elements, Mdevice.elements, size, hipMemcpyDeviceToHost);
}
// Free a device matrix.
void
FreeDeviceMatrix(Matrix* M)
{
hipFree(M->elements);
M->elements = NULL;
}
// Free a host Matrix
void
FreeMatrix(Matrix* M)
{
free(M->elements);
M->elements = NULL;
}
void
checkCUDAError(const char *msg)
{
hipError_t err = hipGetLastError();
if( hipSuccess != err)
{
printf("CUDA ERROR: %s (%s).\n", msg, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
}
int
checkResults(float *reference, float *gpu_result, int num_elements, float threshold)
{
int checkMark = 1;
float epsilon = 0.0;
for(int i = 0; i < num_elements; i++)
if(fabsf((reference[i] - gpu_result[i])/reference[i]) > threshold){
checkMark = 0;
}
for(int i = 0; i < num_elements; i++)
if(fabsf((reference[i] - gpu_result[i])/reference[i]) > epsilon){
epsilon = fabsf((reference[i] - gpu_result[i])/reference[i]);
}
printf("Max epsilon = %f. \n", epsilon);
return checkMark;
}
|
cfa3978fad07eeef47ae280dd2024fb1c045697a.cu
|
/* Matrix multiplication: C = A * B.
* Host code.
* Author: Naga Kandasamy
* Date modified: 02/14/2017
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <sys/time.h>
/* Include the kernel code here. */
#include "matrixmul_kernel.cu"
extern "C" void computeGold(float*, const float*, const float*, unsigned int, unsigned int, unsigned int);
Matrix AllocateDeviceMatrix(const Matrix M);
Matrix AllocateMatrix(int height, int width, int init);
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost);
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice);
void FreeDeviceMatrix(Matrix* M);
void FreeMatrix(Matrix* M);
void MatrixMulOnDevice(const Matrix M, const Matrix N, Matrix P);
void checkCUDAError(const char *msg);
int checkResults(float *, float *, int, float);
int
main(int argc, char** argv) {
Matrix M, N, P;
srand(time(NULL));
// Allocate and initialize the matrices
M = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 1);
N = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 1);
P = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 0);
// M * N on the device
MatrixMulOnDevice(M, N, P);
printf("GPU computation complete\n");
printf("Multiplying matrices on the CPU. \n");
struct timeval start, stop;
gettimeofday(&start, NULL);
Matrix reference = AllocateMatrix(P.height, P.width, 0); /* Compute M * N on the CPU. */
computeGold(reference.elements, M.elements, N.elements, M.height, M.width, N.width);
gettimeofday(&stop, NULL);
printf("Execution time = %fs. \n", (float)(stop.tv_sec - start.tv_sec +\\
(stop.tv_usec - start.tv_usec)/(float)1000000));
/* Check if the device result is equivalent to the expected solution. */
int num_elements = M.height*M.width;
int status = checkResults(reference.elements, P.elements, num_elements, 0.001f);
printf("Test %s\n", (1 == status) ? "PASSED" : "FAILED");
// Free matrices
FreeMatrix(&M);
FreeMatrix(&N);
FreeMatrix(&P);
return 0;
}
void
MatrixMulOnDevice(const Matrix M, const Matrix N, Matrix P)
{
// Load M and N to the device
Matrix Md = AllocateDeviceMatrix(M);
CopyToDeviceMatrix(Md, M);
Matrix Nd = AllocateDeviceMatrix(N);
CopyToDeviceMatrix(Nd, N);
// Allocate P on the device
Matrix Pd = AllocateDeviceMatrix(P);
CopyToDeviceMatrix(Pd, P); // Clear memory
// Setup the execution configuration
dim3 dimBlock, dimGrid;
dimBlock.x = dimBlock.y = TILE_SIZE;
dimBlock.z = 1;
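// Ceiling division so partial tiles at the matrix edges still get a thread block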
dimGrid.x = (P.width / dimBlock.x) + ((P.width % dimBlock.x) ? 1:0 );
dimGrid.y = (P.height / dimBlock.y) + ((P.height % dimBlock.y) ? 1:0 );
dimGrid.z = 1;
printf("Setting up a %d x %d grid of thread blocks. \n", dimGrid.x, dimGrid.y);
struct timeval start, stop;
gettimeofday(&start, NULL);
// Launch the device computation threads!
MatrixMulKernel<<<dimGrid,dimBlock>>>(Md,Nd,Pd);
cudaThreadSynchronize();
gettimeofday(&stop, NULL);
printf("Execution time = %fs. \n", (float)(stop.tv_sec - start.tv_sec + (stop.tv_usec - start.tv_usec)/(float)1000000));
checkCUDAError("Error in kernel");
// Read P from the device
CopyFromDeviceMatrix(P, Pd);
// Free device matrices
FreeDeviceMatrix(&Md);
FreeDeviceMatrix(&Nd);
FreeDeviceMatrix(&Pd);
}
// Allocate a device matrix of same size as M.
Matrix
AllocateDeviceMatrix(const Matrix M)
{
Matrix Mdevice = M;
int size = M.width * M.height * sizeof(float);
cudaMalloc((void**)&Mdevice.elements, size);
return Mdevice;
}
// Allocate a device matrix of dimensions height*width
// If init == 0, initialize to all zeroes.
// If init == 1, perform random initialization.
// If init == 2, initialize matrix parameters, but do not allocate memory
Matrix
AllocateMatrix(int height, int width, int init)
{
Matrix M;
M.width = M.pitch = width;
M.height = height;
int size = M.width * M.height;
M.elements = NULL;
// don't allocate memory on option 2
if(init == 2)
return M;
M.elements = (float*) malloc(size*sizeof(float));
for(unsigned int i = 0; i < M.height * M.width; i++){
M.elements[i] = (init == 0) ? (0.0f) : ((rand()*3 / (float)RAND_MAX));
}
return M;
}
// Copy a host matrix to a device matrix.
void
CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost)
{
int size = Mhost.width * Mhost.height * sizeof(float);
Mdevice.height = Mhost.height;
Mdevice.width = Mhost.width;
Mdevice.pitch = Mhost.pitch;
cudaMemcpy(Mdevice.elements, Mhost.elements, size, cudaMemcpyHostToDevice);
}
// Copy a device matrix to a host matrix.
void
CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice)
{
int size = Mdevice.width * Mdevice.height * sizeof(float);
cudaMemcpy(Mhost.elements, Mdevice.elements, size, cudaMemcpyDeviceToHost);
}
// Free a device matrix.
void
FreeDeviceMatrix(Matrix* M)
{
cudaFree(M->elements);
M->elements = NULL;
}
// Free a host Matrix
void
FreeMatrix(Matrix* M)
{
free(M->elements);
M->elements = NULL;
}
void
checkCUDAError(const char *msg)
{
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err)
{
printf("CUDA ERROR: %s (%s).\n", msg, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
int
checkResults(float *reference, float *gpu_result, int num_elements, float threshold)
{
int checkMark = 1;
float epsilon = 0.0;
for(int i = 0; i < num_elements; i++)
if(fabsf((reference[i] - gpu_result[i])/reference[i]) > threshold){
checkMark = 0;
}
for(int i = 0; i < num_elements; i++)
if(fabsf((reference[i] - gpu_result[i])/reference[i]) > epsilon){
epsilon = fabsf((reference[i] - gpu_result[i])/reference[i]);
}
printf("Max epsilon = %f. \n", epsilon);
return checkMark;
}
|
444775295652ea1e350c05c80d0940a83f7045c8.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//***************************************************************************************/
//
// Based on Pointnet2 Library (MIT License):
// https://github.com/sshaoshuai/Pointnet2.PyTorch
//
// Copyright (c) 2019 Shaoshuai Shi
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//
//***************************************************************************************/
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "ATen/hip/HIPContext.h"
#include "open3d/ml/contrib/BallQuery.cuh"
#include "open3d/ml/contrib/cuda_utils.h"
#include "open3d/ml/pytorch/pointnet/BallQueryKernel.h"
using namespace open3d::ml::contrib;
void ball_query_launcher(int b,
int n,
int m,
float radius,
int nsample,
const float *new_xyz,
const float *xyz,
int *idx) {
// new_xyz: (B, M, 3)
// xyz: (B, N, 3)
// output:
// idx: (B, M, nsample)
hipError_t err;
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
dim3 blocks(DIVUP(m, THREADS_PER_BLOCK),
b); // blockIdx.x(col), blockIdx.y(row)
dim3 threads(THREADS_PER_BLOCK);
hipLaunchKernelGGL(( ball_query_kernel), dim3(blocks), dim3(threads), 0, stream, b, n, m, radius, nsample,
new_xyz, xyz, idx);
// hipDeviceSynchronize(); // for using printf in kernel function
err = hipGetLastError();
if (hipSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", hipGetErrorString(err));
exit(-1);
}
}
|
444775295652ea1e350c05c80d0940a83f7045c8.cu
|
//***************************************************************************************/
//
// Based on Pointnet2 Library (MIT License):
// https://github.com/sshaoshuai/Pointnet2.PyTorch
//
// Copyright (c) 2019 Shaoshuai Shi
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//
//***************************************************************************************/
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "ATen/cuda/CUDAContext.h"
#include "open3d/ml/contrib/BallQuery.cuh"
#include "open3d/ml/contrib/cuda_utils.h"
#include "open3d/ml/pytorch/pointnet/BallQueryKernel.h"
using namespace open3d::ml::contrib;
void ball_query_launcher(int b,
int n,
int m,
float radius,
int nsample,
const float *new_xyz,
const float *xyz,
int *idx) {
// new_xyz: (B, M, 3)
// xyz: (B, N, 3)
// output:
// idx: (B, M, nsample)
cudaError_t err;
auto stream = at::cuda::getCurrentCUDAStream();
dim3 blocks(DIVUP(m, THREADS_PER_BLOCK),
b); // blockIdx.x(col), blockIdx.y(row)
dim3 threads(THREADS_PER_BLOCK);
ball_query_kernel<<<blocks, threads, 0, stream>>>(b, n, m, radius, nsample,
new_xyz, xyz, idx);
// cudaDeviceSynchronize(); // for using printf in kernel function
err = cudaGetLastError();
if (cudaSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
exit(-1);
}
}
|
e91ad59e4132e564908dcb9b280e7a17339848fb.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.1.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date August 2016
@generated from magmablas/zlarfx.cu, normal z -> d, Tue Aug 30 09:38:31 2016
*/
#include "magma_internal.h"
#include "commonblas_d.h"
#include "magma_templates.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
#define BLOCK_SIZEx 32
#define BLOCK_SIZEy 16
/******************************************************************************/
__global__
void magma_dlarfx_kernel( int m, double *v, double *tau,
double *c, int ldc, double *xnorm,
double *T, int it )
{
if ( !MAGMA_D_EQUAL(*tau, MAGMA_D_ZERO) ) {
const int tx = threadIdx.x;
//double *dc = c + (blockIdx.x-it-1) * ldc;
double *dc = c + (blockIdx.x) * ldc;
__shared__ double sum[ BLOCK_SIZE ];
double lsum;
/* NOTE HERE C is the C at position C(i, 0)
* if blockIdx.x < it it performs the V(i:n,i)' * V(i:n,1:i-1)' used for computing T
* if blockIdx.x > it it performs w := v**H * C */
lsum = MAGMA_D_ZERO;
for (int j = tx; j < m; j += BLOCK_SIZE) {
if (j == 0) {
lsum += MAGMA_D_MUL( MAGMA_D_ONE, dc[j] );
v[j] = MAGMA_D_ONE;
}
else
lsum += MAGMA_D_MUL( MAGMA_D_CONJ( v[j] ), dc[j] );
}
sum[tx] = lsum;
magma_sum_reduce< BLOCK_SIZE >( tx, sum );
/* C := C - v * w */
__syncthreads();
double z__1 = - MAGMA_D_CONJ(*tau) * sum[0];
if (blockIdx.x > it) {
for (int j = m-tx-1; j >= 0; j -= BLOCK_SIZE)
dc[j] += z__1 * v[j];
__syncthreads();
/* Adjust the rest of the column norms */
/*
if (tx == 0) {
double temp = MAGMA_D_ABS( dc[0] ) / xnorm[blockIdx.x-it-1];
temp = (temp + 1.) * (1. - temp);
xnorm[blockIdx.x-it-1] = xnorm[blockIdx.x-it-1] * sqrt(temp);
}
*/
}
else
{
if (blockIdx.x == it)
*(T+it) = *tau;
else
*(T+blockIdx.x) = MAGMA_D_CONJ(z__1);
}
}
else if (blockIdx.x <= it)// in case tau is zero put the corresponding column of T to zero
{
*(T+blockIdx.x) = MAGMA_D_ZERO;
}
}
/******************************************************************************/
extern "C"
__global__
void magma_dtrmv_kernel(const double *T, int ldt, double *t)
{
const int tx = threadIdx.x;
T += tx;
__shared__ double tlocal[ BLOCK_SIZE ];
double res = MAGMA_D_MAKE(0., 0.);
tlocal[tx] = t[tx];
__syncthreads();
#pragma unroll
for (int j=0; j < blockDim.x; j++)
res += T[j*ldt]*tlocal[j];
t[tx] = res;
}
/******************************************************************************/
extern "C"
__global__
void magma_dtrmv_kernel2(const double *T, int ldt, double *t,
double *y, double *tau)
{
const int tx = threadIdx.x;
T += blockIdx.x;
__shared__ double sum[ 128 ];
sum[tx] = T[tx*ldt]*t[tx];
magma_sum_reduce_n(blockDim.x, tx, sum);
__syncthreads();
if (tx == 0) {
y[blockIdx.x] = sum[0];
if (blockIdx.x == 0)
y[gridDim.x] = tau[0];
}
}
/******************************************************************************/
extern "C"
__global__
void magma_dtrmv_tkernel(double *T, int ldt, double *t, double *y)
{
const int tx = threadIdx.x;
T += blockIdx.x*ldt;
__shared__ double sum[ 128 ];
sum[tx] = MAGMA_D_CONJ(T[tx])*t[tx];
magma_sum_reduce_n(blockDim.x, tx, sum);
__syncthreads();
if (tx == 0)
y[blockIdx.x] = sum[0];
}
/******************************************************************************/
/*
Apply a real elementary reflector H to a real M-by-N
matrix C from the left. H is represented in the form
H = I - tau * v * v**H
where tau is a real scalar and v is a real vector.
If tau = 0, then H is taken to be the unit matrix.
To apply H**H (the conjugate transpose of H), supply conjg(tau)
instead of tau.
The norms of v(:, 1:n) are given as input in xnorm(1:n). On exit, the norms
are adjusted to hold the norms of v(2:m,2:n). This is a difference with the
LAPACK's dlarf routine.
*/
extern "C" void
magma_dlarfx_gpu_q(
magma_int_t m, magma_int_t n,
magmaDouble_ptr v,
magmaDouble_ptr tau,
magmaDouble_ptr C, magma_int_t ldc,
magmaDouble_ptr xnorm,
magmaDouble_ptr dT, magma_int_t iter,
magmaDouble_ptr work,
magma_queue_t queue )
{
magma_int_t N = n + iter + 1;
if (iter == 0) {
hipLaunchKernelGGL(( magma_dlarfx_kernel)
, dim3(N), dim3(BLOCK_SIZE), 0, queue->cuda_stream() ,
m, v, tau, C, ldc, xnorm, dT+iter*N, iter );
}
else {
hipLaunchKernelGGL(( magma_dlarfx_kernel)
, dim3(N), dim3(BLOCK_SIZE), 0, queue->cuda_stream() ,
m, v, tau, C, ldc, xnorm, work, iter );
}
if (iter > 0) {
//magma_dtrmv_kernel
// <<< 1, iter, 0, queue->cuda_stream() >>>
// ( dT, N, dT+iter*N);
hipLaunchKernelGGL(( magma_dtrmv_kernel2)
, dim3(iter), dim3(iter), 0, queue->cuda_stream() ,
dT, N, work, dT+iter*N, tau );
}
}
|
e91ad59e4132e564908dcb9b280e7a17339848fb.cu
|
/*
-- MAGMA (version 2.1.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date August 2016
@generated from magmablas/zlarfx.cu, normal z -> d, Tue Aug 30 09:38:31 2016
*/
#include "magma_internal.h"
#include "commonblas_d.h"
#include "magma_templates.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
#define BLOCK_SIZEx 32
#define BLOCK_SIZEy 16
/******************************************************************************/
__global__
void magma_dlarfx_kernel( int m, double *v, double *tau,
double *c, int ldc, double *xnorm,
double *T, int it )
{
if ( !MAGMA_D_EQUAL(*tau, MAGMA_D_ZERO) ) {
const int tx = threadIdx.x;
//double *dc = c + (blockIdx.x-it-1) * ldc;
double *dc = c + (blockIdx.x) * ldc;
__shared__ double sum[ BLOCK_SIZE ];
double lsum;
/* NOTE HERE C is the C at position C(i, 0)
* if blockIdx.x < it it performs the V(i:n,i)' * V(i:n,1:i-1)' used for computing T
* if blockIdx.x > it it performs w := v**H * C */
lsum = MAGMA_D_ZERO;
for (int j = tx; j < m; j += BLOCK_SIZE) {
if (j == 0) {
lsum += MAGMA_D_MUL( MAGMA_D_ONE, dc[j] );
v[j] = MAGMA_D_ONE;
}
else
lsum += MAGMA_D_MUL( MAGMA_D_CONJ( v[j] ), dc[j] );
}
sum[tx] = lsum;
magma_sum_reduce< BLOCK_SIZE >( tx, sum );
/* C := C - v * w */
__syncthreads();
double z__1 = - MAGMA_D_CONJ(*tau) * sum[0];
if (blockIdx.x > it) {
for (int j = m-tx-1; j >= 0; j -= BLOCK_SIZE)
dc[j] += z__1 * v[j];
__syncthreads();
/* Adjust the rest of the column norms */
/*
if (tx == 0) {
double temp = MAGMA_D_ABS( dc[0] ) / xnorm[blockIdx.x-it-1];
temp = (temp + 1.) * (1. - temp);
xnorm[blockIdx.x-it-1] = xnorm[blockIdx.x-it-1] * sqrt(temp);
}
*/
}
else
{
if (blockIdx.x == it)
*(T+it) = *tau;
else
*(T+blockIdx.x) = MAGMA_D_CONJ(z__1);
}
}
else if (blockIdx.x <= it)// in case tau is zero put the corresponding column of T to zero
{
*(T+blockIdx.x) = MAGMA_D_ZERO;
}
}
/******************************************************************************/
extern "C"
__global__
void magma_dtrmv_kernel(const double *T, int ldt, double *t)
{
const int tx = threadIdx.x;
T += tx;
__shared__ double tlocal[ BLOCK_SIZE ];
double res = MAGMA_D_MAKE(0., 0.);
tlocal[tx] = t[tx];
__syncthreads();
#pragma unroll
for (int j=0; j < blockDim.x; j++)
res += T[j*ldt]*tlocal[j];
t[tx] = res;
}
/******************************************************************************/
extern "C"
__global__
void magma_dtrmv_kernel2(const double *T, int ldt, double *t,
double *y, double *tau)
{
const int tx = threadIdx.x;
T += blockIdx.x;
__shared__ double sum[ 128 ];
sum[tx] = T[tx*ldt]*t[tx];
magma_sum_reduce_n(blockDim.x, tx, sum);
__syncthreads();
if (tx == 0) {
y[blockIdx.x] = sum[0];
if (blockIdx.x == 0)
y[gridDim.x] = tau[0];
}
}
/******************************************************************************/
extern "C"
__global__
void magma_dtrmv_tkernel(double *T, int ldt, double *t, double *y)
{
const int tx = threadIdx.x;
T += blockIdx.x*ldt;
__shared__ double sum[ 128 ];
sum[tx] = MAGMA_D_CONJ(T[tx])*t[tx];
magma_sum_reduce_n(blockDim.x, tx, sum);
__syncthreads();
if (tx == 0)
y[blockIdx.x] = sum[0];
}
/******************************************************************************/
/*
Apply a real elementary reflector H to a real M-by-N
matrix C from the left. H is represented in the form
H = I - tau * v * v**H
where tau is a real scalar and v is a real vector.
If tau = 0, then H is taken to be the unit matrix.
To apply H**H (the conjugate transpose of H), supply conjg(tau)
instead of tau.
The norms of v(:, 1:n) are given as input in xnorm(1:n). On exit, the norms
are adjusted to hold the norms of v(2:m,2:n). This is a difference with the
LAPACK's dlarf routine.
*/
extern "C" void
magma_dlarfx_gpu_q(
magma_int_t m, magma_int_t n,
magmaDouble_ptr v,
magmaDouble_ptr tau,
magmaDouble_ptr C, magma_int_t ldc,
magmaDouble_ptr xnorm,
magmaDouble_ptr dT, magma_int_t iter,
magmaDouble_ptr work,
magma_queue_t queue )
{
magma_int_t N = n + iter + 1;
if (iter == 0) {
magma_dlarfx_kernel
<<< N, BLOCK_SIZE, 0, queue->cuda_stream() >>>
( m, v, tau, C, ldc, xnorm, dT+iter*N, iter );
}
else {
magma_dlarfx_kernel
<<< N, BLOCK_SIZE, 0, queue->cuda_stream() >>>
( m, v, tau, C, ldc, xnorm, work, iter );
}
if (iter > 0) {
//magma_dtrmv_kernel
// <<< 1, iter, 0, queue->cuda_stream() >>>
// ( dT, N, dT+iter*N);
magma_dtrmv_kernel2
<<< iter, iter, 0, queue->cuda_stream() >>>
( dT, N, work, dT+iter*N, tau );
}
}
|
61cdff705c3a552dd57661c2d544cf5a5c5a6851.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void gpu_reduce(int *c, int size)
{
/* Necessary identifiers */
/*int IDX_Thread = threadIdx.x;
int IDY_Thread = threadIdx.y;
int IDX_block = blockIdx.x;
int IDY_block = blockIdx.y;
int shapeGrid_X = gridDim.x;
int threads_per_block = blockDim.x * blockDim.y;
int position = threads_per_block * ((IDY_block * shapeGrid_X)+IDX_block)+((IDY_Thread*blockDim.x)+IDX_Thread);*/
int position = blockDim.x * blockDim.y * ((blockIdx.y * gridDim.x)+blockIdx.x)+((threadIdx.y*blockDim.x)+threadIdx.x);
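// One step of an iterative max-reduction: each thread keeps the larger of its element and its partner in the upper half (the host presumably halves 'size' between launches)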
if(position<size){
if(size%2 != 0)
{
if(c[position]<c[size-1])
{
c[position]=c[size-1];
}
}else{
if(c[position]<c[position+size/2])
{
c[position]=c[position+size/2];
}
}
}
}
|
61cdff705c3a552dd57661c2d544cf5a5c5a6851.cu
|
#include "includes.h"
__global__ void gpu_reduce(int *c, int size)
{
/* Necessary identifiers */
/*int IDX_Thread = threadIdx.x;
int IDY_Thread = threadIdx.y;
int IDX_block = blockIdx.x;
int IDY_block = blockIdx.y;
int shapeGrid_X = gridDim.x;
int threads_per_block = blockDim.x * blockDim.y;
int position = threads_per_block * ((IDY_block * shapeGrid_X)+IDX_block)+((IDY_Thread*blockDim.x)+IDX_Thread);*/
int position = blockDim.x * blockDim.y * ((blockIdx.y * gridDim.x)+blockIdx.x)+((threadIdx.y*blockDim.x)+threadIdx.x);
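// One step of an iterative max-reduction: each thread keeps the larger of its element and its partner in the upper half (the host presumably halves 'size' between launches)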
if(position<size){
if(size%2 != 0)
{
if(c[position]<c[size-1])
{
c[position]=c[size-1];
}
}else{
if(c[position]<c[position+size/2])
{
c[position]=c[position+size/2];
}
}
}
}
|
aef4d82487b186a0b3a845eebb5530a97b62e481.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Author : Kim, KyoungHo ([email protected])
Ki-Hwan Kim ([email protected])
Written date : 2009. 6. 11
last update :
Copyright : GNU GPL
*/
__global__ void update_src( int Nx, int Ny, int Nz, int tstep, float *F ) {
int idx, ijk;
idx = threadIdx.x;
//ijk = (idx+1)*Ny*Nz + (Ny/2)*Nz + (Nz/2);
//ijk = (idx+1)*Ny*Nz + (Ny/2 - 30)*Nz + (Nz/2 - 50);
//ijk = (Nx/2 - 30)*Ny*Nz + (idx)*Nz + (Nz/2 - 50);
ijk = (Nx/2-30)*Ny*Nz + (Ny/2-50)*Nz + idx;
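// inject a time-harmonic source along a line of cells at fixed (x, y), one cell per thread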
F[ijk] += sin(0.1*tstep);
}
|
aef4d82487b186a0b3a845eebb5530a97b62e481.cu
|
/*
Author : Kim, KyoungHo ([email protected])
Ki-Hwan Kim ([email protected])
Written date : 2009. 6. 11
last update :
Copyright : GNU GPL
*/
__global__ void update_src( int Nx, int Ny, int Nz, int tstep, float *F ) {
int idx, ijk;
idx = threadIdx.x;
//ijk = (idx+1)*Ny*Nz + (Ny/2)*Nz + (Nz/2);
//ijk = (idx+1)*Ny*Nz + (Ny/2 - 30)*Nz + (Nz/2 - 50);
//ijk = (Nx/2 - 30)*Ny*Nz + (idx)*Nz + (Nz/2 - 50);
ijk = (Nx/2-30)*Ny*Nz + (Ny/2-50)*Nz + idx;
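// inject a time-harmonic source along a line of cells at fixed (x, y), one cell per thread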
F[ijk] += sin(0.1*tstep);
}
|
39d3495ad5c884d4228ba47d0af1a4f656cc2e69.hip
|
// !!! This is a file automatically generated by hipify!!!
// From CUDA for Engineers
// Listing 5.5: dd_1d_shared/kernel.cu
#include <hip/hip_runtime.h>
#include <iostream>
#include <fstream>
#define TPB 64
#define RAD 1
__global__
void ddKernel(float *d_out, const float *d_in, int size, float h)
{
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= size) return;
if (i == 0 || i == (size-1)) { d_out[i] = 0.0f; return; } // boundary points: zero the output element, not the pointer
const int s_idx = threadIdx.x + RAD;
extern __shared__ float s_in[];
s_in[s_idx] = d_in[i];
// halo cells
if (threadIdx.x < RAD) {
s_in[s_idx - RAD] = d_in[i - RAD];
s_in[s_idx + blockDim.x] = d_in[i + blockDim.x];
}
// sync & out
__syncthreads();
d_out[i] = (s_in[s_idx+1] + s_in[s_idx-1] - 2.0f*s_in[s_idx]) / (h*h);
}
void ddParallel(float *out, const float *in, int n, float h)
{
float *d_out = 0;
float *d_in = 0;
hipMalloc(&d_out, n*sizeof(float));
hipMalloc(&d_in, n*sizeof(float));
hipMemcpy(d_in, in, n*sizeof(float), hipMemcpyHostToDevice);
// set shared memory size in bytes
const size_t smemsize = (TPB + 2*RAD)*sizeof(float);
hipLaunchKernelGGL(( ddKernel), dim3((n+TPB-1)/TPB), dim3(TPB), smemsize, 0, d_out, d_in, n, h);
hipMemcpy(out, d_out, n*sizeof(float), hipMemcpyDeviceToHost);
hipFree(d_out);
hipFree(d_in);
}
int main(){
std::cout << "dd_1d_shared\n";
const float PI = 3.1415926f;
const int N = 150;
const float h = 2*PI/N;
float x[N] = {0.0f};
float u[N] = {0.0f};
float result_parallel[N] = {0.0f};
for (int i = 0; i < N; i++) {
x[i] = i * (2*PI/N);
u[i] = sinf(x[i]);
}
ddParallel(result_parallel, u, N, h);
std::ofstream outfile;
outfile.open("results.csv");
// x[i] u[i] d2u/d2x[i] u[i] + d2u/d2x[i]
// u = sin(x) d2u/d2x = -sin(x) u + d2u/d2x = 0.0
for (int i = 0; i < N; i++) {
outfile << x[i] << ", " << u[i] << ", " <<
result_parallel[i] << ", " << result_parallel[i] + u[i] << "\n";
}
outfile.close();
}
|
39d3495ad5c884d4228ba47d0af1a4f656cc2e69.cu
|
// From CUDA for Engineers
// Listing 5.5: dd_1d_shared/kernel.cu
#include <cuda_runtime.h>
#include <iostream>
#include <fstream>
#define TPB 64
#define RAD 1
__global__
void ddKernel(float *d_out, const float *d_in, int size, float h)
{
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= size) return;
if (i == 0 || i == (size-1)) { d_out[i] = 0.0f; return; } // boundary points: zero the output element, not the pointer
const int s_idx = threadIdx.x + RAD;
extern __shared__ float s_in[];
s_in[s_idx] = d_in[i];
// halo cells
if (threadIdx.x < RAD) {
s_in[s_idx - RAD] = d_in[i - RAD];
s_in[s_idx + blockDim.x] = d_in[i + blockDim.x];
}
// sync & out
__syncthreads();
d_out[i] = (s_in[s_idx+1] + s_in[s_idx-1] - 2.0f*s_in[s_idx]) / (h*h);
}
void ddParallel(float *out, const float *in, int n, float h)
{
float *d_out = 0;
float *d_in = 0;
cudaMalloc(&d_out, n*sizeof(float));
cudaMalloc(&d_in, n*sizeof(float));
cudaMemcpy(d_in, in, n*sizeof(float), cudaMemcpyHostToDevice);
// set shared memory size in bytes
const size_t smemsize = (TPB + 2*RAD)*sizeof(float);
ddKernel<<<(n+TPB-1)/TPB, TPB, smemsize>>>(d_out, d_in, n, h);
cudaMemcpy(out, d_out, n*sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(d_out);
cudaFree(d_in);
}
int main(){
std::cout << "dd_1d_shared\n";
const float PI = 3.1415926f;
const int N = 150;
const float h = 2*PI/N;
float x[N] = {0.0f};
float u[N] = {0.0f};
float result_parallel[N] = {0.0f};
for (int i = 0; i < N; i++) {
x[i] = i * (2*PI/N);
u[i] = sinf(x[i]);
}
ddParallel(result_parallel, u, N, h);
std::ofstream outfile;
outfile.open("results.csv");
// x[i] u[i] d2u/d2x[i] u[i] + d2u/d2x[i]
// u = sin(x) d2u/d2x = -sin(x) u + d2u/d2x = 0.0
for (int i = 0; i < N; i++) {
outfile << x[i] << ", " << u[i] << ", " <<
result_parallel[i] << ", " << result_parallel[i] + u[i] << "\n";
}
outfile.close();
}
|
d2865f1dcea2bff7894c493496f8d6a67f4a3ce3.hip
|
// !!! This is a file automatically generated by hipify!!!
// ----------------------------------------------------------------------------
// - Open3D: www.open3d.org -
// ----------------------------------------------------------------------------
// The MIT License (MIT)
//
// Copyright (c) 2018-2021 www.open3d.org
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
// ----------------------------------------------------------------------------
//
#include "ATen/hip/HIPContext.h"
#include "open3d/core/nns/FixedRadiusSearchImpl.cuh"
#include "open3d/core/nns/NeighborSearchCommon.h"
#include "open3d/ml/pytorch/TorchHelper.h"
#include "open3d/ml/pytorch/misc/NeighborSearchAllocator.h"
#include "torch/script.h"
using namespace open3d::core::nns;
template <class T>
void FixedRadiusSearchCUDA(const torch::Tensor& points,
const torch::Tensor& queries,
double radius,
const torch::Tensor& points_row_splits,
const torch::Tensor& queries_row_splits,
const torch::Tensor& hash_table_splits,
const torch::Tensor& hash_table_index,
const torch::Tensor& hash_table_cell_splits,
const Metric metric,
const bool ignore_query_point,
const bool return_distances,
torch::Tensor& neighbors_index,
torch::Tensor& neighbors_row_splits,
torch::Tensor& neighbors_distance) {
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
auto cuda_device_props = at::cuda::getCurrentDeviceProperties();
const int texture_alignment = cuda_device_props->textureAlignment;
auto device = points.device().type();
auto device_idx = points.device().index();
NeighborSearchAllocator<T> output_allocator(device, device_idx);
void* temp_ptr = nullptr;
size_t temp_size = 0;
// determine temp_size
open3d::core::nns::impl::FixedRadiusSearchCUDA(
stream, temp_ptr, temp_size, texture_alignment,
neighbors_row_splits.data_ptr<int64_t>(), points.size(0),
points.data_ptr<T>(), queries.size(0), queries.data_ptr<T>(),
T(radius), points_row_splits.size(0),
points_row_splits.data_ptr<int64_t>(), queries_row_splits.size(0),
queries_row_splits.data_ptr<int64_t>(),
(uint32_t*)hash_table_splits.data_ptr<int32_t>(),
hash_table_cell_splits.size(0),
(uint32_t*)hash_table_cell_splits.data_ptr<int32_t>(),
(uint32_t*)hash_table_index.data_ptr<int32_t>(), metric,
ignore_query_point, return_distances, output_allocator);
auto temp_tensor = CreateTempTensor(temp_size, points.device(), &temp_ptr);
// actually run the search
open3d::core::nns::impl::FixedRadiusSearchCUDA(
stream, temp_ptr, temp_size, texture_alignment,
neighbors_row_splits.data_ptr<int64_t>(), points.size(0),
points.data_ptr<T>(), queries.size(0), queries.data_ptr<T>(),
T(radius), points_row_splits.size(0),
points_row_splits.data_ptr<int64_t>(), queries_row_splits.size(0),
queries_row_splits.data_ptr<int64_t>(),
(uint32_t*)hash_table_splits.data_ptr<int32_t>(),
hash_table_cell_splits.size(0),
(uint32_t*)hash_table_cell_splits.data_ptr<int32_t>(),
(uint32_t*)hash_table_index.data_ptr<int32_t>(), metric,
ignore_query_point, return_distances, output_allocator);
neighbors_index = output_allocator.NeighborsIndex();
neighbors_distance = output_allocator.NeighborsDistance();
}
#define INSTANTIATE(T) \
template void FixedRadiusSearchCUDA<T>( \
const torch::Tensor& points, const torch::Tensor& queries, \
double radius, const torch::Tensor& points_row_splits, \
const torch::Tensor& queries_row_splits, \
const torch::Tensor& hash_table_splits, \
const torch::Tensor& hash_table_index, \
const torch::Tensor& hash_table_cell_splits, const Metric metric, \
const bool ignore_query_point, const bool return_distances, \
torch::Tensor& neighbors_index, \
torch::Tensor& neighbors_row_splits, \
torch::Tensor& neighbors_distance);
INSTANTIATE(float)
|
d2865f1dcea2bff7894c493496f8d6a67f4a3ce3.cu
|
// ----------------------------------------------------------------------------
// - Open3D: www.open3d.org -
// ----------------------------------------------------------------------------
// The MIT License (MIT)
//
// Copyright (c) 2018-2021 www.open3d.org
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
// ----------------------------------------------------------------------------
//
#include "ATen/cuda/CUDAContext.h"
#include "open3d/core/nns/FixedRadiusSearchImpl.cuh"
#include "open3d/core/nns/NeighborSearchCommon.h"
#include "open3d/ml/pytorch/TorchHelper.h"
#include "open3d/ml/pytorch/misc/NeighborSearchAllocator.h"
#include "torch/script.h"
using namespace open3d::core::nns;
template <class T>
void FixedRadiusSearchCUDA(const torch::Tensor& points,
const torch::Tensor& queries,
double radius,
const torch::Tensor& points_row_splits,
const torch::Tensor& queries_row_splits,
const torch::Tensor& hash_table_splits,
const torch::Tensor& hash_table_index,
const torch::Tensor& hash_table_cell_splits,
const Metric metric,
const bool ignore_query_point,
const bool return_distances,
torch::Tensor& neighbors_index,
torch::Tensor& neighbors_row_splits,
torch::Tensor& neighbors_distance) {
auto stream = at::cuda::getCurrentCUDAStream();
auto cuda_device_props = at::cuda::getCurrentDeviceProperties();
const int texture_alignment = cuda_device_props->textureAlignment;
auto device = points.device().type();
auto device_idx = points.device().index();
NeighborSearchAllocator<T> output_allocator(device, device_idx);
void* temp_ptr = nullptr;
size_t temp_size = 0;
// determine temp_size
open3d::core::nns::impl::FixedRadiusSearchCUDA(
stream, temp_ptr, temp_size, texture_alignment,
neighbors_row_splits.data_ptr<int64_t>(), points.size(0),
points.data_ptr<T>(), queries.size(0), queries.data_ptr<T>(),
T(radius), points_row_splits.size(0),
points_row_splits.data_ptr<int64_t>(), queries_row_splits.size(0),
queries_row_splits.data_ptr<int64_t>(),
(uint32_t*)hash_table_splits.data_ptr<int32_t>(),
hash_table_cell_splits.size(0),
(uint32_t*)hash_table_cell_splits.data_ptr<int32_t>(),
(uint32_t*)hash_table_index.data_ptr<int32_t>(), metric,
ignore_query_point, return_distances, output_allocator);
auto temp_tensor = CreateTempTensor(temp_size, points.device(), &temp_ptr);
// actually run the search
open3d::core::nns::impl::FixedRadiusSearchCUDA(
stream, temp_ptr, temp_size, texture_alignment,
neighbors_row_splits.data_ptr<int64_t>(), points.size(0),
points.data_ptr<T>(), queries.size(0), queries.data_ptr<T>(),
T(radius), points_row_splits.size(0),
points_row_splits.data_ptr<int64_t>(), queries_row_splits.size(0),
queries_row_splits.data_ptr<int64_t>(),
(uint32_t*)hash_table_splits.data_ptr<int32_t>(),
hash_table_cell_splits.size(0),
(uint32_t*)hash_table_cell_splits.data_ptr<int32_t>(),
(uint32_t*)hash_table_index.data_ptr<int32_t>(), metric,
ignore_query_point, return_distances, output_allocator);
neighbors_index = output_allocator.NeighborsIndex();
neighbors_distance = output_allocator.NeighborsDistance();
}
#define INSTANTIATE(T) \
template void FixedRadiusSearchCUDA<T>( \
const torch::Tensor& points, const torch::Tensor& queries, \
double radius, const torch::Tensor& points_row_splits, \
const torch::Tensor& queries_row_splits, \
const torch::Tensor& hash_table_splits, \
const torch::Tensor& hash_table_index, \
const torch::Tensor& hash_table_cell_splits, const Metric metric, \
const bool ignore_query_point, const bool return_distances, \
torch::Tensor& neighbors_index, \
torch::Tensor& neighbors_row_splits, \
torch::Tensor& neighbors_distance);
INSTANTIATE(float)
|
ea2fed9b0120c3b93abb1f1ebdff4a3772e23513.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "createLaplacianKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *grid = NULL;
hipMalloc(&grid, XSIZE*YSIZE*sizeof(float));
float *kernel = NULL;
hipMalloc(&kernel, XSIZE*YSIZE*sizeof(float));
int nrDimensions = 1;
int nrGridElements = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
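// The two loops above round iXSIZE and iYSIZE up to multiples of BLOCKX/BLOCKY
// so the launch grid below fully covers the matrix.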
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
createLaplacianKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, grid,kernel,nrDimensions,nrGridElements);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
createLaplacianKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, grid,kernel,nrDimensions,nrGridElements);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
createLaplacianKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, grid,kernel,nrDimensions,nrGridElements);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
ea2fed9b0120c3b93abb1f1ebdff4a3772e23513.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "createLaplacianKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *grid = NULL;
cudaMalloc(&grid, XSIZE*YSIZE*sizeof(float));
float *kernel = NULL;
cudaMalloc(&kernel, XSIZE*YSIZE*sizeof(float));
int nrDimensions = 1;
int nrGridElements = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
createLaplacianKernel<<<gridBlock,threadBlock>>>(grid,kernel,nrDimensions,nrGridElements);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
createLaplacianKernel<<<gridBlock,threadBlock>>>(grid,kernel,nrDimensions,nrGridElements);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
createLaplacianKernel<<<gridBlock,threadBlock>>>(grid,kernel,nrDimensions,nrGridElements);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
df2ecf6c17ca070f53193b729cb952638a2f5197.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "gpu-map.hpp"
#include <hip/hip_runtime_api.h>
#include <math.h>
#include <stdio.h>
// The d_f kernel implements the function f applied to an arbitrary value x.
// It is declared with the __device__ qualifier,
// so it can only be called from code that is already executing
// on the GPU.
__device__ float d_f(float x, int M) {
float s = 0.0;
// 10000
for (int k = 1; k <= M; ++k) {
s += sinf(2 * float(M_PI) * k * x);
}
return s;
}
// The map_f kernel makes each thread
// execute the d_f function with the appropriate argument.
__global__ void map_f(float x[], int M) {
unsigned i = threadIdx.x + blockIdx.x * blockDim.x;
x[i] = d_f(x[i],M);
}
// gpu_map does all the bookkeeping needed to run code on the GPU:
// it allocates memory, copies the array, launches the kernel,
// copies the result back and frees the memory.
void gpu_map(float x[], unsigned n, int M) {
int threadsPerBlock = 64;
int blocksPerGrid = n/threadsPerBlock;
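// Note: this assumes n is an exact multiple of threadsPerBlock; the integer
// division drops any remainder and map_f performs no bounds check.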
// Variables to handle the start and stop events
hipEvent_t start, stop;
// Variable for the elapsed GPU time
float gpuTime;
// Device buffer for the GPU computation
float *d;
// Allocate memory on the GPU
hipMalloc( (void**) &d, n*sizeof(float));
// Copy the array to GPU memory
hipMemcpy(d, x, n*sizeof(float), hipMemcpyHostToDevice);
// Create the two events that mark the start and end of processing
hipEventCreate(&start);
hipEventCreate(&stop);
// Record the start of processing
hipEventRecord(start, 0);
// Kernel call
hipLaunchKernelGGL(( map_f), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d,M);
// Synchronize once the kernel call has finished
hipDeviceSynchronize();
// Record the end of processing
hipEventRecord(stop, 0);
// Wait for the stop event to complete
hipEventSynchronize(stop);
// Get the elapsed time in milliseconds
hipEventElapsedTime(&gpuTime, start, stop);
// Destroy both events
hipEventDestroy(start);
hipEventDestroy(stop);
// Copy the result back
hipMemcpy(x,d, n * sizeof(float), hipMemcpyDeviceToHost);
// Free the memory
hipFree(d);
// Comment out for tests
//printf("Tiempo GPU: %f\n",gpuTime/1000);
// Uncomment for tests
printf("%f ",gpuTime/1000);
}
|
df2ecf6c17ca070f53193b729cb952638a2f5197.cu
|
#include "gpu-map.hpp"
#include <cuda_runtime_api.h>
#include <math.h>
#include <stdio.h>
// El kernel d_f implementa la función f aplicadas a un valor x cualquiera.
// Este kernel está declarado con el calificador __device__,
// por lo que sólo puede ser llamado por código que está siendo ejecutado
// en la GPU.
__device__ float d_f(float x, int M) {
float s = 0.0;
// 10000
for (int k = 1; k <= M; ++k) {
s += sinf(2 * float(M_PI) * k * x);
}
return s;
}
// El kernel map_f se encarga de que cada hebra
// ejecute la función d_f con el argumento apropiado.
__global__ void map_f(float x[], int M) {
unsigned i = threadIdx.x + blockIdx.x * blockDim.x;
x[i] = d_f(x[i],M);
}
// gpu_map debe hacer toda la burocracia de ejecutar código en la GPU:
// reserva memoria, copia el arreglo, lanza el kernel,
// copia el resultado de vuelta y libera la memoria.
void gpu_map(float x[], unsigned n, int M) {
int threadsPerBlock = 64;
int blocksPerGrid = n/threadsPerBlock;
// Variables para manejar los eventos de inicio y fin
cudaEvent_t start, stop;
// Nueva variable para el tiempo en la GPU
float gpuTime;
// Nueva variable para el calculo en la GPU
float *d;
// Reserva de memoria en GPU
cudaMalloc( (void**) &d, n*sizeof(float));
// Copiar el arreglo a la memoria de la GPU
cudaMemcpy(d, x, n*sizeof(float), cudaMemcpyHostToDevice);
// Cramos los dos eventos de inicio y fin de procesamiento
cudaEventCreate(&start);
cudaEventCreate(&stop);
// Establecemos el inicio del procesamiento
cudaEventRecord(start, 0);
// Llamada del kernel
map_f<<<blocksPerGrid, threadsPerBlock>>> (d,M);
// Sincronizamos las hebras una vez que termine el llamado del kernel
cudaThreadSynchronize();
// Establecemos el fin del procesamiento
cudaEventRecord(stop, 0);
// Sincronizamos la detencion del proceso
cudaEventSynchronize(stop);
// Obtenemos el tiempo que demoró el proceso en milisegundos
cudaEventElapsedTime(&gpuTime, start, stop);
// Eliminamos ambos eventos
cudaEventDestroy(start);
cudaEventDestroy(stop);
// Copiar resultado de vuelta
cudaMemcpy(x,d, n * sizeof(float), cudaMemcpyDeviceToHost);
// Liberación de memoria
cudaFree(d);
// Comentar para tests
//printf("Tiempo GPU: %f\n",gpuTime/1000);
// Descomentar para tests
printf("%f ",gpuTime/1000);
}
|
89f7007026e4d701d7a77116909744f9d668bbfd.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "GatherPlugin.hpp"
namespace MNN {
template <typename T>
__global__ void GATHER(const int count, const int outputOutsideStride, const int inputOutsideStride, const int N, const int limit, int insideStride,
const T *inputPtr, const int* indicesPtr, T *outputPtr) {
CUDA_KERNEL_LOOP(index, count) {
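// Decompose the flat index: o selects the outer slice, i the gathered row
// (0..N-1), and s the element within a row of insideStride values.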
int o = index / (N*insideStride);
int o_r = index % (N*insideStride);
int i = o_r / insideStride;
int s = o_r % insideStride;
int outputIdx = outputOutsideStride * o + i * insideStride + s;
int indices = indicesPtr[i];
if (indices < 0 || indices > limit) {
outputPtr[outputIdx] = 0.0f;
}else{
int inputIdx = inputOutsideStride * o + insideStride * indicesPtr[i] + s;
outputPtr[outputIdx] = inputPtr[inputIdx];
}
}
}
hipError_t GatherPlugin::GatherExecute(nvinfer1::DataType dataType, const int count, const float* bottom_data, const int* indices,
float* top_data, hipStream_t stream) {
if (dataType == nvinfer1::DataType::kFLOAT){
hipLaunchKernelGGL(( GATHER<float>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, 0, count, mOutputOutsideStride, mInputOutsideStride, mN, mLimit, mInsideStride, bottom_data, indices, top_data);
}else{
hipLaunchKernelGGL(( GATHER<__half>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, 0, count, mOutputOutsideStride, mInputOutsideStride, mN, mLimit, mInsideStride, (const __half*)bottom_data, indices, (__half*)top_data);
}
return hipPeekAtLastError();
}
}; // namespace MNN
|
89f7007026e4d701d7a77116909744f9d668bbfd.cu
|
#include "GatherPlugin.hpp"
namespace MNN {
template <typename T>
__global__ void GATHER(const int count, const int outputOutsideStride, const int inputOutsideStride, const int N, const int limit, int insideStride,
const T *inputPtr, const int* indicesPtr, T *outputPtr) {
CUDA_KERNEL_LOOP(index, count) {
int o = index / (N*insideStride);
int o_r = index % (N*insideStride);
int i = o_r / insideStride;
int s = o_r % insideStride;
int outputIdx = outputOutsideStride * o + i * insideStride + s;
int indices = indicesPtr[i];
if (indices < 0 || indices > limit) {
outputPtr[outputIdx] = 0.0f;
}else{
int inputIdx = inputOutsideStride * o + insideStride * indicesPtr[i] + s;
outputPtr[outputIdx] = inputPtr[inputIdx];
}
}
}
cudaError_t GatherPlugin::GatherExecute(nvinfer1::DataType dataType, const int count, const float* bottom_data, const int* indices,
float* top_data, cudaStream_t stream) {
if (dataType == nvinfer1::DataType::kFLOAT){
GATHER<float><<<CAFFE_GET_BLOCKS(count), CUDA_NUM_THREADS>>>(count, mOutputOutsideStride, mInputOutsideStride, mN, mLimit, mInsideStride, bottom_data, indices, top_data);
}else{
GATHER<__half><<<CAFFE_GET_BLOCKS(count), CUDA_NUM_THREADS>>>(count, mOutputOutsideStride, mInputOutsideStride, mN, mLimit, mInsideStride, (const __half*)bottom_data, indices, (__half*)top_data);
}
return cudaPeekAtLastError();
}
}; // namespace MNN
|
62cd1474841aff4c90fe1d0931b4ce2fe313a137.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@generated from magmablas/zgerbt_func_batched.cu, normal z -> s, Mon Jun 25 18:24:15 2018
@author Adrien Remy
@author Azzam Haidar
*/
#include "magma_internal.h"
#include "sgerbt.h"
#define block_height 32
#define block_width 4
#define block_length 256
#define NB 64
/***************************************************************************//**
Purpose
-------
SPRBT_MVT compute B = UTB to randomize B
Arguments
---------
@param[in]
n INTEGER
The number of values of db. n >= 0.
@param[in]
du REAL array, dimension (n,2)
The 2*n vector representing the random butterfly matrix V
@param[in,out]
db REAL array, dimension (n)
The n vector db computed by SGESV_NOPIV_GPU
On exit db = du*db
@param[in]
queue magma_queue_t
Queue to execute in.
*******************************************************************************/
extern "C" void
magmablas_sprbt_mtv_batched(
magma_int_t n,
float *du, float **db_array,
magma_int_t batchCount, magma_queue_t queue)
{
magma_int_t threads = block_length;
dim3 grid( magma_ceildiv( n, 4*block_length ), batchCount);
hipLaunchKernelGGL(( magmablas_sapply_transpose_vector_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n/2, du, n, db_array, 0);
hipLaunchKernelGGL(( magmablas_sapply_transpose_vector_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n/2, du, n+n/2, db_array, n/2);
threads = block_length;
grid = magma_ceildiv( n, 2*block_length );
hipLaunchKernelGGL(( magmablas_sapply_transpose_vector_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, du, 0, db_array, 0);
}
/***************************************************************************//**
Purpose
-------
SPRBT_MV compute B = VB to obtain the non randomized solution
Arguments
---------
@param[in]
n INTEGER
The number of values of db. n >= 0.
@param[in,out]
db REAL array, dimension (n)
The n vector db computed by SGESV_NOPIV_GPU
On exit db = dv*db
@param[in]
dv REAL array, dimension (n,2)
The 2*n vector representing the random butterfly matrix V
@param[in]
queue magma_queue_t
Queue to execute in.
*******************************************************************************/
extern "C" void
magmablas_sprbt_mv_batched(
magma_int_t n,
float *dv, float **db_array,
magma_int_t batchCount, magma_queue_t queue)
{
magma_int_t threads = block_length;
dim3 grid ( magma_ceildiv( n, 2*block_length ), batchCount);
hipLaunchKernelGGL(( magmablas_sapply_vector_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dv, 0, db_array, 0);
threads = block_length;
grid = magma_ceildiv( n, 4*block_length );
hipLaunchKernelGGL(( magmablas_sapply_vector_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n/2, dv, n, db_array, 0);
hipLaunchKernelGGL(( magmablas_sapply_vector_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n/2, dv, n+n/2, db_array, n/2);
}
/***************************************************************************//**
Purpose
-------
SPRBT randomize a square general matrix using partial randomized transformation
Arguments
---------
@param[in]
n INTEGER
The number of columns and rows of the matrix dA. n >= 0.
@param[in,out]
dA REAL array, dimension (n,ldda)
The n-by-n matrix dA
On exit dA = duT*dA*d_V
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDA >= max(1,n).
@param[in]
du REAL array, dimension (n,2)
The 2*n vector representing the random butterfly matrix U
@param[in]
dv REAL array, dimension (n,2)
The 2*n vector representing the random butterfly matrix V
@param[in]
queue magma_queue_t
Queue to execute in.
*******************************************************************************/
extern "C" void
magmablas_sprbt_batched(
magma_int_t n,
float **dA_array, magma_int_t ldda,
float *du, float *dv,
magma_int_t batchCount, magma_queue_t queue)
{
du += ldda;
dv += ldda;
dim3 threads(block_height, block_width);
dim3 grid( magma_ceildiv( n, 4*block_height ),
magma_ceildiv( n, 4*block_width ),
batchCount );
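// The four launches below apply the n/2-sized (inner) butterfly factors to the
// four quadrants of dA, using the second column of du/dv (advanced by ldda above);
// the final launch applies the n-sized (outer) factor, reaching the first column
// via the -ldda offset.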
hipLaunchKernelGGL(( magmablas_selementary_multiplication_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n/2, dA_array, 0, ldda, du, 0, dv, 0);
hipLaunchKernelGGL(( magmablas_selementary_multiplication_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n/2, dA_array, ldda*n/2, ldda, du, 0, dv, n/2);
hipLaunchKernelGGL(( magmablas_selementary_multiplication_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n/2, dA_array, n/2, ldda, du, n/2, dv, 0);
hipLaunchKernelGGL(( magmablas_selementary_multiplication_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n/2, dA_array, ldda*n/2+n/2, ldda, du, n/2, dv, n/2);
dim3 threads2(block_height, block_width);
dim3 grid2( magma_ceildiv( n, 2*block_height ),
magma_ceildiv( n, 2*block_width ),
batchCount );
hipLaunchKernelGGL(( magmablas_selementary_multiplication_kernel_batched), dim3(grid2), dim3(threads2), 0, queue->cuda_stream() , n, dA_array, 0, ldda, du, -ldda, dv, -ldda);
}
|
62cd1474841aff4c90fe1d0931b4ce2fe313a137.cu
|
/*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@generated from magmablas/zgerbt_func_batched.cu, normal z -> s, Mon Jun 25 18:24:15 2018
@author Adrien Remy
@author Azzam Haidar
*/
#include "magma_internal.h"
#include "sgerbt.h"
#define block_height 32
#define block_width 4
#define block_length 256
#define NB 64
/***************************************************************************//**
Purpose
-------
SPRBT_MVT compute B = UTB to randomize B
Arguments
---------
@param[in]
n INTEGER
The number of values of db. n >= 0.
@param[in]
du REAL array, dimension (n,2)
The 2*n vector representing the random butterfly matrix V
@param[in,out]
db REAL array, dimension (n)
The n vector db computed by SGESV_NOPIV_GPU
On exit db = du*db
@param[in]
queue magma_queue_t
Queue to execute in.
*******************************************************************************/
extern "C" void
magmablas_sprbt_mtv_batched(
magma_int_t n,
float *du, float **db_array,
magma_int_t batchCount, magma_queue_t queue)
{
magma_int_t threads = block_length;
dim3 grid( magma_ceildiv( n, 4*block_length ), batchCount);
magmablas_sapply_transpose_vector_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>(n/2, du, n, db_array, 0);
magmablas_sapply_transpose_vector_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>(n/2, du, n+n/2, db_array, n/2);
threads = block_length;
grid = magma_ceildiv( n, 2*block_length );
magmablas_sapply_transpose_vector_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>(n, du, 0, db_array, 0);
}
/***************************************************************************//**
Purpose
-------
SPRBT_MV compute B = VB to obtain the non randomized solution
Arguments
---------
@param[in]
n INTEGER
The number of values of db. n >= 0.
@param[in,out]
db REAL array, dimension (n)
The n vector db computed by SGESV_NOPIV_GPU
On exit db = dv*db
@param[in]
dv REAL array, dimension (n,2)
The 2*n vector representing the random butterfly matrix V
@param[in]
queue magma_queue_t
Queue to execute in.
*******************************************************************************/
extern "C" void
magmablas_sprbt_mv_batched(
magma_int_t n,
float *dv, float **db_array,
magma_int_t batchCount, magma_queue_t queue)
{
magma_int_t threads = block_length;
dim3 grid ( magma_ceildiv( n, 2*block_length ), batchCount);
magmablas_sapply_vector_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>(n, dv, 0, db_array, 0);
threads = block_length;
grid = magma_ceildiv( n, 4*block_length );
magmablas_sapply_vector_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>(n/2, dv, n, db_array, 0);
magmablas_sapply_vector_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>(n/2, dv, n+n/2, db_array, n/2);
}
/***************************************************************************//**
Purpose
-------
SPRBT randomize a square general matrix using partial randomized transformation
Arguments
---------
@param[in]
n INTEGER
The number of columns and rows of the matrix dA. n >= 0.
@param[in,out]
dA REAL array, dimension (n,ldda)
The n-by-n matrix dA
On exit dA = duT*dA*d_V
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDA >= max(1,n).
@param[in]
du REAL array, dimension (n,2)
The 2*n vector representing the random butterfly matrix U
@param[in]
dv REAL array, dimension (n,2)
The 2*n vector representing the random butterfly matrix V
@param[in]
queue magma_queue_t
Queue to execute in.
*******************************************************************************/
extern "C" void
magmablas_sprbt_batched(
magma_int_t n,
float **dA_array, magma_int_t ldda,
float *du, float *dv,
magma_int_t batchCount, magma_queue_t queue)
{
du += ldda;
dv += ldda;
dim3 threads(block_height, block_width);
dim3 grid( magma_ceildiv( n, 4*block_height ),
magma_ceildiv( n, 4*block_width ),
batchCount );
magmablas_selementary_multiplication_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>(n/2, dA_array, 0, ldda, du, 0, dv, 0);
magmablas_selementary_multiplication_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>(n/2, dA_array, ldda*n/2, ldda, du, 0, dv, n/2);
magmablas_selementary_multiplication_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>(n/2, dA_array, n/2, ldda, du, n/2, dv, 0);
magmablas_selementary_multiplication_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>(n/2, dA_array, ldda*n/2+n/2, ldda, du, n/2, dv, n/2);
dim3 threads2(block_height, block_width);
dim3 grid2( magma_ceildiv( n, 2*block_height ),
magma_ceildiv( n, 2*block_width ),
batchCount );
magmablas_selementary_multiplication_kernel_batched<<< grid2, threads2, 0, queue->cuda_stream() >>>(n, dA_array, 0, ldda, du, -ldda, dv, -ldda);
}
|
2d529e5f216a9474af6d631fec796d1b8161d3a3.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "onnx-tensorrt/onnxplugin.hpp"
using namespace ONNXPlugin;
static __device__ float sigmoid(float x){
return 1 / (1 + expf(-x));
}
static __global__ void MYSELU_kernel_fp32(const float* x, float* output, int edge) {
int position = threadIdx.x + blockDim.x * blockIdx.x;
if(position >= edge) return;
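// Despite the MYSELU name, this computes the SiLU/Swish activation: x * sigmoid(x).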
output[position] = x[position] * sigmoid(x[position]);
}
class MYSELU : public TRTPlugin {
public:
SetupPlugin(MYSELU);
virtual void config_finish() override{
printf("\033[33minit MYSELU config: %s\033[0m\n", config_->info_.c_str());
printf("weights count is %d\n", config_->weights_.size());
}
int enqueue(const std::vector<GTensor>& inputs, std::vector<GTensor>& outputs, const std::vector<GTensor>& weights, void* workspace, hipStream_t stream) override{
int n = inputs[0].count();
const int nthreads = 512;
int block_size = n < nthreads ? n : nthreads;
int grid_size = (n + block_size - 1) / block_size;
hipLaunchKernelGGL(( MYSELU_kernel_fp32) , dim3(grid_size), dim3(block_size), 0, stream, inputs[0].ptr<float>(), outputs[0].ptr<float>(), n);
return 0;
}
};
RegisterPlugin(MYSELU);
|
2d529e5f216a9474af6d631fec796d1b8161d3a3.cu
|
#include "onnx-tensorrt/onnxplugin.hpp"
using namespace ONNXPlugin;
static __device__ float sigmoid(float x){
return 1 / (1 + expf(-x));
}
static __global__ void MYSELU_kernel_fp32(const float* x, float* output, int edge) {
int position = threadIdx.x + blockDim.x * blockIdx.x;
if(position >= edge) return;
output[position] = x[position] * sigmoid(x[position]);
}
class MYSELU : public TRTPlugin {
public:
SetupPlugin(MYSELU);
virtual void config_finish() override{
printf("\033[33minit MYSELU config: %s\033[0m\n", config_->info_.c_str());
printf("weights count is %d\n", config_->weights_.size());
}
int enqueue(const std::vector<GTensor>& inputs, std::vector<GTensor>& outputs, const std::vector<GTensor>& weights, void* workspace, cudaStream_t stream) override{
int n = inputs[0].count();
const int nthreads = 512;
int block_size = n < nthreads ? n : nthreads;
int grid_size = (n + block_size - 1) / block_size;
MYSELU_kernel_fp32 <<<grid_size, block_size, 0, stream>>> (inputs[0].ptr<float>(), outputs[0].ptr<float>(), n);
return 0;
}
};
RegisterPlugin(MYSELU);
|
84cdf9ebe50ee7aafcd2be646ded271fa7216b6e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cudaTools.h"
#include "Device.h"
#include <iostream>
#include "Lock.h"
#include "HistogrammeHost.h"
using namespace ::std;
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
extern void __global__ kernelHistogrammeRand(hiprandState_t* ptrDevTabGeneratorThread, int deviceId);
extern void __global__ kernelHistogrammeWork(hiprandState_t* ptrDevTabGeneratorThread, int* ptrDevTabFrequence, Lock lock, int n);
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
Histogramme::Histogramme()
{
//Tools
this->sizeArray = 256;
this->sizeBlock = 16;
this->n = this->sizeArray * this->sizeBlock;
this->deviceId = Device::getDeviceId();
this->dg = dim3(this->sizeBlock, 1, 1);
this->db = dim3(this->sizeArray, 1, 1);
//Output
this->tabFrequence = new int[this->sizeArray];
//Device
this->sizeTabGenerator = sizeof(hiprandState_t) * this->n;
this->sizeSharedMemory = sizeof(int) * this->sizeArray;
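// sizeSharedMemory presumably gives each block a shared-memory histogram with one
// int bin per possible value (sizeArray bins), merged into ptrDevTabFrequence by the kernel.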
this->sizePtrDevTabFrequence = sizeof(int) * this->sizeArray;
HANDLE_ERROR(hipMalloc((void**) &ptrDevTabGeneratorThread, this->sizeTabGenerator));
HANDLE_ERROR(hipMalloc((void**) &ptrDevTabFrequence, this->sizePtrDevTabFrequence));
Device::assertDim(dg, db);
}
Histogramme::~Histogramme()
{
HANDLE_ERROR(hipFree(ptrDevTabGeneratorThread));
HANDLE_ERROR(hipFree(ptrDevTabFrequence));
}
void Histogramme::run()
{
runBuildGeneratorHistogramme();
runComputeHistogramme();
}
void Histogramme::runBuildGeneratorHistogramme()
{
hipLaunchKernelGGL(( kernelHistogrammeRand), dim3(dg),dim3(db), 0, 0, ptrDevTabGeneratorThread,deviceId);
check_CUDA_Error("kernel rand");
HANDLE_ERROR(hipDeviceSynchronize());
}
void Histogramme::runComputeHistogramme()
{
Lock lock;
hipLaunchKernelGGL(( kernelHistogrammeWork), dim3(dg),dim3(db), this->sizeSharedMemory, 0, ptrDevTabGeneratorThread, ptrDevTabFrequence, lock, n);
check_CUDA_Error("kernel work");
//Device -> Host
HANDLE_ERROR(hipMemcpy(tabFrequence, ptrDevTabFrequence, this->sizePtrDevTabFrequence, hipMemcpyDeviceToHost));
}
void Histogramme::printTabFrequence()
{
for(int i=0;i<this->sizeArray;i++)
{
cout << "i=" << i << " frequence=" << this->tabFrequence[i] << endl;
}
}
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
|
84cdf9ebe50ee7aafcd2be646ded271fa7216b6e.cu
|
#include "cuda.h"
#include "cudaTools.h"
#include "Device.h"
#include <iostream>
#include "Lock.h"
#include "HistogrammeHost.h"
using namespace ::std;
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
extern void __global__ kernelHistogrammeRand(curandState* ptrDevTabGeneratorThread, int deviceId);
extern void __global__ kernelHistogrammeWork(curandState* ptrDevTabGeneratorThread, int* ptrDevTabFrequence, Lock lock, int n);
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
Histogramme::Histogramme()
{
//Tools
this->sizeArray = 256;
this->sizeBlock = 16;
this->n = this->sizeArray * this->sizeBlock;
this->deviceId = Device::getDeviceId();
this->dg = dim3(this->sizeBlock, 1, 1);
this->db = dim3(this->sizeArray, 1, 1);
//Output
this->tabFrequence = new int[this->sizeArray];
//Device
this->sizeTabGenerator = sizeof(curandState) * this->n;
this->sizeSharedMemory = sizeof(int) * this->sizeArray;
this->sizePtrDevTabFrequence = sizeof(int) * this->sizeArray;
HANDLE_ERROR(cudaMalloc((void**) &ptrDevTabGeneratorThread, this->sizeTabGenerator));
HANDLE_ERROR(cudaMalloc((void**) &ptrDevTabFrequence, this->sizePtrDevTabFrequence));
Device::assertDim(dg, db);
}
Histogramme::~Histogramme()
{
HANDLE_ERROR(cudaFree(ptrDevTabGeneratorThread));
HANDLE_ERROR(cudaFree(ptrDevTabFrequence));
}
void Histogramme::run()
{
runBuildGeneratorHistogramme();
runComputeHistogramme();
}
void Histogramme::runBuildGeneratorHistogramme()
{
kernelHistogrammeRand<<<dg,db>>>(ptrDevTabGeneratorThread,deviceId);
check_CUDA_Error("kernel rand");
HANDLE_ERROR(cudaDeviceSynchronize());
}
void Histogramme::runComputeHistogramme()
{
Lock lock;
kernelHistogrammeWork<<<dg,db, this->sizeSharedMemory>>>(ptrDevTabGeneratorThread, ptrDevTabFrequence, lock, n);
check_CUDA_Error("kernel work");
//Device -> Host
HANDLE_ERROR(cudaMemcpy(tabFrequence, ptrDevTabFrequence, this->sizePtrDevTabFrequence, cudaMemcpyDeviceToHost));
}
void Histogramme::printTabFrequence()
{
for(int i=0;i<this->sizeArray;i++)
{
cout << "i=" << i << " frequence=" << this->tabFrequence[i] << endl;
}
}
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
|
fbaaa335ee1466e68b67ddb2616ce2617e919257.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright (C) 2017 Yusuke Suzuki <[email protected]>
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <gloop/benchmark.h>
#include <gloop/statistics.h>
#include <gloop/gloop.h>
__device__ static const char* const filename = "tmp/dump";
__device__ void gmain(gloop::DeviceLoop<>* loop, int fd, unsigned char* buffer, int count, int limit, int ioSize, int loopCount)
{
volatile int res = 20;
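// The busy-loop below stands in for per-request compute work; res is volatile so
// the compiler cannot optimize the loop away.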
for (int i = 0; i < loopCount; ++i)
res = i;
if (count != limit) {
gloop::fs::read(loop, fd, 0, ioSize, buffer, [=](gloop::DeviceLoop<>* loop, int) {
printf("hello\n");
gmain(loop, fd, buffer, count + 1, limit, ioSize, loopCount);
});
return;
}
printf("close\n");
gloop::fs::close(loop, fd, [=](gloop::DeviceLoop<>* loop, int error) { });
}
int main(int argc, char** argv)
{
if (argc < 8) {
fprintf(stderr, "<trials> <blocks> <pblocks> <threads> <id> <ioSize> <loopCount>\n");
return -1;
}
int trials = atoi(argv[1]);
int nblocks = atoi(argv[2]);
int physblocks = atoi(argv[3]);
int nthreads = atoi(argv[4]);
int id = atoi(argv[5]);
int ioSize = atoi(argv[6]);
int loopCount = atoi(argv[7]);
fprintf(stderr, " iterations: %d blocks %d threads %d id %d ioSize %d, loops %d\n", trials, nblocks, nthreads, id, ioSize, loopCount);
{
gloop::Statistics::Scope<gloop::Statistics::Type::GPUInit> scope;
uint32_t pipelinePageCount = 0;
dim3 blocks(nblocks);
dim3 psblocks(physblocks);
std::unique_ptr<gloop::HostLoop> hostLoop = gloop::HostLoop::create(0);
std::unique_ptr<gloop::HostContext> hostContext = gloop::HostContext::create(*hostLoop, psblocks, pipelinePageCount);
{
std::lock_guard<gloop::HostLoop::KernelLock> lock(hostLoop->kernelLock());
CUDA_SAFE_CALL(hipDeviceSetLimit(hipLimitMallocHeapSize, (1ULL << 20)));
}
{
gloop::Statistics::Scope<gloop::Statistics::Type::Kernel> scope;
hostLoop->launch(*hostContext, blocks, nthreads, [] GLOOP_DEVICE_LAMBDA(gloop::DeviceLoop<> * loop, int trials, int ioSize, int loopCount) {
gloop::fs::open(loop, filename, O_RDONLY, [=](gloop::DeviceLoop<>* loop, int fd) {
__shared__ unsigned char* buffer;
BEGIN_SINGLE_THREAD
{
buffer = static_cast<unsigned char*>(malloc(ioSize));
}
END_SINGLE_THREAD
gmain(loop, fd, buffer, 0, trials, ioSize, loopCount);
});
}, trials, ioSize, loopCount);
}
}
gloop::Statistics::instance().report(stderr);
return 0;
}
|
fbaaa335ee1466e68b67ddb2616ce2617e919257.cu
|
/*
Copyright (C) 2017 Yusuke Suzuki <[email protected]>
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <gloop/benchmark.h>
#include <gloop/statistics.h>
#include <gloop/gloop.h>
__device__ static const char* const filename = "tmp/dump";
__device__ void gmain(gloop::DeviceLoop<>* loop, int fd, unsigned char* buffer, int count, int limit, int ioSize, int loopCount)
{
volatile int res = 20;
for (int i = 0; i < loopCount; ++i)
res = i;
if (count != limit) {
gloop::fs::read(loop, fd, 0, ioSize, buffer, [=](gloop::DeviceLoop<>* loop, int) {
printf("hello\n");
gmain(loop, fd, buffer, count + 1, limit, ioSize, loopCount);
});
return;
}
printf("close\n");
gloop::fs::close(loop, fd, [=](gloop::DeviceLoop<>* loop, int error) { });
}
int main(int argc, char** argv)
{
if (argc < 8) {
fprintf(stderr, "<trials> <blocks> <pblocks> <threads> <id> <ioSize> <loopCount>\n");
return -1;
}
int trials = atoi(argv[1]);
int nblocks = atoi(argv[2]);
int physblocks = atoi(argv[3]);
int nthreads = atoi(argv[4]);
int id = atoi(argv[5]);
int ioSize = atoi(argv[6]);
int loopCount = atoi(argv[7]);
fprintf(stderr, " iterations: %d blocks %d threads %d id %d ioSize %d, loops %d\n", trials, nblocks, nthreads, id, ioSize, loopCount);
{
gloop::Statistics::Scope<gloop::Statistics::Type::GPUInit> scope;
uint32_t pipelinePageCount = 0;
dim3 blocks(nblocks);
dim3 psblocks(physblocks);
std::unique_ptr<gloop::HostLoop> hostLoop = gloop::HostLoop::create(0);
std::unique_ptr<gloop::HostContext> hostContext = gloop::HostContext::create(*hostLoop, psblocks, pipelinePageCount);
{
std::lock_guard<gloop::HostLoop::KernelLock> lock(hostLoop->kernelLock());
CUDA_SAFE_CALL(cudaDeviceSetLimit(cudaLimitMallocHeapSize, (1ULL << 20)));
}
{
gloop::Statistics::Scope<gloop::Statistics::Type::Kernel> scope;
hostLoop->launch(*hostContext, blocks, nthreads, [] GLOOP_DEVICE_LAMBDA(gloop::DeviceLoop<> * loop, int trials, int ioSize, int loopCount) {
gloop::fs::open(loop, filename, O_RDONLY, [=](gloop::DeviceLoop<>* loop, int fd) {
__shared__ unsigned char* buffer;
BEGIN_SINGLE_THREAD
{
buffer = static_cast<unsigned char*>(malloc(ioSize));
}
END_SINGLE_THREAD
gmain(loop, fd, buffer, 0, trials, ioSize, loopCount);
});
}, trials, ioSize, loopCount);
}
}
gloop::Statistics::instance().report(stderr);
return 0;
}
|
47c9133b4672290adde81ad9b487986b897e8b01.hip
|
// !!! This is a file automatically generated by hipify!!!
#include<stdio.h>
#include <hip/hip_runtime.h>
#define N (1024*1024)
#define M (1000000)
__global__ void cudakernel(float *buf)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
buf[i] = 1.0f * i / N;
for(int j = 0; j < M; j++)
buf[i] = buf[i] * buf[i] - 0.25f;
}
int main()
{
float data[N]; int count = 0;
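// Note: data[] holds N floats (~4 MB) on the stack, which may exceed the default
// stack size on some systems.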
float *d_data;
hipMalloc(&d_data, N * sizeof(float));hipLaunchKernelGGL((
cudakernel), dim3(N/256), dim3(256), 0, 0, d_data);
hipMemcpy(data, d_data, N * sizeof(float), hipMemcpyDeviceToHost);
hipFree(d_data);
int sel;
printf("Enter an index: ");
scanf("%d", &sel);
printf("data[%d] = %f\n", sel, data[sel]);
}
|
47c9133b4672290adde81ad9b487986b897e8b01.cu
|
#include<stdio.h>
#include<cuda.h>
#include <cuda_runtime.h>
#define N (1024*1024)
#define M (1000000)
__global__ void cudakernel(float *buf)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
buf[i] = 1.0f * i / N;
for(int j = 0; j < M; j++)
buf[i] = buf[i] * buf[i] - 0.25f;
}
int main()
{
float data[N]; int count = 0;
float *d_data;
cudaMalloc(&d_data, N * sizeof(float));
cudakernel<<<N/256, 256>>>(d_data);
cudaMemcpy(data, d_data, N * sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(d_data);
int sel;
printf("Enter an index: ");
scanf("%d", &sel);
printf("data[%d] = %f\n", sel, data[sel]);
}
|
a3f8b9f5cd5dc9a17ebb4f773489180efb8fe0ec.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "gpu_nms.hpp"
#include <vector>
#include <iostream>
#define CUDA_CHECK(condition) \
/* Code block avoids redefinition of hipError_t error */ \
do { \
hipError_t error = condition; \
if (error != hipSuccess) { \
std::cout << hipGetErrorString(error) << std::endl; \
} \
} while (0)
#define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0))
int const threadsPerBlock = sizeof(unsigned long long) * 8;
__device__ inline float devIoU(float const * const a, float const * const b) {
float left = max(a[0], b[0]), right = min(a[2], b[2]);
float top = max(a[1], b[1]), bottom = min(a[3], b[3]);
float width = max(right - left + 1, 0.f), height = max(bottom - top + 1, 0.f);
float interS = width * height;
float Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1);
float Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1);
return interS / (Sa + Sb - interS);
}
__global__ void nms_kernel(const int n_boxes, const float nms_overlap_thresh,
const float *dev_boxes, unsigned long long *dev_mask) {
const int row_start = blockIdx.y;
const int col_start = blockIdx.x;
// if (row_start > col_start) return;
const int row_size =
min(n_boxes - row_start * threadsPerBlock, threadsPerBlock);
const int col_size =
min(n_boxes - col_start * threadsPerBlock, threadsPerBlock);
__shared__ float block_boxes[threadsPerBlock * 5];
if (threadIdx.x < col_size) {
block_boxes[threadIdx.x * 5 + 0] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0];
block_boxes[threadIdx.x * 5 + 1] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1];
block_boxes[threadIdx.x * 5 + 2] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2];
block_boxes[threadIdx.x * 5 + 3] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3];
block_boxes[threadIdx.x * 5 + 4] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4];
}
__syncthreads();
if (threadIdx.x < row_size) {
const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x;
const float *cur_box = dev_boxes + cur_box_idx * 5;
int i = 0;
unsigned long long t = 0;
int start = 0;
if (row_start == col_start) {
start = threadIdx.x + 1;
}
for (i = start; i < col_size; i++) {
if (devIoU(cur_box, block_boxes + i * 5) > nms_overlap_thresh) {
t |= 1ULL << i;
}
}
const int col_blocks = DIVUP(n_boxes, threadsPerBlock);
dev_mask[cur_box_idx * col_blocks + col_start] = t;
}
}
void _set_device(int device_id) {
int current_device;
CUDA_CHECK(hipGetDevice(¤t_device));
if (current_device == device_id) {
return;
}
// The call to hipSetDevice must come before any calls to Get, which
// may perform initialization using the GPU.
CUDA_CHECK(hipSetDevice(device_id));
}
void _nms(int* keep_out, int* num_out, const float* boxes_host, int boxes_num,
int boxes_dim, float nms_overlap_thresh, int device_id) {
_set_device(device_id);
float* boxes_dev = NULL;
unsigned long long* mask_dev = NULL;
const int col_blocks = DIVUP(boxes_num, threadsPerBlock);
CUDA_CHECK(hipMalloc(&boxes_dev,
boxes_num * boxes_dim * sizeof(float)));
CUDA_CHECK(hipMemcpy(boxes_dev,
boxes_host,
boxes_num * boxes_dim * sizeof(float),
hipMemcpyHostToDevice));
CUDA_CHECK(hipMalloc(&mask_dev,
boxes_num * col_blocks * sizeof(unsigned long long)));
dim3 blocks(DIVUP(boxes_num, threadsPerBlock),
DIVUP(boxes_num, threadsPerBlock));
dim3 threads(threadsPerBlock);
hipLaunchKernelGGL(( nms_kernel), dim3(blocks), dim3(threads), 0, 0, boxes_num,
nms_overlap_thresh,
boxes_dev,
mask_dev);
std::vector<unsigned long long> mask_host(boxes_num * col_blocks);
CUDA_CHECK(hipMemcpy(&mask_host[0],
mask_dev,
sizeof(unsigned long long) * boxes_num * col_blocks,
hipMemcpyDeviceToHost));
std::vector<unsigned long long> remv(col_blocks);
memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks);
int num_to_keep = 0;
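// Greedy sweep on the host: keep box i only if no previously kept box has marked
// it in remv, then OR its overlap mask into remv so the boxes it suppresses are
// skipped in later iterations.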
for (int i = 0; i < boxes_num; i++) {
int nblock = i / threadsPerBlock;
int inblock = i % threadsPerBlock;
if (!(remv[nblock] & (1ULL << inblock))) {
keep_out[num_to_keep++] = i;
unsigned long long *p = &mask_host[0] + i * col_blocks;
for (int j = nblock; j < col_blocks; j++) {
remv[j] |= p[j];
}
}
}
*num_out = num_to_keep;
CUDA_CHECK(hipFree(boxes_dev));
CUDA_CHECK(hipFree(mask_dev));
}
|
a3f8b9f5cd5dc9a17ebb4f773489180efb8fe0ec.cu
|
#include "gpu_nms.hpp"
#include <vector>
#include <iostream>
#define CUDA_CHECK(condition) \
/* Code block avoids redefinition of cudaError_t error */ \
do { \
cudaError_t error = condition; \
if (error != cudaSuccess) { \
std::cout << cudaGetErrorString(error) << std::endl; \
} \
} while (0)
#define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0))
int const threadsPerBlock = sizeof(unsigned long long) * 8;
__device__ inline float devIoU(float const * const a, float const * const b) {
float left = max(a[0], b[0]), right = min(a[2], b[2]);
float top = max(a[1], b[1]), bottom = min(a[3], b[3]);
float width = max(right - left + 1, 0.f), height = max(bottom - top + 1, 0.f);
float interS = width * height;
float Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1);
float Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1);
return interS / (Sa + Sb - interS);
}
__global__ void nms_kernel(const int n_boxes, const float nms_overlap_thresh,
const float *dev_boxes, unsigned long long *dev_mask) {
const int row_start = blockIdx.y;
const int col_start = blockIdx.x;
// if (row_start > col_start) return;
const int row_size =
min(n_boxes - row_start * threadsPerBlock, threadsPerBlock);
const int col_size =
min(n_boxes - col_start * threadsPerBlock, threadsPerBlock);
__shared__ float block_boxes[threadsPerBlock * 5];
if (threadIdx.x < col_size) {
block_boxes[threadIdx.x * 5 + 0] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0];
block_boxes[threadIdx.x * 5 + 1] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1];
block_boxes[threadIdx.x * 5 + 2] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2];
block_boxes[threadIdx.x * 5 + 3] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3];
block_boxes[threadIdx.x * 5 + 4] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4];
}
__syncthreads();
if (threadIdx.x < row_size) {
const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x;
const float *cur_box = dev_boxes + cur_box_idx * 5;
int i = 0;
unsigned long long t = 0;
int start = 0;
if (row_start == col_start) {
start = threadIdx.x + 1;
}
for (i = start; i < col_size; i++) {
if (devIoU(cur_box, block_boxes + i * 5) > nms_overlap_thresh) {
t |= 1ULL << i;
}
}
const int col_blocks = DIVUP(n_boxes, threadsPerBlock);
dev_mask[cur_box_idx * col_blocks + col_start] = t;
}
}
void _set_device(int device_id) {
int current_device;
CUDA_CHECK(cudaGetDevice(¤t_device));
if (current_device == device_id) {
return;
}
// The call to cudaSetDevice must come before any calls to Get, which
// may perform initialization using the GPU.
CUDA_CHECK(cudaSetDevice(device_id));
}
void _nms(int* keep_out, int* num_out, const float* boxes_host, int boxes_num,
int boxes_dim, float nms_overlap_thresh, int device_id) {
_set_device(device_id);
float* boxes_dev = NULL;
unsigned long long* mask_dev = NULL;
const int col_blocks = DIVUP(boxes_num, threadsPerBlock);
CUDA_CHECK(cudaMalloc(&boxes_dev,
boxes_num * boxes_dim * sizeof(float)));
CUDA_CHECK(cudaMemcpy(boxes_dev,
boxes_host,
boxes_num * boxes_dim * sizeof(float),
cudaMemcpyHostToDevice));
CUDA_CHECK(cudaMalloc(&mask_dev,
boxes_num * col_blocks * sizeof(unsigned long long)));
dim3 blocks(DIVUP(boxes_num, threadsPerBlock),
DIVUP(boxes_num, threadsPerBlock));
dim3 threads(threadsPerBlock);
nms_kernel<<<blocks, threads>>>(boxes_num,
nms_overlap_thresh,
boxes_dev,
mask_dev);
std::vector<unsigned long long> mask_host(boxes_num * col_blocks);
CUDA_CHECK(cudaMemcpy(&mask_host[0],
mask_dev,
sizeof(unsigned long long) * boxes_num * col_blocks,
cudaMemcpyDeviceToHost));
std::vector<unsigned long long> remv(col_blocks);
memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks);
int num_to_keep = 0;
for (int i = 0; i < boxes_num; i++) {
int nblock = i / threadsPerBlock;
int inblock = i % threadsPerBlock;
if (!(remv[nblock] & (1ULL << inblock))) {
keep_out[num_to_keep++] = i;
unsigned long long *p = &mask_host[0] + i * col_blocks;
for (int j = nblock; j < col_blocks; j++) {
remv[j] |= p[j];
}
}
}
*num_out = num_to_keep;
CUDA_CHECK(cudaFree(boxes_dev));
CUDA_CHECK(cudaFree(mask_dev));
}
|
ee2aa269fe06151bc0aae52cebb7047cb1db51b2.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void adam_kernel(int N, float *x, float *m, float *v, float B1, float B2, float rate, float eps, int t)
{
int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (index >= N) return;
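// Adam parameter update with the bias correction folded into the step size:
// x += rate * sqrt(1 - B2^t) / (1 - B1^t) * m / (sqrt(v) + eps)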
x[index] = x[index] + (rate * sqrtf(1.f-powf(B2, t)) / (1.f-powf(B1, t)) * m[index] / (sqrtf(v[index]) + eps));
}
|
ee2aa269fe06151bc0aae52cebb7047cb1db51b2.cu
|
#include "includes.h"
__global__ void adam_kernel(int N, float *x, float *m, float *v, float B1, float B2, float rate, float eps, int t)
{
int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (index >= N) return;
x[index] = x[index] + (rate * sqrtf(1.f-powf(B2, t)) / (1.f-powf(B1, t)) * m[index] / (sqrtf(v[index]) + eps));
}
|
6df63ec8f0eec48bb92e5a3ab43548d538bd1688.hip
|
// !!! This is a file automatically generated by hipify!!!
#include<hiprand/hiprand.h>
#include"../include/datamain.th"
#include"random.h"
using namespace GS_NS;
using namespace DATA_NS;
int Random::InitRandom(int *dimArr, real mean, real variance,int seed_host,int seed_dev){
hiprandCreateGenerator(&Gen_dev,HIPRAND_RNG_PSEUDO_DEFAULT);
hiprandCreateGeneratorHost(&Gen_host,HIPRAND_RNG_PSEUDO_DEFAULT);
hiprandSetPseudoRandomGeneratorSeed(Gen_dev,seed_dev);
hiprandSetPseudoRandomGeneratorSeed(Gen_host,seed_host);
Init(dimArr,Data_HOST_DEV);
Mean=mean;
Variance=variance;
Seed_dev=seed_dev;
Seed_host=seed_host;
return 0;
}
Random::Random(){
Mean=0.0f;
Variance=1.0f;
Seed_dev=0;
Seed_host=0;
Gen_host = NULL;
Gen_dev = NULL;
}
Random::~Random(){
if (Gen_dev) hiprandDestroyGenerator(Gen_dev);
if (Gen_host) hiprandDestroyGenerator(Gen_host);
}
int Random::InitRandom(int n, ...){
va_list args;
va_start(args,n);
int *arr = new int [ n+ 2 ];
arr[0]=n;
for (int i=1; i<=n; i++)
arr[i] = va_arg(args,int);
va_end(args);
InitRandom(arr,Mean,Variance,Seed_host,Seed_dev);
delete[]arr;
return 0;
}
int Random::SetParas(real mean,real variance,int seed_host,int seed_dev){
Mean=mean;
Variance=variance;
Seed_host=seed_host;
Seed_dev=seed_dev;
hiprandSetPseudoRandomGeneratorSeed(Gen_dev,Seed_dev);
hiprandSetPseudoRandomGeneratorSeed(Gen_host,Seed_host);
return 0;
}
Random &Random::NewNormal_device(){
hiprandGenerateNormal(Gen_dev,(float*)Arr_dev,N(),Mean,Variance);//??
return *this;
}
Random &Random::NewNormal_host(){
hiprandGenerateNormal(Gen_host,(float*)Arr,N(),Mean,Variance);
return *this;
}
|
6df63ec8f0eec48bb92e5a3ab43548d538bd1688.cu
|
#include<curand.h>
#include"../include/datamain.th"
#include"random.h"
using namespace GS_NS;
using namespace DATA_NS;
int Random::InitRandom(int *dimArr, real mean, real variance,int seed_host,int seed_dev){
curandCreateGenerator(&Gen_dev,CURAND_RNG_PSEUDO_DEFAULT);
curandCreateGeneratorHost(&Gen_host,CURAND_RNG_PSEUDO_DEFAULT);
curandSetPseudoRandomGeneratorSeed(Gen_dev,seed_dev);
curandSetPseudoRandomGeneratorSeed(Gen_host,seed_host);
Init(dimArr,Data_HOST_DEV);
Mean=mean;
Variance=variance;
Seed_dev=seed_dev;
Seed_host=seed_host;
return 0;
}
Random::Random(){
Mean=0.0f;
Variance=1.0f;
Seed_dev=0;
Seed_host=0;
Gen_host = NULL;
Gen_dev = NULL;
}
Random::~Random(){
if (Gen_dev) curandDestroyGenerator(Gen_dev);
if (Gen_host) curandDestroyGenerator(Gen_host);
}
int Random::InitRandom(int n, ...){
va_list args;
va_start(args,n);
int *arr = new int [ n+ 2 ];
arr[0]=n;
for (int i=1; i<=n; i++)
arr[i] = va_arg(args,int);
va_end(args);
InitRandom(arr,Mean,Variance,Seed_host,Seed_dev);
delete[]arr;
return 0;
}
int Random::SetParas(real mean,real variance,int seed_host,int seed_dev){
Mean=mean;
Variance=variance;
Seed_host=seed_host;
Seed_dev=seed_dev;
curandSetPseudoRandomGeneratorSeed(Gen_dev,Seed_dev);
curandSetPseudoRandomGeneratorSeed(Gen_host,Seed_host);
return 0;
}
Random &Random::NewNormal_device(){
curandGenerateNormal(Gen_dev,(float*)Arr_dev,N(),Mean,Variance); // note: assumes real == float; pseudo-random normal generation needs an even sample count
return *this;
}
Random &Random::NewNormal_host(){
curandGenerateNormal(Gen_host,(float*)Arr,N(),Mean,Variance);
return *this;
}
|
685814265664dbb0fca9ba3d0f2068faa99cb64a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include<stdio.h>
#include<stdlib.h>
#include<string.h>
#include<float.h> //DBL_MAX
#include <hip/hip_runtime_api.h>
#define restrict __restrict__
#define PADDINGCLASS -2
#define OUTPUT_FILE "ocuda"
#define INPUT_FILE "data"
#define KMAX 20
#define SPACEDIMMAX 150
#define CLASSESMAX 100
void check_error(hipError_t err, const char *msg);
void printStats(hipEvent_t before, hipEvent_t after, const char *msg);
void readInput(FILE* file, float* coords, int* classes, int spacedim, int numels, int totalElements);
void writeOutput(float* coords, int* classes, int spacedim, int numels);
__global__ void knn(float* coords, float2* kOutput, int totalElements, int numels, int spacedim, int k, int* classes, int classes_num);
__global__ void knnPunisher(float2* kOutput, int* d_classes, int numels, int newels, int k, int classes_num);
__device__ float distance(float* coords, float* coords2, int spacedim);
__device__ int insert(float2* kPoints, float2 newDist, int* size, int k, int gid);
__device__ void swapfloat2(float2* d1, float2* d2);
__device__ int deviceFindMode(int* kclasses, int classes_num, int k);
__device__ float distanceShm(float* coords, int left, int spacedim);
//Declaration of dynamic shared memory: it holds the partially determined classes of the new points
extern __shared__ int mPartial[];
int main(int argc, char *argv[])
{
int newels; //number of points we want to classify
int k; //number of nearest points we use to classify
int numels; //total elements already classified
int spacedim;
char filePath[255]; //path + filename of input file
int classes_num; //number of classes
float* h_coords; //coords of existing points with a class
int* h_classes; //array containing the class of each point
//*** Device-variables-declaration ***
float* d_coords;
int2* d_determinate;
int* d_classes;
float2* d_kOutput;
//*** end-device-declaration
//***cudaEvent-declaration***
hipEvent_t before_allocation, before_input, before_upload, before_knn, before_download;
hipEvent_t after_allocation, after_input, after_upload, after_knn, after_download;
//***end-cudaEvent-declaration***
//Requirements: numels and newels must be greater than K
if (argc > 2)
{
strcpy(filePath, argv[1]);
k = atoi(argv[2]);
}
else
{
printf("how-to-use: knn <inputfile> <k> \n");
exit(1);
}
//***cuda-init-event***
check_error(hipEventCreate(&before_allocation), "create before_allocation cudaEvent");
check_error(hipEventCreate(&before_input), "create before_input cudaEvent");
check_error(hipEventCreate(&before_upload), "create before_upload cudaEvent");
check_error(hipEventCreate(&before_knn), "create before_knn cudaEvent");
check_error(hipEventCreate(&before_download), "create before_download cudaEvent");
check_error(hipEventCreate(&after_allocation), "create after_allocation cudaEvent");
check_error(hipEventCreate(&after_input), "create after_input cudaEvent");
check_error(hipEventCreate(&after_upload), "create after_upload cudaEvent");
check_error(hipEventCreate(&after_knn), "create after_knn cudaEvent");
check_error(hipEventCreate(&after_download), "create after_download cudaEvent");
//***end-cuda-init-event***
FILE *fp;
if((fp = fopen(filePath, "r")) == NULL)
{
printf("No such file\n");
exit(1);
}
fseek(fp, 0L, SEEK_END);
float fileSize = ftell(fp);
rewind(fp);
int count = fscanf(fp, "%d,%d,%d,%d\n", &numels, &newels, &classes_num, &spacedim);
int totalElements = numels + newels;
//*** allocation ***
hipEventRecord(before_allocation);
h_coords = (float*) malloc(sizeof(float)*totalElements*spacedim);
h_classes = (int*) malloc(sizeof(int)*totalElements);
//*** device-allocation ***
check_error(hipMalloc(&d_coords, totalElements*spacedim*sizeof(float)), "alloc d_coords_x");
check_error(hipMalloc(&d_classes, totalElements*sizeof(int)), "alloc d_classes");
check_error(hipMalloc(&d_determinate, newels*2*sizeof(int)), "alloc d_determinate");
check_error(hipMalloc(&d_kOutput, newels*KMAX*2*sizeof(float)), "alloc d_kOutput");
//*** end-device-allocation ***
hipEventRecord(after_allocation);
///***input-from-file***
hipEventRecord(before_input);
readInput(fp, h_coords, h_classes, spacedim, numels, totalElements);
hipEventRecord(after_input);
fclose(fp);
///***end-input-from-file***
//***copy-arrays-on-device***
hipEventRecord(before_upload);
check_error(hipMemcpy(d_coords, h_coords, totalElements*spacedim*sizeof(float), hipMemcpyHostToDevice), "copy d_coords");
check_error(hipMemcpy(d_classes, h_classes, totalElements*sizeof(int), hipMemcpyHostToDevice), "copy d_classes");
hipEventRecord(after_upload);
//***end-copy-arrays-on-device***
const int blockSize = 512;
int numBlocks = (newels + blockSize - 1)/blockSize;
hipEventRecord(before_knn);
hipLaunchKernelGGL(( knn), dim3(numBlocks), dim3(blockSize), 0, 0, d_coords, d_kOutput, totalElements, numels, spacedim, k, d_classes, classes_num);
for (int i = 0; i < newels; i++)
{
hipLaunchKernelGGL(( knnPunisher), dim3(numBlocks), dim3(blockSize), newels*sizeof(int)*2, 0, d_kOutput, d_classes, numels, newels, k, classes_num);
}
hipEventRecord(after_knn);
check_error(hipMemcpy(h_classes+numels, d_classes+numels, newels*sizeof(int), hipMemcpyDeviceToHost), "download classes");
check_error(hipEventSynchronize(after_knn), "sync cudaEvents");
printStats(before_knn, after_knn, "knn");
writeOutput(h_coords, h_classes, spacedim, totalElements);
return 0;
}
void check_error(hipError_t err, const char *msg)
{
if (err != hipSuccess)
{
fprintf(stderr, "%s : error %d (%s)\n", msg, err, hipGetErrorString(err));
exit(err);
}
}
float runtime;
void printStats(hipEvent_t before, hipEvent_t after, const char *msg)
{
check_error(hipEventElapsedTime(&runtime, before, after), msg);
printf("%s %gms\n", msg, runtime);
}
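// One thread per new point: scan every already-classified point plus the new points with a smaller id,
// keep the k nearest in kPoints, and classify by majority vote. If any of the k neighbours is itself
// still unclassified, the class is set to -1 so knnPunisher can resolve it later.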
__global__ void knn(float* coords, float2* kOutput, int totalElements, int numels, int spacedim, int k, int* classes, int classes_num)
{
int gid = numels + threadIdx.x + blockIdx.x*blockDim.x; //id of the point to classify
if (gid >= totalElements) return;
float* newPointCoords = coords+spacedim*gid;
float* pointCoords;
float2 kPoints[KMAX];
int i = 0, size = 0, count = 0;
float2 dist;
for (i = 0; i < numels; i++)
{
pointCoords = coords+spacedim*i;
dist = make_float2(distance(newPointCoords, pointCoords, spacedim), i);
insert(kPoints, dist, &size, k, gid);
}
for (count=0; i < gid; i++)
{
pointCoords = coords+spacedim*i;
dist = make_float2(distance(newPointCoords, pointCoords, spacedim), i);
count += insert(kPoints, dist, &size, k, gid);
}
if (count > 0)
{
classes[gid] = -1;
}
else
{
int kclasses[KMAX];
for (int j = 0; j < k; j++)
kclasses[j] = classes[(int)(kPoints[j].y)];
classes[gid] = deviceFindMode(kclasses, classes_num, k);
}
//copy kPoints into kOutput
int newelId = gid-numels;
for (i = 0; i < k; i++)
kOutput[newelId*KMAX + i] = kPoints[i];
}
__global__ void knnPunisher(float2* kOutput, int* classes, int numels, int newels, int k, int classes_num)
{
int gid = threadIdx.x + blockIdx.x*blockDim.x;
int lid = threadIdx.x;
int i = lid;
while (i < gid)
{
mPartial[i] = classes[i+numels];
i += blockDim.x;
}
if (gid < newels)
mPartial[gid] = classes[gid+numels];
if (gid >= newels || mPartial[gid] != -1) return;
//If we get here, the class of this point is still to be determined
int kPoints[KMAX];
for (int i = 0; i < k; i++)
kPoints[i] = kOutput[gid*KMAX+i].y; //gid
//Its dependencies, if already determined, are in shared memory
int count = 0;
for (int i = k-1; i >= 0; i--)
{
int id = kPoints[i];
int lid = id - numels;
if (id > numels && mPartial[lid] < 0)
{
//mark the undetermined ones
count++;
break;
}
}
if (count == 0)
{
//the point can now be determined
//its dependencies are in shared memory
int kclasses[KMAX];
for (int j = 0; j < k; j++)
kclasses[j] = classes[kPoints[j]];
int newClass = deviceFindMode(kclasses, classes_num, k);
classes[gid+numels] = newClass;
}
}
__device__ int deviceFindMode(int* kclasses, int classes_num, int k)
{
int classesCount[CLASSESMAX];
int i;
int temp=0;
for (i = 0; i < CLASSESMAX; i++)
classesCount[i] = 0;
for (i = 0; i < k; i++){
temp=kclasses[i];
classesCount[temp]+=1;
}
int max = 0;
int maxValue = classesCount[0];
for (i = 1; i < classes_num; i++)
{
int value = classesCount[i];
if (value > maxValue)
{
max = i;
maxValue = value;
}
else if (value != 0 && maxValue == value)
{
int j = 0;
for (j = 0; j < k; j++)
{
if (kclasses[j] == i)
{
max = i;
break;
}
else if (kclasses[j] == max)
break;
}
}
}
return max;
}
//sorted insertion into kPoints
__device__ int insert(float2* kPoints, float2 newDist, int* size, int k, int gid)
{
int inserted = 0;
if (*size == 0)
{
//Base case: insertion into an empty array
kPoints[0] = newDist;
*size = *size + 1;
return 1;
}
int i = 1;
float2* value = &newDist; //new element
float2* tail = &(kPoints[*size-i]);
if (*size < k)
{
kPoints[*size] = newDist;
value = &(kPoints[*size]);
inserted = 1;
}
//start from the end, swap while a larger element is found - stop at the first smaller element
while (i <= *size && (*tail).x > (*value).x)
{
swapfloat2(tail, value);
value = tail;
i++;
tail = &(kPoints[*size-i]);
inserted = 1;
}
if (inserted && *size < k) *size = *size + 1;
return inserted;
}
__device__ void swapfloat2(float2* d1, float2* d2)
{
//alternative to try: tmp = *d1; *d1 = *d2; *d2 = tmp;
float2 tmp;
tmp.x = (*d1).x;
tmp.y = (*d1).y;
(*d1).x = (*d2).x;
(*d1).y = (*d2).y;
(*d2).x = tmp.x;
(*d2).y = tmp.y;
}
// read input from file
void readInput(FILE* file, float* coords, int* classes, int spacedim, int numels, int totalElements)
{
int i, j;
int count;
for(i=0; i<numels; i++)
{
for (j = 0; j < spacedim; j++)
count = fscanf(file, "%f,", &(coords[i*spacedim +j]));
count = fscanf(file, "%d\n", &(classes[i]));
}
for(; i < totalElements; i++)
{
for (j = 0; j < spacedim; j++)
count = fscanf(file, "%f,", &(coords[i*spacedim+j]));
count = fscanf(file, "-1\n");
}
count++;
}
//Write output to file
void writeOutput(float* coords, int* classes, int spacedim, int numels)
{
FILE *fp;
fp = fopen(OUTPUT_FILE, "w");
int i, j;
for( i = 0; i < numels; i++)
{
for (j = 0; j < spacedim; j++)
fprintf(fp, "%lf,", coords[i*spacedim+j]);
fprintf(fp, "%d\n", classes[i]);
}
fclose(fp);
}
//multidimensional euclidean distance (without sqrt)
__device__ float distance(float* coords, float* coords2, int spacedim)
{
float sum = 0;
int i;
for (i = 0; i < spacedim; i++)
{
float diff = coords[i] - coords2[i];
sum += diff*diff;
}
return sum;
}
|
685814265664dbb0fca9ba3d0f2068faa99cb64a.cu
|
#include<stdio.h>
#include<stdlib.h>
#include<string.h>
#include<float.h> //DBL_MAX
#include <cuda_runtime_api.h>
#define restrict __restrict__
#define PADDINGCLASS -2
#define OUTPUT_FILE "ocuda"
#define INPUT_FILE "data"
#define KMAX 20
#define SPACEDIMMAX 150
#define CLASSESMAX 100
void check_error(cudaError_t err, const char *msg);
void printStats(cudaEvent_t before, cudaEvent_t after, const char *msg);
void readInput(FILE* file, float* coords, int* classes, int spacedim, int numels, int totalElements);
void writeOutput(float* coords, int* classes, int spacedim, int numels);
__global__ void knn(float* coords, float2* kOutput, int totalElements, int numels, int spacedim, int k, int* classes, int classes_num);
__global__ void knnPunisher(float2* kOutput, int* d_classes, int numels, int newels, int k, int classes_num);
__device__ float distance(float* coords, float* coords2, int spacedim);
__device__ int insert(float2* kPoints, float2 newDist, int* size, int k, int gid);
__device__ void swapfloat2(float2* d1, float2* d2);
__device__ int deviceFindMode(int* kclasses, int classes_num, int k);
__device__ float distanceShm(float* coords, int left, int spacedim);
//Declaration of dynamic shared memory: it holds the partially determined classes of the new points
extern __shared__ int mPartial[];
int main(int argc, char *argv[])
{
int newels; //number of points we want to classify
int k; //number of nearest points we use to classify
int numels; //total elements already classified
int spacedim;
char filePath[255]; //path + filename of input file
int classes_num; //number of classes
float* h_coords; //coords of existing points with a class
int* h_classes; //array containing the class of each point
//*** Device-variables-declaration ***
float* d_coords;
int2* d_determinate;
int* d_classes;
float2* d_kOutput;
//*** end-device-declaration
//***cudaEvent-declaration***
cudaEvent_t before_allocation, before_input, before_upload, before_knn, before_download;
cudaEvent_t after_allocation, after_input, after_upload, after_knn, after_download;
//***end-cudaEvent-declaration***
//Requirements: numels and newels must be greater than K
if (argc > 2)
{
strcpy(filePath, argv[1]);
k = atoi(argv[2]);
}
else
{
printf("how-to-use: knn <inputfile> <k> \n");
exit(1);
}
//***cuda-init-event***
check_error(cudaEventCreate(&before_allocation), "create before_allocation cudaEvent");
check_error(cudaEventCreate(&before_input), "create before_input cudaEvent");
check_error(cudaEventCreate(&before_upload), "create before_upload cudaEvent");
check_error(cudaEventCreate(&before_knn), "create before_knn cudaEvent");
check_error(cudaEventCreate(&before_download), "create before_download cudaEvent");
check_error(cudaEventCreate(&after_allocation), "create after_allocation cudaEvent");
check_error(cudaEventCreate(&after_input), "create after_input cudaEvent");
check_error(cudaEventCreate(&after_upload), "create after_upload cudaEvent");
check_error(cudaEventCreate(&after_knn), "create after_knn cudaEvent");
check_error(cudaEventCreate(&after_download), "create after_download cudaEvent");
//***end-cuda-init-event***
FILE *fp;
if((fp = fopen(filePath, "r")) == NULL)
{
printf("No such file\n");
exit(1);
}
fseek(fp, 0L, SEEK_END);
float fileSize = ftell(fp);
rewind(fp);
int count = fscanf(fp, "%d,%d,%d,%d\n", &numels, &newels, &classes_num, &spacedim);
int totalElements = numels + newels;
//*** allocation ***
cudaEventRecord(before_allocation);
h_coords = (float*) malloc(sizeof(float)*totalElements*spacedim);
h_classes = (int*) malloc(sizeof(int)*totalElements);
//*** device-allocation ***
check_error(cudaMalloc(&d_coords, totalElements*spacedim*sizeof(float)), "alloc d_coords_x");
check_error(cudaMalloc(&d_classes, totalElements*sizeof(int)), "alloc d_classes");
check_error(cudaMalloc(&d_determinate, newels*2*sizeof(int)), "alloc d_determinate");
check_error(cudaMalloc(&d_kOutput, newels*KMAX*2*sizeof(float)), "alloc d_kOutput");
//*** end-device-allocation ***
cudaEventRecord(after_allocation);
///***input-from-file***
cudaEventRecord(before_input);
readInput(fp, h_coords, h_classes, spacedim, numels, totalElements);
cudaEventRecord(after_input);
fclose(fp);
///***end-input-from-file***
//***copy-arrays-on-device***
cudaEventRecord(before_upload);
check_error(cudaMemcpy(d_coords, h_coords, totalElements*spacedim*sizeof(float), cudaMemcpyHostToDevice), "copy d_coords");
check_error(cudaMemcpy(d_classes, h_classes, totalElements*sizeof(int), cudaMemcpyHostToDevice), "copy d_classes");
cudaEventRecord(after_upload);
//***end-copy-arrays-on-device***
const int blockSize = 512;
int numBlocks = (newels + blockSize - 1)/blockSize;
cudaEventRecord(before_knn);
knn<<<numBlocks, blockSize>>>(d_coords, d_kOutput, totalElements, numels, spacedim, k, d_classes, classes_num);
for (int i = 0; i < newels; i++)
{
knnPunisher<<<numBlocks, blockSize, newels*sizeof(int)*2>>>(d_kOutput, d_classes, numels, newels, k, classes_num);
}
cudaEventRecord(after_knn);
check_error(cudaMemcpy(h_classes+numels, d_classes+numels, newels*sizeof(int), cudaMemcpyDeviceToHost), "download classes");
check_error(cudaEventSynchronize(after_knn), "sync cudaEvents");
printStats(before_knn, after_knn, "knn");
writeOutput(h_coords, h_classes, spacedim, totalElements);
return 0;
}
void check_error(cudaError_t err, const char *msg)
{
if (err != cudaSuccess)
{
fprintf(stderr, "%s : error %d (%s)\n", msg, err, cudaGetErrorString(err));
exit(err);
}
}
float runtime;
void printStats(cudaEvent_t before, cudaEvent_t after, const char *msg)
{
check_error(cudaEventElapsedTime(&runtime, before, after), msg);
printf("%s %gms\n", msg, runtime);
}
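// One thread per new point: scan every already-classified point plus the new points with a smaller id,
// keep the k nearest in kPoints, and classify by majority vote. If any of the k neighbours is itself
// still unclassified, the class is set to -1 so knnPunisher can resolve it later.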
__global__ void knn(float* coords, float2* kOutput, int totalElements, int numels, int spacedim, int k, int* classes, int classes_num)
{
int gid = numels + threadIdx.x + blockIdx.x*blockDim.x; //id of the point to classify
if (gid >= totalElements) return;
float* newPointCoords = coords+spacedim*gid;
float* pointCoords;
float2 kPoints[KMAX];
int i = 0, size = 0, count = 0;
float2 dist;
for (i = 0; i < numels; i++)
{
pointCoords = coords+spacedim*i;
dist = make_float2(distance(newPointCoords, pointCoords, spacedim), i);
insert(kPoints, dist, &size, k, gid);
}
for (count=0; i < gid; i++)
{
pointCoords = coords+spacedim*i;
dist = make_float2(distance(newPointCoords, pointCoords, spacedim), i);
count += insert(kPoints, dist, &size, k, gid);
}
if (count > 0)
{
classes[gid] = -1;
}
else
{
int kclasses[KMAX];
for (int j = 0; j < k; j++)
kclasses[j] = classes[(int)(kPoints[j].y)];
classes[gid] = deviceFindMode(kclasses, classes_num, k);
}
//copy kPoints into kOutput
int newelId = gid-numels;
for (i = 0; i < k; i++)
kOutput[newelId*KMAX + i] = kPoints[i];
}
__global__ void knnPunisher(float2* kOutput, int* classes, int numels, int newels, int k, int classes_num)
{
int gid = threadIdx.x + blockIdx.x*blockDim.x;
int lid = threadIdx.x;
int i = lid;
while (i < gid)
{
mPartial[i] = classes[i+numels];
i += blockDim.x;
}
if (gid < newels)
mPartial[gid] = classes[gid+numels];
if (gid >= newels || mPartial[gid] != -1) return;
//If we get here, the class of this point is still to be determined
int kPoints[KMAX];
for (int i = 0; i < k; i++)
kPoints[i] = kOutput[gid*KMAX+i].y; //gid
//Its dependencies, if already determined, are in shared memory
int count = 0;
for (int i = k-1; i >= 0; i--)
{
int id = kPoints[i];
int lid = id - numels;
if (id > numels && mPartial[lid] < 0)
{
//mark the undetermined ones
count++;
break;
}
}
if (count == 0)
{
//the point can now be determined
//its dependencies are in shared memory
int kclasses[KMAX];
for (int j = 0; j < k; j++)
kclasses[j] = classes[kPoints[j]];
int newClass = deviceFindMode(kclasses, classes_num, k);
classes[gid+numels] = newClass;
}
}
__device__ int deviceFindMode(int* kclasses, int classes_num, int k)
{
int classesCount[CLASSESMAX];
int i;
int temp=0;
for (i = 0; i < CLASSESMAX; i++)
classesCount[i] = 0;
for (i = 0; i < k; i++){
temp=kclasses[i];
classesCount[temp]+=1;
}
int max = 0;
int maxValue = classesCount[0];
for (i = 1; i < classes_num; i++)
{
int value = classesCount[i];
if (value > maxValue)
{
max = i;
maxValue = value;
}
else if (value != 0 && maxValue == value)
{
int j = 0;
for (j = 0; j < k; j++)
{
if (kclasses[j] == i)
{
max = i;
break;
}
else if (kclasses[j] == max)
break;
}
}
}
return max;
}
//sorted insertion into kPoints
__device__ int insert(float2* kPoints, float2 newDist, int* size, int k, int gid)
{
int inserted = 0;
if (*size == 0)
{
//Base case: insertion into an empty array
kPoints[0] = newDist;
*size = *size + 1;
return 1;
}
int i = 1;
float2* value = &newDist; //new element
float2* tail = &(kPoints[*size-i]);
if (*size < k)
{
kPoints[*size] = newDist;
value = &(kPoints[*size]);
inserted = 1;
}
//start from the end, swap while a larger element is found - stop at the first smaller element
while (i <= *size && (*tail).x > (*value).x)
{
swapfloat2(tail, value);
value = tail;
i++;
tail = &(kPoints[*size-i]);
inserted = 1;
}
if (inserted && *size < k) *size = *size + 1;
return inserted;
}
__device__ void swapfloat2(float2* d1, float2* d2)
{
//alternative to try: tmp = *d1; *d1 = *d2; *d2 = tmp;
float2 tmp;
tmp.x = (*d1).x;
tmp.y = (*d1).y;
(*d1).x = (*d2).x;
(*d1).y = (*d2).y;
(*d2).x = tmp.x;
(*d2).y = tmp.y;
}
// read input from file
void readInput(FILE* file, float* coords, int* classes, int spacedim, int numels, int totalElements)
{
int i, j;
int count;
for(i=0; i<numels; i++)
{
for (j = 0; j < spacedim; j++)
count = fscanf(file, "%f,", &(coords[i*spacedim +j]));
count = fscanf(file, "%d\n", &(classes[i]));
}
for(; i < totalElements; i++)
{
for (j = 0; j < spacedim; j++)
count = fscanf(file, "%f,", &(coords[i*spacedim+j]));
count = fscanf(file, "-1\n");
}
count++;
}
//Write output to file
void writeOutput(float* coords, int* classes, int spacedim, int numels)
{
FILE *fp;
fp = fopen(OUTPUT_FILE, "w");
int i, j;
for( i = 0; i < numels; i++)
{
for (j = 0; j < spacedim; j++)
fprintf(fp, "%lf,", coords[i*spacedim+j]);
fprintf(fp, "%d\n", classes[i]);
}
fclose(fp);
}
//multidimensional euclidean distance (without sqrt)
__device__ float distance(float* coords, float* coords2, int spacedim)
{
float sum = 0;
int i;
for (i = 0; i < spacedim; i++)
{
float diff = coords[i] - coords2[i];
sum += diff*diff;
}
return sum;
}
|
1990eb608155ff4dcceb6212262dc87120e75096.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include "DS_timer.h"
#define DATA_SIZE (1024 * 1024 * 256)
#define DATA_RANGE (256)
void printHist(int * arr, char * str);
__global__ void histogram_atomic(float * a, int * histo, int n) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid >= n) return;
atomicAdd(histo + (int)a[tid], 1);
}
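// Privatized histogram: each block first accumulates into a shared-memory copy of the histogram (one bin per thread of the 256-wide block), then merges its partial counts into the global histogram with one atomicAdd per bin.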
__global__ void histogram_shared(float * a, int * histo, int n) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ int sh[DATA_RANGE];
if(threadIdx.x < 256) sh[threadIdx.x] = 0;
__syncthreads();
if(tid < n) atomicAdd(&sh[(int)a[tid]], 1);
__syncthreads();
if(threadIdx.x < 256) atomicAdd(&histo[threadIdx.x], sh[threadIdx.x]);
}
int main()
{
float * arr, * d_arr;
int * a, * b, * c;
int * d_b, *d_c;
DS_timer timer(3);
arr = (float *) malloc(sizeof(float) * DATA_SIZE);
a = (int *) malloc(sizeof(int) * DATA_RANGE);
b = (int *) malloc(sizeof(int) * DATA_RANGE);
c = (int *) malloc(sizeof(int) * DATA_RANGE);
for(int i = 0; i < DATA_SIZE; i++)
arr[i] = rand() % DATA_RANGE;
for(int i = 0; i < DATA_RANGE; i++)
a[i] = 0;
timer.initTimers();
// CPU version
timer.setTimerName(0, (char *)"CPU");
timer.onTimer(0);
for(int i = 0; i < DATA_SIZE; i++)
a[(int) arr[i]]++;
timer.offTimer(0);
printHist(a, (char *)"Serial version");
// Global Sync version
hipMalloc((void **)&d_arr, sizeof(float) * DATA_SIZE);
hipMalloc((void **)&d_b, sizeof(int) * DATA_RANGE);
hipMemcpy(d_arr, arr, sizeof(float) * DATA_SIZE, hipMemcpyHostToDevice);
hipMemset(d_b, 0, sizeof(int) * DATA_RANGE);
timer.setTimerName(1, (char *)"Global Sync");
timer.onTimer(1);
hipLaunchKernelGGL(( histogram_atomic), dim3(DATA_SIZE / 256), dim3(256), 0, 0, d_arr, d_b, DATA_SIZE);
hipDeviceSynchronize();
timer.offTimer(1);
hipMemcpy(b, d_b, sizeof(int) * DATA_RANGE, hipMemcpyDeviceToHost);
printHist(b, (char *) "Global Sync");
// Shared Sync version
hipMalloc((void **)&d_c, sizeof(int) * DATA_RANGE);
hipMemset(d_c, 0, sizeof(int) * DATA_RANGE);
timer.setTimerName(2, (char *)"Shared Sync");
timer.onTimer(2);
hipLaunchKernelGGL(( histogram_shared), dim3(DATA_SIZE / 256), dim3(256), 0, 0, d_arr, d_c, DATA_SIZE);
hipDeviceSynchronize();
timer.offTimer(2);
hipMemcpy(c, d_c, sizeof(int) * DATA_RANGE, hipMemcpyDeviceToHost);
printHist(c, (char *) "Shared Sync");
timer.printTimer();
free(arr);
free(a); free(b); free(c);
hipFree(d_arr); hipFree(d_b); hipFree(d_c);
return 0;
}
void printHist(int * arr, char * str)
{
printf("\t<< %s >>\n", str);
for(int i = 0; i < DATA_RANGE; i++)
{
printf(" <%3d : %d>", i, arr[i]);
if(i % 8 == 7)
printf("\n");
}
printf("\n");
}
|
1990eb608155ff4dcceb6212262dc87120e75096.cu
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include "DS_timer.h"
#define DATA_SIZE (1024 * 1024 * 256)
#define DATA_RANGE (256)
void printHist(int * arr, char * str);
__global__ void histogram_atomic(float * a, int * histo, int n) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid >= n) return;
atomicAdd(histo + (int)a[tid], 1);
}
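// Privatized histogram: each block first accumulates into a shared-memory copy of the histogram (one bin per thread of the 256-wide block), then merges its partial counts into the global histogram with one atomicAdd per bin.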
__global__ void histogram_shared(float * a, int * histo, int n) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ int sh[DATA_RANGE];
if(threadIdx.x < 256) sh[threadIdx.x] = 0;
__syncthreads();
if(tid < n) atomicAdd(&sh[(int)a[tid]], 1);
__syncthreads();
if(threadIdx.x < 256) atomicAdd(&histo[threadIdx.x], sh[threadIdx.x]);
}
int main()
{
float * arr, * d_arr;
int * a, * b, * c;
int * d_b, *d_c;
DS_timer timer(3);
arr = (float *) malloc(sizeof(float) * DATA_SIZE);
a = (int *) malloc(sizeof(int) * DATA_RANGE);
b = (int *) malloc(sizeof(int) * DATA_RANGE);
c = (int *) malloc(sizeof(int) * DATA_RANGE);
for(int i = 0; i < DATA_SIZE; i++)
arr[i] = rand() % DATA_RANGE;
for(int i = 0; i < DATA_RANGE; i++)
a[i] = 0;
timer.initTimers();
// CPU version
timer.setTimerName(0, (char *)"CPU");
timer.onTimer(0);
for(int i = 0; i < DATA_SIZE; i++)
a[(int) arr[i]]++;
timer.offTimer(0);
printHist(a, (char *)"Serial version");
// Global Sync version
cudaMalloc((void **)&d_arr, sizeof(float) * DATA_SIZE);
cudaMalloc((void **)&d_b, sizeof(int) * DATA_RANGE);
cudaMemcpy(d_arr, arr, sizeof(float) * DATA_SIZE, cudaMemcpyHostToDevice);
cudaMemset(d_b, 0, sizeof(int) * DATA_RANGE);
timer.setTimerName(1, (char *)"Global Sync");
timer.onTimer(1);
histogram_atomic<<<DATA_SIZE / 256, 256>>>(d_arr, d_b, DATA_SIZE);
cudaDeviceSynchronize();
timer.offTimer(1);
cudaMemcpy(b, d_b, sizeof(int) * DATA_RANGE, cudaMemcpyDeviceToHost);
printHist(b, (char *) "Global Sync");
// Shared Sync version
cudaMalloc((void **)&d_c, sizeof(int) * DATA_RANGE);
cudaMemset(d_c, 0, sizeof(int) * DATA_RANGE);
timer.setTimerName(2, (char *)"Shared Sync");
timer.onTimer(2);
histogram_shared<<<DATA_SIZE / 256, 256>>>(d_arr, d_c, DATA_SIZE);
cudaDeviceSynchronize();
timer.offTimer(2);
cudaMemcpy(c, d_c, sizeof(int) * DATA_RANGE, cudaMemcpyDeviceToHost);
printHist(c, (char *) "Shared Sync");
timer.printTimer();
free(arr);
free(a); free(b); free(c);
cudaFree(d_arr); cudaFree(d_b); cudaFree(d_c);
return 0;
}
void printHist(int * arr, char * str)
{
printf("\t<< %s >>\n", str);
for(int i = 0; i < DATA_RANGE; i++)
{
printf(" <%3d : %d>", i, arr[i]);
if(i % 8 == 7)
printf("\n");
}
printf("\n");
}
|
245e773460d52463774d0fb1c942c0680243b9e9.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "saber/funcs/impl/cuda/saber_maxout.h"
namespace anakin {
namespace saber {
template <typename dtype>
__global__ void max_out(const dtype* input_ptr, dtype* output_ptr, const int count,
const int num_out, const int c_out, const int h_out, const int w_out, const int groups) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int thread_num = blockDim.x * gridDim.x;
int feature_size = h_out * w_out;
int feature_map_size = feature_size * c_out;
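// Grid-stride loop over output elements: each output value is the maximum over the 'groups' consecutive input feature maps that fold into this output channel.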
for (int i = tid; i < count; i += thread_num) {
int batch_index = i / feature_map_size;
int channel_index = (i / feature_size) % c_out;
int feature_inner_index = i % feature_size;
int src_index = (batch_index * feature_map_size + channel_index * feature_size) * groups + feature_inner_index;
dtype max = input_ptr[src_index]; //get first element.
for (int j = 1; j < groups; j++) {
dtype tmp = input_ptr[src_index + j * feature_size];
max = max < tmp ? tmp: max;
}
output_ptr[i] = max;
}
}
template <DataType OpDtype>
SaberStatus SaberMaxOut<NV, OpDtype>::dispatch(const std::vector<Tensor<NV>*>& inputs,
std::vector<Tensor<NV>*>& outputs,
MaxOutParam<NV>& param) {
hipStream_t cuda_stream = this->_ctx->get_compute_stream();
const OpDataType* input_ptr = (const OpDataType*)inputs[0]->data();
OpDataType* output_ptr = (OpDataType*)outputs[0]->mutable_data();
int count = outputs[0]->valid_size();
hipLaunchKernelGGL(( max_out<OpDataType>), dim3(CUDA_GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, cuda_stream,
input_ptr,
output_ptr,
count,
_num_out,
_c_out,
_h_out,
_w_out,
param.groups
);
CUDA_POST_KERNEL_CHECK;
return SaberSuccess;
}
template class SaberMaxOut<NV, AK_FLOAT>;
DEFINE_OP_TEMPLATE(SaberMaxOut, MaxOutParam, NV, AK_HALF);
DEFINE_OP_TEMPLATE(SaberMaxOut, MaxOutParam, NV, AK_INT8);
} // namespace saber.
} // namespace anakin.
|
245e773460d52463774d0fb1c942c0680243b9e9.cu
|
#include "saber/funcs/impl/cuda/saber_maxout.h"
namespace anakin {
namespace saber {
template <typename dtype>
__global__ void max_out(const dtype* input_ptr, dtype* output_ptr, const int count,
const int num_out, const int c_out, const int h_out, const int w_out, const int groups) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int thread_num = blockDim.x * gridDim.x;
int feature_size = h_out * w_out;
int feature_map_size = feature_size * c_out;
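// Grid-stride loop over output elements: each output value is the maximum over the 'groups' consecutive input feature maps that fold into this output channel.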
for (int i = tid; i < count; i += thread_num) {
int batch_index = i / feature_map_size;
int channel_index = (i / feature_size) % c_out;
int feature_inner_index = i % feature_size;
int src_index = (batch_index * feature_map_size + channel_index * feature_size) * groups + feature_inner_index;
dtype max = input_ptr[src_index]; //get first element.
for (int j = 1; j < groups; j++) {
dtype tmp = input_ptr[src_index + j * feature_size];
max = max < tmp ? tmp: max;
}
output_ptr[i] = max;
}
}
template <DataType OpDtype>
SaberStatus SaberMaxOut<NV, OpDtype>::dispatch(const std::vector<Tensor<NV>*>& inputs,
std::vector<Tensor<NV>*>& outputs,
MaxOutParam<NV>& param) {
cudaStream_t cuda_stream = this->_ctx->get_compute_stream();
const OpDataType* input_ptr = (const OpDataType*)inputs[0]->data();
OpDataType* output_ptr = (OpDataType*)outputs[0]->mutable_data();
int count = outputs[0]->valid_size();
max_out<OpDataType><<<CUDA_GET_BLOCKS(count), CUDA_NUM_THREADS, 0, cuda_stream>>>(
input_ptr,
output_ptr,
count,
_num_out,
_c_out,
_h_out,
_w_out,
param.groups
);
CUDA_POST_KERNEL_CHECK;
return SaberSuccess;
}
template class SaberMaxOut<NV, AK_FLOAT>;
DEFINE_OP_TEMPLATE(SaberMaxOut, MaxOutParam, NV, AK_HALF);
DEFINE_OP_TEMPLATE(SaberMaxOut, MaxOutParam, NV, AK_INT8);
} // namespace saber.
} // namespace anakin.
|
9369bb788f8e371ba1a274da4d4ffc1dc3bb32cc.hip
|
// !!! This is a file automatically generated by hipify!!!
/**
* test_DeviceReader.cu
*
* Created on: Mar 14, 2014
* Author: vogt
*/
#include "gmock/gmock.h"
#include "cudacommon/DeviceCommunicator.h"
#include "cudacommon/cuda_error.h"
using namespace culgt;
using namespace ::testing;
class ADeviceCommunicator: public Test
{
public:
float* cudaMemory;
const float someValue = 1.523;
const int someIndex = 4;
void SetUp()
{
CUDA_SAFE_CALL( hipMalloc( &cudaMemory, sizeof( float )*10 ), "malloc" );
}
void TearDown()
{
hipFree( cudaMemory );
}
};
TEST_F( ADeviceCommunicator, SetGetValueFloat )
{
DeviceCommunicator<float>::setValue( cudaMemory, someIndex, someValue );
ASSERT_FLOAT_EQ( someValue, DeviceCommunicator<float>::getValue( cudaMemory, someIndex ) );
}
|
9369bb788f8e371ba1a274da4d4ffc1dc3bb32cc.cu
|
/**
* test_DeviceReader.cu
*
* Created on: Mar 14, 2014
* Author: vogt
*/
#include "gmock/gmock.h"
#include "cudacommon/DeviceCommunicator.h"
#include "cudacommon/cuda_error.h"
using namespace culgt;
using namespace ::testing;
class ADeviceCommunicator: public Test
{
public:
float* cudaMemory;
const float someValue = 1.523;
const int someIndex = 4;
void SetUp()
{
CUDA_SAFE_CALL( cudaMalloc( &cudaMemory, sizeof( float )*10 ), "malloc" );
}
void TearDown()
{
cudaFree( cudaMemory );
}
};
TEST_F( ADeviceCommunicator, SetGetValueFloat )
{
DeviceCommunicator<float>::setValue( cudaMemory, someIndex, someValue );
ASSERT_FLOAT_EQ( someValue, DeviceCommunicator<float>::getValue( cudaMemory, someIndex ) );
}
|
85ea1ba22bfb1ddaf4765569ccc1c0ef61e6419c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <math.h>
#include <float.h>
#include "hip/hip_runtime.h"
#define CHECK_CUDA_ERROR(call) \
{ \
const hipError_t error = call; \
if (error != hipSuccess) \
{ \
char str[1024]; \
sprintf_s(str, 1024, "Cuda Error: %s:%d, cuda:%d, reason: %s\n", __FILE__, __LINE__, error, hipGetErrorString(error)); \
return str; \
} \
} \
namespace cuda
{
#if 0
} // indent guard
#endif
constexpr size_t BLOCK_SIZE = 32;
__device__ __host__ void updateObjectPosition(Object & object, double updateTime)
{
object.centerX_ += object.velocityX_ * updateTime;
object.centerY_ += object.velocityY_ * updateTime;
}
__device__ __host__ void updateObjectVelocity(Object & object, double updateTime)
{
object.velocityX_ += object.accelerationX_ * updateTime;
object.velocityY_ += object.accelerationY_ * updateTime;
}
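// Sphere-sphere collision test: with relative position D(t) = D0 + dD*t, solve |D(t)|^2 = (r1+r2)^2,
// i.e. P*t^2 + 2*Q*t + (R - r12S) = 0, and accept the earliest root t in [0, deltatime].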
__device__ __host__ bool checkCollisionObjects(const Object & obj1, const Object & obj2, double deltatime, double & collisionTime)
{
double Dx0 = obj1.centerX_ - obj2.centerX_;
double Dy0 = obj1.centerY_ - obj2.centerY_;
double Dx1 = obj1.centerX_ - obj2.centerX_ + obj1.velocityX_ - obj2.velocityX_;
double Dy1 = obj1.centerY_ - obj2.centerY_ + obj1.velocityY_ - obj2.velocityY_;
double dDx = Dx1 - Dx0;
double dDy = Dy1 - Dy0;
double P = dDx * dDx + dDy * dDy;
double Q = dDx * Dx0 + dDy * Dy0;
double R = Dx0 * Dx0 + Dy0 * Dy0;
double r12S = (obj1.radius_ + obj2.radius_) * (obj1.radius_ + obj2.radius_);
if (R < r12S)
{
collisionTime = 0.0;
return true;
}
if (P < DBL_EPSILON)
{
return false;
}
double judge = Q * Q - P * (R - r12S);
if (judge < 0)
{
return false;
}
double sqrtJudge = sqrt(judge);
double T1 = (-Q + sqrtJudge) / P;
double T2 = (-Q - sqrtJudge) / P;
double T = T1 < T2 ? T1 : T2;
if (T < 0 || deltatime < T)
{
return false;
}
collisionTime = T;
return true;
}
__device__ __host__ bool checkCollisionObjectLine(const Object & obj, const LineObject & line, double deltatime, double & collisionTime)
{
double Nx = line.y1_ - line.y2_;
double Ny = line.x1_ - line.x2_;
double NN = sqrt(Nx * Nx + Ny * Ny);
if (NN < DBL_EPSILON)
{
return false;
}
double PN = abs((obj.centerX_ - line.x1_) * Nx + (obj.centerY_ - line.y1_) * Ny);
double VN = obj.velocityX_ * Nx + obj.velocityY_ * Ny;
double D = PN / NN - obj.radius_;
double dD = VN / NN;
if (dD * dD < DBL_EPSILON)
{
return false;
}
double T = D / dD;
if (T < 0 || deltatime < T)
{
return false;
}
collisionTime = T;
return true;
}
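// Collision response for two equal-mass objects: each velocity gains the relative normal velocity component scaled by (1 + coefficientOfRestitution) / 2, along the line joining the centers.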
__device__ __host__ void collisionProcessObjects(Object & obj1, Object & obj2, const double objectMass, const double coefficientOfRestitution)
{
double Dx = obj2.centerX_ - obj1.centerX_;
double Dy = obj2.centerY_ - obj1.centerY_;
if (Dx * Dx < DBL_EPSILON && Dy * Dy < DBL_EPSILON)
{
return;
}
double nDx = Dx / sqrt(Dx * Dx + Dy * Dy);
double nDy = Dy / sqrt(Dx * Dx + Dy * Dy);
double VD12x = obj1.velocityX_ - obj2.velocityX_;
double VD12y = obj1.velocityY_ - obj2.velocityY_;
double VD21x = obj2.velocityX_ - obj1.velocityX_;
double VD21y = obj2.velocityY_ - obj1.velocityY_;
double S1 = VD21x * nDx + VD21y * nDy;
double S2 = VD12x * nDx + VD12y * nDy;
if (objectMass < DBL_EPSILON)
{
return;
}
double C1 = (objectMass / (objectMass + objectMass)) * (1 + coefficientOfRestitution) * S1;
double C2 = (objectMass / (objectMass + objectMass)) * (1 + coefficientOfRestitution) * S2;
obj1.velocityX_ = C1 * nDx + obj1.velocityX_;
obj1.velocityY_ = C1 * nDy + obj1.velocityY_;
obj2.velocityX_ = C2 * nDx + obj2.velocityX_;
obj2.velocityY_ = C2 * nDy + obj2.velocityY_;
}
__device__ __host__ void collisionProcessObjectLine(Object & obj, const LineObject & line, const double objectMass, const double coefficientOfRestitution)
{
double Nx = line.y1_ - line.y2_;
double Ny = line.x1_ - line.x2_;
if (Nx * Nx < DBL_EPSILON && Ny * Ny < DBL_EPSILON)
{
return;
}
double nNx = Nx / sqrt(Nx * Nx + Ny * Ny);
double nNy = Ny / sqrt(Nx * Nx + Ny * Ny);
double dotNV = obj.velocityX_ * nNx + obj.velocityY_ * nNy;
double C = (1 + coefficientOfRestitution) * dotNV;
obj.velocityX_ = obj.velocityX_ - C * nNx;
obj.velocityY_ = obj.velocityY_ - C * nNy;
}
__global__ void updateObjects(double deltatime, Object* objSrc, Object* objDst, size_t objSIze, LineObject* lnObjSrc, size_t lnObjSize, const double objectMass, const double coefficientOfRestitution)
{
const size_t objIdx = blockIdx.x * BLOCK_SIZE + threadIdx.x;
Object obj = objSrc[objIdx];
updateObjectVelocity(obj, deltatime);
for (size_t i = 0; i < 4; i++)
{
double collisionTime = 0.0;
if (checkCollisionObjectLine(obj, lnObjSrc[i], deltatime, collisionTime))
{
updateObjectPosition(obj, collisionTime);
collisionProcessObjectLine(obj, lnObjSrc[i], objectMass, coefficientOfRestitution);
updateObjectPosition(obj, deltatime - collisionTime);
objDst[objIdx] = obj;
return;
}
}
const size_t halfObjSize = (objSIze + 1) / 2;
const size_t endCheckIdx = (objIdx + halfObjSize + 1) % objSIze;
Object otherObj;
for (size_t i = objIdx + 1; i != endCheckIdx; i = (i + 1) % objSIze)
{
double collisionTime = 0.0;
otherObj = objSrc[i];
if (cuda::checkCollisionObjects(obj, otherObj, deltatime, collisionTime))
{
cuda::updateObjectPosition(obj, collisionTime);
cuda::updateObjectPosition(otherObj, collisionTime);
cuda::collisionProcessObjects(obj, otherObj, objectMass, coefficientOfRestitution);
cuda::updateObjectPosition(obj, deltatime - collisionTime);
// cuda::updateObjectPosition(otherObj, deltatime - collisionTime);
objDst[objIdx] = obj;
return;
}
}
updateObjectPosition(obj, deltatime);
objDst[objIdx] = obj;
return;
}
char* gpu_update(double deltatime, Object* objects, size_t objectsSize, LineObject* lineObjects, size_t lineObjectsSize, const double objectMass, const double coefficientOfRestitution)
{
Object* dInObjects;
LineObject* dInLineObjects;
Object* dOutObjects;
CHECK_CUDA_ERROR(hipHostMalloc((void**)&dInObjects, sizeof(Object) * objectsSize));
CHECK_CUDA_ERROR(hipHostMalloc((void**)&dInLineObjects, sizeof(LineObject) * lineObjectsSize));
CHECK_CUDA_ERROR(hipHostMalloc((void**)&dOutObjects, sizeof(Object) * objectsSize));
CHECK_CUDA_ERROR(hipMemcpy(dInObjects, objects, sizeof(Object) * objectsSize, hipMemcpyHostToDevice));
CHECK_CUDA_ERROR(hipMemcpy(dInLineObjects, lineObjects, sizeof(LineObject) * lineObjectsSize, hipMemcpyHostToDevice));
dim3 block(BLOCK_SIZE);
dim3 grid((objectsSize + block.x - 1) / block.x);
hipLaunchKernelGGL(( updateObjects) , dim3(grid), dim3(block), 0, 0, deltatime, dInObjects, dOutObjects, objectsSize, dInLineObjects, lineObjectsSize, objectMass, coefficientOfRestitution);
CHECK_CUDA_ERROR(hipDeviceSynchronize());
CHECK_CUDA_ERROR(hipMemcpy(objects, dOutObjects, sizeof(Object) * objectsSize, hipMemcpyDeviceToHost));
CHECK_CUDA_ERROR(hipHostFree((void*)dInObjects));
CHECK_CUDA_ERROR(hipHostFree((void*)dInLineObjects));
CHECK_CUDA_ERROR(hipHostFree((void*)dOutObjects));
return nullptr;
}
}
|
85ea1ba22bfb1ddaf4765569ccc1c0ef61e6419c.cu
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <math.h>
#include <float.h>
#include "cuda.h"
#define CHECK_CUDA_ERROR(call) \
{ \
const cudaError_t error = call; \
if (error != cudaSuccess) \
{ \
char str[1024]; \
sprintf_s(str, 1024, "Cuda Error: %s:%d, cuda:%d, reason: %s\n", __FILE__, __LINE__, error, cudaGetErrorString(error)); \
return str; \
} \
} \
namespace cuda
{
#if 0
} // indent guard
#endif
constexpr size_t BLOCK_SIZE = 32;
__device__ __host__ void updateObjectPosition(Object & object, double updateTime)
{
object.centerX_ += object.velocityX_ * updateTime;
object.centerY_ += object.velocityY_ * updateTime;
}
__device__ __host__ void updateObjectVelocity(Object & object, double updateTime)
{
object.velocityX_ += object.accelerationX_ * updateTime;
object.velocityY_ += object.accelerationY_ * updateTime;
}
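// Sphere-sphere collision test: with relative position D(t) = D0 + dD*t, solve |D(t)|^2 = (r1+r2)^2,
// i.e. P*t^2 + 2*Q*t + (R - r12S) = 0, and accept the earliest root t in [0, deltatime].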
__device__ __host__ bool checkCollisionObjects(const Object & obj1, const Object & obj2, double deltatime, double & collisionTime)
{
double Dx0 = obj1.centerX_ - obj2.centerX_;
double Dy0 = obj1.centerY_ - obj2.centerY_;
double Dx1 = obj1.centerX_ - obj2.centerX_ + obj1.velocityX_ - obj2.velocityX_;
double Dy1 = obj1.centerY_ - obj2.centerY_ + obj1.velocityY_ - obj2.velocityY_;
double dDx = Dx1 - Dx0;
double dDy = Dy1 - Dy0;
double P = dDx * dDx + dDy * dDy;
double Q = dDx * Dx0 + dDy * Dy0;
double R = Dx0 * Dx0 + Dy0 * Dy0;
double r12S = (obj1.radius_ + obj2.radius_) * (obj1.radius_ + obj2.radius_);
if (R < r12S)
{
collisionTime = 0.0;
return true;
}
if (P < DBL_EPSILON)
{
return false;
}
double judge = Q * Q - P * (R - r12S);
if (judge < 0)
{
return false;
}
double sqrtJudge = sqrt(judge);
double T1 = (-Q + sqrtJudge) / P;
double T2 = (-Q - sqrtJudge) / P;
double T = T1 < T2 ? T1 : T2;
if (T < 0 || deltatime < T)
{
return false;
}
collisionTime = T;
return true;
}
__device__ __host__ bool checkCollisionObjectLine(const Object & obj, const LineObject & line, double deltatime, double & collisionTime)
{
double Nx = line.y1_ - line.y2_;
double Ny = line.x1_ - line.x2_;
double NN = sqrt(Nx * Nx + Ny * Ny);
if (NN < DBL_EPSILON)
{
return false;
}
double PN = abs((obj.centerX_ - line.x1_) * Nx + (obj.centerY_ - line.y1_) * Ny);
double VN = obj.velocityX_ * Nx + obj.velocityY_ * Ny;
double D = PN / NN - obj.radius_;
double dD = VN / NN;
if (dD * dD < DBL_EPSILON)
{
return false;
}
double T = D / dD;
if (T < 0 || deltatime < T)
{
return false;
}
collisionTime = T;
return true;
}
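// Collision response for two equal-mass objects: each velocity gains the relative normal velocity component scaled by (1 + coefficientOfRestitution) / 2, along the line joining the centers.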
__device__ __host__ void collisionProcessObjects(Object & obj1, Object & obj2, const double objectMass, const double coefficientOfRestitution)
{
double Dx = obj2.centerX_ - obj1.centerX_;
double Dy = obj2.centerY_ - obj1.centerY_;
if (Dx * Dx < DBL_EPSILON && Dy * Dy < DBL_EPSILON)
{
return;
}
double nDx = Dx / sqrt(Dx * Dx + Dy * Dy);
double nDy = Dy / sqrt(Dx * Dx + Dy * Dy);
double VD12x = obj1.velocityX_ - obj2.velocityX_;
double VD12y = obj1.velocityY_ - obj2.velocityY_;
double VD21x = obj2.velocityX_ - obj1.velocityX_;
double VD21y = obj2.velocityY_ - obj1.velocityY_;
double S1 = VD21x * nDx + VD21y * nDy;
double S2 = VD12x * nDx + VD12y * nDy;
if (objectMass < DBL_EPSILON)
{
return;
}
double C1 = (objectMass / (objectMass + objectMass)) * (1 + coefficientOfRestitution) * S1;
double C2 = (objectMass / (objectMass + objectMass)) * (1 + coefficientOfRestitution) * S2;
obj1.velocityX_ = C1 * nDx + obj1.velocityX_;
obj1.velocityY_ = C1 * nDy + obj1.velocityY_;
obj2.velocityX_ = C2 * nDx + obj2.velocityX_;
obj2.velocityY_ = C2 * nDy + obj2.velocityY_;
}
__device__ __host__ void collisionProcessObjectLine(Object & obj, const LineObject & line, const double objectMass, const double coefficientOfRestitution)
{
double Nx = line.y1_ - line.y2_;
double Ny = line.x1_ - line.x2_;
if (Nx * Nx < DBL_EPSILON && Ny * Ny < DBL_EPSILON)
{
return;
}
double nNx = Nx / sqrt(Nx * Nx + Ny * Ny);
double nNy = Ny / sqrt(Nx * Nx + Ny * Ny);
double dotNV = obj.velocityX_ * nNx + obj.velocityY_ * nNy;
double C = (1 + coefficientOfRestitution) * dotNV;
obj.velocityX_ = obj.velocityX_ - C * nNx;
obj.velocityY_ = obj.velocityY_ - C * nNy;
}
__global__ void updateObjects(double deltatime, Object* objSrc, Object* objDst, size_t objSIze, LineObject* lnObjSrc, size_t lnObjSize, const double objectMass, const double coefficientOfRestitution)
{
const size_t objIdx = blockIdx.x * BLOCK_SIZE + threadIdx.x;
Object obj = objSrc[objIdx];
updateObjectVelocity(obj, deltatime);
for (size_t i = 0; i < 4; i++)
{
double collisionTime = 0.0;
if (checkCollisionObjectLine(obj, lnObjSrc[i], deltatime, collisionTime))
{
updateObjectPosition(obj, collisionTime);
collisionProcessObjectLine(obj, lnObjSrc[i], objectMass, coefficientOfRestitution);
updateObjectPosition(obj, deltatime - collisionTime);
objDst[objIdx] = obj;
return;
}
}
const size_t halfObjSize = (objSIze + 1) / 2;
const size_t endCheckIdx = (objIdx + halfObjSize + 1) % objSIze;
Object otherObj;
for (size_t i = objIdx + 1; i != endCheckIdx; i = (i + 1) % objSIze)
{
double collisionTime = 0.0;
otherObj = objSrc[i];
if (cuda::checkCollisionObjects(obj, otherObj, deltatime, collisionTime))
{
cuda::updateObjectPosition(obj, collisionTime);
cuda::updateObjectPosition(otherObj, collisionTime);
cuda::collisionProcessObjects(obj, otherObj, objectMass, coefficientOfRestitution);
cuda::updateObjectPosition(obj, deltatime - collisionTime);
// cuda::updateObjectPosition(otherObj, deltatime - collisionTime);
objDst[objIdx] = obj;
return;
}
}
updateObjectPosition(obj, deltatime);
objDst[objIdx] = obj;
return;
}
char* gpu_update(double deltatime, Object* objects, size_t objectsSize, LineObject* lineObjects, size_t lineObjectsSize, const double objectMass, const double coefficientOfRestitution)
{
Object* dInObjects;
LineObject* dInLineObjects;
Object* dOutObjects;
CHECK_CUDA_ERROR(cudaMallocHost((void**)&dInObjects, sizeof(Object) * objectsSize));
CHECK_CUDA_ERROR(cudaMallocHost((void**)&dInLineObjects, sizeof(LineObject) * lineObjectsSize));
CHECK_CUDA_ERROR(cudaMallocHost((void**)&dOutObjects, sizeof(Object) * objectsSize));
CHECK_CUDA_ERROR(cudaMemcpy(dInObjects, objects, sizeof(Object) * objectsSize, cudaMemcpyHostToDevice));
CHECK_CUDA_ERROR(cudaMemcpy(dInLineObjects, lineObjects, sizeof(LineObject) * lineObjectsSize, cudaMemcpyHostToDevice));
dim3 block(BLOCK_SIZE);
dim3 grid((objectsSize + block.x - 1) / block.x);
updateObjects <<<grid, block>>>(deltatime, dInObjects, dOutObjects, objectsSize, dInLineObjects, lineObjectsSize, objectMass, coefficientOfRestitution);
CHECK_CUDA_ERROR(cudaDeviceSynchronize());
CHECK_CUDA_ERROR(cudaMemcpy(objects, dOutObjects, sizeof(Object) * objectsSize, cudaMemcpyDeviceToHost));
CHECK_CUDA_ERROR(cudaFreeHost((void*)dInObjects));
CHECK_CUDA_ERROR(cudaFreeHost((void*)dInLineObjects));
CHECK_CUDA_ERROR(cudaFreeHost((void*)dOutObjects));
return nullptr;
}
}
|
75cf6a6ba6c765d02a3f0d777365e1c77dbbd260.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <gtest/gtest.h>
#include <linalg/transpose.h>
#include <test_utils.h>
#include <cuml/linear_model/glm.hpp>
#include <glm/qn/glm_linear.cuh>
#include <glm/qn/glm_logistic.cuh>
#include <glm/qn/glm_softmax.cuh>
#include <glm/qn/qn.cuh>
#include <vector>
namespace ML {
namespace GLM {
using namespace MLCommon;
struct QuasiNewtonTest : ::testing::Test {
static constexpr int N = 10;
static constexpr int D = 2;
const static double *nobptr;
const static double tol;
const static double X[N][D];
raft::handle_t cuml_handle;
const raft::handle_t &handle;
hipStream_t stream;
std::shared_ptr<SimpleMatOwning<double>> Xdev;
std::shared_ptr<SimpleVecOwning<double>> ydev;
std::shared_ptr<deviceAllocator> allocator;
QuasiNewtonTest() : handle(cuml_handle) {}
void SetUp() {
stream = cuml_handle.get_stream();
Xdev.reset(new SimpleMatOwning<double>(handle.get_device_allocator(), N, D,
stream, ROW_MAJOR));
raft::update_device(Xdev->data, &X[0][0], Xdev->len, stream);
ydev.reset(
new SimpleVecOwning<double>(handle.get_device_allocator(), N, stream));
CUDA_CHECK(hipStreamSynchronize(stream));
allocator = handle.get_device_allocator();
}
void TearDown() {}
};
const double *QuasiNewtonTest::nobptr = 0;
const double QuasiNewtonTest::tol = 5e-6;
const double QuasiNewtonTest::X[QuasiNewtonTest::N][QuasiNewtonTest::D] = {
{-0.2047076594847130, 0.4789433380575482},
{-0.5194387150567381, -0.5557303043474900},
{1.9657805725027142, 1.3934058329729904},
{0.0929078767437177, 0.2817461528302025},
{0.7690225676118387, 1.2464347363862822},
{1.0071893575830049, -1.2962211091122635},
{0.2749916334321240, 0.2289128789353159},
{1.3529168351654497, 0.8864293405915888},
{-2.0016373096603974, -0.3718425371402544},
{1.6690253095248706, -0.4385697358355719}};
template <typename T, class Comp>
::testing::AssertionResult checkParamsEqual(const raft::handle_t &handle,
const T *host_weights,
const T *host_bias, const T *w,
const GLMDims &dims, Comp &comp,
hipStream_t stream) {
int C = dims.C;
int D = dims.D;
bool fit_intercept = dims.fit_intercept;
std::vector<T> w_ref_cm(C * D);
int idx = 0;
for (int d = 0; d < D; d++)
for (int c = 0; c < C; c++) {
w_ref_cm[idx++] = host_weights[c * D + d];
}
SimpleVecOwning<T> w_ref(handle.get_device_allocator(), dims.n_param, stream);
raft::update_device(w_ref.data, &w_ref_cm[0], C * D, stream);
if (fit_intercept) {
raft::update_device(&w_ref.data[C * D], host_bias, C, stream);
}
CUDA_CHECK(hipStreamSynchronize(stream));
return raft::devArrMatch(w_ref.data, w, w_ref.len, comp);
}
template <typename T, class LossFunction>
T run(const raft::handle_t &handle, LossFunction &loss, const SimpleMat<T> &X,
const SimpleVec<T> &y, T l1, T l2, T *w, SimpleMat<T> &z, int verbosity,
hipStream_t stream) {
int max_iter = 100;
T grad_tol = 1e-16;
int linesearch_max_iter = 50;
int lbfgs_memory = 5;
int num_iters = 0;
T fx;
SimpleVec<T> w0(w, loss.n_param);
qn_fit<T, LossFunction>(handle, loss, X.data, y.data, z.data, X.m, l1, l2,
max_iter, grad_tol, linesearch_max_iter, lbfgs_memory,
verbosity, w0.data, &fx, &num_iters, X.ord, stream);
return fx;
}
template <typename T>
T run_api(const raft::handle_t &cuml_handle, int loss_type, int C,
bool fit_intercept, const SimpleMat<T> &X, const SimpleVec<T> &y,
T l1, T l2, T *w, SimpleMat<T> &z, int verbosity,
hipStream_t stream) {
int max_iter = 100;
T grad_tol = 1e-8;
int linesearch_max_iter = 50;
int lbfgs_memory = 5;
int num_iters = 0;
SimpleVec<T> w0(w, X.n + fit_intercept);
w0.fill(T(0), stream);
T fx;
qnFit(cuml_handle, X.data, y.data, X.m, X.n, C, fit_intercept, l1, l2,
max_iter, grad_tol, linesearch_max_iter, lbfgs_memory, verbosity, w,
&fx, &num_iters, false, loss_type);
return fx;
}
TEST_F(QuasiNewtonTest, binary_logistic_vs_sklearn) {
raft::CompareApprox<double> compApprox(tol);
// Test case generated in python and solved with sklearn
double y[N] = {1, 1, 1, 0, 1, 0, 1, 0, 1, 0};
raft::update_device(ydev->data, &y[0], ydev->len, stream);
CUDA_CHECK(hipStreamSynchronize(stream));
double alpha = 0.01 * N;
LogisticLoss<double> loss_b(handle, D, true);
LogisticLoss<double> loss_no_b(handle, D, false);
SimpleVecOwning<double> w0(allocator, D + 1, stream);
SimpleVecOwning<double> z(allocator, N, stream);
double l1, l2, fx;
double w_l1_b[2] = {-1.6899370396155091, 1.9021577534928300};
double b_l1_b = 0.8057670813749118;
double obj_l1_b = 0.44295941481024703;
l1 = alpha;
l2 = 0.0;
fx = run(handle, loss_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l1_b, fx));
ASSERT_TRUE(checkParamsEqual(handle, &w_l1_b[0], &b_l1_b, w0.data, loss_b,
compApprox, stream));
fx = run_api(cuml_handle, 0, 2, loss_b.fit_intercept, *Xdev, *ydev, l1, l2,
w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l1_b, fx));
double w_l2_b[2] = {-1.5339880402781370, 1.6788639581350926};
double b_l2_b = 0.806087868102401;
double obj_l2_b = 0.4378085369889721;
l1 = 0;
l2 = alpha;
fx = run(handle, loss_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l2_b, fx));
ASSERT_TRUE(checkParamsEqual(handle, &w_l2_b[0], &b_l2_b, w0.data, loss_b,
compApprox, stream));
fx = run_api(cuml_handle, 0, 2, loss_b.fit_intercept, *Xdev, *ydev, l1, l2,
w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l2_b, fx));
double w_l1_no_b[2] = {-1.6215035298864591, 2.3650868394981086};
double obj_l1_no_b = 0.4769896009200278;
l1 = alpha;
l2 = 0.0;
fx = run(handle, loss_no_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l1_no_b, fx));
ASSERT_TRUE(checkParamsEqual(handle, &w_l1_no_b[0], nobptr, w0.data,
loss_no_b, compApprox, stream));
fx = run_api(cuml_handle, 0, 2, loss_no_b.fit_intercept, *Xdev, *ydev, l1, l2,
w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l1_no_b, fx));
double w_l2_no_b[2] = {-1.3931049893764620, 2.0140103094119621};
double obj_l2_no_b = 0.47502098062114273;
l1 = 0;
l2 = alpha;
fx = run(handle, loss_no_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l2_no_b, fx));
ASSERT_TRUE(checkParamsEqual(handle, &w_l2_no_b[0], nobptr, w0.data,
loss_no_b, compApprox, stream));
fx = run_api(cuml_handle, 0, 2, loss_no_b.fit_intercept, *Xdev, *ydev, l1, l2,
w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l2_no_b, fx));
}
TEST_F(QuasiNewtonTest, multiclass_logistic_vs_sklearn) {
// The data seems too small for the objective to be strongly convex
// leaving out exact param checks
raft::CompareApprox<double> compApprox(tol);
double y[N] = {2, 2, 0, 3, 3, 0, 0, 0, 1, 0};
raft::update_device(ydev->data, &y[0], ydev->len, stream);
CUDA_CHECK(hipStreamSynchronize(stream));
double fx, l1, l2;
int C = 4;
double alpha = 0.016 * N;
SimpleMatOwning<double> z(allocator, C, N, stream);
SimpleVecOwning<double> w0(allocator, C * (D + 1), stream);
Softmax<double> loss_b(handle, D, C, true);
Softmax<double> loss_no_b(handle, D, C, false);
l1 = alpha;
l2 = 0.0;
double obj_l1_b = 0.5407911382311313;
fx = run(handle, loss_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l1_b, fx));
fx = run_api(cuml_handle, 2, C, loss_b.fit_intercept, *Xdev, *ydev, l1, l2,
w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l1_b, fx));
l1 = 0.0;
l2 = alpha;
double obj_l2_b = 0.5721784062720949;
fx = run(handle, loss_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l2_b, fx));
fx = run_api(cuml_handle, 2, C, loss_b.fit_intercept, *Xdev, *ydev, l1, l2,
w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l2_b, fx));
l1 = alpha;
l2 = 0.0;
double obj_l1_no_b = 0.6606929813245878;
fx = run(handle, loss_no_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l1_no_b, fx));
fx = run_api(cuml_handle, 2, C, loss_no_b.fit_intercept, *Xdev, *ydev, l1, l2,
w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l1_no_b, fx));
l1 = 0.0;
l2 = alpha;
double obj_l2_no_b = 0.6597171282106854;
fx = run(handle, loss_no_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l2_no_b, fx));
fx = run_api(cuml_handle, 2, C, loss_no_b.fit_intercept, *Xdev, *ydev, l1, l2,
w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l2_no_b, fx));
}
TEST_F(QuasiNewtonTest, linear_regression_vs_sklearn) {
raft::CompareApprox<double> compApprox(tol);
double y[N] = {0.2675836026202781, -0.0678277759663704, -0.6334027174275105,
-0.1018336189077367, 0.0933815935886932, -1.1058853496996381,
-0.1658298189619160, -0.2954290675648911, 0.7966520536712608,
-1.0767450516284769};
raft::update_device(ydev->data, &y[0], ydev->len, stream);
CUDA_CHECK(hipStreamSynchronize(stream));
double fx, l1, l2;
double alpha = 0.01 * N;
SimpleVecOwning<double> w0(allocator, D + 1, stream);
SimpleVecOwning<double> z(allocator, N, stream);
SquaredLoss<double> loss_b(handle, D, true);
SquaredLoss<double> loss_no_b(handle, D, false);
l1 = alpha;
l2 = 0.0;
double w_l1_b[2] = {-0.4952397281519840, 0.3813315300180231};
double b_l1_b = -0.08140861819001188;
double obj_l1_b = 0.011136986298775138;
fx = run(handle, loss_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l1_b, fx));
ASSERT_TRUE(checkParamsEqual(handle, &w_l1_b[0], &b_l1_b, w0.data, loss_b,
compApprox, stream));
fx = run_api(cuml_handle, 1, 1, loss_b.fit_intercept, *Xdev, *ydev, l1, l2,
w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l1_b, fx));
l1 = 0.0;
l2 = alpha;
double w_l2_b[2] = {-0.5022384743587150, 0.3937352417485087};
double b_l2_b = -0.08062397391797513;
double obj_l2_b = 0.004268621967866347;
fx = run(handle, loss_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l2_b, fx));
ASSERT_TRUE(checkParamsEqual(handle, &w_l2_b[0], &b_l2_b, w0.data, loss_b,
compApprox, stream));
fx = run_api(cuml_handle, 1, 1, loss_b.fit_intercept, *Xdev, *ydev, l1, l2,
w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l2_b, fx));
l1 = alpha;
l2 = 0.0;
double w_l1_no_b[2] = {-0.5175178128147135, 0.3720844589831813};
double obj_l1_no_b = 0.013981355746112447;
fx = run(handle, loss_no_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l1_no_b, fx));
ASSERT_TRUE(checkParamsEqual(handle, &w_l1_no_b[0], nobptr, w0.data,
loss_no_b, compApprox, stream));
fx = run_api(cuml_handle, 1, 1, loss_no_b.fit_intercept, *Xdev, *ydev, l1, l2,
w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l1_no_b, fx));
l1 = 0.0;
l2 = alpha;
double w_l2_no_b[2] = {-0.5241651041233270, 0.3846317886627560};
double obj_l2_no_b = 0.007061261366969662;
fx = run(handle, loss_no_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l2_no_b, fx));
ASSERT_TRUE(checkParamsEqual(handle, &w_l2_no_b[0], nobptr, w0.data,
loss_no_b, compApprox, stream));
fx = run_api(cuml_handle, 1, 1, loss_no_b.fit_intercept, *Xdev, *ydev, l1, l2,
w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l2_no_b, fx));
}
TEST_F(QuasiNewtonTest, predict) {
raft::CompareApprox<double> compApprox(1e-8);
std::vector<double> w_host(D);
w_host[0] = 1;
std::vector<double> preds_host(N);
SimpleVecOwning<double> w(allocator, D, stream);
SimpleVecOwning<double> preds(allocator, N, stream);
raft::update_device(w.data, &w_host[0], w.len, stream);
qnPredict(handle, Xdev->data, N, D, 2, false, w.data, false, 0, preds.data,
stream);
raft::update_host(&preds_host[0], preds.data, preds.len, stream);
CUDA_CHECK(hipStreamSynchronize(stream));
for (int it = 0; it < N; it++) {
ASSERT_TRUE(X[it][0] > 0 ? compApprox(preds_host[it], 1)
: compApprox(preds_host[it], 0));
}
qnPredict(handle, Xdev->data, N, D, 1, false, w.data, false, 1, preds.data,
stream);
raft::update_host(&preds_host[0], preds.data, preds.len, stream);
CUDA_CHECK(hipStreamSynchronize(stream));
for (int it = 0; it < N; it++) {
ASSERT_TRUE(compApprox(X[it][0], preds_host[it]));
}
}
TEST_F(QuasiNewtonTest, predict_softmax) {
raft::CompareApprox<double> compApprox(1e-8);
int C = 4;
std::vector<double> w_host(C * D);
w_host[0] = 1;
w_host[D * C - 1] = 1;
std::vector<double> preds_host(N);
SimpleVecOwning<double> w(allocator, w_host.size(), stream);
SimpleVecOwning<double> preds(allocator, N, stream);
raft::update_device(w.data, &w_host[0], w.len, stream);
qnPredict(handle, Xdev->data, N, D, C, false, w.data, false, 2, preds.data,
stream);
raft::update_host(&preds_host[0], preds.data, preds.len, stream);
CUDA_CHECK(hipStreamSynchronize(stream));
for (int it = 0; it < N; it++) {
if (X[it][0] < 0 && X[it][1] < 0) {
ASSERT_TRUE(compApprox(1, preds_host[it]));
} else if (X[it][0] > X[it][1]) {
ASSERT_TRUE(compApprox(0, preds_host[it]));
} else {
ASSERT_TRUE(compApprox(C - 1, preds_host[it]));
}
}
}
} // namespace GLM
} // end namespace ML
|
75cf6a6ba6c765d02a3f0d777365e1c77dbbd260.cu
|
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <gtest/gtest.h>
#include <linalg/transpose.h>
#include <test_utils.h>
#include <cuml/linear_model/glm.hpp>
#include <glm/qn/glm_linear.cuh>
#include <glm/qn/glm_logistic.cuh>
#include <glm/qn/glm_softmax.cuh>
#include <glm/qn/qn.cuh>
#include <vector>
namespace ML {
namespace GLM {
using namespace MLCommon;
struct QuasiNewtonTest : ::testing::Test {
static constexpr int N = 10;
static constexpr int D = 2;
const static double *nobptr;
const static double tol;
const static double X[N][D];
raft::handle_t cuml_handle;
const raft::handle_t &handle;
cudaStream_t stream;
std::shared_ptr<SimpleMatOwning<double>> Xdev;
std::shared_ptr<SimpleVecOwning<double>> ydev;
std::shared_ptr<deviceAllocator> allocator;
QuasiNewtonTest() : handle(cuml_handle) {}
void SetUp() {
stream = cuml_handle.get_stream();
Xdev.reset(new SimpleMatOwning<double>(handle.get_device_allocator(), N, D,
stream, ROW_MAJOR));
raft::update_device(Xdev->data, &X[0][0], Xdev->len, stream);
ydev.reset(
new SimpleVecOwning<double>(handle.get_device_allocator(), N, stream));
CUDA_CHECK(cudaStreamSynchronize(stream));
allocator = handle.get_device_allocator();
}
void TearDown() {}
};
const double *QuasiNewtonTest::nobptr = 0;
const double QuasiNewtonTest::tol = 5e-6;
const double QuasiNewtonTest::X[QuasiNewtonTest::N][QuasiNewtonTest::D] = {
{-0.2047076594847130, 0.4789433380575482},
{-0.5194387150567381, -0.5557303043474900},
{1.9657805725027142, 1.3934058329729904},
{0.0929078767437177, 0.2817461528302025},
{0.7690225676118387, 1.2464347363862822},
{1.0071893575830049, -1.2962211091122635},
{0.2749916334321240, 0.2289128789353159},
{1.3529168351654497, 0.8864293405915888},
{-2.0016373096603974, -0.3718425371402544},
{1.6690253095248706, -0.4385697358355719}};
template <typename T, class Comp>
::testing::AssertionResult checkParamsEqual(const raft::handle_t &handle,
const T *host_weights,
const T *host_bias, const T *w,
const GLMDims &dims, Comp &comp,
cudaStream_t stream) {
int C = dims.C;
int D = dims.D;
bool fit_intercept = dims.fit_intercept;
std::vector<T> w_ref_cm(C * D);
int idx = 0;
for (int d = 0; d < D; d++)
for (int c = 0; c < C; c++) {
w_ref_cm[idx++] = host_weights[c * D + d];
}
SimpleVecOwning<T> w_ref(handle.get_device_allocator(), dims.n_param, stream);
raft::update_device(w_ref.data, &w_ref_cm[0], C * D, stream);
if (fit_intercept) {
raft::update_device(&w_ref.data[C * D], host_bias, C, stream);
}
CUDA_CHECK(cudaStreamSynchronize(stream));
return raft::devArrMatch(w_ref.data, w, w_ref.len, comp);
}
template <typename T, class LossFunction>
T run(const raft::handle_t &handle, LossFunction &loss, const SimpleMat<T> &X,
const SimpleVec<T> &y, T l1, T l2, T *w, SimpleMat<T> &z, int verbosity,
cudaStream_t stream) {
int max_iter = 100;
T grad_tol = 1e-16;
int linesearch_max_iter = 50;
int lbfgs_memory = 5;
int num_iters = 0;
T fx;
SimpleVec<T> w0(w, loss.n_param);
qn_fit<T, LossFunction>(handle, loss, X.data, y.data, z.data, X.m, l1, l2,
max_iter, grad_tol, linesearch_max_iter, lbfgs_memory,
verbosity, w0.data, &fx, &num_iters, X.ord, stream);
return fx;
}
template <typename T>
T run_api(const raft::handle_t &cuml_handle, int loss_type, int C,
bool fit_intercept, const SimpleMat<T> &X, const SimpleVec<T> &y,
T l1, T l2, T *w, SimpleMat<T> &z, int verbosity,
cudaStream_t stream) {
int max_iter = 100;
T grad_tol = 1e-8;
int linesearch_max_iter = 50;
int lbfgs_memory = 5;
int num_iters = 0;
SimpleVec<T> w0(w, X.n + fit_intercept);
w0.fill(T(0), stream);
T fx;
qnFit(cuml_handle, X.data, y.data, X.m, X.n, C, fit_intercept, l1, l2,
max_iter, grad_tol, linesearch_max_iter, lbfgs_memory, verbosity, w,
&fx, &num_iters, false, loss_type);
return fx;
}
TEST_F(QuasiNewtonTest, binary_logistic_vs_sklearn) {
raft::CompareApprox<double> compApprox(tol);
// Test case generated in python and solved with sklearn
double y[N] = {1, 1, 1, 0, 1, 0, 1, 0, 1, 0};
raft::update_device(ydev->data, &y[0], ydev->len, stream);
CUDA_CHECK(cudaStreamSynchronize(stream));
double alpha = 0.01 * N;
LogisticLoss<double> loss_b(handle, D, true);
LogisticLoss<double> loss_no_b(handle, D, false);
SimpleVecOwning<double> w0(allocator, D + 1, stream);
SimpleVecOwning<double> z(allocator, N, stream);
double l1, l2, fx;
double w_l1_b[2] = {-1.6899370396155091, 1.9021577534928300};
double b_l1_b = 0.8057670813749118;
double obj_l1_b = 0.44295941481024703;
l1 = alpha;
l2 = 0.0;
fx = run(handle, loss_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l1_b, fx));
ASSERT_TRUE(checkParamsEqual(handle, &w_l1_b[0], &b_l1_b, w0.data, loss_b,
compApprox, stream));
fx = run_api(cuml_handle, 0, 2, loss_b.fit_intercept, *Xdev, *ydev, l1, l2,
w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l1_b, fx));
double w_l2_b[2] = {-1.5339880402781370, 1.6788639581350926};
double b_l2_b = 0.806087868102401;
double obj_l2_b = 0.4378085369889721;
l1 = 0;
l2 = alpha;
fx = run(handle, loss_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l2_b, fx));
ASSERT_TRUE(checkParamsEqual(handle, &w_l2_b[0], &b_l2_b, w0.data, loss_b,
compApprox, stream));
fx = run_api(cuml_handle, 0, 2, loss_b.fit_intercept, *Xdev, *ydev, l1, l2,
w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l2_b, fx));
double w_l1_no_b[2] = {-1.6215035298864591, 2.3650868394981086};
double obj_l1_no_b = 0.4769896009200278;
l1 = alpha;
l2 = 0.0;
fx = run(handle, loss_no_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l1_no_b, fx));
ASSERT_TRUE(checkParamsEqual(handle, &w_l1_no_b[0], nobptr, w0.data,
loss_no_b, compApprox, stream));
fx = run_api(cuml_handle, 0, 2, loss_no_b.fit_intercept, *Xdev, *ydev, l1, l2,
w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l1_no_b, fx));
double w_l2_no_b[2] = {-1.3931049893764620, 2.0140103094119621};
double obj_l2_no_b = 0.47502098062114273;
l1 = 0;
l2 = alpha;
fx = run(handle, loss_no_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l2_no_b, fx));
ASSERT_TRUE(checkParamsEqual(handle, &w_l2_no_b[0], nobptr, w0.data,
loss_no_b, compApprox, stream));
fx = run_api(cuml_handle, 0, 2, loss_no_b.fit_intercept, *Xdev, *ydev, l1, l2,
w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l2_no_b, fx));
}
TEST_F(QuasiNewtonTest, multiclass_logistic_vs_sklearn) {
  // The data seems too small for the objective to be strongly convex
// leaving out exact param checks
raft::CompareApprox<double> compApprox(tol);
double y[N] = {2, 2, 0, 3, 3, 0, 0, 0, 1, 0};
raft::update_device(ydev->data, &y[0], ydev->len, stream);
CUDA_CHECK(cudaStreamSynchronize(stream));
double fx, l1, l2;
int C = 4;
double alpha = 0.016 * N;
SimpleMatOwning<double> z(allocator, C, N, stream);
SimpleVecOwning<double> w0(allocator, C * (D + 1), stream);
Softmax<double> loss_b(handle, D, C, true);
Softmax<double> loss_no_b(handle, D, C, false);
l1 = alpha;
l2 = 0.0;
double obj_l1_b = 0.5407911382311313;
fx = run(handle, loss_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l1_b, fx));
fx = run_api(cuml_handle, 2, C, loss_b.fit_intercept, *Xdev, *ydev, l1, l2,
w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l1_b, fx));
l1 = 0.0;
l2 = alpha;
double obj_l2_b = 0.5721784062720949;
fx = run(handle, loss_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l2_b, fx));
fx = run_api(cuml_handle, 2, C, loss_b.fit_intercept, *Xdev, *ydev, l1, l2,
w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l2_b, fx));
l1 = alpha;
l2 = 0.0;
double obj_l1_no_b = 0.6606929813245878;
fx = run(handle, loss_no_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l1_no_b, fx));
fx = run_api(cuml_handle, 2, C, loss_no_b.fit_intercept, *Xdev, *ydev, l1, l2,
w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l1_no_b, fx));
l1 = 0.0;
l2 = alpha;
double obj_l2_no_b = 0.6597171282106854;
fx = run(handle, loss_no_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l2_no_b, fx));
fx = run_api(cuml_handle, 2, C, loss_no_b.fit_intercept, *Xdev, *ydev, l1, l2,
w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l2_no_b, fx));
}
TEST_F(QuasiNewtonTest, linear_regression_vs_sklearn) {
raft::CompareApprox<double> compApprox(tol);
double y[N] = {0.2675836026202781, -0.0678277759663704, -0.6334027174275105,
-0.1018336189077367, 0.0933815935886932, -1.1058853496996381,
-0.1658298189619160, -0.2954290675648911, 0.7966520536712608,
-1.0767450516284769};
raft::update_device(ydev->data, &y[0], ydev->len, stream);
CUDA_CHECK(cudaStreamSynchronize(stream));
double fx, l1, l2;
double alpha = 0.01 * N;
SimpleVecOwning<double> w0(allocator, D + 1, stream);
SimpleVecOwning<double> z(allocator, N, stream);
SquaredLoss<double> loss_b(handle, D, true);
SquaredLoss<double> loss_no_b(handle, D, false);
l1 = alpha;
l2 = 0.0;
double w_l1_b[2] = {-0.4952397281519840, 0.3813315300180231};
double b_l1_b = -0.08140861819001188;
double obj_l1_b = 0.011136986298775138;
fx = run(handle, loss_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l1_b, fx));
ASSERT_TRUE(checkParamsEqual(handle, &w_l1_b[0], &b_l1_b, w0.data, loss_b,
compApprox, stream));
fx = run_api(cuml_handle, 1, 1, loss_b.fit_intercept, *Xdev, *ydev, l1, l2,
w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l1_b, fx));
l1 = 0.0;
l2 = alpha;
double w_l2_b[2] = {-0.5022384743587150, 0.3937352417485087};
double b_l2_b = -0.08062397391797513;
double obj_l2_b = 0.004268621967866347;
fx = run(handle, loss_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l2_b, fx));
ASSERT_TRUE(checkParamsEqual(handle, &w_l2_b[0], &b_l2_b, w0.data, loss_b,
compApprox, stream));
fx = run_api(cuml_handle, 1, 1, loss_b.fit_intercept, *Xdev, *ydev, l1, l2,
w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l2_b, fx));
l1 = alpha;
l2 = 0.0;
double w_l1_no_b[2] = {-0.5175178128147135, 0.3720844589831813};
double obj_l1_no_b = 0.013981355746112447;
fx = run(handle, loss_no_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l1_no_b, fx));
ASSERT_TRUE(checkParamsEqual(handle, &w_l1_no_b[0], nobptr, w0.data,
loss_no_b, compApprox, stream));
fx = run_api(cuml_handle, 1, 1, loss_no_b.fit_intercept, *Xdev, *ydev, l1, l2,
w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l1_no_b, fx));
l1 = 0.0;
l2 = alpha;
double w_l2_no_b[2] = {-0.5241651041233270, 0.3846317886627560};
double obj_l2_no_b = 0.007061261366969662;
fx = run(handle, loss_no_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l2_no_b, fx));
ASSERT_TRUE(checkParamsEqual(handle, &w_l2_no_b[0], nobptr, w0.data,
loss_no_b, compApprox, stream));
fx = run_api(cuml_handle, 1, 1, loss_no_b.fit_intercept, *Xdev, *ydev, l1, l2,
w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l2_no_b, fx));
}
TEST_F(QuasiNewtonTest, predict) {
raft::CompareApprox<double> compApprox(1e-8);
std::vector<double> w_host(D);
w_host[0] = 1;
std::vector<double> preds_host(N);
SimpleVecOwning<double> w(allocator, D, stream);
SimpleVecOwning<double> preds(allocator, N, stream);
raft::update_device(w.data, &w_host[0], w.len, stream);
qnPredict(handle, Xdev->data, N, D, 2, false, w.data, false, 0, preds.data,
stream);
raft::update_host(&preds_host[0], preds.data, preds.len, stream);
CUDA_CHECK(cudaStreamSynchronize(stream));
for (int it = 0; it < N; it++) {
ASSERT_TRUE(X[it][0] > 0 ? compApprox(preds_host[it], 1)
: compApprox(preds_host[it], 0));
}
qnPredict(handle, Xdev->data, N, D, 1, false, w.data, false, 1, preds.data,
stream);
raft::update_host(&preds_host[0], preds.data, preds.len, stream);
CUDA_CHECK(cudaStreamSynchronize(stream));
for (int it = 0; it < N; it++) {
ASSERT_TRUE(compApprox(X[it][0], preds_host[it]));
}
}
TEST_F(QuasiNewtonTest, predict_softmax) {
raft::CompareApprox<double> compApprox(1e-8);
int C = 4;
std::vector<double> w_host(C * D);
w_host[0] = 1;
w_host[D * C - 1] = 1;
std::vector<double> preds_host(N);
SimpleVecOwning<double> w(allocator, w_host.size(), stream);
SimpleVecOwning<double> preds(allocator, N, stream);
raft::update_device(w.data, &w_host[0], w.len, stream);
qnPredict(handle, Xdev->data, N, D, C, false, w.data, false, 2, preds.data,
stream);
raft::update_host(&preds_host[0], preds.data, preds.len, stream);
CUDA_CHECK(cudaStreamSynchronize(stream));
for (int it = 0; it < N; it++) {
if (X[it][0] < 0 && X[it][1] < 0) {
ASSERT_TRUE(compApprox(1, preds_host[it]));
} else if (X[it][0] > X[it][1]) {
ASSERT_TRUE(compApprox(0, preds_host[it]));
} else {
ASSERT_TRUE(compApprox(C - 1, preds_host[it]));
}
}
}
} // namespace GLM
} // end namespace ML
|
5f879b3044c0c0517e227354e0a682f240841f9a.hip
|
// !!! This is a file automatically generated by hipify!!!
//Udacity HW 4
//Radix Sorting
#include <algorithm>
#include "utils.h"
#include <thrust/host_vector.h>
#include <stdio.h>
#include "timer.h"
#define MAX_INT_BITS 32
/* Red Eye Removal
===============
For this assignment we are implementing red eye removal. This is
accomplished by first creating a score for every pixel that tells us how
likely it is to be a red eye pixel. We have already done this for you - you
are receiving the scores and need to sort them in ascending order so that we
know which pixels to alter to remove the red eye.
Note: ascending order == smallest to largest
   Each score is associated with a position; when you sort the scores, you must
also move the positions accordingly.
Implementing Parallel Radix Sort with CUDA
==========================================
The basic idea is to construct a histogram on each pass of how many of each
"digit" there are. Then we scan this histogram so that we know where to put
the output of each digit. For example, the first 1 must come after all the
0s so we have to know how many 0s there are to be able to start moving 1s
into the correct position.
1) Histogram of the number of occurrences of each digit
2) Exclusive Prefix Sum of Histogram
3) Determine relative offset of each digit
For example [0 0 1 1 0 0 1]
-> [0 1 0 1 2 3 2]
4) Combine the results of steps 2 & 3 to determine the final
output location for each element and move it there
LSB Radix sort is an out-of-place sort and you will need to ping-pong values
between the input and output buffers we have provided. Make sure the final
sorted results end up in the output buffer! Hint: You may need to do a copy
at the end.
*/
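/* Worked example of a single 1-bit pass over the digits [0 0 1 1 0 0 1]
   (illustration only, mirroring the steps above):
     1) histogram:        4 zeros, 3 ones
     2) exclusive scan:   zeros start at 0, ones start at 4
     3) relative offsets: [0 1 0 1 2 3 2]
     4) final position = scan[digit] + offset, so the zeros land in slots 0-3,
        the ones in slots 4-6, and the scatter produces [0 0 0 0 1 1 1]. */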
GpuTimer timer;
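// Number of significant bits in the largest input value, i.e. how many low-order
// bit positions the radix passes below actually need to cover. For example, a
// maximum element of 13 (binary 1101) has 28 leading zeros in a 32-bit word, so
// this returns 32 - 28 = 4.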
unsigned int getMaxNumOfBits(unsigned int* h_input, const size_t numElems)
{
    unsigned int maxElem = *std::max_element(h_input, h_input + numElems);
    if (maxElem == 0) return 1;  // guard: an all-zero input would otherwise loop forever below
unsigned int mask = 1 << 31;
unsigned int count = 0;
while (! (mask & maxElem))
{
++count;
mask >>= 1;
}
return MAX_INT_BITS - count;
}
void radix_sort(unsigned int* h_inputVals,
unsigned int* h_inputPos,
unsigned int* h_outputVals,
unsigned int* h_outputPos,
const size_t numElems,
const unsigned int numBits)
{
unsigned int numBins = 1 << numBits;
unsigned int* h_hist = (unsigned int*) malloc (sizeof(unsigned int) * numBins);
unsigned int* h_histScan = (unsigned int*) malloc (sizeof(unsigned int) * numBins);
unsigned int* pInVals = h_inputVals;
unsigned int* pInPos = h_inputPos;
unsigned int* pOutVals = h_outputVals;
unsigned int* pOutPos = h_outputPos;
float histKernelTime = 0;
float scanKernelTime = 0;
float scatterKernelTime = 0;
unsigned int maxBits = getMaxNumOfBits(h_inputVals, numElems);
if (maxBits % numBits)
maxBits += numBits;
// loop through digits
for (unsigned int i = 0; i <= maxBits; i += numBits)
{
unsigned int mask = (numBins - 1) << i;
//init bin histogram
memset(h_hist, 0, sizeof(unsigned int) * numBins);
memset(h_histScan, 0, sizeof(unsigned int) * numBins);
//histogram
timer.Start();
for (unsigned int j = 0; j < numElems; ++j)
{
unsigned int bin = (pInVals[j] & mask) >> i;
++h_hist[bin];
}
timer.Stop();
histKernelTime += timer.Elapsed();
// exclusive scan hist
timer.Start();
for (unsigned int j = 1; j < numBins; ++j)
{
h_histScan[j] += h_histScan[j - 1] + h_hist[j - 1];
}
timer.Stop();
scanKernelTime += timer.Elapsed();
timer.Start();
for (int j = 0; j < numElems; ++j)
{
unsigned int bin = (pInVals[j] & mask) >> i;
pOutVals[h_histScan[bin]] = pInVals[j];
pOutPos[h_histScan[bin]] = pInPos[j];
++h_histScan[bin];
}
timer.Stop();
scatterKernelTime += timer.Elapsed();
std::swap(pInVals, pOutVals);
std::swap(pInPos, pOutPos);
}
if (pInVals == h_outputVals)
{
std::copy(h_inputVals, h_inputVals + numElems, h_outputVals);
std::copy(h_inputPos, h_inputPos + numElems, h_outputPos);
}
printf("%15s%12d%12d%16.3f\n", "Histogram", 0, 0, histKernelTime);
printf("%15s%12d%12d%16.3f\n", "Scan", 0, 0, scanKernelTime);
printf("%15s%12d%12d%16.3f\n", "Scatter", 0, 0, scatterKernelTime);
}
void get_devices_info()
{
int numdevs;
hipGetDeviceCount(&numdevs);
printf("\n\n\nNum devices = %d\n", numdevs);
for (int i = 0; i < numdevs; ++i)
{
printf("Device %d\n", i);
hipDeviceProp_t devprop;
hipGetDeviceProperties(&devprop, i);
printf("CC %d.%d\n", devprop.major, devprop.minor);
printf("Max num threads/block: %d\n", devprop.maxThreadsPerBlock);
printf("Maximum block dimensions: %d x %d x %d\n", devprop.maxThreadsDim[0], devprop.maxThreadsDim[1], devprop.maxThreadsDim[2]);
printf("Maximum grid dimensions: %d x %d x %d\n\n", devprop.maxGridSize[0], devprop.maxGridSize[1], devprop.maxGridSize[2]);
printf("-----------------------------------------------------------------------------\n\n");
}
}
void your_sort(unsigned int* const d_inputVals,
unsigned int* const d_inputPos,
unsigned int* const d_outputVals,
unsigned int* const d_outputPos,
const size_t numElems)
{
unsigned int numBits = 2;
printf("\nSize: %d\n", numElems);
unsigned int* h_inputVals = (unsigned int*) malloc (sizeof(unsigned int) * numElems);
checkCudaErrors(hipMemcpy(h_inputVals, d_inputVals, numElems * sizeof(unsigned int), hipMemcpyDeviceToHost));
unsigned int* h_inputPos = (unsigned int*) malloc (sizeof(unsigned int) * numElems);
checkCudaErrors(hipMemcpy(h_inputPos, d_inputPos, numElems * sizeof(unsigned int), hipMemcpyDeviceToHost));
printf("\n%15s%12s%12s%16s\n",
"Function", "BlockSize", "GridSize", "TotalTime(ms)");
unsigned int* h_outputVals = (unsigned int*) malloc(sizeof(unsigned int) * numElems);
unsigned int* h_outputPos = (unsigned int*) malloc(sizeof(unsigned int) * numElems);
radix_sort(h_inputVals, h_inputPos, h_outputVals, h_outputPos, numElems, numBits);
checkCudaErrors(hipMemcpy(d_outputVals, h_outputVals, numElems * sizeof(unsigned int), hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_outputPos, h_outputPos, numElems * sizeof(unsigned int), hipMemcpyHostToDevice));
free(h_inputVals);
free(h_inputPos);
free(h_outputVals);
free(h_outputPos);
}
|
5f879b3044c0c0517e227354e0a682f240841f9a.cu
|
//Udacity HW 4
//Radix Sorting
#include <algorithm>
#include "utils.h"
#include <thrust/host_vector.h>
#include <stdio.h>
#include "timer.h"
#define MAX_INT_BITS 32
/* Red Eye Removal
===============
For this assignment we are implementing red eye removal. This is
accomplished by first creating a score for every pixel that tells us how
likely it is to be a red eye pixel. We have already done this for you - you
are receiving the scores and need to sort them in ascending order so that we
know which pixels to alter to remove the red eye.
Note: ascending order == smallest to largest
   Each score is associated with a position; when you sort the scores, you must
also move the positions accordingly.
Implementing Parallel Radix Sort with CUDA
==========================================
The basic idea is to construct a histogram on each pass of how many of each
"digit" there are. Then we scan this histogram so that we know where to put
the output of each digit. For example, the first 1 must come after all the
0s so we have to know how many 0s there are to be able to start moving 1s
into the correct position.
1) Histogram of the number of occurrences of each digit
2) Exclusive Prefix Sum of Histogram
3) Determine relative offset of each digit
For example [0 0 1 1 0 0 1]
-> [0 1 0 1 2 3 2]
4) Combine the results of steps 2 & 3 to determine the final
output location for each element and move it there
LSB Radix sort is an out-of-place sort and you will need to ping-pong values
between the input and output buffers we have provided. Make sure the final
sorted results end up in the output buffer! Hint: You may need to do a copy
at the end.
*/
GpuTimer timer;
unsigned int getMaxNumOfBits(unsigned int* h_input, const size_t numElems)
{
    unsigned int maxElem = *std::max_element(h_input, h_input + numElems);
    if (maxElem == 0) return 1;  // guard: an all-zero input would otherwise loop forever below
unsigned int mask = 1 << 31;
unsigned int count = 0;
while (! (mask & maxElem))
{
++count;
mask >>= 1;
}
return MAX_INT_BITS - count;
}
void radix_sort(unsigned int* h_inputVals,
unsigned int* h_inputPos,
unsigned int* h_outputVals,
unsigned int* h_outputPos,
const size_t numElems,
const unsigned int numBits)
{
unsigned int numBins = 1 << numBits;
unsigned int* h_hist = (unsigned int*) malloc (sizeof(unsigned int) * numBins);
unsigned int* h_histScan = (unsigned int*) malloc (sizeof(unsigned int) * numBins);
unsigned int* pInVals = h_inputVals;
unsigned int* pInPos = h_inputPos;
unsigned int* pOutVals = h_outputVals;
unsigned int* pOutPos = h_outputPos;
float histKernelTime = 0;
float scanKernelTime = 0;
float scatterKernelTime = 0;
unsigned int maxBits = getMaxNumOfBits(h_inputVals, numElems);
if (maxBits % numBits)
maxBits += numBits;
// loop through digits
for (unsigned int i = 0; i <= maxBits; i += numBits)
{
unsigned int mask = (numBins - 1) << i;
//init bin histogram
memset(h_hist, 0, sizeof(unsigned int) * numBins);
memset(h_histScan, 0, sizeof(unsigned int) * numBins);
//histogram
timer.Start();
for (unsigned int j = 0; j < numElems; ++j)
{
unsigned int bin = (pInVals[j] & mask) >> i;
++h_hist[bin];
}
timer.Stop();
histKernelTime += timer.Elapsed();
// exclusive scan hist
timer.Start();
for (unsigned int j = 1; j < numBins; ++j)
{
h_histScan[j] += h_histScan[j - 1] + h_hist[j - 1];
}
timer.Stop();
scanKernelTime += timer.Elapsed();
timer.Start();
for (int j = 0; j < numElems; ++j)
{
unsigned int bin = (pInVals[j] & mask) >> i;
pOutVals[h_histScan[bin]] = pInVals[j];
pOutPos[h_histScan[bin]] = pInPos[j];
++h_histScan[bin];
}
timer.Stop();
scatterKernelTime += timer.Elapsed();
std::swap(pInVals, pOutVals);
std::swap(pInPos, pOutPos);
}
if (pInVals == h_outputVals)
{
std::copy(h_inputVals, h_inputVals + numElems, h_outputVals);
std::copy(h_inputPos, h_inputPos + numElems, h_outputPos);
}
printf("%15s%12d%12d%16.3f\n", "Histogram", 0, 0, histKernelTime);
printf("%15s%12d%12d%16.3f\n", "Scan", 0, 0, scanKernelTime);
printf("%15s%12d%12d%16.3f\n", "Scatter", 0, 0, scatterKernelTime);
}
void get_devices_info()
{
int numdevs;
cudaGetDeviceCount(&numdevs);
printf("\n\n\nNum devices = %d\n", numdevs);
for (int i = 0; i < numdevs; ++i)
{
printf("Device %d\n", i);
cudaDeviceProp devprop;
cudaGetDeviceProperties(&devprop, i);
printf("CC %d.%d\n", devprop.major, devprop.minor);
printf("Max num threads/block: %d\n", devprop.maxThreadsPerBlock);
printf("Maximum block dimensions: %d x %d x %d\n", devprop.maxThreadsDim[0], devprop.maxThreadsDim[1], devprop.maxThreadsDim[2]);
printf("Maximum grid dimensions: %d x %d x %d\n\n", devprop.maxGridSize[0], devprop.maxGridSize[1], devprop.maxGridSize[2]);
printf("-----------------------------------------------------------------------------\n\n");
}
}
void your_sort(unsigned int* const d_inputVals,
unsigned int* const d_inputPos,
unsigned int* const d_outputVals,
unsigned int* const d_outputPos,
const size_t numElems)
{
unsigned int numBits = 2;
printf("\nSize: %d\n", numElems);
unsigned int* h_inputVals = (unsigned int*) malloc (sizeof(unsigned int) * numElems);
checkCudaErrors(cudaMemcpy(h_inputVals, d_inputVals, numElems * sizeof(unsigned int), cudaMemcpyDeviceToHost));
unsigned int* h_inputPos = (unsigned int*) malloc (sizeof(unsigned int) * numElems);
checkCudaErrors(cudaMemcpy(h_inputPos, d_inputPos, numElems * sizeof(unsigned int), cudaMemcpyDeviceToHost));
printf("\n%15s%12s%12s%16s\n",
"Function", "BlockSize", "GridSize", "TotalTime(ms)");
unsigned int* h_outputVals = (unsigned int*) malloc(sizeof(unsigned int) * numElems);
unsigned int* h_outputPos = (unsigned int*) malloc(sizeof(unsigned int) * numElems);
radix_sort(h_inputVals, h_inputPos, h_outputVals, h_outputPos, numElems, numBits);
checkCudaErrors(cudaMemcpy(d_outputVals, h_outputVals, numElems * sizeof(unsigned int), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_outputPos, h_outputPos, numElems * sizeof(unsigned int), cudaMemcpyHostToDevice));
free(h_inputVals);
free(h_inputPos);
free(h_outputVals);
free(h_outputPos);
}
|
d84d4022130e6c9596a75cfae95c8e04e7a6eac8.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2018-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "parquet_gpu.hpp"
#include <io/utilities/block_utils.cuh>
#include <io/utilities/column_buffer.hpp>
#include <cudf/detail/utilities/assert.cuh>
#include <cudf/utilities/bit.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/functional.h>
#include <thrust/iterator/iterator_categories.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/iterator/transform_output_iterator.h>
#include <thrust/reduce.h>
#include <thrust/scan.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#include <thrust/transform.h>
#include <thrust/tuple.h>
constexpr int block_size = 128;
constexpr int non_zero_buffer_size = block_size * 2;
inline __device__ uint32_t rotl32(uint32_t x, uint32_t r)
{
return __funnelshift_l(x, x, r); // (x << r) | (x >> (32 - r));
}
inline __device__ int rolling_index(int index) { return index & (non_zero_buffer_size - 1); }
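// Decoded positions and values are staged through circular buffers of
// non_zero_buffer_size (= 256) entries, so e.g. rolling_index(258) == 2; earlier
// slots are simply reused once their contents have been consumed.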
namespace cudf {
namespace io {
namespace parquet {
namespace gpu {
struct page_state_s {
const uint8_t* data_start;
const uint8_t* data_end;
const uint8_t* lvl_end;
const uint8_t* dict_base; // ptr to dictionary page data
int32_t dict_size; // size of dictionary data
int32_t first_row; // First row in page to output
int32_t num_rows; // Rows in page to decode (including rows to be skipped)
int32_t first_output_value; // First value in page to output
int32_t num_input_values; // total # of input/level values in the page
int32_t dtype_len; // Output data type length
int32_t dtype_len_in; // Can be larger than dtype_len if truncating 32-bit into 8-bit
int32_t dict_bits; // # of bits to store dictionary indices
uint32_t dict_run;
int32_t dict_val;
uint32_t initial_rle_run[NUM_LEVEL_TYPES]; // [def,rep]
int32_t initial_rle_value[NUM_LEVEL_TYPES]; // [def,rep]
int32_t error;
PageInfo page;
ColumnChunkDesc col;
// (leaf) value decoding
int32_t nz_count; // number of valid entries in nz_idx (write position in circular buffer)
int32_t dict_pos; // write position of dictionary indices
int32_t src_pos; // input read position of final output value
int32_t ts_scale; // timestamp scale: <0: divide by -ts_scale, >0: multiply by ts_scale
uint32_t nz_idx[non_zero_buffer_size]; // circular buffer of non-null value positions
uint32_t dict_idx[non_zero_buffer_size]; // Dictionary index, boolean, or string offset values
uint32_t str_len[non_zero_buffer_size]; // String length for plain encoding of strings
// repetition/definition level decoding
int32_t input_value_count; // how many values of the input we've processed
int32_t input_row_count; // how many rows of the input we've processed
int32_t input_leaf_count; // how many leaf values of the input we've processed
uint32_t rep[non_zero_buffer_size]; // circular buffer of repetition level values
uint32_t def[non_zero_buffer_size]; // circular buffer of definition level values
const uint8_t* lvl_start[NUM_LEVEL_TYPES]; // [def,rep]
int32_t lvl_count[NUM_LEVEL_TYPES]; // how many of each of the streams we've decoded
int32_t row_index_lower_bound; // lower bound of row indices we should process
};
/**
* @brief Computes a 32-bit hash when given a byte stream and range.
*
* MurmurHash3_32 implementation from
* https://github.com/aappleby/smhasher/blob/master/src/MurmurHash3.cpp
*
* MurmurHash3 was written by Austin Appleby, and is placed in the public
* domain. The author hereby disclaims copyright to this source code.
*
* @param[in] key The input data to hash
* @param[in] len The length of the input data
* @param[in] seed An initialization value
*
* @return The hash value
*/
__device__ uint32_t device_str2hash32(const char* key, size_t len, uint32_t seed = 33)
{
const auto* p = reinterpret_cast<const uint8_t*>(key);
uint32_t h1 = seed, k1;
const uint32_t c1 = 0xcc9e2d51;
const uint32_t c2 = 0x1b873593;
int l = len;
// body
while (l >= 4) {
k1 = p[0] | (p[1] << 8) | (p[2] << 16) | (p[3] << 24);
k1 *= c1;
k1 = rotl32(k1, 15);
k1 *= c2;
h1 ^= k1;
h1 = rotl32(h1, 13);
h1 = h1 * 5 + 0xe6546b64;
p += 4;
l -= 4;
}
// tail
k1 = 0;
switch (l) {
case 3: k1 ^= p[2] << 16;
case 2: k1 ^= p[1] << 8;
case 1:
k1 ^= p[0];
k1 *= c1;
k1 = rotl32(k1, 15);
k1 *= c2;
h1 ^= k1;
}
// finalization
h1 ^= len;
h1 ^= h1 >> 16;
h1 *= 0x85ebca6b;
h1 ^= h1 >> 13;
h1 *= 0xc2b2ae35;
h1 ^= h1 >> 16;
return h1;
}
/**
* @brief Read a 32-bit varint integer
*
* @param[in,out] cur The current data position, updated after the read
* @param[in] end The end data position
*
* @return The 32-bit value read
*/
inline __device__ uint32_t get_vlq32(const uint8_t*& cur, const uint8_t* end)
{
uint32_t v = *cur++;
if (v >= 0x80 && cur < end) {
v = (v & 0x7f) | ((*cur++) << 7);
if (v >= (0x80 << 7) && cur < end) {
v = (v & ((0x7f << 7) | 0x7f)) | ((*cur++) << 14);
if (v >= (0x80 << 14) && cur < end) {
v = (v & ((0x7f << 14) | (0x7f << 7) | 0x7f)) | ((*cur++) << 21);
if (v >= (0x80 << 21) && cur < end) {
v = (v & ((0x7f << 21) | (0x7f << 14) | (0x7f << 7) | 0x7f)) | ((*cur++) << 28);
}
}
}
}
return v;
}
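// Worked example (illustration only): the byte sequence {0xAC, 0x02} decodes to
// (0xAC & 0x7f) | (0x02 << 7) = 44 + 256 = 300, with `cur` advanced past both
// bytes; a single byte below 0x80 is returned unchanged.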
/**
* @brief Parse the beginning of the level section (definition or repetition),
* initializes the initial RLE run & value, and returns the section length
*
* @param[in,out] s The page state
* @param[in] cur The current data position
* @param[in] end The end of the data
 * @param[in] lvl The level type to initialize (DEFINITION or REPETITION)
*
* @return The length of the section
*/
__device__ uint32_t InitLevelSection(page_state_s* s,
const uint8_t* cur,
const uint8_t* end,
level_type lvl)
{
int32_t len;
int level_bits = s->col.level_bits[lvl];
Encoding encoding = lvl == level_type::DEFINITION ? s->page.definition_level_encoding
: s->page.repetition_level_encoding;
if (level_bits == 0) {
len = 0;
s->initial_rle_run[lvl] = s->page.num_input_values * 2; // repeated value
s->initial_rle_value[lvl] = 0;
s->lvl_start[lvl] = cur;
} else if (encoding == Encoding::RLE) {
if (cur + 4 < end) {
uint32_t run;
len = 4 + (cur[0]) + (cur[1] << 8) + (cur[2] << 16) + (cur[3] << 24);
cur += 4;
run = get_vlq32(cur, end);
s->initial_rle_run[lvl] = run;
if (!(run & 1)) {
int v = (cur < end) ? cur[0] : 0;
cur++;
if (level_bits > 8) {
v |= ((cur < end) ? cur[0] : 0) << 8;
cur++;
}
s->initial_rle_value[lvl] = v;
}
s->lvl_start[lvl] = cur;
if (cur > end) { s->error = 2; }
} else {
len = 0;
s->error = 2;
}
} else if (encoding == Encoding::BIT_PACKED) {
len = (s->page.num_input_values * level_bits + 7) >> 3;
s->initial_rle_run[lvl] = ((s->page.num_input_values + 7) >> 3) * 2 + 1; // literal run
s->initial_rle_value[lvl] = 0;
s->lvl_start[lvl] = cur;
} else {
s->error = 3;
len = 0;
}
return (uint32_t)len;
}
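// Note on the run headers consumed below (illustration only): each header is a
// varint whose low bit selects the run type. An even header such as 0x06 (3 << 1)
// starts a repeated run of 3 copies of the level value stored in the following
// byte(s); an odd header such as 0x03 ((1 << 1) | 1) starts a bit-packed literal
// run of 1 group, i.e. 8 packed values.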
/**
* @brief Decode values out of a definition or repetition stream
*
 * @param[out] output Destination buffer for the decoded level values
 * @param[in,out] s Page state input/output
 * @param[in] target_count Target count of stream values on output
 * @param[in] t Warp0 thread ID (0..31)
* @param[in] lvl The level type we are decoding - DEFINITION or REPETITION
*/
__device__ void gpuDecodeStream(
uint32_t* output, page_state_s* s, int32_t target_count, int t, level_type lvl)
{
const uint8_t* cur_def = s->lvl_start[lvl];
const uint8_t* end = s->lvl_end;
uint32_t level_run = s->initial_rle_run[lvl];
int32_t level_val = s->initial_rle_value[lvl];
int level_bits = s->col.level_bits[lvl];
int32_t num_input_values = s->num_input_values;
int32_t value_count = s->lvl_count[lvl];
int32_t batch_coded_count = 0;
while (value_count < target_count && value_count < num_input_values) {
int batch_len;
if (level_run <= 1) {
// Get a new run symbol from the byte stream
int sym_len = 0;
if (!t) {
const uint8_t* cur = cur_def;
if (cur < end) { level_run = get_vlq32(cur, end); }
if (!(level_run & 1)) {
if (cur < end) level_val = cur[0];
cur++;
if (level_bits > 8) {
if (cur < end) level_val |= cur[0] << 8;
cur++;
}
}
if (cur > end || level_run <= 1) { s->error = 0x10; }
sym_len = (int32_t)(cur - cur_def);
__threadfence_block();
}
sym_len = shuffle(sym_len);
level_val = shuffle(level_val);
level_run = shuffle(level_run);
cur_def += sym_len;
}
if (s->error) { break; }
batch_len = min(num_input_values - value_count, 32);
if (level_run & 1) {
// Literal run
int batch_len8;
batch_len = min(batch_len, (level_run >> 1) * 8);
batch_len8 = (batch_len + 7) >> 3;
if (t < batch_len) {
int bitpos = t * level_bits;
const uint8_t* cur = cur_def + (bitpos >> 3);
bitpos &= 7;
if (cur < end) level_val = cur[0];
cur++;
if (level_bits > 8 - bitpos && cur < end) {
level_val |= cur[0] << 8;
cur++;
if (level_bits > 16 - bitpos && cur < end) level_val |= cur[0] << 16;
}
level_val = (level_val >> bitpos) & ((1 << level_bits) - 1);
}
level_run -= batch_len8 * 2;
cur_def += batch_len8 * level_bits;
} else {
// Repeated value
batch_len = min(batch_len, level_run >> 1);
level_run -= batch_len * 2;
}
if (t < batch_len) {
int idx = value_count + t;
output[idx & (non_zero_buffer_size - 1)] = level_val;
}
batch_coded_count += batch_len;
value_count += batch_len;
}
// update the stream info
if (!t) {
s->lvl_start[lvl] = cur_def;
s->initial_rle_run[lvl] = level_run;
s->initial_rle_value[lvl] = level_val;
s->lvl_count[lvl] = value_count;
}
}
/**
* @brief Performs RLE decoding of dictionary indexes
*
* @param[in,out] s Page state input/output
* @param[in] target_pos Target index position in dict_idx buffer (may exceed this value by up to
* 31)
* @param[in] t Warp1 thread ID (0..31)
*
* @return The new output position
*/
__device__ int gpuDecodeDictionaryIndices(volatile page_state_s* s, int target_pos, int t)
{
const uint8_t* end = s->data_end;
int dict_bits = s->dict_bits;
int pos = s->dict_pos;
while (pos < target_pos) {
int is_literal, batch_len;
if (!t) {
uint32_t run = s->dict_run;
const uint8_t* cur = s->data_start;
if (run <= 1) {
run = (cur < end) ? get_vlq32(cur, end) : 0;
if (!(run & 1)) {
// Repeated value
int bytecnt = (dict_bits + 7) >> 3;
if (cur + bytecnt <= end) {
int32_t run_val = cur[0];
if (bytecnt > 1) {
run_val |= cur[1] << 8;
if (bytecnt > 2) {
run_val |= cur[2] << 16;
if (bytecnt > 3) { run_val |= cur[3] << 24; }
}
}
s->dict_val = run_val & ((1 << dict_bits) - 1);
}
cur += bytecnt;
}
}
if (run & 1) {
// Literal batch: must output a multiple of 8, except for the last batch
int batch_len_div8;
batch_len = max(min(32, (int)(run >> 1) * 8), 1);
batch_len_div8 = (batch_len + 7) >> 3;
run -= batch_len_div8 * 2;
cur += batch_len_div8 * dict_bits;
} else {
batch_len = max(min(32, (int)(run >> 1)), 1);
run -= batch_len * 2;
}
s->dict_run = run;
s->data_start = cur;
is_literal = run & 1;
__threadfence_block();
}
__syncwarp();
is_literal = shuffle(is_literal);
batch_len = shuffle(batch_len);
if (t < batch_len) {
int dict_idx = s->dict_val;
if (is_literal) {
int32_t ofs = (t - ((batch_len + 7) & ~7)) * dict_bits;
const uint8_t* p = s->data_start + (ofs >> 3);
ofs &= 7;
if (p < end) {
uint32_t c = 8 - ofs;
dict_idx = (*p++) >> ofs;
if (c < dict_bits && p < end) {
dict_idx |= (*p++) << c;
c += 8;
if (c < dict_bits && p < end) {
dict_idx |= (*p++) << c;
c += 8;
if (c < dict_bits && p < end) { dict_idx |= (*p++) << c; }
}
}
dict_idx &= (1 << dict_bits) - 1;
}
}
s->dict_idx[(pos + t) & (non_zero_buffer_size - 1)] = dict_idx;
}
pos += batch_len;
}
return pos;
}
/**
 * @brief Performs RLE decoding of booleans (i.e. dictionary indexes when dict_size=1)
*
* @param[in,out] s Page state input/output
* @param[in] target_pos Target write position
* @param[in] t Thread ID
*
* @return The new output position
*/
__device__ int gpuDecodeRleBooleans(volatile page_state_s* s, int target_pos, int t)
{
const uint8_t* end = s->data_end;
int pos = s->dict_pos;
while (pos < target_pos) {
int is_literal, batch_len;
if (!t) {
uint32_t run = s->dict_run;
const uint8_t* cur = s->data_start;
if (run <= 1) {
run = (cur < end) ? get_vlq32(cur, end) : 0;
if (!(run & 1)) {
// Repeated value
s->dict_val = (cur < end) ? cur[0] & 1 : 0;
cur++;
}
}
if (run & 1) {
// Literal batch: must output a multiple of 8, except for the last batch
int batch_len_div8;
batch_len = max(min(32, (int)(run >> 1) * 8), 1);
if (batch_len >= 8) { batch_len &= ~7; }
batch_len_div8 = (batch_len + 7) >> 3;
run -= batch_len_div8 * 2;
cur += batch_len_div8;
} else {
batch_len = max(min(32, (int)(run >> 1)), 1);
run -= batch_len * 2;
}
s->dict_run = run;
s->data_start = cur;
is_literal = run & 1;
__threadfence_block();
}
__syncwarp();
is_literal = shuffle(is_literal);
batch_len = shuffle(batch_len);
if (t < batch_len) {
int dict_idx;
if (is_literal) {
int32_t ofs = t - ((batch_len + 7) & ~7);
const uint8_t* p = s->data_start + (ofs >> 3);
dict_idx = (p < end) ? (p[0] >> (ofs & 7u)) & 1 : 0;
} else {
dict_idx = s->dict_val;
}
s->dict_idx[(pos + t) & (non_zero_buffer_size - 1)] = dict_idx;
}
pos += batch_len;
}
return pos;
}
/**
* @brief Parses the length and position of strings
*
* @param[in,out] s Page state input/output
* @param[in] target_pos Target output position
* @param[in] t Thread ID
*
* @return The new output position
*/
__device__ void gpuInitStringDescriptors(volatile page_state_s* s, int target_pos, int t)
{
int pos = s->dict_pos;
// This step is purely serial
if (!t) {
const uint8_t* cur = s->data_start;
int dict_size = s->dict_size;
int k = s->dict_val;
while (pos < target_pos) {
int len;
if (k + 4 <= dict_size) {
len = (cur[k]) | (cur[k + 1] << 8) | (cur[k + 2] << 16) | (cur[k + 3] << 24);
k += 4;
if (k + len > dict_size) { len = 0; }
} else {
len = 0;
}
s->dict_idx[pos & (non_zero_buffer_size - 1)] = k;
s->str_len[pos & (non_zero_buffer_size - 1)] = len;
k += len;
pos++;
}
s->dict_val = k;
__threadfence_block();
}
}
/**
* @brief Output a string descriptor
*
* @param[in,out] s Page state input/output
* @param[in] src_pos Source position
* @param[in] dstv Pointer to row output data (string descriptor or 32-bit hash)
*/
inline __device__ void gpuOutputString(volatile page_state_s* s, int src_pos, void* dstv)
{
const char* ptr = nullptr;
size_t len = 0;
if (s->dict_base) {
// String dictionary
uint32_t dict_pos = (s->dict_bits > 0) ? s->dict_idx[src_pos & (non_zero_buffer_size - 1)] *
sizeof(string_index_pair)
: 0;
if (dict_pos < (uint32_t)s->dict_size) {
const auto* src = reinterpret_cast<const string_index_pair*>(s->dict_base + dict_pos);
ptr = src->first;
len = src->second;
}
} else {
// Plain encoding
uint32_t dict_pos = s->dict_idx[src_pos & (non_zero_buffer_size - 1)];
if (dict_pos <= (uint32_t)s->dict_size) {
ptr = reinterpret_cast<const char*>(s->data_start + dict_pos);
len = s->str_len[src_pos & (non_zero_buffer_size - 1)];
}
}
if (s->dtype_len == 4) {
// Output hash
*static_cast<uint32_t*>(dstv) = device_str2hash32(ptr, len);
} else {
// Output string descriptor
auto* dst = static_cast<string_index_pair*>(dstv);
dst->first = ptr;
dst->second = len;
}
}
/**
* @brief Output a boolean
*
* @param[in,out] s Page state input/output
* @param[in] src_pos Source position
* @param[in] dst Pointer to row output data
*/
inline __device__ void gpuOutputBoolean(volatile page_state_s* s, int src_pos, uint8_t* dst)
{
*dst = s->dict_idx[src_pos & (non_zero_buffer_size - 1)];
}
/**
* @brief Store a 32-bit data element
*
* @param[out] dst ptr to output
* @param[in] src8 raw input bytes
* @param[in] dict_pos byte position in dictionary
* @param[in] dict_size size of dictionary
*/
inline __device__ void gpuStoreOutput(uint32_t* dst,
const uint8_t* src8,
uint32_t dict_pos,
uint32_t dict_size)
{
uint32_t bytebuf;
unsigned int ofs = 3 & reinterpret_cast<size_t>(src8);
src8 -= ofs; // align to 32-bit boundary
ofs <<= 3; // bytes -> bits
if (dict_pos < dict_size) {
bytebuf = *reinterpret_cast<const uint32_t*>(src8 + dict_pos);
if (ofs) {
uint32_t bytebufnext = *reinterpret_cast<const uint32_t*>(src8 + dict_pos + 4);
bytebuf = __funnelshift_r(bytebuf, bytebufnext, ofs);
}
} else {
bytebuf = 0;
}
*dst = bytebuf;
}
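// For example, if src8 is one byte past a 32-bit boundary, ofs becomes 8 and the
// funnel shift stitches together bytes 1-4 of the two aligned words, i.e. the
// 32-bit value that begins at the requested (unaligned) position.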
/**
* @brief Store a 64-bit data element
*
* @param[out] dst ptr to output
* @param[in] src8 raw input bytes
* @param[in] dict_pos byte position in dictionary
* @param[in] dict_size size of dictionary
*/
inline __device__ void gpuStoreOutput(uint2* dst,
const uint8_t* src8,
uint32_t dict_pos,
uint32_t dict_size)
{
uint2 v;
unsigned int ofs = 3 & reinterpret_cast<size_t>(src8);
src8 -= ofs; // align to 32-bit boundary
ofs <<= 3; // bytes -> bits
if (dict_pos < dict_size) {
v.x = *reinterpret_cast<const uint32_t*>(src8 + dict_pos + 0);
v.y = *reinterpret_cast<const uint32_t*>(src8 + dict_pos + 4);
if (ofs) {
uint32_t next = *reinterpret_cast<const uint32_t*>(src8 + dict_pos + 8);
v.x = __funnelshift_r(v.x, v.y, ofs);
v.y = __funnelshift_r(v.y, next, ofs);
}
} else {
v.x = v.y = 0;
}
*dst = v;
}
/**
* @brief Convert an INT96 Spark timestamp to 64-bit timestamp
*
* @param[in,out] s Page state input/output
* @param[in] src_pos Source position
* @param[out] dst Pointer to row output data
*/
inline __device__ void gpuOutputInt96Timestamp(volatile page_state_s* s, int src_pos, int64_t* dst)
{
using cuda::std::chrono::duration_cast;
const uint8_t* src8;
uint32_t dict_pos, dict_size = s->dict_size, ofs;
if (s->dict_base) {
// Dictionary
dict_pos = (s->dict_bits > 0) ? s->dict_idx[src_pos & (non_zero_buffer_size - 1)] : 0;
src8 = s->dict_base;
} else {
// Plain
dict_pos = src_pos;
src8 = s->data_start;
}
dict_pos *= (uint32_t)s->dtype_len_in;
ofs = 3 & reinterpret_cast<size_t>(src8);
src8 -= ofs; // align to 32-bit boundary
ofs <<= 3; // bytes -> bits
if (dict_pos + 4 >= dict_size) {
*dst = 0;
return;
}
uint3 v;
int64_t nanos, days;
v.x = *reinterpret_cast<const uint32_t*>(src8 + dict_pos + 0);
v.y = *reinterpret_cast<const uint32_t*>(src8 + dict_pos + 4);
v.z = *reinterpret_cast<const uint32_t*>(src8 + dict_pos + 8);
if (ofs) {
uint32_t next = *reinterpret_cast<const uint32_t*>(src8 + dict_pos + 12);
v.x = __funnelshift_r(v.x, v.y, ofs);
v.y = __funnelshift_r(v.y, v.z, ofs);
v.z = __funnelshift_r(v.z, next, ofs);
}
nanos = v.y;
nanos <<= 32;
nanos |= v.x;
// Convert from Julian day at noon to UTC seconds
days = static_cast<int32_t>(v.z);
cudf::duration_D d_d{
days - 2440588}; // TBD: Should be noon instead of midnight, but this matches pyarrow
*dst = [&]() {
switch (s->col.ts_clock_rate) {
case 1: // seconds
return duration_cast<duration_s>(d_d).count() +
duration_cast<duration_s>(duration_ns{nanos}).count();
case 1'000: // milliseconds
return duration_cast<duration_ms>(d_d).count() +
duration_cast<duration_ms>(duration_ns{nanos}).count();
case 1'000'000: // microseconds
return duration_cast<duration_us>(d_d).count() +
duration_cast<duration_us>(duration_ns{nanos}).count();
case 1'000'000'000: // nanoseconds
default: return duration_cast<cudf::duration_ns>(d_d).count() + nanos;
}
}();
}
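// For example, an INT96 value holding Julian day 2440589 with zero nanoseconds is
// one day after the Unix epoch (Julian day 2440588), so with ts_clock_rate == 1
// this writes 86400 seconds.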
/**
* @brief Output a 64-bit timestamp
*
* @param[in,out] s Page state input/output
* @param[in] src_pos Source position
* @param[in] dst Pointer to row output data
*/
inline __device__ void gpuOutputInt64Timestamp(volatile page_state_s* s, int src_pos, int64_t* dst)
{
const uint8_t* src8;
uint32_t dict_pos, dict_size = s->dict_size, ofs;
int64_t ts;
if (s->dict_base) {
// Dictionary
dict_pos = (s->dict_bits > 0) ? s->dict_idx[src_pos & (non_zero_buffer_size - 1)] : 0;
src8 = s->dict_base;
} else {
// Plain
dict_pos = src_pos;
src8 = s->data_start;
}
dict_pos *= (uint32_t)s->dtype_len_in;
ofs = 3 & reinterpret_cast<size_t>(src8);
src8 -= ofs; // align to 32-bit boundary
ofs <<= 3; // bytes -> bits
if (dict_pos + 4 < dict_size) {
uint2 v;
int64_t val;
int32_t ts_scale;
v.x = *reinterpret_cast<const uint32_t*>(src8 + dict_pos + 0);
v.y = *reinterpret_cast<const uint32_t*>(src8 + dict_pos + 4);
if (ofs) {
uint32_t next = *reinterpret_cast<const uint32_t*>(src8 + dict_pos + 8);
v.x = __funnelshift_r(v.x, v.y, ofs);
v.y = __funnelshift_r(v.y, next, ofs);
}
val = v.y;
val <<= 32;
val |= v.x;
// Output to desired clock rate
ts_scale = s->ts_scale;
if (ts_scale < 0) {
// round towards negative infinity
int sign = (val < 0);
ts = ((val + sign) / -ts_scale) + sign;
} else {
ts = val * ts_scale;
}
} else {
ts = 0;
}
*dst = ts;
}
/**
* @brief Output a fixed-length byte array as int.
*
* @param[in,out] s Page state input/output
* @param[in] src_pos Source position
* @param[in] dst Pointer to row output data
*/
template <typename T>
__device__ void gpuOutputFixedLenByteArrayAsInt(volatile page_state_s* s, int src_pos, T* dst)
{
uint32_t const dtype_len_in = s->dtype_len_in;
uint8_t const* data = s->dict_base ? s->dict_base : s->data_start;
uint32_t const pos =
(s->dict_base ? ((s->dict_bits > 0) ? s->dict_idx[src_pos & (non_zero_buffer_size - 1)] : 0)
: src_pos) *
dtype_len_in;
uint32_t const dict_size = s->dict_size;
T unscaled = 0;
for (unsigned int i = 0; i < dtype_len_in; i++) {
uint32_t v = (pos + i < dict_size) ? data[pos + i] : 0;
unscaled = (unscaled << 8) | v;
}
  // When the stored value is narrower than T, shift it up and back down so that
  // the sign bit is extended and negative numbers are represented correctly.
if (dtype_len_in < sizeof(T)) {
unscaled <<= (sizeof(T) - dtype_len_in) * 8;
unscaled >>= (sizeof(T) - dtype_len_in) * 8;
}
*dst = unscaled;
}
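// For example, a 2-byte FIXED_LEN_BYTE_ARRAY {0xFF, 0xFE} read into an int32_t
// gives unscaled == 0x0000FFFE; shifting up by 16 and arithmetically back down
// yields 0xFFFFFFFE == -2, preserving the sign of the stored big-endian integer.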
/**
* @brief Output a small fixed-length value
*
* @param[in,out] s Page state input/output
* @param[in] src_pos Source position
* @param[in] dst Pointer to row output data
*/
template <typename T>
inline __device__ void gpuOutputFast(volatile page_state_s* s, int src_pos, T* dst)
{
const uint8_t* dict;
uint32_t dict_pos, dict_size = s->dict_size;
if (s->dict_base) {
// Dictionary
dict_pos = (s->dict_bits > 0) ? s->dict_idx[src_pos & (non_zero_buffer_size - 1)] : 0;
dict = s->dict_base;
} else {
// Plain
dict_pos = src_pos;
dict = s->data_start;
}
dict_pos *= (uint32_t)s->dtype_len_in;
gpuStoreOutput(dst, dict, dict_pos, dict_size);
}
/**
* @brief Output a N-byte value
*
* @param[in,out] s Page state input/output
* @param[in] src_pos Source position
* @param[in] dst8 Pointer to row output data
* @param[in] len Length of element
*/
static __device__ void gpuOutputGeneric(volatile page_state_s* s,
int src_pos,
uint8_t* dst8,
int len)
{
const uint8_t* dict;
uint32_t dict_pos, dict_size = s->dict_size;
if (s->dict_base) {
// Dictionary
dict_pos = (s->dict_bits > 0) ? s->dict_idx[src_pos & (non_zero_buffer_size - 1)] : 0;
dict = s->dict_base;
} else {
// Plain
dict_pos = src_pos;
dict = s->data_start;
}
dict_pos *= (uint32_t)s->dtype_len_in;
if (len & 3) {
// Generic slow path
for (unsigned int i = 0; i < len; i++) {
dst8[i] = (dict_pos + i < dict_size) ? dict[dict_pos + i] : 0;
}
} else {
// Copy 4 bytes at a time
const uint8_t* src8 = dict;
unsigned int ofs = 3 & reinterpret_cast<size_t>(src8);
src8 -= ofs; // align to 32-bit boundary
ofs <<= 3; // bytes -> bits
for (unsigned int i = 0; i < len; i += 4) {
uint32_t bytebuf;
if (dict_pos < dict_size) {
bytebuf = *reinterpret_cast<const uint32_t*>(src8 + dict_pos);
if (ofs) {
uint32_t bytebufnext = *reinterpret_cast<const uint32_t*>(src8 + dict_pos + 4);
bytebuf = __funnelshift_r(bytebuf, bytebufnext, ofs);
}
} else {
bytebuf = 0;
}
dict_pos += 4;
*reinterpret_cast<uint32_t*>(dst8 + i) = bytebuf;
}
}
}
/**
* @brief Sets up block-local page state information from the global pages.
*
* @param[in, out] s The local page state to be filled in
* @param[in] p The global page to be copied from
* @param[in] chunks The global list of chunks
* @param[in] num_rows Maximum number of rows to read
* @param[in] min_row crop all rows below min_row
* @param[in] num_chunk Number of column chunks
*/
static __device__ bool setupLocalPageInfo(page_state_s* const s,
PageInfo* p,
ColumnChunkDesc const* chunks,
size_t min_row,
size_t num_rows,
int32_t num_chunks)
{
int t = threadIdx.x;
int chunk_idx;
// Fetch page info
if (t == 0) s->page = *p;
__syncthreads();
if (s->page.flags & PAGEINFO_FLAGS_DICTIONARY) { return false; }
// Fetch column chunk info
chunk_idx = s->page.chunk_idx;
if (t == 0) { s->col = chunks[chunk_idx]; }
// zero nested value and valid counts
int d = 0;
while (d < s->page.num_nesting_levels) {
if (d + t < s->page.num_nesting_levels) {
s->page.nesting[d + t].valid_count = 0;
s->page.nesting[d + t].value_count = 0;
s->page.nesting[d + t].null_count = 0;
}
d += blockDim.x;
}
__syncthreads();
if (!t) {
s->error = 0;
// our starting row (absolute index) is
// col.start_row == absolute row index
// page.chunk-row == relative row index within the chunk
size_t page_start_row = s->col.start_row + s->page.chunk_row;
// IMPORTANT : nested schemas can have 0 rows in a page but still have
// values. The case is:
// - On page N-1, the last row starts, with 2/6 values encoded
// - On page N, the remaining 4/6 values are encoded, but there are no new rows.
// if (s->page.num_input_values > 0 && s->page.num_rows > 0) {
if (s->page.num_input_values > 0) {
uint8_t* cur = s->page.page_data;
uint8_t* end = cur + s->page.uncompressed_page_size;
uint32_t dtype_len_out = s->col.data_type >> 3;
s->ts_scale = 0;
// Validate data type
auto const data_type = s->col.data_type & 7;
switch (data_type) {
case BOOLEAN:
s->dtype_len = 1; // Boolean are stored as 1 byte on the output
break;
case INT32:
case FLOAT: s->dtype_len = 4; break;
case INT64:
if (s->col.ts_clock_rate) {
int32_t units = 0;
if (s->col.converted_type == TIME_MILLIS or s->col.converted_type == TIMESTAMP_MILLIS) {
units = cudf::timestamp_ms::period::den;
} else if (s->col.converted_type == TIME_MICROS or
s->col.converted_type == TIMESTAMP_MICROS) {
units = cudf::timestamp_us::period::den;
} else if (s->col.logical_type.TIMESTAMP.unit.isset.NANOS) {
units = cudf::timestamp_ns::period::den;
}
if (units and units != s->col.ts_clock_rate) {
s->ts_scale = (s->col.ts_clock_rate < units) ? -(units / s->col.ts_clock_rate)
: (s->col.ts_clock_rate / units);
}
}
// Fall through to DOUBLE
case DOUBLE: s->dtype_len = 8; break;
case INT96: s->dtype_len = 12; break;
case BYTE_ARRAY: s->dtype_len = sizeof(string_index_pair); break;
default: // FIXED_LEN_BYTE_ARRAY:
s->dtype_len = dtype_len_out;
s->error |= (s->dtype_len <= 0);
break;
}
// Special check for downconversions
s->dtype_len_in = s->dtype_len;
if (s->col.converted_type == DECIMAL && data_type == FIXED_LEN_BYTE_ARRAY) {
s->dtype_len = s->dtype_len <= sizeof(int32_t) ? sizeof(int32_t)
: s->dtype_len <= sizeof(int64_t) ? sizeof(int64_t)
: sizeof(__int128_t);
} else if (data_type == INT32) {
if (dtype_len_out == 1) s->dtype_len = 1; // INT8 output
if (dtype_len_out == 2) s->dtype_len = 2; // INT16 output
} else if (data_type == BYTE_ARRAY && dtype_len_out == 4) {
s->dtype_len = 4; // HASH32 output
} else if (data_type == INT96) {
s->dtype_len = 8; // Convert to 64-bit timestamp
}
// first row within the page to output
if (page_start_row >= min_row) {
s->first_row = 0;
} else {
s->first_row = (int32_t)min(min_row - page_start_row, (size_t)s->page.num_rows);
}
// # of rows within the page to output
s->num_rows = s->page.num_rows;
if ((page_start_row + s->first_row) + s->num_rows > min_row + num_rows) {
s->num_rows =
(int32_t)max((int64_t)(min_row + num_rows - (page_start_row + s->first_row)), INT64_C(0));
}
// during the decoding step we need to offset the global output buffers
// for each level of nesting so that we write to the section this page
// is responsible for.
// - for flat schemas, we can do this directly by using row counts
// - for nested schemas, these offsets are computed during the preprocess step
if (s->col.column_data_base != nullptr) {
int max_depth = s->col.max_nesting_depth;
for (int idx = 0; idx < max_depth; idx++) {
PageNestingInfo* pni = &s->page.nesting[idx];
size_t output_offset;
// schemas without lists
if (s->col.max_level[level_type::REPETITION] == 0) {
output_offset = page_start_row >= min_row ? page_start_row - min_row : 0;
}
// for schemas with lists, we've already got the exactly value precomputed
else {
output_offset = pni->page_start_value;
}
pni->data_out = static_cast<uint8_t*>(s->col.column_data_base[idx]);
if (pni->data_out != nullptr) {
// anything below max depth with a valid data pointer must be a list, so the
// element size is the size of the offset type.
uint32_t len = idx < max_depth - 1 ? sizeof(cudf::size_type) : s->dtype_len;
pni->data_out += (output_offset * len);
}
pni->valid_map = s->col.valid_map_base[idx];
if (pni->valid_map != nullptr) {
pni->valid_map += output_offset >> 5;
pni->valid_map_offset = (int32_t)(output_offset & 0x1f);
}
}
}
s->first_output_value = 0;
// Find the compressed size of repetition levels
cur += InitLevelSection(s, cur, end, level_type::REPETITION);
// Find the compressed size of definition levels
cur += InitLevelSection(s, cur, end, level_type::DEFINITION);
s->dict_bits = 0;
s->dict_base = nullptr;
s->dict_size = 0;
switch (s->page.encoding) {
case Encoding::PLAIN_DICTIONARY:
case Encoding::RLE_DICTIONARY:
// RLE-packed dictionary indices, first byte indicates index length in bits
if (((s->col.data_type & 7) == BYTE_ARRAY) && (s->col.str_dict_index)) {
// String dictionary: use index
s->dict_base = reinterpret_cast<const uint8_t*>(s->col.str_dict_index);
s->dict_size = s->col.page_info[0].num_input_values * sizeof(string_index_pair);
} else {
s->dict_base =
s->col.page_info[0].page_data; // dictionary is always stored in the first page
s->dict_size = s->col.page_info[0].uncompressed_page_size;
}
s->dict_run = 0;
s->dict_val = 0;
s->dict_bits = (cur < end) ? *cur++ : 0;
if (s->dict_bits > 32 || !s->dict_base) { s->error = (10 << 8) | s->dict_bits; }
break;
case Encoding::PLAIN:
s->dict_size = static_cast<int32_t>(end - cur);
s->dict_val = 0;
if ((s->col.data_type & 7) == BOOLEAN) { s->dict_run = s->dict_size * 2 + 1; }
break;
case Encoding::RLE: s->dict_run = 0; break;
default:
s->error = 1; // Unsupported encoding
break;
}
if (cur > end) { s->error = 1; }
s->lvl_end = cur;
s->data_start = cur;
s->data_end = end;
} else {
s->error = 1;
}
s->lvl_count[level_type::REPETITION] = 0;
s->lvl_count[level_type::DEFINITION] = 0;
s->nz_count = 0;
s->num_input_values = s->page.num_input_values;
s->dict_pos = 0;
s->src_pos = 0;
// for flat hierarchies, we can't know how many leaf values to skip unless we do a full
// preprocess of the definition levels (since nulls will have no actual decodable value, there
// is no direct correlation between # of rows and # of decodable values). so we will start
// processing at the beginning of the value stream and disregard any indices that start
// before the first row.
if (s->col.max_level[level_type::REPETITION] == 0) {
s->page.skipped_values = 0;
s->page.skipped_leaf_values = 0;
s->input_value_count = 0;
s->input_row_count = 0;
s->row_index_lower_bound = -1;
}
// for nested hierarchies, we have run a preprocess that lets us skip directly to the values
// we need to start decoding at
else {
// input_row_count translates to "how many rows we have processed so far", so since we are
// skipping directly to where we want to start decoding, set it to first_row
s->input_row_count = s->first_row;
// return the lower bound to compare (page-relative) thread row index against. Explanation:
// In the case of nested schemas, rows can span page boundaries. That is to say,
// we can encounter the first value for row X on page M, but the last value for page M
// might not be the last value for row X. page M+1 (or further) may contain the last value.
//
// This means that the first values we encounter for a given page (M+1) may not belong to the
// row indicated by chunk_row, but to the row before it that spanned page boundaries. If that
// previous row is within the overall row bounds, include the values by allowing relative row
// index -1
int const max_row = (min_row + num_rows) - 1;
if (min_row < page_start_row && max_row >= page_start_row - 1) {
s->row_index_lower_bound = -1;
} else {
s->row_index_lower_bound = s->first_row;
}
// if we're in the decoding step, jump directly to the first
// value we care about
if (s->col.column_data_base != nullptr) {
s->input_value_count = s->page.skipped_values > -1 ? s->page.skipped_values : 0;
} else {
s->input_value_count = 0;
s->input_leaf_count = 0;
s->page.skipped_values = -1;
s->page.skipped_leaf_values = -1;
}
}
__threadfence_block();
}
__syncthreads();
return true;
}
/**
* @brief Store a validity mask containing value_count bits into the output validity buffer of the
* page.
*
* @param[in,out] pni The page/nesting information to store the mask in. The validity map offset is
* also updated
* @param[in] valid_mask The validity mask to be stored
* @param[in] value_count # of bits in the validity mask
*/
static __device__ void store_validity(PageNestingInfo* pni,
uint32_t valid_mask,
int32_t value_count)
{
int word_offset = pni->valid_map_offset / 32;
int bit_offset = pni->valid_map_offset % 32;
// if we fit entirely in the output word
if (bit_offset + value_count <= 32) {
auto relevant_mask = static_cast<uint32_t>((static_cast<uint64_t>(1) << value_count) - 1);
if (relevant_mask == ~0) {
pni->valid_map[word_offset] = valid_mask;
} else {
atomicAnd(pni->valid_map + word_offset, ~(relevant_mask << bit_offset));
atomicOr(pni->valid_map + word_offset, (valid_mask & relevant_mask) << bit_offset);
}
}
// we're going to spill over into the next word.
// note : writing both values here is the lazy/slow way. we could be writing just
// the first word and rolling the remaining bits over into the next call.
// however, some basic performance tests shows almost no difference between these two
// methods. More detailed performance testing might be worthwhile here.
else {
uint32_t bits_left = 32 - bit_offset;
// first word. strip bits_left bits off the beginning and store that
uint32_t relevant_mask = ((1 << bits_left) - 1);
uint32_t mask_word0 = valid_mask & relevant_mask;
atomicAnd(pni->valid_map + word_offset, ~(relevant_mask << bit_offset));
atomicOr(pni->valid_map + word_offset, mask_word0 << bit_offset);
// second word. strip the remainder of the bits off the end and store that
relevant_mask = ((1 << (value_count - bits_left)) - 1);
uint32_t mask_word1 = valid_mask & (relevant_mask << bits_left);
atomicAnd(pni->valid_map + word_offset + 1, ~(relevant_mask));
atomicOr(pni->valid_map + word_offset + 1, mask_word1 >> bits_left);
}
pni->valid_map_offset += value_count;
}
/**
* @brief Compute the nesting bounds within the hierarchy to add values to, and the definition level
* D to which we should considered them null or not.
*
* @param[out] start_depth The start nesting depth
* @param[out] end_depth The end nesting depth (inclusive)
* @param[out] d The definition level up to which added values are not-null. if t is out of bounds,
* d will be -1
* @param[in] s Local page information
* @param[in] input_value_count The current count of input level values we have processed
* @param[in] target_input_value_count The desired # of input level values we want to process
* @param[in] t Thread index
*/
inline __device__ void get_nesting_bounds(int& start_depth,
int& end_depth,
int& d,
page_state_s* s,
int input_value_count,
int32_t target_input_value_count,
int t)
{
start_depth = -1;
end_depth = -1;
d = -1;
if (input_value_count + t < target_input_value_count) {
int index = rolling_index(input_value_count + t);
d = s->def[index];
// if we have repetition (there are list columns involved) we have to
// bound what nesting levels we apply values to
if (s->col.max_level[level_type::REPETITION] > 0) {
int r = s->rep[index];
start_depth = s->page.nesting[r].start_depth;
end_depth = s->page.nesting[d].end_depth;
}
// for columns without repetition (even ones involving structs) we always
// traverse the entire hierarchy.
else {
start_depth = 0;
end_depth = s->col.max_nesting_depth - 1;
}
}
}
/**
* @brief Process a batch of incoming repetition/definition level values and generate
* validity, nested column offsets (where appropriate) and decoding indices.
*
* @param[in] target_input_value_count The # of repetition/definition levels to process up to
* @param[in] s Local page information
* @param[in] t Thread index
*/
static __device__ void gpuUpdateValidityOffsetsAndRowIndices(int32_t target_input_value_count,
page_state_s* s,
int t)
{
// max nesting depth of the column
int const max_depth = s->col.max_nesting_depth;
// how many (input) values we've processed in the page so far
int input_value_count = s->input_value_count;
// how many rows we've processed in the page so far
int input_row_count = s->input_row_count;
// process until we've reached the target
while (input_value_count < target_input_value_count) {
// determine the nesting bounds for this thread (the range of nesting depths we
// will generate new value indices and validity bits for)
int start_depth, end_depth, d;
get_nesting_bounds(
start_depth, end_depth, d, s, input_value_count, target_input_value_count, t);
// 4 interesting things to track:
// thread_value_count : # of output values from the view of this thread
// warp_value_count : # of output values for the whole warp
//
// thread_valid_count : # of valid values from the view of this thread
// warp_valid_count : # of valid values for the whole warp
uint32_t thread_value_count, warp_value_count;
uint32_t thread_valid_count, warp_valid_count;
// track (page-relative) row index for the thread so we can compare against input bounds
// keep track of overall # of rows we've read.
int const is_new_row = start_depth == 0 ? 1 : 0;
uint32_t const warp_row_count_mask = ballot(is_new_row);
int32_t const thread_row_index =
input_row_count + ((__popc(warp_row_count_mask & ((1 << t) - 1)) + is_new_row) - 1);
input_row_count += __popc(warp_row_count_mask);
// is this thread within read row bounds?
int const in_row_bounds = thread_row_index >= s->row_index_lower_bound &&
thread_row_index < (s->first_row + s->num_rows)
? 1
: 0;
// compute warp and thread value counts
uint32_t const warp_count_mask =
ballot((0 >= start_depth && 0 <= end_depth) && in_row_bounds ? 1 : 0);
warp_value_count = __popc(warp_count_mask);
// Note : ((1 << t) - 1) implies "for all threads before me"
thread_value_count = __popc(warp_count_mask & ((1 << t) - 1));
// walk from 0 to max_depth
uint32_t next_thread_value_count, next_warp_value_count;
for (int s_idx = 0; s_idx < max_depth; s_idx++) {
PageNestingInfo* pni = &s->page.nesting[s_idx];
// if we are within the range of nesting levels we should be adding value indices for
int const in_nesting_bounds =
((s_idx >= start_depth && s_idx <= end_depth) && in_row_bounds) ? 1 : 0;
// everything up to the max_def_level is a non-null value
uint32_t const is_valid = d >= pni->max_def_level && in_nesting_bounds ? 1 : 0;
// compute warp and thread valid counts
uint32_t const warp_valid_mask =
// for flat schemas, a simple ballot_sync gives us the correct count and bit positions
// because every value in the input matches to a value in the output
max_depth == 1
? ballot(is_valid)
:
// for nested schemas, it's more complicated. This warp will visit 32 incoming values,
// however not all of them will necessarily represent a value at this nesting level. so
// the validity bit for thread t might actually represent output value t-6. the correct
// position for thread t's bit is cur_value_count. for cuda 11 we could use
// __reduce_or_sync(), but until then we have to do a warp reduce.
WarpReduceOr32(is_valid << thread_value_count);
thread_valid_count = __popc(warp_valid_mask & ((1 << thread_value_count) - 1));
warp_valid_count = __popc(warp_valid_mask);
// if this is the value column emit an index for value decoding
if (is_valid && s_idx == max_depth - 1) {
int const src_pos = pni->valid_count + thread_valid_count;
int const dst_pos = pni->value_count + thread_value_count;
// nz_idx is a mapping of src buffer indices to destination buffer indices
s->nz_idx[rolling_index(src_pos)] = dst_pos;
}
// compute warp and thread value counts for the -next- nesting level. we need to
// do this for nested schemas so that we can emit an offset for the -current- nesting
// level. more concretely : the offset for the current nesting level == current length of the
// next nesting level
if (s_idx < max_depth - 1) {
uint32_t const next_warp_count_mask =
ballot((s_idx + 1 >= start_depth && s_idx + 1 <= end_depth && in_row_bounds) ? 1 : 0);
next_warp_value_count = __popc(next_warp_count_mask);
next_thread_value_count = __popc(next_warp_count_mask & ((1 << t) - 1));
// if we're -not- at a leaf column and we're within nesting/row bounds
// and we have a valid data_out pointer, it implies this is a list column, so
// emit an offset.
if (in_nesting_bounds && pni->data_out != nullptr) {
int const idx = pni->value_count + thread_value_count;
cudf::size_type const ofs = s->page.nesting[s_idx + 1].value_count +
next_thread_value_count +
s->page.nesting[s_idx + 1].page_start_value;
(reinterpret_cast<cudf::size_type*>(pni->data_out))[idx] = ofs;
}
}
// nested schemas always read and write to the same bounds (that is, read and write positions
// are already pre-bounded by first_row/num_rows). flat schemas will start reading at the
// first value, even if that is before first_row, because we cannot trivially jump to
// the correct position to start reading. since we are about to write the validity vector here
// we need to adjust our computed mask to take into account the write row bounds.
int const in_write_row_bounds =
max_depth == 1
? thread_row_index >= s->first_row && thread_row_index < (s->first_row + s->num_rows)
: in_row_bounds;
int const first_thread_in_write_range =
max_depth == 1 ? __ffs(ballot(in_write_row_bounds)) - 1 : 0;
// # of bits to of the validity mask to write out
int const warp_valid_mask_bit_count =
first_thread_in_write_range < 0 ? 0 : warp_value_count - first_thread_in_write_range;
// increment count of valid values, count of total values, and update validity mask
if (!t) {
if (pni->valid_map != nullptr && warp_valid_mask_bit_count > 0) {
uint32_t const warp_output_valid_mask = warp_valid_mask >> first_thread_in_write_range;
store_validity(pni, warp_output_valid_mask, warp_valid_mask_bit_count);
pni->null_count += warp_valid_mask_bit_count - __popc(warp_output_valid_mask);
}
pni->valid_count += warp_valid_count;
pni->value_count += warp_value_count;
}
// propagate value counts for the next level
warp_value_count = next_warp_value_count;
thread_value_count = next_thread_value_count;
}
input_value_count += min(32, (target_input_value_count - input_value_count));
__syncwarp();
}
// update
if (!t) {
// update valid value count for decoding and total # of values we've processed
s->nz_count = s->page.nesting[max_depth - 1].valid_count;
s->input_value_count = input_value_count;
s->input_row_count = input_row_count;
}
}
/**
* @brief Process repetition and definition levels up to the target count of leaf values.
*
* In order to decode actual leaf values from the input stream, we need to generate the
* list of non-null value positions (page_state_s::nz_idx). We do this by processing
* the repetition and definition level streams. This process also generates validity information,
* and offset column values in the case of nested schemas. Because of the way the streams
* are encoded, this function may generate slightly more than target_leaf_count.
*
* Only runs on 1 warp.
*
* @param[in] s The local page state
* @param[in] target_leaf_count Target count of non-null leaf values to generate indices for
* @param[in] t Thread index
*/
__device__ void gpuDecodeLevels(page_state_s* s, int32_t target_leaf_count, int t)
{
bool has_repetition = s->col.max_level[level_type::REPETITION] > 0;
constexpr int batch_size = 32;
int cur_leaf_count = target_leaf_count;
while (!s->error && s->nz_count < target_leaf_count &&
s->input_value_count < s->num_input_values) {
if (has_repetition) { gpuDecodeStream(s->rep, s, cur_leaf_count, t, level_type::REPETITION); }
gpuDecodeStream(s->def, s, cur_leaf_count, t, level_type::DEFINITION);
__syncwarp();
// because the rep and def streams are encoded separately, we cannot request an exact
// # of values to be decoded at once. we can only process the lowest # of decoded rep/def
// levels we get.
int actual_leaf_count = has_repetition ? min(s->lvl_count[level_type::REPETITION],
s->lvl_count[level_type::DEFINITION])
: s->lvl_count[level_type::DEFINITION];
// process what we got back
gpuUpdateValidityOffsetsAndRowIndices(actual_leaf_count, s, t);
cur_leaf_count = actual_leaf_count + batch_size;
__syncwarp();
}
}
/**
* @brief Process a batch of incoming repetition/definition level values to generate
* per-nesting level output column size for this page.
*
* Each page represents one piece of the overall output column. The total output (cudf)
* column sizes are the sum of the values in each individual page.
*
* @param[in] s The local page info
* @param[in] target_input_value_count The # of repetition/definition levels to process up to
* @param[in] t Thread index
* @param[in] bounds_set Whether or not s->row_index_lower_bound, s->first_row and s->num_rows
* have been computed for this page (they will only be set in the second/trim pass).
*/
static __device__ void gpuUpdatePageSizes(page_state_s* s,
int32_t target_input_value_count,
int t,
bool bounds_set)
{
// max nesting depth of the column
int max_depth = s->col.max_nesting_depth;
// bool has_repetition = s->col.max_level[level_type::REPETITION] > 0 ? true : false;
// how many input level values we've processed in the page so far
int input_value_count = s->input_value_count;
// how many leaf values we've processed in the page so far
int input_leaf_count = s->input_leaf_count;
// how many rows we've processed in the page so far
int input_row_count = s->input_row_count;
while (input_value_count < target_input_value_count) {
int start_depth, end_depth, d;
get_nesting_bounds(
start_depth, end_depth, d, s, input_value_count, target_input_value_count, t);
// count rows and leaf values
int is_new_row = start_depth == 0 ? 1 : 0;
uint32_t warp_row_count_mask = ballot(is_new_row);
int is_new_leaf = (d >= s->page.nesting[max_depth - 1].max_def_level) ? 1 : 0;
uint32_t warp_leaf_count_mask = ballot(is_new_leaf);
// is this thread within row bounds? on the first pass we don't know the bounds, so we will be
// computing the full size of the column. on the second pass, we will know our actual row
// bounds, so the computation will cap sizes properly.
int in_row_bounds = 1;
if (bounds_set) {
// absolute row index
int32_t thread_row_index =
input_row_count + ((__popc(warp_row_count_mask & ((1 << t) - 1)) + is_new_row) - 1);
in_row_bounds = thread_row_index >= s->row_index_lower_bound &&
thread_row_index < (s->first_row + s->num_rows)
? 1
: 0;
uint32_t row_bounds_mask = ballot(in_row_bounds);
int first_thread_in_range = __ffs(row_bounds_mask) - 1;
// if we've found the beginning of the first row, mark down the position
// in the def/repetition buffer (skipped_values) and the data buffer (skipped_leaf_values)
if (!t && first_thread_in_range >= 0 && s->page.skipped_values < 0) {
// how many values we've skipped in the rep/def levels
s->page.skipped_values = input_value_count + first_thread_in_range;
// how many values we've skipped in the actual data stream
s->page.skipped_leaf_values =
input_leaf_count + __popc(warp_leaf_count_mask & ((1 << first_thread_in_range) - 1));
}
}
// increment counts across all nesting depths
for (int s_idx = 0; s_idx < max_depth; s_idx++) {
// if we are within the range of nesting levels we should be adding value indices for
int in_nesting_bounds = (s_idx >= start_depth && s_idx <= end_depth && in_row_bounds) ? 1 : 0;
uint32_t count_mask = ballot(in_nesting_bounds);
if (!t) { s->page.nesting[s_idx].size += __popc(count_mask); }
}
input_value_count += min(32, (target_input_value_count - input_value_count));
input_row_count += __popc(warp_row_count_mask);
input_leaf_count += __popc(warp_leaf_count_mask);
}
// update final page value count
if (!t) {
s->input_value_count = target_input_value_count;
s->input_leaf_count = input_leaf_count;
s->input_row_count = input_row_count;
}
}
/**
* @brief Kernel for computing per-page column size information for all nesting levels.
*
* This function will write out the size field for each level of nesting.
*
* @param[in,out] pages List of pages
* @param[in] chunks List of column chunks
* @param[in] num_chunks Number of column chunks
* @param[in] min_row Row index to start reading at
* @param[in] num_rows Maximum number of rows to read
* @param[in] num_chunks Number of column chunks
* @param[in] trim_pass Whether or not this is the trim pass. We first have to compute
* the full size information of every page before we come through in a second (trim) pass
* to determine what subset of rows in this page we should be reading.
*/
// blockDim {block_size,1,1}
__global__ void __launch_bounds__(block_size) gpuComputePageSizes(PageInfo* pages,
ColumnChunkDesc const* chunks,
size_t min_row,
size_t num_rows,
int32_t num_chunks,
bool trim_pass)
{
__shared__ __align__(16) page_state_s state_g;
page_state_s* const s = &state_g;
int page_idx = blockIdx.x;
int t = threadIdx.x;
PageInfo* pp = &pages[page_idx];
if (!setupLocalPageInfo(
s, pp, chunks, trim_pass ? min_row : 0, trim_pass ? num_rows : INT_MAX, num_chunks)) {
return;
}
// zero sizes
int d = 0;
while (d < s->page.num_nesting_levels) {
if (d + t < s->page.num_nesting_levels) { s->page.nesting[d + t].size = 0; }
d += blockDim.x;
}
if (!t) {
s->page.skipped_values = -1;
s->page.skipped_leaf_values = -1;
s->input_row_count = 0;
s->input_value_count = 0;
// if this isn't the trim pass, make sure we visit absolutely everything
if (!trim_pass) {
s->first_row = 0;
s->num_rows = INT_MAX;
s->row_index_lower_bound = -1;
}
}
__syncthreads();
bool has_repetition = s->col.max_level[level_type::REPETITION] > 0;
// optimization : it might be useful to have a version of gpuDecodeStream that could go wider than
// 1 warp. Currently it only uses 1 warp so that it can overlap work with the value decoding step
// when in the actual value decoding kernel. However, during this preprocess step we have no such
// limits - we could go as wide as block_size
if (t < 32) {
constexpr int batch_size = 32;
int target_input_count = batch_size;
while (!s->error && s->input_value_count < s->num_input_values) {
// decode repetition and definition levels. these will attempt to decode at
// least up to the target, but may decode a few more.
if (has_repetition) {
gpuDecodeStream(s->rep, s, target_input_count, t, level_type::REPETITION);
}
gpuDecodeStream(s->def, s, target_input_count, t, level_type::DEFINITION);
__syncwarp();
// we may have decoded different amounts from each stream, so only process what we've been
int actual_input_count = has_repetition ? min(s->lvl_count[level_type::REPETITION],
s->lvl_count[level_type::DEFINITION])
: s->lvl_count[level_type::DEFINITION];
// process what we got back
gpuUpdatePageSizes(s, actual_input_count, t, trim_pass);
target_input_count = actual_input_count + batch_size;
__syncwarp();
}
}
// update # rows in the actual page
if (!t) {
pp->num_rows = s->page.nesting[0].size;
pp->skipped_values = s->page.skipped_values;
pp->skipped_leaf_values = s->page.skipped_leaf_values;
}
}
/**
* @brief Kernel for co the column data stored in the pages
*
* This function will write the page data and the page data's validity to the
* output specified in the page's column chunk. If necessary, additional
* conversion will be performed to translate from the Parquet datatype to
* desired output datatype (ex. 32-bit to 16-bit, string to hash).
*
* @param[in] pages List of pages
* @param[in,out] chunks List of column chunks
* @param[in] min_row Row index to start reading at
* @param[in] num_rows Maximum number of rows to read
* @param[in] num_chunks Number of column chunks
*/
// blockDim {block_size,1,1}
__global__ void __launch_bounds__(block_size) gpuDecodePageData(PageInfo* pages,
ColumnChunkDesc const* chunks,
size_t min_row,
size_t num_rows,
int32_t num_chunks)
{
__shared__ __align__(16) page_state_s state_g;
page_state_s* const s = &state_g;
int page_idx = blockIdx.x;
int t = threadIdx.x;
int out_thread0;
if (!setupLocalPageInfo(s, &pages[page_idx], chunks, min_row, num_rows, num_chunks)) { return; }
if (s->dict_base) {
out_thread0 = (s->dict_bits > 0) ? 64 : 32;
} else {
out_thread0 =
((s->col.data_type & 7) == BOOLEAN || (s->col.data_type & 7) == BYTE_ARRAY) ? 64 : 32;
}
// skipped_leaf_values will always be 0 for flat hierarchies.
uint32_t skipped_leaf_values = s->page.skipped_leaf_values;
while (!s->error && (s->input_value_count < s->num_input_values || s->src_pos < s->nz_count)) {
int target_pos;
int src_pos = s->src_pos;
if (t < out_thread0) {
target_pos =
min(src_pos + 2 * (block_size - out_thread0), s->nz_count + (block_size - out_thread0));
} else {
target_pos = min(s->nz_count, src_pos + block_size - out_thread0);
if (out_thread0 > 32) { target_pos = min(target_pos, s->dict_pos); }
}
__syncthreads();
if (t < 32) {
// decode repetition and definition levels.
// - update validity vectors
// - updates offsets (for nested columns)
// - produces non-NULL value indices in s->nz_idx for subsequent decoding
gpuDecodeLevels(s, target_pos, t);
} else if (t < out_thread0) {
// skipped_leaf_values will always be 0 for flat hierarchies.
uint32_t src_target_pos = target_pos + skipped_leaf_values;
// WARP1: Decode dictionary indices, booleans or string positions
if (s->dict_base) {
src_target_pos = gpuDecodeDictionaryIndices(s, src_target_pos, t & 0x1f);
} else if ((s->col.data_type & 7) == BOOLEAN) {
src_target_pos = gpuDecodeRleBooleans(s, src_target_pos, t & 0x1f);
} else if ((s->col.data_type & 7) == BYTE_ARRAY) {
gpuInitStringDescriptors(s, src_target_pos, t & 0x1f);
}
if (t == 32) { *(volatile int32_t*)&s->dict_pos = src_target_pos; }
} else {
// WARP1..WARP3: Decode values
int dtype = s->col.data_type & 7;
src_pos += t - out_thread0;
// the position in the output column/buffer
int dst_pos = s->nz_idx[rolling_index(src_pos)];
// for the flat hierarchy case we will be reading from the beginning of the value stream,
// regardless of the value of first_row. so adjust our destination offset accordingly.
// example:
// - user has passed skip_rows = 2, so our first_row to output is 2
// - the row values we get from nz_idx will be
// 0, 1, 2, 3, 4 ....
// - by shifting these values by first_row, the sequence becomes
// -1, -2, 0, 1, 2 ...
// - so we will end up ignoring the first two input rows, and input rows 2..n will
// get written to the output starting at position 0.
//
if (s->col.max_nesting_depth == 1) { dst_pos -= s->first_row; }
// target_pos will always be properly bounded by num_rows, but dst_pos may be negative (values
// before first_row) in the flat hierarchy case.
if (src_pos < target_pos && dst_pos >= 0) {
// src_pos represents the logical row position we want to read from. But in the case of
// nested hierarchies, there is no 1:1 mapping of rows to values. So our true read position
// has to take into account the # of values we have to skip in the page to get to the
// desired logical row. For flat hierarchies, skipped_leaf_values will always be 0.
uint32_t val_src_pos = src_pos + skipped_leaf_values;
// nesting level that is storing actual leaf values
int leaf_level_index = s->col.max_nesting_depth - 1;
uint32_t dtype_len = s->dtype_len;
void* dst =
s->page.nesting[leaf_level_index].data_out + static_cast<size_t>(dst_pos) * dtype_len;
if (dtype == BYTE_ARRAY) {
gpuOutputString(s, val_src_pos, dst);
} else if (dtype == BOOLEAN) {
gpuOutputBoolean(s, val_src_pos, static_cast<uint8_t*>(dst));
} else if (s->col.converted_type == DECIMAL) {
switch (dtype) {
case INT32: gpuOutputFast(s, val_src_pos, static_cast<uint32_t*>(dst)); break;
case INT64: gpuOutputFast(s, val_src_pos, static_cast<uint2*>(dst)); break;
default:
if (s->dtype_len_in <= sizeof(int32_t)) {
gpuOutputFixedLenByteArrayAsInt(s, val_src_pos, static_cast<int32_t*>(dst));
} else if (s->dtype_len_in <= sizeof(int64_t)) {
gpuOutputFixedLenByteArrayAsInt(s, val_src_pos, static_cast<int64_t*>(dst));
} else {
gpuOutputFixedLenByteArrayAsInt(s, val_src_pos, static_cast<__int128_t*>(dst));
}
break;
}
} else if (dtype == INT96) {
gpuOutputInt96Timestamp(s, val_src_pos, static_cast<int64_t*>(dst));
} else if (dtype_len == 8) {
if (s->ts_scale) {
gpuOutputInt64Timestamp(s, val_src_pos, static_cast<int64_t*>(dst));
} else {
gpuOutputFast(s, val_src_pos, static_cast<uint2*>(dst));
}
} else if (dtype_len == 4) {
gpuOutputFast(s, val_src_pos, static_cast<uint32_t*>(dst));
} else {
gpuOutputGeneric(s, val_src_pos, static_cast<uint8_t*>(dst), dtype_len);
}
}
if (t == out_thread0) { *(volatile int32_t*)&s->src_pos = target_pos; }
}
__syncthreads();
}
}
struct chunk_row_output_iter {
PageInfo* p;
using value_type = size_type;
using difference_type = size_type;
using pointer = size_type*;
using reference = size_type&;
using iterator_category = thrust::output_device_iterator_tag;
__host__ __device__ chunk_row_output_iter operator+(int i)
{
return chunk_row_output_iter{p + i};
}
__host__ __device__ void operator++() { p++; }
__device__ reference operator[](int i) { return p[i].chunk_row; }
__device__ reference operator*() { return p->chunk_row; }
__device__ void operator=(value_type v) { p->chunk_row = v; }
};
struct start_offset_output_iterator {
PageInfo* pages;
int* page_indices;
int cur_index;
int src_col_schema;
int nesting_depth;
int empty = 0;
using value_type = size_type;
using difference_type = size_type;
using pointer = size_type*;
using reference = size_type&;
using iterator_category = thrust::output_device_iterator_tag;
__host__ __device__ start_offset_output_iterator operator+(int i)
{
return start_offset_output_iterator{
pages, page_indices, cur_index + i, src_col_schema, nesting_depth};
}
__host__ __device__ void operator++() { cur_index++; }
__device__ reference operator[](int i) { return dereference(cur_index + i); }
__device__ reference operator*() { return dereference(cur_index); }
private:
__device__ reference dereference(int index)
{
PageInfo const& p = pages[page_indices[index]];
if (p.src_col_schema != src_col_schema || p.flags & PAGEINFO_FLAGS_DICTIONARY) { return empty; }
return p.nesting[nesting_depth].page_start_value;
}
};
/**
* @copydoc cudf::io::parquet::gpu::PreprocessColumnData
*/
void PreprocessColumnData(hostdevice_vector<PageInfo>& pages,
hostdevice_vector<ColumnChunkDesc> const& chunks,
std::vector<input_column_info>& input_columns,
std::vector<cudf::io::detail::column_buffer>& output_columns,
size_t num_rows,
size_t min_row,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
dim3 dim_block(block_size, 1);
dim3 dim_grid(pages.size(), 1); // 1 threadblock per page
// computes:
// PageNestingInfo::size for each level of nesting, for each page.
// The output from this does not take row bounds (num_rows, min_row) into account
hipLaunchKernelGGL(( gpuComputePageSizes), dim3(dim_grid), dim3(dim_block), 0, stream.value(),
pages.device_ptr(), chunks.device_ptr(), min_row, num_rows, chunks.size(), false);
stream.synchronize();
// computes:
// PageInfo::chunk_row for all pages
auto key_input = thrust::make_transform_iterator(
pages.device_ptr(), [] __device__(PageInfo const& page) { return page.chunk_idx; });
auto page_input = thrust::make_transform_iterator(
pages.device_ptr(), [] __device__(PageInfo const& page) { return page.num_rows; });
thrust::exclusive_scan_by_key(rmm::exec_policy(stream),
key_input,
key_input + pages.size(),
page_input,
chunk_row_output_iter{pages.device_ptr()});
// computes:
// PageNestingInfo::size for each level of nesting, for each page, taking row bounds into account.
// PageInfo::skipped_values, which tells us where to start decoding in the input
hipLaunchKernelGGL(( gpuComputePageSizes), dim3(dim_grid), dim3(dim_block), 0, stream.value(),
pages.device_ptr(), chunks.device_ptr(), min_row, num_rows, chunks.size(), true);
// retrieve pages back (PageInfo::num_rows has been set. if we don't bring it
// back, this value will get overwritten later on).
pages.device_to_host(stream, true);
// ordering of pages is by input column schema, repeated across row groups. so
// if we had 3 columns, each with 2 pages, and 1 row group, our schema values might look like
//
// 1, 1, 2, 2, 3, 3
//
// However, if we had more than one row group, the pattern would be
//
// 1, 1, 2, 2, 3, 3, 1, 1, 2, 2, 3, 3
// ^ row group 0 |
// ^ row group 1
//
// To use exclusive_scan_by_key, the ordering we actually want is
//
// 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3
//
// We also need to preserve key-relative page ordering, so we need to use a stable sort.
rmm::device_uvector<int> page_keys(pages.size(), stream);
rmm::device_uvector<int> page_index(pages.size(), stream);
{
thrust::transform(rmm::exec_policy(stream),
pages.device_ptr(),
pages.device_ptr() + pages.size(),
page_keys.begin(),
[] __device__(PageInfo const& page) { return page.src_col_schema; });
thrust::sequence(rmm::exec_policy(stream), page_index.begin(), page_index.end());
thrust::stable_sort_by_key(rmm::exec_policy(stream),
page_keys.begin(),
page_keys.end(),
page_index.begin(),
thrust::less<int>());
}
// compute output column sizes by examining the pages of the -input- columns
for (size_t idx = 0; idx < input_columns.size(); idx++) {
auto const& input_col = input_columns[idx];
auto src_col_schema = input_col.schema_idx;
size_t max_depth = input_col.nesting_depth();
auto* cols = &output_columns;
for (size_t l_idx = 0; l_idx < input_col.nesting_depth(); l_idx++) {
auto& out_buf = (*cols)[input_col.nesting[l_idx]];
cols = &out_buf.children;
// size iterator. indexes pages by sorted order
auto size_input = thrust::make_transform_iterator(
page_index.begin(),
[src_col_schema, l_idx, pages = pages.device_ptr()] __device__(int index) {
auto const& page = pages[index];
if (page.src_col_schema != src_col_schema || page.flags & PAGEINFO_FLAGS_DICTIONARY) {
return 0;
}
return page.nesting[l_idx].size;
});
// compute column size.
// for struct columns, higher levels of the output columns are shared between input
// columns. so don't compute any given level more than once.
if (out_buf.size == 0) {
int size = thrust::reduce(rmm::exec_policy(stream), size_input, size_input + pages.size());
// if this is a list column add 1 for non-leaf levels for the terminating offset
if (out_buf.type.id() == type_id::LIST && l_idx < max_depth) { size++; }
// allocate
out_buf.create(size, stream, mr);
}
// compute per-page start offset
thrust::exclusive_scan_by_key(rmm::exec_policy(stream),
page_keys.begin(),
page_keys.end(),
size_input,
start_offset_output_iterator{pages.device_ptr(),
page_index.begin(),
0,
static_cast<int>(src_col_schema),
static_cast<int>(l_idx)});
}
}
}
/**
* @copydoc cudf::io::parquet::gpu::DecodePageData
*/
void __host__ DecodePageData(hostdevice_vector<PageInfo>& pages,
hostdevice_vector<ColumnChunkDesc> const& chunks,
size_t num_rows,
size_t min_row,
rmm::cuda_stream_view stream)
{
dim3 dim_block(block_size, 1);
dim3 dim_grid(pages.size(), 1); // 1 threadblock per page
hipLaunchKernelGGL(( gpuDecodePageData), dim3(dim_grid), dim3(dim_block), 0, stream.value(),
pages.device_ptr(), chunks.device_ptr(), min_row, num_rows, chunks.size());
}
} // namespace gpu
} // namespace parquet
} // namespace io
} // namespace cudf
|
d84d4022130e6c9596a75cfae95c8e04e7a6eac8.cu
|
/*
* Copyright (c) 2018-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "parquet_gpu.hpp"
#include <io/utilities/block_utils.cuh>
#include <io/utilities/column_buffer.hpp>
#include <cudf/detail/utilities/assert.cuh>
#include <cudf/utilities/bit.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/functional.h>
#include <thrust/iterator/iterator_categories.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/iterator/transform_output_iterator.h>
#include <thrust/reduce.h>
#include <thrust/scan.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#include <thrust/transform.h>
#include <thrust/tuple.h>
constexpr int block_size = 128;
constexpr int non_zero_buffer_size = block_size * 2;
inline __device__ uint32_t rotl32(uint32_t x, uint32_t r)
{
return __funnelshift_l(x, x, r); // (x << r) | (x >> (32 - r));
}
inline __device__ int rolling_index(int index) { return index & (non_zero_buffer_size - 1); }
namespace cudf {
namespace io {
namespace parquet {
namespace gpu {
struct page_state_s {
const uint8_t* data_start;
const uint8_t* data_end;
const uint8_t* lvl_end;
const uint8_t* dict_base; // ptr to dictionary page data
int32_t dict_size; // size of dictionary data
int32_t first_row; // First row in page to output
int32_t num_rows; // Rows in page to decode (including rows to be skipped)
int32_t first_output_value; // First value in page to output
int32_t num_input_values; // total # of input/level values in the page
int32_t dtype_len; // Output data type length
int32_t dtype_len_in; // Can be larger than dtype_len if truncating 32-bit into 8-bit
int32_t dict_bits; // # of bits to store dictionary indices
uint32_t dict_run;
int32_t dict_val;
uint32_t initial_rle_run[NUM_LEVEL_TYPES]; // [def,rep]
int32_t initial_rle_value[NUM_LEVEL_TYPES]; // [def,rep]
int32_t error;
PageInfo page;
ColumnChunkDesc col;
// (leaf) value decoding
int32_t nz_count; // number of valid entries in nz_idx (write position in circular buffer)
int32_t dict_pos; // write position of dictionary indices
int32_t src_pos; // input read position of final output value
int32_t ts_scale; // timestamp scale: <0: divide by -ts_scale, >0: multiply by ts_scale
uint32_t nz_idx[non_zero_buffer_size]; // circular buffer of non-null value positions
uint32_t dict_idx[non_zero_buffer_size]; // Dictionary index, boolean, or string offset values
uint32_t str_len[non_zero_buffer_size]; // String length for plain encoding of strings
// repetition/definition level decoding
int32_t input_value_count; // how many values of the input we've processed
int32_t input_row_count; // how many rows of the input we've processed
int32_t input_leaf_count; // how many leaf values of the input we've processed
uint32_t rep[non_zero_buffer_size]; // circular buffer of repetition level values
uint32_t def[non_zero_buffer_size]; // circular buffer of definition level values
const uint8_t* lvl_start[NUM_LEVEL_TYPES]; // [def,rep]
int32_t lvl_count[NUM_LEVEL_TYPES]; // how many of each of the streams we've decoded
int32_t row_index_lower_bound; // lower bound of row indices we should process
};
/**
* @brief Computes a 32-bit hash when given a byte stream and range.
*
* MurmurHash3_32 implementation from
* https://github.com/aappleby/smhasher/blob/master/src/MurmurHash3.cpp
*
* MurmurHash3 was written by Austin Appleby, and is placed in the public
* domain. The author hereby disclaims copyright to this source code.
*
* @param[in] key The input data to hash
* @param[in] len The length of the input data
* @param[in] seed An initialization value
*
* @return The hash value
*/
__device__ uint32_t device_str2hash32(const char* key, size_t len, uint32_t seed = 33)
{
const auto* p = reinterpret_cast<const uint8_t*>(key);
uint32_t h1 = seed, k1;
const uint32_t c1 = 0xcc9e2d51;
const uint32_t c2 = 0x1b873593;
int l = len;
// body
while (l >= 4) {
k1 = p[0] | (p[1] << 8) | (p[2] << 16) | (p[3] << 24);
k1 *= c1;
k1 = rotl32(k1, 15);
k1 *= c2;
h1 ^= k1;
h1 = rotl32(h1, 13);
h1 = h1 * 5 + 0xe6546b64;
p += 4;
l -= 4;
}
// tail
k1 = 0;
switch (l) {
case 3: k1 ^= p[2] << 16;
case 2: k1 ^= p[1] << 8;
case 1:
k1 ^= p[0];
k1 *= c1;
k1 = rotl32(k1, 15);
k1 *= c2;
h1 ^= k1;
}
// finalization
h1 ^= len;
h1 ^= h1 >> 16;
h1 *= 0x85ebca6b;
h1 ^= h1 >> 13;
h1 *= 0xc2b2ae35;
h1 ^= h1 >> 16;
return h1;
}
/**
* @brief Read a 32-bit varint integer
*
* @param[in,out] cur The current data position, updated after the read
* @param[in] end The end data position
*
* @return The 32-bit value read
*/
inline __device__ uint32_t get_vlq32(const uint8_t*& cur, const uint8_t* end)
{
uint32_t v = *cur++;
if (v >= 0x80 && cur < end) {
v = (v & 0x7f) | ((*cur++) << 7);
if (v >= (0x80 << 7) && cur < end) {
v = (v & ((0x7f << 7) | 0x7f)) | ((*cur++) << 14);
if (v >= (0x80 << 14) && cur < end) {
v = (v & ((0x7f << 14) | (0x7f << 7) | 0x7f)) | ((*cur++) << 21);
if (v >= (0x80 << 21) && cur < end) {
v = (v & ((0x7f << 21) | (0x7f << 14) | (0x7f << 7) | 0x7f)) | ((*cur++) << 28);
}
}
}
}
return v;
}
/**
* @brief Parse the beginning of the level section (definition or repetition),
* initializes the initial RLE run & value, and returns the section length
*
* @param[in,out] s The page state
* @param[in] cur The current data position
* @param[in] end The end of the data
* @param[in] level_bits The bits required
*
* @return The length of the section
*/
__device__ uint32_t InitLevelSection(page_state_s* s,
const uint8_t* cur,
const uint8_t* end,
level_type lvl)
{
int32_t len;
int level_bits = s->col.level_bits[lvl];
Encoding encoding = lvl == level_type::DEFINITION ? s->page.definition_level_encoding
: s->page.repetition_level_encoding;
if (level_bits == 0) {
len = 0;
s->initial_rle_run[lvl] = s->page.num_input_values * 2; // repeated value
s->initial_rle_value[lvl] = 0;
s->lvl_start[lvl] = cur;
} else if (encoding == Encoding::RLE) {
if (cur + 4 < end) {
uint32_t run;
len = 4 + (cur[0]) + (cur[1] << 8) + (cur[2] << 16) + (cur[3] << 24);
cur += 4;
run = get_vlq32(cur, end);
s->initial_rle_run[lvl] = run;
if (!(run & 1)) {
int v = (cur < end) ? cur[0] : 0;
cur++;
if (level_bits > 8) {
v |= ((cur < end) ? cur[0] : 0) << 8;
cur++;
}
s->initial_rle_value[lvl] = v;
}
s->lvl_start[lvl] = cur;
if (cur > end) { s->error = 2; }
} else {
len = 0;
s->error = 2;
}
} else if (encoding == Encoding::BIT_PACKED) {
len = (s->page.num_input_values * level_bits + 7) >> 3;
s->initial_rle_run[lvl] = ((s->page.num_input_values + 7) >> 3) * 2 + 1; // literal run
s->initial_rle_value[lvl] = 0;
s->lvl_start[lvl] = cur;
} else {
s->error = 3;
len = 0;
}
return (uint32_t)len;
}
/**
* @brief Decode values out of a definition or repetition stream
*
* @param[in,out] s Page state input/output
* @param[in] t target_count Target count of stream values on output
* @param[in] t Warp0 thread ID (0..31)
* @param[in] lvl The level type we are decoding - DEFINITION or REPETITION
*/
__device__ void gpuDecodeStream(
uint32_t* output, page_state_s* s, int32_t target_count, int t, level_type lvl)
{
const uint8_t* cur_def = s->lvl_start[lvl];
const uint8_t* end = s->lvl_end;
uint32_t level_run = s->initial_rle_run[lvl];
int32_t level_val = s->initial_rle_value[lvl];
int level_bits = s->col.level_bits[lvl];
int32_t num_input_values = s->num_input_values;
int32_t value_count = s->lvl_count[lvl];
int32_t batch_coded_count = 0;
while (value_count < target_count && value_count < num_input_values) {
int batch_len;
if (level_run <= 1) {
// Get a new run symbol from the byte stream
int sym_len = 0;
if (!t) {
const uint8_t* cur = cur_def;
if (cur < end) { level_run = get_vlq32(cur, end); }
if (!(level_run & 1)) {
if (cur < end) level_val = cur[0];
cur++;
if (level_bits > 8) {
if (cur < end) level_val |= cur[0] << 8;
cur++;
}
}
if (cur > end || level_run <= 1) { s->error = 0x10; }
sym_len = (int32_t)(cur - cur_def);
__threadfence_block();
}
sym_len = shuffle(sym_len);
level_val = shuffle(level_val);
level_run = shuffle(level_run);
cur_def += sym_len;
}
if (s->error) { break; }
batch_len = min(num_input_values - value_count, 32);
if (level_run & 1) {
// Literal run
int batch_len8;
batch_len = min(batch_len, (level_run >> 1) * 8);
batch_len8 = (batch_len + 7) >> 3;
if (t < batch_len) {
int bitpos = t * level_bits;
const uint8_t* cur = cur_def + (bitpos >> 3);
bitpos &= 7;
if (cur < end) level_val = cur[0];
cur++;
if (level_bits > 8 - bitpos && cur < end) {
level_val |= cur[0] << 8;
cur++;
if (level_bits > 16 - bitpos && cur < end) level_val |= cur[0] << 16;
}
level_val = (level_val >> bitpos) & ((1 << level_bits) - 1);
}
level_run -= batch_len8 * 2;
cur_def += batch_len8 * level_bits;
} else {
// Repeated value
batch_len = min(batch_len, level_run >> 1);
level_run -= batch_len * 2;
}
if (t < batch_len) {
int idx = value_count + t;
output[idx & (non_zero_buffer_size - 1)] = level_val;
}
batch_coded_count += batch_len;
value_count += batch_len;
}
// update the stream info
if (!t) {
s->lvl_start[lvl] = cur_def;
s->initial_rle_run[lvl] = level_run;
s->initial_rle_value[lvl] = level_val;
s->lvl_count[lvl] = value_count;
}
}
/**
* @brief Performs RLE decoding of dictionary indexes
*
* @param[in,out] s Page state input/output
* @param[in] target_pos Target index position in dict_idx buffer (may exceed this value by up to
* 31)
* @param[in] t Warp1 thread ID (0..31)
*
* @return The new output position
*/
__device__ int gpuDecodeDictionaryIndices(volatile page_state_s* s, int target_pos, int t)
{
const uint8_t* end = s->data_end;
int dict_bits = s->dict_bits;
int pos = s->dict_pos;
while (pos < target_pos) {
int is_literal, batch_len;
if (!t) {
uint32_t run = s->dict_run;
const uint8_t* cur = s->data_start;
if (run <= 1) {
run = (cur < end) ? get_vlq32(cur, end) : 0;
if (!(run & 1)) {
// Repeated value
int bytecnt = (dict_bits + 7) >> 3;
if (cur + bytecnt <= end) {
int32_t run_val = cur[0];
if (bytecnt > 1) {
run_val |= cur[1] << 8;
if (bytecnt > 2) {
run_val |= cur[2] << 16;
if (bytecnt > 3) { run_val |= cur[3] << 24; }
}
}
s->dict_val = run_val & ((1 << dict_bits) - 1);
}
cur += bytecnt;
}
}
if (run & 1) {
// Literal batch: must output a multiple of 8, except for the last batch
int batch_len_div8;
batch_len = max(min(32, (int)(run >> 1) * 8), 1);
batch_len_div8 = (batch_len + 7) >> 3;
run -= batch_len_div8 * 2;
cur += batch_len_div8 * dict_bits;
} else {
batch_len = max(min(32, (int)(run >> 1)), 1);
run -= batch_len * 2;
}
s->dict_run = run;
s->data_start = cur;
is_literal = run & 1;
__threadfence_block();
}
__syncwarp();
is_literal = shuffle(is_literal);
batch_len = shuffle(batch_len);
if (t < batch_len) {
int dict_idx = s->dict_val;
if (is_literal) {
int32_t ofs = (t - ((batch_len + 7) & ~7)) * dict_bits;
const uint8_t* p = s->data_start + (ofs >> 3);
ofs &= 7;
if (p < end) {
uint32_t c = 8 - ofs;
dict_idx = (*p++) >> ofs;
if (c < dict_bits && p < end) {
dict_idx |= (*p++) << c;
c += 8;
if (c < dict_bits && p < end) {
dict_idx |= (*p++) << c;
c += 8;
if (c < dict_bits && p < end) { dict_idx |= (*p++) << c; }
}
}
dict_idx &= (1 << dict_bits) - 1;
}
}
s->dict_idx[(pos + t) & (non_zero_buffer_size - 1)] = dict_idx;
}
pos += batch_len;
}
return pos;
}
/**
* @brief Performs RLE decoding of dictionary indexes, for when dict_size=1
*
* @param[in,out] s Page state input/output
* @param[in] target_pos Target write position
* @param[in] t Thread ID
*
* @return The new output position
*/
__device__ int gpuDecodeRleBooleans(volatile page_state_s* s, int target_pos, int t)
{
const uint8_t* end = s->data_end;
int pos = s->dict_pos;
while (pos < target_pos) {
int is_literal, batch_len;
if (!t) {
uint32_t run = s->dict_run;
const uint8_t* cur = s->data_start;
if (run <= 1) {
run = (cur < end) ? get_vlq32(cur, end) : 0;
if (!(run & 1)) {
// Repeated value
s->dict_val = (cur < end) ? cur[0] & 1 : 0;
cur++;
}
}
if (run & 1) {
// Literal batch: must output a multiple of 8, except for the last batch
int batch_len_div8;
batch_len = max(min(32, (int)(run >> 1) * 8), 1);
if (batch_len >= 8) { batch_len &= ~7; }
batch_len_div8 = (batch_len + 7) >> 3;
run -= batch_len_div8 * 2;
cur += batch_len_div8;
} else {
batch_len = max(min(32, (int)(run >> 1)), 1);
run -= batch_len * 2;
}
s->dict_run = run;
s->data_start = cur;
is_literal = run & 1;
__threadfence_block();
}
__syncwarp();
is_literal = shuffle(is_literal);
batch_len = shuffle(batch_len);
if (t < batch_len) {
int dict_idx;
if (is_literal) {
int32_t ofs = t - ((batch_len + 7) & ~7);
const uint8_t* p = s->data_start + (ofs >> 3);
dict_idx = (p < end) ? (p[0] >> (ofs & 7u)) & 1 : 0;
} else {
dict_idx = s->dict_val;
}
s->dict_idx[(pos + t) & (non_zero_buffer_size - 1)] = dict_idx;
}
pos += batch_len;
}
return pos;
}
/**
* @brief Parses the length and position of strings
*
* @param[in,out] s Page state input/output
* @param[in] target_pos Target output position
* @param[in] t Thread ID
*
* @return The new output position
*/
__device__ void gpuInitStringDescriptors(volatile page_state_s* s, int target_pos, int t)
{
int pos = s->dict_pos;
// This step is purely serial
if (!t) {
const uint8_t* cur = s->data_start;
int dict_size = s->dict_size;
int k = s->dict_val;
while (pos < target_pos) {
int len;
if (k + 4 <= dict_size) {
len = (cur[k]) | (cur[k + 1] << 8) | (cur[k + 2] << 16) | (cur[k + 3] << 24);
k += 4;
if (k + len > dict_size) { len = 0; }
} else {
len = 0;
}
s->dict_idx[pos & (non_zero_buffer_size - 1)] = k;
s->str_len[pos & (non_zero_buffer_size - 1)] = len;
k += len;
pos++;
}
s->dict_val = k;
__threadfence_block();
}
}
/**
* @brief Output a string descriptor
*
* @param[in,out] s Page state input/output
* @param[in] src_pos Source position
* @param[in] dstv Pointer to row output data (string descriptor or 32-bit hash)
*/
inline __device__ void gpuOutputString(volatile page_state_s* s, int src_pos, void* dstv)
{
const char* ptr = nullptr;
size_t len = 0;
if (s->dict_base) {
// String dictionary
uint32_t dict_pos = (s->dict_bits > 0) ? s->dict_idx[src_pos & (non_zero_buffer_size - 1)] *
sizeof(string_index_pair)
: 0;
if (dict_pos < (uint32_t)s->dict_size) {
const auto* src = reinterpret_cast<const string_index_pair*>(s->dict_base + dict_pos);
ptr = src->first;
len = src->second;
}
} else {
// Plain encoding
uint32_t dict_pos = s->dict_idx[src_pos & (non_zero_buffer_size - 1)];
if (dict_pos <= (uint32_t)s->dict_size) {
ptr = reinterpret_cast<const char*>(s->data_start + dict_pos);
len = s->str_len[src_pos & (non_zero_buffer_size - 1)];
}
}
if (s->dtype_len == 4) {
// Output hash
*static_cast<uint32_t*>(dstv) = device_str2hash32(ptr, len);
} else {
// Output string descriptor
auto* dst = static_cast<string_index_pair*>(dstv);
dst->first = ptr;
dst->second = len;
}
}
/**
* @brief Output a boolean
*
* @param[in,out] s Page state input/output
* @param[in] src_pos Source position
* @param[in] dst Pointer to row output data
*/
inline __device__ void gpuOutputBoolean(volatile page_state_s* s, int src_pos, uint8_t* dst)
{
*dst = s->dict_idx[src_pos & (non_zero_buffer_size - 1)];
}
/**
* @brief Store a 32-bit data element
*
* @param[out] dst ptr to output
* @param[in] src8 raw input bytes
* @param[in] dict_pos byte position in dictionary
* @param[in] dict_size size of dictionary
*/
inline __device__ void gpuStoreOutput(uint32_t* dst,
const uint8_t* src8,
uint32_t dict_pos,
uint32_t dict_size)
{
uint32_t bytebuf;
unsigned int ofs = 3 & reinterpret_cast<size_t>(src8);
src8 -= ofs; // align to 32-bit boundary
ofs <<= 3; // bytes -> bits
if (dict_pos < dict_size) {
bytebuf = *reinterpret_cast<const uint32_t*>(src8 + dict_pos);
if (ofs) {
uint32_t bytebufnext = *reinterpret_cast<const uint32_t*>(src8 + dict_pos + 4);
bytebuf = __funnelshift_r(bytebuf, bytebufnext, ofs);
}
} else {
bytebuf = 0;
}
*dst = bytebuf;
}
/**
* @brief Store a 64-bit data element
*
* @param[out] dst ptr to output
* @param[in] src8 raw input bytes
* @param[in] dict_pos byte position in dictionary
* @param[in] dict_size size of dictionary
*/
inline __device__ void gpuStoreOutput(uint2* dst,
const uint8_t* src8,
uint32_t dict_pos,
uint32_t dict_size)
{
uint2 v;
unsigned int ofs = 3 & reinterpret_cast<size_t>(src8);
src8 -= ofs; // align to 32-bit boundary
ofs <<= 3; // bytes -> bits
if (dict_pos < dict_size) {
v.x = *reinterpret_cast<const uint32_t*>(src8 + dict_pos + 0);
v.y = *reinterpret_cast<const uint32_t*>(src8 + dict_pos + 4);
if (ofs) {
uint32_t next = *reinterpret_cast<const uint32_t*>(src8 + dict_pos + 8);
v.x = __funnelshift_r(v.x, v.y, ofs);
v.y = __funnelshift_r(v.y, next, ofs);
}
} else {
v.x = v.y = 0;
}
*dst = v;
}
/**
* @brief Convert an INT96 Spark timestamp to 64-bit timestamp
*
* @param[in,out] s Page state input/output
* @param[in] src_pos Source position
* @param[out] dst Pointer to row output data
*/
inline __device__ void gpuOutputInt96Timestamp(volatile page_state_s* s, int src_pos, int64_t* dst)
{
using cuda::std::chrono::duration_cast;
const uint8_t* src8;
uint32_t dict_pos, dict_size = s->dict_size, ofs;
if (s->dict_base) {
// Dictionary
dict_pos = (s->dict_bits > 0) ? s->dict_idx[src_pos & (non_zero_buffer_size - 1)] : 0;
src8 = s->dict_base;
} else {
// Plain
dict_pos = src_pos;
src8 = s->data_start;
}
dict_pos *= (uint32_t)s->dtype_len_in;
ofs = 3 & reinterpret_cast<size_t>(src8);
src8 -= ofs; // align to 32-bit boundary
ofs <<= 3; // bytes -> bits
if (dict_pos + 4 >= dict_size) {
*dst = 0;
return;
}
uint3 v;
int64_t nanos, days;
v.x = *reinterpret_cast<const uint32_t*>(src8 + dict_pos + 0);
v.y = *reinterpret_cast<const uint32_t*>(src8 + dict_pos + 4);
v.z = *reinterpret_cast<const uint32_t*>(src8 + dict_pos + 8);
if (ofs) {
uint32_t next = *reinterpret_cast<const uint32_t*>(src8 + dict_pos + 12);
v.x = __funnelshift_r(v.x, v.y, ofs);
v.y = __funnelshift_r(v.y, v.z, ofs);
v.z = __funnelshift_r(v.z, next, ofs);
}
nanos = v.y;
nanos <<= 32;
nanos |= v.x;
// Convert from Julian day at noon to UTC seconds
days = static_cast<int32_t>(v.z);
cudf::duration_D d_d{
days - 2440588}; // TBD: Should be noon instead of midnight, but this matches pyarrow
*dst = [&]() {
switch (s->col.ts_clock_rate) {
case 1: // seconds
return duration_cast<duration_s>(d_d).count() +
duration_cast<duration_s>(duration_ns{nanos}).count();
case 1'000: // milliseconds
return duration_cast<duration_ms>(d_d).count() +
duration_cast<duration_ms>(duration_ns{nanos}).count();
case 1'000'000: // microseconds
return duration_cast<duration_us>(d_d).count() +
duration_cast<duration_us>(duration_ns{nanos}).count();
case 1'000'000'000: // nanoseconds
default: return duration_cast<cudf::duration_ns>(d_d).count() + nanos;
}
}();
}
/**
* @brief Output a 64-bit timestamp
*
* @param[in,out] s Page state input/output
* @param[in] src_pos Source position
* @param[in] dst Pointer to row output data
*/
inline __device__ void gpuOutputInt64Timestamp(volatile page_state_s* s, int src_pos, int64_t* dst)
{
const uint8_t* src8;
uint32_t dict_pos, dict_size = s->dict_size, ofs;
int64_t ts;
if (s->dict_base) {
// Dictionary
dict_pos = (s->dict_bits > 0) ? s->dict_idx[src_pos & (non_zero_buffer_size - 1)] : 0;
src8 = s->dict_base;
} else {
// Plain
dict_pos = src_pos;
src8 = s->data_start;
}
dict_pos *= (uint32_t)s->dtype_len_in;
ofs = 3 & reinterpret_cast<size_t>(src8);
src8 -= ofs; // align to 32-bit boundary
ofs <<= 3; // bytes -> bits
if (dict_pos + 4 < dict_size) {
uint2 v;
int64_t val;
int32_t ts_scale;
v.x = *reinterpret_cast<const uint32_t*>(src8 + dict_pos + 0);
v.y = *reinterpret_cast<const uint32_t*>(src8 + dict_pos + 4);
if (ofs) {
uint32_t next = *reinterpret_cast<const uint32_t*>(src8 + dict_pos + 8);
v.x = __funnelshift_r(v.x, v.y, ofs);
v.y = __funnelshift_r(v.y, next, ofs);
}
val = v.y;
val <<= 32;
val |= v.x;
// Output to desired clock rate
ts_scale = s->ts_scale;
if (ts_scale < 0) {
// round towards negative infinity
int sign = (val < 0);
ts = ((val + sign) / -ts_scale) + sign;
} else {
ts = val * ts_scale;
}
} else {
ts = 0;
}
*dst = ts;
}
/**
* @brief Output a fixed-length byte array as int.
*
* @param[in,out] s Page state input/output
* @param[in] src_pos Source position
* @param[in] dst Pointer to row output data
*/
template <typename T>
__device__ void gpuOutputFixedLenByteArrayAsInt(volatile page_state_s* s, int src_pos, T* dst)
{
uint32_t const dtype_len_in = s->dtype_len_in;
uint8_t const* data = s->dict_base ? s->dict_base : s->data_start;
uint32_t const pos =
(s->dict_base ? ((s->dict_bits > 0) ? s->dict_idx[src_pos & (non_zero_buffer_size - 1)] : 0)
: src_pos) *
dtype_len_in;
uint32_t const dict_size = s->dict_size;
T unscaled = 0;
for (unsigned int i = 0; i < dtype_len_in; i++) {
uint32_t v = (pos + i < dict_size) ? data[pos + i] : 0;
unscaled = (unscaled << 8) | v;
}
// Shift the unscaled value up and back down when it isn't all 8 bytes,
// which sign extend the value for correctly representing negative numbers.
if (dtype_len_in < sizeof(T)) {
unscaled <<= (sizeof(T) - dtype_len_in) * 8;
unscaled >>= (sizeof(T) - dtype_len_in) * 8;
}
*dst = unscaled;
}
/**
* @brief Output a small fixed-length value
*
* @param[in,out] s Page state input/output
* @param[in] src_pos Source position
* @param[in] dst Pointer to row output data
*/
template <typename T>
inline __device__ void gpuOutputFast(volatile page_state_s* s, int src_pos, T* dst)
{
const uint8_t* dict;
uint32_t dict_pos, dict_size = s->dict_size;
if (s->dict_base) {
// Dictionary
dict_pos = (s->dict_bits > 0) ? s->dict_idx[src_pos & (non_zero_buffer_size - 1)] : 0;
dict = s->dict_base;
} else {
// Plain
dict_pos = src_pos;
dict = s->data_start;
}
dict_pos *= (uint32_t)s->dtype_len_in;
gpuStoreOutput(dst, dict, dict_pos, dict_size);
}
/**
* @brief Output a N-byte value
*
* @param[in,out] s Page state input/output
* @param[in] src_pos Source position
* @param[in] dst8 Pointer to row output data
* @param[in] len Length of element
*/
static __device__ void gpuOutputGeneric(volatile page_state_s* s,
int src_pos,
uint8_t* dst8,
int len)
{
const uint8_t* dict;
uint32_t dict_pos, dict_size = s->dict_size;
if (s->dict_base) {
// Dictionary
dict_pos = (s->dict_bits > 0) ? s->dict_idx[src_pos & (non_zero_buffer_size - 1)] : 0;
dict = s->dict_base;
} else {
// Plain
dict_pos = src_pos;
dict = s->data_start;
}
dict_pos *= (uint32_t)s->dtype_len_in;
if (len & 3) {
// Generic slow path
for (unsigned int i = 0; i < len; i++) {
dst8[i] = (dict_pos + i < dict_size) ? dict[dict_pos + i] : 0;
}
} else {
// Copy 4 bytes at a time
const uint8_t* src8 = dict;
unsigned int ofs = 3 & reinterpret_cast<size_t>(src8);
src8 -= ofs; // align to 32-bit boundary
ofs <<= 3; // bytes -> bits
for (unsigned int i = 0; i < len; i += 4) {
uint32_t bytebuf;
if (dict_pos < dict_size) {
bytebuf = *reinterpret_cast<const uint32_t*>(src8 + dict_pos);
if (ofs) {
uint32_t bytebufnext = *reinterpret_cast<const uint32_t*>(src8 + dict_pos + 4);
bytebuf = __funnelshift_r(bytebuf, bytebufnext, ofs);
}
} else {
bytebuf = 0;
}
dict_pos += 4;
*reinterpret_cast<uint32_t*>(dst8 + i) = bytebuf;
}
}
}
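// Note on the unaligned-read pattern used above (illustrative): if dict points at an address
// ending in ...6, then ofs == 2 bytes == 16 bits and src8 is rounded down to ...4. Reading the
// two aligned words at src8 + dict_pos and src8 + dict_pos + 4 and combining them with
// __funnelshift_r(lo, hi, 16) yields the 32 bits starting at the original unaligned address,
// since __funnelshift_r returns the low 32 bits of ((hi:lo) >> shift).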
/**
* @brief Sets up block-local page state information from the global pages.
*
* @param[in, out] s The local page state to be filled in
* @param[in] p The global page to be copied from
* @param[in] chunks The global list of chunks
 * @param[in] min_row Crop all rows below min_row
 * @param[in] num_rows Maximum number of rows to read
 * @param[in] num_chunks Number of column chunks
*/
static __device__ bool setupLocalPageInfo(page_state_s* const s,
PageInfo* p,
ColumnChunkDesc const* chunks,
size_t min_row,
size_t num_rows,
int32_t num_chunks)
{
int t = threadIdx.x;
int chunk_idx;
// Fetch page info
if (t == 0) s->page = *p;
__syncthreads();
if (s->page.flags & PAGEINFO_FLAGS_DICTIONARY) { return false; }
// Fetch column chunk info
chunk_idx = s->page.chunk_idx;
if (t == 0) { s->col = chunks[chunk_idx]; }
// zero nested value and valid counts
int d = 0;
while (d < s->page.num_nesting_levels) {
if (d + t < s->page.num_nesting_levels) {
s->page.nesting[d + t].valid_count = 0;
s->page.nesting[d + t].value_count = 0;
s->page.nesting[d + t].null_count = 0;
}
d += blockDim.x;
}
__syncthreads();
if (!t) {
s->error = 0;
// our starting row (absolute index) is
// col.start_row == absolute row index
// page.chunk-row == relative row index within the chunk
size_t page_start_row = s->col.start_row + s->page.chunk_row;
// IMPORTANT : nested schemas can have 0 rows in a page but still have
// values. The case is:
// - On page N-1, the last row starts, with 2/6 values encoded
// - On page N, the remaining 4/6 values are encoded, but there are no new rows.
// if (s->page.num_input_values > 0 && s->page.num_rows > 0) {
if (s->page.num_input_values > 0) {
uint8_t* cur = s->page.page_data;
uint8_t* end = cur + s->page.uncompressed_page_size;
uint32_t dtype_len_out = s->col.data_type >> 3;
s->ts_scale = 0;
// Validate data type
auto const data_type = s->col.data_type & 7;
switch (data_type) {
case BOOLEAN:
s->dtype_len = 1; // Boolean are stored as 1 byte on the output
break;
case INT32:
case FLOAT: s->dtype_len = 4; break;
case INT64:
if (s->col.ts_clock_rate) {
int32_t units = 0;
if (s->col.converted_type == TIME_MILLIS or s->col.converted_type == TIMESTAMP_MILLIS) {
units = cudf::timestamp_ms::period::den;
} else if (s->col.converted_type == TIME_MICROS or
s->col.converted_type == TIMESTAMP_MICROS) {
units = cudf::timestamp_us::period::den;
} else if (s->col.logical_type.TIMESTAMP.unit.isset.NANOS) {
units = cudf::timestamp_ns::period::den;
}
if (units and units != s->col.ts_clock_rate) {
s->ts_scale = (s->col.ts_clock_rate < units) ? -(units / s->col.ts_clock_rate)
: (s->col.ts_clock_rate / units);
}
}
// Fall through to DOUBLE
case DOUBLE: s->dtype_len = 8; break;
case INT96: s->dtype_len = 12; break;
case BYTE_ARRAY: s->dtype_len = sizeof(string_index_pair); break;
default: // FIXED_LEN_BYTE_ARRAY:
s->dtype_len = dtype_len_out;
s->error |= (s->dtype_len <= 0);
break;
}
// Special check for downconversions
s->dtype_len_in = s->dtype_len;
if (s->col.converted_type == DECIMAL && data_type == FIXED_LEN_BYTE_ARRAY) {
s->dtype_len = s->dtype_len <= sizeof(int32_t) ? sizeof(int32_t)
: s->dtype_len <= sizeof(int64_t) ? sizeof(int64_t)
: sizeof(__int128_t);
} else if (data_type == INT32) {
if (dtype_len_out == 1) s->dtype_len = 1; // INT8 output
if (dtype_len_out == 2) s->dtype_len = 2; // INT16 output
} else if (data_type == BYTE_ARRAY && dtype_len_out == 4) {
s->dtype_len = 4; // HASH32 output
} else if (data_type == INT96) {
s->dtype_len = 8; // Convert to 64-bit timestamp
}
// first row within the page to output
if (page_start_row >= min_row) {
s->first_row = 0;
} else {
s->first_row = (int32_t)min(min_row - page_start_row, (size_t)s->page.num_rows);
}
// # of rows within the page to output
s->num_rows = s->page.num_rows;
if ((page_start_row + s->first_row) + s->num_rows > min_row + num_rows) {
s->num_rows =
(int32_t)max((int64_t)(min_row + num_rows - (page_start_row + s->first_row)), INT64_C(0));
}
// during the decoding step we need to offset the global output buffers
// for each level of nesting so that we write to the section this page
// is responsible for.
// - for flat schemas, we can do this directly by using row counts
// - for nested schemas, these offsets are computed during the preprocess step
if (s->col.column_data_base != nullptr) {
int max_depth = s->col.max_nesting_depth;
for (int idx = 0; idx < max_depth; idx++) {
PageNestingInfo* pni = &s->page.nesting[idx];
size_t output_offset;
// schemas without lists
if (s->col.max_level[level_type::REPETITION] == 0) {
output_offset = page_start_row >= min_row ? page_start_row - min_row : 0;
}
        // for schemas with lists, we've already got the exact value precomputed
else {
output_offset = pni->page_start_value;
}
pni->data_out = static_cast<uint8_t*>(s->col.column_data_base[idx]);
if (pni->data_out != nullptr) {
// anything below max depth with a valid data pointer must be a list, so the
// element size is the size of the offset type.
uint32_t len = idx < max_depth - 1 ? sizeof(cudf::size_type) : s->dtype_len;
pni->data_out += (output_offset * len);
}
pni->valid_map = s->col.valid_map_base[idx];
if (pni->valid_map != nullptr) {
pni->valid_map += output_offset >> 5;
pni->valid_map_offset = (int32_t)(output_offset & 0x1f);
}
}
}
s->first_output_value = 0;
// Find the compressed size of repetition levels
cur += InitLevelSection(s, cur, end, level_type::REPETITION);
// Find the compressed size of definition levels
cur += InitLevelSection(s, cur, end, level_type::DEFINITION);
s->dict_bits = 0;
s->dict_base = nullptr;
s->dict_size = 0;
switch (s->page.encoding) {
case Encoding::PLAIN_DICTIONARY:
case Encoding::RLE_DICTIONARY:
// RLE-packed dictionary indices, first byte indicates index length in bits
if (((s->col.data_type & 7) == BYTE_ARRAY) && (s->col.str_dict_index)) {
// String dictionary: use index
s->dict_base = reinterpret_cast<const uint8_t*>(s->col.str_dict_index);
s->dict_size = s->col.page_info[0].num_input_values * sizeof(string_index_pair);
} else {
s->dict_base =
s->col.page_info[0].page_data; // dictionary is always stored in the first page
s->dict_size = s->col.page_info[0].uncompressed_page_size;
}
s->dict_run = 0;
s->dict_val = 0;
s->dict_bits = (cur < end) ? *cur++ : 0;
if (s->dict_bits > 32 || !s->dict_base) { s->error = (10 << 8) | s->dict_bits; }
break;
case Encoding::PLAIN:
s->dict_size = static_cast<int32_t>(end - cur);
s->dict_val = 0;
if ((s->col.data_type & 7) == BOOLEAN) { s->dict_run = s->dict_size * 2 + 1; }
break;
case Encoding::RLE: s->dict_run = 0; break;
default:
s->error = 1; // Unsupported encoding
break;
}
if (cur > end) { s->error = 1; }
s->lvl_end = cur;
s->data_start = cur;
s->data_end = end;
} else {
s->error = 1;
}
s->lvl_count[level_type::REPETITION] = 0;
s->lvl_count[level_type::DEFINITION] = 0;
s->nz_count = 0;
s->num_input_values = s->page.num_input_values;
s->dict_pos = 0;
s->src_pos = 0;
// for flat hierarchies, we can't know how many leaf values to skip unless we do a full
// preprocess of the definition levels (since nulls will have no actual decodable value, there
// is no direct correlation between # of rows and # of decodable values). so we will start
// processing at the beginning of the value stream and disregard any indices that start
// before the first row.
if (s->col.max_level[level_type::REPETITION] == 0) {
s->page.skipped_values = 0;
s->page.skipped_leaf_values = 0;
s->input_value_count = 0;
s->input_row_count = 0;
s->row_index_lower_bound = -1;
}
// for nested hierarchies, we have run a preprocess that lets us skip directly to the values
// we need to start decoding at
else {
// input_row_count translates to "how many rows we have processed so far", so since we are
// skipping directly to where we want to start decoding, set it to first_row
s->input_row_count = s->first_row;
// return the lower bound to compare (page-relative) thread row index against. Explanation:
// In the case of nested schemas, rows can span page boundaries. That is to say,
// we can encounter the first value for row X on page M, but the last value for page M
// might not be the last value for row X. page M+1 (or further) may contain the last value.
//
// This means that the first values we encounter for a given page (M+1) may not belong to the
// row indicated by chunk_row, but to the row before it that spanned page boundaries. If that
// previous row is within the overall row bounds, include the values by allowing relative row
// index -1
int const max_row = (min_row + num_rows) - 1;
if (min_row < page_start_row && max_row >= page_start_row - 1) {
s->row_index_lower_bound = -1;
} else {
s->row_index_lower_bound = s->first_row;
}
// if we're in the decoding step, jump directly to the first
// value we care about
if (s->col.column_data_base != nullptr) {
s->input_value_count = s->page.skipped_values > -1 ? s->page.skipped_values : 0;
} else {
s->input_value_count = 0;
s->input_leaf_count = 0;
s->page.skipped_values = -1;
s->page.skipped_leaf_values = -1;
}
}
__threadfence_block();
}
__syncthreads();
return true;
}
/**
* @brief Store a validity mask containing value_count bits into the output validity buffer of the
* page.
*
* @param[in,out] pni The page/nesting information to store the mask in. The validity map offset is
* also updated
* @param[in] valid_mask The validity mask to be stored
* @param[in] value_count # of bits in the validity mask
*/
static __device__ void store_validity(PageNestingInfo* pni,
uint32_t valid_mask,
int32_t value_count)
{
int word_offset = pni->valid_map_offset / 32;
int bit_offset = pni->valid_map_offset % 32;
// if we fit entirely in the output word
if (bit_offset + value_count <= 32) {
auto relevant_mask = static_cast<uint32_t>((static_cast<uint64_t>(1) << value_count) - 1);
if (relevant_mask == ~0) {
pni->valid_map[word_offset] = valid_mask;
} else {
atomicAnd(pni->valid_map + word_offset, ~(relevant_mask << bit_offset));
atomicOr(pni->valid_map + word_offset, (valid_mask & relevant_mask) << bit_offset);
}
}
// we're going to spill over into the next word.
// note : writing both values here is the lazy/slow way. we could be writing just
// the first word and rolling the remaining bits over into the next call.
  // however, some basic performance tests show almost no difference between these two
// methods. More detailed performance testing might be worthwhile here.
else {
uint32_t bits_left = 32 - bit_offset;
// first word. strip bits_left bits off the beginning and store that
uint32_t relevant_mask = ((1 << bits_left) - 1);
uint32_t mask_word0 = valid_mask & relevant_mask;
atomicAnd(pni->valid_map + word_offset, ~(relevant_mask << bit_offset));
atomicOr(pni->valid_map + word_offset, mask_word0 << bit_offset);
// second word. strip the remainder of the bits off the end and store that
relevant_mask = ((1 << (value_count - bits_left)) - 1);
uint32_t mask_word1 = valid_mask & (relevant_mask << bits_left);
atomicAnd(pni->valid_map + word_offset + 1, ~(relevant_mask));
atomicOr(pni->valid_map + word_offset + 1, mask_word1 >> bits_left);
}
pni->valid_map_offset += value_count;
}
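// Worked example for the spill-over branch above (illustrative): valid_map_offset == 28 and
// value_count == 10 gives bits_left == 4, so the low 4 bits of valid_mask land in bits 28..31
// of word 0, the next 6 bits (bits 4..9 of valid_mask) land in bits 0..5 of word 1, and
// valid_map_offset advances to 38.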
/**
 * @brief Compute the nesting bounds within the hierarchy to add values to, and the definition
 * level d at which we should consider them null or not.
*
* @param[out] start_depth The start nesting depth
* @param[out] end_depth The end nesting depth (inclusive)
* @param[out] d The definition level up to which added values are not-null. if t is out of bounds,
* d will be -1
* @param[in] s Local page information
* @param[in] input_value_count The current count of input level values we have processed
* @param[in] target_input_value_count The desired # of input level values we want to process
* @param[in] t Thread index
*/
inline __device__ void get_nesting_bounds(int& start_depth,
int& end_depth,
int& d,
page_state_s* s,
int input_value_count,
int32_t target_input_value_count,
int t)
{
start_depth = -1;
end_depth = -1;
d = -1;
if (input_value_count + t < target_input_value_count) {
int index = rolling_index(input_value_count + t);
d = s->def[index];
// if we have repetition (there are list columns involved) we have to
// bound what nesting levels we apply values to
if (s->col.max_level[level_type::REPETITION] > 0) {
int r = s->rep[index];
start_depth = s->page.nesting[r].start_depth;
end_depth = s->page.nesting[d].end_depth;
}
// for columns without repetition (even ones involving structs) we always
// traverse the entire hierarchy.
else {
start_depth = 0;
end_depth = s->col.max_nesting_depth - 1;
}
}
}
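// Illustrative example (an assumption about typical nesting info, not from the original source):
// for a list<int32> column, a value that starts a brand-new row has repetition level r == 0, so
// start_depth == nesting[0].start_depth == 0 (the outermost level); if the leaf value itself is
// non-null, d equals the column's maximum definition level and end_depth resolves to the leaf
// depth (max_nesting_depth - 1).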
/**
* @brief Process a batch of incoming repetition/definition level values and generate
* validity, nested column offsets (where appropriate) and decoding indices.
*
* @param[in] target_input_value_count The # of repetition/definition levels to process up to
* @param[in] s Local page information
* @param[in] t Thread index
*/
static __device__ void gpuUpdateValidityOffsetsAndRowIndices(int32_t target_input_value_count,
page_state_s* s,
int t)
{
// max nesting depth of the column
int const max_depth = s->col.max_nesting_depth;
// how many (input) values we've processed in the page so far
int input_value_count = s->input_value_count;
// how many rows we've processed in the page so far
int input_row_count = s->input_row_count;
// process until we've reached the target
while (input_value_count < target_input_value_count) {
// determine the nesting bounds for this thread (the range of nesting depths we
// will generate new value indices and validity bits for)
int start_depth, end_depth, d;
get_nesting_bounds(
start_depth, end_depth, d, s, input_value_count, target_input_value_count, t);
// 4 interesting things to track:
// thread_value_count : # of output values from the view of this thread
// warp_value_count : # of output values for the whole warp
//
// thread_valid_count : # of valid values from the view of this thread
// warp_valid_count : # of valid values for the whole warp
uint32_t thread_value_count, warp_value_count;
uint32_t thread_valid_count, warp_valid_count;
// track (page-relative) row index for the thread so we can compare against input bounds
// keep track of overall # of rows we've read.
int const is_new_row = start_depth == 0 ? 1 : 0;
uint32_t const warp_row_count_mask = ballot(is_new_row);
int32_t const thread_row_index =
input_row_count + ((__popc(warp_row_count_mask & ((1 << t) - 1)) + is_new_row) - 1);
input_row_count += __popc(warp_row_count_mask);
// is this thread within read row bounds?
int const in_row_bounds = thread_row_index >= s->row_index_lower_bound &&
thread_row_index < (s->first_row + s->num_rows)
? 1
: 0;
// compute warp and thread value counts
uint32_t const warp_count_mask =
ballot((0 >= start_depth && 0 <= end_depth) && in_row_bounds ? 1 : 0);
warp_value_count = __popc(warp_count_mask);
// Note : ((1 << t) - 1) implies "for all threads before me"
thread_value_count = __popc(warp_count_mask & ((1 << t) - 1));
// walk from 0 to max_depth
uint32_t next_thread_value_count, next_warp_value_count;
for (int s_idx = 0; s_idx < max_depth; s_idx++) {
PageNestingInfo* pni = &s->page.nesting[s_idx];
// if we are within the range of nesting levels we should be adding value indices for
int const in_nesting_bounds =
((s_idx >= start_depth && s_idx <= end_depth) && in_row_bounds) ? 1 : 0;
// everything up to the max_def_level is a non-null value
uint32_t const is_valid = d >= pni->max_def_level && in_nesting_bounds ? 1 : 0;
// compute warp and thread valid counts
uint32_t const warp_valid_mask =
// for flat schemas, a simple ballot_sync gives us the correct count and bit positions
// because every value in the input matches to a value in the output
max_depth == 1
? ballot(is_valid)
:
// for nested schemas, it's more complicated. This warp will visit 32 incoming values,
// however not all of them will necessarily represent a value at this nesting level. so
// the validity bit for thread t might actually represent output value t-6. the correct
// position for thread t's bit is cur_value_count. for cuda 11 we could use
// __reduce_or_sync(), but until then we have to do a warp reduce.
WarpReduceOr32(is_valid << thread_value_count);
thread_valid_count = __popc(warp_valid_mask & ((1 << thread_value_count) - 1));
warp_valid_count = __popc(warp_valid_mask);
// if this is the value column emit an index for value decoding
if (is_valid && s_idx == max_depth - 1) {
int const src_pos = pni->valid_count + thread_valid_count;
int const dst_pos = pni->value_count + thread_value_count;
// nz_idx is a mapping of src buffer indices to destination buffer indices
s->nz_idx[rolling_index(src_pos)] = dst_pos;
}
// compute warp and thread value counts for the -next- nesting level. we need to
// do this for nested schemas so that we can emit an offset for the -current- nesting
// level. more concretely : the offset for the current nesting level == current length of the
// next nesting level
if (s_idx < max_depth - 1) {
uint32_t const next_warp_count_mask =
ballot((s_idx + 1 >= start_depth && s_idx + 1 <= end_depth && in_row_bounds) ? 1 : 0);
next_warp_value_count = __popc(next_warp_count_mask);
next_thread_value_count = __popc(next_warp_count_mask & ((1 << t) - 1));
// if we're -not- at a leaf column and we're within nesting/row bounds
// and we have a valid data_out pointer, it implies this is a list column, so
// emit an offset.
if (in_nesting_bounds && pni->data_out != nullptr) {
int const idx = pni->value_count + thread_value_count;
cudf::size_type const ofs = s->page.nesting[s_idx + 1].value_count +
next_thread_value_count +
s->page.nesting[s_idx + 1].page_start_value;
(reinterpret_cast<cudf::size_type*>(pni->data_out))[idx] = ofs;
}
}
// nested schemas always read and write to the same bounds (that is, read and write positions
// are already pre-bounded by first_row/num_rows). flat schemas will start reading at the
// first value, even if that is before first_row, because we cannot trivially jump to
// the correct position to start reading. since we are about to write the validity vector here
// we need to adjust our computed mask to take into account the write row bounds.
int const in_write_row_bounds =
max_depth == 1
? thread_row_index >= s->first_row && thread_row_index < (s->first_row + s->num_rows)
: in_row_bounds;
int const first_thread_in_write_range =
max_depth == 1 ? __ffs(ballot(in_write_row_bounds)) - 1 : 0;
      // # of bits of the validity mask to write out
int const warp_valid_mask_bit_count =
first_thread_in_write_range < 0 ? 0 : warp_value_count - first_thread_in_write_range;
// increment count of valid values, count of total values, and update validity mask
if (!t) {
if (pni->valid_map != nullptr && warp_valid_mask_bit_count > 0) {
uint32_t const warp_output_valid_mask = warp_valid_mask >> first_thread_in_write_range;
store_validity(pni, warp_output_valid_mask, warp_valid_mask_bit_count);
pni->null_count += warp_valid_mask_bit_count - __popc(warp_output_valid_mask);
}
pni->valid_count += warp_valid_count;
pni->value_count += warp_value_count;
}
// propagate value counts for the next level
warp_value_count = next_warp_value_count;
thread_value_count = next_thread_value_count;
}
input_value_count += min(32, (target_input_value_count - input_value_count));
__syncwarp();
}
// update
if (!t) {
// update valid value count for decoding and total # of values we've processed
s->nz_count = s->page.nesting[max_depth - 1].valid_count;
s->input_value_count = input_value_count;
s->input_row_count = input_row_count;
}
}
/**
* @brief Process repetition and definition levels up to the target count of leaf values.
*
* In order to decode actual leaf values from the input stream, we need to generate the
* list of non-null value positions (page_state_s::nz_idx). We do this by processing
* the repetition and definition level streams. This process also generates validity information,
* and offset column values in the case of nested schemas. Because of the way the streams
* are encoded, this function may generate slightly more than target_leaf_count.
*
* Only runs on 1 warp.
*
* @param[in] s The local page state
* @param[in] target_leaf_count Target count of non-null leaf values to generate indices for
* @param[in] t Thread index
*/
__device__ void gpuDecodeLevels(page_state_s* s, int32_t target_leaf_count, int t)
{
bool has_repetition = s->col.max_level[level_type::REPETITION] > 0;
constexpr int batch_size = 32;
int cur_leaf_count = target_leaf_count;
while (!s->error && s->nz_count < target_leaf_count &&
s->input_value_count < s->num_input_values) {
if (has_repetition) { gpuDecodeStream(s->rep, s, cur_leaf_count, t, level_type::REPETITION); }
gpuDecodeStream(s->def, s, cur_leaf_count, t, level_type::DEFINITION);
__syncwarp();
// because the rep and def streams are encoded separately, we cannot request an exact
// # of values to be decoded at once. we can only process the lowest # of decoded rep/def
// levels we get.
int actual_leaf_count = has_repetition ? min(s->lvl_count[level_type::REPETITION],
s->lvl_count[level_type::DEFINITION])
: s->lvl_count[level_type::DEFINITION];
// process what we got back
gpuUpdateValidityOffsetsAndRowIndices(actual_leaf_count, s, t);
cur_leaf_count = actual_leaf_count + batch_size;
__syncwarp();
}
}
/**
* @brief Process a batch of incoming repetition/definition level values to generate
* per-nesting level output column size for this page.
*
* Each page represents one piece of the overall output column. The total output (cudf)
* column sizes are the sum of the values in each individual page.
*
* @param[in] s The local page info
* @param[in] target_input_value_count The # of repetition/definition levels to process up to
* @param[in] t Thread index
* @param[in] bounds_set Whether or not s->row_index_lower_bound, s->first_row and s->num_rows
* have been computed for this page (they will only be set in the second/trim pass).
*/
static __device__ void gpuUpdatePageSizes(page_state_s* s,
int32_t target_input_value_count,
int t,
bool bounds_set)
{
// max nesting depth of the column
int max_depth = s->col.max_nesting_depth;
// bool has_repetition = s->col.max_level[level_type::REPETITION] > 0 ? true : false;
// how many input level values we've processed in the page so far
int input_value_count = s->input_value_count;
// how many leaf values we've processed in the page so far
int input_leaf_count = s->input_leaf_count;
// how many rows we've processed in the page so far
int input_row_count = s->input_row_count;
while (input_value_count < target_input_value_count) {
int start_depth, end_depth, d;
get_nesting_bounds(
start_depth, end_depth, d, s, input_value_count, target_input_value_count, t);
// count rows and leaf values
int is_new_row = start_depth == 0 ? 1 : 0;
uint32_t warp_row_count_mask = ballot(is_new_row);
int is_new_leaf = (d >= s->page.nesting[max_depth - 1].max_def_level) ? 1 : 0;
uint32_t warp_leaf_count_mask = ballot(is_new_leaf);
// is this thread within row bounds? on the first pass we don't know the bounds, so we will be
// computing the full size of the column. on the second pass, we will know our actual row
// bounds, so the computation will cap sizes properly.
int in_row_bounds = 1;
if (bounds_set) {
// absolute row index
int32_t thread_row_index =
input_row_count + ((__popc(warp_row_count_mask & ((1 << t) - 1)) + is_new_row) - 1);
in_row_bounds = thread_row_index >= s->row_index_lower_bound &&
thread_row_index < (s->first_row + s->num_rows)
? 1
: 0;
uint32_t row_bounds_mask = ballot(in_row_bounds);
int first_thread_in_range = __ffs(row_bounds_mask) - 1;
// if we've found the beginning of the first row, mark down the position
// in the def/repetition buffer (skipped_values) and the data buffer (skipped_leaf_values)
if (!t && first_thread_in_range >= 0 && s->page.skipped_values < 0) {
// how many values we've skipped in the rep/def levels
s->page.skipped_values = input_value_count + first_thread_in_range;
// how many values we've skipped in the actual data stream
s->page.skipped_leaf_values =
input_leaf_count + __popc(warp_leaf_count_mask & ((1 << first_thread_in_range) - 1));
}
}
// increment counts across all nesting depths
for (int s_idx = 0; s_idx < max_depth; s_idx++) {
// if we are within the range of nesting levels we should be adding value indices for
int in_nesting_bounds = (s_idx >= start_depth && s_idx <= end_depth && in_row_bounds) ? 1 : 0;
uint32_t count_mask = ballot(in_nesting_bounds);
if (!t) { s->page.nesting[s_idx].size += __popc(count_mask); }
}
input_value_count += min(32, (target_input_value_count - input_value_count));
input_row_count += __popc(warp_row_count_mask);
input_leaf_count += __popc(warp_leaf_count_mask);
}
// update final page value count
if (!t) {
s->input_value_count = target_input_value_count;
s->input_leaf_count = input_leaf_count;
s->input_row_count = input_row_count;
}
}
/**
* @brief Kernel for computing per-page column size information for all nesting levels.
*
* This function will write out the size field for each level of nesting.
*
* @param[in,out] pages List of pages
 * @param[in] chunks List of column chunks
 * @param[in] min_row Row index to start reading at
 * @param[in] num_rows Maximum number of rows to read
 * @param[in] num_chunks Number of column chunks
* @param[in] trim_pass Whether or not this is the trim pass. We first have to compute
* the full size information of every page before we come through in a second (trim) pass
* to determine what subset of rows in this page we should be reading.
*/
// blockDim {block_size,1,1}
__global__ void __launch_bounds__(block_size) gpuComputePageSizes(PageInfo* pages,
ColumnChunkDesc const* chunks,
size_t min_row,
size_t num_rows,
int32_t num_chunks,
bool trim_pass)
{
__shared__ __align__(16) page_state_s state_g;
page_state_s* const s = &state_g;
int page_idx = blockIdx.x;
int t = threadIdx.x;
PageInfo* pp = &pages[page_idx];
if (!setupLocalPageInfo(
s, pp, chunks, trim_pass ? min_row : 0, trim_pass ? num_rows : INT_MAX, num_chunks)) {
return;
}
// zero sizes
int d = 0;
while (d < s->page.num_nesting_levels) {
if (d + t < s->page.num_nesting_levels) { s->page.nesting[d + t].size = 0; }
d += blockDim.x;
}
if (!t) {
s->page.skipped_values = -1;
s->page.skipped_leaf_values = -1;
s->input_row_count = 0;
s->input_value_count = 0;
// if this isn't the trim pass, make sure we visit absolutely everything
if (!trim_pass) {
s->first_row = 0;
s->num_rows = INT_MAX;
s->row_index_lower_bound = -1;
}
}
__syncthreads();
bool has_repetition = s->col.max_level[level_type::REPETITION] > 0;
// optimization : it might be useful to have a version of gpuDecodeStream that could go wider than
// 1 warp. Currently it only uses 1 warp so that it can overlap work with the value decoding step
// when in the actual value decoding kernel. However, during this preprocess step we have no such
// limits - we could go as wide as block_size
if (t < 32) {
constexpr int batch_size = 32;
int target_input_count = batch_size;
while (!s->error && s->input_value_count < s->num_input_values) {
// decode repetition and definition levels. these will attempt to decode at
// least up to the target, but may decode a few more.
if (has_repetition) {
gpuDecodeStream(s->rep, s, target_input_count, t, level_type::REPETITION);
}
gpuDecodeStream(s->def, s, target_input_count, t, level_type::DEFINITION);
__syncwarp();
      // we may have decoded different amounts from each stream, so only process what we've been
      // able to decode in both
int actual_input_count = has_repetition ? min(s->lvl_count[level_type::REPETITION],
s->lvl_count[level_type::DEFINITION])
: s->lvl_count[level_type::DEFINITION];
// process what we got back
gpuUpdatePageSizes(s, actual_input_count, t, trim_pass);
target_input_count = actual_input_count + batch_size;
__syncwarp();
}
}
// update # rows in the actual page
if (!t) {
pp->num_rows = s->page.nesting[0].size;
pp->skipped_values = s->page.skipped_values;
pp->skipped_leaf_values = s->page.skipped_leaf_values;
}
}
/**
 * @brief Kernel for decoding the column data stored in the pages
*
* This function will write the page data and the page data's validity to the
* output specified in the page's column chunk. If necessary, additional
* conversion will be performed to translate from the Parquet datatype to
* desired output datatype (ex. 32-bit to 16-bit, string to hash).
*
* @param[in] pages List of pages
* @param[in,out] chunks List of column chunks
* @param[in] min_row Row index to start reading at
* @param[in] num_rows Maximum number of rows to read
* @param[in] num_chunks Number of column chunks
*/
// blockDim {block_size,1,1}
__global__ void __launch_bounds__(block_size) gpuDecodePageData(PageInfo* pages,
ColumnChunkDesc const* chunks,
size_t min_row,
size_t num_rows,
int32_t num_chunks)
{
__shared__ __align__(16) page_state_s state_g;
page_state_s* const s = &state_g;
int page_idx = blockIdx.x;
int t = threadIdx.x;
int out_thread0;
if (!setupLocalPageInfo(s, &pages[page_idx], chunks, min_row, num_rows, num_chunks)) { return; }
if (s->dict_base) {
out_thread0 = (s->dict_bits > 0) ? 64 : 32;
} else {
out_thread0 =
((s->col.data_type & 7) == BOOLEAN || (s->col.data_type & 7) == BYTE_ARRAY) ? 64 : 32;
}
// skipped_leaf_values will always be 0 for flat hierarchies.
uint32_t skipped_leaf_values = s->page.skipped_leaf_values;
while (!s->error && (s->input_value_count < s->num_input_values || s->src_pos < s->nz_count)) {
int target_pos;
int src_pos = s->src_pos;
if (t < out_thread0) {
target_pos =
min(src_pos + 2 * (block_size - out_thread0), s->nz_count + (block_size - out_thread0));
} else {
target_pos = min(s->nz_count, src_pos + block_size - out_thread0);
if (out_thread0 > 32) { target_pos = min(target_pos, s->dict_pos); }
}
__syncthreads();
if (t < 32) {
// decode repetition and definition levels.
// - update validity vectors
// - updates offsets (for nested columns)
// - produces non-NULL value indices in s->nz_idx for subsequent decoding
gpuDecodeLevels(s, target_pos, t);
} else if (t < out_thread0) {
// skipped_leaf_values will always be 0 for flat hierarchies.
uint32_t src_target_pos = target_pos + skipped_leaf_values;
// WARP1: Decode dictionary indices, booleans or string positions
if (s->dict_base) {
src_target_pos = gpuDecodeDictionaryIndices(s, src_target_pos, t & 0x1f);
} else if ((s->col.data_type & 7) == BOOLEAN) {
src_target_pos = gpuDecodeRleBooleans(s, src_target_pos, t & 0x1f);
} else if ((s->col.data_type & 7) == BYTE_ARRAY) {
gpuInitStringDescriptors(s, src_target_pos, t & 0x1f);
}
if (t == 32) { *(volatile int32_t*)&s->dict_pos = src_target_pos; }
} else {
// WARP1..WARP3: Decode values
int dtype = s->col.data_type & 7;
src_pos += t - out_thread0;
// the position in the output column/buffer
int dst_pos = s->nz_idx[rolling_index(src_pos)];
// for the flat hierarchy case we will be reading from the beginning of the value stream,
// regardless of the value of first_row. so adjust our destination offset accordingly.
// example:
// - user has passed skip_rows = 2, so our first_row to output is 2
// - the row values we get from nz_idx will be
// 0, 1, 2, 3, 4 ....
// - by shifting these values by first_row, the sequence becomes
      // -2, -1, 0, 1, 2 ...
// - so we will end up ignoring the first two input rows, and input rows 2..n will
// get written to the output starting at position 0.
//
if (s->col.max_nesting_depth == 1) { dst_pos -= s->first_row; }
// target_pos will always be properly bounded by num_rows, but dst_pos may be negative (values
// before first_row) in the flat hierarchy case.
if (src_pos < target_pos && dst_pos >= 0) {
// src_pos represents the logical row position we want to read from. But in the case of
// nested hierarchies, there is no 1:1 mapping of rows to values. So our true read position
// has to take into account the # of values we have to skip in the page to get to the
// desired logical row. For flat hierarchies, skipped_leaf_values will always be 0.
uint32_t val_src_pos = src_pos + skipped_leaf_values;
// nesting level that is storing actual leaf values
int leaf_level_index = s->col.max_nesting_depth - 1;
uint32_t dtype_len = s->dtype_len;
void* dst =
s->page.nesting[leaf_level_index].data_out + static_cast<size_t>(dst_pos) * dtype_len;
if (dtype == BYTE_ARRAY) {
gpuOutputString(s, val_src_pos, dst);
} else if (dtype == BOOLEAN) {
gpuOutputBoolean(s, val_src_pos, static_cast<uint8_t*>(dst));
} else if (s->col.converted_type == DECIMAL) {
switch (dtype) {
case INT32: gpuOutputFast(s, val_src_pos, static_cast<uint32_t*>(dst)); break;
case INT64: gpuOutputFast(s, val_src_pos, static_cast<uint2*>(dst)); break;
default:
if (s->dtype_len_in <= sizeof(int32_t)) {
gpuOutputFixedLenByteArrayAsInt(s, val_src_pos, static_cast<int32_t*>(dst));
} else if (s->dtype_len_in <= sizeof(int64_t)) {
gpuOutputFixedLenByteArrayAsInt(s, val_src_pos, static_cast<int64_t*>(dst));
} else {
gpuOutputFixedLenByteArrayAsInt(s, val_src_pos, static_cast<__int128_t*>(dst));
}
break;
}
} else if (dtype == INT96) {
gpuOutputInt96Timestamp(s, val_src_pos, static_cast<int64_t*>(dst));
} else if (dtype_len == 8) {
if (s->ts_scale) {
gpuOutputInt64Timestamp(s, val_src_pos, static_cast<int64_t*>(dst));
} else {
gpuOutputFast(s, val_src_pos, static_cast<uint2*>(dst));
}
} else if (dtype_len == 4) {
gpuOutputFast(s, val_src_pos, static_cast<uint32_t*>(dst));
} else {
gpuOutputGeneric(s, val_src_pos, static_cast<uint8_t*>(dst), dtype_len);
}
}
if (t == out_thread0) { *(volatile int32_t*)&s->src_pos = target_pos; }
}
__syncthreads();
}
}
struct chunk_row_output_iter {
PageInfo* p;
using value_type = size_type;
using difference_type = size_type;
using pointer = size_type*;
using reference = size_type&;
using iterator_category = thrust::output_device_iterator_tag;
__host__ __device__ chunk_row_output_iter operator+(int i)
{
return chunk_row_output_iter{p + i};
}
__host__ __device__ void operator++() { p++; }
__device__ reference operator[](int i) { return p[i].chunk_row; }
__device__ reference operator*() { return p->chunk_row; }
__device__ void operator=(value_type v) { p->chunk_row = v; }
};
struct start_offset_output_iterator {
PageInfo* pages;
int* page_indices;
int cur_index;
int src_col_schema;
int nesting_depth;
int empty = 0;
using value_type = size_type;
using difference_type = size_type;
using pointer = size_type*;
using reference = size_type&;
using iterator_category = thrust::output_device_iterator_tag;
__host__ __device__ start_offset_output_iterator operator+(int i)
{
return start_offset_output_iterator{
pages, page_indices, cur_index + i, src_col_schema, nesting_depth};
}
__host__ __device__ void operator++() { cur_index++; }
__device__ reference operator[](int i) { return dereference(cur_index + i); }
__device__ reference operator*() { return dereference(cur_index); }
private:
__device__ reference dereference(int index)
{
PageInfo const& p = pages[page_indices[index]];
if (p.src_col_schema != src_col_schema || p.flags & PAGEINFO_FLAGS_DICTIONARY) { return empty; }
return p.nesting[nesting_depth].page_start_value;
}
};
/**
* @copydoc cudf::io::parquet::gpu::PreprocessColumnData
*/
void PreprocessColumnData(hostdevice_vector<PageInfo>& pages,
hostdevice_vector<ColumnChunkDesc> const& chunks,
std::vector<input_column_info>& input_columns,
std::vector<cudf::io::detail::column_buffer>& output_columns,
size_t num_rows,
size_t min_row,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
dim3 dim_block(block_size, 1);
dim3 dim_grid(pages.size(), 1); // 1 threadblock per page
// computes:
// PageNestingInfo::size for each level of nesting, for each page.
// The output from this does not take row bounds (num_rows, min_row) into account
gpuComputePageSizes<<<dim_grid, dim_block, 0, stream.value()>>>(
pages.device_ptr(), chunks.device_ptr(), min_row, num_rows, chunks.size(), false);
stream.synchronize();
// computes:
// PageInfo::chunk_row for all pages
auto key_input = thrust::make_transform_iterator(
pages.device_ptr(), [] __device__(PageInfo const& page) { return page.chunk_idx; });
auto page_input = thrust::make_transform_iterator(
pages.device_ptr(), [] __device__(PageInfo const& page) { return page.num_rows; });
thrust::exclusive_scan_by_key(rmm::exec_policy(stream),
key_input,
key_input + pages.size(),
page_input,
chunk_row_output_iter{pages.device_ptr()});
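  // Illustrative example (not from the original source): three pages of the same chunk with
  // num_rows == {10, 20, 15} produce chunk_row == {0, 10, 30} via the exclusive scan above,
  // i.e. each page learns which row of its chunk it starts at.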
// computes:
// PageNestingInfo::size for each level of nesting, for each page, taking row bounds into account.
// PageInfo::skipped_values, which tells us where to start decoding in the input
gpuComputePageSizes<<<dim_grid, dim_block, 0, stream.value()>>>(
pages.device_ptr(), chunks.device_ptr(), min_row, num_rows, chunks.size(), true);
// retrieve pages back (PageInfo::num_rows has been set. if we don't bring it
// back, this value will get overwritten later on).
pages.device_to_host(stream, true);
// ordering of pages is by input column schema, repeated across row groups. so
// if we had 3 columns, each with 2 pages, and 1 row group, our schema values might look like
//
// 1, 1, 2, 2, 3, 3
//
// However, if we had more than one row group, the pattern would be
//
// 1, 1, 2, 2, 3, 3, 1, 1, 2, 2, 3, 3
// ^ row group 0 |
// ^ row group 1
//
// To use exclusive_scan_by_key, the ordering we actually want is
//
// 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3
//
// We also need to preserve key-relative page ordering, so we need to use a stable sort.
rmm::device_uvector<int> page_keys(pages.size(), stream);
rmm::device_uvector<int> page_index(pages.size(), stream);
{
thrust::transform(rmm::exec_policy(stream),
pages.device_ptr(),
pages.device_ptr() + pages.size(),
page_keys.begin(),
[] __device__(PageInfo const& page) { return page.src_col_schema; });
thrust::sequence(rmm::exec_policy(stream), page_index.begin(), page_index.end());
thrust::stable_sort_by_key(rmm::exec_policy(stream),
page_keys.begin(),
page_keys.end(),
page_index.begin(),
thrust::less<int>());
}
// compute output column sizes by examining the pages of the -input- columns
for (size_t idx = 0; idx < input_columns.size(); idx++) {
auto const& input_col = input_columns[idx];
auto src_col_schema = input_col.schema_idx;
size_t max_depth = input_col.nesting_depth();
auto* cols = &output_columns;
for (size_t l_idx = 0; l_idx < input_col.nesting_depth(); l_idx++) {
auto& out_buf = (*cols)[input_col.nesting[l_idx]];
cols = &out_buf.children;
// size iterator. indexes pages by sorted order
auto size_input = thrust::make_transform_iterator(
page_index.begin(),
[src_col_schema, l_idx, pages = pages.device_ptr()] __device__(int index) {
auto const& page = pages[index];
if (page.src_col_schema != src_col_schema || page.flags & PAGEINFO_FLAGS_DICTIONARY) {
return 0;
}
return page.nesting[l_idx].size;
});
// compute column size.
// for struct columns, higher levels of the output columns are shared between input
// columns. so don't compute any given level more than once.
if (out_buf.size == 0) {
int size = thrust::reduce(rmm::exec_policy(stream), size_input, size_input + pages.size());
// if this is a list column add 1 for non-leaf levels for the terminating offset
if (out_buf.type.id() == type_id::LIST && l_idx < max_depth) { size++; }
// allocate
out_buf.create(size, stream, mr);
}
// compute per-page start offset
thrust::exclusive_scan_by_key(rmm::exec_policy(stream),
page_keys.begin(),
page_keys.end(),
size_input,
start_offset_output_iterator{pages.device_ptr(),
page_index.begin(),
0,
static_cast<int>(src_col_schema),
static_cast<int>(l_idx)});
}
}
}
/**
* @copydoc cudf::io::parquet::gpu::DecodePageData
*/
void __host__ DecodePageData(hostdevice_vector<PageInfo>& pages,
hostdevice_vector<ColumnChunkDesc> const& chunks,
size_t num_rows,
size_t min_row,
rmm::cuda_stream_view stream)
{
dim3 dim_block(block_size, 1);
dim3 dim_grid(pages.size(), 1); // 1 threadblock per page
gpuDecodePageData<<<dim_grid, dim_block, 0, stream.value()>>>(
pages.device_ptr(), chunks.device_ptr(), min_row, num_rows, chunks.size());
}
} // namespace gpu
} // namespace parquet
} // namespace io
} // namespace cudf
|
2a0fc606a4e5fb28e63172f593bdcc88775c431a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* LI Yang
* 20750699
* [email protected]
*
* COMPILE: nvcc -std=c++11 clustering_cuda_skeleton.cu clustering_impl.cpp main.cpp -o cuda
* RUN: ./cuda <path> <epsilon> <mu> <num_blocks_per_grid> <num_threads_per_block>
*/
#include <iostream>
#include "clustering.h"
// Define variables or functions here
__device__ int get_num_com_nbrs(int *nbrs, int left_start, int left_end, int right_start, int right_end)
{
int left_pos = left_start, right_pos = right_start, num_com_nbrs = 0;
while (left_pos < left_end && right_pos < right_end) {
if (nbrs[left_pos] == nbrs[right_pos]) {
num_com_nbrs++;
left_pos++;
right_pos++;
} else if (nbrs[left_pos] < nbrs[right_pos]) {
left_pos++;
} else {
right_pos++;
}
}
return num_com_nbrs;
}
void expansion(int cur_id, int num_clusters, int *num_sim_nbrs, int **sim_nbrs,
bool *visited, bool *pivots, int *cluster_result)
{
for (int i = 0; i < num_sim_nbrs[cur_id]; i++)
{
int nbr_id = sim_nbrs[cur_id][i];
if ((pivots[nbr_id])&&(!visited[nbr_id]))
{
visited[nbr_id] = true;
cluster_result[nbr_id] = num_clusters;
expansion(nbr_id, num_clusters, num_sim_nbrs, sim_nbrs, visited, pivots,
cluster_result);
}
}
}
__global__ void stage_1(int num_vs_G, int* Device_nbr_offs, int* Device_nbrs,
int num_blocks_per_grid, int num_threads_per_block,
bool* pivots, int* num_sim_nbrs, int* sim_nbrs,
int* size_index, float epsilon, int mu)
{
int my_thread_rank = blockDim.x * blockIdx.x + threadIdx.x;
int num_threads = num_blocks_per_grid * num_threads_per_block;
/*
//Change to use the coalesced access (faster than the way used in ass2)
int local_num_vs = ( (num_vs_G - 1) / num_threads ) + 1;
int my_first = my_thread_rank * local_num_vs;
int last = (my_thread_rank + 1) * local_num_vs - 1;
int my_last = (last < num_vs_G ? last : (num_vs_G - 1));
*/
//printf("my_rank is %d, my_first is %d, my_last is %d \n", my_thread_rank, my_first, my_last);
for (int i = my_thread_rank; i < num_vs_G; i+= num_threads)
{
int left_start = Device_nbr_offs[i];
int left_end = Device_nbr_offs[i + 1];
int left_size = left_end - left_start;
//printf("device: i %d, left_start is %d, left_end is %d, left_size is %d \n", i, left_start, left_end, left_size);
//sim_nbrs[i] = new int[left_size];
// loop over all neighbors of i
for (int j = left_start; j < left_end; j++) {
int nbr_id = Device_nbrs[j];
int right_start = Device_nbr_offs[nbr_id];
int right_end = Device_nbr_offs[nbr_id + 1];
int right_size = right_end - right_start;
// compute the similarity
int num_com_nbrs = get_num_com_nbrs(Device_nbrs, left_start, left_end, right_start, right_end);
float sim = (num_com_nbrs + 2) / std::sqrt((left_size + 1.0) * (right_size + 1.0));
if (sim > epsilon) {
//sim_nbrs[i][num_sim_nbrs[i]] = nbr_id;
sim_nbrs[size_index[i]+num_sim_nbrs[i]] = nbr_id;
num_sim_nbrs[i]++;
}
}
if (num_sim_nbrs[i] > mu) pivots[i] = true;
}
__syncthreads();
}
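// Worked example for the similarity formula above (illustrative): if vertex u has the sorted
// neighbor list {v, a, b} and v has {u, a, b}, then num_com_nbrs == 2 (a and b). The "+2" and
// "+1.0" terms account for u and v belonging to their own closed neighborhoods, giving
// sim = (2 + 2) / sqrt(4.0 * 4.0) = 1.0.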
/*
// how to implement stage 2 on GPU without the read/write lock ??
__global__ void stage_2(int num_vs_G, int num_blocks_per_grid, int num_threads_per_block,
bool* pivots, int* num_sim_nbrs, int* sim_nbrs,
bool* visited, int* cluster_result, int* num_clusters,
int* size_index)
{
int my_thread_rank = blockDim.x * blockIdx.x + threadIdx.x;
int num_threads = num_blocks_per_grid * num_threads_per_block;
for (int i = my_thread_rank; i < num_vs_G; i += num_threads)
{
if (!pivots[i] || visited[i]) continue;
if ( cluster_result[i] > i || cluster_result[nbr_id] == -1 )
{
visited[i] = true;
cluster_result[i] = i;
expansion(i, i, num_sim_nbrs, sim_nbrs, visited, pivots, cluster_result, size_index);
(*num_clusters)++;
}
else continue;
}
__syncthreads();
}
*/
// Host code
void cuda_scan(int num_vs, int num_es, int *nbr_offs, int *nbrs,
float epsilon, int mu, int num_blocks_per_grid, int num_threads_per_block,
int &num_clusters, int *cluster_result)
{
// pivots for host and device
bool* Device_pivots, *Host_pivots;
    hipMalloc((void**)&Device_pivots, sizeof(bool) * num_vs );
    hipMemset(Device_pivots, 0, sizeof(bool) * num_vs); // device memory is not zero-initialized; pivots must start as false
Host_pivots = new bool[num_vs]();
// sim_nbrs for host and device. 2D array to 1D array, use 1D array on GPU
int* Host_num_sim_nbrs;
Host_num_sim_nbrs = new int[num_vs]();
int** Host_sim_nbrs = new int*[num_vs]();
int* Host_num_nbrs = new int[num_vs]();
int* Host_size_index = new int[num_vs]();
int left_start, left_end, left_size, size_index(0);
for (int i = 0; i < num_vs; i++)
{
left_start = nbr_offs[i];
left_end = nbr_offs[i+1];
left_size = left_end - left_start;
Host_sim_nbrs[i] = new int[left_size];
Host_num_nbrs[i] = left_size;
Host_size_index[i] = size_index;
size_index += left_size;
}
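    // Illustrative example (not from the original source): for vertex degrees {2, 3, 1} the loop
    // above yields Host_size_index == {0, 2, 5} and size_index == 6, so vertex i's similar
    // neighbors occupy the flat-array slots [Host_size_index[i], Host_size_index[i] + degree(i)).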
int* Device_num_sim_nbrs, *Device_sim_nbrs;
    hipMalloc((void**)&Device_num_sim_nbrs, sizeof(int) * num_vs );
    hipMemset(Device_num_sim_nbrs, 0, sizeof(int) * num_vs); // stage_1 increments these counters, so they must start at zero
hipMalloc((void**)&Device_sim_nbrs, sizeof(int) * size_index );
// Pass the size index to GPU
int* Device_size_index;
hipMalloc((void**)&Device_size_index, sizeof(int) * num_vs );
hipMemcpy(Device_size_index, Host_size_index, sizeof(int) * num_vs, hipMemcpyHostToDevice);
// Malloc and copy nbr_offs and nbrs to device. <global memory>
int *Device_nbr_offs, *Device_nbrs;
hipMalloc((void**)&Device_nbr_offs, sizeof(int) * (num_vs + 1));
hipMalloc((void**)&Device_nbrs, sizeof(int) * (num_es + 1));
hipMemcpy(Device_nbr_offs, nbr_offs, sizeof(int) * (num_vs + 1), hipMemcpyHostToDevice);
hipMemcpy(Device_nbrs, nbrs, sizeof(int) * (num_es + 1), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( stage_1), dim3(num_blocks_per_grid), dim3(num_threads_per_block), 0, 0, num_vs, Device_nbr_offs, Device_nbrs,
num_blocks_per_grid, num_threads_per_block,
Device_pivots, Device_num_sim_nbrs, Device_sim_nbrs,
Device_size_index, epsilon, mu);
hipDeviceSynchronize();
// Pass back the pivots results and the sim_nbrs results for stage 2
hipMemcpy(Host_pivots, Device_pivots, sizeof(bool) * num_vs, hipMemcpyDeviceToHost);
hipMemcpy(Host_num_sim_nbrs, Device_num_sim_nbrs, sizeof(int) * num_vs, hipMemcpyDeviceToHost);
for (int i = 0; i < num_vs; i++)
{
hipMemcpy(Host_sim_nbrs[i], Device_sim_nbrs + Host_size_index[i], sizeof(int) * Host_num_nbrs[i], hipMemcpyDeviceToHost);
}
// Stage 2
bool* visited = new bool[num_vs]();
for (int i = 0; i < num_vs; i++)
{
if (!Host_pivots[i] || visited[i]) continue;
visited[i] = true;
cluster_result[i] = i;
expansion(i, i, Host_num_sim_nbrs, Host_sim_nbrs, visited, Host_pivots, cluster_result);
num_clusters++;
}
/*
bool* Device_visited;
hipMalloc((void**)&Device_visited, sizeof(int) * num_vs);
int* Device_cluster_result;
hipMalloc((void**)&Device_cluster_result, sizeof(int) * num_vs);
int* Device_num_clusters;
hipMalloc((void**)&Device_num_clusters, sizeof(int));
hipMemcpy(Device_cluster_result, cluster_result, sizeof(int) * num_vs);
stage_2<<<num_blocks_per_grid, num_threads_per_block>>>(num_vs, num_blocks_per_grid, num_threads_per_block,
Device_pivots, Device_num_sim_nbrs, Device_sim_nbrs,
Device_visited, Device_cluster_result, Device_num_clusters,
Device_size_index);
hipMemcpy(cluster_result, Device_cluster_result, sizeof(int) * num_vs, hipMemcpyDeviceToHost);
hipMemcpy(&num_clusters, Device_num_clusters, sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(Host_pivots, Device_pivots, sizeof(bool) * num_vs, hipMemcpyDeviceToHost);
*/
for(int i = 0; i < num_vs; i++)
{
delete[] Host_sim_nbrs[i];
}
delete[] Host_sim_nbrs;
delete[] Host_num_nbrs;
delete[] Host_pivots;
delete[] Host_num_sim_nbrs;
delete[] visited;
delete[] Host_size_index;
hipFree(Device_nbr_offs);
hipFree(Device_nbrs);
hipFree(Device_num_sim_nbrs);
hipFree(Device_sim_nbrs);
hipFree(Device_pivots);
hipFree(Device_size_index);
// Fill in the cuda_scan function here
}
|
2a0fc606a4e5fb28e63172f593bdcc88775c431a.cu
|
/*
* LI Yang
* 20750699
* [email protected]
*
* COMPILE: nvcc -std=c++11 clustering_cuda_skeleton.cu clustering_impl.cpp main.cpp -o cuda
* RUN: ./cuda <path> <epsilon> <mu> <num_blocks_per_grid> <num_threads_per_block>
*/
#include <iostream>
#include "clustering.h"
// Define variables or functions here
__device__ int get_num_com_nbrs(int *nbrs, int left_start, int left_end, int right_start, int right_end)
{
int left_pos = left_start, right_pos = right_start, num_com_nbrs = 0;
while (left_pos < left_end && right_pos < right_end) {
if (nbrs[left_pos] == nbrs[right_pos]) {
num_com_nbrs++;
left_pos++;
right_pos++;
} else if (nbrs[left_pos] < nbrs[right_pos]) {
left_pos++;
} else {
right_pos++;
}
}
return num_com_nbrs;
}
void expansion(int cur_id, int num_clusters, int *num_sim_nbrs, int **sim_nbrs,
bool *visited, bool *pivots, int *cluster_result)
{
for (int i = 0; i < num_sim_nbrs[cur_id]; i++)
{
int nbr_id = sim_nbrs[cur_id][i];
if ((pivots[nbr_id])&&(!visited[nbr_id]))
{
visited[nbr_id] = true;
cluster_result[nbr_id] = num_clusters;
expansion(nbr_id, num_clusters, num_sim_nbrs, sim_nbrs, visited, pivots,
cluster_result);
}
}
}
__global__ void stage_1(int num_vs_G, int* Device_nbr_offs, int* Device_nbrs,
int num_blocks_per_grid, int num_threads_per_block,
bool* pivots, int* num_sim_nbrs, int* sim_nbrs,
int* size_index, float epsilon, int mu)
{
int my_thread_rank = blockDim.x * blockIdx.x + threadIdx.x;
int num_threads = num_blocks_per_grid * num_threads_per_block;
/*
//Change to use the coalesced access (faster than the way used in ass2)
int local_num_vs = ( (num_vs_G - 1) / num_threads ) + 1;
int my_first = my_thread_rank * local_num_vs;
int last = (my_thread_rank + 1) * local_num_vs - 1;
int my_last = (last < num_vs_G ? last : (num_vs_G - 1));
*/
//printf("my_rank is %d, my_first is %d, my_last is %d \n", my_thread_rank, my_first, my_last);
for (int i = my_thread_rank; i < num_vs_G; i+= num_threads)
{
int left_start = Device_nbr_offs[i];
int left_end = Device_nbr_offs[i + 1];
int left_size = left_end - left_start;
//printf("device: i %d, left_start is %d, left_end is %d, left_size is %d \n", i, left_start, left_end, left_size);
//sim_nbrs[i] = new int[left_size];
// loop over all neighbors of i
for (int j = left_start; j < left_end; j++) {
int nbr_id = Device_nbrs[j];
int right_start = Device_nbr_offs[nbr_id];
int right_end = Device_nbr_offs[nbr_id + 1];
int right_size = right_end - right_start;
// compute the similarity
int num_com_nbrs = get_num_com_nbrs(Device_nbrs, left_start, left_end, right_start, right_end);
float sim = (num_com_nbrs + 2) / std::sqrt((left_size + 1.0) * (right_size + 1.0));
if (sim > epsilon) {
//sim_nbrs[i][num_sim_nbrs[i]] = nbr_id;
sim_nbrs[size_index[i]+num_sim_nbrs[i]] = nbr_id;
num_sim_nbrs[i]++;
}
}
if (num_sim_nbrs[i] > mu) pivots[i] = true;
}
__syncthreads();
}
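// Worked example for the similarity formula above (illustrative): if vertex u has the sorted
// neighbor list {v, a, b} and v has {u, a, b}, then num_com_nbrs == 2 (a and b). The "+2" and
// "+1.0" terms account for u and v belonging to their own closed neighborhoods, giving
// sim = (2 + 2) / sqrt(4.0 * 4.0) = 1.0.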
/*
// how to implement stage 2 on GPU without the read/write lock ??
__global__ void stage_2(int num_vs_G, int num_blocks_per_grid, int num_threads_per_block,
bool* pivots, int* num_sim_nbrs, int* sim_nbrs,
bool* visited, int* cluster_result, int* num_clusters,
int* size_index)
{
int my_thread_rank = blockDim.x * blockIdx.x + threadIdx.x;
int num_threads = num_blocks_per_grid * num_threads_per_block;
for (int i = my_thread_rank; i < num_vs_G; i += num_threads)
{
if (!pivots[i] || visited[i]) continue;
if ( cluster_result[i] > i || cluster_result[nbr_id] == -1 )
{
visited[i] = true;
cluster_result[i] = i;
expansion(i, i, num_sim_nbrs, sim_nbrs, visited, pivots, cluster_result, size_index);
(*num_clusters)++;
}
else continue;
}
__syncthreads();
}
*/
// Host code
void cuda_scan(int num_vs, int num_es, int *nbr_offs, int *nbrs,
float epsilon, int mu, int num_blocks_per_grid, int num_threads_per_block,
int &num_clusters, int *cluster_result)
{
// pivots for host and device
bool* Device_pivots, *Host_pivots;
    cudaMalloc((void**)&Device_pivots, sizeof(bool) * num_vs );
    cudaMemset(Device_pivots, 0, sizeof(bool) * num_vs); // device memory is not zero-initialized; pivots must start as false
Host_pivots = new bool[num_vs]();
// sim_nbrs for host and device. 2D array to 1D array, use 1D array on GPU
int* Host_num_sim_nbrs;
Host_num_sim_nbrs = new int[num_vs]();
int** Host_sim_nbrs = new int*[num_vs]();
int* Host_num_nbrs = new int[num_vs]();
int* Host_size_index = new int[num_vs]();
int left_start, left_end, left_size, size_index(0);
for (int i = 0; i < num_vs; i++)
{
left_start = nbr_offs[i];
left_end = nbr_offs[i+1];
left_size = left_end - left_start;
Host_sim_nbrs[i] = new int[left_size];
Host_num_nbrs[i] = left_size;
Host_size_index[i] = size_index;
size_index += left_size;
}
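    // Illustrative example (not from the original source): for vertex degrees {2, 3, 1} the loop
    // above yields Host_size_index == {0, 2, 5} and size_index == 6, so vertex i's similar
    // neighbors occupy the flat-array slots [Host_size_index[i], Host_size_index[i] + degree(i)).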
int* Device_num_sim_nbrs, *Device_sim_nbrs;
    cudaMalloc((void**)&Device_num_sim_nbrs, sizeof(int) * num_vs );
    cudaMemset(Device_num_sim_nbrs, 0, sizeof(int) * num_vs); // stage_1 increments these counters, so they must start at zero
cudaMalloc((void**)&Device_sim_nbrs, sizeof(int) * size_index );
// Pass the size index to GPU
int* Device_size_index;
cudaMalloc((void**)&Device_size_index, sizeof(int) * num_vs );
cudaMemcpy(Device_size_index, Host_size_index, sizeof(int) * num_vs, cudaMemcpyHostToDevice);
// Malloc and copy nbr_offs and nbrs to device. <global memory>
int *Device_nbr_offs, *Device_nbrs;
cudaMalloc((void**)&Device_nbr_offs, sizeof(int) * (num_vs + 1));
cudaMalloc((void**)&Device_nbrs, sizeof(int) * (num_es + 1));
cudaMemcpy(Device_nbr_offs, nbr_offs, sizeof(int) * (num_vs + 1), cudaMemcpyHostToDevice);
cudaMemcpy(Device_nbrs, nbrs, sizeof(int) * (num_es + 1), cudaMemcpyHostToDevice);
stage_1<<<num_blocks_per_grid, num_threads_per_block>>>(num_vs, Device_nbr_offs, Device_nbrs,
num_blocks_per_grid, num_threads_per_block,
Device_pivots, Device_num_sim_nbrs, Device_sim_nbrs,
Device_size_index, epsilon, mu);
cudaDeviceSynchronize();
// Pass back the pivots results and the sim_nbrs results for stage 2
cudaMemcpy(Host_pivots, Device_pivots, sizeof(bool) * num_vs, cudaMemcpyDeviceToHost);
cudaMemcpy(Host_num_sim_nbrs, Device_num_sim_nbrs, sizeof(int) * num_vs, cudaMemcpyDeviceToHost);
for (int i = 0; i < num_vs; i++)
{
cudaMemcpy(Host_sim_nbrs[i], Device_sim_nbrs + Host_size_index[i], sizeof(int) * Host_num_nbrs[i], cudaMemcpyDeviceToHost);
}
// Stage 2
bool* visited = new bool[num_vs]();
for (int i = 0; i < num_vs; i++)
{
if (!Host_pivots[i] || visited[i]) continue;
visited[i] = true;
cluster_result[i] = i;
expansion(i, i, Host_num_sim_nbrs, Host_sim_nbrs, visited, Host_pivots, cluster_result);
num_clusters++;
}
/*
bool* Device_visited;
cudaMalloc((void**)&Device_visited, sizeof(int) * num_vs);
int* Device_cluster_result;
cudaMalloc((void**)&Device_cluster_result, sizeof(int) * num_vs);
int* Device_num_clusters;
cudaMalloc((void**)&Device_num_clusters, sizeof(int));
cudaMemcpy(Device_cluster_result, cluster_result, sizeof(int) * num_vs);
stage_2<<<num_blocks_per_grid, num_threads_per_block>>>(num_vs, num_blocks_per_grid, num_threads_per_block,
Device_pivots, Device_num_sim_nbrs, Device_sim_nbrs,
Device_visited, Device_cluster_result, Device_num_clusters,
Device_size_index);
cudaMemcpy(cluster_result, Device_cluster_result, sizeof(int) * num_vs, cudaMemcpyDeviceToHost);
cudaMemcpy(&num_clusters, Device_num_clusters, sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(Host_pivots, Device_pivots, sizeof(bool) * num_vs, cudaMemcpyDeviceToHost);
*/
for(int i = 0; i < num_vs; i++)
{
delete[] Host_sim_nbrs[i];
}
delete[] Host_sim_nbrs;
delete[] Host_num_nbrs;
delete[] Host_pivots;
delete[] Host_num_sim_nbrs;
delete[] visited;
delete[] Host_size_index;
cudaFree(Device_nbr_offs);
cudaFree(Device_nbrs);
cudaFree(Device_num_sim_nbrs);
cudaFree(Device_sim_nbrs);
cudaFree(Device_pivots);
cudaFree(Device_size_index);
}
|
e6a5ed915c75359878d9576d2571e06a9a6d0214.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <cudnn.h>
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <malloc.h>
#include <cstdlib>
#include <time.h>
#include <iostream>
#include <sys/types.h>
#include <errno.h>
#include <vector>
#include <fstream>
#include <string>
#include <omp.h>
#define TH 2
#define TW 1
#define TC 16
#define C 64
#define N 32
#define H 14
#define W 14
#define TCS ((C-1)/TC + 1)
#define THS ((H-1)/TH + 1)
#define TWS ((W-1)/TW+1)
#define WPAD (TWS*TW + 2)
#define R 3
#define S 3
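// With C=64, TC=16, H=W=14, TH=2 and TW=1 these evaluate to TCS = 63/16+1 = 4
// channel tiles, THS = 13/2+1 = 7 row tiles, TWS = 13/1+1 = 14 column tiles,
// and WPAD = 14*1+2 = 16 padded columns (one halo column per side for the 3x3 filter).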
using namespace std;
#define checkCUDNN(expression) \
{ \
cudnnStatus_t status = (expression); \
if (status != CUDNN_STATUS_SUCCESS) { \
std::cerr << "Error on line " << __LINE__ << ": " \
<< cudnnGetErrorString(status) << std::endl; \
std::exit(EXIT_FAILURE); \
} \
}
inline void chkerr(hipError_t code)
{
if (code != hipSuccess)
{
std::cerr << "ERROR!!!:" << hipGetErrorString(code) <<endl;
exit(-1);
}
}
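// Both helpers are intended to wrap every runtime / cuDNN call, e.g. (illustrative
// usage only, not taken from this file):
//
//   float* d_buf;
//   chkerr(hipMalloc((void**)&d_buf, N * H * W * sizeof(float)));
//   cudnnHandle_t handle;
//   checkCUDNN(cudnnCreate(&handle));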
extern "C" __global__ void default_function_kernel0(float* __restrict__ data, float* __restrict__ kernel, float* __restrict__ compute) {
float compute_local[4];
__shared__ float pad_temp_shared[2048];
__shared__ float kernel_shared[1152];
float pad_temp_shared_local[8];
float kernel_shared_local[12];
compute_local[(0)] = 0.000000e+00f;
compute_local[(2)] = 0.000000e+00f;
compute_local[(1)] = 0.000000e+00f;
compute_local[(3)] = 0.000000e+00f;
for (int rc_outer = 0; rc_outer < 2; ++rc_outer) {
__syncthreads();
pad_temp_shared[(((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)))] = (((((1 <= ((((int)blockIdx.y) * 2) + (((((int)threadIdx.x) * 74) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + (((((int)threadIdx.x) * 74) & 63) >> 4)) < 15)) && (1 <= ((((int)threadIdx.x) * 74) & 15))) && (((((int)threadIdx.x) * 74) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + (((((int)threadIdx.x) * 74) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + ((((((int)threadIdx.x) * 74) & 63) >> 4) * 14)) + ((((int)threadIdx.x) * 74) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 1))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 1) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 1) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 1) & 15))) && ((((((int)threadIdx.x) * 74) + 1) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 1) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 1) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 1) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 2))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 2) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 2) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 2) & 15))) && ((((((int)threadIdx.x) * 74) + 2) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 2) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 2) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 2) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 3))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 3) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 3) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 3) & 15))) && ((((((int)threadIdx.x) * 74) + 3) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 3) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 3) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 3) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 4))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 4) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 4) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 4) & 15))) && ((((((int)threadIdx.x) * 74) + 4) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 4) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 4) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 4) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 5))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 5) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 5) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 5) & 15))) && ((((((int)threadIdx.x) * 74) + 5) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 5) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 5) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 5) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 6))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 6) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 6) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 6) & 15))) && ((((((int)threadIdx.x) * 74) + 6) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 6) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 6) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 6) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 7))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 7) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 7) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 7) & 15))) && ((((((int)threadIdx.x) * 74) + 7) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 7) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 7) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 7) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 8))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 8) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 8) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 8) & 15))) && ((((((int)threadIdx.x) * 74) + 8) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 8) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 8) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 8) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 9))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 9) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 9) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 9) & 15))) && ((((((int)threadIdx.x) * 74) + 9) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 9) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 9) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 9) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 10))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 10) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 10) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 10) & 15))) && ((((((int)threadIdx.x) * 74) + 10) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 10) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 10) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 10) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 11))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 11) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 11) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 11) & 15))) && ((((((int)threadIdx.x) * 74) + 11) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 11) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 11) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 11) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 12))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 12) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 12) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 12) & 15))) && ((((((int)threadIdx.x) * 74) + 12) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 12) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 12) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 12) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 13))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 13) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 13) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 13) & 15))) && ((((((int)threadIdx.x) * 74) + 13) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 13) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 13) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 13) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 14))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 14) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 14) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 14) & 15))) && ((((((int)threadIdx.x) * 74) + 14) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 14) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 14) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 14) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 15))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 15) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 15) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 15) & 15))) && ((((((int)threadIdx.x) * 74) + 15) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 15) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 15) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 15) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 16))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 16) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 16) & 63) >> 4)) < 15)) && (1 <= ((((int)threadIdx.x) * 74) & 15))) && (((((int)threadIdx.x) * 74) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 16) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 16) & 63) >> 4) * 14)) + ((((int)threadIdx.x) * 74) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 17))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 17) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 17) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 1) & 15))) && ((((((int)threadIdx.x) * 74) + 1) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 17) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 17) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 1) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 18))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 18) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 18) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 2) & 15))) && ((((((int)threadIdx.x) * 74) + 2) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 18) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 18) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 2) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 19))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 19) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 19) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 3) & 15))) && ((((((int)threadIdx.x) * 74) + 3) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 19) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 19) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 3) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 20))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 20) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 20) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 4) & 15))) && ((((((int)threadIdx.x) * 74) + 4) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 20) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 20) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 4) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 21))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 21) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 21) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 5) & 15))) && ((((((int)threadIdx.x) * 74) + 5) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 21) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 21) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 5) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 22))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 22) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 22) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 6) & 15))) && ((((((int)threadIdx.x) * 74) + 6) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 22) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 22) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 6) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 23))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 23) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 23) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 7) & 15))) && ((((((int)threadIdx.x) * 74) + 7) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 23) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 23) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 7) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 24))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 24) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 24) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 8) & 15))) && ((((((int)threadIdx.x) * 74) + 8) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 24) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 24) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 8) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 25))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 25) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 25) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 9) & 15))) && ((((((int)threadIdx.x) * 74) + 9) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 25) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 25) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 9) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 26))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 26) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 26) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 10) & 15))) && ((((((int)threadIdx.x) * 74) + 10) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 26) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 26) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 10) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 27))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 27) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 27) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 11) & 15))) && ((((((int)threadIdx.x) * 74) + 11) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 27) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 27) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 11) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 28))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 28) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 28) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 12) & 15))) && ((((((int)threadIdx.x) * 74) + 12) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 28) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 28) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 12) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 29))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 29) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 29) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 13) & 15))) && ((((((int)threadIdx.x) * 74) + 13) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 29) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 29) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 13) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 30))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 30) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 30) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 14) & 15))) && ((((((int)threadIdx.x) * 74) + 14) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 30) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 30) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 14) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 31))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 31) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 31) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 15) & 15))) && ((((((int)threadIdx.x) * 74) + 15) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 31) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 31) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 15) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 32))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 32) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 32) & 63) >> 4)) < 15)) && (1 <= ((((int)threadIdx.x) * 74) & 15))) && (((((int)threadIdx.x) * 74) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 32) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 32) & 63) >> 4) * 14)) + ((((int)threadIdx.x) * 74) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 33))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 33) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 33) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 1) & 15))) && ((((((int)threadIdx.x) * 74) + 1) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 33) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 33) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 1) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 34))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 34) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 34) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 2) & 15))) && ((((((int)threadIdx.x) * 74) + 2) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 34) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 34) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 2) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 35))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 35) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 35) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 3) & 15))) && ((((((int)threadIdx.x) * 74) + 3) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 35) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 35) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 3) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 36))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 36) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 36) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 4) & 15))) && ((((((int)threadIdx.x) * 74) + 4) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 36) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 36) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 4) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 37))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 37) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 37) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 5) & 15))) && ((((((int)threadIdx.x) * 74) + 5) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 37) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 37) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 5) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 38))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 38) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 38) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 6) & 15))) && ((((((int)threadIdx.x) * 74) + 6) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 38) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 38) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 6) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 39))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 39) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 39) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 7) & 15))) && ((((((int)threadIdx.x) * 74) + 7) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 39) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 39) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 7) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 40))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 40) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 40) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 8) & 15))) && ((((((int)threadIdx.x) * 74) + 8) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 40) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 40) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 8) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 41))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 41) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 41) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 9) & 15))) && ((((((int)threadIdx.x) * 74) + 9) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 41) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 41) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 9) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 42))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 42) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 42) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 10) & 15))) && ((((((int)threadIdx.x) * 74) + 10) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 42) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 42) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 10) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 43))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 43) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 43) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 11) & 15))) && ((((((int)threadIdx.x) * 74) + 11) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 43) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 43) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 11) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 44))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 44) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 44) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 12) & 15))) && ((((((int)threadIdx.x) * 74) + 12) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 44) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 44) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 12) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 45))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 45) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 45) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 13) & 15))) && ((((((int)threadIdx.x) * 74) + 13) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 45) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 45) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 13) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 46))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 46) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 46) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 14) & 15))) && ((((((int)threadIdx.x) * 74) + 14) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 46) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 46) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 14) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 47))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 47) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 47) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 15) & 15))) && ((((((int)threadIdx.x) * 74) + 15) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 47) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 47) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 15) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 48))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 48) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 48) & 63) >> 4)) < 15)) && (1 <= ((((int)threadIdx.x) * 74) & 15))) && (((((int)threadIdx.x) * 74) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 48) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 48) & 63) >> 4) * 14)) + ((((int)threadIdx.x) * 74) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 49))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 49) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 49) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 1) & 15))) && ((((((int)threadIdx.x) * 74) + 1) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 49) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 49) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 1) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 50))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 50) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 50) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 2) & 15))) && ((((((int)threadIdx.x) * 74) + 2) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 50) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 50) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 2) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 51))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 51) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 51) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 3) & 15))) && ((((((int)threadIdx.x) * 74) + 3) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 51) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 51) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 3) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 52))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 52) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 52) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 4) & 15))) && ((((((int)threadIdx.x) * 74) + 4) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 52) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 52) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 4) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 53))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 53) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 53) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 5) & 15))) && ((((((int)threadIdx.x) * 74) + 5) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 53) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 53) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 5) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 54))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 54) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 54) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 6) & 15))) && ((((((int)threadIdx.x) * 74) + 6) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 54) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 54) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 6) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 55))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 55) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 55) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 7) & 15))) && ((((((int)threadIdx.x) * 74) + 7) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 55) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 55) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 7) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 56))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 56) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 56) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 8) & 15))) && ((((((int)threadIdx.x) * 74) + 8) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 56) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 56) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 8) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 57))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 57) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 57) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 9) & 15))) && ((((((int)threadIdx.x) * 74) + 9) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 57) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 57) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 9) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 58))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 58) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 58) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 10) & 15))) && ((((((int)threadIdx.x) * 74) + 10) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 58) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 58) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 10) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 59))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 59) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 59) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 11) & 15))) && ((((((int)threadIdx.x) * 74) + 11) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 59) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 59) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 11) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 60))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 60) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 60) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 12) & 15))) && ((((((int)threadIdx.x) * 74) + 12) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 60) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 60) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 12) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 61))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 61) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 61) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 13) & 15))) && ((((((int)threadIdx.x) * 74) + 13) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 61) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 61) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 13) & 15)) - 15))] : 0.000000e+00f);
if (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 74) + 62) >> 6)) < 32) {
if (((((int)threadIdx.z) * 64) + (((((int)threadIdx.x) * 74) + 62) >> 4)) < 128) {
if (((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) < 1986) {
if (((int)threadIdx.x) < 13) {
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 62))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 62) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 62) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 14) & 15))) && ((((((int)threadIdx.x) * 74) + 14) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 62) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 62) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 14) & 15)) - 15))] : 0.000000e+00f);
}
}
}
}
if (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 74) + 63) >> 6)) < 32) {
if (((((int)threadIdx.z) * 64) + (((((int)threadIdx.x) * 74) + 63) >> 4)) < 128) {
if (((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) < 1985) {
if (((int)threadIdx.x) < 13) {
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 63))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 63) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 63) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 15) & 15))) && ((((((int)threadIdx.x) * 74) + 15) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 63) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 63) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 15) & 15)) - 15))] : 0.000000e+00f);
}
}
}
}
if (((((int)threadIdx.z) * 16) + ((((int)threadIdx.x) * 74) >> 6)) < 31) {
if (((((int)threadIdx.z) * 64) + ((((int)threadIdx.x) * 74) >> 4)) < 124) {
if (((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) < 1984) {
if (((int)threadIdx.x) < 13) {
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 64))] = (((((1 <= ((((int)blockIdx.y) * 2) + (((((int)threadIdx.x) * 74) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + (((((int)threadIdx.x) * 74) & 63) >> 4)) < 15)) && (1 <= ((((int)threadIdx.x) * 74) & 15))) && (((((int)threadIdx.x) * 74) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + (((((int)threadIdx.x) * 74) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + ((((((int)threadIdx.x) * 74) & 63) >> 4) * 14)) + ((((int)threadIdx.x) * 74) & 15)) + 181))] : 0.000000e+00f);
}
}
}
}
if (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 74) + 65) >> 6)) < 32) {
if (((((int)threadIdx.z) * 64) + (((((int)threadIdx.x) * 74) + 65) >> 4)) < 128) {
if (((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) < 1983) {
if (((int)threadIdx.x) < 13) {
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 65))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 1) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 1) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 1) & 15))) && ((((((int)threadIdx.x) * 74) + 1) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 65) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 1) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 1) & 15)) - 15))] : 0.000000e+00f);
}
}
}
}
if (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 74) + 66) >> 6)) < 32) {
if (((((int)threadIdx.z) * 64) + (((((int)threadIdx.x) * 74) + 66) >> 4)) < 128) {
if (((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) < 1982) {
if (((int)threadIdx.x) < 13) {
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 66))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 2) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 2) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 2) & 15))) && ((((((int)threadIdx.x) * 74) + 2) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 66) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 2) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 2) & 15)) - 15))] : 0.000000e+00f);
}
}
}
}
if (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 74) + 67) >> 6)) < 32) {
if (((((int)threadIdx.z) * 64) + (((((int)threadIdx.x) * 74) + 67) >> 4)) < 128) {
if (((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) < 1981) {
if (((int)threadIdx.x) < 13) {
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 67))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 3) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 3) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 3) & 15))) && ((((((int)threadIdx.x) * 74) + 3) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 67) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 3) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 3) & 15)) - 15))] : 0.000000e+00f);
}
}
}
}
if (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 74) + 68) >> 6)) < 32) {
if (((((int)threadIdx.z) * 64) + (((((int)threadIdx.x) * 74) + 68) >> 4)) < 128) {
if (((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) < 1980) {
if (((int)threadIdx.x) < 13) {
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 68))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 4) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 4) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 4) & 15))) && ((((((int)threadIdx.x) * 74) + 4) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 68) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 4) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 4) & 15)) - 15))] : 0.000000e+00f);
}
}
}
}
if (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 74) + 69) >> 6)) < 32) {
if (((((int)threadIdx.z) * 64) + (((((int)threadIdx.x) * 74) + 69) >> 4)) < 128) {
if (((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) < 1979) {
if (((int)threadIdx.x) < 13) {
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 69))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 5) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 5) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 5) & 15))) && ((((((int)threadIdx.x) * 74) + 5) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 69) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 5) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 5) & 15)) - 15))] : 0.000000e+00f);
}
}
}
}
if (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 74) + 70) >> 6)) < 32) {
if (((((int)threadIdx.z) * 64) + (((((int)threadIdx.x) * 74) + 70) >> 4)) < 128) {
if (((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) < 1978) {
if (((int)threadIdx.x) < 13) {
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 70))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 6) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 6) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 6) & 15))) && ((((((int)threadIdx.x) * 74) + 6) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 70) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 6) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 6) & 15)) - 15))] : 0.000000e+00f);
}
}
}
}
if (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 74) + 71) >> 6)) < 32) {
if (((((int)threadIdx.z) * 64) + (((((int)threadIdx.x) * 74) + 71) >> 4)) < 128) {
if (((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) < 1977) {
if (((int)threadIdx.x) < 13) {
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 71))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 7) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 7) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 7) & 15))) && ((((((int)threadIdx.x) * 74) + 7) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 71) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 7) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 7) & 15)) - 15))] : 0.000000e+00f);
}
}
}
}
if (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 74) + 72) >> 6)) < 32) {
if (((((int)threadIdx.z) * 64) + (((((int)threadIdx.x) * 74) + 72) >> 4)) < 128) {
if (((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) < 1976) {
if (((int)threadIdx.x) < 13) {
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 72))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 8) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 8) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 8) & 15))) && ((((((int)threadIdx.x) * 74) + 8) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 72) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 8) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 8) & 15)) - 15))] : 0.000000e+00f);
}
}
}
}
if (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 74) + 73) >> 6)) < 32) {
if (((((int)threadIdx.z) * 64) + (((((int)threadIdx.x) * 74) + 73) >> 4)) < 128) {
if (((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) < 1975) {
if (((int)threadIdx.x) < 13) {
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 73))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 9) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 9) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 9) & 15))) && ((((((int)threadIdx.x) * 74) + 9) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 73) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 9) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 9) & 15)) - 15))] : 0.000000e+00f);
}
}
}
}
kernel_shared[(((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)))] = kernel[((((((((int)blockIdx.z) * 2304) + (((int)threadIdx.z) * 1152)) + (((((int)threadIdx.x) * 14) / 96) * 576)) + (rc_outer * 288)) + (((((int)threadIdx.x) * 14) % 96) * 3)))];
kernel_shared[((((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) + 1))] = kernel[(((((((((int)blockIdx.z) * 2304) + (((int)threadIdx.z) * 1152)) + (((((int)threadIdx.x) * 14) / 96) * 576)) + (rc_outer * 288)) + (((((int)threadIdx.x) * 14) % 96) * 3)) + 1))];
kernel_shared[((((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) + 2))] = kernel[(((((((((int)blockIdx.z) * 2304) + (((int)threadIdx.z) * 1152)) + (((((int)threadIdx.x) * 14) / 96) * 576)) + (rc_outer * 288)) + (((((int)threadIdx.x) * 14) % 96) * 3)) + 2))];
kernel_shared[((((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) + 3))] = kernel[((((((((int)blockIdx.z) * 2304) + (((int)threadIdx.z) * 1152)) + ((((((int)threadIdx.x) * 14) + 1) / 96) * 576)) + (rc_outer * 288)) + ((((((int)threadIdx.x) * 14) + 1) % 96) * 3)))];
kernel_shared[((((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) + 4))] = kernel[(((((((((int)blockIdx.z) * 2304) + (((int)threadIdx.z) * 1152)) + ((((((int)threadIdx.x) * 14) + 1) / 96) * 576)) + (rc_outer * 288)) + ((((((int)threadIdx.x) * 14) + 1) % 96) * 3)) + 1))];
kernel_shared[((((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) + 5))] = kernel[(((((((((int)blockIdx.z) * 2304) + (((int)threadIdx.z) * 1152)) + ((((((int)threadIdx.x) * 14) + 1) / 96) * 576)) + (rc_outer * 288)) + ((((((int)threadIdx.x) * 14) + 1) % 96) * 3)) + 2))];
kernel_shared[((((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) + 6))] = kernel[((((((((int)blockIdx.z) * 2304) + (((int)threadIdx.z) * 1152)) + ((((((int)threadIdx.x) * 14) + 2) / 96) * 576)) + (rc_outer * 288)) + ((((((int)threadIdx.x) * 14) + 2) % 96) * 3)))];
kernel_shared[((((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) + 7))] = kernel[(((((((((int)blockIdx.z) * 2304) + (((int)threadIdx.z) * 1152)) + ((((((int)threadIdx.x) * 14) + 2) / 96) * 576)) + (rc_outer * 288)) + ((((((int)threadIdx.x) * 14) + 2) % 96) * 3)) + 1))];
kernel_shared[((((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) + 8))] = kernel[(((((((((int)blockIdx.z) * 2304) + (((int)threadIdx.z) * 1152)) + ((((((int)threadIdx.x) * 14) + 2) / 96) * 576)) + (rc_outer * 288)) + ((((((int)threadIdx.x) * 14) + 2) % 96) * 3)) + 2))];
kernel_shared[((((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) + 9))] = kernel[((((((((int)blockIdx.z) * 2304) + (((int)threadIdx.z) * 1152)) + ((((((int)threadIdx.x) * 14) + 3) / 96) * 576)) + (rc_outer * 288)) + ((((((int)threadIdx.x) * 14) + 3) % 96) * 3)))];
kernel_shared[((((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) + 10))] = kernel[(((((((((int)blockIdx.z) * 2304) + (((int)threadIdx.z) * 1152)) + ((((((int)threadIdx.x) * 14) + 3) / 96) * 576)) + (rc_outer * 288)) + ((((((int)threadIdx.x) * 14) + 3) % 96) * 3)) + 1))];
kernel_shared[((((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) + 11))] = kernel[(((((((((int)blockIdx.z) * 2304) + (((int)threadIdx.z) * 1152)) + ((((((int)threadIdx.x) * 14) + 3) / 96) * 576)) + (rc_outer * 288)) + ((((((int)threadIdx.x) * 14) + 3) % 96) * 3)) + 2))];
kernel_shared[((((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) + 12))] = kernel[((((((((int)blockIdx.z) * 2304) + (((int)threadIdx.z) * 1152)) + ((((((int)threadIdx.x) * 14) + 4) / 96) * 576)) + (rc_outer * 288)) + ((((((int)threadIdx.x) * 14) + 4) % 96) * 3)))];
kernel_shared[((((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) + 13))] = kernel[(((((((((int)blockIdx.z) * 2304) + (((int)threadIdx.z) * 1152)) + ((((((int)threadIdx.x) * 14) + 4) / 96) * 576)) + (rc_outer * 288)) + ((((((int)threadIdx.x) * 14) + 4) % 96) * 3)) + 1))];
kernel_shared[((((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) + 14))] = kernel[(((((((((int)blockIdx.z) * 2304) + (((int)threadIdx.z) * 1152)) + ((((((int)threadIdx.x) * 14) + 4) / 96) * 576)) + (rc_outer * 288)) + ((((((int)threadIdx.x) * 14) + 4) % 96) * 3)) + 2))];
kernel_shared[((((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) + 15))] = kernel[((((((((int)blockIdx.z) * 2304) + (((int)threadIdx.z) * 1152)) + ((((((int)threadIdx.x) * 14) + 5) / 96) * 576)) + (rc_outer * 288)) + ((((((int)threadIdx.x) * 14) + 5) % 96) * 3)))];
kernel_shared[((((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) + 16))] = kernel[(((((((((int)blockIdx.z) * 2304) + (((int)threadIdx.z) * 1152)) + ((((((int)threadIdx.x) * 14) + 5) / 96) * 576)) + (rc_outer * 288)) + ((((((int)threadIdx.x) * 14) + 5) % 96) * 3)) + 1))];
kernel_shared[((((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) + 17))] = kernel[(((((((((int)blockIdx.z) * 2304) + (((int)threadIdx.z) * 1152)) + ((((((int)threadIdx.x) * 14) + 5) / 96) * 576)) + (rc_outer * 288)) + ((((((int)threadIdx.x) * 14) + 5) % 96) * 3)) + 2))];
kernel_shared[((((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) + 18))] = kernel[((((((((int)blockIdx.z) * 2304) + (((int)threadIdx.z) * 1152)) + ((((((int)threadIdx.x) * 14) + 6) / 96) * 576)) + (rc_outer * 288)) + ((((((int)threadIdx.x) * 14) + 6) % 96) * 3)))];
kernel_shared[((((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) + 19))] = kernel[(((((((((int)blockIdx.z) * 2304) + (((int)threadIdx.z) * 1152)) + ((((((int)threadIdx.x) * 14) + 6) / 96) * 576)) + (rc_outer * 288)) + ((((((int)threadIdx.x) * 14) + 6) % 96) * 3)) + 1))];
kernel_shared[((((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) + 20))] = kernel[(((((((((int)blockIdx.z) * 2304) + (((int)threadIdx.z) * 1152)) + ((((((int)threadIdx.x) * 14) + 6) / 96) * 576)) + (rc_outer * 288)) + ((((((int)threadIdx.x) * 14) + 6) % 96) * 3)) + 2))];
kernel_shared[((((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) + 21))] = kernel[((((((((int)blockIdx.z) * 2304) + (((int)threadIdx.z) * 1152)) + ((((((int)threadIdx.x) * 14) + 7) / 96) * 576)) + (rc_outer * 288)) + ((((((int)threadIdx.x) * 14) + 7) % 96) * 3)))];
kernel_shared[((((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) + 22))] = kernel[(((((((((int)blockIdx.z) * 2304) + (((int)threadIdx.z) * 1152)) + ((((((int)threadIdx.x) * 14) + 7) / 96) * 576)) + (rc_outer * 288)) + ((((((int)threadIdx.x) * 14) + 7) % 96) * 3)) + 1))];
kernel_shared[((((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) + 23))] = kernel[(((((((((int)blockIdx.z) * 2304) + (((int)threadIdx.z) * 1152)) + ((((((int)threadIdx.x) * 14) + 7) / 96) * 576)) + (rc_outer * 288)) + ((((((int)threadIdx.x) * 14) + 7) % 96) * 3)) + 2))];
kernel_shared[((((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) + 24))] = kernel[((((((((int)blockIdx.z) * 2304) + (((int)threadIdx.z) * 1152)) + ((((((int)threadIdx.x) * 14) + 8) / 96) * 576)) + (rc_outer * 288)) + ((((((int)threadIdx.x) * 14) + 8) % 96) * 3)))];
kernel_shared[((((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) + 25))] = kernel[(((((((((int)blockIdx.z) * 2304) + (((int)threadIdx.z) * 1152)) + ((((((int)threadIdx.x) * 14) + 8) / 96) * 576)) + (rc_outer * 288)) + ((((((int)threadIdx.x) * 14) + 8) % 96) * 3)) + 1))];
kernel_shared[((((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) + 26))] = kernel[(((((((((int)blockIdx.z) * 2304) + (((int)threadIdx.z) * 1152)) + ((((((int)threadIdx.x) * 14) + 8) / 96) * 576)) + (rc_outer * 288)) + ((((((int)threadIdx.x) * 14) + 8) % 96) * 3)) + 2))];
kernel_shared[((((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) + 27))] = kernel[((((((((int)blockIdx.z) * 2304) + (((int)threadIdx.z) * 1152)) + ((((((int)threadIdx.x) * 14) + 9) / 96) * 576)) + (rc_outer * 288)) + ((((((int)threadIdx.x) * 14) + 9) % 96) * 3)))];
kernel_shared[((((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) + 28))] = kernel[(((((((((int)blockIdx.z) * 2304) + (((int)threadIdx.z) * 1152)) + ((((((int)threadIdx.x) * 14) + 9) / 96) * 576)) + (rc_outer * 288)) + ((((((int)threadIdx.x) * 14) + 9) % 96) * 3)) + 1))];
kernel_shared[((((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) + 29))] = kernel[(((((((((int)blockIdx.z) * 2304) + (((int)threadIdx.z) * 1152)) + ((((((int)threadIdx.x) * 14) + 9) / 96) * 576)) + (rc_outer * 288)) + ((((((int)threadIdx.x) * 14) + 9) % 96) * 3)) + 2))];
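// Remaining shared-weight slots are predicated: 28 threads x 42 slots = 1176 exceeds the 1152 weights actually staged, so the tail loads below are guarded to stay in bounds.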
if (((((int)threadIdx.z) * 2) + (((((int)threadIdx.x) * 14) + 10) / 96)) < 4) {
if (((((int)threadIdx.z) * 64) + (((((int)threadIdx.x) * 14) + 10) / 3)) < 128) {
if (((((int)threadIdx.z) * 192) + (((int)threadIdx.x) * 14)) < 374) {
if (((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) < 1122) {
if (((int)threadIdx.x) < 13) {
kernel_shared[((((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) + 30))] = kernel[((((((((int)blockIdx.z) * 2304) + (((int)threadIdx.z) * 1152)) + ((((((int)threadIdx.x) * 14) + 10) / 96) * 576)) + (rc_outer * 288)) + ((((((int)threadIdx.x) * 14) + 10) % 96) * 3)))];
}
}
}
}
}
if (((((int)threadIdx.z) * 2) + (((((int)threadIdx.x) * 14) + 10) / 96)) < 4) {
if (((((int)threadIdx.z) * 64) + (((((int)threadIdx.x) * 14) + 10) / 3)) < 128) {
if (((((int)threadIdx.z) * 192) + (((int)threadIdx.x) * 14)) < 374) {
if (((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) < 1121) {
if (((int)threadIdx.x) < 13) {
kernel_shared[((((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) + 31))] = kernel[(((((((((int)blockIdx.z) * 2304) + (((int)threadIdx.z) * 1152)) + ((((((int)threadIdx.x) * 14) + 10) / 96) * 576)) + (rc_outer * 288)) + ((((((int)threadIdx.x) * 14) + 10) % 96) * 3)) + 1))];
}
}
}
}
}
if (((((int)threadIdx.z) * 2) + (((((int)threadIdx.x) * 14) + 10) / 96)) < 4) {
if (((((int)threadIdx.z) * 64) + (((((int)threadIdx.x) * 14) + 10) / 3)) < 128) {
if (((((int)threadIdx.z) * 192) + (((int)threadIdx.x) * 14)) < 374) {
if (((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) < 1120) {
if (((int)threadIdx.x) < 13) {
kernel_shared[((((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) + 32))] = kernel[(((((((((int)blockIdx.z) * 2304) + (((int)threadIdx.z) * 1152)) + ((((((int)threadIdx.x) * 14) + 10) / 96) * 576)) + (rc_outer * 288)) + ((((((int)threadIdx.x) * 14) + 10) % 96) * 3)) + 2))];
}
}
}
}
}
if (((((int)threadIdx.z) * 2) + (((((int)threadIdx.x) * 14) + 11) / 96)) < 4) {
if (((((int)threadIdx.z) * 64) + (((((int)threadIdx.x) * 14) + 11) / 3)) < 128) {
if (((((int)threadIdx.z) * 192) + (((int)threadIdx.x) * 14)) < 373) {
if (((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) < 1119) {
if (((int)threadIdx.x) < 13) {
kernel_shared[((((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) + 33))] = kernel[((((((((int)blockIdx.z) * 2304) + (((int)threadIdx.z) * 1152)) + ((((((int)threadIdx.x) * 14) + 11) / 96) * 576)) + (rc_outer * 288)) + ((((((int)threadIdx.x) * 14) + 11) % 96) * 3)))];
}
}
}
}
}
if (((((int)threadIdx.z) * 2) + (((((int)threadIdx.x) * 14) + 11) / 96)) < 4) {
if (((((int)threadIdx.z) * 64) + (((((int)threadIdx.x) * 14) + 11) / 3)) < 128) {
if (((((int)threadIdx.z) * 192) + (((int)threadIdx.x) * 14)) < 373) {
if (((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) < 1118) {
if (((int)threadIdx.x) < 13) {
kernel_shared[((((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) + 34))] = kernel[(((((((((int)blockIdx.z) * 2304) + (((int)threadIdx.z) * 1152)) + ((((((int)threadIdx.x) * 14) + 11) / 96) * 576)) + (rc_outer * 288)) + ((((((int)threadIdx.x) * 14) + 11) % 96) * 3)) + 1))];
}
}
}
}
}
if (((((int)threadIdx.z) * 2) + (((((int)threadIdx.x) * 14) + 11) / 96)) < 4) {
if (((((int)threadIdx.z) * 64) + (((((int)threadIdx.x) * 14) + 11) / 3)) < 128) {
if (((((int)threadIdx.z) * 192) + (((int)threadIdx.x) * 14)) < 373) {
if (((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) < 1117) {
if (((int)threadIdx.x) < 13) {
kernel_shared[((((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) + 35))] = kernel[(((((((((int)blockIdx.z) * 2304) + (((int)threadIdx.z) * 1152)) + ((((((int)threadIdx.x) * 14) + 11) / 96) * 576)) + (rc_outer * 288)) + ((((((int)threadIdx.x) * 14) + 11) % 96) * 3)) + 2))];
}
}
}
}
}
if (((((int)threadIdx.z) * 2) + (((((int)threadIdx.x) * 14) + 12) / 96)) < 4) {
if (((((int)threadIdx.z) * 64) + ((((int)threadIdx.x) * 14) / 3)) < 124) {
if (((((int)threadIdx.z) * 192) + (((int)threadIdx.x) * 14)) < 372) {
if (((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) < 1116) {
if (((int)threadIdx.x) < 13) {
kernel_shared[((((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) + 36))] = kernel[((((((((int)blockIdx.z) * 2304) + (((int)threadIdx.z) * 1152)) + ((((((int)threadIdx.x) * 14) + 12) / 96) * 576)) + (rc_outer * 288)) + ((((((int)threadIdx.x) * 14) + 12) % 96) * 3)))];
}
}
}
}
}
if (((((int)threadIdx.z) * 2) + (((((int)threadIdx.x) * 14) + 12) / 96)) < 4) {
if (((((int)threadIdx.z) * 64) + ((((int)threadIdx.x) * 14) / 3)) < 124) {
if (((((int)threadIdx.z) * 192) + (((int)threadIdx.x) * 14)) < 372) {
if (((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) < 1115) {
if (((int)threadIdx.x) < 13) {
kernel_shared[((((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) + 37))] = kernel[(((((((((int)blockIdx.z) * 2304) + (((int)threadIdx.z) * 1152)) + ((((((int)threadIdx.x) * 14) + 12) / 96) * 576)) + (rc_outer * 288)) + ((((((int)threadIdx.x) * 14) + 12) % 96) * 3)) + 1))];
}
}
}
}
}
if (((((int)threadIdx.z) * 2) + (((((int)threadIdx.x) * 14) + 12) / 96)) < 4) {
if (((((int)threadIdx.z) * 64) + ((((int)threadIdx.x) * 14) / 3)) < 124) {
if (((((int)threadIdx.z) * 192) + (((int)threadIdx.x) * 14)) < 372) {
if (((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) < 1114) {
if (((int)threadIdx.x) < 13) {
kernel_shared[((((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) + 38))] = kernel[(((((((((int)blockIdx.z) * 2304) + (((int)threadIdx.z) * 1152)) + ((((((int)threadIdx.x) * 14) + 12) / 96) * 576)) + (rc_outer * 288)) + ((((((int)threadIdx.x) * 14) + 12) % 96) * 3)) + 2))];
}
}
}
}
}
if (((((int)threadIdx.z) * 2) + (((((int)threadIdx.x) * 14) + 13) / 96)) < 4) {
if (((((int)threadIdx.z) * 64) + (((((int)threadIdx.x) * 14) + 13) / 3)) < 128) {
if (((((int)threadIdx.z) * 192) + (((int)threadIdx.x) * 14)) < 371) {
if (((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) < 1113) {
if (((int)threadIdx.x) < 13) {
kernel_shared[((((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) + 39))] = kernel[((((((((int)blockIdx.z) * 2304) + (((int)threadIdx.z) * 1152)) + ((((((int)threadIdx.x) * 14) + 13) / 96) * 576)) + (rc_outer * 288)) + ((((((int)threadIdx.x) * 14) + 13) % 96) * 3)))];
}
}
}
}
}
if (((((int)threadIdx.z) * 2) + (((((int)threadIdx.x) * 14) + 13) / 96)) < 4) {
if (((((int)threadIdx.z) * 64) + (((((int)threadIdx.x) * 14) + 13) / 3)) < 128) {
if (((((int)threadIdx.z) * 192) + (((int)threadIdx.x) * 14)) < 371) {
if (((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) < 1112) {
if (((int)threadIdx.x) < 13) {
kernel_shared[((((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) + 40))] = kernel[(((((((((int)blockIdx.z) * 2304) + (((int)threadIdx.z) * 1152)) + ((((((int)threadIdx.x) * 14) + 13) / 96) * 576)) + (rc_outer * 288)) + ((((((int)threadIdx.x) * 14) + 13) % 96) * 3)) + 1))];
}
}
}
}
}
if (((((int)threadIdx.z) * 2) + (((((int)threadIdx.x) * 14) + 13) / 96)) < 4) {
if (((((int)threadIdx.z) * 64) + (((((int)threadIdx.x) * 14) + 13) / 3)) < 128) {
if (((((int)threadIdx.z) * 192) + (((int)threadIdx.x) * 14)) < 371) {
if (((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) < 1111) {
if (((int)threadIdx.x) < 13) {
kernel_shared[((((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) + 41))] = kernel[(((((((((int)blockIdx.z) * 2304) + (((int)threadIdx.z) * 1152)) + ((((((int)threadIdx.x) * 14) + 13) / 96) * 576)) + (rc_outer * 288)) + ((((((int)threadIdx.x) * 14) + 13) % 96) * 3)) + 2))];
}
}
}
}
}
__syncthreads();
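// Reduction over 16 input-channel sub-groups: each iteration stages 8 input values and 12 weights in registers (repeated for the three filter columns) and accumulates a 2x2 micro-tile per thread (two output rows for each of two output channels).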
for (int rc_inner_outer = 0; rc_inner_outer < 16; ++rc_inner_outer) {
pad_temp_shared_local[(0)] = pad_temp_shared[(((rc_inner_outer * 128) + ((int)threadIdx.x)))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((rc_inner_outer * 128) + ((int)threadIdx.x)) + 16))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((rc_inner_outer * 128) + ((int)threadIdx.x)) + 32))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((rc_inner_outer * 128) + ((int)threadIdx.x)) + 48))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((rc_inner_outer * 128) + ((int)threadIdx.x)) + 64))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((rc_inner_outer * 128) + ((int)threadIdx.x)) + 80))];
pad_temp_shared_local[(6)] = pad_temp_shared[((((rc_inner_outer * 128) + ((int)threadIdx.x)) + 96))];
pad_temp_shared_local[(7)] = pad_temp_shared[((((rc_inner_outer * 128) + ((int)threadIdx.x)) + 112))];
kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 288) + (rc_inner_outer * 18)))];
kernel_shared_local[(6)] = kernel_shared[((((((int)threadIdx.z) * 288) + (rc_inner_outer * 18)) + 576))];
kernel_shared_local[(1)] = kernel_shared[((((((int)threadIdx.z) * 288) + (rc_inner_outer * 18)) + 3))];
kernel_shared_local[(7)] = kernel_shared[((((((int)threadIdx.z) * 288) + (rc_inner_outer * 18)) + 579))];
kernel_shared_local[(2)] = kernel_shared[((((((int)threadIdx.z) * 288) + (rc_inner_outer * 18)) + 6))];
kernel_shared_local[(8)] = kernel_shared[((((((int)threadIdx.z) * 288) + (rc_inner_outer * 18)) + 582))];
kernel_shared_local[(3)] = kernel_shared[((((((int)threadIdx.z) * 288) + (rc_inner_outer * 18)) + 9))];
kernel_shared_local[(9)] = kernel_shared[((((((int)threadIdx.z) * 288) + (rc_inner_outer * 18)) + 585))];
kernel_shared_local[(4)] = kernel_shared[((((((int)threadIdx.z) * 288) + (rc_inner_outer * 18)) + 12))];
kernel_shared_local[(10)] = kernel_shared[((((((int)threadIdx.z) * 288) + (rc_inner_outer * 18)) + 588))];
kernel_shared_local[(5)] = kernel_shared[((((((int)threadIdx.z) * 288) + (rc_inner_outer * 18)) + 15))];
kernel_shared_local[(11)] = kernel_shared[((((((int)threadIdx.z) * 288) + (rc_inner_outer * 18)) + 591))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(6)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(7)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(1)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(8)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(9)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(4)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(10)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(4)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(10)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(5)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(11)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(5)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(11)]));
pad_temp_shared_local[(0)] = pad_temp_shared[((((rc_inner_outer * 128) + ((int)threadIdx.x)) + 1))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((rc_inner_outer * 128) + ((int)threadIdx.x)) + 17))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((rc_inner_outer * 128) + ((int)threadIdx.x)) + 33))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((rc_inner_outer * 128) + ((int)threadIdx.x)) + 49))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((rc_inner_outer * 128) + ((int)threadIdx.x)) + 65))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((rc_inner_outer * 128) + ((int)threadIdx.x)) + 81))];
pad_temp_shared_local[(6)] = pad_temp_shared[((((rc_inner_outer * 128) + ((int)threadIdx.x)) + 97))];
pad_temp_shared_local[(7)] = pad_temp_shared[((((rc_inner_outer * 128) + ((int)threadIdx.x)) + 113))];
kernel_shared_local[(0)] = kernel_shared[((((((int)threadIdx.z) * 288) + (rc_inner_outer * 18)) + 1))];
kernel_shared_local[(6)] = kernel_shared[((((((int)threadIdx.z) * 288) + (rc_inner_outer * 18)) + 577))];
kernel_shared_local[(1)] = kernel_shared[((((((int)threadIdx.z) * 288) + (rc_inner_outer * 18)) + 4))];
kernel_shared_local[(7)] = kernel_shared[((((((int)threadIdx.z) * 288) + (rc_inner_outer * 18)) + 580))];
kernel_shared_local[(2)] = kernel_shared[((((((int)threadIdx.z) * 288) + (rc_inner_outer * 18)) + 7))];
kernel_shared_local[(8)] = kernel_shared[((((((int)threadIdx.z) * 288) + (rc_inner_outer * 18)) + 583))];
kernel_shared_local[(3)] = kernel_shared[((((((int)threadIdx.z) * 288) + (rc_inner_outer * 18)) + 10))];
kernel_shared_local[(9)] = kernel_shared[((((((int)threadIdx.z) * 288) + (rc_inner_outer * 18)) + 586))];
kernel_shared_local[(4)] = kernel_shared[((((((int)threadIdx.z) * 288) + (rc_inner_outer * 18)) + 13))];
kernel_shared_local[(10)] = kernel_shared[((((((int)threadIdx.z) * 288) + (rc_inner_outer * 18)) + 589))];
kernel_shared_local[(5)] = kernel_shared[((((((int)threadIdx.z) * 288) + (rc_inner_outer * 18)) + 16))];
kernel_shared_local[(11)] = kernel_shared[((((((int)threadIdx.z) * 288) + (rc_inner_outer * 18)) + 592))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(6)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(7)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(1)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(8)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(9)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(4)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(10)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(4)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(10)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(5)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(11)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(5)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(11)]));
pad_temp_shared_local[(0)] = pad_temp_shared[((((rc_inner_outer * 128) + ((int)threadIdx.x)) + 2))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((rc_inner_outer * 128) + ((int)threadIdx.x)) + 18))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((rc_inner_outer * 128) + ((int)threadIdx.x)) + 34))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((rc_inner_outer * 128) + ((int)threadIdx.x)) + 50))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((rc_inner_outer * 128) + ((int)threadIdx.x)) + 66))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((rc_inner_outer * 128) + ((int)threadIdx.x)) + 82))];
pad_temp_shared_local[(6)] = pad_temp_shared[((((rc_inner_outer * 128) + ((int)threadIdx.x)) + 98))];
pad_temp_shared_local[(7)] = pad_temp_shared[((((rc_inner_outer * 128) + ((int)threadIdx.x)) + 114))];
kernel_shared_local[(0)] = kernel_shared[((((((int)threadIdx.z) * 288) + (rc_inner_outer * 18)) + 2))];
kernel_shared_local[(6)] = kernel_shared[((((((int)threadIdx.z) * 288) + (rc_inner_outer * 18)) + 578))];
kernel_shared_local[(1)] = kernel_shared[((((((int)threadIdx.z) * 288) + (rc_inner_outer * 18)) + 5))];
kernel_shared_local[(7)] = kernel_shared[((((((int)threadIdx.z) * 288) + (rc_inner_outer * 18)) + 581))];
kernel_shared_local[(2)] = kernel_shared[((((((int)threadIdx.z) * 288) + (rc_inner_outer * 18)) + 8))];
kernel_shared_local[(8)] = kernel_shared[((((((int)threadIdx.z) * 288) + (rc_inner_outer * 18)) + 584))];
kernel_shared_local[(3)] = kernel_shared[((((((int)threadIdx.z) * 288) + (rc_inner_outer * 18)) + 11))];
kernel_shared_local[(9)] = kernel_shared[((((((int)threadIdx.z) * 288) + (rc_inner_outer * 18)) + 587))];
kernel_shared_local[(4)] = kernel_shared[((((((int)threadIdx.z) * 288) + (rc_inner_outer * 18)) + 14))];
kernel_shared_local[(10)] = kernel_shared[((((((int)threadIdx.z) * 288) + (rc_inner_outer * 18)) + 590))];
kernel_shared_local[(5)] = kernel_shared[((((((int)threadIdx.z) * 288) + (rc_inner_outer * 18)) + 17))];
kernel_shared_local[(11)] = kernel_shared[((((((int)threadIdx.z) * 288) + (rc_inner_outer * 18)) + 593))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(6)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(7)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(1)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(8)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(9)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(4)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(10)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(4)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(10)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(5)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(11)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(5)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(11)]));
}
}
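// Write the four accumulated results back: two adjacent output rows for each of the two output channels handled by this thread.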
compute[(((((((int)blockIdx.z) * 784) + (((int)threadIdx.z) * 196)) + (((int)blockIdx.y) * 28)) + ((int)threadIdx.x)))] = compute_local[(0)];
compute[((((((((int)blockIdx.z) * 784) + (((int)threadIdx.z) * 196)) + (((int)blockIdx.y) * 28)) + ((int)threadIdx.x)) + 392))] = compute_local[(2)];
compute[((((((((int)blockIdx.z) * 784) + (((int)threadIdx.z) * 196)) + (((int)blockIdx.y) * 28)) + ((int)threadIdx.x)) + 14))] = compute_local[(1)];
compute[((((((((int)blockIdx.z) * 784) + (((int)threadIdx.z) * 196)) + (((int)blockIdx.y) * 28)) + ((int)threadIdx.x)) + 406))] = compute_local[(3)];
}
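// cuDNN reference path: forward convolution with the IMPLICIT_GEMM algorithm, filter weights initialized to 1.0f.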
class ConvGemm{
public:
float *cpuKernel;
float alpha = 1.0f;
float beta = 0.0f;
cudnnHandle_t convCudnn;
void* d_workspace{nullptr};
size_t workspace_bytes{0};
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
float *output;
float *kernel;
void initialize();
float *forward(float *input);
};
void ConvGemm::initialize(){
hipMalloc(&kernel,sizeof(float)*C*N*9);
hipMalloc(&this->output,sizeof(float)*N*H*W);
cudnnCreate(&convCudnn);
cudnnCreateTensorDescriptor(&convInputDescriptor);
cudnnSetTensor4dDescriptor(convInputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W);
cudnnCreateFilterDescriptor(&convKernelDescriptor);
cudnnSetFilter4dDescriptor(convKernelDescriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/N,
/*in_channels=*/C,
/*kernel_height=*/R,
/*kernel_width=*/S);
cudnnCreateConvolutionDescriptor(&convDesc);
cudnnSetConvolution2dDescriptor(convDesc,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
int batch_size{0}, channels{0}, height{0}, width{0};
cudnnGetConvolution2dForwardOutputDim(convDesc,
convInputDescriptor,
convKernelDescriptor,
&batch_size,
&channels,
&height,
&width);
cudnnCreateTensorDescriptor(&convOutputDescriptor);
cudnnSetTensor4dDescriptor(convOutputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/N,
/*image_height=*/H,
/*image_width=*/W);
cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
convInputDescriptor,
convKernelDescriptor,
convDesc,
convOutputDescriptor,
CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM,
&workspace_bytes);
hipMalloc(&d_workspace, workspace_bytes);
unsigned int kernelSize = R*S*C*N; // total number of filter weights
this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
for(int i=0;i<kernelSize;++i){
this->cpuKernel[i] = 1.0f;
}
hipMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),hipMemcpyHostToDevice);
free(cpuKernel);
}
float * ConvGemm::forward(float *input) {
hipMemset(output, 0, 1*N*H*W*sizeof(float));
checkCUDNN(cudnnConvolutionForward(convCudnn,
&alpha,
convInputDescriptor,
input,
convKernelDescriptor,
kernel,
convDesc,
CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM,
d_workspace,
workspace_bytes,
&beta,
convOutputDescriptor,
output));
return output;
}
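// cuDNN reference path using the WINOGRAD_NONFUSED forward algorithm; otherwise identical setup to ConvGemm.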
class ConvWinogradeNon{
public:
float *cpuKernel;
float alpha = 1.0f;
float beta = 0.0f;
cudnnHandle_t convCudnn;
void* d_workspace{nullptr};
size_t workspace_bytes{0};
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
float *output;
float *kernel;
void initialize();
float *forward(float *input);
};
void ConvWinogradeNon::initialize(){
hipMalloc(&kernel,sizeof(float)*C*N*9);
hipMalloc(&this->output,sizeof(float)*N*H*W);
cudnnCreate(&convCudnn);
cudnnCreateTensorDescriptor(&convInputDescriptor);
cudnnSetTensor4dDescriptor(convInputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W);
cudnnCreateFilterDescriptor(&convKernelDescriptor);
cudnnSetFilter4dDescriptor(convKernelDescriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/N,
/*in_channels=*/C,
/*kernel_height=*/R,
/*kernel_width=*/S);
cudnnCreateConvolutionDescriptor(&convDesc);
cudnnSetConvolution2dDescriptor(convDesc,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
int batch_size{0}, channels{0}, height{0}, width{0};
cudnnGetConvolution2dForwardOutputDim(convDesc,
convInputDescriptor,
convKernelDescriptor,
&batch_size,
&channels,
&height,
&width);
cudnnCreateTensorDescriptor(&convOutputDescriptor);
cudnnSetTensor4dDescriptor(convOutputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/N,
/*image_height=*/H,
/*image_width=*/W);
cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
convInputDescriptor,
convKernelDescriptor,
convDesc,
convOutputDescriptor,
CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED,
&workspace_bytes);
hipMalloc(&d_workspace, workspace_bytes);
unsigned int kernelSize = R*S*C*N; // total number of filter weights
this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
for(int i=0;i<kernelSize;++i){
this->cpuKernel[i] = 1.0f;
}
hipMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),hipMemcpyHostToDevice);
free(cpuKernel);
}
float * ConvWinogradeNon::forward(float *input) {
hipMemset(output, 0, 1*N*H*W*sizeof(float));
checkCUDNN(cudnnConvolutionForward(convCudnn,
&alpha,
convInputDescriptor,
input,
convKernelDescriptor,
kernel,
convDesc,
CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED,
d_workspace,
workspace_bytes,
&beta,
convOutputDescriptor,
output));
return output;
}
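// cuDNN reference path using the FFT forward algorithm; otherwise identical setup to ConvGemm.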
class ConvFFT{
public:
float *cpuKernel;
float alpha = 1.0f;
float beta = 0.0f;
cudnnHandle_t convCudnn;
void* d_workspace{nullptr};
size_t workspace_bytes{0};
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
float *output;
float *kernel;
void initialize();
float *forward(float *input);
};
void ConvFFT::initialize(){
hipMalloc(&kernel,sizeof(float)*C*N*9);
hipMalloc(&this->output,sizeof(float)*N*H*W);
cudnnCreate(&convCudnn);
cudnnCreateTensorDescriptor(&convInputDescriptor);
cudnnSetTensor4dDescriptor(convInputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W);
cudnnCreateFilterDescriptor(&convKernelDescriptor);
cudnnSetFilter4dDescriptor(convKernelDescriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/N,
/*in_channels=*/C,
/*kernel_height=*/R,
/*kernel_width=*/S);
cudnnCreateConvolutionDescriptor(&convDesc);
cudnnSetConvolution2dDescriptor(convDesc,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
int batch_size{0}, channels{0}, height{0}, width{0};
cudnnGetConvolution2dForwardOutputDim(convDesc,
convInputDescriptor,
convKernelDescriptor,
&batch_size,
&channels,
&height,
&width);
cudnnCreateTensorDescriptor(&convOutputDescriptor);
cudnnSetTensor4dDescriptor(convOutputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/N,
/*image_height=*/H,
/*image_width=*/W);
cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
convInputDescriptor,
convKernelDescriptor,
convDesc,
convOutputDescriptor,
CUDNN_CONVOLUTION_FWD_ALGO_FFT,
&workspace_bytes);
hipMalloc(&d_workspace, workspace_bytes);
unsigned int kernelSize = R*S*C*N; // total number of filter weights
this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
for(int i=0;i<kernelSize;++i){
this->cpuKernel[i] = 1.0f;
}
hipMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),hipMemcpyHostToDevice);
free(cpuKernel);
}
float * ConvFFT::forward(float *input) {
hipMemset(output, 0, 1*N*H*W*sizeof(float));
checkCUDNN(cudnnConvolutionForward(convCudnn,
&alpha,
convInputDescriptor,
input,
convKernelDescriptor,
kernel,
convDesc,
CUDNN_CONVOLUTION_FWD_ALGO_FFT,
d_workspace,
workspace_bytes,
&beta,
convOutputDescriptor,
output));
return output;
}
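// Cooperatively copies a TC-channel input tile into zero-padded shared memory; h_offset == 1 marks a tile at the top image border, shifting the copied rows down one so the zero padding row is preserved.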
__device__ void load_input_2_shared_memory(float *input, float *shared_input, unsigned int h_start,
unsigned int h_end, unsigned int h_offset, unsigned int c_start,
unsigned int warp_id, unsigned int lane_id, unsigned int warp_size){
switch(h_offset){
case 0:
for(unsigned int c = warp_id; c<TC; c+=TWS){
for(unsigned int i=lane_id; i<(h_end - h_start) * W; i+=warp_size){
unsigned int r = i/W;
unsigned int s = i%W;
shared_input[c*(TH + 2)*(WPAD) + r * WPAD + s + 1] = input[(c_start + c) * H * W + h_start * W + i];
}
}
break;
case 1:
for(unsigned int c = warp_id; c<TC; c+=TWS){
for(unsigned int i=lane_id; i<(h_end - h_start) * W; i+=warp_size){
unsigned int r = i/W;
unsigned int s = i%W;
shared_input[c*(TH + 2)*(WPAD) + (1 + r) * WPAD + s + 1] = input[(c_start + c) * H * W + h_start * W + i];
}
}
break;
}
}
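// Flushes a thread's TH x TW partial-result tile to global memory with atomicAdd; write_h/write_w carry the tile extents clipped at the image border.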
__device__ __forceinline__ void switch_write_back(unsigned int write_h, unsigned int write_w, unsigned int h_out_start, unsigned int w_out_start, unsigned int n, float * outputs, float * temp_result){
switch(write_h){
case 1:
switch(write_w){
case 1:
#pragma unroll
for (unsigned int th = 0; th < 1; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 1; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
}
break;
case 2:
switch(write_w){
case 1:
#pragma unroll
for (unsigned int th = 0; th < 2; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 1; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
}
break;
}
}
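// Tiled direct 3x3 convolution: each block covers one (channel-group, row-tile) pair, stages the padded input tile in dynamic shared memory, and each thread accumulates a TH x TW output tile for a single output channel n.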
__global__ void conv2d(float * __restrict__ input,const float * __restrict__ kernel, float * __restrict__ outputs){
extern __shared__ float shared_input[];
const unsigned int tile_id = blockIdx.x;
const unsigned int tc_id = tile_id / THS;
const unsigned int th_id = tile_id % THS;
const unsigned int tw_id = threadIdx.x / N;
const int h_out_start = th_id * TH;
const int w_out_start = tw_id * TW;
const unsigned int warp_id = tw_id;
const unsigned int lane_id = threadIdx.x % N;
float data_array[9];
float temp_result[TH*TW] = {0.0f};
for(unsigned int i=threadIdx.x;i<TC*(TH+2)*WPAD;i+=blockDim.x){
shared_input[i] = 0.0f;
}
unsigned int n = lane_id;
unsigned int c_offset = tc_id * TC;
int h_offset = (h_out_start == 0)?1:0;
int h_padded_start = h_out_start;
int h_padded_end = min(h_padded_start + TH + 2, H + 2);
int h_non_padded_start = max(h_out_start - 1, 0);
int h_non_padded_end = min(H, h_padded_end - 1);
__syncthreads();
load_input_2_shared_memory(input, shared_input, h_non_padded_start, h_non_padded_end, h_offset, c_offset, warp_id, lane_id, N);
__syncthreads();
#pragma unroll
for(unsigned int c=0;c<TC;c++){
#pragma unroll
for(unsigned int r=0;r<R;++r){
#pragma unroll
for(unsigned int s=0;s<S;++s){
data_array[r*S+s] = kernel[(c + c_offset)*N*9+r*3*N+s*N+n];
}
}
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 0]*data_array[0];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 1]*data_array[1];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 2]*data_array[2];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 0]*data_array[0];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 0]*data_array[3];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[1];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[4];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[2];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[5];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 0]*data_array[3];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 0]*data_array[6];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[4];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[7];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[5];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[8];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 0]*data_array[6];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 1]*data_array[7];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 2]*data_array[8];
}
switch_write_back(min(TH, H - h_out_start), min(TW, W - w_out_start), h_out_start, w_out_start, n, outputs, temp_result);
}
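// Sum of absolute element-wise differences, used to compare the custom kernel output against the cuDNN result.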
float check_diff(float *x, float *y, unsigned int size){
float diff = 0.0f;
#pragma omp parallel for reduction(+ : diff)
for(unsigned int i=0;i<size;++i){
diff += abs(x[i] - y[i]);
}
return diff;
}
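// Benchmark driver: runs the three cuDNN algorithms, the generated kernel, and the tiled conv2d kernel on random input, times each with HIP events, and appends the timings and speedups to a CSV.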
int main(void){
float *input = new float[C*H*W];
time_t t;
float *matrix;
hipMalloc(&matrix,C*(TH+2)*(TW+2)*THS*TWS*sizeof(float));
hipMemset(matrix,0,C*(TH+2)*(TW+2)*THS*TWS*sizeof(float));
srand((unsigned) time(&t));
for(int i =0;i<C*H*W;++i){
input[i] = rand() % 10;
}
float *device_input;
hipMalloc(&device_input,C*H*W*sizeof(float));
hipMemcpy(device_input,input,C*H*W*sizeof(float),hipMemcpyHostToDevice);
float *K = new float[C*N*9];
for(int i=0;i<C*N*9;++i){
K[i] = 1.0f;
}
ConvGemm convGemm;
convGemm.initialize();
ConvWinogradeNon convWinogradeNon;
convWinogradeNon.initialize();
ConvFFT convFFT;
convFFT.initialize();
float *out_cudnn;
float *out_cudnn_host = new float[N*H*W];
hipEvent_t event_start;
hipEvent_t event_stop;
hipEventCreate(&event_start);
hipEventCreate(&event_stop);
out_cudnn = convGemm.forward(device_input);
hipMemcpy(out_cudnn_host,out_cudnn,N*H*W*sizeof(float),hipMemcpyDeviceToHost);
out_cudnn = convFFT.forward(device_input);
out_cudnn = convWinogradeNon.forward(device_input);
float *device_K;
float *device_out;
hipMalloc(&device_out,H*W*N*sizeof(float));
hipMemset(device_out,0,H*W*N*sizeof(float));
hipMalloc(&device_K,C*N*9*sizeof(float));
hipMemcpy(device_K,K,C*N*9*sizeof(float),hipMemcpyHostToDevice);
hipEventRecord(event_start);
convGemm.forward(device_input);
hipEventRecord(event_stop);
hipEventSynchronize(event_stop);
float cudnnGemmTime;
hipEventElapsedTime(&cudnnGemmTime, event_start, event_stop);
hipEventRecord(event_start);
convWinogradeNon.forward(device_input);
hipEventRecord(event_stop);
hipEventSynchronize(event_stop);
float cudnnWinogradeTimeNon;
hipEventElapsedTime(&cudnnWinogradeTimeNon, event_start, event_stop);
hipEventRecord(event_start);
convFFT.forward(device_input);
hipEventRecord(event_stop);
hipEventSynchronize(event_stop);
float cudnnFFTTime;
hipEventElapsedTime(&cudnnFFTTime, event_start, event_stop);
dim3 grid(1,7,8);
dim3 block(14,1,2);
hipEventRecord(event_start);
hipLaunchKernelGGL(default_function_kernel0, dim3(grid), dim3(block), 0, 0, device_input, device_K, device_out);
hipEventRecord(event_stop);
hipEventSynchronize(event_stop);
float time_tvm;
hipEventElapsedTime(&time_tvm, event_start, event_stop);
float *out_tvm = new float[N*H*W];
hipMemcpy(out_tvm,device_out,N*H*W*sizeof(float),hipMemcpyDeviceToHost);
hipMemset(device_out, 0, sizeof(float)*N*H*W);
chkerr(hipFuncSetAttribute(conv2d,hipFuncAttributeMaxDynamicSharedMemorySize, TC*(TH+2)*(WPAD)*4));
hipEventRecord(event_start);
hipLaunchKernelGGL(conv2d, dim3(TCS*THS), dim3(N * TWS), TC*(TH+2)*(WPAD)*4, 0, device_input, device_K, device_out);
hipEventRecord(event_stop);
hipEventSynchronize(event_stop);
float time_tdc;
hipEventElapsedTime(&time_tdc, event_start, event_stop);
float *out_tdc = new float[N*H*W];
hipMemcpy(out_tdc,device_out,N*H*W*sizeof(float),hipMemcpyDeviceToHost);
ofstream outfile;
char buffer[1000];
int ret = sprintf(buffer,"%d,%d,%d,%d,%f,%f,%f,%f,%f,%f,%f,%f,%f\n",N,C,H,W,
cudnnFFTTime,cudnnWinogradeTimeNon,cudnnGemmTime,time_tvm,time_tdc,
cudnnFFTTime/time_tdc,cudnnWinogradeTimeNon/time_tdc,cudnnGemmTime/time_tdc,time_tvm/time_tdc);
outfile.open("../../evaluation_outcome/2080Ti-layers-eval-modeling.csv", std::ios_base::app);
outfile << buffer;
float difference = check_diff(out_cudnn_host, out_tdc, N*H*W);
cout<<N<<","<<C<<","<<H<<","<<W<<","<<cudnnFFTTime<<","<<cudnnWinogradeTimeNon<<","<<cudnnGemmTime<<","<<
time_tvm<<","<<time_tdc<<","<<cudnnFFTTime/time_tdc<<","<<cudnnWinogradeTimeNon/time_tdc<<","<<
cudnnGemmTime/time_tdc<<","<<time_tvm/time_tdc<<endl;
return 0;
}
|
e6a5ed915c75359878d9576d2571e06a9a6d0214.cu
|
#include <cudnn.h>
#include <stdio.h>
#include <cuda.h>
#include <malloc.h>
#include <cstdlib>
#include <time.h>
#include <iostream>
#include <sys/types.h>
#include <errno.h>
#include <vector>
#include <fstream>
#include <string>
#include <omp.h>
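// Problem and tiling parameters: C input channels, N output channels, HxW spatial size; each thread computes a TH x TW output tile, TC input channels are staged per shared-memory tile, WPAD is the padded row width.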
#define TH 2
#define TW 1
#define TC 16
#define C 64
#define N 32
#define H 14
#define W 14
#define TCS ((C-1)/TC + 1)
#define THS ((H-1)/TH + 1)
#define TWS ((W-1)/TW+1)
#define WPAD (TWS*TW + 2)
#define R 3
#define S 3
using namespace std;
#define checkCUDNN(expression) \
{ \
cudnnStatus_t status = (expression); \
if (status != CUDNN_STATUS_SUCCESS) { \
std::cerr << "Error on line " << __LINE__ << ": " \
<< cudnnGetErrorString(status) << std::endl; \
std::exit(EXIT_FAILURE); \
} \
}
inline void chkerr(cudaError_t code)
{
if (code != cudaSuccess)
{
std::cerr << "ERROR!!!:" << cudaGetErrorString(code) <<endl;
exit(-1);
}
}
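// Convolution kernel (appears machine-generated, TVM-style) specialized for this layer's fixed shapes; stages the padded input and the weights in static shared memory.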
extern "C" __global__ void default_function_kernel0(float* __restrict__ data, float* __restrict__ kernel, float* __restrict__ compute) {
float compute_local[4];
__shared__ float pad_temp_shared[2048];
__shared__ float kernel_shared[1152];
float pad_temp_shared_local[8];
float kernel_shared_local[12];
compute_local[(0)] = 0.000000e+00f;
compute_local[(2)] = 0.000000e+00f;
compute_local[(1)] = 0.000000e+00f;
compute_local[(3)] = 0.000000e+00f;
for (int rc_outer = 0; rc_outer < 2; ++rc_outer) {
__syncthreads();
pad_temp_shared[(((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)))] = (((((1 <= ((((int)blockIdx.y) * 2) + (((((int)threadIdx.x) * 74) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + (((((int)threadIdx.x) * 74) & 63) >> 4)) < 15)) && (1 <= ((((int)threadIdx.x) * 74) & 15))) && (((((int)threadIdx.x) * 74) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + (((((int)threadIdx.x) * 74) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + ((((((int)threadIdx.x) * 74) & 63) >> 4) * 14)) + ((((int)threadIdx.x) * 74) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 1))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 1) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 1) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 1) & 15))) && ((((((int)threadIdx.x) * 74) + 1) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 1) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 1) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 1) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 2))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 2) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 2) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 2) & 15))) && ((((((int)threadIdx.x) * 74) + 2) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 2) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 2) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 2) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 3))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 3) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 3) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 3) & 15))) && ((((((int)threadIdx.x) * 74) + 3) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 3) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 3) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 3) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 4))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 4) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 4) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 4) & 15))) && ((((((int)threadIdx.x) * 74) + 4) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 4) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 4) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 4) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 5))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 5) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 5) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 5) & 15))) && ((((((int)threadIdx.x) * 74) + 5) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 5) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 5) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 5) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 6))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 6) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 6) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 6) & 15))) && ((((((int)threadIdx.x) * 74) + 6) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 6) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 6) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 6) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 7))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 7) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 7) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 7) & 15))) && ((((((int)threadIdx.x) * 74) + 7) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 7) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 7) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 7) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 8))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 8) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 8) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 8) & 15))) && ((((((int)threadIdx.x) * 74) + 8) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 8) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 8) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 8) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 9))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 9) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 9) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 9) & 15))) && ((((((int)threadIdx.x) * 74) + 9) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 9) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 9) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 9) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 10))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 10) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 10) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 10) & 15))) && ((((((int)threadIdx.x) * 74) + 10) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 10) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 10) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 10) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 11))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 11) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 11) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 11) & 15))) && ((((((int)threadIdx.x) * 74) + 11) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 11) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 11) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 11) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 12))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 12) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 12) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 12) & 15))) && ((((((int)threadIdx.x) * 74) + 12) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 12) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 12) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 12) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 13))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 13) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 13) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 13) & 15))) && ((((((int)threadIdx.x) * 74) + 13) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 13) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 13) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 13) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 14))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 14) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 14) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 14) & 15))) && ((((((int)threadIdx.x) * 74) + 14) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 14) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 14) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 14) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 15))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 15) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 15) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 15) & 15))) && ((((((int)threadIdx.x) * 74) + 15) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 15) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 15) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 15) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 16))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 16) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 16) & 63) >> 4)) < 15)) && (1 <= ((((int)threadIdx.x) * 74) & 15))) && (((((int)threadIdx.x) * 74) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 16) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 16) & 63) >> 4) * 14)) + ((((int)threadIdx.x) * 74) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 17))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 17) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 17) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 1) & 15))) && ((((((int)threadIdx.x) * 74) + 1) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 17) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 17) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 1) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 18))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 18) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 18) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 2) & 15))) && ((((((int)threadIdx.x) * 74) + 2) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 18) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 18) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 2) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 19))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 19) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 19) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 3) & 15))) && ((((((int)threadIdx.x) * 74) + 3) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 19) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 19) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 3) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 20))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 20) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 20) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 4) & 15))) && ((((((int)threadIdx.x) * 74) + 4) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 20) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 20) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 4) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 21))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 21) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 21) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 5) & 15))) && ((((((int)threadIdx.x) * 74) + 5) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 21) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 21) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 5) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 22))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 22) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 22) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 6) & 15))) && ((((((int)threadIdx.x) * 74) + 6) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 22) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 22) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 6) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 23))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 23) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 23) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 7) & 15))) && ((((((int)threadIdx.x) * 74) + 7) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 23) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 23) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 7) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 24))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 24) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 24) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 8) & 15))) && ((((((int)threadIdx.x) * 74) + 8) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 24) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 24) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 8) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 25))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 25) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 25) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 9) & 15))) && ((((((int)threadIdx.x) * 74) + 9) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 25) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 25) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 9) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 26))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 26) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 26) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 10) & 15))) && ((((((int)threadIdx.x) * 74) + 10) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 26) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 26) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 10) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 27))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 27) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 27) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 11) & 15))) && ((((((int)threadIdx.x) * 74) + 11) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 27) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 27) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 11) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 28))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 28) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 28) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 12) & 15))) && ((((((int)threadIdx.x) * 74) + 12) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 28) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 28) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 12) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 29))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 29) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 29) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 13) & 15))) && ((((((int)threadIdx.x) * 74) + 13) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 29) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 29) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 13) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 30))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 30) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 30) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 14) & 15))) && ((((((int)threadIdx.x) * 74) + 14) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 30) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 30) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 14) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 31))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 31) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 31) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 15) & 15))) && ((((((int)threadIdx.x) * 74) + 15) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 31) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 31) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 15) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 32))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 32) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 32) & 63) >> 4)) < 15)) && (1 <= ((((int)threadIdx.x) * 74) & 15))) && (((((int)threadIdx.x) * 74) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 32) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 32) & 63) >> 4) * 14)) + ((((int)threadIdx.x) * 74) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 33))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 33) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 33) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 1) & 15))) && ((((((int)threadIdx.x) * 74) + 1) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 33) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 33) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 1) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 34))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 34) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 34) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 2) & 15))) && ((((((int)threadIdx.x) * 74) + 2) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 34) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 34) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 2) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 35))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 35) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 35) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 3) & 15))) && ((((((int)threadIdx.x) * 74) + 3) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 35) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 35) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 3) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 36))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 36) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 36) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 4) & 15))) && ((((((int)threadIdx.x) * 74) + 4) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 36) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 36) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 4) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 37))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 37) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 37) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 5) & 15))) && ((((((int)threadIdx.x) * 74) + 5) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 37) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 37) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 5) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 38))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 38) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 38) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 6) & 15))) && ((((((int)threadIdx.x) * 74) + 6) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 38) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 38) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 6) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 39))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 39) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 39) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 7) & 15))) && ((((((int)threadIdx.x) * 74) + 7) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 39) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 39) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 7) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 40))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 40) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 40) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 8) & 15))) && ((((((int)threadIdx.x) * 74) + 8) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 40) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 40) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 8) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 41))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 41) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 41) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 9) & 15))) && ((((((int)threadIdx.x) * 74) + 9) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 41) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 41) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 9) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 42))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 42) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 42) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 10) & 15))) && ((((((int)threadIdx.x) * 74) + 10) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 42) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 42) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 10) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 43))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 43) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 43) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 11) & 15))) && ((((((int)threadIdx.x) * 74) + 11) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 43) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 43) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 11) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 44))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 44) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 44) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 12) & 15))) && ((((((int)threadIdx.x) * 74) + 12) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 44) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 44) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 12) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 45))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 45) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 45) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 13) & 15))) && ((((((int)threadIdx.x) * 74) + 13) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 45) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 45) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 13) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 46))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 46) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 46) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 14) & 15))) && ((((((int)threadIdx.x) * 74) + 14) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 46) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 46) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 14) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 47))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 47) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 47) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 15) & 15))) && ((((((int)threadIdx.x) * 74) + 15) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 47) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 47) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 15) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 48))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 48) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 48) & 63) >> 4)) < 15)) && (1 <= ((((int)threadIdx.x) * 74) & 15))) && (((((int)threadIdx.x) * 74) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 48) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 48) & 63) >> 4) * 14)) + ((((int)threadIdx.x) * 74) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 49))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 49) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 49) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 1) & 15))) && ((((((int)threadIdx.x) * 74) + 1) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 49) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 49) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 1) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 50))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 50) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 50) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 2) & 15))) && ((((((int)threadIdx.x) * 74) + 2) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 50) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 50) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 2) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 51))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 51) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 51) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 3) & 15))) && ((((((int)threadIdx.x) * 74) + 3) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 51) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 51) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 3) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 52))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 52) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 52) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 4) & 15))) && ((((((int)threadIdx.x) * 74) + 4) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 52) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 52) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 4) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 53))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 53) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 53) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 5) & 15))) && ((((((int)threadIdx.x) * 74) + 5) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 53) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 53) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 5) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 54))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 54) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 54) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 6) & 15))) && ((((((int)threadIdx.x) * 74) + 6) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 54) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 54) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 6) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 55))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 55) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 55) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 7) & 15))) && ((((((int)threadIdx.x) * 74) + 7) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 55) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 55) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 7) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 56))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 56) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 56) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 8) & 15))) && ((((((int)threadIdx.x) * 74) + 8) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 56) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 56) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 8) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 57))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 57) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 57) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 9) & 15))) && ((((((int)threadIdx.x) * 74) + 9) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 57) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 57) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 9) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 58))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 58) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 58) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 10) & 15))) && ((((((int)threadIdx.x) * 74) + 10) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 58) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 58) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 10) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 59))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 59) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 59) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 11) & 15))) && ((((((int)threadIdx.x) * 74) + 11) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 59) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 59) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 11) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 60))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 60) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 60) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 12) & 15))) && ((((((int)threadIdx.x) * 74) + 12) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 60) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 60) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 12) & 15)) - 15))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 61))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 61) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 61) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 13) & 15))) && ((((((int)threadIdx.x) * 74) + 13) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 61) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 61) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 13) & 15)) - 15))] : 0.000000e+00f);
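  // Guarded tail of the cooperative input-tile copy: the last elements of each
  // thread's 74-float strip (offsets 62..73) are written only while the index stays
  // inside the 2048-float pad_temp_shared buffer (threadIdx.x < 13 plus the explicit
  // bounds checks), using the same ternary to zero-fill the one-pixel padding border.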
if (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 74) + 62) >> 6)) < 32) {
if (((((int)threadIdx.z) * 64) + (((((int)threadIdx.x) * 74) + 62) >> 4)) < 128) {
if (((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) < 1986) {
if (((int)threadIdx.x) < 13) {
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 62))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 62) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 62) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 14) & 15))) && ((((((int)threadIdx.x) * 74) + 14) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 62) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 62) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 14) & 15)) - 15))] : 0.000000e+00f);
}
}
}
}
if (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 74) + 63) >> 6)) < 32) {
if (((((int)threadIdx.z) * 64) + (((((int)threadIdx.x) * 74) + 63) >> 4)) < 128) {
if (((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) < 1985) {
if (((int)threadIdx.x) < 13) {
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 63))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 63) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 63) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 15) & 15))) && ((((((int)threadIdx.x) * 74) + 15) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 63) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 63) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 15) & 15)) - 15))] : 0.000000e+00f);
}
}
}
}
if (((((int)threadIdx.z) * 16) + ((((int)threadIdx.x) * 74) >> 6)) < 31) {
if (((((int)threadIdx.z) * 64) + ((((int)threadIdx.x) * 74) >> 4)) < 124) {
if (((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) < 1984) {
if (((int)threadIdx.x) < 13) {
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 64))] = (((((1 <= ((((int)blockIdx.y) * 2) + (((((int)threadIdx.x) * 74) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + (((((int)threadIdx.x) * 74) & 63) >> 4)) < 15)) && (1 <= ((((int)threadIdx.x) * 74) & 15))) && (((((int)threadIdx.x) * 74) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + (((((int)threadIdx.x) * 74) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + ((((((int)threadIdx.x) * 74) & 63) >> 4) * 14)) + ((((int)threadIdx.x) * 74) & 15)) + 181))] : 0.000000e+00f);
}
}
}
}
if (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 74) + 65) >> 6)) < 32) {
if (((((int)threadIdx.z) * 64) + (((((int)threadIdx.x) * 74) + 65) >> 4)) < 128) {
if (((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) < 1983) {
if (((int)threadIdx.x) < 13) {
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 65))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 1) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 1) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 1) & 15))) && ((((((int)threadIdx.x) * 74) + 1) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 65) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 1) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 1) & 15)) - 15))] : 0.000000e+00f);
}
}
}
}
if (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 74) + 66) >> 6)) < 32) {
if (((((int)threadIdx.z) * 64) + (((((int)threadIdx.x) * 74) + 66) >> 4)) < 128) {
if (((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) < 1982) {
if (((int)threadIdx.x) < 13) {
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 66))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 2) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 2) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 2) & 15))) && ((((((int)threadIdx.x) * 74) + 2) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 66) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 2) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 2) & 15)) - 15))] : 0.000000e+00f);
}
}
}
}
if (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 74) + 67) >> 6)) < 32) {
if (((((int)threadIdx.z) * 64) + (((((int)threadIdx.x) * 74) + 67) >> 4)) < 128) {
if (((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) < 1981) {
if (((int)threadIdx.x) < 13) {
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 67))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 3) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 3) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 3) & 15))) && ((((((int)threadIdx.x) * 74) + 3) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 67) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 3) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 3) & 15)) - 15))] : 0.000000e+00f);
}
}
}
}
if (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 74) + 68) >> 6)) < 32) {
if (((((int)threadIdx.z) * 64) + (((((int)threadIdx.x) * 74) + 68) >> 4)) < 128) {
if (((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) < 1980) {
if (((int)threadIdx.x) < 13) {
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 68))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 4) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 4) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 4) & 15))) && ((((((int)threadIdx.x) * 74) + 4) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 68) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 4) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 4) & 15)) - 15))] : 0.000000e+00f);
}
}
}
}
if (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 74) + 69) >> 6)) < 32) {
if (((((int)threadIdx.z) * 64) + (((((int)threadIdx.x) * 74) + 69) >> 4)) < 128) {
if (((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) < 1979) {
if (((int)threadIdx.x) < 13) {
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 69))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 5) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 5) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 5) & 15))) && ((((((int)threadIdx.x) * 74) + 5) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 69) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 5) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 5) & 15)) - 15))] : 0.000000e+00f);
}
}
}
}
if (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 74) + 70) >> 6)) < 32) {
if (((((int)threadIdx.z) * 64) + (((((int)threadIdx.x) * 74) + 70) >> 4)) < 128) {
if (((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) < 1978) {
if (((int)threadIdx.x) < 13) {
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 70))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 6) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 6) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 6) & 15))) && ((((((int)threadIdx.x) * 74) + 6) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 70) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 6) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 6) & 15)) - 15))] : 0.000000e+00f);
}
}
}
}
if (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 74) + 71) >> 6)) < 32) {
if (((((int)threadIdx.z) * 64) + (((((int)threadIdx.x) * 74) + 71) >> 4)) < 128) {
if (((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) < 1977) {
if (((int)threadIdx.x) < 13) {
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 71))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 7) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 7) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 7) & 15))) && ((((((int)threadIdx.x) * 74) + 7) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 71) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 7) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 7) & 15)) - 15))] : 0.000000e+00f);
}
}
}
}
if (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 74) + 72) >> 6)) < 32) {
if (((((int)threadIdx.z) * 64) + (((((int)threadIdx.x) * 74) + 72) >> 4)) < 128) {
if (((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) < 1976) {
if (((int)threadIdx.x) < 13) {
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 72))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 8) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 8) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 8) & 15))) && ((((((int)threadIdx.x) * 74) + 8) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 72) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 8) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 8) & 15)) - 15))] : 0.000000e+00f);
}
}
}
}
if (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 74) + 73) >> 6)) < 32) {
if (((((int)threadIdx.z) * 64) + (((((int)threadIdx.x) * 74) + 73) >> 4)) < 128) {
if (((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) < 1975) {
if (((int)threadIdx.x) < 13) {
pad_temp_shared[((((((int)threadIdx.z) * 1024) + (((int)threadIdx.x) * 74)) + 73))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 9) & 63) >> 4))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 74) + 9) & 63) >> 4)) < 15)) && (1 <= (((((int)threadIdx.x) * 74) + 9) & 15))) && ((((((int)threadIdx.x) * 74) + 9) & 15) < 15)) ? data[((((((((rc_outer * 6272) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 74) + 73) >> 6) * 196)) + (((int)blockIdx.y) * 28)) + (((((((int)threadIdx.x) * 74) + 9) & 63) >> 4) * 14)) + (((((int)threadIdx.x) * 74) + 9) & 15)) - 15))] : 0.000000e+00f);
}
}
}
}
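  // Stage the filter weights for this rc_outer input-channel block into kernel_shared:
  // each thread copies up to 42 consecutive floats, with the remainder handled by the
  // guarded stores below.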
kernel_shared[(((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)))] = kernel[((((((((int)blockIdx.z) * 2304) + (((int)threadIdx.z) * 1152)) + (((((int)threadIdx.x) * 14) / 96) * 576)) + (rc_outer * 288)) + (((((int)threadIdx.x) * 14) % 96) * 3)))];
kernel_shared[((((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) + 1))] = kernel[(((((((((int)blockIdx.z) * 2304) + (((int)threadIdx.z) * 1152)) + (((((int)threadIdx.x) * 14) / 96) * 576)) + (rc_outer * 288)) + (((((int)threadIdx.x) * 14) % 96) * 3)) + 1))];
kernel_shared[((((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) + 2))] = kernel[(((((((((int)blockIdx.z) * 2304) + (((int)threadIdx.z) * 1152)) + (((((int)threadIdx.x) * 14) / 96) * 576)) + (rc_outer * 288)) + (((((int)threadIdx.x) * 14) % 96) * 3)) + 2))];
kernel_shared[((((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) + 3))] = kernel[((((((((int)blockIdx.z) * 2304) + (((int)threadIdx.z) * 1152)) + ((((((int)threadIdx.x) * 14) + 1) / 96) * 576)) + (rc_outer * 288)) + ((((((int)threadIdx.x) * 14) + 1) % 96) * 3)))];
kernel_shared[((((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) + 4))] = kernel[(((((((((int)blockIdx.z) * 2304) + (((int)threadIdx.z) * 1152)) + ((((((int)threadIdx.x) * 14) + 1) / 96) * 576)) + (rc_outer * 288)) + ((((((int)threadIdx.x) * 14) + 1) % 96) * 3)) + 1))];
kernel_shared[((((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) + 5))] = kernel[(((((((((int)blockIdx.z) * 2304) + (((int)threadIdx.z) * 1152)) + ((((((int)threadIdx.x) * 14) + 1) / 96) * 576)) + (rc_outer * 288)) + ((((((int)threadIdx.x) * 14) + 1) % 96) * 3)) + 2))];
kernel_shared[((((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) + 6))] = kernel[((((((((int)blockIdx.z) * 2304) + (((int)threadIdx.z) * 1152)) + ((((((int)threadIdx.x) * 14) + 2) / 96) * 576)) + (rc_outer * 288)) + ((((((int)threadIdx.x) * 14) + 2) % 96) * 3)))];
kernel_shared[((((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) + 7))] = kernel[(((((((((int)blockIdx.z) * 2304) + (((int)threadIdx.z) * 1152)) + ((((((int)threadIdx.x) * 14) + 2) / 96) * 576)) + (rc_outer * 288)) + ((((((int)threadIdx.x) * 14) + 2) % 96) * 3)) + 1))];
kernel_shared[((((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) + 8))] = kernel[(((((((((int)blockIdx.z) * 2304) + (((int)threadIdx.z) * 1152)) + ((((((int)threadIdx.x) * 14) + 2) / 96) * 576)) + (rc_outer * 288)) + ((((((int)threadIdx.x) * 14) + 2) % 96) * 3)) + 2))];
kernel_shared[((((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) + 9))] = kernel[((((((((int)blockIdx.z) * 2304) + (((int)threadIdx.z) * 1152)) + ((((((int)threadIdx.x) * 14) + 3) / 96) * 576)) + (rc_outer * 288)) + ((((((int)threadIdx.x) * 14) + 3) % 96) * 3)))];
kernel_shared[((((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) + 10))] = kernel[(((((((((int)blockIdx.z) * 2304) + (((int)threadIdx.z) * 1152)) + ((((((int)threadIdx.x) * 14) + 3) / 96) * 576)) + (rc_outer * 288)) + ((((((int)threadIdx.x) * 14) + 3) % 96) * 3)) + 1))];
kernel_shared[((((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) + 11))] = kernel[(((((((((int)blockIdx.z) * 2304) + (((int)threadIdx.z) * 1152)) + ((((((int)threadIdx.x) * 14) + 3) / 96) * 576)) + (rc_outer * 288)) + ((((((int)threadIdx.x) * 14) + 3) % 96) * 3)) + 2))];
kernel_shared[((((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) + 12))] = kernel[((((((((int)blockIdx.z) * 2304) + (((int)threadIdx.z) * 1152)) + ((((((int)threadIdx.x) * 14) + 4) / 96) * 576)) + (rc_outer * 288)) + ((((((int)threadIdx.x) * 14) + 4) % 96) * 3)))];
kernel_shared[((((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) + 13))] = kernel[(((((((((int)blockIdx.z) * 2304) + (((int)threadIdx.z) * 1152)) + ((((((int)threadIdx.x) * 14) + 4) / 96) * 576)) + (rc_outer * 288)) + ((((((int)threadIdx.x) * 14) + 4) % 96) * 3)) + 1))];
kernel_shared[((((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) + 14))] = kernel[(((((((((int)blockIdx.z) * 2304) + (((int)threadIdx.z) * 1152)) + ((((((int)threadIdx.x) * 14) + 4) / 96) * 576)) + (rc_outer * 288)) + ((((((int)threadIdx.x) * 14) + 4) % 96) * 3)) + 2))];
kernel_shared[((((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) + 15))] = kernel[((((((((int)blockIdx.z) * 2304) + (((int)threadIdx.z) * 1152)) + ((((((int)threadIdx.x) * 14) + 5) / 96) * 576)) + (rc_outer * 288)) + ((((((int)threadIdx.x) * 14) + 5) % 96) * 3)))];
kernel_shared[((((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) + 16))] = kernel[(((((((((int)blockIdx.z) * 2304) + (((int)threadIdx.z) * 1152)) + ((((((int)threadIdx.x) * 14) + 5) / 96) * 576)) + (rc_outer * 288)) + ((((((int)threadIdx.x) * 14) + 5) % 96) * 3)) + 1))];
kernel_shared[((((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) + 17))] = kernel[(((((((((int)blockIdx.z) * 2304) + (((int)threadIdx.z) * 1152)) + ((((((int)threadIdx.x) * 14) + 5) / 96) * 576)) + (rc_outer * 288)) + ((((((int)threadIdx.x) * 14) + 5) % 96) * 3)) + 2))];
kernel_shared[((((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) + 18))] = kernel[((((((((int)blockIdx.z) * 2304) + (((int)threadIdx.z) * 1152)) + ((((((int)threadIdx.x) * 14) + 6) / 96) * 576)) + (rc_outer * 288)) + ((((((int)threadIdx.x) * 14) + 6) % 96) * 3)))];
kernel_shared[((((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) + 19))] = kernel[(((((((((int)blockIdx.z) * 2304) + (((int)threadIdx.z) * 1152)) + ((((((int)threadIdx.x) * 14) + 6) / 96) * 576)) + (rc_outer * 288)) + ((((((int)threadIdx.x) * 14) + 6) % 96) * 3)) + 1))];
kernel_shared[((((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) + 20))] = kernel[(((((((((int)blockIdx.z) * 2304) + (((int)threadIdx.z) * 1152)) + ((((((int)threadIdx.x) * 14) + 6) / 96) * 576)) + (rc_outer * 288)) + ((((((int)threadIdx.x) * 14) + 6) % 96) * 3)) + 2))];
kernel_shared[((((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) + 21))] = kernel[((((((((int)blockIdx.z) * 2304) + (((int)threadIdx.z) * 1152)) + ((((((int)threadIdx.x) * 14) + 7) / 96) * 576)) + (rc_outer * 288)) + ((((((int)threadIdx.x) * 14) + 7) % 96) * 3)))];
kernel_shared[((((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) + 22))] = kernel[(((((((((int)blockIdx.z) * 2304) + (((int)threadIdx.z) * 1152)) + ((((((int)threadIdx.x) * 14) + 7) / 96) * 576)) + (rc_outer * 288)) + ((((((int)threadIdx.x) * 14) + 7) % 96) * 3)) + 1))];
kernel_shared[((((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) + 23))] = kernel[(((((((((int)blockIdx.z) * 2304) + (((int)threadIdx.z) * 1152)) + ((((((int)threadIdx.x) * 14) + 7) / 96) * 576)) + (rc_outer * 288)) + ((((((int)threadIdx.x) * 14) + 7) % 96) * 3)) + 2))];
kernel_shared[((((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) + 24))] = kernel[((((((((int)blockIdx.z) * 2304) + (((int)threadIdx.z) * 1152)) + ((((((int)threadIdx.x) * 14) + 8) / 96) * 576)) + (rc_outer * 288)) + ((((((int)threadIdx.x) * 14) + 8) % 96) * 3)))];
kernel_shared[((((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) + 25))] = kernel[(((((((((int)blockIdx.z) * 2304) + (((int)threadIdx.z) * 1152)) + ((((((int)threadIdx.x) * 14) + 8) / 96) * 576)) + (rc_outer * 288)) + ((((((int)threadIdx.x) * 14) + 8) % 96) * 3)) + 1))];
kernel_shared[((((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) + 26))] = kernel[(((((((((int)blockIdx.z) * 2304) + (((int)threadIdx.z) * 1152)) + ((((((int)threadIdx.x) * 14) + 8) / 96) * 576)) + (rc_outer * 288)) + ((((((int)threadIdx.x) * 14) + 8) % 96) * 3)) + 2))];
kernel_shared[((((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) + 27))] = kernel[((((((((int)blockIdx.z) * 2304) + (((int)threadIdx.z) * 1152)) + ((((((int)threadIdx.x) * 14) + 9) / 96) * 576)) + (rc_outer * 288)) + ((((((int)threadIdx.x) * 14) + 9) % 96) * 3)))];
kernel_shared[((((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) + 28))] = kernel[(((((((((int)blockIdx.z) * 2304) + (((int)threadIdx.z) * 1152)) + ((((((int)threadIdx.x) * 14) + 9) / 96) * 576)) + (rc_outer * 288)) + ((((((int)threadIdx.x) * 14) + 9) % 96) * 3)) + 1))];
kernel_shared[((((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) + 29))] = kernel[(((((((((int)blockIdx.z) * 2304) + (((int)threadIdx.z) * 1152)) + ((((((int)threadIdx.x) * 14) + 9) / 96) * 576)) + (rc_outer * 288)) + ((((((int)threadIdx.x) * 14) + 9) % 96) * 3)) + 2))];
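  // The remaining 12 weights per thread are stored under bounds checks so the copy
  // never runs past the 1152-float kernel_shared buffer or this block's slice of the
  // kernel tensor.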
if (((((int)threadIdx.z) * 2) + (((((int)threadIdx.x) * 14) + 10) / 96)) < 4) {
if (((((int)threadIdx.z) * 64) + (((((int)threadIdx.x) * 14) + 10) / 3)) < 128) {
if (((((int)threadIdx.z) * 192) + (((int)threadIdx.x) * 14)) < 374) {
if (((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) < 1122) {
if (((int)threadIdx.x) < 13) {
kernel_shared[((((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) + 30))] = kernel[((((((((int)blockIdx.z) * 2304) + (((int)threadIdx.z) * 1152)) + ((((((int)threadIdx.x) * 14) + 10) / 96) * 576)) + (rc_outer * 288)) + ((((((int)threadIdx.x) * 14) + 10) % 96) * 3)))];
}
}
}
}
}
if (((((int)threadIdx.z) * 2) + (((((int)threadIdx.x) * 14) + 10) / 96)) < 4) {
if (((((int)threadIdx.z) * 64) + (((((int)threadIdx.x) * 14) + 10) / 3)) < 128) {
if (((((int)threadIdx.z) * 192) + (((int)threadIdx.x) * 14)) < 374) {
if (((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) < 1121) {
if (((int)threadIdx.x) < 13) {
kernel_shared[((((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) + 31))] = kernel[(((((((((int)blockIdx.z) * 2304) + (((int)threadIdx.z) * 1152)) + ((((((int)threadIdx.x) * 14) + 10) / 96) * 576)) + (rc_outer * 288)) + ((((((int)threadIdx.x) * 14) + 10) % 96) * 3)) + 1))];
}
}
}
}
}
if (((((int)threadIdx.z) * 2) + (((((int)threadIdx.x) * 14) + 10) / 96)) < 4) {
if (((((int)threadIdx.z) * 64) + (((((int)threadIdx.x) * 14) + 10) / 3)) < 128) {
if (((((int)threadIdx.z) * 192) + (((int)threadIdx.x) * 14)) < 374) {
if (((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) < 1120) {
if (((int)threadIdx.x) < 13) {
kernel_shared[((((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) + 32))] = kernel[(((((((((int)blockIdx.z) * 2304) + (((int)threadIdx.z) * 1152)) + ((((((int)threadIdx.x) * 14) + 10) / 96) * 576)) + (rc_outer * 288)) + ((((((int)threadIdx.x) * 14) + 10) % 96) * 3)) + 2))];
}
}
}
}
}
if (((((int)threadIdx.z) * 2) + (((((int)threadIdx.x) * 14) + 11) / 96)) < 4) {
if (((((int)threadIdx.z) * 64) + (((((int)threadIdx.x) * 14) + 11) / 3)) < 128) {
if (((((int)threadIdx.z) * 192) + (((int)threadIdx.x) * 14)) < 373) {
if (((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) < 1119) {
if (((int)threadIdx.x) < 13) {
kernel_shared[((((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) + 33))] = kernel[((((((((int)blockIdx.z) * 2304) + (((int)threadIdx.z) * 1152)) + ((((((int)threadIdx.x) * 14) + 11) / 96) * 576)) + (rc_outer * 288)) + ((((((int)threadIdx.x) * 14) + 11) % 96) * 3)))];
}
}
}
}
}
if (((((int)threadIdx.z) * 2) + (((((int)threadIdx.x) * 14) + 11) / 96)) < 4) {
if (((((int)threadIdx.z) * 64) + (((((int)threadIdx.x) * 14) + 11) / 3)) < 128) {
if (((((int)threadIdx.z) * 192) + (((int)threadIdx.x) * 14)) < 373) {
if (((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) < 1118) {
if (((int)threadIdx.x) < 13) {
kernel_shared[((((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) + 34))] = kernel[(((((((((int)blockIdx.z) * 2304) + (((int)threadIdx.z) * 1152)) + ((((((int)threadIdx.x) * 14) + 11) / 96) * 576)) + (rc_outer * 288)) + ((((((int)threadIdx.x) * 14) + 11) % 96) * 3)) + 1))];
}
}
}
}
}
if (((((int)threadIdx.z) * 2) + (((((int)threadIdx.x) * 14) + 11) / 96)) < 4) {
if (((((int)threadIdx.z) * 64) + (((((int)threadIdx.x) * 14) + 11) / 3)) < 128) {
if (((((int)threadIdx.z) * 192) + (((int)threadIdx.x) * 14)) < 373) {
if (((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) < 1117) {
if (((int)threadIdx.x) < 13) {
kernel_shared[((((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) + 35))] = kernel[(((((((((int)blockIdx.z) * 2304) + (((int)threadIdx.z) * 1152)) + ((((((int)threadIdx.x) * 14) + 11) / 96) * 576)) + (rc_outer * 288)) + ((((((int)threadIdx.x) * 14) + 11) % 96) * 3)) + 2))];
}
}
}
}
}
if (((((int)threadIdx.z) * 2) + (((((int)threadIdx.x) * 14) + 12) / 96)) < 4) {
if (((((int)threadIdx.z) * 64) + ((((int)threadIdx.x) * 14) / 3)) < 124) {
if (((((int)threadIdx.z) * 192) + (((int)threadIdx.x) * 14)) < 372) {
if (((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) < 1116) {
if (((int)threadIdx.x) < 13) {
kernel_shared[((((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) + 36))] = kernel[((((((((int)blockIdx.z) * 2304) + (((int)threadIdx.z) * 1152)) + ((((((int)threadIdx.x) * 14) + 12) / 96) * 576)) + (rc_outer * 288)) + ((((((int)threadIdx.x) * 14) + 12) % 96) * 3)))];
}
}
}
}
}
if (((((int)threadIdx.z) * 2) + (((((int)threadIdx.x) * 14) + 12) / 96)) < 4) {
if (((((int)threadIdx.z) * 64) + ((((int)threadIdx.x) * 14) / 3)) < 124) {
if (((((int)threadIdx.z) * 192) + (((int)threadIdx.x) * 14)) < 372) {
if (((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) < 1115) {
if (((int)threadIdx.x) < 13) {
kernel_shared[((((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) + 37))] = kernel[(((((((((int)blockIdx.z) * 2304) + (((int)threadIdx.z) * 1152)) + ((((((int)threadIdx.x) * 14) + 12) / 96) * 576)) + (rc_outer * 288)) + ((((((int)threadIdx.x) * 14) + 12) % 96) * 3)) + 1))];
}
}
}
}
}
if (((((int)threadIdx.z) * 2) + (((((int)threadIdx.x) * 14) + 12) / 96)) < 4) {
if (((((int)threadIdx.z) * 64) + ((((int)threadIdx.x) * 14) / 3)) < 124) {
if (((((int)threadIdx.z) * 192) + (((int)threadIdx.x) * 14)) < 372) {
if (((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) < 1114) {
if (((int)threadIdx.x) < 13) {
kernel_shared[((((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) + 38))] = kernel[(((((((((int)blockIdx.z) * 2304) + (((int)threadIdx.z) * 1152)) + ((((((int)threadIdx.x) * 14) + 12) / 96) * 576)) + (rc_outer * 288)) + ((((((int)threadIdx.x) * 14) + 12) % 96) * 3)) + 2))];
}
}
}
}
}
if (((((int)threadIdx.z) * 2) + (((((int)threadIdx.x) * 14) + 13) / 96)) < 4) {
if (((((int)threadIdx.z) * 64) + (((((int)threadIdx.x) * 14) + 13) / 3)) < 128) {
if (((((int)threadIdx.z) * 192) + (((int)threadIdx.x) * 14)) < 371) {
if (((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) < 1113) {
if (((int)threadIdx.x) < 13) {
kernel_shared[((((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) + 39))] = kernel[((((((((int)blockIdx.z) * 2304) + (((int)threadIdx.z) * 1152)) + ((((((int)threadIdx.x) * 14) + 13) / 96) * 576)) + (rc_outer * 288)) + ((((((int)threadIdx.x) * 14) + 13) % 96) * 3)))];
}
}
}
}
}
if (((((int)threadIdx.z) * 2) + (((((int)threadIdx.x) * 14) + 13) / 96)) < 4) {
if (((((int)threadIdx.z) * 64) + (((((int)threadIdx.x) * 14) + 13) / 3)) < 128) {
if (((((int)threadIdx.z) * 192) + (((int)threadIdx.x) * 14)) < 371) {
if (((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) < 1112) {
if (((int)threadIdx.x) < 13) {
kernel_shared[((((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) + 40))] = kernel[(((((((((int)blockIdx.z) * 2304) + (((int)threadIdx.z) * 1152)) + ((((((int)threadIdx.x) * 14) + 13) / 96) * 576)) + (rc_outer * 288)) + ((((((int)threadIdx.x) * 14) + 13) % 96) * 3)) + 1))];
}
}
}
}
}
if (((((int)threadIdx.z) * 2) + (((((int)threadIdx.x) * 14) + 13) / 96)) < 4) {
if (((((int)threadIdx.z) * 64) + (((((int)threadIdx.x) * 14) + 13) / 3)) < 128) {
if (((((int)threadIdx.z) * 192) + (((int)threadIdx.x) * 14)) < 371) {
if (((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) < 1111) {
if (((int)threadIdx.x) < 13) {
kernel_shared[((((((int)threadIdx.z) * 576) + (((int)threadIdx.x) * 42)) + 41))] = kernel[(((((((((int)blockIdx.z) * 2304) + (((int)threadIdx.z) * 1152)) + ((((((int)threadIdx.x) * 14) + 13) / 96) * 576)) + (rc_outer * 288)) + ((((((int)threadIdx.x) * 14) + 13) % 96) * 3)) + 2))];
}
}
}
}
}
__syncthreads();
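  // Reduction over the staged tile: each of the 16 rc_inner_outer steps covers two
  // input channels, and every thread accumulates four partial sums in compute_local
  // (two adjacent output rows x two output filters). The three passes below apply the
  // three columns of the 3x3 filter at horizontal offsets 0, 1 and 2.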
for (int rc_inner_outer = 0; rc_inner_outer < 16; ++rc_inner_outer) {
pad_temp_shared_local[(0)] = pad_temp_shared[(((rc_inner_outer * 128) + ((int)threadIdx.x)))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((rc_inner_outer * 128) + ((int)threadIdx.x)) + 16))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((rc_inner_outer * 128) + ((int)threadIdx.x)) + 32))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((rc_inner_outer * 128) + ((int)threadIdx.x)) + 48))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((rc_inner_outer * 128) + ((int)threadIdx.x)) + 64))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((rc_inner_outer * 128) + ((int)threadIdx.x)) + 80))];
pad_temp_shared_local[(6)] = pad_temp_shared[((((rc_inner_outer * 128) + ((int)threadIdx.x)) + 96))];
pad_temp_shared_local[(7)] = pad_temp_shared[((((rc_inner_outer * 128) + ((int)threadIdx.x)) + 112))];
kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 288) + (rc_inner_outer * 18)))];
kernel_shared_local[(6)] = kernel_shared[((((((int)threadIdx.z) * 288) + (rc_inner_outer * 18)) + 576))];
kernel_shared_local[(1)] = kernel_shared[((((((int)threadIdx.z) * 288) + (rc_inner_outer * 18)) + 3))];
kernel_shared_local[(7)] = kernel_shared[((((((int)threadIdx.z) * 288) + (rc_inner_outer * 18)) + 579))];
kernel_shared_local[(2)] = kernel_shared[((((((int)threadIdx.z) * 288) + (rc_inner_outer * 18)) + 6))];
kernel_shared_local[(8)] = kernel_shared[((((((int)threadIdx.z) * 288) + (rc_inner_outer * 18)) + 582))];
kernel_shared_local[(3)] = kernel_shared[((((((int)threadIdx.z) * 288) + (rc_inner_outer * 18)) + 9))];
kernel_shared_local[(9)] = kernel_shared[((((((int)threadIdx.z) * 288) + (rc_inner_outer * 18)) + 585))];
kernel_shared_local[(4)] = kernel_shared[((((((int)threadIdx.z) * 288) + (rc_inner_outer * 18)) + 12))];
kernel_shared_local[(10)] = kernel_shared[((((((int)threadIdx.z) * 288) + (rc_inner_outer * 18)) + 588))];
kernel_shared_local[(5)] = kernel_shared[((((((int)threadIdx.z) * 288) + (rc_inner_outer * 18)) + 15))];
kernel_shared_local[(11)] = kernel_shared[((((((int)threadIdx.z) * 288) + (rc_inner_outer * 18)) + 591))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(6)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(7)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(1)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(8)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(9)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(4)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(10)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(4)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(10)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(5)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(11)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(5)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(11)]));
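  // Second filter column: the same multiply-accumulate pattern with the input window
  // and the weight indices shifted by one.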
pad_temp_shared_local[(0)] = pad_temp_shared[((((rc_inner_outer * 128) + ((int)threadIdx.x)) + 1))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((rc_inner_outer * 128) + ((int)threadIdx.x)) + 17))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((rc_inner_outer * 128) + ((int)threadIdx.x)) + 33))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((rc_inner_outer * 128) + ((int)threadIdx.x)) + 49))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((rc_inner_outer * 128) + ((int)threadIdx.x)) + 65))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((rc_inner_outer * 128) + ((int)threadIdx.x)) + 81))];
pad_temp_shared_local[(6)] = pad_temp_shared[((((rc_inner_outer * 128) + ((int)threadIdx.x)) + 97))];
pad_temp_shared_local[(7)] = pad_temp_shared[((((rc_inner_outer * 128) + ((int)threadIdx.x)) + 113))];
kernel_shared_local[(0)] = kernel_shared[((((((int)threadIdx.z) * 288) + (rc_inner_outer * 18)) + 1))];
kernel_shared_local[(6)] = kernel_shared[((((((int)threadIdx.z) * 288) + (rc_inner_outer * 18)) + 577))];
kernel_shared_local[(1)] = kernel_shared[((((((int)threadIdx.z) * 288) + (rc_inner_outer * 18)) + 4))];
kernel_shared_local[(7)] = kernel_shared[((((((int)threadIdx.z) * 288) + (rc_inner_outer * 18)) + 580))];
kernel_shared_local[(2)] = kernel_shared[((((((int)threadIdx.z) * 288) + (rc_inner_outer * 18)) + 7))];
kernel_shared_local[(8)] = kernel_shared[((((((int)threadIdx.z) * 288) + (rc_inner_outer * 18)) + 583))];
kernel_shared_local[(3)] = kernel_shared[((((((int)threadIdx.z) * 288) + (rc_inner_outer * 18)) + 10))];
kernel_shared_local[(9)] = kernel_shared[((((((int)threadIdx.z) * 288) + (rc_inner_outer * 18)) + 586))];
kernel_shared_local[(4)] = kernel_shared[((((((int)threadIdx.z) * 288) + (rc_inner_outer * 18)) + 13))];
kernel_shared_local[(10)] = kernel_shared[((((((int)threadIdx.z) * 288) + (rc_inner_outer * 18)) + 589))];
kernel_shared_local[(5)] = kernel_shared[((((((int)threadIdx.z) * 288) + (rc_inner_outer * 18)) + 16))];
kernel_shared_local[(11)] = kernel_shared[((((((int)threadIdx.z) * 288) + (rc_inner_outer * 18)) + 592))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(6)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(7)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(1)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(8)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(9)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(4)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(10)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(4)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(10)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(5)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(11)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(5)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(11)]));
pad_temp_shared_local[(0)] = pad_temp_shared[((((rc_inner_outer * 128) + ((int)threadIdx.x)) + 2))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((rc_inner_outer * 128) + ((int)threadIdx.x)) + 18))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((rc_inner_outer * 128) + ((int)threadIdx.x)) + 34))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((rc_inner_outer * 128) + ((int)threadIdx.x)) + 50))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((rc_inner_outer * 128) + ((int)threadIdx.x)) + 66))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((rc_inner_outer * 128) + ((int)threadIdx.x)) + 82))];
pad_temp_shared_local[(6)] = pad_temp_shared[((((rc_inner_outer * 128) + ((int)threadIdx.x)) + 98))];
pad_temp_shared_local[(7)] = pad_temp_shared[((((rc_inner_outer * 128) + ((int)threadIdx.x)) + 114))];
kernel_shared_local[(0)] = kernel_shared[((((((int)threadIdx.z) * 288) + (rc_inner_outer * 18)) + 2))];
kernel_shared_local[(6)] = kernel_shared[((((((int)threadIdx.z) * 288) + (rc_inner_outer * 18)) + 578))];
kernel_shared_local[(1)] = kernel_shared[((((((int)threadIdx.z) * 288) + (rc_inner_outer * 18)) + 5))];
kernel_shared_local[(7)] = kernel_shared[((((((int)threadIdx.z) * 288) + (rc_inner_outer * 18)) + 581))];
kernel_shared_local[(2)] = kernel_shared[((((((int)threadIdx.z) * 288) + (rc_inner_outer * 18)) + 8))];
kernel_shared_local[(8)] = kernel_shared[((((((int)threadIdx.z) * 288) + (rc_inner_outer * 18)) + 584))];
kernel_shared_local[(3)] = kernel_shared[((((((int)threadIdx.z) * 288) + (rc_inner_outer * 18)) + 11))];
kernel_shared_local[(9)] = kernel_shared[((((((int)threadIdx.z) * 288) + (rc_inner_outer * 18)) + 587))];
kernel_shared_local[(4)] = kernel_shared[((((((int)threadIdx.z) * 288) + (rc_inner_outer * 18)) + 14))];
kernel_shared_local[(10)] = kernel_shared[((((((int)threadIdx.z) * 288) + (rc_inner_outer * 18)) + 590))];
kernel_shared_local[(5)] = kernel_shared[((((((int)threadIdx.z) * 288) + (rc_inner_outer * 18)) + 17))];
kernel_shared_local[(11)] = kernel_shared[((((((int)threadIdx.z) * 288) + (rc_inner_outer * 18)) + 593))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(6)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(7)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(1)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(8)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(9)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(4)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(10)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(4)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(10)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(5)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(11)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(5)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(11)]));
}
}
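// Write the accumulated 2x2 register tile back to global memory: two adjacent output rows
// for each of the two output channels this thread accumulates (offsets +14 and +392).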
compute[(((((((int)blockIdx.z) * 784) + (((int)threadIdx.z) * 196)) + (((int)blockIdx.y) * 28)) + ((int)threadIdx.x)))] = compute_local[(0)];
compute[((((((((int)blockIdx.z) * 784) + (((int)threadIdx.z) * 196)) + (((int)blockIdx.y) * 28)) + ((int)threadIdx.x)) + 392))] = compute_local[(2)];
compute[((((((((int)blockIdx.z) * 784) + (((int)threadIdx.z) * 196)) + (((int)blockIdx.y) * 28)) + ((int)threadIdx.x)) + 14))] = compute_local[(1)];
compute[((((((((int)blockIdx.z) * 784) + (((int)threadIdx.z) * 196)) + (((int)blockIdx.y) * 28)) + ((int)threadIdx.x)) + 406))] = compute_local[(3)];
}
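// The three wrapper classes below run the same 3x3 convolution through cuDNN, each pinned to a
// fixed algorithm (implicit GEMM, non-fused Winograd, FFT). main() uses them as timing and
// correctness baselines; the filter weights are all set to 1.0f to match the reference kernel.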
class ConvGemm{
public:
float *cpuKernel;
float alpha = 1.0f;
float beta = 0.0f;
cudnnHandle_t convCudnn;
void* d_workspace{nullptr};
size_t workspace_bytes{0};
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
float *output;
float *kernel;
void initialize();
float *forward(float *input);
};
void ConvGemm::initialize(){
cudaMalloc(&kernel,sizeof(float)*C*N*9);
cudaMalloc(&this->output,sizeof(float)*N*H*W);
cudnnCreate(&convCudnn);
cudnnCreateTensorDescriptor(&convInputDescriptor);
cudnnSetTensor4dDescriptor(convInputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W);
cudnnCreateFilterDescriptor(&convKernelDescriptor);
cudnnSetFilter4dDescriptor(convKernelDescriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/N,
/*in_channels=*/C,
/*kernel_height=*/R,
/*kernel_width=*/S);
cudnnCreateConvolutionDescriptor(&convDesc);
cudnnSetConvolution2dDescriptor(convDesc,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
int batch_size{0}, channels{0}, height{0}, width{0};
cudnnGetConvolution2dForwardOutputDim(convDesc,
convInputDescriptor,
convKernelDescriptor,
&batch_size,
&channels,
&height,
&width);
cudnnCreateTensorDescriptor(&convOutputDescriptor);
cudnnSetTensor4dDescriptor(convOutputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/N,
/*image_height=*/H,
/*image_width=*/W);
cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
convInputDescriptor,
convKernelDescriptor,
convDesc,
convOutputDescriptor,
CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM,
&workspace_bytes);
cudaMalloc(&d_workspace, workspace_bytes);
unsigned int kernelSize = R*S*C*N;//kernel
this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
for(int i=0;i<kernelSize;++i){
this->cpuKernel[i] = 1.0f;
}
cudaMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),cudaMemcpyHostToDevice);
free(cpuKernel);
}
float * ConvGemm::forward(float *input) {
cudaMemset(output, 0, 1*N*H*W*sizeof(float));
checkCUDNN(cudnnConvolutionForward(convCudnn,
&alpha,
convInputDescriptor,
input,
convKernelDescriptor,
kernel,
convDesc,
CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM,
d_workspace,
workspace_bytes,
&beta,
convOutputDescriptor,
output));
return output;
}
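// Typical usage (sketch, mirroring main() below):
//   ConvGemm convGemm;
//   convGemm.initialize();                        // create descriptors and workspace
//   float *out = convGemm.forward(device_input);  // device pointer holding C*H*W floats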
class ConvWinogradeNon{
public:
float *cpuKernel;
float alpha = 1.0f;
float beta = 0.0f;
cudnnHandle_t convCudnn;
void* d_workspace{nullptr};
size_t workspace_bytes{0};
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
float *output;
float *kernel;
void initialize();
float *forward(float *input);
};
void ConvWinogradeNon::initialize(){
cudaMalloc(&kernel,sizeof(float)*C*N*9);
cudaMalloc(&this->output,sizeof(float)*N*H*W);
cudnnCreate(&convCudnn);
cudnnCreateTensorDescriptor(&convInputDescriptor);
cudnnSetTensor4dDescriptor(convInputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W);
cudnnCreateFilterDescriptor(&convKernelDescriptor);
cudnnSetFilter4dDescriptor(convKernelDescriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/N,
/*in_channels=*/C,
/*kernel_height=*/R,
/*kernel_width=*/S);
cudnnCreateConvolutionDescriptor(&convDesc);
cudnnSetConvolution2dDescriptor(convDesc,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
int batch_size{0}, channels{0}, height{0}, width{0};
cudnnGetConvolution2dForwardOutputDim(convDesc,
convInputDescriptor,
convKernelDescriptor,
&batch_size,
&channels,
&height,
&width);
cudnnCreateTensorDescriptor(&convOutputDescriptor);
cudnnSetTensor4dDescriptor(convOutputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/N,
/*image_height=*/H,
/*image_width=*/W);
cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
convInputDescriptor,
convKernelDescriptor,
convDesc,
convOutputDescriptor,
CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED,
&workspace_bytes);
cudaMalloc(&d_workspace, workspace_bytes);
unsigned int kernelSize = R*S*C*N;//kernel
this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
for(int i=0;i<kernelSize;++i){
this->cpuKernel[i] = 1.0f;
}
cudaMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),cudaMemcpyHostToDevice);
free(cpuKernel);
}
float * ConvWinogradeNon::forward(float *input) {
cudaMemset(output, 0, 1*N*H*W*sizeof(float));
checkCUDNN(cudnnConvolutionForward(convCudnn,
&alpha,
convInputDescriptor,
input,
convKernelDescriptor,
kernel,
convDesc,
CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED,
d_workspace,
workspace_bytes,
&beta,
convOutputDescriptor,
output));
return output;
}
class ConvFFT{
public:
float *cpuKernel;
float alpha = 1.0f;
float beta = 0.0f;
cudnnHandle_t convCudnn;
void* d_workspace{nullptr};
size_t workspace_bytes{0};
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
float *output;
float *kernel;
void initialize();
float *forward(float *input);
};
void ConvFFT::initialize(){
cudaMalloc(&kernel,sizeof(float)*C*N*9);
cudaMalloc(&this->output,sizeof(float)*N*H*W);
cudnnCreate(&convCudnn);
cudnnCreateTensorDescriptor(&convInputDescriptor);
cudnnSetTensor4dDescriptor(convInputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W);
cudnnCreateFilterDescriptor(&convKernelDescriptor);
cudnnSetFilter4dDescriptor(convKernelDescriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/N,
/*in_channels=*/C,
/*kernel_height=*/R,
/*kernel_width=*/S);
cudnnCreateConvolutionDescriptor(&convDesc);
cudnnSetConvolution2dDescriptor(convDesc,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
int batch_size{0}, channels{0}, height{0}, width{0};
cudnnGetConvolution2dForwardOutputDim(convDesc,
convInputDescriptor,
convKernelDescriptor,
&batch_size,
&channels,
&height,
&width);
cudnnCreateTensorDescriptor(&convOutputDescriptor);
cudnnSetTensor4dDescriptor(convOutputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/N,
/*image_height=*/H,
/*image_width=*/W);
cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
convInputDescriptor,
convKernelDescriptor,
convDesc,
convOutputDescriptor,
CUDNN_CONVOLUTION_FWD_ALGO_FFT,
&workspace_bytes);
cudaMalloc(&d_workspace, workspace_bytes);
unsigned int kernelSize = R*S*C*N;//kernel
this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
for(int i=0;i<kernelSize;++i){
this->cpuKernel[i] = 1.0f;
}
cudaMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),cudaMemcpyHostToDevice);
free(cpuKernel);
}
float * ConvFFT::forward(float *input) {
cudaMemset(output, 0, 1*N*H*W*sizeof(float));
checkCUDNN(cudnnConvolutionForward(convCudnn,
&alpha,
convInputDescriptor,
input,
convKernelDescriptor,
kernel,
convDesc,
CUDNN_CONVOLUTION_FWD_ALGO_FFT,
d_workspace,
workspace_bytes,
&beta,
convOutputDescriptor,
output));
return output;
}
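// Hand-written tiled direct convolution path. load_input_2_shared_memory() stages a
// (TH+2) x WPAD input slab per channel into shared memory (rows/columns outside the image
// stay zero), and switch_write_back() flushes a thread's TH x TW register tile with
// atomicAdd, clipping at the image border.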
__device__ void load_input_2_shared_memory(float *input, float *shared_input, unsigned int h_start,
unsigned int h_end, unsigned int h_offset, unsigned int c_start,
unsigned int warp_id, unsigned int lane_id, unsigned int warp_size){
switch(h_offset){
case 0:
for(unsigned int c = warp_id; c<TC; c+=TWS){
for(unsigned int i=lane_id; i<(h_end - h_start) * W; i+=warp_size){
unsigned int r = i/W;
unsigned int s = i%W;
shared_input[c*(TH + 2)*(WPAD) + r * WPAD + s + 1] = input[(c_start + c) * H * W + h_start * W + i];
}
}
break;
case 1:
for(unsigned int c = warp_id; c<TC; c+=TWS){
for(unsigned int i=lane_id; i<(h_end - h_start) * W; i+=warp_size){
unsigned int r = i/W;
unsigned int s = i%W;
shared_input[c*(TH + 2)*(WPAD) + (1 + r) * WPAD + s + 1] = input[(c_start + c) * H * W + h_start * W + i];
}
}
break;
}
}
__device__ __forceinline__ void switch_write_back(unsigned int write_h, unsigned int write_w, unsigned int h_out_start, unsigned int w_out_start, unsigned int n, float * outputs, float * temp_result){
switch(write_h){
case 1:
switch(write_w){
case 1:
#pragma unroll
for (unsigned int th = 0; th < 1; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 1; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
}
break;
case 2:
switch(write_w){
case 1:
#pragma unroll
for (unsigned int th = 0; th < 2; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 1; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
}
break;
}
}
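// Each block handles one (input-channel group, output-row tile) pair: tc_id selects the
// TC-channel slab and th_id the TH output rows. Within a block, threadIdx.x % N picks the
// output channel and threadIdx.x / N the TW-wide column tile. Partial sums from different
// channel groups are combined through the atomicAdd in switch_write_back().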
__global__ void conv2d(float * __restrict__ input,const float * __restrict__ kernel, float * __restrict__ outputs){
extern __shared__ float shared_input[];
const unsigned int tile_id = blockIdx.x;
const unsigned int tc_id = tile_id / THS;
const unsigned int th_id = tile_id % THS;
const unsigned int tw_id = threadIdx.x / N;
const int h_out_start = th_id * TH;
const int w_out_start = tw_id * TW;
const unsigned int warp_id = tw_id;
const unsigned int lane_id = threadIdx.x % N;
float data_array[9];
float temp_result[TH*TW] = {0.0f};
for(unsigned int i=threadIdx.x;i<TC*(TH+2)*WPAD;i+=blockDim.x){
shared_input[i] = 0.0f;
}
unsigned int n = lane_id;
unsigned int c_offset = tc_id * TC;
int h_offset = (h_out_start == 0)?1:0;
int h_padded_start = h_out_start;
int h_padded_end = min(h_padded_start + TH + 2, H + 2);
int h_non_padded_start = max(h_out_start - 1, 0);
int h_non_padded_end = min(H, h_padded_end - 1);
__syncthreads();
load_input_2_shared_memory(input, shared_input, h_non_padded_start, h_non_padded_end, h_offset, c_offset, warp_id, lane_id, N);
__syncthreads();
#pragma unroll
for(unsigned int c=0;c<TC;c++){
#pragma unroll
for(unsigned int r=0;r<R;++r){
#pragma unroll
for(unsigned int s=0;s<S;++s){
data_array[r*S+s] = kernel[(c + c_offset)*N*9+r*3*N+s*N+n];
}
}
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 0]*data_array[0];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 1]*data_array[1];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 2]*data_array[2];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 0]*data_array[0];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 0]*data_array[3];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[1];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[4];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[2];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[5];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 0]*data_array[3];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 0]*data_array[6];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[4];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[7];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[5];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[8];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 0]*data_array[6];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 1]*data_array[7];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 2]*data_array[8];
}
switch_write_back(min(TH, H - h_out_start), min(TW, W - w_out_start), h_out_start, w_out_start, n, outputs, temp_result);
}
float check_diff(float *x, float *y, unsigned int size){
float diff = 0.0f;
#pragma omp parallel for reduction(+ : diff)
for(unsigned int i=0;i<size;++i){
diff += fabsf(x[i] - y[i]);
}
return diff;
}
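// Benchmark driver: builds a random C x H x W input and an all-ones kernel, runs the three
// cuDNN baselines, the TVM-generated kernel (default_function_kernel0) and the tiled kernel
// (conv2d), times each with CUDA events, checks the tiled result against cuDNN, and appends
// one CSV row with the timings and speedups.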
int main(void){
float *input = new float[C*H*W];
time_t t;
float *matrix;
cudaMalloc(&matrix,C*(TH+2)*(TW+2)*THS*TWS*sizeof(float));
cudaMemset(matrix,0,C*(TH+2)*(TW+2)*THS*TWS*sizeof(float));
srand((unsigned) time(&t));
for(int i =0;i<C*H*W;++i){
input[i] = rand() % 10;
}
float *device_input;
cudaMalloc(&device_input,C*H*W*sizeof(float));
cudaMemcpy(device_input,input,C*H*W*sizeof(float),cudaMemcpyHostToDevice);
float *K = new float[C*N*9];
for(int i=0;i<C*N*9;++i){
K[i] = 1.0f;
}
ConvGemm convGemm;
convGemm.initialize();
ConvWinogradeNon convWinogradeNon;
convWinogradeNon.initialize();
ConvFFT convFFT;
convFFT.initialize();
float *out_cudnn;
float *out_cudnn_host = new float[N*H*W];
cudaEvent_t event_start;
cudaEvent_t event_stop;
cudaEventCreate(&event_start);
cudaEventCreate(&event_stop);
out_cudnn = convGemm.forward(device_input);
cudaMemcpy(out_cudnn_host,out_cudnn,N*H*W*sizeof(float),cudaMemcpyDeviceToHost);
out_cudnn = convFFT.forward(device_input);
out_cudnn = convWinogradeNon.forward(device_input);
float *device_K;
float *device_out;
cudaMalloc(&device_out,H*W*N*sizeof(float));
cudaMemset(device_out,0,H*W*N*sizeof(float));
cudaMalloc(&device_K,C*N*9*sizeof(float));
cudaMemcpy(device_K,K,C*N*9*sizeof(float),cudaMemcpyHostToDevice);
cudaEventRecord(event_start);
convGemm.forward(device_input);
cudaEventRecord(event_stop);
cudaEventSynchronize(event_stop);
float cudnnGemmTime;
cudaEventElapsedTime(&cudnnGemmTime, event_start, event_stop);
cudaEventRecord(event_start);
convWinogradeNon.forward(device_input);
cudaEventRecord(event_stop);
cudaEventSynchronize(event_stop);
float cudnnWinogradeTimeNon;
cudaEventElapsedTime(&cudnnWinogradeTimeNon, event_start, event_stop);
cudaEventRecord(event_start);
convFFT.forward(device_input);
cudaEventRecord(event_stop);
cudaEventSynchronize(event_stop);
float cudnnFFTTime;
cudaEventElapsedTime(&cudnnFFTTime, event_start, event_stop);
dim3 grid(1,7,8);
dim3 block(14,1,2);
cudaEventRecord(event_start);
default_function_kernel0<<<grid, block>>>(device_input, device_K, device_out);
cudaEventRecord(event_stop);
cudaEventSynchronize(event_stop);
float time_tvm;
cudaEventElapsedTime(&time_tvm, event_start, event_stop);
float *out_tvm = new float[N*H*W];
cudaMemcpy(out_tvm,device_out,N*H*W*sizeof(float),cudaMemcpyDeviceToHost);
cudaMemset(device_out, 0, sizeof(float)*N*H*W);
chkerr(cudaFuncSetAttribute(conv2d,cudaFuncAttributeMaxDynamicSharedMemorySize, TC*(TH+2)*(WPAD)*4));
cudaEventRecord(event_start);
conv2d<<<TCS*THS, N * TWS, TC*(TH+2)*(WPAD)*4>>>(device_input, device_K, device_out);
cudaEventRecord(event_stop);
cudaEventSynchronize(event_stop);
float time_tdc;
cudaEventElapsedTime(&time_tdc, event_start, event_stop);
float *out_tdc = new float[N*H*W];
cudaMemcpy(out_tdc,device_out,N*H*W*sizeof(float),cudaMemcpyDeviceToHost);
ofstream outfile;
char buffer[1000];
int ret = sprintf(buffer,"%d,%d,%d,%d,%f,%f,%f,%f,%f,%f,%f,%f,%f\n",N,C,H,W,
cudnnFFTTime,cudnnWinogradeTimeNon,cudnnGemmTime,time_tvm,time_tdc,
cudnnFFTTime/time_tdc,cudnnWinogradeTimeNon/time_tdc,cudnnGemmTime/time_tdc,time_tvm/time_tdc);
outfile.open("../../evaluation_outcome/2080Ti-layers-eval-modeling.csv", std::ios_base::app);
outfile << buffer;
float difference = check_diff(out_cudnn_host, out_tdc, N*H*W);
cout<<N<<","<<C<<","<<H<<","<<W<<","<<cudnnFFTTime<<","<<cudnnWinogradeTimeNon<<","<<cudnnGemmTime<<","<<
time_tvm<<","<<time_tdc<<","<<cudnnFFTTime/time_tdc<<","<<cudnnWinogradeTimeNon/time_tdc<<","<<
cudnnGemmTime/time_tdc<<","<<time_tvm/time_tdc<<endl;
return 0;
}
|
0d979ef095c08b1fa6d8b328c5345d75bacfd8ca.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <assert.h>
#include <vector>
#include <iostream>
#include "yololayer.h"
#include "cuda_utils.h"
namespace Tn
{
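// POD serialization helpers: copy a trivially-copyable value to/from a raw byte buffer and
// advance the cursor. Used by YoloLayerPlugin::serialize() and its deserializing constructor.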
template<typename T>
void write(char*& buffer, const T& val)
{
*reinterpret_cast<T*>(buffer) = val;
buffer += sizeof(T);
}
template<typename T>
void read(const char*& buffer, T& val)
{
val = *reinterpret_cast<const T*>(buffer);
buffer += sizeof(T);
}
}
using namespace Yolo;
namespace nvinfer1
{
YoloLayerPlugin::YoloLayerPlugin(int classCount, int netWidth, int netHeight, int maxOut, const std::vector<Yolo::YoloKernel>& vYoloKernel)
{
mClassCount = classCount;
mYoloV5NetWidth = netWidth;
mYoloV5NetHeight = netHeight;
mMaxOutObject = maxOut;
mYoloKernel = vYoloKernel;
mKernelCount = vYoloKernel.size();
CUDA_CHECK(hipHostMalloc(&mAnchor, mKernelCount * sizeof(void*)));
size_t AnchorLen = sizeof(float)* CHECK_COUNT * 2;
for (int ii = 0; ii < mKernelCount; ii++)
{
CUDA_CHECK(hipMalloc(&mAnchor[ii], AnchorLen));
const auto& yolo = mYoloKernel[ii];
CUDA_CHECK(hipMemcpy(mAnchor[ii], yolo.anchors, AnchorLen, hipMemcpyHostToDevice));
}
}
YoloLayerPlugin::~YoloLayerPlugin()
{
for (int ii = 0; ii < mKernelCount; ii++)
{
CUDA_CHECK(hipFree(mAnchor[ii]));
}
CUDA_CHECK(hipHostFree(mAnchor));
}
// create the plugin at runtime from a byte stream
YoloLayerPlugin::YoloLayerPlugin(const void* data, size_t length)
{
using namespace Tn;
const char *d = reinterpret_cast<const char *>(data), *a = d;
read(d, mClassCount);
read(d, mThreadCount);
read(d, mKernelCount);
read(d, mYoloV5NetWidth);
read(d, mYoloV5NetHeight);
read(d, mMaxOutObject);
mYoloKernel.resize(mKernelCount);
auto kernelSize = mKernelCount * sizeof(YoloKernel);
memcpy(mYoloKernel.data(), d, kernelSize);
d += kernelSize;
CUDA_CHECK(hipHostMalloc(&mAnchor, mKernelCount * sizeof(void*)));
size_t AnchorLen = sizeof(float)* CHECK_COUNT * 2;
for (int ii = 0; ii < mKernelCount; ii++)
{
CUDA_CHECK(hipMalloc(&mAnchor[ii], AnchorLen));
const auto& yolo = mYoloKernel[ii];
CUDA_CHECK(hipMemcpy(mAnchor[ii], yolo.anchors, AnchorLen, hipMemcpyHostToDevice));
}
assert(d == a + length);
}
void YoloLayerPlugin::serialize(void* buffer) const
{
using namespace Tn;
char* d = static_cast<char*>(buffer), *a = d;
write(d, mClassCount);
write(d, mThreadCount);
write(d, mKernelCount);
write(d, mYoloV5NetWidth);
write(d, mYoloV5NetHeight);
write(d, mMaxOutObject);
auto kernelSize = mKernelCount * sizeof(YoloKernel);
memcpy(d, mYoloKernel.data(), kernelSize);
d += kernelSize;
assert(d == a + getSerializationSize());
}
size_t YoloLayerPlugin::getSerializationSize() const
{
return sizeof(mClassCount) + sizeof(mThreadCount) + sizeof(mKernelCount) + sizeof(Yolo::YoloKernel) * mYoloKernel.size() + sizeof(mYoloV5NetWidth) + sizeof(mYoloV5NetHeight) + sizeof(mMaxOutObject);
}
int YoloLayerPlugin::initialize()
{
return 0;
}
Dims YoloLayerPlugin::getOutputDimensions(int index, const Dims* inputs, int nbInputDims)
{
// output layout: one float holding the detection count, followed by mMaxOutObject Detection records (flattened into the channel dimension)
int totalsize = mMaxOutObject * sizeof(Detection) / sizeof(float);
return Dims3(totalsize + 1, 1, 1);
}
// Set plugin namespace
void YoloLayerPlugin::setPluginNamespace(const char* pluginNamespace)
{
mPluginNamespace = pluginNamespace;
}
const char* YoloLayerPlugin::getPluginNamespace() const
{
return mPluginNamespace;
}
// Return the DataType of the plugin output at the requested index
DataType YoloLayerPlugin::getOutputDataType(int index, const nvinfer1::DataType* inputTypes, int nbInputs) const
{
return DataType::kFLOAT;
}
// Return true if output tensor is broadcast across a batch.
bool YoloLayerPlugin::isOutputBroadcastAcrossBatch(int outputIndex, const bool* inputIsBroadcasted, int nbInputs) const
{
return false;
}
// Return true if plugin can use input that is broadcast across batch without replication.
bool YoloLayerPlugin::canBroadcastInputAcrossBatch(int inputIndex) const
{
return false;
}
void YoloLayerPlugin::configurePlugin(const PluginTensorDesc* in, int nbInput, const PluginTensorDesc* out, int nbOutput)
{
}
// Attach the plugin object to an execution context and grant the plugin access to some context resources.
void YoloLayerPlugin::attachToContext(cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator)
{
}
// Detach the plugin object from its execution context.
void YoloLayerPlugin::detachFromContext() {}
const char* YoloLayerPlugin::getPluginType() const
{
return "YoloLayer_TRT";
}
const char* YoloLayerPlugin::getPluginVersion() const
{
return "1";
}
void YoloLayerPlugin::destroy()
{
delete this;
}
// Clone the plugin
IPluginV2IOExt* YoloLayerPlugin::clone() const
{
YoloLayerPlugin* p = new YoloLayerPlugin(mClassCount, mYoloV5NetWidth, mYoloV5NetHeight, mMaxOutObject, mYoloKernel);
p->setPluginNamespace(mPluginNamespace);
return p;
}
__device__ float Logist(float data) { return 1.0f / (1.0f + expf(-data)); };
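// One thread per grid cell (per batch image). For each of the three anchors the thread keeps
// the cell only if its objectness passes IGNORE_THRESH, picks the best class, decodes the
// YOLOv5 box (sigmoid offsets relative to the cell; squared sigmoid times the anchor for
// width/height), and appends the detection to the per-image buffer via an atomic counter.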
__global__ void CalDetection(const float *input, float *output, int noElements,
const int netwidth, const int netheight, int maxoutobject, int yoloWidth, int yoloHeight, const float anchors[CHECK_COUNT * 2], int classes, int outputElem)
{
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx >= noElements) return;
int total_grid = yoloWidth * yoloHeight;
int bnIdx = idx / total_grid;
idx = idx - total_grid * bnIdx;
int info_len_i = 5 + classes;
const float* curInput = input + bnIdx * (info_len_i * total_grid * CHECK_COUNT);
for (int k = 0; k < 3; ++k) {
float box_prob = Logist(curInput[idx + k * info_len_i * total_grid + 4 * total_grid]);
if (box_prob < IGNORE_THRESH) continue;
int class_id = 0;
float max_cls_prob = 0.0;
for (int i = 5; i < info_len_i; ++i) {
float p = Logist(curInput[idx + k * info_len_i * total_grid + i * total_grid]);
if (p > max_cls_prob) {
max_cls_prob = p;
class_id = i - 5;
}
}
float *res_count = output + bnIdx * outputElem;
int count = (int)atomicAdd(res_count, 1);
if (count >= maxoutobject) return;
char* data = (char *)res_count + sizeof(float) + count * sizeof(Detection);
Detection* det = (Detection*)(data);
int row = idx / yoloWidth;
int col = idx % yoloWidth;
//Location
// pytorch:
// y = x[i].sigmoid()
// y[..., 0:2] = (y[..., 0:2] * 2. - 0.5 + self.grid[i].to(x[i].device)) * self.stride[i] # xy
// y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh
// X: (sigmoid(tx) + cx)/FeaturemapW * netwidth
det->bbox[0] = (col - 0.5f + 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 0 * total_grid])) * netwidth / yoloWidth;
det->bbox[1] = (row - 0.5f + 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 1 * total_grid])) * netheight / yoloHeight;
// W: (Pw * e^tw) / FeaturemapW * netwidth
// v5: https://github.com/ultralytics/yolov5/issues/471
det->bbox[2] = 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 2 * total_grid]);
det->bbox[2] = det->bbox[2] * det->bbox[2] * anchors[2 * k];
det->bbox[3] = 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 3 * total_grid]);
det->bbox[3] = det->bbox[3] * det->bbox[3] * anchors[2 * k + 1];
det->conf = box_prob * max_cls_prob;
det->class_id = class_id;
}
}
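// Host-side launcher: zero the per-image detection counter (the first float of each output
// slot), then launch CalDetection once per YOLO scale with one thread per grid cell.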
void YoloLayerPlugin::forwardGpu(const float *const * inputs, float* output, hipStream_t stream, int batchSize)
{
int outputElem = 1 + mMaxOutObject * sizeof(Detection) / sizeof(float);
for (int idx = 0; idx < batchSize; ++idx) {
CUDA_CHECK(hipMemset(output + idx * outputElem, 0, sizeof(float)));
}
int numElem = 0;
for (unsigned int i = 0; i < mYoloKernel.size(); ++i)
{
const auto& yolo = mYoloKernel[i];
numElem = yolo.width*yolo.height*batchSize;
if (numElem < mThreadCount)
mThreadCount = numElem;
//printf("Net: %d %d \n", mYoloV5NetWidth, mYoloV5NetHeight);
CalDetection<<<(yolo.width*yolo.height*batchSize + mThreadCount - 1) / mThreadCount, mThreadCount, 0, stream>>>
(inputs[i], output, numElem, mYoloV5NetWidth, mYoloV5NetHeight, mMaxOutObject, yolo.width, yolo.height, (float *)mAnchor[i], mClassCount, outputElem);
}
}
int YoloLayerPlugin::enqueue(int batchSize, const void*const * inputs, void** outputs, void* workspace, hipStream_t stream)
{
forwardGpu((const float *const *)inputs, (float*)outputs[0], stream, batchSize);
return 0;
}
PluginFieldCollection YoloPluginCreator::mFC{};
std::vector<PluginField> YoloPluginCreator::mPluginAttributes;
YoloPluginCreator::YoloPluginCreator()
{
mPluginAttributes.clear();
mFC.nbFields = mPluginAttributes.size();
mFC.fields = mPluginAttributes.data();
}
const char* YoloPluginCreator::getPluginName() const
{
return "YoloLayer_TRT";
}
const char* YoloPluginCreator::getPluginVersion() const
{
return "1";
}
const PluginFieldCollection* YoloPluginCreator::getFieldNames()
{
return &mFC;
}
IPluginV2IOExt* YoloPluginCreator::createPlugin(const char* name, const PluginFieldCollection* fc)
{
int class_count = -1;
int input_w = -1;
int input_h = -1;
int max_output_object_count = -1;
std::vector<Yolo::YoloKernel> yolo_kernels(3);
const PluginField* fields = fc->fields;
for (int i = 0; i < fc->nbFields; i++) {
if (strcmp(fields[i].name, "netdata") == 0) {
assert(fields[i].type == PluginFieldType::kFLOAT32);
int *tmp = (int*)(fields[i].data);
class_count = tmp[0];
input_w = tmp[1];
input_h = tmp[2];
max_output_object_count = tmp[3];
} else if (strstr(fields[i].name, "yolodata") != NULL) {
assert(fields[i].type == PluginFieldType::kFLOAT32);
int *tmp = (int*)(fields[i].data);
YoloKernel kernel;
kernel.width = tmp[0];
kernel.height = tmp[1];
for (int j = 0; j < fields[i].length - 2; j++) {
kernel.anchors[j] = tmp[j + 2];
}
yolo_kernels[2 - (fields[i].name[8] - '1')] = kernel;
}
}
assert(class_count > 0 && input_w > 0 && input_h > 0 && max_output_object_count > 0);
YoloLayerPlugin* obj = new YoloLayerPlugin(class_count, input_w, input_h, max_output_object_count, yolo_kernels);
obj->setPluginNamespace(mNamespace.c_str());
return obj;
}
IPluginV2IOExt* YoloPluginCreator::deserializePlugin(const char* name, const void* serialData, size_t serialLength)
{
// This object will be deleted when the network is destroyed, which will
// call YoloLayerPlugin::destroy()
YoloLayerPlugin* obj = new YoloLayerPlugin(serialData, serialLength);
obj->setPluginNamespace(mNamespace.c_str());
return obj;
}
}
|
0d979ef095c08b1fa6d8b328c5345d75bacfd8ca.cu
|
#include <assert.h>
#include <vector>
#include <iostream>
#include "yololayer.h"
#include "cuda_utils.h"
namespace Tn
{
template<typename T>
void write(char*& buffer, const T& val)
{
*reinterpret_cast<T*>(buffer) = val;
buffer += sizeof(T);
}
template<typename T>
void read(const char*& buffer, T& val)
{
val = *reinterpret_cast<const T*>(buffer);
buffer += sizeof(T);
}
}
using namespace Yolo;
namespace nvinfer1
{
YoloLayerPlugin::YoloLayerPlugin(int classCount, int netWidth, int netHeight, int maxOut, const std::vector<Yolo::YoloKernel>& vYoloKernel)
{
mClassCount = classCount;
mYoloV5NetWidth = netWidth;
mYoloV5NetHeight = netHeight;
mMaxOutObject = maxOut;
mYoloKernel = vYoloKernel;
mKernelCount = vYoloKernel.size();
CUDA_CHECK(cudaMallocHost(&mAnchor, mKernelCount * sizeof(void*)));
size_t AnchorLen = sizeof(float)* CHECK_COUNT * 2;
for (int ii = 0; ii < mKernelCount; ii++)
{
CUDA_CHECK(cudaMalloc(&mAnchor[ii], AnchorLen));
const auto& yolo = mYoloKernel[ii];
CUDA_CHECK(cudaMemcpy(mAnchor[ii], yolo.anchors, AnchorLen, cudaMemcpyHostToDevice));
}
}
YoloLayerPlugin::~YoloLayerPlugin()
{
for (int ii = 0; ii < mKernelCount; ii++)
{
CUDA_CHECK(cudaFree(mAnchor[ii]));
}
CUDA_CHECK(cudaFreeHost(mAnchor));
}
// create the plugin at runtime from a byte stream
YoloLayerPlugin::YoloLayerPlugin(const void* data, size_t length)
{
using namespace Tn;
const char *d = reinterpret_cast<const char *>(data), *a = d;
read(d, mClassCount);
read(d, mThreadCount);
read(d, mKernelCount);
read(d, mYoloV5NetWidth);
read(d, mYoloV5NetHeight);
read(d, mMaxOutObject);
mYoloKernel.resize(mKernelCount);
auto kernelSize = mKernelCount * sizeof(YoloKernel);
memcpy(mYoloKernel.data(), d, kernelSize);
d += kernelSize;
CUDA_CHECK(cudaMallocHost(&mAnchor, mKernelCount * sizeof(void*)));
size_t AnchorLen = sizeof(float)* CHECK_COUNT * 2;
for (int ii = 0; ii < mKernelCount; ii++)
{
CUDA_CHECK(cudaMalloc(&mAnchor[ii], AnchorLen));
const auto& yolo = mYoloKernel[ii];
CUDA_CHECK(cudaMemcpy(mAnchor[ii], yolo.anchors, AnchorLen, cudaMemcpyHostToDevice));
}
assert(d == a + length);
}
void YoloLayerPlugin::serialize(void* buffer) const
{
using namespace Tn;
char* d = static_cast<char*>(buffer), *a = d;
write(d, mClassCount);
write(d, mThreadCount);
write(d, mKernelCount);
write(d, mYoloV5NetWidth);
write(d, mYoloV5NetHeight);
write(d, mMaxOutObject);
auto kernelSize = mKernelCount * sizeof(YoloKernel);
memcpy(d, mYoloKernel.data(), kernelSize);
d += kernelSize;
assert(d == a + getSerializationSize());
}
size_t YoloLayerPlugin::getSerializationSize() const
{
return sizeof(mClassCount) + sizeof(mThreadCount) + sizeof(mKernelCount) + sizeof(Yolo::YoloKernel) * mYoloKernel.size() + sizeof(mYoloV5NetWidth) + sizeof(mYoloV5NetHeight) + sizeof(mMaxOutObject);
}
int YoloLayerPlugin::initialize()
{
return 0;
}
Dims YoloLayerPlugin::getOutputDimensions(int index, const Dims* inputs, int nbInputDims)
{
// output layout: one float holding the detection count, followed by mMaxOutObject Detection records (flattened into the channel dimension)
int totalsize = mMaxOutObject * sizeof(Detection) / sizeof(float);
return Dims3(totalsize + 1, 1, 1);
}
// Set plugin namespace
void YoloLayerPlugin::setPluginNamespace(const char* pluginNamespace)
{
mPluginNamespace = pluginNamespace;
}
const char* YoloLayerPlugin::getPluginNamespace() const
{
return mPluginNamespace;
}
// Return the DataType of the plugin output at the requested index
DataType YoloLayerPlugin::getOutputDataType(int index, const nvinfer1::DataType* inputTypes, int nbInputs) const
{
return DataType::kFLOAT;
}
// Return true if output tensor is broadcast across a batch.
bool YoloLayerPlugin::isOutputBroadcastAcrossBatch(int outputIndex, const bool* inputIsBroadcasted, int nbInputs) const
{
return false;
}
// Return true if plugin can use input that is broadcast across batch without replication.
bool YoloLayerPlugin::canBroadcastInputAcrossBatch(int inputIndex) const
{
return false;
}
void YoloLayerPlugin::configurePlugin(const PluginTensorDesc* in, int nbInput, const PluginTensorDesc* out, int nbOutput)
{
}
// Attach the plugin object to an execution context and grant the plugin access to some context resources.
void YoloLayerPlugin::attachToContext(cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator)
{
}
// Detach the plugin object from its execution context.
void YoloLayerPlugin::detachFromContext() {}
const char* YoloLayerPlugin::getPluginType() const
{
return "YoloLayer_TRT";
}
const char* YoloLayerPlugin::getPluginVersion() const
{
return "1";
}
void YoloLayerPlugin::destroy()
{
delete this;
}
// Clone the plugin
IPluginV2IOExt* YoloLayerPlugin::clone() const
{
YoloLayerPlugin* p = new YoloLayerPlugin(mClassCount, mYoloV5NetWidth, mYoloV5NetHeight, mMaxOutObject, mYoloKernel);
p->setPluginNamespace(mPluginNamespace);
return p;
}
__device__ float Logist(float data) { return 1.0f / (1.0f + expf(-data)); };
__global__ void CalDetection(const float *input, float *output, int noElements,
const int netwidth, const int netheight, int maxoutobject, int yoloWidth, int yoloHeight, const float anchors[CHECK_COUNT * 2], int classes, int outputElem)
{
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx >= noElements) return;
int total_grid = yoloWidth * yoloHeight;
int bnIdx = idx / total_grid;
idx = idx - total_grid * bnIdx;
int info_len_i = 5 + classes;
const float* curInput = input + bnIdx * (info_len_i * total_grid * CHECK_COUNT);
for (int k = 0; k < 3; ++k) {
float box_prob = Logist(curInput[idx + k * info_len_i * total_grid + 4 * total_grid]);
if (box_prob < IGNORE_THRESH) continue;
int class_id = 0;
float max_cls_prob = 0.0;
for (int i = 5; i < info_len_i; ++i) {
float p = Logist(curInput[idx + k * info_len_i * total_grid + i * total_grid]);
if (p > max_cls_prob) {
max_cls_prob = p;
class_id = i - 5;
}
}
float *res_count = output + bnIdx * outputElem;
int count = (int)atomicAdd(res_count, 1);
if (count >= maxoutobject) return;
char* data = (char *)res_count + sizeof(float) + count * sizeof(Detection);
Detection* det = (Detection*)(data);
int row = idx / yoloWidth;
int col = idx % yoloWidth;
//Location
// pytorch:
// y = x[i].sigmoid()
// y[..., 0:2] = (y[..., 0:2] * 2. - 0.5 + self.grid[i].to(x[i].device)) * self.stride[i] # xy
// y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh
// X: (sigmoid(tx) + cx)/FeaturemapW * netwidth
det->bbox[0] = (col - 0.5f + 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 0 * total_grid])) * netwidth / yoloWidth;
det->bbox[1] = (row - 0.5f + 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 1 * total_grid])) * netheight / yoloHeight;
// W: (Pw * e^tw) / FeaturemapW * netwidth
// v5: https://github.com/ultralytics/yolov5/issues/471
det->bbox[2] = 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 2 * total_grid]);
det->bbox[2] = det->bbox[2] * det->bbox[2] * anchors[2 * k];
det->bbox[3] = 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 3 * total_grid]);
det->bbox[3] = det->bbox[3] * det->bbox[3] * anchors[2 * k + 1];
det->conf = box_prob * max_cls_prob;
det->class_id = class_id;
}
}
void YoloLayerPlugin::forwardGpu(const float *const * inputs, float* output, cudaStream_t stream, int batchSize)
{
int outputElem = 1 + mMaxOutObject * sizeof(Detection) / sizeof(float);
for (int idx = 0; idx < batchSize; ++idx) {
CUDA_CHECK(cudaMemset(output + idx * outputElem, 0, sizeof(float)));
}
int numElem = 0;
for (unsigned int i = 0; i < mYoloKernel.size(); ++i)
{
const auto& yolo = mYoloKernel[i];
numElem = yolo.width*yolo.height*batchSize;
if (numElem < mThreadCount)
mThreadCount = numElem;
//printf("Net: %d %d \n", mYoloV5NetWidth, mYoloV5NetHeight);
CalDetection<<<(yolo.width*yolo.height*batchSize + mThreadCount - 1) / mThreadCount, mThreadCount, 0, stream>>>
(inputs[i], output, numElem, mYoloV5NetWidth, mYoloV5NetHeight, mMaxOutObject, yolo.width, yolo.height, (float *)mAnchor[i], mClassCount, outputElem);
}
}
int YoloLayerPlugin::enqueue(int batchSize, const void*const * inputs, void** outputs, void* workspace, cudaStream_t stream)
{
forwardGpu((const float *const *)inputs, (float*)outputs[0], stream, batchSize);
return 0;
}
PluginFieldCollection YoloPluginCreator::mFC{};
std::vector<PluginField> YoloPluginCreator::mPluginAttributes;
YoloPluginCreator::YoloPluginCreator()
{
mPluginAttributes.clear();
mFC.nbFields = mPluginAttributes.size();
mFC.fields = mPluginAttributes.data();
}
const char* YoloPluginCreator::getPluginName() const
{
return "YoloLayer_TRT";
}
const char* YoloPluginCreator::getPluginVersion() const
{
return "1";
}
const PluginFieldCollection* YoloPluginCreator::getFieldNames()
{
return &mFC;
}
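// Parses the 'netdata' field (class count, input width/height, max detections) and the
// per-scale 'yolodata*' fields (grid width/height followed by the anchor values), then
// constructs the plugin. Field payloads are read as raw int data.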
IPluginV2IOExt* YoloPluginCreator::createPlugin(const char* name, const PluginFieldCollection* fc)
{
int class_count = -1;
int input_w = -1;
int input_h = -1;
int max_output_object_count = -1;
std::vector<Yolo::YoloKernel> yolo_kernels(3);
const PluginField* fields = fc->fields;
for (int i = 0; i < fc->nbFields; i++) {
if (strcmp(fields[i].name, "netdata") == 0) {
assert(fields[i].type == PluginFieldType::kFLOAT32);
int *tmp = (int*)(fields[i].data);
class_count = tmp[0];
input_w = tmp[1];
input_h = tmp[2];
max_output_object_count = tmp[3];
} else if (strstr(fields[i].name, "yolodata") != NULL) {
assert(fields[i].type == PluginFieldType::kFLOAT32);
int *tmp = (int*)(fields[i].data);
YoloKernel kernel;
kernel.width = tmp[0];
kernel.height = tmp[1];
for (int j = 0; j < fields[i].length - 2; j++) {
kernel.anchors[j] = tmp[j + 2];
}
yolo_kernels[2 - (fields[i].name[8] - '1')] = kernel;
}
}
assert(class_count > 0 && input_w > 0 && input_h > 0 && max_output_object_count > 0);
YoloLayerPlugin* obj = new YoloLayerPlugin(class_count, input_w, input_h, max_output_object_count, yolo_kernels);
obj->setPluginNamespace(mNamespace.c_str());
return obj;
}
IPluginV2IOExt* YoloPluginCreator::deserializePlugin(const char* name, const void* serialData, size_t serialLength)
{
// This object will be deleted when the network is destroyed, which will
// call YoloLayerPlugin::destroy()
YoloLayerPlugin* obj = new YoloLayerPlugin(serialData, serialLength);
obj->setPluginNamespace(mNamespace.c_str());
return obj;
}
}
|
029670b70f1d98d36a62685c97cf20701dbacef5.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2015, The University of Oxford
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the University of Oxford nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "mem/oskar_mem_random_gaussian_cuda.h"
#include "math/private_random_helpers.h"
#include <hip/hip_runtime.h>
#ifdef __cplusplus
extern "C" {
#endif
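/* Each thread draws four random 32-bit values (via OSKAR_R123_GENERATE_4), converts them to
 * Gaussian samples with the Box-Muller transform, scales by std, and stores them as a single
 * float4/double4 when possible, falling back to a scalar tail when the length is not a
 * multiple of 4. */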
__global__
void oskar_mem_random_gaussian_cudak_f(
const int num_elements, float* data,
const unsigned int seed, const unsigned int counter1,
const unsigned int counter2, const unsigned int counter3,
const float std)
{
const int i = blockIdx.x * blockDim.x + threadIdx.x;
const int i4 = i * 4;
if (i4 >= num_elements) return;
OSKAR_R123_GENERATE_4(seed, i, counter1, counter2, counter3)
/* Convert to normalised Gaussian distribution. */
float4 r;
oskar_box_muller_f(u.i[0], u.i[1], &r.x, &r.y);
oskar_box_muller_f(u.i[2], u.i[3], &r.z, &r.w);
r.x = std * r.x;
r.y = std * r.y;
r.z = std * r.z;
r.w = std * r.w;
/* Store random numbers. */
if (i4 <= num_elements - 4)
{
((float4*) data)[i] = r;
}
else
{
/* End case only if length not divisible by 4. */
data[i4] = r.x;
if (i4 + 1 < num_elements)
data[i4 + 1] = r.y;
if (i4 + 2 < num_elements)
data[i4 + 2] = r.z;
if (i4 + 3 < num_elements)
data[i4 + 3] = r.w;
}
}
__global__
void oskar_mem_random_gaussian_cudak_d(
const int num_elements, double* data,
const unsigned int seed, const unsigned int counter1,
const unsigned int counter2, const unsigned int counter3,
const double std)
{
const int i = blockIdx.x * blockDim.x + threadIdx.x;
const int i4 = i * 4;
if (i4 >= num_elements) return;
OSKAR_R123_GENERATE_4(seed, i, counter1, counter2, counter3)
/* Convert to normalised Gaussian distribution. */
double4 r;
oskar_box_muller_d(u.i[0], u.i[1], &r.x, &r.y);
oskar_box_muller_d(u.i[2], u.i[3], &r.z, &r.w);
r.x = std * r.x;
r.y = std * r.y;
r.z = std * r.z;
r.w = std * r.w;
/* Store random numbers. */
if (i4 <= num_elements - 4)
{
((double4*) data)[i] = r;
}
else
{
/* End case only if length not divisible by 4. */
data[i4] = r.x;
if (i4 + 1 < num_elements)
data[i4 + 1] = r.y;
if (i4 + 2 < num_elements)
data[i4 + 2] = r.z;
if (i4 + 3 < num_elements)
data[i4 + 3] = r.w;
}
}
void oskar_mem_random_gaussian_cuda_f(int num_elements,
float* d_data, unsigned int seed, unsigned int counter1,
unsigned int counter2, unsigned int counter3, float std)
{
int num_blocks, num_threads = 256;
num_blocks = (((num_elements + 3) / 4) + num_threads - 1) / num_threads;
oskar_mem_random_gaussian_cudak_f OSKAR_CUDAK_CONF(num_blocks, num_threads)
(num_elements, d_data, seed, counter1, counter2, counter3, std);
}
void oskar_mem_random_gaussian_cuda_d(int num_elements,
double* d_data, unsigned int seed, unsigned int counter1,
unsigned int counter2, unsigned int counter3, double std)
{
int num_blocks, num_threads = 256;
num_blocks = (((num_elements + 3) / 4) + num_threads - 1) / num_threads;
oskar_mem_random_gaussian_cudak_d OSKAR_CUDAK_CONF(num_blocks, num_threads)
(num_elements, d_data, seed, counter1, counter2, counter3, std);
}
#ifdef __cplusplus
}
#endif
|
029670b70f1d98d36a62685c97cf20701dbacef5.cu
|
/*
* Copyright (c) 2015, The University of Oxford
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the University of Oxford nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "mem/oskar_mem_random_gaussian_cuda.h"
#include "math/private_random_helpers.h"
#include <cuda_runtime.h>
#ifdef __cplusplus
extern "C" {
#endif
__global__
void oskar_mem_random_gaussian_cudak_f(
const int num_elements, float* data,
const unsigned int seed, const unsigned int counter1,
const unsigned int counter2, const unsigned int counter3,
const float std)
{
const int i = blockIdx.x * blockDim.x + threadIdx.x;
const int i4 = i * 4;
if (i4 >= num_elements) return;
OSKAR_R123_GENERATE_4(seed, i, counter1, counter2, counter3)
/* Convert to normalised Gaussian distribution. */
float4 r;
oskar_box_muller_f(u.i[0], u.i[1], &r.x, &r.y);
oskar_box_muller_f(u.i[2], u.i[3], &r.z, &r.w);
r.x = std * r.x;
r.y = std * r.y;
r.z = std * r.z;
r.w = std * r.w;
/* Store random numbers. */
if (i4 <= num_elements - 4)
{
((float4*) data)[i] = r;
}
else
{
/* End case only if length not divisible by 4. */
data[i4] = r.x;
if (i4 + 1 < num_elements)
data[i4 + 1] = r.y;
if (i4 + 2 < num_elements)
data[i4 + 2] = r.z;
if (i4 + 3 < num_elements)
data[i4 + 3] = r.w;
}
}
__global__
void oskar_mem_random_gaussian_cudak_d(
const int num_elements, double* data,
const unsigned int seed, const unsigned int counter1,
const unsigned int counter2, const unsigned int counter3,
const double std)
{
const int i = blockIdx.x * blockDim.x + threadIdx.x;
const int i4 = i * 4;
if (i4 >= num_elements) return;
OSKAR_R123_GENERATE_4(seed, i, counter1, counter2, counter3)
/* Convert to normalised Gaussian distribution. */
double4 r;
oskar_box_muller_d(u.i[0], u.i[1], &r.x, &r.y);
oskar_box_muller_d(u.i[2], u.i[3], &r.z, &r.w);
r.x = std * r.x;
r.y = std * r.y;
r.z = std * r.z;
r.w = std * r.w;
/* Store random numbers. */
if (i4 <= num_elements - 4)
{
((double4*) data)[i] = r;
}
else
{
/* End case only if length not divisible by 4. */
data[i4] = r.x;
if (i4 + 1 < num_elements)
data[i4 + 1] = r.y;
if (i4 + 2 < num_elements)
data[i4 + 2] = r.z;
if (i4 + 3 < num_elements)
data[i4 + 3] = r.w;
}
}
void oskar_mem_random_gaussian_cuda_f(int num_elements,
float* d_data, unsigned int seed, unsigned int counter1,
unsigned int counter2, unsigned int counter3, float std)
{
int num_blocks, num_threads = 256;
num_blocks = (((num_elements + 3) / 4) + num_threads - 1) / num_threads;
oskar_mem_random_gaussian_cudak_f OSKAR_CUDAK_CONF(num_blocks, num_threads)
(num_elements, d_data, seed, counter1, counter2, counter3, std);
}
void oskar_mem_random_gaussian_cuda_d(int num_elements,
double* d_data, unsigned int seed, unsigned int counter1,
unsigned int counter2, unsigned int counter3, double std)
{
int num_blocks, num_threads = 256;
num_blocks = (((num_elements + 3) / 4) + num_threads - 1) / num_threads;
oskar_mem_random_gaussian_cudak_d OSKAR_CUDAK_CONF(num_blocks, num_threads)
(num_elements, d_data, seed, counter1, counter2, counter3, std);
}
#ifdef __cplusplus
}
#endif
|
7db2e4e65809a9256f710b534b693b374b4e9497.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "CudaObject.h"
namespace gpu_cuda {
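// Device-memory helpers for float arrays: allocation, fill, host<->device copy, clear and
// free. Errors are reported (not thrown) through cudaCheckError().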
__device__ unsigned int Rand(unsigned int randx)
{
randx = randx*1103515245+12345;
return randx&2147483647;
}
__global__ void cudaFillArray( float *gpu_array, float val, int N )
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if( i < N ){
gpu_array[i] = val;
}
}
__global__ void setRandom(float *gpu_array, int N, int maxval )
{
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if( id < N ){
gpu_array[id] = 1.0f / maxval * Rand(id) / float( RAND_MAX );
}
}
void cudaCheckError(hipError_t status)
{
hipError_t status2 = hipGetLastError();
if (status != hipSuccess){
const char *s = hipGetErrorString(status);
printf("CUDA Error: %s\n", s);
}
if (status2 != hipSuccess){
const char *s = hipGetErrorString(status);
printf("CUDA Error Prev: %s\n", s);
}
}
void cudaFillGpuArray( float *array, float val, int N )
{
CudaObject cuda = CudaObject();
dim3 grid_in = cuda.cudaGridSize(N);
hipLaunchKernelGGL(( cudaFillArray), dim3(grid_in), dim3(BLOCK), 0, 0, array, val, N );
cudaCheckError(hipPeekAtLastError());
}
float *cudaMakeArray( float *cpu_array, int N )
{
float *gpu_array;
size_t size = N * sizeof(float);
hipError_t status = hipMalloc((void **)&gpu_array, size);
cudaCheckError(status);
if(cpu_array){
hipMemcpy( gpu_array, cpu_array, size, hipMemcpyHostToDevice );
} else {
cudaFillGpuArray( gpu_array, 0, N );
}
return gpu_array;
}
void cudaPutArray( float *gpu_array, float *cpu_array, size_t N )
{
size_t size = N * sizeof(float);
hipError_t status = hipMemcpy(gpu_array, cpu_array, size, hipMemcpyHostToDevice);
cudaCheckError(status);
}
void cudaGetArray( float *cpu_array, float *gpu_array, size_t N )
{
size_t size = N * sizeof(float);
hipError_t status = hipMemcpy(cpu_array, gpu_array, size, hipMemcpyDeviceToHost);
cudaCheckError(status);
}
void cudaClearArray( float *gpu_array, int N )
{
// cudaFillGpuArray( gpu_array, 0, N);
hipMemset(gpu_array, 0, N*sizeof(float));
}
float *cudaMakeRandomArray(int N, int maxval )
{
float *gpu_array = cudaMakeArray( NULL, N );
CudaObject cuda = CudaObject();
dim3 grid = cuda.cudaGridSize(N);
hipLaunchKernelGGL(( setRandom), dim3(grid), dim3(BLOCK), 0, 0, gpu_array, N, maxval );
return gpu_array;
}
void hipFreeArray( float *array )
{
hipFree(array);
}
} // namespace gpu
|
7db2e4e65809a9256f710b534b693b374b4e9497.cu
|
#include <stdio.h>
#include "CudaObject.h"
namespace gpu_cuda {
__device__ unsigned int Rand(unsigned int randx)
{
randx = randx*1103515245+12345;
return randx&2147483647;
}
__global__ void cudaFillArray( float *gpu_array, float val, int N )
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if( i < N ){
gpu_array[i] = val;
}
}
__global__ void setRandom(float *gpu_array, int N, int maxval )
{
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if( id < N ){
gpu_array[id] = 1.0f / maxval * Rand(id) / float( RAND_MAX );
}
}
void cudaCheckError(cudaError_t status)
{
cudaError_t status2 = cudaGetLastError();
if (status != cudaSuccess){
const char *s = cudaGetErrorString(status);
printf("CUDA Error: %s\n", s);
}
if (status2 != cudaSuccess){
const char *s = cudaGetErrorString(status);
printf("CUDA Error Prev: %s\n", s);
}
}
void cudaFillGpuArray( float *array, float val, int N )
{
CudaObject cuda = CudaObject();
dim3 grid_in = cuda.cudaGridSize(N);
cudaFillArray<<<grid_in, BLOCK>>>( array, val, N );
cudaCheckError(cudaPeekAtLastError());
}
float *cudaMakeArray( float *cpu_array, int N )
{
float *gpu_array;
size_t size = N * sizeof(float);
cudaError_t status = cudaMalloc((void **)&gpu_array, size);
cudaCheckError(status);
if(cpu_array){
cudaMemcpy( gpu_array, cpu_array, size, cudaMemcpyHostToDevice );
} else {
cudaFillGpuArray( gpu_array, 0, N );
}
return gpu_array;
}
void cudaPutArray( float *gpu_array, float *cpu_array, size_t N )
{
size_t size = N * sizeof(float);
cudaError_t status = cudaMemcpy(gpu_array, cpu_array, size, cudaMemcpyHostToDevice);
cudaCheckError(status);
}
void cudaGetArray( float *cpu_array, float *gpu_array, size_t N )
{
size_t size = N * sizeof(float);
cudaError_t status = cudaMemcpy(cpu_array, gpu_array, size, cudaMemcpyDeviceToHost);
cudaCheckError(status);
}
void cudaClearArray( float *gpu_array, int N )
{
// cudaFillGpuArray( gpu_array, 0, N);
cudaMemset(gpu_array, 0, N*sizeof(float));
}
float *cudaMakeRandomArray(int N, int maxval )
{
float *gpu_array = cudaMakeArray( NULL, N );
CudaObject cuda = CudaObject();
dim3 grid = cuda.cudaGridSize(N);
setRandom<<<grid, BLOCK>>>( gpu_array, N, maxval );
return gpu_array;
}
void cudaFreeArray( float *array )
{
cudaFree(array);
}
} // namespace gpu
|
1df18465b9f360cd995a4e4b1fd793f42abe0685.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright 2017-2020 ABBYY Production LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
--------------------------------------------------------------------------------------------------------------*/
#include <NeoMathEngine/NeoMathEngineDefs.h>
#ifdef NEOML_USE_CUDA
#include <CudaMathEngine.h>
#include <CudaDevice.h>
#include <CudaCommon.h>
#include <CudaMathEngineDnnPoolings.h>
#include <MemoryHandleInternal.h>
#include <MathEngineCommon.h>
#include <Kernels/CudaDnnPoolingKernels.h>
#include <Kernels/CudaDnn3dPoolingKernels.h>
#include <Kernels/CudaDnnGlobalPoolingKernels.h>
#include <Kernels/CudaDnnTimePoolingKernels.h>
#include <Kernels/CudaDnnGlobalTimePoolingKernels.h>
namespace NeoML {
CMaxPoolingDesc* CCudaMathEngine::InitMaxPooling( const CBlobDesc& source,
int filterHeight, int filterWidth, int strideHeight, int strideWidth, const CBlobDesc& result )
{
CCudaMaxPoolingDesc* desc = new CCudaMaxPoolingDesc();
desc->Internal.Source = source;
desc->Internal.Result = result;
desc->Internal.FilterWidth = filterWidth;
desc->Internal.FilterHeight = filterHeight;
desc->Internal.StrideHeight = strideHeight;
desc->Internal.StrideWidth = strideWidth;
return desc;
}
void CCudaMathEngine::BlobMaxPooling(const CMaxPoolingDesc& poolingDesc, const CFloatHandle& sourceData,
const CIntHandle* maxIndicesData, const CFloatHandle& resultData)
{
ASSERT_EXPR( sourceData.GetMathEngine() == this );
ASSERT_EXPR( maxIndicesData == 0 || maxIndicesData->GetMathEngine() == this );
ASSERT_EXPR( resultData.GetMathEngine() == this );
SetCudaDevice( device->DeviceNumber );
const CCudaMaxPoolingDescInternal& desc = static_cast<const CCudaMaxPoolingDesc&>( poolingDesc ).Internal;
const CCudaBlobDesc& result = desc.Result;
dim3 blockCount;
dim3 threadCount;
int* maxIndexPtr = 0;
if(maxIndicesData != 0) {
maxIndexPtr = GetRaw( *maxIndicesData );
}
getCudaTaskGrid3DMinZYX(1, 1, 32, blockCount, threadCount,
result.ObjectCount(), result.Height() * result.Width(), result.Depth() * result.Channels());
hipLaunchKernelGGL(( BlobMaxPoolingKernel), dim3(blockCount), dim3(threadCount), 0, 0, desc, GetRaw(sourceData), maxIndexPtr, GetRaw(resultData));
}
void CCudaMathEngine::BlobMaxPoolingBackward( const CMaxPoolingDesc& poolingDesc, const CFloatHandle& outputDiffData,
const CIntHandle& maxIndicesData, const CFloatHandle& inputDiffData )
{
ASSERT_EXPR( outputDiffData.GetMathEngine() == this );
ASSERT_EXPR( maxIndicesData.GetMathEngine() == this );
ASSERT_EXPR( inputDiffData.GetMathEngine() == this );
SetCudaDevice( device->DeviceNumber );
const CCudaMaxPoolingDescInternal& desc = static_cast<const CCudaMaxPoolingDesc&>( poolingDesc ).Internal;
const CCudaBlobDesc& inputDiff = desc.Source;
const CCudaBlobDesc& outputDiff = desc.Result;
VectorFill(inputDiffData, 0, inputDiff.BlobSize());
bool isAtomic = desc.FilterHeight > desc.StrideHeight || desc.FilterWidth > desc.StrideWidth;
int batchNorm = (outputDiff.ObjectCount() + BlobMaxPoolingBackwardCombine - 1) / BlobMaxPoolingBackwardCombine;
dim3 blockCount;
dim3 threadCount;
int totalChannels = outputDiff.Depth() * outputDiff.Channels();
getCudaTaskGrid3DMinZYX(1, 1, 32, blockCount, threadCount, batchNorm, outputDiff.Height() * outputDiff.Width(), totalChannels);
hipLaunchKernelGGL(( BlobMaxPoolingBackwardKernel), dim3(blockCount), dim3(threadCount), 0, 0, desc, isAtomic,
GetRaw(outputDiffData), GetRaw(maxIndicesData), GetRaw(inputDiffData), batchNorm );
}
CMeanPoolingDesc* CCudaMathEngine::InitMeanPooling( const CBlobDesc& source,
int filterHeight, int filterWidth, int strideHeight, int strideWidth, const CBlobDesc& result )
{
CCudaMeanPoolingDesc* desc = new CCudaMeanPoolingDesc();
desc->Internal.Source = source;
desc->Internal.Result = result;
desc->Internal.FilterHeight = filterHeight;
desc->Internal.FilterWidth = filterWidth;
desc->Internal.StrideHeight = strideHeight;
desc->Internal.StrideWidth = strideWidth;
return desc;
}
void CCudaMathEngine::BlobMeanPooling( const CMeanPoolingDesc& poolingDesc,
const CFloatHandle& sourceData, const CFloatHandle& resultData )
{
ASSERT_EXPR( sourceData.GetMathEngine() == this );
ASSERT_EXPR( resultData.GetMathEngine() == this );
SetCudaDevice( device->DeviceNumber );
const CCudaMeanPoolingDescInternal& desc = static_cast<const CCudaMeanPoolingDesc&>( poolingDesc ).Internal;
const CCudaBlobDesc& result = desc.Result;
dim3 blockCount;
dim3 threadCount;
int totalChannels = result.Depth() * result.Channels();
getCudaTaskGrid3DMinZYX(1, 1, 32, blockCount, threadCount,
result.ObjectCount(), result.Height() * result.Width(), totalChannels);
hipLaunchKernelGGL(( BlobMeanPoolingKernel), dim3(blockCount), dim3(threadCount), 0, 0, desc, GetRaw(sourceData), GetRaw(resultData) );
}
void CCudaMathEngine::BlobMeanPoolingBackward( const CMeanPoolingDesc& poolingDesc, const CFloatHandle& outputDiffData,
const CFloatHandle& inputDiffData )
{
ASSERT_EXPR( outputDiffData.GetMathEngine() == this );
ASSERT_EXPR( inputDiffData.GetMathEngine() == this );
SetCudaDevice( device->DeviceNumber );
const CCudaMeanPoolingDescInternal& desc = static_cast<const CCudaMeanPoolingDesc&>( poolingDesc ).Internal;
const CCudaBlobDesc& outputDiff = desc.Result;
const bool isAtomic = desc.FilterHeight > desc.StrideHeight || desc.FilterWidth > desc.StrideWidth;
dim3 blockCount;
dim3 threadCount;
VectorFill( inputDiffData, 0, desc.Source.BlobSize() );
getCudaTaskGrid3D( blockCount, threadCount, outputDiff.ObjectCount(), outputDiff.Height() * outputDiff.Width(),
outputDiff.Depth() * outputDiff.Channels() );
hipLaunchKernelGGL(( BlobMeanPoolingBackwardKernel), dim3(blockCount), dim3(threadCount), 0, 0, desc, GetRaw(outputDiffData),
GetRaw(inputDiffData), isAtomic );
}
CGlobalMaxOverTimePoolingDesc* CCudaMathEngine::InitGlobalMaxOverTimePooling( const CBlobDesc& source, const CBlobDesc& result )
{
CCudaGlobalMaxOverTimePoolingDesc* desc = new CCudaGlobalMaxOverTimePoolingDesc();
desc->Internal.Source = source;
desc->Internal.Result = result;
return desc;
}
void CCudaMathEngine::BlobGlobalMaxOverTimePooling( const CGlobalMaxOverTimePoolingDesc& poolingDesc,
const CFloatHandle& sourceData, const CIntHandle* maxIndicesData, const CFloatHandle& resultData )
{
ASSERT_EXPR( sourceData.GetMathEngine() == this );
ASSERT_EXPR( maxIndicesData == 0 || maxIndicesData->GetMathEngine() == this );
ASSERT_EXPR( resultData.GetMathEngine() == this );
SetCudaDevice( device->DeviceNumber );
const CCudaGlobalMaxOverTimePoolingDescInternal& desc = static_cast<const CCudaGlobalMaxOverTimePoolingDesc&>( poolingDesc ).Internal;
const CCudaBlobDesc& source = desc.Source;
int objectCount = source.BatchLength();
int objectSize = source.BlobSize() / objectCount;
int blockCount;
int threadCount;
getCudaTaskGrid(blockCount, threadCount, objectSize);
if( maxIndicesData == 0 ) {
hipLaunchKernelGGL(( BlobGlobalMaxOverTimePoolingKernel), dim3(blockCount), dim3(threadCount), 0, 0, desc, GetRaw(sourceData), GetRaw(resultData) );
} else {
hipLaunchKernelGGL(( BlobGlobalMaxOverTimePoolingWithIndexKernel), dim3(blockCount), dim3(threadCount), 0, 0, desc, GetRaw(sourceData), GetRaw(*maxIndicesData), GetRaw(resultData) );
}
}
void CCudaMathEngine::BlobGlobalMaxOverTimePoolingBackward( const CGlobalMaxOverTimePoolingDesc& poolingDesc,
const CFloatHandle& sourceData, const CIntHandle& maxIndicesData, const CFloatHandle& resultData )
{
ASSERT_EXPR( sourceData.GetMathEngine() == this );
ASSERT_EXPR( maxIndicesData.GetMathEngine() == this );
ASSERT_EXPR( resultData.GetMathEngine() == this );
SetCudaDevice( device->DeviceNumber );
const CCudaGlobalMaxOverTimePoolingDescInternal& desc = static_cast<const CCudaGlobalMaxOverTimePoolingDesc&>( poolingDesc ).Internal;
const CCudaBlobDesc& source = desc.Source;
const CCudaBlobDesc& result = desc.Result;
VectorFill(resultData, 0, result.BlobSize());
int blockCount;
int threadCount;
getCudaTaskGrid(blockCount, threadCount, source.BlobSize());
hipLaunchKernelGGL(( BlobGlobalMaxOverTimePoolingBackwardKernel), dim3(blockCount), dim3(threadCount), 0, 0, desc, GetRaw(sourceData), GetRaw(maxIndicesData), GetRaw(resultData) );
}
CGlobalMaxPoolingDesc* CCudaMathEngine::InitGlobalMaxPooling( const CBlobDesc& source, const CBlobDesc& maxIndices, const CBlobDesc& result )
{
CCudaGlobalMaxPoolingDesc* desc = new CCudaGlobalMaxPoolingDesc();
desc->Internal.Source = source;
desc->Internal.MaxIndices = maxIndices;
desc->Internal.Result = result;
return desc;
}
void CCudaMathEngine::BlobGlobalMaxPooling( const CGlobalMaxPoolingDesc& poolingDesc, const CFloatHandle& sourceData,
const CIntHandle& maxIndicesData, const CFloatHandle& resultData )
{
ASSERT_EXPR( sourceData.GetMathEngine() == this );
ASSERT_EXPR( maxIndicesData.GetMathEngine() == this );
ASSERT_EXPR( resultData.GetMathEngine() == this );
SetCudaDevice( device->DeviceNumber );
const CCudaGlobalMaxPoolingDescInternal& desc = static_cast<const CCudaGlobalMaxPoolingDesc&>( poolingDesc ).Internal;
const CCudaBlobDesc& source = desc.Source;
const CCudaBlobDesc& maxIndices = desc.MaxIndices;
const CCudaBlobDesc& result = desc.Result;
ASSERT_EXPR(source.ObjectCount() == result.ObjectCount() && maxIndices.ObjectCount() == result.ObjectCount());
ASSERT_EXPR(maxIndices.ObjectSize() == result.ObjectSize());
int poolSize = source.Depth() * source.Height() * source.Width();
int maxCount = result.Depth() * result.Height() * result.Width();
int poolSizeNorm = (poolSize + BlobGlobalMaxPoolingCombine - 1) / BlobGlobalMaxPoolingCombine;
// As the shared memory size depends on maxCount, we may need to limit the number of threads
int sharedMemoryPerThread = 4 * maxCount * sizeof(float);
int maxThreadCount = device->SharedMemoryLimit / sharedMemoryPerThread;
dim3 blockCount;
dim3 threadCount;
getCudaTaskGrid2DMinYX(1, device->ThreadMaxCount, blockCount, threadCount,
source.ObjectCount() * source.Channels(), poolSizeNorm, maxThreadCount);
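// Keep a single block along X so the whole reduction over poolSizeNorm happens inside one block's shared memory.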
blockCount.x = 1;
int sharedSize = threadCount.y * threadCount.x * sharedMemoryPerThread;
hipLaunchKernelGGL(( BlobGlobalMaxPoolingKernel), dim3(blockCount), dim3(threadCount), sharedSize, 0, desc, GetRaw( sourceData ),
GetRaw( maxIndicesData ), GetRaw( resultData ), poolSize, maxCount, poolSizeNorm );
}
void CCudaMathEngine::BlobGlobalMaxPoolingBackward( const CGlobalMaxPoolingDesc& poolingDesc,
const CFloatHandle& outputDiffData, const CIntHandle& maxIndicesData, const CFloatHandle& inputDiffData )
{
ASSERT_EXPR( outputDiffData.GetMathEngine() == this );
ASSERT_EXPR( maxIndicesData.GetMathEngine() == this );
ASSERT_EXPR( inputDiffData.GetMathEngine() == this );
SetCudaDevice( device->DeviceNumber );
const CCudaGlobalMaxPoolingDescInternal& desc = static_cast<const CCudaGlobalMaxPoolingDesc&>( poolingDesc ).Internal;
const CCudaBlobDesc& inputDiff = desc.Source;
const CCudaBlobDesc& outputDiff = desc.Result;
VectorFill( inputDiffData, 0, inputDiff.BlobSize() );
int poolSize = inputDiff.Depth() * inputDiff.Height() * inputDiff.Width();
int maxCount = outputDiff.Depth() * outputDiff.Height() * outputDiff.Width();
int fullSize = outputDiff.ObjectCount() * maxCount * outputDiff.Channels();
int blockCount;
int threadCount;
getCudaTaskGrid(blockCount, threadCount, fullSize, BlobGlobalMaxPoolingBackwardCombine);
hipLaunchKernelGGL(( BlobGlobalMaxPoolingBackwardKernel), dim3(blockCount), dim3(threadCount), 0, 0, desc, GetRaw( outputDiffData ),
GetRaw( maxIndicesData ), GetRaw( inputDiffData ), poolSize, maxCount, fullSize );
}
C3dMaxPoolingDesc* CCudaMathEngine::Init3dMaxPooling( const CBlobDesc& source,
int filterHeight, int filterWidth, int filterDepth,
int strideHeight, int strideWidth, int strideDepth,
const CBlobDesc& result )
{
CCuda3dMaxPoolingDesc* desc = new CCuda3dMaxPoolingDesc();
desc->Internal.Source = source;
desc->Internal.Result = result;
desc->Internal.FilterHeight = filterHeight;
desc->Internal.FilterWidth = filterWidth;
desc->Internal.FilterDepth = filterDepth;
desc->Internal.StrideHeight = strideHeight;
desc->Internal.StrideWidth = strideWidth;
desc->Internal.StrideDepth = strideDepth;
return desc;
}
void CCudaMathEngine::Blob3dMaxPooling( const C3dMaxPoolingDesc& poolingDesc, const CFloatHandle& sourceData,
const CIntHandle* maxIndicesData, const CFloatHandle& resultData )
{
ASSERT_EXPR( sourceData.GetMathEngine() == this );
ASSERT_EXPR( maxIndicesData == 0 || maxIndicesData->IsNull() || maxIndicesData->GetMathEngine() == this );
ASSERT_EXPR( resultData.GetMathEngine() == this );
SetCudaDevice( device->DeviceNumber );
const CCuda3dMaxPoolingDescInternal& desc = static_cast<const CCuda3dMaxPoolingDesc&>( poolingDesc ).Internal;
const CCudaBlobDesc& result = desc.Result;
dim3 blockCount;
dim3 threadCount;
getCudaTaskGrid3DMinZYX(1, 1, 32, blockCount, threadCount, result.ObjectCount(),
result.Depth() * result.Height() * result.Width(), result.Channels());
hipLaunchKernelGGL(( Blob3dMaxPoolingKernel), dim3(blockCount), dim3(threadCount), 0, 0, desc, GetRaw( sourceData ),
maxIndicesData == 0 ? 0 : GetRaw( *maxIndicesData ), GetRaw( resultData ) );
}
void CCudaMathEngine::Blob3dMaxPoolingBackward( const C3dMaxPoolingDesc& poolingDesc, const CFloatHandle& outputDiffData,
const CIntHandle& maxIndicesData, const CFloatHandle& inputDiffData )
{
ASSERT_EXPR( outputDiffData.GetMathEngine() == this );
ASSERT_EXPR( maxIndicesData.GetMathEngine() == this );
ASSERT_EXPR( inputDiffData.GetMathEngine() == this );
SetCudaDevice( device->DeviceNumber );
const CCuda3dMaxPoolingDescInternal& desc = static_cast<const CCuda3dMaxPoolingDesc&>( poolingDesc ).Internal;
VectorFill( inputDiffData, 0, desc.Source.BlobSize() );
bool isAtomic = desc.FilterHeight > desc.StrideHeight || desc.FilterWidth > desc.StrideWidth || desc.FilterDepth > desc.StrideDepth;
dim3 blockCount;
dim3 threadCount;
getCudaTaskGrid3DMinZYX(1, 1, 32, blockCount, threadCount, desc.Result.ObjectCount(),
desc.Result.Depth() * desc.Result.Height() * desc.Result.Width(), desc.Result.Channels());
hipLaunchKernelGGL(( Blob3dMaxPoolingBackwardKernel), dim3(blockCount), dim3(threadCount), 0, 0, desc, GetRaw( outputDiffData ),
GetRaw( maxIndicesData ), GetRaw( inputDiffData ), isAtomic );
}
C3dMeanPoolingDesc* CCudaMathEngine::Init3dMeanPooling( const CBlobDesc& source,
int filterHeight, int filterWidth, int filterDepth,
int strideHeight, int strideWidth, int strideDepth,
const CBlobDesc& result )
{
CCuda3dMeanPoolingDesc* desc = new CCuda3dMeanPoolingDesc();
desc->Internal.Source = source;
desc->Internal.Result = result;
desc->Internal.FilterHeight = filterHeight;
desc->Internal.FilterWidth = filterWidth;
desc->Internal.FilterDepth = filterDepth;
desc->Internal.StrideHeight = strideHeight;
desc->Internal.StrideWidth = strideWidth;
desc->Internal.StrideDepth = strideDepth;
return desc;
}
void CCudaMathEngine::Blob3dMeanPooling( const C3dMeanPoolingDesc& poolingDesc, const CFloatHandle& sourceData,
const CFloatHandle& resultData )
{
ASSERT_EXPR( sourceData.GetMathEngine() == this );
ASSERT_EXPR( resultData.GetMathEngine() == this );
SetCudaDevice( device->DeviceNumber );
const CCuda3dMeanPoolingDescInternal& desc = static_cast<const CCuda3dMeanPoolingDesc&>( poolingDesc ).Internal;
const CCudaBlobDesc& result = desc.Result;
dim3 blockCount;
dim3 threadCount;
getCudaTaskGrid3DMinZYX(1, 1, 32, blockCount, threadCount, result.ObjectCount(),
result.Depth() * result.Height() * result.Width(), result.Channels());
hipLaunchKernelGGL(( Blob3dMeanPoolingKernel), dim3(blockCount), dim3(threadCount), 0, 0, desc, GetRaw( sourceData ), GetRaw( resultData ) );
}
void CCudaMathEngine::Blob3dMeanPoolingBackward( const C3dMeanPoolingDesc& poolingDesc,
const CFloatHandle& outputDiffData, const CFloatHandle& inputDiffData )
{
ASSERT_EXPR( outputDiffData.GetMathEngine() == this );
ASSERT_EXPR( inputDiffData.GetMathEngine() == this );
SetCudaDevice( device->DeviceNumber );
const CCuda3dMeanPoolingDescInternal& desc = static_cast<const CCuda3dMeanPoolingDesc&>( poolingDesc ).Internal;
if( desc.FilterHeight != desc.StrideHeight || desc.FilterWidth != desc.StrideWidth || desc.FilterDepth != desc.StrideDepth ) {
// Either the cube blocks used for pooling have nonzero intersections, and we need to add up several diffs,
// or some of the data is skipped when pooling and we need to set diff = 0 for it
VectorFill( inputDiffData, 0, desc.Source.BlobSize() );
}
// Indicates that the cube blocks used for pooling have nonzero intersections, and the diffs should be added up (atomicAdd)
bool isAtomic = desc.FilterHeight > desc.StrideHeight || desc.FilterWidth > desc.StrideWidth || desc.FilterDepth > desc.StrideDepth;
const CCudaBlobDesc& outputDiff = desc.Result;
dim3 blockCount;
dim3 threadCount;
getCudaTaskGrid3DMinZYX(1, 1, 32, blockCount, threadCount, outputDiff.ObjectCount(),
outputDiff.Depth() * outputDiff.Height() * outputDiff.Width(), outputDiff.Channels());
hipLaunchKernelGGL(( Blob3dMeanPoolingBackwardKernel), dim3(blockCount), dim3(threadCount), 0, 0, desc, GetRaw( outputDiffData ),
GetRaw( inputDiffData ), isAtomic );
}
CMaxOverTimePoolingDesc* CCudaMathEngine::InitMaxOverTimePooling( const CBlobDesc& source,
int filterLen, int strideLen, const CBlobDesc& result )
{
CCudaMaxOverTimePoolingDesc* desc = new CCudaMaxOverTimePoolingDesc();
desc->Internal.Source = source;
desc->Internal.Result = result;
desc->Internal.FilterLen = filterLen;
desc->Internal.StrideLen = strideLen;
return desc;
}
void CCudaMathEngine::BlobMaxOverTimePooling( const CMaxOverTimePoolingDesc& poolingDesc, const CFloatHandle& sourceData,
const CIntHandle* maxIndicesData, const CFloatHandle& resultData )
{
ASSERT_EXPR( sourceData.GetMathEngine() == this );
ASSERT_EXPR( maxIndicesData == 0 || maxIndicesData->GetMathEngine() == this );
ASSERT_EXPR( resultData.GetMathEngine() == this );
SetCudaDevice( device->DeviceNumber );
const CCudaMaxOverTimePoolingDescInternal& desc = static_cast<const CCudaMaxOverTimePoolingDesc&>( poolingDesc ).Internal;
const CCudaBlobDesc& result = desc.Result;
int xSize = (desc.FilterLen + BlobMaxOverTimePoolingCombine - 1) / BlobMaxOverTimePoolingCombine;
xSize = alignXSizeForWarp(xSize);
dim3 blockCount;
dim3 threadCount;
getCudaTaskGrid2DMinYX(1, device->ThreadMaxCount, blockCount, threadCount, result.BlobSize(), xSize);
blockCount.x = 1; // in any case there may be only one block along the X coordinate so that we can calculate the maximum inside one block
int sharedSize = threadCount.x * threadCount.y * threadCount.z;
if( maxIndicesData != 0 ) {
hipLaunchKernelGGL(( BlobMaxOverTimePoolingKernel), dim3(blockCount), dim3(threadCount), sharedSize * sizeof(CValueWithIndex), 0, desc,
GetRaw( sourceData ), GetRaw( *maxIndicesData ), GetRaw( resultData ) );
} else {
hipLaunchKernelGGL(( BlobMaxOverTimePoolingKernel), dim3(blockCount), dim3(threadCount), sharedSize * sizeof(float), 0, desc,
GetRaw( sourceData ), GetRaw( resultData ) );
}
}
void CCudaMathEngine::BlobMaxOverTimePoolingBackward( const CMaxOverTimePoolingDesc& poolingDesc, const CFloatHandle& outputDiffData,
const CIntHandle& maxIndicesData, const CFloatHandle& inputDiffData )
{
ASSERT_EXPR( outputDiffData.GetMathEngine() == this );
ASSERT_EXPR( maxIndicesData.GetMathEngine() == this );
ASSERT_EXPR( inputDiffData.GetMathEngine() == this );
SetCudaDevice( device->DeviceNumber );
const CCudaMaxOverTimePoolingDescInternal& desc = static_cast<const CCudaMaxOverTimePoolingDesc&>( poolingDesc ).Internal;
const CCudaBlobDesc& inputDiff = desc.Source;
const CCudaBlobDesc& outputDiff = desc.Result;
// Set diff to 0
CCudaMathEngine::VectorFill( inputDiffData, 0, inputDiff.BlobSize() );
int blockCount;
int threadCount;
getCudaTaskGrid(blockCount, threadCount, outputDiff.BlobSize(), BlobMaxOverTimePoolingBackwardCombine);
if( desc.StrideLen >= desc.FilterLen ) {
// The pooling areas do not intersect, no need to add
CStoreSet store;
hipLaunchKernelGGL(( BlobMaxOverTimePoolingBackwardKernel), dim3(blockCount), dim3(threadCount), 0, 0, store, desc, GetRaw( outputDiffData ),
GetRaw( maxIndicesData ), GetRaw( inputDiffData ) );
} else {
CStoreAtomicAdd store;
hipLaunchKernelGGL(( BlobMaxOverTimePoolingBackwardKernel), dim3(blockCount), dim3(threadCount), 0, 0, store, desc, GetRaw( outputDiffData ),
GetRaw( maxIndicesData ), GetRaw( inputDiffData ));
}
}
} // namespace NeoML
#endif // NEOML_USE_CUDA
|
1df18465b9f360cd995a4e4b1fd793f42abe0685.cu
|
/* Copyright © 2017-2020 ABBYY Production LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
--------------------------------------------------------------------------------------------------------------*/
#include <NeoMathEngine/NeoMathEngineDefs.h>
#ifdef NEOML_USE_CUDA
#include <CudaMathEngine.h>
#include <CudaDevice.h>
#include <CudaCommon.h>
#include <CudaMathEngineDnnPoolings.h>
#include <MemoryHandleInternal.h>
#include <MathEngineCommon.h>
#include <Kernels/CudaDnnPoolingKernels.h>
#include <Kernels/CudaDnn3dPoolingKernels.h>
#include <Kernels/CudaDnnGlobalPoolingKernels.h>
#include <Kernels/CudaDnnTimePoolingKernels.h>
#include <Kernels/CudaDnnGlobalTimePoolingKernels.h>
namespace NeoML {
CMaxPoolingDesc* CCudaMathEngine::InitMaxPooling( const CBlobDesc& source,
int filterHeight, int filterWidth, int strideHeight, int strideWidth, const CBlobDesc& result )
{
CCudaMaxPoolingDesc* desc = new CCudaMaxPoolingDesc();
desc->Internal.Source = source;
desc->Internal.Result = result;
desc->Internal.FilterWidth = filterWidth;
desc->Internal.FilterHeight = filterHeight;
desc->Internal.StrideHeight = strideHeight;
desc->Internal.StrideWidth = strideWidth;
return desc;
}
void CCudaMathEngine::BlobMaxPooling(const CMaxPoolingDesc& poolingDesc, const CFloatHandle& sourceData,
const CIntHandle* maxIndicesData, const CFloatHandle& resultData)
{
ASSERT_EXPR( sourceData.GetMathEngine() == this );
ASSERT_EXPR( maxIndicesData == 0 || maxIndicesData->GetMathEngine() == this );
ASSERT_EXPR( resultData.GetMathEngine() == this );
SetCudaDevice( device->DeviceNumber );
const CCudaMaxPoolingDescInternal& desc = static_cast<const CCudaMaxPoolingDesc&>( poolingDesc ).Internal;
const CCudaBlobDesc& result = desc.Result;
dim3 blockCount;
dim3 threadCount;
int* maxIndexPtr = 0;
if(maxIndicesData != 0) {
maxIndexPtr = GetRaw( *maxIndicesData );
}
getCudaTaskGrid3DMinZYX(1, 1, 32, blockCount, threadCount,
result.ObjectCount(), result.Height() * result.Width(), result.Depth() * result.Channels());
BlobMaxPoolingKernel<<<blockCount, threadCount>>>( desc, GetRaw(sourceData), maxIndexPtr, GetRaw(resultData));
}
void CCudaMathEngine::BlobMaxPoolingBackward( const CMaxPoolingDesc& poolingDesc, const CFloatHandle& outputDiffData,
const CIntHandle& maxIndicesData, const CFloatHandle& inputDiffData )
{
ASSERT_EXPR( outputDiffData.GetMathEngine() == this );
ASSERT_EXPR( maxIndicesData.GetMathEngine() == this );
ASSERT_EXPR( inputDiffData.GetMathEngine() == this );
SetCudaDevice( device->DeviceNumber );
const CCudaMaxPoolingDescInternal& desc = static_cast<const CCudaMaxPoolingDesc&>( poolingDesc ).Internal;
const CCudaBlobDesc& inputDiff = desc.Source;
const CCudaBlobDesc& outputDiff = desc.Result;
VectorFill(inputDiffData, 0, inputDiff.BlobSize());
bool isAtomic = desc.FilterHeight > desc.StrideHeight || desc.FilterWidth > desc.StrideWidth;
int batchNorm = (outputDiff.ObjectCount() + BlobMaxPoolingBackwardCombine - 1) / BlobMaxPoolingBackwardCombine;
dim3 blockCount;
dim3 threadCount;
int totalChannels = outputDiff.Depth() * outputDiff.Channels();
getCudaTaskGrid3DMinZYX(1, 1, 32, blockCount, threadCount, batchNorm, outputDiff.Height() * outputDiff.Width(), totalChannels);
BlobMaxPoolingBackwardKernel<<<blockCount, threadCount>>>( desc, isAtomic,
GetRaw(outputDiffData), GetRaw(maxIndicesData), GetRaw(inputDiffData), batchNorm );
}
CMeanPoolingDesc* CCudaMathEngine::InitMeanPooling( const CBlobDesc& source,
int filterHeight, int filterWidth, int strideHeight, int strideWidth, const CBlobDesc& result )
{
CCudaMeanPoolingDesc* desc = new CCudaMeanPoolingDesc();
desc->Internal.Source = source;
desc->Internal.Result = result;
desc->Internal.FilterHeight = filterHeight;
desc->Internal.FilterWidth = filterWidth;
desc->Internal.StrideHeight = strideHeight;
desc->Internal.StrideWidth = strideWidth;
return desc;
}
void CCudaMathEngine::BlobMeanPooling( const CMeanPoolingDesc& poolingDesc,
const CFloatHandle& sourceData, const CFloatHandle& resultData )
{
ASSERT_EXPR( sourceData.GetMathEngine() == this );
ASSERT_EXPR( resultData.GetMathEngine() == this );
SetCudaDevice( device->DeviceNumber );
const CCudaMeanPoolingDescInternal& desc = static_cast<const CCudaMeanPoolingDesc&>( poolingDesc ).Internal;
const CCudaBlobDesc& result = desc.Result;
dim3 blockCount;
dim3 threadCount;
int totalChannels = result.Depth() * result.Channels();
getCudaTaskGrid3DMinZYX(1, 1, 32, blockCount, threadCount,
result.ObjectCount(), result.Height() * result.Width(), totalChannels);
BlobMeanPoolingKernel<<<blockCount, threadCount>>>( desc, GetRaw(sourceData), GetRaw(resultData) );
}
void CCudaMathEngine::BlobMeanPoolingBackward( const CMeanPoolingDesc& poolingDesc, const CFloatHandle& outputDiffData,
const CFloatHandle& inputDiffData )
{
ASSERT_EXPR( outputDiffData.GetMathEngine() == this );
ASSERT_EXPR( inputDiffData.GetMathEngine() == this );
SetCudaDevice( device->DeviceNumber );
const CCudaMeanPoolingDescInternal& desc = static_cast<const CCudaMeanPoolingDesc&>( poolingDesc ).Internal;
const CCudaBlobDesc& outputDiff = desc.Result;
const bool isAtomic = desc.FilterHeight > desc.StrideHeight || desc.FilterWidth > desc.StrideWidth;
dim3 blockCount;
dim3 threadCount;
VectorFill( inputDiffData, 0, desc.Source.BlobSize() );
getCudaTaskGrid3D( blockCount, threadCount, outputDiff.ObjectCount(), outputDiff.Height() * outputDiff.Width(),
outputDiff.Depth() * outputDiff.Channels() );
BlobMeanPoolingBackwardKernel<<<blockCount, threadCount>>>( desc, GetRaw(outputDiffData),
GetRaw(inputDiffData), isAtomic );
}
CGlobalMaxOverTimePoolingDesc* CCudaMathEngine::InitGlobalMaxOverTimePooling( const CBlobDesc& source, const CBlobDesc& result )
{
CCudaGlobalMaxOverTimePoolingDesc* desc = new CCudaGlobalMaxOverTimePoolingDesc();
desc->Internal.Source = source;
desc->Internal.Result = result;
return desc;
}
void CCudaMathEngine::BlobGlobalMaxOverTimePooling( const CGlobalMaxOverTimePoolingDesc& poolingDesc,
const CFloatHandle& sourceData, const CIntHandle* maxIndicesData, const CFloatHandle& resultData )
{
ASSERT_EXPR( sourceData.GetMathEngine() == this );
ASSERT_EXPR( maxIndicesData == 0 || maxIndicesData->GetMathEngine() == this );
ASSERT_EXPR( resultData.GetMathEngine() == this );
SetCudaDevice( device->DeviceNumber );
const CCudaGlobalMaxOverTimePoolingDescInternal& desc = static_cast<const CCudaGlobalMaxOverTimePoolingDesc&>( poolingDesc ).Internal;
const CCudaBlobDesc& source = desc.Source;
int objectCount = source.BatchLength();
int objectSize = source.BlobSize() / objectCount;
int blockCount;
int threadCount;
getCudaTaskGrid(blockCount, threadCount, objectSize);
if( maxIndicesData == 0 ) {
BlobGlobalMaxOverTimePoolingKernel<<<blockCount, threadCount>>>( desc, GetRaw(sourceData), GetRaw(resultData) );
} else {
BlobGlobalMaxOverTimePoolingWithIndexKernel<<<blockCount, threadCount>>>( desc, GetRaw(sourceData), GetRaw(*maxIndicesData), GetRaw(resultData) );
}
}
void CCudaMathEngine::BlobGlobalMaxOverTimePoolingBackward( const CGlobalMaxOverTimePoolingDesc& poolingDesc,
const CFloatHandle& sourceData, const CIntHandle& maxIndicesData, const CFloatHandle& resultData )
{
ASSERT_EXPR( sourceData.GetMathEngine() == this );
ASSERT_EXPR( maxIndicesData.GetMathEngine() == this );
ASSERT_EXPR( resultData.GetMathEngine() == this );
SetCudaDevice( device->DeviceNumber );
const CCudaGlobalMaxOverTimePoolingDescInternal& desc = static_cast<const CCudaGlobalMaxOverTimePoolingDesc&>( poolingDesc ).Internal;
const CCudaBlobDesc& source = desc.Source;
const CCudaBlobDesc& result = desc.Result;
VectorFill(resultData, 0, result.BlobSize());
int blockCount;
int threadCount;
getCudaTaskGrid(blockCount, threadCount, source.BlobSize());
BlobGlobalMaxOverTimePoolingBackwardKernel<<<blockCount, threadCount>>>( desc, GetRaw(sourceData), GetRaw(maxIndicesData), GetRaw(resultData) );
}
CGlobalMaxPoolingDesc* CCudaMathEngine::InitGlobalMaxPooling( const CBlobDesc& source, const CBlobDesc& maxIndices, const CBlobDesc& result )
{
CCudaGlobalMaxPoolingDesc* desc = new CCudaGlobalMaxPoolingDesc();
desc->Internal.Source = source;
desc->Internal.MaxIndices = maxIndices;
desc->Internal.Result = result;
return desc;
}
void CCudaMathEngine::BlobGlobalMaxPooling( const CGlobalMaxPoolingDesc& poolingDesc, const CFloatHandle& sourceData,
const CIntHandle& maxIndicesData, const CFloatHandle& resultData )
{
ASSERT_EXPR( sourceData.GetMathEngine() == this );
ASSERT_EXPR( maxIndicesData.GetMathEngine() == this );
ASSERT_EXPR( resultData.GetMathEngine() == this );
SetCudaDevice( device->DeviceNumber );
const CCudaGlobalMaxPoolingDescInternal& desc = static_cast<const CCudaGlobalMaxPoolingDesc&>( poolingDesc ).Internal;
const CCudaBlobDesc& source = desc.Source;
const CCudaBlobDesc& maxIndices = desc.MaxIndices;
const CCudaBlobDesc& result = desc.Result;
ASSERT_EXPR(source.ObjectCount() == result.ObjectCount() && maxIndices.ObjectCount() == result.ObjectCount());
ASSERT_EXPR(maxIndices.ObjectSize() == result.ObjectSize());
int poolSize = source.Depth() * source.Height() * source.Width();
int maxCount = result.Depth() * result.Height() * result.Width();
int poolSizeNorm = (poolSize + BlobGlobalMaxPoolingCombine - 1) / BlobGlobalMaxPoolingCombine;
// As the shared memory size depends on maxCount, we may need to limit the number of threads
int sharedMemoryPerThread = 4 * maxCount * sizeof(float);
int maxThreadCount = device->SharedMemoryLimit / sharedMemoryPerThread;
dim3 blockCount;
dim3 threadCount;
getCudaTaskGrid2DMinYX(1, device->ThreadMaxCount, blockCount, threadCount,
source.ObjectCount() * source.Channels(), poolSizeNorm, maxThreadCount);
blockCount.x = 1;
int sharedSize = threadCount.y * threadCount.x * sharedMemoryPerThread;
BlobGlobalMaxPoolingKernel<<<blockCount, threadCount, sharedSize>>>( desc, GetRaw( sourceData ),
GetRaw( maxIndicesData ), GetRaw( resultData ), poolSize, maxCount, poolSizeNorm );
}
void CCudaMathEngine::BlobGlobalMaxPoolingBackward( const CGlobalMaxPoolingDesc& poolingDesc,
const CFloatHandle& outputDiffData, const CIntHandle& maxIndicesData, const CFloatHandle& inputDiffData )
{
ASSERT_EXPR( outputDiffData.GetMathEngine() == this );
ASSERT_EXPR( maxIndicesData.GetMathEngine() == this );
ASSERT_EXPR( inputDiffData.GetMathEngine() == this );
SetCudaDevice( device->DeviceNumber );
const CCudaGlobalMaxPoolingDescInternal& desc = static_cast<const CCudaGlobalMaxPoolingDesc&>( poolingDesc ).Internal;
const CCudaBlobDesc& inputDiff = desc.Source;
const CCudaBlobDesc& outputDiff = desc.Result;
VectorFill( inputDiffData, 0, inputDiff.BlobSize() );
int poolSize = inputDiff.Depth() * inputDiff.Height() * inputDiff.Width();
int maxCount = outputDiff.Depth() * outputDiff.Height() * outputDiff.Width();
int fullSize = outputDiff.ObjectCount() * maxCount * outputDiff.Channels();
int blockCount;
int threadCount;
getCudaTaskGrid(blockCount, threadCount, fullSize, BlobGlobalMaxPoolingBackwardCombine);
BlobGlobalMaxPoolingBackwardKernel<<<blockCount, threadCount>>>( desc, GetRaw( outputDiffData ),
GetRaw( maxIndicesData ), GetRaw( inputDiffData ), poolSize, maxCount, fullSize );
}
C3dMaxPoolingDesc* CCudaMathEngine::Init3dMaxPooling( const CBlobDesc& source,
int filterHeight, int filterWidth, int filterDepth,
int strideHeight, int strideWidth, int strideDepth,
const CBlobDesc& result )
{
CCuda3dMaxPoolingDesc* desc = new CCuda3dMaxPoolingDesc();
desc->Internal.Source = source;
desc->Internal.Result = result;
desc->Internal.FilterHeight = filterHeight;
desc->Internal.FilterWidth = filterWidth;
desc->Internal.FilterDepth = filterDepth;
desc->Internal.StrideHeight = strideHeight;
desc->Internal.StrideWidth = strideWidth;
desc->Internal.StrideDepth = strideDepth;
return desc;
}
void CCudaMathEngine::Blob3dMaxPooling( const C3dMaxPoolingDesc& poolingDesc, const CFloatHandle& sourceData,
const CIntHandle* maxIndicesData, const CFloatHandle& resultData )
{
ASSERT_EXPR( sourceData.GetMathEngine() == this );
ASSERT_EXPR( maxIndicesData == 0 || maxIndicesData->IsNull() || maxIndicesData->GetMathEngine() == this );
ASSERT_EXPR( resultData.GetMathEngine() == this );
SetCudaDevice( device->DeviceNumber );
const CCuda3dMaxPoolingDescInternal& desc = static_cast<const CCuda3dMaxPoolingDesc&>( poolingDesc ).Internal;
const CCudaBlobDesc& result = desc.Result;
dim3 blockCount;
dim3 threadCount;
getCudaTaskGrid3DMinZYX(1, 1, 32, blockCount, threadCount, result.ObjectCount(),
result.Depth() * result.Height() * result.Width(), result.Channels());
Blob3dMaxPoolingKernel<<<blockCount, threadCount>>>( desc, GetRaw( sourceData ),
maxIndicesData == 0 ? 0 : GetRaw( *maxIndicesData ), GetRaw( resultData ) );
}
void CCudaMathEngine::Blob3dMaxPoolingBackward( const C3dMaxPoolingDesc& poolingDesc, const CFloatHandle& outputDiffData,
const CIntHandle& maxIndicesData, const CFloatHandle& inputDiffData )
{
ASSERT_EXPR( outputDiffData.GetMathEngine() == this );
ASSERT_EXPR( maxIndicesData.GetMathEngine() == this );
ASSERT_EXPR( inputDiffData.GetMathEngine() == this );
SetCudaDevice( device->DeviceNumber );
const CCuda3dMaxPoolingDescInternal& desc = static_cast<const CCuda3dMaxPoolingDesc&>( poolingDesc ).Internal;
VectorFill( inputDiffData, 0, desc.Source.BlobSize() );
bool isAtomic = desc.FilterHeight > desc.StrideHeight || desc.FilterWidth > desc.StrideWidth || desc.FilterDepth > desc.StrideDepth;
dim3 blockCount;
dim3 threadCount;
getCudaTaskGrid3DMinZYX(1, 1, 32, blockCount, threadCount, desc.Result.ObjectCount(),
desc.Result.Depth() * desc.Result.Height() * desc.Result.Width(), desc.Result.Channels());
Blob3dMaxPoolingBackwardKernel<<<blockCount, threadCount>>>( desc, GetRaw( outputDiffData ),
GetRaw( maxIndicesData ), GetRaw( inputDiffData ), isAtomic );
}
C3dMeanPoolingDesc* CCudaMathEngine::Init3dMeanPooling( const CBlobDesc& source,
int filterHeight, int filterWidth, int filterDepth,
int strideHeight, int strideWidth, int strideDepth,
const CBlobDesc& result )
{
CCuda3dMeanPoolingDesc* desc = new CCuda3dMeanPoolingDesc();
desc->Internal.Source = source;
desc->Internal.Result = result;
desc->Internal.FilterHeight = filterHeight;
desc->Internal.FilterWidth = filterWidth;
desc->Internal.FilterDepth = filterDepth;
desc->Internal.StrideHeight = strideHeight;
desc->Internal.StrideWidth = strideWidth;
desc->Internal.StrideDepth = strideDepth;
return desc;
}
void CCudaMathEngine::Blob3dMeanPooling( const C3dMeanPoolingDesc& poolingDesc, const CFloatHandle& sourceData,
const CFloatHandle& resultData )
{
ASSERT_EXPR( sourceData.GetMathEngine() == this );
ASSERT_EXPR( resultData.GetMathEngine() == this );
SetCudaDevice( device->DeviceNumber );
const CCuda3dMeanPoolingDescInternal& desc = static_cast<const CCuda3dMeanPoolingDesc&>( poolingDesc ).Internal;
const CCudaBlobDesc& result = desc.Result;
dim3 blockCount;
dim3 threadCount;
getCudaTaskGrid3DMinZYX(1, 1, 32, blockCount, threadCount, result.ObjectCount(),
result.Depth() * result.Height() * result.Width(), result.Channels());
Blob3dMeanPoolingKernel<<<blockCount, threadCount>>>( desc, GetRaw( sourceData ), GetRaw( resultData ) );
}
void CCudaMathEngine::Blob3dMeanPoolingBackward( const C3dMeanPoolingDesc& poolingDesc,
const CFloatHandle& outputDiffData, const CFloatHandle& inputDiffData )
{
ASSERT_EXPR( outputDiffData.GetMathEngine() == this );
ASSERT_EXPR( inputDiffData.GetMathEngine() == this );
SetCudaDevice( device->DeviceNumber );
const CCuda3dMeanPoolingDescInternal& desc = static_cast<const CCuda3dMeanPoolingDesc&>( poolingDesc ).Internal;
if( desc.FilterHeight != desc.StrideHeight || desc.FilterWidth != desc.StrideWidth || desc.FilterDepth != desc.StrideDepth ) {
// Either the cube blocks used for pooling have nonzero intersections, and we need to add up several diffs,
// or some of the data is skipped when pooling and we need to set diff = 0 for it
VectorFill( inputDiffData, 0, desc.Source.BlobSize() );
}
// Indicates that the cube blocks used for pooling have nonzero intersections, and the diffs should be added up (atomicAdd)
bool isAtomic = desc.FilterHeight > desc.StrideHeight || desc.FilterWidth > desc.StrideWidth || desc.FilterDepth > desc.StrideDepth;
const CCudaBlobDesc& outputDiff = desc.Result;
dim3 blockCount;
dim3 threadCount;
getCudaTaskGrid3DMinZYX(1, 1, 32, blockCount, threadCount, outputDiff.ObjectCount(),
outputDiff.Depth() * outputDiff.Height() * outputDiff.Width(), outputDiff.Channels());
Blob3dMeanPoolingBackwardKernel<<<blockCount, threadCount>>>( desc, GetRaw( outputDiffData ),
GetRaw( inputDiffData ), isAtomic );
}
CMaxOverTimePoolingDesc* CCudaMathEngine::InitMaxOverTimePooling( const CBlobDesc& source,
int filterLen, int strideLen, const CBlobDesc& result )
{
CCudaMaxOverTimePoolingDesc* desc = new CCudaMaxOverTimePoolingDesc();
desc->Internal.Source = source;
desc->Internal.Result = result;
desc->Internal.FilterLen = filterLen;
desc->Internal.StrideLen = strideLen;
return desc;
}
void CCudaMathEngine::BlobMaxOverTimePooling( const CMaxOverTimePoolingDesc& poolingDesc, const CFloatHandle& sourceData,
const CIntHandle* maxIndicesData, const CFloatHandle& resultData )
{
ASSERT_EXPR( sourceData.GetMathEngine() == this );
ASSERT_EXPR( maxIndicesData == 0 || maxIndicesData->GetMathEngine() == this );
ASSERT_EXPR( resultData.GetMathEngine() == this );
SetCudaDevice( device->DeviceNumber );
const CCudaMaxOverTimePoolingDescInternal& desc = static_cast<const CCudaMaxOverTimePoolingDesc&>( poolingDesc ).Internal;
const CCudaBlobDesc& result = desc.Result;
int xSize = (desc.FilterLen + BlobMaxOverTimePoolingCombine - 1) / BlobMaxOverTimePoolingCombine;
xSize = alignXSizeForWarp(xSize);
dim3 blockCount;
dim3 threadCount;
getCudaTaskGrid2DMinYX(1, device->ThreadMaxCount, blockCount, threadCount, result.BlobSize(), xSize);
blockCount.x = 1; // in any case there may be only one block along the X coordinate so that we can calculate the maximum inside one block
int sharedSize = threadCount.x * threadCount.y * threadCount.z;
if( maxIndicesData != 0 ) {
BlobMaxOverTimePoolingKernel<<<blockCount, threadCount, sharedSize * sizeof(CValueWithIndex)>>>( desc,
GetRaw( sourceData ), GetRaw( *maxIndicesData ), GetRaw( resultData ) );
} else {
BlobMaxOverTimePoolingKernel<<<blockCount, threadCount, sharedSize * sizeof(float)>>>( desc,
GetRaw( sourceData ), GetRaw( resultData ) );
}
}
void CCudaMathEngine::BlobMaxOverTimePoolingBackward( const CMaxOverTimePoolingDesc& poolingDesc, const CFloatHandle& outputDiffData,
const CIntHandle& maxIndicesData, const CFloatHandle& inputDiffData )
{
ASSERT_EXPR( outputDiffData.GetMathEngine() == this );
ASSERT_EXPR( maxIndicesData.GetMathEngine() == this );
ASSERT_EXPR( inputDiffData.GetMathEngine() == this );
SetCudaDevice( device->DeviceNumber );
const CCudaMaxOverTimePoolingDescInternal& desc = static_cast<const CCudaMaxOverTimePoolingDesc&>( poolingDesc ).Internal;
const CCudaBlobDesc& inputDiff = desc.Source;
const CCudaBlobDesc& outputDiff = desc.Result;
// Set diff to 0
CCudaMathEngine::VectorFill( inputDiffData, 0, inputDiff.BlobSize() );
int blockCount;
int threadCount;
getCudaTaskGrid(blockCount, threadCount, outputDiff.BlobSize(), BlobMaxOverTimePoolingBackwardCombine);
if( desc.StrideLen >= desc.FilterLen ) {
// The pooling areas do not intersect, no need to add
CStoreSet store;
BlobMaxOverTimePoolingBackwardKernel<<<blockCount, threadCount>>>(store, desc, GetRaw( outputDiffData ),
GetRaw( maxIndicesData ), GetRaw( inputDiffData ) );
} else {
CStoreAtomicAdd store;
BlobMaxOverTimePoolingBackwardKernel<<<blockCount, threadCount>>>(store, desc, GetRaw( outputDiffData ),
GetRaw( maxIndicesData ), GetRaw( inputDiffData ));
}
}
} // namespace NeoML
#endif // NEOML_USE_CUDA
|
fa057b9c7522355fd0b37bcd59912b06016ff6e0.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
To use this source, cite the paper with the following bibtex:
@inproceedings{DBLP:conf/crypto/SongLG17,
author = {Ling Song and Guohong Liao and Jian Guo},
title = {{Non-full Sbox Linearization: Applications to Collision Attacks on Round-Reduced Keccak}},
booktitle = {Advances in Cryptology - {CRYPTO} 2017 - 37th Annual International Cryptology Conference, Santa Barbara, CA, USA, August 20-24, 2017, Proceedings, Part {II}},
pages = {428--451},
year = {2017},
crossref = {DBLP:conf/crypto/2017-2},
url = {https://doi.org/10.1007/978-3-319-63715-0_15},
doi = {10.1007/978-3-319-63715-0_15},
timestamp = {Tue, 15 Aug 2017 07:01:19 +0200},
biburl = {http://dblp.org/rec/bib/conf/crypto/SongLG17},
bibsource = {dblp computer science bibliography, http://dblp.org}
}
@proceedings{DBLP:conf/crypto/2017-2,
editor = {Jonathan Katz and Hovav Shacham},
title = {Advances in Cryptology - {CRYPTO} 2017 - 37th Annual International Cryptology Conference, Santa Barbara, CA, USA, August 20-24, 2017, Proceedings, Part {II}},
series = {Lecture Notes in Computer Science},
volume = {10402},
publisher = {Springer},
year = {2017},
url = {https://doi.org/10.1007/978-3-319-63715-0},
doi = {10.1007/978-3-319-63715-0},
isbn = {978-3-319-63714-3},
timestamp = {Mon, 14 Aug 2017 14:37:57 +0200},
biburl = {http://dblp.org/rec/bib/conf/crypto/2017-2},
bibsource = {dblp computer science bibliography, http://dblp.org}
}
rewritten for mpunks @bxxd
*/
#include "kernel.h"
using namespace std;
void logger(const char *priority, const char *format, va_list ap)
{
// Sanity-check parameters
if (!format)
return;
va_list ac;
va_copy(ac, ap);
struct tm t;
time_t ltime = time(NULL);
gmtime_r(&ltime, &t);
printf("[%04d-%02d-%02d %02d:%02d:%02d] [%s] ",
(t.tm_year + 1900), (t.tm_mon + 1), t.tm_mday,
t.tm_hour, t.tm_min, t.tm_sec,
priority);
vprintf(format, ac);
va_end(ac);
}
void log_sensitive(const char *format, ...)
{
// print_datetime();
#if FULL
va_list ap;
va_start(ap, format);
logger("INFO", format, ap);
va_end(ap);
#endif
}
void log_info(const char *format, ...)
{
// print_datetime();
va_list ap;
va_start(ap, format);
logger("INFO", format, ap);
va_end(ap);
}
void log_err(const char *format, ...)
{
// print_datetime();
va_list ap;
va_start(ap, format);
logger("ERROR", format, ap);
va_end(ap);
}
__device__ uint64_t device_difficulty_upper = 0;
__device__ uint64_t device_difficulty_lower = 5731203885580;
__device__ uint64_t device_minor_upper = 0;
__device__ uint64_t device_minor_lower = 0;
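// 1D texture reference through which the kernel fetches the input message words.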
texture<unsigned int, 1, hipReadModeElementType>
texreference_input;
__constant__ uint64_t RC[24] = {
0x0000000000000001, 0x0000000000008082, 0x800000000000808A,
0x8000000080008000, 0x000000000000808B, 0x0000000080000001,
0x8000000080008081, 0x8000000000008009, 0x000000000000008A,
0x0000000000000088, 0x0000000080008009, 0x000000008000000A,
0x000000008000808B, 0x800000000000008B, 0x8000000000008089,
0x8000000000008003, 0x8000000000008002, 0x8000000000000080,
0x000000000000800A, 0x800000008000000A, 0x8000000080008081,
0x8000000000008080, 0x0000000080000001, 0x8000000080008008};
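// 64-bit rotate left.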
#define ROL(x, n) (((x) << (n)) | ((x) >> ((uint64_t)64 - (n))))
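// Assemble a 64-bit random value from repeated rand() calls, 15 bits at a time.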
uint64_t rand_uint64(void)
{
uint64_t r = 0;
for (int i = 0; i < 64; i += 15 /*30*/)
{
r = r * ((uint64_t)RAND_MAX + 1) + rand();
}
return r;
}
//assume each input has the same input length
__device__ uint32_t device_hash_count = 0;
__device__ uint64_t device_found_nonce = 0;
__device__ uint64_t device_found_minor = 0;
__global__ void Keccak1600(const int inputByte, uint8_t *output, const int outputByte, uint64_t startNonce)
{
uint32_t num_keccak_blocks = inputByte / (DATA_BLOCK_SIZE << 1);
uint64_t state00 = 0, state01 = 0, state02 = 0, state03 = 0, state04 = 0,
state10 = 0, state11 = 0, state12 = 0, state13 = 0, state14 = 0,
state20 = 0, state21 = 0, state22 = 0, state23 = 0, state24 = 0,
state30 = 0, state31 = 0, state32 = 0, state33 = 0, state34 = 0,
state40 = 0, state41 = 0, state42 = 0, state43 = 0, state44 = 0;
uint64_t tmpState00 = 0, tmpState01 = 0, tmpState02 = 0, tmpState03 = 0, tmpState04 = 0,
tmpState10 = 0, tmpState11 = 0, tmpState12 = 0, tmpState13 = 0, tmpState14 = 0,
tmpState20 = 0, tmpState21 = 0, tmpState22 = 0, tmpState23 = 0, tmpState24 = 0,
tmpState30 = 0, tmpState31 = 0, tmpState32 = 0, tmpState33 = 0, tmpState34 = 0,
tmpState40 = 0, tmpState41 = 0, tmpState42 = 0, tmpState43 = 0, tmpState44 = 0;
uint64_t Csum0, Csum1, Csum2, Csum3, Csum4, D0, D1, D2, D3, D4;
uint64_t thread = blockDim.x * blockIdx.x + threadIdx.x;
uint64_t nonce = startNonce + thread;
// nonce = startNonce + device_hash_count;
#if DEBUG
// printf("nonce=%lu/0x%016x\n", nonce, nonce);
printf("n=%lu t=%lu nk=%d bdim=%d bid=%d tid=%d\n", nonce, thread, num_keccak_blocks,
blockDim.x, blockIdx.x, threadIdx.x);
printf("minor difficulty=%lx%016lx\n", device_minor_upper, device_minor_lower);
#else
// printf("n=%lu t=%lu nk=%d bdim=%d bid=%d tid=%d\n", nonce, thread, num_keccak_blocks,
// blockDim.x, blockIdx.x, threadIdx.x);
// if (nonce == 609667058559510631)
// {
// printf("here!!!!\n");
// printf("n=%lu t=%lu nk=%d bdim=%d bid=%d tid=%d\n", nonce, thread, num_keccak_blocks,
// blockDim.x, blockIdx.x, threadIdx.x);
// }
#endif
uint64_t save_state00, save_state01, save_state02, save_state03;
//absorbing phase
for (int k = 0; k < num_keccak_blocks; k++)
{
#if 0 < DATA_BLOCK_SIZE
// state00 ^= input[(blockIdx.x*BLOCKX + threadIdx.x)*inputByte+ DATA_BLOCK_SIZE*k];
state00 ^= (uint64_t)tex1Dfetch(texreference_input, (blockIdx.x*BLOCKX + threadIdx.x)*inputByte+ DATA_BLOCK_SIZE*k) ^ ((uint64_t)tex1Dfetch(texreference_input, (blockIdx.x*BLOCKX + threadIdx.x)*inputByte+ DATA_BLOCK_SIZE*k+1) << 32);
// printf("%016llX\n", state00);
#endif
#if 1 < DATA_BLOCK_SIZE
// state01 ^= input[(blockIdx.x*BLOCKX + threadIdx.x)*inputByte+ DATA_BLOCK_SIZE*k+1];
state01 ^= (uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 2) ^ ((uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 3) << 32);
#endif
#if 2 < DATA_BLOCK_SIZE
// state02 ^= input[(blockIdx.x*BLOCKX + threadIdx.x)*inputByte+ DATA_BLOCK_SIZE*k+2];
state02 ^= (uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 4) ^ ((uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 5) << 32);
#endif
#if 3 < DATA_BLOCK_SIZE
// state03 ^= input[(blockIdx.x*BLOCKX + threadIdx.x)*inputByte+ DATA_BLOCK_SIZE*k+3];
state03 ^= (uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 6) ^ ((uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 7) << 32);
#endif
#if 4 < DATA_BLOCK_SIZE
// state04 ^= input[(blockIdx.x*BLOCKX + threadIdx.x)*inputByte+ DATA_BLOCK_SIZE*k+4];
state04 ^= (uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 8) ^ ((uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 9) << 32);
#endif
#if 5 < DATA_BLOCK_SIZE
// state10 ^= input[(blockIdx.x*BLOCKX + threadIdx.x)*inputByte+ DATA_BLOCK_SIZE*k+5];
state10 ^= (uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 10) ^ ((uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 11) << 32);
#endif
#if 6 < DATA_BLOCK_SIZE
// state11 ^= input[(blockIdx.x*BLOCKX + threadIdx.x)*inputByte+ DATA_BLOCK_SIZE*k+6];
state11 ^= (uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 12) ^ ((uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 13) << 32);
#endif
#if 7 < DATA_BLOCK_SIZE
// state12 ^= input[(blockIdx.x*BLOCKX + threadIdx.x)*inputByte+ DATA_BLOCK_SIZE*k+7];
state12 ^= (uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 14) ^ ((uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 15) << 32);
#endif
#if 8 < DATA_BLOCK_SIZE
// state13 ^= input[(blockIdx.x*BLOCKX + threadIdx.x)*inputByte+ DATA_BLOCK_SIZE*k+8];
state13 ^= (uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 16) ^ ((uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 17) << 32);
#endif
#if 9 < DATA_BLOCK_SIZE
// state14 ^= input[(blockIdx.x*BLOCKX + threadIdx.x)*inputByte+ DATA_BLOCK_SIZE*k+9];
state14 ^= (uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 18) ^ ((uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 19) << 32);
#endif
#if 10 < DATA_BLOCK_SIZE
// state20 ^= input[(blockIdx.x*BLOCKX + threadIdx.x)*inputByte+ DATA_BLOCK_SIZE*k+10];
state20 ^= (uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 20) ^ ((uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 21) << 32);
#endif
#if 11 < DATA_BLOCK_SIZE
// state21 ^= input[(blockIdx.x*BLOCKX + threadIdx.x)*inputByte+ DATA_BLOCK_SIZE*k+11];
state21 ^= (uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 22) ^ ((uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 23) << 32);
#endif
#if 12 < DATA_BLOCK_SIZE
// state22 ^= input[(blockIdx.x*BLOCKX + threadIdx.x)*inputByte+ DATA_BLOCK_SIZE*k+12];
state22 ^= (uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 24) ^ ((uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 25) << 32);
#endif
#if 13 < DATA_BLOCK_SIZE
// state23 ^= input[(blockIdx.x*BLOCKX + threadIdx.x)*inputByte+ DATA_BLOCK_SIZE*k+13];
state23 ^= (uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 26) ^ ((uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 27) << 32);
#endif
#if 14 < DATA_BLOCK_SIZE
// state24 ^= input[(blockIdx.x*BLOCKX + threadIdx.x)*inputByte+ DATA_BLOCK_SIZE*k+14];
state24 ^= (uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 28) ^ ((uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 29) << 32);
#endif
#if 15 < DATA_BLOCK_SIZE
// state30 ^= input[(blockIdx.x*BLOCKX + threadIdx.x)*inputByte+ DATA_BLOCK_SIZE*k+15];
state30 ^= (uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 30) ^ ((uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 31) << 32);
#endif
#if 16 < DATA_BLOCK_SIZE
// state31 ^= input[(blockIdx.x*BLOCKX + threadIdx.x)*inputByte+ DATA_BLOCK_SIZE*k+16];
state31 ^= (uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 32) ^ ((uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 33) << 32);
#endif
#if 17 < DATA_BLOCK_SIZE
// state32 ^= input[(blockIdx.x*BLOCKX + threadIdx.x)*inputByte+ DATA_BLOCK_SIZE*k+17];
state32 ^= (uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 34) ^ ((uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 35) << 32);
#endif
#if 18 < DATA_BLOCK_SIZE
// state33 ^= input[(blockIdx.x*BLOCKX + threadIdx.x)*inputByte+ DATA_BLOCK_SIZE*k+18];
state33 ^= (uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 36) ^ ((uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 37) << 32);
#endif
#if 19 < DATA_BLOCK_SIZE
// state34 ^= input[(blockIdx.x*BLOCKX + threadIdx.x)*inputByte+ DATA_BLOCK_SIZE*k+19];
state34 ^= (uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 38) ^ ((uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 39) << 32);
#endif
#if 20 < DATA_BLOCK_SIZE
// state40 ^= input[(blockIdx.x*BLOCKX + threadIdx.x)*inputByte+ DATA_BLOCK_SIZE*k+20];
state40 ^= (uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 40) ^ ((uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 41) << 32);
#endif
#if 21 < DATA_BLOCK_SIZE
// state41 ^= input[(blockIdx.x*BLOCKX + threadIdx.x)*inputByte+ DATA_BLOCK_SIZE*k+21];
state41 ^= (uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 42) ^ ((uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 43) << 32);
#endif
#if 22 < DATA_BLOCK_SIZE
// state42 ^= input[(blockIdx.x*BLOCKX + threadIdx.x)*inputByte+ DATA_BLOCK_SIZE*k+22];
state42 ^= (uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 44) ^ ((uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 45) << 32);
#endif
#if 23 < DATA_BLOCK_SIZE
// state43 ^= input[(blockIdx.x*BLOCKX + threadIdx.x)*inputByte+ DATA_BLOCK_SIZE*k+23];
state43 ^= (uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 46) ^ ((uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 47) << 32);
#endif
#if 24 < DATA_BLOCK_SIZE
// state44 ^= input[(blockIdx.x*BLOCKX + threadIdx.x)*inputByte+ DATA_BLOCK_SIZE*k+24];
state44 ^= (uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 48) ^ ((uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 49) << 32);
#endif
state03 = cuda_swab64(nonce);
save_state00 = cuda_swab64(state00);
save_state01 = cuda_swab64(state01);
save_state02 = cuda_swab64(state02);
save_state03 = cuda_swab64(state03);
#if DEBUG
printf("MSG:\n0x%016lx%016lx%016lx%016lx\n",
cuda_swab64(state00),
cuda_swab64(state01),
cuda_swab64(state02),
cuda_swab64(state03));
#endif
// if (nonce == 609667058559510631)
// {
// printf("MSG:\n0x%016lx%016lx%016lx%016lx\n",
// cuda_swab64(state00),
// cuda_swab64(state01),
// cuda_swab64(state02),
// cuda_swab64(state03));
// }
#pragma unroll 4
for (int i = 0; i < Nr; i++)
{
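// Theta: compute the five column parities and their rotated differences.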
Csum0 = state00 ^ state10 ^ state20 ^ state30 ^ state40;
Csum1 = state01 ^ state11 ^ state21 ^ state31 ^ state41;
Csum2 = state02 ^ state12 ^ state22 ^ state32 ^ state42;
Csum3 = state03 ^ state13 ^ state23 ^ state33 ^ state43;
Csum4 = state04 ^ state14 ^ state24 ^ state34 ^ state44;
//
D0 = Csum4 ^ ROL(Csum1, 1);
D1 = Csum0 ^ ROL(Csum2, 1);
D2 = Csum1 ^ ROL(Csum3, 1);
D3 = Csum2 ^ ROL(Csum4, 1);
D4 = Csum3 ^ ROL(Csum0, 1);
state00 ^= D0;
state01 ^= D1;
state02 ^= D2;
state03 ^= D3;
state04 ^= D4;
tmpState00 = state00;
tmpState20 = ROL(state01, 1);
tmpState40 = ROL(state02, 62);
tmpState10 = ROL(state03, 28);
tmpState30 = ROL(state04, 27);
state10 ^= D0;
state11 ^= D1;
state12 ^= D2;
state13 ^= D3;
state14 ^= D4;
tmpState31 = ROL(state10, 36);
tmpState01 = ROL(state11, 44);
tmpState21 = ROL(state12, 6);
tmpState41 = ROL(state13, 55);
tmpState11 = ROL(state14, 20);
state20 ^= D0;
state21 ^= D1;
state22 ^= D2;
state23 ^= D3;
state24 ^= D4;
tmpState12 = ROL(state20, 3);
tmpState32 = ROL(state21, 10);
tmpState02 = ROL(state22, 43);
tmpState22 = ROL(state23, 25);
tmpState42 = ROL(state24, 39);
state30 ^= D0;
state31 ^= D1;
state32 ^= D2;
state33 ^= D3;
state34 ^= D4;
tmpState43 = ROL(state30, 41);
tmpState13 = ROL(state31, 45);
tmpState33 = ROL(state32, 15);
tmpState03 = ROL(state33, 21);
tmpState23 = ROL(state34, 8);
state40 ^= D0;
state41 ^= D1;
state42 ^= D2;
state43 ^= D3;
state44 ^= D4;
//
tmpState24 = ROL(state40, 18);
tmpState44 = ROL(state41, 2);
tmpState14 = ROL(state42, 61);
tmpState34 = ROL(state43, 56);
tmpState04 = ROL(state44, 14);
//
state00 = tmpState00 ^ ((~tmpState01) & tmpState02);
state10 = tmpState10 ^ ((~tmpState11) & tmpState12);
state20 = tmpState20 ^ ((~tmpState21) & tmpState22);
state30 = tmpState30 ^ ((~tmpState31) & tmpState32);
state40 = tmpState40 ^ ((~tmpState41) & tmpState42);
state01 = tmpState01 ^ ((~tmpState02) & tmpState03);
state11 = tmpState11 ^ ((~tmpState12) & tmpState13);
state21 = tmpState21 ^ ((~tmpState22) & tmpState23);
state31 = tmpState31 ^ ((~tmpState32) & tmpState33);
state41 = tmpState41 ^ ((~tmpState42) & tmpState43);
state02 = tmpState02 ^ ((~tmpState03) & tmpState04);
state12 = tmpState12 ^ ((~tmpState13) & tmpState14);
state22 = tmpState22 ^ ((~tmpState23) & tmpState24);
state32 = tmpState32 ^ ((~tmpState33) & tmpState34);
state42 = tmpState42 ^ ((~tmpState43) & tmpState44);
state03 = tmpState03 ^ ((~tmpState04) & tmpState00);
state13 = tmpState13 ^ ((~tmpState14) & tmpState10);
state23 = tmpState23 ^ ((~tmpState24) & tmpState20);
state33 = tmpState33 ^ ((~tmpState34) & tmpState30);
state43 = tmpState43 ^ ((~tmpState44) & tmpState40);
state04 = tmpState04 ^ ((~tmpState00) & tmpState01);
state14 = tmpState14 ^ ((~tmpState10) & tmpState11);
state24 = tmpState24 ^ ((~tmpState20) & tmpState21);
state34 = tmpState34 ^ ((~tmpState30) & tmpState31);
state44 = tmpState44 ^ ((~tmpState40) & tmpState41);
state00 ^= RC[i];
}
}
// //squeezing phase;
// #if 0 < HASH_SIZE
// memcpy(output+(blockIdx.x*BLOCKX + threadIdx.x)*HASH_SIZE, &state00, 8);
// #endif
// #if 8 < HASH_SIZE
// memcpy(output + (blockIdx.x * BLOCKX + threadIdx.x) * HASH_SIZE + 8, &state01, 8);
// #endif
// #if 16 < HASH_SIZE
// memcpy(output + (blockIdx.x * BLOCKX + threadIdx.x) * HASH_SIZE + 16, &state02, 8);
// #endif
// #if 24 < HASH_SIZE
// memcpy(output + (blockIdx.x * BLOCKX + threadIdx.x) * HASH_SIZE + 24, &state03, 8);
// #endif
#if DEBUG
// printf("state:0x%016lx\n", cuda_swab64(state00));
printf("nonce=0x%016lx\nOUT: \n0x%016lx%016lx%016lx%016lx\n",
nonce,
cuda_swab64(state00),
cuda_swab64(state01),
cuda_swab64(state02),
cuda_swab64(state03));
#endif
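// Difficulty check: two digest lanes are byte-swapped back to big-endian and
// treated as an (upper, lower) pair, which is compared numerically against
// the target split across device_difficulty_upper/device_difficulty_lower; a
// nonce is a hit when the combined value falls below the target.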
bool found = 0;
uint32_t upper = 0;
uint64_t lower = 0;
lower = cuda_swab64(state03);
upper = cuda_swab64(state02);
upper = upper << 8;
if (device_difficulty_upper && upper < device_difficulty_upper)
{
found = 1;
}
else
{
if (device_difficulty_upper == upper && lower < device_difficulty_lower)
{
found = 1;
}
}
if (found)
{
// device_found_nonce = nonce;
printf("IN: \n0x%016lx%016lx%016lx%016lx\n OUT: \n0x%016lx%016lx%016lx%016lx\n",
save_state00,
save_state01,
save_state02,
save_state03,
cuda_swab64(state00),
cuda_swab64(state01),
cuda_swab64(state02),
cuda_swab64(state03));
printf(">>> FOUND XXX nonce=%lu/0x%016lx combined=0x%06lx%016lx difficulty=0x%06lx%016lx\n", nonce, nonce, upper, lower,
device_difficulty_upper, device_difficulty_lower);
device_found_nonce = nonce;
}
else if (device_minor_lower)
{
#if MINOR
// do same thing for minor nonce
found = 0;
if (device_minor_upper && upper < device_minor_upper)
{
found = 1;
}
else
{
if (device_minor_upper == upper && lower < device_minor_lower)
{
found = 1;
}
}
if (found)
{
printf("IN: \n0x%016lx%016lx%016lx%016lx\n OUT: \n0x%016lx%016lx%016lx%016lx\n",
save_state00,
save_state01,
save_state02,
save_state03,
cuda_swab64(state00),
cuda_swab64(state01),
cuda_swab64(state02),
cuda_swab64(state03));
printf(">>> found minor nonce=%lu/0x%016lx combined=0x%06lx%016lx minor=0x%06lx%016lx\n", nonce, nonce, upper, lower,
device_minor_upper, device_minor_lower);
device_found_minor = nonce;
}
#endif
}
atomicAdd(&device_hash_count, 1);
#if DEBUG
// printf("device_hash_count=%u\n", device_hash_count);
#endif
// #if 32 < HASH_SIZE
// memcpy(output + (blockIdx.x * BLOCKX + threadIdx.x) * HASH_SIZE + 32, &state04, 8);
// #endif
}
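// Host-side Keccak pad10*1: append the domain-separation byte SUFFIX (which
// carries the first padding bit), zero-fill up to a multiple of the rate
// R / 8, and XOR 0x80 into the last byte. Returns the padded length.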
int Padding(uint8_t input[], int inputByte, uint8_t output[])
{
int outputByte = R / 8 - (inputByte + 1) % (R / 8) + inputByte + 1;
log_info("Padding inputByte=%d outputByte=%d\n", inputByte, outputByte);
memcpy(output, input, inputByte);
memset(output + inputByte, 0, sizeof(uint8_t) * (outputByte - inputByte));
output[inputByte] = SUFFIX;
output[outputByte - 1] ^= 0x80;
return outputByte;
}
//byte
// uint8_t m[] = {0x22, 0x23, 0x3E, 0x5F, 0xCC, 0x4E, 0xFC, 0x0E, 0xEB, 0x03, 0x0C, 0x72, 0xF9, 0x7A, 0x4E, 0x8A, 0x9D, 0xC4, 0xBB, 0x96, 0x18, 0x33, 0xDA, 0xE8, 0xEF, 0xED, 0xCF, 0xFD, 0xE2, 0xA3, 0xC0, 0x37, 0x00, 0x69, 0xCE, 0x65, 0xB3, 0x32, 0x38, 0xAC, 0x43, 0xD6, 0x47, 0x64, 0xFB, 0xDA, 0xDE, 0xDC, 0x6A, 0x22, 0xA3, 0x0C, 0x15, 0xCC, 0x01, 0x0D, 0x7F, 0xC3, 0xA4, 0x45, 0xE3, 0x5E, 0xDA, 0xB7, 0x69, 0x29, 0xD0, 0xAB, 0x6C, 0x48, 0x35, 0xF2, 0x1F, 0xA7, 0x2D, 0x20, 0xC3, 0x3E, 0x5F, 0xCC, 0x4E, 0xFC, 0x0E, 0xEB, 0x03, 0x0C, 0x72, 0xF9, 0x7A, 0x4E, 0x8A, 0x9D, 0xC4, 0xBB, 0x96, 0x18, 0x33, 0xDA, 0xE8, 0xEF, 0xED, 0xCF, 0xFD, 0xE2, 0xA3, 0xC0, 0x37, 0x00, 0x69, 0xCE, 0x65, 0xB3, 0x32, 0x38, 0xAC, 0x43, 0xD6, 0x47, 0x64, 0xFB, 0xDA, 0xDE, 0xDC};
// uint8_t msg[32] = {0x04, 0x22, 0x00, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x19, 0x00, 0x00, 0x00,
// 0x7D, 0x43, 0x7E, 0x28, 0xCD, 0x73, 0xA3, 0xF4, 0x87,
// 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
uint8_t msg[32] = {0};
uint8_t output[BLOCKNUM * BLOCKX][HASH_SIZE];
uint8_t input[BLOCKSIZE];
uint8_t host_input[SUMDATASIZE];
// #define STREAMNUM 5 xxx
hipStream_t stream[STREAMNUM];
uint32_t *device_input[STREAMNUM];
uint8_t *device_output[STREAMNUM];
uint64_t getTime(void)
{
uint64_t val = 0;
struct timeval tv;
gettimeofday(&tv, NULL);
val = (((uint64_t)tv.tv_sec) * 1000 + ((uint64_t)tv.tv_usec) / 1000);
// log_info("getTime tv.tv_sec %ld tv_usec %ld val %ld\n", tv.tv_sec, tv.tv_usec, val);
return (uint64_t)val;
}
void printMsg(const char *title, uint8_t *msg, int len)
{
if (title)
{
log_info("%s:\n0x", title);
}
else
{
printf("0x");
}
for (int i = 0; i < len; i++)
{
printf("%02X", msg[i]);
}
printf("\n");
}
void FreeAll()
{
log_info("freeAll..\n");
hipDeviceSynchronize();
for (int i = 0; i < STREAMNUM; i++)
{
hipStreamDestroy(stream[i]);
hipFree(device_input[i]);
hipFree(device_output[i]);
}
}
void checkCUDAError(const char *msg)
{
hipError_t err = hipGetLastError();
if (hipSuccess != err)
{
log_err("Cuda error : % s : % s.\n ", msg, hipGetErrorString(err));
FreeAll();
exit(EXIT_FAILURE);
}
}
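// setMsg parses the option strings with GMP and builds the 32-byte mining
// message (lastMined asset id in bytes 0..11, the low 9 bytes of the sender
// address from byte 12), pads it to the sponge rate, replicates one copy per
// thread slot in host_input, and pushes the major/minor difficulty targets
// into the device symbols.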
void setMsg(OPTS *opts)
{
const char *val;
int base;
mpz_t sender_mpz;
mpz_t lastMinedPunkAsset_mpz;
mpz_t difficulty_mpz;
mpz_t startNonce_mpz;
size_t count;
if (opts->str_address)
{
val = opts->str_address;
}
else
{
val = DEFAULT_ADDRESS;
}
if (val && val[0] == '0' and val[1] == 'x')
{
val = val + 2;
base = 16;
}
else
{
base = 10;
}
mpz_init_set_str(sender_mpz, &val[22], 16);
gmp_printf("sender_mpz=%Zd/%018Zx\n", sender_mpz, sender_mpz);
if (opts->str_lastMined)
{
val = opts->str_lastMined;
}
else
{
val = DEFAULT_LASTMINED;
}
if (val && val[0] == '0' and val[1] == 'x')
{
val = val + 2;
base = 16;
}
else
{
base = 10;
}
mpz_init_set_str(lastMinedPunkAsset_mpz, val, base);
gmp_printf("lastMinedPunkAsset_mpz=%Zd/0x%Zx\n", lastMinedPunkAsset_mpz, lastMinedPunkAsset_mpz);
if (opts->str_startNonce)
{
val = opts->str_startNonce;
}
else
{
val = NULL;
}
if (val && val[0] == '0' and val[1] == 'x')
{
val = val + 2;
base = 16;
}
else
{
base = 10;
}
if (val)
{
mpz_init_set_str(startNonce_mpz, val, base);
gmp_printf("startNonce_mpz=%Zd/0x%Zx\n", startNonce_mpz, startNonce_mpz);
mpz_export(&opts->startNonce, &count, 1, sizeof(opts->startNonce), 0, 0, startNonce_mpz);
}
if (opts->str_difficulty)
{
val = opts->str_difficulty;
}
else
{
val = DEFAULT_DIFFICULTY;
}
if (val && val[0] == '0' and val[1] == 'x')
{
val = val + 2;
base = 16;
}
else
{
base = 10;
}
uint8_t difficulty[16];
if (val)
{
mpz_init_set_str(difficulty_mpz, val, base);
gmp_printf("difficulty_mpz=%Zd/0x%032Zx\n", difficulty_mpz, difficulty_mpz);
mpz_export(difficulty, &count, 1, sizeof(difficulty), 0, 0, difficulty_mpz);
opts->upper_difficulty = ((uint64_t *)difficulty)[1];
opts->lower_difficulty = ((uint64_t *)difficulty)[0];
}
// printMsg("difficulty", difficulty, 16);
// log_info("0x%016lx %016lx\n", opts->upper_difficulty, opts->lower_difficulty);
if (opts->str_minor)
{
val = opts->str_minor;
}
else
{
val = DEFAULT_MINOR;
}
if (val && val[0] == '0' and val[1] == 'x')
{
val = val + 2;
base = 16;
}
else
{
base = 10;
}
if (val)
{
mpz_init_set_str(difficulty_mpz, val, base);
gmp_printf("minor difficulty_mpz=%Zd/0x%032Zx\n", difficulty_mpz, difficulty_mpz);
mpz_export(difficulty, &count, 1, sizeof(difficulty), 0, 0, difficulty_mpz);
opts->upper_minor = ((uint64_t *)difficulty)[1];
opts->lower_minor = ((uint64_t *)difficulty)[0];
}
// printMsg("difficulty", difficulty, 16);
/* set msg */
printMsg("pre msg", msg, 32);
mpz_export(msg, &count, 1, 12, 1, 0, lastMinedPunkAsset_mpz);
mpz_export(msg + 12, &count, 1, 9, 1, 0, sender_mpz);
printMsg("pos msg", msg, 32);
Padding(msg, sizeof(msg), input);
for (int i = 0; i < STREAMNUM; i++)
{
hipStreamCreate(&stream[i]);
}
checkCUDAError("create stream error");
log_info("init.. writing %d blocks size_t=%d\n", BLOCKX * BLOCKNUM, BLOCKSIZE);
for (int i = 0; i < BLOCKX * BLOCKNUM; i++)
{
memcpy(host_input + i * BLOCKSIZE, input, BLOCKSIZE);
// printMsg("msg",host_input + i*BLOCKSIZE, 32);
// break;
}
hipMemcpyToSymbol(device_difficulty_lower, &opts->lower_difficulty, sizeof(opts->lower_difficulty), 0, hipMemcpyHostToDevice);
checkCUDAError("copy to symbol");
hipMemcpyToSymbol(device_difficulty_upper, &opts->upper_difficulty, sizeof(opts->upper_difficulty), 0, hipMemcpyHostToDevice);
checkCUDAError("copy to symbol");
hipMemcpyToSymbol(device_minor_lower, &opts->lower_minor, sizeof(opts->lower_minor), 0, hipMemcpyHostToDevice);
checkCUDAError("copy to symbol");
hipMemcpyToSymbol(device_minor_upper, &opts->upper_minor, sizeof(opts->upper_minor), 0, hipMemcpyHostToDevice);
checkCUDAError("copy to symbol");
}
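// One input and one output buffer per stream, so kernel launches can rotate
// round-robin across STREAMNUM independent streams.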
void GetCudaMalloc(int length)
{
for (int i = 0; i < STREAMNUM; i++)
{
hipMalloc(&device_input[i], BLOCKNUM * BLOCKX * BLOCKSIZE);
checkCUDAError("malloc for device_input");
hipMalloc(&device_output[i], BLOCKX * BLOCKNUM * HASH_SIZE);
checkCUDAError("malloc for device_output");
}
}
static int destructing = 0;
void destruct()
{
log_info("destruct..\n");
if (destructing)
{
return;
}
destructing = 1;
}
/* Signal Handler for SIGINT */
void sigintHandler(int sig_num)
{
log_info("caught signal: SIGINT\n");
destruct();
}
/* Signal Handler for SIGTERM */
void sigtermHandler(int sig_num)
{
log_info("caught signal: SIGTERM\n");
destruct();
}
void get_options(int argc, char **argv, OPTS *opts)
{
int c;
memset(opts, 0, sizeof(OPTS));
opts->controller = DEFAULT_CONTROLLER;
opts->str_address = strdup(DEFAULT_ADDRESS);
opts->start_address = strdup(DEFAULT_ADDRESS);
opts->str_difficulty = strdup(DEFAULT_DIFFICULTY);
opts->str_lastMined = strdup(DEFAULT_LASTMINED);
static struct option long_options[] =
{
{"address", required_argument, 0, 'a'},
{"difficulty", required_argument, 0, 'd'},
{"startNonce", required_argument, 0, 's'},
{"lastMined", required_argument, 0, 'l'},
{"cudaDevice", required_argument, 0, 'x'},
{"testing", no_argument, 0, 't'},
{"user controller flag", optional_argument, 0, 'c'},
{"version", no_argument, 0, 'v'},
{"help", no_argument, 0, 'h'},
{0, 0, 0, 0}};
#if FULL
#else
opts->use_controller = true;
#endif
while (1)
{
int option_index = 0;
c = getopt_long(argc, argv, "a:d:s:l:x:tc::vh", long_options, &option_index);
/* Detect the end of the options. */
if (c == -1)
break;
switch (c)
{
case '0':
log_info("have 0\n");
break;
case 'a':
#if FULL
#else
free(opts->str_address);
opts->str_address = strdup(optarg);
#endif
free(opts->start_address);
opts->start_address = strdup(optarg);
log_info("opt address='%s'\n", opts->str_address);
break;
case 'd':
free(opts->str_difficulty);
opts->str_difficulty = strdup(optarg);
log_info("opt difficulty='%s'\n", opts->str_difficulty);
break;
case 's':
free(opts->str_startNonce);
opts->str_startNonce = strdup(optarg);
log_info("opt startNonce='%s'\n", opts->str_startNonce);
break;
case 'l':
free(opts->str_lastMined);
opts->str_lastMined = strdup(optarg);
log_info("opt lastMined='%s'\n", opts->str_lastMined);
break;
case 'x':
opts->device = atoi(optarg);
log_info("opt device='%d'\n", opts->device);
break;
case 't':
opts->test = true;
log_info("opt test only\n");
break;
case 'c':
opts->use_controller = true;
#if FULL
if (optarg) // XXX
{
opts->controller = strdup(optarg);
}
log_info("use controller=%s\n", opts->controller);
#endif
break;
case 'v':
printf("version=%s\n", VERSION);
exit(0);
default:
log_info("option `%c` is not supported.\n", c);
exit(0);
}
}
}
struct MemoryStruct
{
char *memory;
size_t size;
};
size_t
getCurlData(void *contents, size_t size, size_t nmemb, void *userp)
{
size_t realsize = size * nmemb;
struct MemoryStruct *mem = (struct MemoryStruct *)userp;
mem->memory = (char *)realloc(mem->memory, mem->size + realsize + 1);
if (mem->memory == NULL)
{
/* out of memory! */
log_err("not enough memory (realloc returned NULL)\n");
return 0;
}
memcpy(&(mem->memory[mem->size]), contents, realsize);
mem->size += realsize;
mem->memory[mem->size] = 0;
return realsize;
}
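// json_setValue compares the string field `name` in the controller payload
// with the current value of *place; a mismatch sets *changed, and once
// *changed is set the old string is freed and replaced with the JSON value.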
bool json_setValue(char **place, json_t *payload, const char *name, bool *changed)
{
json_t *value = json_object_get(payload, name);
if (!value)
{
log_info("error unable to get %s.\n", name);
return false;
}
json_auto_t *compare = json_string(*place);
if (!json_equal(compare, value))
{
*changed = true;
}
// log_info("%p\n", *place);
if (*changed)
{
free(*place);
*place = strdup((char *)json_string_value(value));
log_info("controller setting %s=%s\n", name, *place);
}
json_decref(compare);
return true;
}
bool submitNonce(OPTS *opts, uint64_t nonce, bool minor)
{
if (destructing)
return false;
CURL *curl;
CURLcode res;
// bool success = false;
log_info("submitNonce.. nonce=%lx\n", nonce);
curl = curl_easy_init();
struct MemoryStruct chunk;
chunk.memory = NULL;
chunk.size = 0;
chunk.memory = (char *)malloc(1);
char url[256];
const char *address = opts->str_address;
if (!address)
{
address = DEFAULT_ADDRESS;
}
if (minor == true)
{
sprintf(url, "%s/submit-ping?nonce=%lu&address=%s&last=%s&src=%s", opts->controller, nonce, address, opts->str_lastMined,
opts->start_address);
}
else
{
sprintf(url, "%s/submit-work?nonce=%lu&address=%s&last=%s&src=%s", opts->controller, nonce, address, opts->str_lastMined,
opts->start_address);
}
log_sensitive("url=%s\n", url);
curl_easy_setopt(curl, CURLOPT_URL, url);
curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, getCurlData);
curl_easy_setopt(curl, CURLOPT_WRITEDATA, (void *)&chunk);
res = curl_easy_perform(curl);
if (res != CURLE_OK)
{
log_err("curl_easy_perform() failed: %s\n", curl_easy_strerror(res));
if (chunk.memory)
{
free(chunk.memory);
}
curl_easy_cleanup(curl);
return false;
}
if (!chunk.memory)
{
log_info("chunk memory is null\n");
curl_easy_cleanup(curl);
return false;
}
log_info("response: %s\n", chunk.memory);
free(chunk.memory);
curl_easy_cleanup(curl);
return true;
}
bool submitMinor(OPTS *opts, uint64_t nonce)
{
log_info("submitMinor..\n");
return submitNonce(opts, nonce, true);
}
bool heartbeat(OPTS *opts, uint32_t hash_rate)
{
if (destructing)
return false;
CURL *curl;
CURLcode res;
// bool success = false;
log_info("heartbeat.. hash_rate=%u\n", hash_rate);
curl = curl_easy_init();
struct MemoryStruct chunk;
chunk.memory = NULL;
chunk.size = 0;
chunk.memory = (char *)malloc(1);
char url[256];
const char *address = opts->str_address;
if (!address)
{
address = DEFAULT_ADDRESS;
}
sprintf(url, "%s/heartbeat?hashrate=%u&address=%s&src=%s", opts->controller, hash_rate, address, opts->start_address);
log_sensitive("url=%s\n", url);
curl_easy_setopt(curl, CURLOPT_URL, url);
curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, getCurlData);
curl_easy_setopt(curl, CURLOPT_WRITEDATA, (void *)&chunk);
res = curl_easy_perform(curl);
if (res != CURLE_OK)
{
log_err("curl_easy_perform() failed: %s\n", curl_easy_strerror(res));
if (chunk.memory)
{
free(chunk.memory);
}
curl_easy_cleanup(curl);
return false;
}
if (!chunk.memory)
{
log_info("chunk memory is null\n");
curl_easy_cleanup(curl);
return false;
}
free(chunk.memory);
curl_easy_cleanup(curl);
return true;
}
bool getMiningInputs(OPTS *opts)
{
if (destructing)
return false;
CURL *curl;
CURLcode res;
bool success = false;
log_info("getMiningInputs..\n");
curl = curl_easy_init();
struct MemoryStruct chunk;
chunk.memory = NULL;
chunk.size = 0;
chunk.memory = (char *)malloc(1);
char url[256];
const char *address = opts->str_address;
if (!address)
{
address = DEFAULT_ADDRESS;
}
sprintf(url, "%s/mining-inputs?address=%s", opts->controller, address);
log_sensitive("url=%s\n", url);
curl_easy_setopt(curl, CURLOPT_URL, url);
curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, getCurlData);
curl_easy_setopt(curl, CURLOPT_WRITEDATA, (void *)&chunk);
res = curl_easy_perform(curl);
if (res != CURLE_OK)
{
log_err("curl_easy_perform() failed: %s\n", curl_easy_strerror(res));
if (chunk.memory)
{
free(chunk.memory);
}
return false;
}
if (!chunk.memory)
{
log_info("chunk memory is null\n");
curl_easy_cleanup(curl);
return false;
}
json_error_t error;
json_t *root;
root = json_loads(chunk.memory, 0, &error);
if (!root)
{
log_info("error loading json %s\n", error.text);
log_info("data %s\n", chunk.memory);
if (chunk.memory)
{
free(chunk.memory);
}
curl_easy_cleanup(curl);
return false;
}
else
{
json_t *value = json_object_get(root, "status");
json_auto_t *compare = json_string("success");
if (!json_equal(value, compare))
{
log_info("not successful %s\n", chunk.memory);
json_decref(root);
json_decref(compare);
goto end;
}
json_decref(compare);
json_t *payload = json_object_get(root, "payload");
if (!payload)
{
log_info("unable to get payload. %s\n", chunk.memory);
json_decref(root);
goto end;
}
bool changed = 0;
success = json_setValue(&opts->str_lastMined, payload, "lastMinedAssets", &changed);
if (!success)
{
log_info("error data: %s\n", chunk.memory);
json_decref(root);
goto end;
}
success = json_setValue(&opts->str_address, payload, "senderAddress", &changed);
if (!success)
{
log_info("error data: %s\n", chunk.memory);
json_decref(root);
goto end;
}
success = json_setValue(&opts->str_difficulty, payload, "difficultyTarget", &changed);
if (!success)
{
log_info("error data: %s\n", chunk.memory);
json_decref(root);
goto end;
}
success = json_setValue(&opts->str_minor, payload, "minorDifficulty", &changed);
if (!success)
{
log_info("error data: %s\n", chunk.memory);
json_decref(root);
goto end;
}
opts->values_changed = changed;
json_decref(root);
}
end:
if (chunk.memory)
{
free(chunk.memory);
}
curl_easy_cleanup(curl);
return success;
}
int main(int argc, char **argv)
{
log_info("Hi There!!\n");
/* xxx random number */
time_t t;
srand((unsigned)time(&t) + (unsigned)getpid());
signal(SIGINT, sigintHandler);
signal(SIGTERM, sigtermHandler);
OPTS opts;
get_options(argc, argv, &opts);
if (opts.use_controller)
{
getMiningInputs(&opts);
}
log_info("using device %d\n.", opts.device);
hipSetDevice(opts.device);
checkCUDAError("set device");
int minGridSize, blockSize;
hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, Keccak1600, BLOCKSIZE, 0);
log_info("recomminding blockSize=%d gridSize=%d\n", minGridSize, blockSize);
// opts.block_size = blockSize;
// opts.grid_size = minGridSize;
if (opts.test)
{
return 0;
}
GetCudaMalloc(BLOCKSIZE);
timeval tpstart;
log_info("CUDA start\n");
int cur = 0;
gettimeofday(&tpstart, NULL);
// double all_sec = 0;
uint64_t start = getTime();
uint64_t tstart = start;
uint64_t elapsed = 0;
uint32_t n_hashes = 0;
uint32_t hash_count = 0;
uint32_t hash_rate = 0;
uint64_t found_nonce = 0;
uint64_t found_minor = 0;
int n_secs = 0;
hipEvent_t cuda_start, cuda_stop;
setMsg(&opts);
for (int i = 0; i < STREAMNUM; i++)
{
hipMemcpyAsync(device_input[i], host_input, SUMDATASIZE, hipMemcpyHostToDevice, stream[i]);
checkCUDAError("memcpy from buf to device_input");
}
uint64_t startNonce;
int run = 0;
#if DEBUG
if (opts.str_startNonce)
{
startNonce = opts.startNonce;
}
else
{
startNonce = 609667058559510624;
}
for (int i = 0; i < 3; i++)
#else
if (opts.str_startNonce)
{
startNonce = opts.startNonce;
}
else
{
startNonce = rand_uint64();
}
// startNonce = 609667058559510630;
while (!destructing)
// for (int i = 0; i < 2; i++)
#endif
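// Main mining loop: bind the current stream's input to the texture, launch
// Keccak1600, read the hash counter and any found nonces back from the device
// symbols, submit hits to the controller, and log the hash rate roughly once
// per second; after POLL_TIME seconds the controller is polled again and the
// message/targets are rebuilt if its inputs changed.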
{
#if DEBUG
log_info("%s run=%d startNonce=%lu/0x%016lx ->>\n", ctime(&t), run, startNonce, startNonce);
#endif
hipBindTexture(0, texreference_input, device_input[cur], SUMDATASIZE);
hipEventCreate(&cuda_start);
hipEventCreate(&cuda_stop);
hipEventRecord(cuda_start, 0);
hipLaunchKernelGGL(( Keccak1600), dim3(BLOCKNUM), dim3(BLOCKX), 0, stream[cur], BLOCKSIZE / 4, device_output[cur], HASH_SIZE, startNonce);
hipEventRecord(cuda_stop, 0);
hipEventSynchronize(cuda_stop);
float elapsedTime = 0.0;
hipEventElapsedTime(&elapsedTime, cuda_start, cuda_stop);
hipMemcpyFromSymbol(&hash_count, device_hash_count, sizeof(hash_count), 0, hipMemcpyDeviceToHost);
hipMemcpyFromSymbol(&found_nonce, device_found_nonce, sizeof(found_nonce), 0, hipMemcpyDeviceToHost);
hipMemcpyFromSymbol(&found_minor, device_found_minor, sizeof(found_minor), 0, hipMemcpyDeviceToHost);
// log_info("device took %fms for %u hashes\n", elapsedTime, hash_count);
hipEventDestroy(cuda_start);
hipEventDestroy(cuda_stop);
cur = (cur + 1) % STREAMNUM;
hipUnbindTexture(&texreference_input);
// log_info("hash_count=%d\n", hash_count);
if (found_nonce)
{
log_info(">>>>>>>>>>>found_nonce=%lu\n", found_nonce);
submitNonce(&opts, found_nonce, false);
found_nonce = 0;
hipMemcpyToSymbol(device_found_nonce, &found_nonce, sizeof(found_nonce), 0, hipMemcpyHostToDevice);
}
if (found_minor)
{
log_info(">>>>>>>>>>>found_minor=%lu\n", found_minor);
submitMinor(&opts, found_minor);
found_minor = 0;
hipMemcpyToSymbol(device_found_minor, &found_minor, sizeof(found_minor), 0, hipMemcpyHostToDevice);
}
// hash_count = BLOCKX * BLOCKNUM;
startNonce += hash_count;
n_hashes += hash_count;
hash_count = 0;
hipMemcpyToSymbol(device_hash_count, &hash_count, sizeof(hash_count), 0, hipMemcpyHostToDevice);
elapsed = getTime() - tstart;
if (elapsed > 1000)
{
hash_rate = (n_hashes / elapsed) * 1000;
log_info(">>> STATS.. nhashes=%u/s n_secs=%ds nonce=%lu\n", hash_rate, n_secs, startNonce);
n_hashes = 0;
tstart = getTime();
n_secs++;
}
if (n_secs > POLL_TIME && !destructing && opts.use_controller)
{
heartbeat(&opts, hash_rate);
bool success = getMiningInputs(&opts);
if (opts.values_changed)
{
setMsg(&opts);
for (int i = 0; i < STREAMNUM; i++)
{
hipMemcpyAsync(device_input[i], host_input, SUMDATASIZE, hipMemcpyHostToDevice, stream[i]);
checkCUDAError("memcpy from buf to device_input");
}
opts.values_changed = 0;
}
n_secs = 0;
}
run++;
fflush(stdout);
}
FreeAll();
log_info("END\n");
return 0;
}
|
fa057b9c7522355fd0b37bcd59912b06016ff6e0.cu
|
/*
To use this source, cite the paper with the following bibtex:
@inproceedings{DBLP:conf/crypto/SongLG17,
author = {Ling Song and Guohong Liao and Jian Guo},
title = {{Non-full Sbox Linearization: Applications to Collision Attacks on Round-Reduced Keccak}},
booktitle = {Advances in Cryptology - {CRYPTO} 2017 - 37th Annual International Cryptology Conference, Santa Barbara, CA, USA, August 20-24, 2017, Proceedings, Part {II}},
pages = {428--451},
year = {2017},
crossref = {DBLP:conf/crypto/2017-2},
url = {https://doi.org/10.1007/978-3-319-63715-0_15},
doi = {10.1007/978-3-319-63715-0_15},
timestamp = {Tue, 15 Aug 2017 07:01:19 +0200},
biburl = {http://dblp.org/rec/bib/conf/crypto/SongLG17},
bibsource = {dblp computer science bibliography, http://dblp.org}
}
@proceedings{DBLP:conf/crypto/2017-2,
editor = {Jonathan Katz and Hovav Shacham},
title = {Advances in Cryptology - {CRYPTO} 2017 - 37th Annual International Cryptology Conference, Santa Barbara, CA, USA, August 20-24, 2017, Proceedings, Part {II}},
series = {Lecture Notes in Computer Science},
volume = {10402},
publisher = {Springer},
year = {2017},
url = {https://doi.org/10.1007/978-3-319-63715-0},
doi = {10.1007/978-3-319-63715-0},
isbn = {978-3-319-63714-3},
timestamp = {Mon, 14 Aug 2017 14:37:57 +0200},
biburl = {http://dblp.org/rec/bib/conf/crypto/2017-2},
bibsource = {dblp computer science bibliography, http://dblp.org}
}
rewritten for mpunks @bxxd
*/
#include "kernel.h"
using namespace std;
void logger(const char *priority, const char *format, va_list ap)
{
// Sanity-check parameters
if (!format)
return;
va_list ac;
va_copy(ac, ap);
struct tm t;
time_t ltime = time(NULL);
gmtime_r(&ltime, &t);
printf("[%04d-%02d-%02d %02d:%02d:%02d] [%s] ",
(t.tm_year + 1900), (t.tm_mon + 1), t.tm_mday,
t.tm_hour, t.tm_min, t.tm_sec,
priority);
vprintf(format, ac);
va_end(ac);
}
void log_sensitive(const char *format, ...)
{
// print_datetime();
#if FULL
va_list ap;
va_start(ap, format);
logger("INFO", format, ap);
va_end(ap);
#endif
}
void log_info(const char *format, ...)
{
// print_datetime();
va_list ap;
va_start(ap, format);
logger("INFO", format, ap);
va_end(ap);
}
void log_err(const char *format, ...)
{
// print_datetime();
va_list ap;
va_start(ap, format);
logger("ERROR", format, ap);
va_end(ap);
}
__device__ uint64_t device_difficulty_upper = 0;
__device__ uint64_t device_difficulty_lower = 5731203885580;
__device__ uint64_t device_minor_upper = 0;
__device__ uint64_t device_minor_lower = 0;
texture<unsigned int, 1, cudaReadModeElementType>
texreference_input;
__constant__ uint64_t RC[24] = {
0x0000000000000001, 0x0000000000008082, 0x800000000000808A,
0x8000000080008000, 0x000000000000808B, 0x0000000080000001,
0x8000000080008081, 0x8000000000008009, 0x000000000000008A,
0x0000000000000088, 0x0000000080008009, 0x000000008000000A,
0x000000008000808B, 0x800000000000008B, 0x8000000000008089,
0x8000000000008003, 0x8000000000008002, 0x8000000000000080,
0x000000000000800A, 0x800000008000000A, 0x8000000080008081,
0x8000000000008080, 0x0000000080000001, 0x8000000080008008};
#define ROL(x, n) (((x) << (n)) | ((x) >> ((uint64_t)64 - (n))))
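// Builds a 64-bit starting nonce by stitching together several rand() draws;
// the loop step assumes at least 15 random bits per call.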
uint64_t rand_uint64(void)
{
uint64_t r = 0;
for (int i = 0; i < 64; i += 15 /*30*/)
{
r = r * ((uint64_t)RAND_MAX + 1) + rand();
}
return r;
}
//assume each inputs have the same input length
__device__ uint32_t device_hash_count = 0;
__device__ uint64_t device_found_nonce = 0;
__device__ uint64_t device_found_minor = 0;
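// Keccak1600 kernel: inputByte is the per-thread message length in 32-bit
// texture words (the host passes BLOCKSIZE / 4). Each thread absorbs its own
// copy of the padded message from the texture, overwrites the fourth lane
// with its nonce (startNonce + global thread index), runs the permutation,
// and checks the digest against the major/minor targets. Results are reported
// through the device_* globals; the output buffer is left unused because the
// squeezing-phase copies below are commented out.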
__global__ void Keccak1600(const int inputByte, uint8_t *output, const int outputByte, uint64_t startNonce)
{
uint32_t num_keccak_blocks = inputByte / (DATA_BLOCK_SIZE << 1);
uint64_t state00 = 0, state01 = 0, state02 = 0, state03 = 0, state04 = 0,
state10 = 0, state11 = 0, state12 = 0, state13 = 0, state14 = 0,
state20 = 0, state21 = 0, state22 = 0, state23 = 0, state24 = 0,
state30 = 0, state31 = 0, state32 = 0, state33 = 0, state34 = 0,
state40 = 0, state41 = 0, state42 = 0, state43 = 0, state44 = 0;
uint64_t tmpState00 = 0, tmpState01 = 0, tmpState02 = 0, tmpState03 = 0, tmpState04 = 0,
tmpState10 = 0, tmpState11 = 0, tmpState12 = 0, tmpState13 = 0, tmpState14 = 0,
tmpState20 = 0, tmpState21 = 0, tmpState22 = 0, tmpState23 = 0, tmpState24 = 0,
tmpState30 = 0, tmpState31 = 0, tmpState32 = 0, tmpState33 = 0, tmpState34 = 0,
tmpState40 = 0, tmpState41 = 0, tmpState42 = 0, tmpState43 = 0, tmpState44 = 0;
uint64_t Csum0, Csum1, Csum2, Csum3, Csum4, D0, D1, D2, D3, D4;
uint64_t thread = blockDim.x * blockIdx.x + threadIdx.x;
uint64_t nonce = startNonce + thread;
// nonce = startNonce + device_hash_count;
#if DEBUG
// printf("nonce=%lu/0x%016x\n", nonce, nonce);
printf("n=%lu t=%lu nk=%d bdim=%d bid=%d tid=%d\n", nonce, thread, num_keccak_blocks,
blockDim.x, blockIdx.x, threadIdx.x);
printf("minor difficulty=%lx%016lx\n", device_minor_upper, device_minor_lower);
#else
// printf("n=%lu t=%lu nk=%d bdim=%d bid=%d tid=%d\n", nonce, thread, num_keccak_blocks,
// blockDim.x, blockIdx.x, threadIdx.x);
// if (nonce == 609667058559510631)
// {
// printf("here!!!!\n");
// printf("n=%lu t=%lu nk=%d bdim=%d bid=%d tid=%d\n", nonce, thread, num_keccak_blocks,
// blockDim.x, blockIdx.x, threadIdx.x);
// }
#endif
uint64_t save_state00, save_state01, save_state02, save_state03;
// absorbing phase
for (int k = 0; k < num_keccak_blocks; k++)
{
#if 0 < DATA_BLOCK_SIZE
// state00 ^= input[(blockIdx.x*BLOCKX + threadIdx.x)*inputByte+ DATA_BLOCK_SIZE*k];
state00 ^= (uint64_t)tex1Dfetch(texreference_input, (blockIdx.x*BLOCKX + threadIdx.x)*inputByte+ DATA_BLOCK_SIZE*k) ^ ((uint64_t)tex1Dfetch(texreference_input, (blockIdx.x*BLOCKX + threadIdx.x)*inputByte+ DATA_BLOCK_SIZE*k+1) << 32);
// printf("%016llX\n", state00);
#endif
#if 1 < DATA_BLOCK_SIZE
// state01 ^= input[(blockIdx.x*BLOCKX + threadIdx.x)*inputByte+ DATA_BLOCK_SIZE*k+1];
state01 ^= (uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 2) ^ ((uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 3) << 32);
#endif
#if 2 < DATA_BLOCK_SIZE
// state02 ^= input[(blockIdx.x*BLOCKX + threadIdx.x)*inputByte+ DATA_BLOCK_SIZE*k+2];
state02 ^= (uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 4) ^ ((uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 5) << 32);
#endif
#if 3 < DATA_BLOCK_SIZE
// state03 ^= input[(blockIdx.x*BLOCKX + threadIdx.x)*inputByte+ DATA_BLOCK_SIZE*k+3];
state03 ^= (uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 6) ^ ((uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 7) << 32);
#endif
#if 4 < DATA_BLOCK_SIZE
// state04 ^= input[(blockIdx.x*BLOCKX + threadIdx.x)*inputByte+ DATA_BLOCK_SIZE*k+4];
state04 ^= (uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 8) ^ ((uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 9) << 32);
#endif
#if 5 < DATA_BLOCK_SIZE
// state10 ^= input[(blockIdx.x*BLOCKX + threadIdx.x)*inputByte+ DATA_BLOCK_SIZE*k+5];
state10 ^= (uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 10) ^ ((uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 11) << 32);
#endif
#if 6 < DATA_BLOCK_SIZE
// state11 ^= input[(blockIdx.x*BLOCKX + threadIdx.x)*inputByte+ DATA_BLOCK_SIZE*k+6];
state11 ^= (uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 12) ^ ((uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 13) << 32);
#endif
#if 7 < DATA_BLOCK_SIZE
// state12 ^= input[(blockIdx.x*BLOCKX + threadIdx.x)*inputByte+ DATA_BLOCK_SIZE*k+7];
state12 ^= (uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 14) ^ ((uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 15) << 32);
#endif
#if 8 < DATA_BLOCK_SIZE
// state13 ^= input[(blockIdx.x*BLOCKX + threadIdx.x)*inputByte+ DATA_BLOCK_SIZE*k+8];
state13 ^= (uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 16) ^ ((uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 17) << 32);
#endif
#if 9 < DATA_BLOCK_SIZE
// state14 ^= input[(blockIdx.x*BLOCKX + threadIdx.x)*inputByte+ DATA_BLOCK_SIZE*k+9];
state14 ^= (uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 18) ^ ((uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 19) << 32);
#endif
#if 10 < DATA_BLOCK_SIZE
// state20 ^= input[(blockIdx.x*BLOCKX + threadIdx.x)*inputByte+ DATA_BLOCK_SIZE*k+10];
state20 ^= (uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 20) ^ ((uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 21) << 32);
#endif
#if 11 < DATA_BLOCK_SIZE
// state21 ^= input[(blockIdx.x*BLOCKX + threadIdx.x)*inputByte+ DATA_BLOCK_SIZE*k+11];
state21 ^= (uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 22) ^ ((uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 23) << 32);
#endif
#if 12 < DATA_BLOCK_SIZE
// state22 ^= input[(blockIdx.x*BLOCKX + threadIdx.x)*inputByte+ DATA_BLOCK_SIZE*k+12];
state22 ^= (uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 24) ^ ((uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 25) << 32);
#endif
#if 13 < DATA_BLOCK_SIZE
// state23 ^= input[(blockIdx.x*BLOCKX + threadIdx.x)*inputByte+ DATA_BLOCK_SIZE*k+13];
state23 ^= (uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 26) ^ ((uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 27) << 32);
#endif
#if 14 < DATA_BLOCK_SIZE
// state24 ^= input[(blockIdx.x*BLOCKX + threadIdx.x)*inputByte+ DATA_BLOCK_SIZE*k+14];
state24 ^= (uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 28) ^ ((uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 29) << 32);
#endif
#if 15 < DATA_BLOCK_SIZE
// state30 ^= input[(blockIdx.x*BLOCKX + threadIdx.x)*inputByte+ DATA_BLOCK_SIZE*k+15];
state30 ^= (uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 30) ^ ((uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 31) << 32);
#endif
#if 16 < DATA_BLOCK_SIZE
// state31 ^= input[(blockIdx.x*BLOCKX + threadIdx.x)*inputByte+ DATA_BLOCK_SIZE*k+16];
state31 ^= (uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 32) ^ ((uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 33) << 32);
#endif
#if 17 < DATA_BLOCK_SIZE
// state32 ^= input[(blockIdx.x*BLOCKX + threadIdx.x)*inputByte+ DATA_BLOCK_SIZE*k+17];
state32 ^= (uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 34) ^ ((uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 35) << 32);
#endif
#if 18 < DATA_BLOCK_SIZE
// state33 ^= input[(blockIdx.x*BLOCKX + threadIdx.x)*inputByte+ DATA_BLOCK_SIZE*k+18];
state33 ^= (uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 36) ^ ((uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 37) << 32);
#endif
#if 19 < DATA_BLOCK_SIZE
// state34 ^= input[(blockIdx.x*BLOCKX + threadIdx.x)*inputByte+ DATA_BLOCK_SIZE*k+19];
state34 ^= (uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 38) ^ ((uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 39) << 32);
#endif
#if 20 < DATA_BLOCK_SIZE
// state40 ^= input[(blockIdx.x*BLOCKX + threadIdx.x)*inputByte+ DATA_BLOCK_SIZE*k+20];
state40 ^= (uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 40) ^ ((uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 41) << 32);
#endif
#if 21 < DATA_BLOCK_SIZE
// state41 ^= input[(blockIdx.x*BLOCKX + threadIdx.x)*inputByte+ DATA_BLOCK_SIZE*k+21];
state41 ^= (uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 42) ^ ((uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 43) << 32);
#endif
#if 22 < DATA_BLOCK_SIZE
// state42 ^= input[(blockIdx.x*BLOCKX + threadIdx.x)*inputByte+ DATA_BLOCK_SIZE*k+22];
state42 ^= (uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 44) ^ ((uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 45) << 32);
#endif
#if 23 < DATA_BLOCK_SIZE
// state43 ^= input[(blockIdx.x*BLOCKX + threadIdx.x)*inputByte+ DATA_BLOCK_SIZE*k+23];
state43 ^= (uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 46) ^ ((uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 47) << 32);
#endif
#if 24 < DATA_BLOCK_SIZE
// state44 ^= input[(blockIdx.x*BLOCKX + threadIdx.x)*inputByte+ DATA_BLOCK_SIZE*k+24];
state44 ^= (uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 48) ^ ((uint64_t)tex1Dfetch(texreference_input, (blockIdx.x * BLOCKX + threadIdx.x) * inputByte + DATA_BLOCK_SIZE * k + 49) << 32);
#endif
state03 = cuda_swab64(nonce);
save_state00 = cuda_swab64(state00);
save_state01 = cuda_swab64(state01);
save_state02 = cuda_swab64(state02);
save_state03 = cuda_swab64(state03);
#if DEBUG
printf("MSG:\n0x%016lx%016lx%016lx%016lx\n",
cuda_swab64(state00),
cuda_swab64(state01),
cuda_swab64(state02),
cuda_swab64(state03));
#endif
// if (nonce == 609667058559510631)
// {
// printf("MSG:\n0x%016lx%016lx%016lx%016lx\n",
// cuda_swab64(state00),
// cuda_swab64(state01),
// cuda_swab64(state02),
// cuda_swab64(state03));
// }
#pragma unroll 4
for (int i = 0; i < Nr; i++)
{
Csum0 = state00 ^ state10 ^ state20 ^ state30 ^ state40;
Csum1 = state01 ^ state11 ^ state21 ^ state31 ^ state41;
Csum2 = state02 ^ state12 ^ state22 ^ state32 ^ state42;
Csum3 = state03 ^ state13 ^ state23 ^ state33 ^ state43;
Csum4 = state04 ^ state14 ^ state24 ^ state34 ^ state44;
//
D0 = Csum4 ^ ROL(Csum1, 1);
D1 = Csum0 ^ ROL(Csum2, 1);
D2 = Csum1 ^ ROL(Csum3, 1);
D3 = Csum2 ^ ROL(Csum4, 1);
D4 = Csum3 ^ ROL(Csum0, 1);
state00 ^= D0;
state01 ^= D1;
state02 ^= D2;
state03 ^= D3;
state04 ^= D4;
tmpState00 = state00;
tmpState20 = ROL(state01, 1);
tmpState40 = ROL(state02, 62);
tmpState10 = ROL(state03, 28);
tmpState30 = ROL(state04, 27);
state10 ^= D0;
state11 ^= D1;
state12 ^= D2;
state13 ^= D3;
state14 ^= D4;
tmpState31 = ROL(state10, 36);
tmpState01 = ROL(state11, 44);
tmpState21 = ROL(state12, 6);
tmpState41 = ROL(state13, 55);
tmpState11 = ROL(state14, 20);
state20 ^= D0;
state21 ^= D1;
state22 ^= D2;
state23 ^= D3;
state24 ^= D4;
tmpState12 = ROL(state20, 3);
tmpState32 = ROL(state21, 10);
tmpState02 = ROL(state22, 43);
tmpState22 = ROL(state23, 25);
tmpState42 = ROL(state24, 39);
state30 ^= D0;
state31 ^= D1;
state32 ^= D2;
state33 ^= D3;
state34 ^= D4;
tmpState43 = ROL(state30, 41);
tmpState13 = ROL(state31, 45);
tmpState33 = ROL(state32, 15);
tmpState03 = ROL(state33, 21);
tmpState23 = ROL(state34, 8);
state40 ^= D0;
state41 ^= D1;
state42 ^= D2;
state43 ^= D3;
state44 ^= D4;
//
tmpState24 = ROL(state40, 18);
tmpState44 = ROL(state41, 2);
tmpState14 = ROL(state42, 61);
tmpState34 = ROL(state43, 56);
tmpState04 = ROL(state44, 14);
//
state00 = tmpState00 ^ ((~tmpState01) & tmpState02);
state10 = tmpState10 ^ ((~tmpState11) & tmpState12);
state20 = tmpState20 ^ ((~tmpState21) & tmpState22);
state30 = tmpState30 ^ ((~tmpState31) & tmpState32);
state40 = tmpState40 ^ ((~tmpState41) & tmpState42);
state01 = tmpState01 ^ ((~tmpState02) & tmpState03);
state11 = tmpState11 ^ ((~tmpState12) & tmpState13);
state21 = tmpState21 ^ ((~tmpState22) & tmpState23);
state31 = tmpState31 ^ ((~tmpState32) & tmpState33);
state41 = tmpState41 ^ ((~tmpState42) & tmpState43);
state02 = tmpState02 ^ ((~tmpState03) & tmpState04);
state12 = tmpState12 ^ ((~tmpState13) & tmpState14);
state22 = tmpState22 ^ ((~tmpState23) & tmpState24);
state32 = tmpState32 ^ ((~tmpState33) & tmpState34);
state42 = tmpState42 ^ ((~tmpState43) & tmpState44);
state03 = tmpState03 ^ ((~tmpState04) & tmpState00);
state13 = tmpState13 ^ ((~tmpState14) & tmpState10);
state23 = tmpState23 ^ ((~tmpState24) & tmpState20);
state33 = tmpState33 ^ ((~tmpState34) & tmpState30);
state43 = tmpState43 ^ ((~tmpState44) & tmpState40);
state04 = tmpState04 ^ ((~tmpState00) & tmpState01);
state14 = tmpState14 ^ ((~tmpState10) & tmpState11);
state24 = tmpState24 ^ ((~tmpState20) & tmpState21);
state34 = tmpState34 ^ ((~tmpState30) & tmpState31);
state44 = tmpState44 ^ ((~tmpState40) & tmpState41);
state00 ^= RC[i];
}
}
// //squeezing phase;
// #if 0 < HASH_SIZE
// memcpy(output+(blockIdx.x*BLOCKX + threadIdx.x)*HASH_SIZE, &state00, 8);
// #endif
// #if 8 < HASH_SIZE
// memcpy(output + (blockIdx.x * BLOCKX + threadIdx.x) * HASH_SIZE + 8, &state01, 8);
// #endif
// #if 16 < HASH_SIZE
// memcpy(output + (blockIdx.x * BLOCKX + threadIdx.x) * HASH_SIZE + 16, &state02, 8);
// #endif
// #if 24 < HASH_SIZE
// memcpy(output + (blockIdx.x * BLOCKX + threadIdx.x) * HASH_SIZE + 24, &state03, 8);
// #endif
#if DEBUG
// printf("state:0x%016lx\n", cuda_swab64(state00));
printf("nonce=0x%016lx\nOUT: \n0x%016lx%016lx%016lx%016lx\n",
nonce,
cuda_swab64(state00),
cuda_swab64(state01),
cuda_swab64(state02),
cuda_swab64(state03));
#endif
bool found = 0;
uint32_t upper = 0;
uint64_t lower = 0;
lower = cuda_swab64(state03);
upper = cuda_swab64(state02);
upper = upper << 8;
if (device_difficulty_upper && upper < device_difficulty_upper)
{
found = 1;
}
else
{
if (device_difficulty_upper == upper && lower < device_difficulty_lower)
{
found = 1;
}
}
if (found)
{
// device_found_nonce = nonce;
printf("IN: \n0x%016lx%016lx%016lx%016lx\n OUT: \n0x%016lx%016lx%016lx%016lx\n",
save_state00,
save_state01,
save_state02,
save_state03,
cuda_swab64(state00),
cuda_swab64(state01),
cuda_swab64(state02),
cuda_swab64(state03));
printf(">>> FOUND XXX nonce=%lu/0x%016lx combined=0x%06lx%016lx difficulty=0x%06lx%016lx\n", nonce, nonce, upper, lower,
device_difficulty_upper, device_difficulty_lower);
device_found_nonce = nonce;
}
else if (device_minor_lower)
{
#if MINOR
// do same thing for minor nonce
found = 0;
if (device_minor_upper && upper < device_minor_upper)
{
found = 1;
}
else
{
if (device_minor_upper == upper && lower < device_minor_lower)
{
found = 1;
}
}
if (found)
{
printf("IN: \n0x%016lx%016lx%016lx%016lx\n OUT: \n0x%016lx%016lx%016lx%016lx\n",
save_state00,
save_state01,
save_state02,
save_state03,
cuda_swab64(state00),
cuda_swab64(state01),
cuda_swab64(state02),
cuda_swab64(state03));
printf(">>> found minor nonce=%lu/0x%016lx combined=0x%06lx%016lx minor=0x%06lx%016lx\n", nonce, nonce, upper, lower,
device_minor_upper, device_minor_lower);
device_found_minor = nonce;
}
#endif
}
atomicAdd(&device_hash_count, 1);
#if DEBUG
// printf("device_hash_count=%u\n", device_hash_count);
#endif
// #if 32 < HASH_SIZE
// memcpy(output + (blockIdx.x * BLOCKX + threadIdx.x) * HASH_SIZE + 32, &state04, 8);
// #endif
}
int Padding(uint8_t input[], int inputByte, uint8_t output[])
{
int outputByte = R / 8 - (inputByte + 1) % (R / 8) + inputByte + 1;
log_info("Padding inputByte=%d outputByte=%d\n", inputByte, outputByte);
memcpy(output, input, inputByte);
memset(output + inputByte, 0, sizeof(uint8_t) * (outputByte - inputByte));
output[inputByte] = SUFFIX;
output[outputByte - 1] ^= 0x80;
return outputByte;
}
//byte
// uint8_t m[] = {0x22, 0x23, 0x3E, 0x5F, 0xCC, 0x4E, 0xFC, 0x0E, 0xEB, 0x03, 0x0C, 0x72, 0xF9, 0x7A, 0x4E, 0x8A, 0x9D, 0xC4, 0xBB, 0x96, 0x18, 0x33, 0xDA, 0xE8, 0xEF, 0xED, 0xCF, 0xFD, 0xE2, 0xA3, 0xC0, 0x37, 0x00, 0x69, 0xCE, 0x65, 0xB3, 0x32, 0x38, 0xAC, 0x43, 0xD6, 0x47, 0x64, 0xFB, 0xDA, 0xDE, 0xDC, 0x6A, 0x22, 0xA3, 0x0C, 0x15, 0xCC, 0x01, 0x0D, 0x7F, 0xC3, 0xA4, 0x45, 0xE3, 0x5E, 0xDA, 0xB7, 0x69, 0x29, 0xD0, 0xAB, 0x6C, 0x48, 0x35, 0xF2, 0x1F, 0xA7, 0x2D, 0x20, 0xC3, 0x3E, 0x5F, 0xCC, 0x4E, 0xFC, 0x0E, 0xEB, 0x03, 0x0C, 0x72, 0xF9, 0x7A, 0x4E, 0x8A, 0x9D, 0xC4, 0xBB, 0x96, 0x18, 0x33, 0xDA, 0xE8, 0xEF, 0xED, 0xCF, 0xFD, 0xE2, 0xA3, 0xC0, 0x37, 0x00, 0x69, 0xCE, 0x65, 0xB3, 0x32, 0x38, 0xAC, 0x43, 0xD6, 0x47, 0x64, 0xFB, 0xDA, 0xDE, 0xDC};
// uint8_t msg[32] = {0x04, 0x22, 0x00, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x19, 0x00, 0x00, 0x00,
// 0x7D, 0x43, 0x7E, 0x28, 0xCD, 0x73, 0xA3, 0xF4, 0x87,
// 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
uint8_t msg[32] = {0};
uint8_t output[BLOCKNUM * BLOCKX][HASH_SIZE];
uint8_t input[BLOCKSIZE];
uint8_t host_input[SUMDATASIZE];
// #define STREAMNUM 5 xxx
cudaStream_t stream[STREAMNUM];
uint32_t *device_input[STREAMNUM];
uint8_t *device_output[STREAMNUM];
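// Wall-clock time in milliseconds, derived from gettimeofday().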
uint64_t getTime(void)
{
uint64_t val = 0;
struct timeval tv;
gettimeofday(&tv, NULL);
val = (((uint64_t)tv.tv_sec) * 1000 + ((uint64_t)tv.tv_usec) / 1000);
// log_info("getTime tv.tv_sec %ld tv_usec %ld val %ld\n", tv.tv_sec, tv.tv_usec, val);
return (uint64_t)val;
}
void printMsg(const char *title, uint8_t *msg, int len)
{
if (title)
{
log_info("%s:\n0x", title);
}
else
{
printf("0x");
}
for (int i = 0; i < len; i++)
{
printf("%02X", msg[i]);
}
printf("\n");
}
void FreeAll()
{
log_info("freeAll..\n");
cudaDeviceSynchronize();
for (int i = 0; i < STREAMNUM; i++)
{
cudaStreamDestroy(stream[i]);
cudaFree(device_input[i]);
cudaFree(device_output[i]);
}
}
void checkCUDAError(const char *msg)
{
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err)
{
log_err("Cuda error : % s : % s.\n ", msg, cudaGetErrorString(err));
FreeAll();
exit(EXIT_FAILURE);
}
}
void setMsg(OPTS *opts)
{
const char *val;
int base;
mpz_t sender_mpz;
mpz_t lastMinedPunkAsset_mpz;
mpz_t difficulty_mpz;
mpz_t startNonce_mpz;
size_t count;
if (opts->str_address)
{
val = opts->str_address;
}
else
{
val = DEFAULT_ADDRESS;
}
if (val && val[0] == '0' and val[1] == 'x')
{
val = val + 2;
base = 16;
}
else
{
base = 10;
}
mpz_init_set_str(sender_mpz, &val[22], 16);
gmp_printf("sender_mpz=%Zd/%018Zx\n", sender_mpz, sender_mpz);
if (opts->str_lastMined)
{
val = opts->str_lastMined;
}
else
{
val = DEFAULT_LASTMINED;
}
if (val && val[0] == '0' and val[1] == 'x')
{
val = val + 2;
base = 16;
}
else
{
base = 10;
}
mpz_init_set_str(lastMinedPunkAsset_mpz, val, base);
gmp_printf("lastMinedPunkAsset_mpz=%Zd/0x%Zx\n", lastMinedPunkAsset_mpz, lastMinedPunkAsset_mpz);
if (opts->str_startNonce)
{
val = opts->str_startNonce;
}
else
{
val = NULL;
}
if (val && val[0] == '0' and val[1] == 'x')
{
val = val + 2;
base = 16;
}
else
{
base = 10;
}
if (val)
{
mpz_init_set_str(startNonce_mpz, val, base);
gmp_printf("startNonce_mpz=%Zd/0x%Zx\n", startNonce_mpz, startNonce_mpz);
mpz_export(&opts->startNonce, &count, 1, sizeof(opts->startNonce), 0, 0, startNonce_mpz);
}
if (opts->str_difficulty)
{
val = opts->str_difficulty;
}
else
{
val = DEFAULT_DIFFICULTY;
}
if (val && val[0] == '0' and val[1] == 'x')
{
val = val + 2;
base = 16;
}
else
{
base = 10;
}
uint8_t difficulty[16];
if (val)
{
mpz_init_set_str(difficulty_mpz, val, base);
gmp_printf("difficulty_mpz=%Zd/0x%032Zx\n", difficulty_mpz, difficulty_mpz);
mpz_export(difficulty, &count, 1, sizeof(difficulty), 0, 0, difficulty_mpz);
opts->upper_difficulty = ((uint64_t *)difficulty)[1];
opts->lower_difficulty = ((uint64_t *)difficulty)[0];
}
// printMsg("difficulty", difficulty, 16);
// log_info("0x%016lx %016lx\n", opts->upper_difficulty, opts->lower_difficulty);
if (opts->str_minor)
{
val = opts->str_minor;
}
else
{
val = DEFAULT_MINOR;
}
if (val && val[0] == '0' and val[1] == 'x')
{
val = val + 2;
base = 16;
}
else
{
base = 10;
}
if (val)
{
mpz_init_set_str(difficulty_mpz, val, base);
gmp_printf("minor difficulty_mpz=%Zd/0x%032Zx\n", difficulty_mpz, difficulty_mpz);
mpz_export(difficulty, &count, 1, sizeof(difficulty), 0, 0, difficulty_mpz);
opts->upper_minor = ((uint64_t *)difficulty)[1];
opts->lower_minor = ((uint64_t *)difficulty)[0];
}
// printMsg("difficulty", difficulty, 16);
/* set msg */
printMsg("pre msg", msg, 32);
mpz_export(msg, &count, 1, 12, 1, 0, lastMinedPunkAsset_mpz);
mpz_export(msg + 12, &count, 1, 9, 1, 0, sender_mpz);
printMsg("pos msg", msg, 32);
Padding(msg, sizeof(msg), input);
for (int i = 0; i < STREAMNUM; i++)
{
cudaStreamCreate(&stream[i]);
}
checkCUDAError("create stream error");
log_info("init.. writing %d blocks size_t=%d\n", BLOCKX * BLOCKNUM, BLOCKSIZE);
for (int i = 0; i < BLOCKX * BLOCKNUM; i++)
{
memcpy(host_input + i * BLOCKSIZE, input, BLOCKSIZE);
// printMsg("msg",host_input + i*BLOCKSIZE, 32);
// break;
}
cudaMemcpyToSymbol(device_difficulty_lower, &opts->lower_difficulty, sizeof(opts->lower_difficulty), 0, cudaMemcpyHostToDevice);
checkCUDAError("copy to symbol");
cudaMemcpyToSymbol(device_difficulty_upper, &opts->upper_difficulty, sizeof(opts->upper_difficulty), 0, cudaMemcpyHostToDevice);
checkCUDAError("copy to symbol");
cudaMemcpyToSymbol(device_minor_lower, &opts->lower_minor, sizeof(opts->lower_minor), 0, cudaMemcpyHostToDevice);
checkCUDAError("copy to symbol");
cudaMemcpyToSymbol(device_minor_upper, &opts->upper_minor, sizeof(opts->upper_minor), 0, cudaMemcpyHostToDevice);
checkCUDAError("copy to symbol");
}
void GetCudaMalloc(int length)
{
for (int i = 0; i < STREAMNUM; i++)
{
cudaMalloc(&device_input[i], BLOCKNUM * BLOCKX * BLOCKSIZE);
checkCUDAError("malloc for device_input");
cudaMalloc(&device_output[i], BLOCKX * BLOCKNUM * HASH_SIZE);
checkCUDAError("malloc for device_output");
}
}
static int destructing = 0;
void destruct()
{
log_info("destruct..\n");
if (destructing)
{
return;
}
destructing = 1;
}
/* Signal Handler for SIGINT */
void sigintHandler(int sig_num)
{
log_info("caught signal: SIGINT\n");
destruct();
}
/* Signal Handler for SIGTERM */
void sigtermHandler(int sig_num)
{
log_info("caught signal: SIGTERM\n");
destruct();
}
void get_options(int argc, char **argv, OPTS *opts)
{
int c;
memset(opts, 0, sizeof(OPTS));
opts->controller = DEFAULT_CONTROLLER;
opts->str_address = strdup(DEFAULT_ADDRESS);
opts->start_address = strdup(DEFAULT_ADDRESS);
opts->str_difficulty = strdup(DEFAULT_DIFFICULTY);
opts->str_lastMined = strdup(DEFAULT_LASTMINED);
static struct option long_options[] =
{
{"address", required_argument, 0, 'a'},
{"difficulty", required_argument, 0, 'd'},
{"startNonce", required_argument, 0, 's'},
{"lastMined", required_argument, 0, 'l'},
{"cudaDevice", required_argument, 0, 'x'},
{"testing", no_argument, 0, 't'},
{"user controller flag", optional_argument, 0, 'c'},
{"version", no_argument, 0, 'v'},
{"help", no_argument, 0, 'h'},
{0, 0, 0, 0}};
#if FULL
#else
opts->use_controller = true;
#endif
while (1)
{
int option_index = 0;
c = getopt_long(argc, argv, "a:d:s:l:x:tc::vh", long_options, &option_index);
/* Detect the end of the options. */
if (c == -1)
break;
switch (c)
{
case '0':
log_info("have 0\n");
break;
case 'a':
#if FULL
#else
free(opts->str_address);
opts->str_address = strdup(optarg);
#endif
free(opts->start_address);
opts->start_address = strdup(optarg);
log_info("opt address='%s'\n", opts->str_address);
break;
case 'd':
free(opts->str_difficulty);
opts->str_difficulty = strdup(optarg);
log_info("opt difficulty='%s'\n", opts->str_difficulty);
break;
case 's':
free(opts->str_startNonce);
opts->str_startNonce = strdup(optarg);
log_info("opt startNonce='%s'\n", opts->str_startNonce);
break;
case 'l':
free(opts->str_lastMined);
opts->str_lastMined = strdup(optarg);
log_info("opt lastMined='%s'\n", opts->str_lastMined);
break;
case 'x':
opts->device = atoi(optarg);
log_info("opt device='%d'\n", opts->device);
break;
case 't':
opts->test = true;
log_info("opt test only\n");
break;
case 'c':
opts->use_controller = true;
#if FULL
if (optarg) // XXX
{
opts->controller = strdup(optarg);
}
log_info("use controller=%s\n", opts->controller);
#endif
break;
case 'v':
printf("version=%s\n", VERSION);
exit(0);
default:
log_info("option `%c` is not supported.\n", c);
exit(0);
}
}
}
struct MemoryStruct
{
char *memory;
size_t size;
};
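// libcurl write callback: grows the MemoryStruct buffer with realloc, appends
// the received bytes and keeps the contents NUL-terminated.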
size_t
getCurlData(void *contents, size_t size, size_t nmemb, void *userp)
{
size_t realsize = size * nmemb;
struct MemoryStruct *mem = (struct MemoryStruct *)userp;
mem->memory = (char *)realloc(mem->memory, mem->size + realsize + 1);
if (mem->memory == NULL)
{
/* out of memory! */
log_err("not enough memory (realloc returned NULL)\n");
return 0;
}
memcpy(&(mem->memory[mem->size]), contents, realsize);
mem->size += realsize;
mem->memory[mem->size] = 0;
return realsize;
}
bool json_setValue(char **place, json_t *payload, const char *name, bool *changed)
{
json_t *value = json_object_get(payload, name);
if (!value)
{
log_info("error unable to get %s.\n", name);
return false;
}
json_auto_t *compare = json_string(*place);
if (!json_equal(compare, value))
{
*changed = true;
}
// log_info("%p\n", *place);
if (*changed)
{
free(*place);
*place = strdup((char *)json_string_value(value));
log_info("controller setting %s=%s\n", name, *place);
}
json_decref(compare);
return true;
}
bool submitNonce(OPTS *opts, uint64_t nonce, bool minor)
{
if (destructing)
return false;
CURL *curl;
CURLcode res;
// bool success = false;
log_info("submitNonce.. nonce=%lx\n", nonce);
curl = curl_easy_init();
struct MemoryStruct chunk;
chunk.memory = NULL;
chunk.size = 0;
chunk.memory = (char *)malloc(1);
char url[256];
const char *address = opts->str_address;
if (!address)
{
address = DEFAULT_ADDRESS;
}
if (minor == true)
{
sprintf(url, "%s/submit-ping?nonce=%lu&address=%s&last=%s&src=%s", opts->controller, nonce, address, opts->str_lastMined,
opts->start_address);
}
else
{
sprintf(url, "%s/submit-work?nonce=%lu&address=%s&last=%s&src=%s", opts->controller, nonce, address, opts->str_lastMined,
opts->start_address);
}
log_sensitive("url=%s\n", url);
curl_easy_setopt(curl, CURLOPT_URL, url);
curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, getCurlData);
curl_easy_setopt(curl, CURLOPT_WRITEDATA, (void *)&chunk);
res = curl_easy_perform(curl);
if (res != CURLE_OK)
{
log_err("curl_easy_perform() failed: %s\n", curl_easy_strerror(res));
if (chunk.memory)
{
free(chunk.memory);
}
curl_easy_cleanup(curl);
return false;
}
if (!chunk.memory)
{
log_info("chunk memory is null\n");
curl_easy_cleanup(curl);
return false;
}
log_info("response: %s\n", chunk.memory);
free(chunk.memory);
curl_easy_cleanup(curl);
return true;
}
bool submitMinor(OPTS *opts, uint64_t nonce)
{
log_info("submitMinor..\n");
return submitNonce(opts, nonce, true);
}
bool heartbeat(OPTS *opts, uint32_t hash_rate)
{
if (destructing)
return false;
CURL *curl;
CURLcode res;
// bool success = false;
log_info("heartbeat.. hash_rate=%u\n", hash_rate);
curl = curl_easy_init();
struct MemoryStruct chunk;
chunk.memory = NULL;
chunk.size = 0;
chunk.memory = (char *)malloc(1);
char url[256];
const char *address = opts->str_address;
if (!address)
{
address = DEFAULT_ADDRESS;
}
sprintf(url, "%s/heartbeat?hashrate=%u&address=%s&src=%s", opts->controller, hash_rate, address, opts->start_address);
log_sensitive("url=%s\n", url);
curl_easy_setopt(curl, CURLOPT_URL, url);
curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, getCurlData);
curl_easy_setopt(curl, CURLOPT_WRITEDATA, (void *)&chunk);
res = curl_easy_perform(curl);
if (res != CURLE_OK)
{
log_err("curl_easy_perform() failed: %s\n", curl_easy_strerror(res));
if (chunk.memory)
{
free(chunk.memory);
}
curl_easy_cleanup(curl);
return false;
}
if (!chunk.memory)
{
log_info("chunk memory is null\n");
curl_easy_cleanup(curl);
return false;
}
free(chunk.memory);
curl_easy_cleanup(curl);
return true;
}
bool getMiningInputs(OPTS *opts)
{
if (destructing)
return false;
CURL *curl;
CURLcode res;
bool success = false;
log_info("getMiningInputs..\n");
curl = curl_easy_init();
struct MemoryStruct chunk;
chunk.memory = NULL;
chunk.size = 0;
chunk.memory = (char *)malloc(1);
char url[256];
const char *address = opts->str_address;
if (!address)
{
address = DEFAULT_ADDRESS;
}
snprintf(url, sizeof(url), "%s/mining-inputs?address=%s", opts->controller, address);
log_sensitive("url=%s\n", url);
curl_easy_setopt(curl, CURLOPT_URL, url);
curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, getCurlData);
curl_easy_setopt(curl, CURLOPT_WRITEDATA, (void *)&chunk);
res = curl_easy_perform(curl);
if (res != CURLE_OK)
{
log_err("curl_easy_perform() failed: %s\n", curl_easy_strerror(res));
if (chunk.memory)
{
free(chunk.memory);
}
curl_easy_cleanup(curl);
return false;
}
if (!chunk.memory)
{
log_info("chunk memory is null\n");
curl_easy_cleanup(curl);
return false;
}
json_error_t error;
json_t *root;
root = json_loads(chunk.memory, 0, &error);
if (!root)
{
log_info("error loading json %s\n", error.text);
log_info("data %s\n", chunk.memory);
if (chunk.memory)
{
free(chunk.memory);
}
curl_easy_cleanup(curl);
return false;
}
else
{
json_t *value = json_object_get(root, "status");
json_t *compare = json_string("success"); /* plain json_t: explicitly decref'd on every path below */
if (!json_equal(value, compare))
{
log_info("not successful %s\n", chunk.memory);
json_decref(root);
json_decref(compare);
goto end;
}
json_decref(compare);
json_t *payload = json_object_get(root, "payload");
if (!payload)
{
log_info("unable to get payload. %s\n", chunk.memory);
json_decref(root);
goto end;
}
bool changed = false;
success = json_setValue(&opts->str_lastMined, payload, "lastMinedAssets", &changed);
if (!success)
{
log_info("error data: %s\n", chunk.memory);
json_decref(root);
goto end;
}
success = json_setValue(&opts->str_address, payload, "senderAddress", &changed);
if (!success)
{
log_info("error data: %s\n", chunk.memory);
json_decref(root);
goto end;
}
success = json_setValue(&opts->str_difficulty, payload, "difficultyTarget", &changed);
if (!success)
{
log_info("error data: %s\n", chunk.memory);
json_decref(root);
goto end;
}
success = json_setValue(&opts->str_minor, payload, "minorDifficulty", &changed);
if (!success)
{
log_info("error data: %s\n", chunk.memory);
json_decref(root);
goto end;
}
opts->values_changed = changed;
json_decref(root);
}
end:
if (chunk.memory)
{
free(chunk.memory);
}
curl_easy_cleanup(curl);
return success;
}
int main(int argc, char **argv)
{
log_info("Hi There!!\n");
/* xxx random number */
time_t t;
srand((unsigned)time(&t) + (unsigned)getpid());
signal(SIGINT, sigintHandler);
signal(SIGTERM, sigtermHandler);
OPTS opts;
get_options(argc, argv, &opts);
if (opts.use_controller)
{
getMiningInputs(&opts);
}
log_info("using device %d.\n", opts.device);
cudaSetDevice(opts.device);
checkCUDAError("set device");
int minGridSize, blockSize;
cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, Keccak1600, BLOCKSIZE, 0);
log_info("recommending blockSize=%d gridSize=%d\n", blockSize, minGridSize);
// opts.block_size = blockSize;
// opts.grid_size = minGridSize;
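/*
 * Hypothetical sketch (not enabled here): if the occupancy suggestion were
 * applied instead of the fixed BLOCKNUM/BLOCKX launch further down, the
 * launch would look roughly like:
 *
 *   Keccak1600<<<minGridSize, blockSize, 0, stream[cur]>>>(BLOCKSIZE / 4,
 *       device_output[cur], HASH_SIZE, startNonce);
 */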
if (opts.test)
{
return 0;
}
GetCudaMalloc(BLOCKSIZE);
timeval tpstart;
log_info("CUDA start\n");
int cur = 0;
gettimeofday(&tpstart, NULL);
// double all_sec = 0;
uint64_t start = getTime();
uint64_t tstart = start;
uint64_t elapsed = 0;
uint32_t n_hashes = 0;
uint32_t hash_count = 0;
uint32_t hash_rate = 0;
uint64_t found_nonce = 0;
uint64_t found_minor = 0;
int n_secs = 0;
cudaEvent_t cuda_start, cuda_stop;
setMsg(&opts);
for (int i = 0; i < STREAMNUM; i++)
{
cudaMemcpyAsync(device_input[i], host_input, SUMDATASIZE, cudaMemcpyHostToDevice, stream[i]);
checkCUDAError("memcpy from buf to device_input");
}
uint64_t startNonce;
int run = 0;
#if DEBUG
if (opts.str_startNonce)
{
startNonce = opts.startNonce;
}
else
{
startNonce = 609667058559510624;
}
for (int i = 0; i < 3; i++)
#else
if (opts.str_startNonce)
{
startNonce = opts.startNonce;
}
else
{
startNonce = rand_uint64();
}
// startNonce = 609667058559510630;
while (!destructing)
// for (int i = 0; i < 2; i++)
#endif
{
#if DEBUG
time(&t);
log_info("%s run=%d startNonce=%lu/0x%016lx ->>\n", ctime(&t), run, startNonce, startNonce);
#endif
cudaBindTexture(0, texreference_input, device_input[cur], SUMDATASIZE);
cudaEventCreate(&cuda_start);
cudaEventCreate(&cuda_stop);
cudaEventRecord(cuda_start, 0);
Keccak1600<<<BLOCKNUM, BLOCKX, 0, stream[cur]>>>(BLOCKSIZE / 4, device_output[cur], HASH_SIZE, startNonce);
cudaEventRecord(cuda_stop, 0);
cudaEventSynchronize(cuda_stop);
float elapsedTime = 0.0;
cudaEventElapsedTime(&elapsedTime, cuda_start, cuda_stop);
cudaMemcpyFromSymbol(&hash_count, device_hash_count, sizeof(hash_count), 0, cudaMemcpyDeviceToHost);
cudaMemcpyFromSymbol(&found_nonce, device_found_nonce, sizeof(found_nonce), 0, cudaMemcpyDeviceToHost);
cudaMemcpyFromSymbol(&found_minor, device_found_minor, sizeof(found_minor), 0, cudaMemcpyDeviceToHost);
// log_info("device took %fms for %u hashes\n", elapsedTime, hash_count);
cudaEventDestroy(cuda_start);
cudaEventDestroy(cuda_stop);
cur = (cur + 1) % STREAMNUM;
cudaUnbindTexture(&texreference_input);
// log_info("hash_count=%d\n", hash_count);
if (found_nonce)
{
log_info(">>>>>>>>>>>found_nonce=%lu\n", found_nonce);
submitNonce(&opts, found_nonce, false);
found_nonce = 0;
cudaMemcpyToSymbol(device_found_nonce, &found_nonce, sizeof(found_nonce), 0, cudaMemcpyHostToDevice);
}
if (found_minor)
{
log_info(">>>>>>>>>>>found_minor=%lu\n", found_minor);
submitMinor(&opts, found_minor);
found_minor = 0;
cudaMemcpyToSymbol(device_found_minor, &found_minor, sizeof(found_minor), 0, cudaMemcpyHostToDevice);
}
// hash_count = BLOCKX * BLOCKNUM;
startNonce += hash_count;
n_hashes += hash_count;
hash_count = 0;
cudaMemcpyToSymbol(device_hash_count, &hash_count, sizeof(hash_count), 0, cudaMemcpyHostToDevice);
elapsed = getTime() - tstart;
if (elapsed > 1000)
{
hash_rate = (uint32_t)(((uint64_t)n_hashes * 1000) / elapsed);
log_info(">>> STATS.. hashrate=%u/s n_secs=%ds nonce=%lu\n", hash_rate, n_secs, startNonce);
n_hashes = 0;
tstart = getTime();
n_secs++;
}
if (n_secs > POLL_TIME && !destructing && opts.use_controller)
{
heartbeat(&opts, hash_rate);
bool success = getMiningInputs(&opts);
if (opts.values_changed)
{
setMsg(&opts);
for (int i = 0; i < STREAMNUM; i++)
{
cudaMemcpyAsync(device_input[i], host_input, SUMDATASIZE, cudaMemcpyHostToDevice, stream[i]);
checkCUDAError("memcpy from buf to device_input");
}
opts.values_changed = 0;
}
n_secs = 0;
}
run++;
fflush(stdout);
}
FreeAll();
log_info("END\n");
return 0;
}
|
ed17debea57a03888a77a395f48ffd5aa4ba0aeb.hip
|
// !!! This is a file automatically generated by hipify!!!
/* simple-warp-divergence.cu */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
#define CHECK_CUDA_CALL(call) \
{ \
const hipError_t error = call; \
\
if (error != hipSuccess) { \
fprintf(stderr, "Error (%s:%d), code: %d, reason: %s\n", \
__FILE__, __LINE__, \
error, hipGetErrorString(error)); \
exit(EXIT_FAILURE); \
} \
}
__global__ void warmUp(float* c)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
c[id] = 0.0f;
}
__global__ void warpDivergence(float* c)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
float a = 0.0f;
float b = 0.0f;
if (id % 2 == 0)
a = 100.0f;
else
b = 200.0f;
c[id] = a + b;
}
__global__ void noWarpDivergence(float* c)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
float a = 0.0f;
float b = 0.0f;
if ((id / warpSize) % 2 == 0)
a = 100.0f;
else
b = 200.0f;
c[id] = a + b;
}
__global__ void warpDivergencePredicate(float* c)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
float a = 0.0f;
float b = 0.0f;
bool pred = (id % 2 == 0);
if (pred)
a = 100.0f;
if (!pred)
b = 200.0f;
c[id] = a + b;
}
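/*
 * Worked example (warpSize == 32): in warpDivergence() threads 0,2,4,... of a
 * warp take the 'if' branch and threads 1,3,5,... take the 'else' branch, so
 * both paths are executed serially by every warp. In noWarpDivergence(),
 * threads 0..31 all compute id / warpSize == 0 and threads 32..63 all compute
 * 1, so each warp takes a single, uniform branch and no divergence occurs.
 */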
int main(int argc, char** argv)
{
int dev;
hipDeviceProp_t deviceProp;
int size;
int blockSize;
size_t numOfBytes;
float* devC;
struct timeval startTime;
struct timeval endTime;
/* Setup device */
dev = 0;
CHECK_CUDA_CALL(hipGetDeviceProperties(&deviceProp, dev));
printf("Using device %d: %s\n", dev, deviceProp.name);
CHECK_CUDA_CALL(hipSetDevice(dev));
/* Set data size */
if (argc > 1)
blockSize = atoi(argv[1]);
else
blockSize = 64;
if (argc > 2)
size = atoi(argv[2]);
else
size = 64;
printf("Data size: %d, Block size: %d\n", size, blockSize);
/* Set execution configuration */
dim3 block(blockSize, 1);
dim3 grid((size + block.x - 1) / block.x, 1);
printf("Execution configuration: <<<(%d, %d), (%d, %d)>>>\n",
grid.x, grid.y, block.x, block.y);
numOfBytes = size * sizeof(float);
CHECK_CUDA_CALL(hipMalloc((float**)&devC, numOfBytes));
CHECK_CUDA_CALL(hipDeviceSynchronize());
/* Call kernel for warming up */
gettimeofday(&startTime, NULL);
hipLaunchKernelGGL(( warmUp), dim3(grid), dim3(block), 0, 0, devC);
CHECK_CUDA_CALL(hipDeviceSynchronize());
gettimeofday(&endTime, NULL);
printf("Warmup execution time: %.6f\n",
((double)endTime.tv_sec + (double)endTime.tv_usec * 1.0e-6) -
((double)startTime.tv_sec + (double)startTime.tv_usec * 1.0e-6));
/* Check kernel error */
CHECK_CUDA_CALL(hipGetLastError());
/* Call kernel that causes warp divergence */
gettimeofday(&startTime, NULL);
hipLaunchKernelGGL(( warpDivergence), dim3(grid), dim3(block), 0, 0, devC);
CHECK_CUDA_CALL(hipDeviceSynchronize());
gettimeofday(&endTime, NULL);
printf("WarpDivergence execution time: %.6f\n",
((double)endTime.tv_sec + (double)endTime.tv_usec * 1.0e-6) -
((double)startTime.tv_sec + (double)startTime.tv_usec * 1.0e-6));
/* Check kernel error */
CHECK_CUDA_CALL(hipGetLastError());
/* Call kernel that does not cause warp divergence */
gettimeofday(&startTime, NULL);
hipLaunchKernelGGL(( noWarpDivergence), dim3(grid), dim3(block), 0, 0, devC);
CHECK_CUDA_CALL(hipDeviceSynchronize());
gettimeofday(&endTime, NULL);
printf("NoWarpDivergence execution time: %.6f\n",
((double)endTime.tv_sec + (double)endTime.tv_usec * 1.0e-6) -
((double)startTime.tv_sec + (double)startTime.tv_usec * 1.0e-6));
/* Check kernel error */
CHECK_CUDA_CALL(hipGetLastError());
/* Call kernel that uses predicates */
gettimeofday(&startTime, NULL);
hipLaunchKernelGGL(( warpDivergencePredicate), dim3(grid), dim3(block), 0, 0, devC);
CHECK_CUDA_CALL(hipDeviceSynchronize());
gettimeofday(&endTime, NULL);
printf("WarpDivergencePredicate execution time: %.6f\n",
((double)endTime.tv_sec + (double)endTime.tv_usec * 1.0e-6) -
((double)startTime.tv_sec + (double)startTime.tv_usec * 1.0e-6));
/* Check kernel error */
CHECK_CUDA_CALL(hipGetLastError());
/* Free device memory */
CHECK_CUDA_CALL(hipFree(devC));
/* Reset device */
CHECK_CUDA_CALL(hipDeviceReset());
return EXIT_SUCCESS;
}
|
ed17debea57a03888a77a395f48ffd5aa4ba0aeb.cu
|
/* simple-warp-divergence.cu */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>
#include <cuda_runtime.h>
#define CHECK_CUDA_CALL(call) \
{ \
const cudaError_t error = call; \
\
if (error != cudaSuccess) { \
fprintf(stderr, "Error (%s:%d), code: %d, reason: %s\n", \
__FILE__, __LINE__, \
error, cudaGetErrorString(error)); \
exit(EXIT_FAILURE); \
} \
}
__global__ void warmUp(float* c)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
c[id] = 0.0f;
}
__global__ void warpDivergence(float* c)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
float a = 0.0f;
float b = 0.0f;
if (id % 2 == 0)
a = 100.0f;
else
b = 200.0f;
c[id] = a + b;
}
__global__ void noWarpDivergence(float* c)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
float a = 0.0f;
float b = 0.0f;
if ((id / warpSize) % 2 == 0)
a = 100.0f;
else
b = 200.0f;
c[id] = a + b;
}
__global__ void warpDivergencePredicate(float* c)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
float a = 0.0f;
float b = 0.0f;
bool pred = (id % 2 == 0);
if (pred)
a = 100.0f;
if (!pred)
b = 200.0f;
c[id] = a + b;
}
int main(int argc, char** argv)
{
int dev;
cudaDeviceProp deviceProp;
int size;
int blockSize;
size_t numOfBytes;
float* devC;
struct timeval startTime;
struct timeval endTime;
/* Setup device */
dev = 0;
CHECK_CUDA_CALL(cudaGetDeviceProperties(&deviceProp, dev));
printf("Using device %d: %s\n", dev, deviceProp.name);
CHECK_CUDA_CALL(cudaSetDevice(dev));
/* Set data size */
if (argc > 1)
blockSize = atoi(argv[1]);
else
blockSize = 64;
if (argc > 2)
size = atoi(argv[2]);
else
size = 64;
printf("Data size: %d, Block size: %d\n", size, blockSize);
/* Set execution configuration */
dim3 block(blockSize, 1);
dim3 grid((size + block.x - 1) / block.x, 1);
printf("Execution configuration: <<<(%d, %d), (%d, %d)>>>\n",
grid.x, grid.y, block.x, block.y);
numOfBytes = size * sizeof(float);
CHECK_CUDA_CALL(cudaMalloc((float**)&devC, numOfBytes));
CHECK_CUDA_CALL(cudaDeviceSynchronize());
/* Call kernel for warming up */
gettimeofday(&startTime, NULL);
warmUp<<<grid, block>>>(devC);
CHECK_CUDA_CALL(cudaDeviceSynchronize());
gettimeofday(&endTime, NULL);
printf("Warmup execution time: %.6f\n",
((double)endTime.tv_sec + (double)endTime.tv_usec * 1.0e-6) -
((double)startTime.tv_sec + (double)startTime.tv_usec * 1.0e-6));
/* Check kernel error */
CHECK_CUDA_CALL(cudaGetLastError());
/* Call kernel that causes warp divergence */
gettimeofday(&startTime, NULL);
warpDivergence<<<grid, block>>>(devC);
CHECK_CUDA_CALL(cudaDeviceSynchronize());
gettimeofday(&endTime, NULL);
printf("WarpDivergence execution time: %.6f\n",
((double)endTime.tv_sec + (double)endTime.tv_usec * 1.0e-6) -
((double)startTime.tv_sec + (double)startTime.tv_usec * 1.0e-6));
/* Check kernel error */
CHECK_CUDA_CALL(cudaGetLastError());
/* Call kernel that does not cause warp divergence */
gettimeofday(&startTime, NULL);
noWarpDivergence<<<grid, block>>>(devC);
CHECK_CUDA_CALL(cudaDeviceSynchronize());
gettimeofday(&endTime, NULL);
printf("NoWarpDivergence execution time: %.6f\n",
((double)endTime.tv_sec + (double)endTime.tv_usec * 1.0e-6) -
((double)startTime.tv_sec + (double)startTime.tv_usec * 1.0e-6));
/* Check kernel error */
CHECK_CUDA_CALL(cudaGetLastError());
/* Call kernel that uses predicates */
gettimeofday(&startTime, NULL);
warpDivergencePredicate<<<grid, block>>>(devC);
CHECK_CUDA_CALL(cudaDeviceSynchronize());
gettimeofday(&endTime, NULL);
printf("WarpDivergencePredicate execution time: %.6f\n",
((double)endTime.tv_sec + (double)endTime.tv_usec * 1.0e-6) -
((double)startTime.tv_sec + (double)startTime.tv_usec * 1.0e-6));
/* Check kernel error */
CHECK_CUDA_CALL(cudaGetLastError());
/* Free device memory */
CHECK_CUDA_CALL(cudaFree(devC));
/* Reset device */
CHECK_CUDA_CALL(cudaDeviceReset());
return EXIT_SUCCESS;
}
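/*
 * Suggested (optional) way to confirm the divergence behaviour on NVIDIA
 * hardware, assuming nvprof is available for the target GPU:
 *
 *   nvcc -O3 -o simple-warp-divergence simple-warp-divergence.cu
 *   nvprof --metrics branch_efficiency ./simple-warp-divergence
 *
 * branch_efficiency should be noticeably lower for warpDivergence() than for
 * noWarpDivergence() when the compiler does not predicate the branch away.
 */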
|
c7ff9107b52e378d3b9ae763cd43c5fd0bca7639.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*****************************************************************************
*
* File: cudaSaTabsearch_kernel.cu
* Author: Alex Stivala
* Created: January 2010
*
* $Id: cudaSaTabsearch_kernel.cu 3557 2010-04-13 00:54:09Z alexs $
*
* CUDA kernel for simulated annealing tableau matching (discrete).
* This is a CUDA implementation of the FORTRAN subroutine TSAMTD,
* using a modified version of the CUDA SDK Mersenne Twister kernel
* to generate pseudorandom numbers.
*
* if CUDA preprocessor symbol is defined, this is the CUDA kernel version.
* __DEVICE_EMULATION__ may also be defined for this case (nvcc -deviceemu)
* in which case device emulation mode is being used
*
* Otherwise (CUDA symbol not defined), this builds a host (single threaded)
* version.
*
* if DEBUG (in which case either __DEVICE_EMULATION__ must be defined,
* or CUDA must not be defined), is defined then verbose stderr output
* is generated, and various assertions and checks are compiled in.
*
* If USE_SHARED_MEMORY is defined, then each block copies the tableau
* and distance matrix it is operating on from the db in global memory
* into the block shared memory and uses it there, to take advantage
* of faster (but very small) shared memory. Not using this allows
* larger structures to be used. Note that, even when this is not
* defined, the ssetypes and maxscores vectors are kept in shared
* memory (this does not limit the maximum size of db structures).
*
*****************************************************************************/
#if defined(CUDA)
#include <math_functions.h>
#endif
#include "saparams.h"
#if defined(__DEVICE_EMULATION__) || !defined(CUDA)
#include <stdio.h>
#include <assert.h>
#endif
#if !defined(CUDA)
#include <math.h>
#include <hip/driver_types.h> /* for struct hipPitchedPtr, hipExtent */
#define __constant__
#define __shared__ static
#define __global__
#define __device__
#define MT_NN 1 /* not really used */
extern "C" float RandomHost(void);
#endif
#define EPS 1.1e-7 /* epsilon for making sure rand is < 1.0 */
#if defined(CUDA)
#if defined(USE_SHARED_MEMORY)
#define MAXDIM_KERNEL MAXDIM_GPU /*for shared memory,restrict to MAXDIM_GPU*/
#elif defined(SMALL_MAXDIM)
#define MAXDIM_KERNEL MAXDIM_GPU
#else
#define MAXDIM_KERNEL MAXDIM
#endif
#else
#define MAXDIM_KERNEL MAXDIM /* otherwise, use largest maxdim */
#endif
/*****************************************************************************
*
* __constant__ memory
*
* The query tableau and distance matrix is loaded into constant memory.
* These are MAXDIM not MAXDIM_KERNEL as constant memory is larger than
* shared memory so not so restricted (at the moment, on e.g. GTX285,
* constant memory is 64K but shared memory is only 16K per block).
*
*****************************************************************************/
#if !defined(CUDA)
/* tricky - we redefine these symbols so the host versions are different */
#define c_qn c_qn_host
#define c_qtab c_qtab_host
#define c_qdmat c_qdmat_host
#define c_qssetypes c_qssetypes_host
#else
/* nasty hack -- can't seem to be able to get constant memory in a separate
file, so forced to have 3 different versions of the constant for the
3 kernel versions. */
#if !defined(USE_SHARED_MEMORY)
#if defined(SMALL_MAXDIM)
#define c_qn c_qn_noshared_small
#define c_qtab c_qtab_noshared_small
#define c_qdmat c_qdmat_noshared_small
#define c_qssetypes c_qssetypes_noshared_small
#else
#define c_qn c_qn_noshared
#define c_qtab c_qtab_noshared
#define c_qdmat c_qdmat_noshared
#define c_qssetypes c_qssetypes_noshared
#endif
#endif
#endif
__constant__ int c_qn; // query structure size
__constant__ char c_qtab[MAXDIM*MAXDIM]; // query tableau
__constant__ float c_qdmat[MAXDIM*MAXDIM]; // query distance matrix
__constant__ char c_qssetypes[MAXDIM]; // main diagonal of c_qtab (query SSE types)
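/*
 * Hedged sketch (host side, lives in the driver program, not in this file):
 * the query data is presumably loaded into the constant symbols above with
 * something along the lines of
 *
 *   hipMemcpyToSymbol(HIP_SYMBOL(c_qn), &qn, sizeof(int));
 *   hipMemcpyToSymbol(HIP_SYMBOL(c_qtab), qtab, MAXDIM*MAXDIM*sizeof(char));
 *   hipMemcpyToSymbol(HIP_SYMBOL(c_qdmat), qdmat, MAXDIM*MAXDIM*sizeof(float));
 *   hipMemcpyToSymbol(HIP_SYMBOL(c_qssetypes), qssetypes, MAXDIM*sizeof(char));
 *
 * where qn/qtab/qdmat/qssetypes are illustrative host-side names only.
 */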
/*****************************************************************************
*
* __device__ functions: callable on GPU only, inlined
*
*****************************************************************************/
#if defined(CUDA)
// Fast GPU random number generator and Box-Muller transform
#include "MersenneTwister_kernel.cu"
#endif
/* Index into 2d m x n array stored in contiguous memory */
//#define INDEX2D(i,j,m,n) ( ((i)*(n) + (j)) )
/* Get char* to (i,j) element of 2d array A stored in contiguous
* memory with pitch (CUDA version of Fortran stride or leading
* dimension). NB we don't index like INDEX2D but return address as char*
* which must then be cast to appropriate type, since the address/pitch
* computations in CUDA are always done in units of bytes, so don't want
* C address computation using size of actual type */
#define GET2D(A,i,j,pitch,type) ( *((type *)((char *)(A) + (i)*(pitch) + (j)*sizeof(type) )) )
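/*
 * Example: for a float matrix allocated with hipMallocPitch(&dmat, &pitch,
 * n*sizeof(float), n), element (i,j) is read as
 *
 *   float d = GET2D(dmat, i, j, pitch, float);
 *
 * i.e. the row offset is i*pitch bytes (pitch may be padded past
 * n*sizeof(float)) and the column offset is j*sizeof(float) bytes.
 */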
/*
*
* tscord - Tableau (discrete) matching score function
*
* Return the tableau matching score between two tableau entries
* x and y.
* The score is 2 if the tableau entries are equal, 1 if they are
* equal in only one position, else -2.
*
* Parameters:
* x, y - the two two-char tableau codes encoded as 4 bits per char
* as per parsetableaux.c
*
* Return value:
* tableau matching score for x and y
*/
__device__ int tscord(char x, char y)
{
char xhigh,xlow,yhigh,ylow;
int score;
xhigh = (x & 0xF0);
xlow = (x & 0x0F);
yhigh = (y & 0xF0);
ylow = (y & 0x0F);
score = ( xhigh == yhigh ? (xlow == ylow ? 2 : 1) :
(xlow == ylow ? 1 : -2) );
/*
if (xhigh == yhigh)
{
if (xlow == ylow)
score = 2;
else
score = 1;
}
else if (xlow == ylow)
score = 1;
else
score = -2;
*/
return score;
}
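/*
 * Worked example: x = 0x12 and y = 0x13 share the high nibble (0x1) but
 * differ in the low nibble, so tscord returns 1; 0x12 vs 0x12 returns 2;
 * 0x12 vs 0x24 (both nibbles differ) returns -2.
 */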
/*
* Compute the score for a given SSE matching between two structures
* given their tableaux (discrete version), and distance matrices.
*
* The score computed is
*
* \sum_{i=1}^{N_A} \sum_{k=1}^{N_A} \sum_{j=1}^{N_B} \sum_{l=1}^{N_B} \zeta(T^A_{ik}, T^B_{jl}) x_{ij} x_{kl}
*
* in the QIP formulation where x_{ij} is the binary indicator variable
* indicating SSE i in A matched with SSE j in B.
*
* But actually here we are representing the matching with the ssemap
* vector so can much more efficiently compute this in only
* O(N_A^2) with 2 nested loops over the ssemap vector rather than
* requiring O(N_A^2 N_B^2) with 4 nested loops in the naive implementation
* of the score computation using indicator variables (required only
* for using a general purpose QP solver, can do it more efficiently here).
*
* Furthermore, we can actually halve the computation since the tableaux
* matrices are symmetric by only iterating from k = i .. N_A
* inside the outer loop i = 1 .. N_A.
*
* Parameters:
* tab1 (input) encoded as two 4-bit char code
* Tableau for one structure. Symmetric.
*
* tab1_pitch (input) size_t
* pitch of tab1
*
* n1 (input) INTEGER
* Dimension of tab1 array.
*
* tab2 (input) encoded as two 4-bit char code
* Tableau for second structure. Symmetric.
*
* tab2_pitch (input) size_t
* pitch of tab2
*
* n2 (input) INTEGER
* Dimension of tab2 matrix.
*
* dmat1 (input) 2d float array
* SSE distance matrix for one structure. symmetric
*
* dmat1_pitch (input) size_t
* pitch of dmat1
*
* dmat2 (input) 2d float array
* SSE distance matrix for second structure. symmetric
*
* dmat2_pitch (input) size_t
* pitch of dmat2
*
* ssemap (input) int vector, dimension(n1)
* SSE map vector of dimension n1. Each ssemap(i) is the SSE index
* in tab2 that SSE i in tab1 is matched with.
*
*
* Return value:
* The tableau matching score for given mapping by ssemap.
*
*/
__device__ int tmscord(char *tab1, size_t tab1_pitch, int n1,
char *tab2, size_t tab2_pitch, int n2,
char *dmat1, size_t dmat1_pitch,
char *dmat2, size_t dmat2_pitch,
int ssemap[])
{
int i,j,k,l;
int score;
score = 0;
for (i = 0; i < n1; i++)
{
for (k = i + 1; k < n1; k++)
{
j = ssemap[i];
l = ssemap[k];
/* only add to score when both are mapped to something, and */
/* diagonal entries are SSE type not angle so don't use them either */
#if defined(__DEVICE_EMULATION__) || (defined(DEBUG) && !defined(CUDA))
assert(j == -1 || i != k && j != l);
#endif
if (j >= 0 && l >= 0)
{
/*
#if defined(__DEVICE_EMULATION__) || (defined(DEBUG) && !defined(CUDA))
fprintf(stderr, "%d %d %d %d %X %X\n", i,j,k,l,
GET2D(tab1,i,k,tab1_pitch,char),
GET2D(tab2,j,l,tab2_pitch,char));
fprintf(stderr, "%d %d %d %d %f %f (%f)\n",i,j,k,l,
GET2D(dmat1,i,k,dmat1_pitch,float) ,
GET2D(dmat2,j,l,dmat2_pitch,float) ,
fabsf(GET2D(dmat1,i,k,dmat1_pitch,float) - GET2D(dmat2,j,l,dmat2_pitch,float)) );
#endif
*/
/* don't add score when difference between SSE distances
exceeds threshold */
if (fabsf(GET2D(dmat1,i,k,dmat1_pitch,float) - GET2D(dmat2,j,l,dmat2_pitch,float)) <= MXSSED)
{
score += tscord(GET2D(tab1,i,k,tab1_pitch,char), GET2D(tab2,j,l,tab2_pitch,char));
}
}
}
}
return score;
}
/*
* deltasd -
*
* Compute the difference in score due to removing a particular
* matching of two SSEs and replacing it with a new one.
* We can do this in O(N_A) time rather than the O(N_A^2) required for
* computing the score from scratch as in tmscord.
*
*
* Parameters:
* tab1 (input) encoded as two 4-bit char code
* Tableau for one structure. Symmetric.
*
* tab1_pitch (input) size_t
* pitch of tab1
*
* n1 (input) INTEGER
* Dimension of tab1 array.
*
* tab2 (input) encoded as two 4-bit char code
* Tableau for second structure. Symmetric.
*
* tab2_pitch (input) size_t
* pitch of tab2
*
* n2 (input) INTEGER
* Dimension of tab2 matrix.
*
* dmat1 (input) 2d float array
* SSE distance matrix for one structure. symmetric
*
* dmat1_pitch (input) size_t
* pitch of dmat1
*
* dmat2 (input) 2d float array
* SSE distance matrix for second structure. symmetric
*
* dmat2_pitch (input) size_t
* pitch of dmat2
*
* ssemap (input) int vector, dimension(n1)
* SSE map vector of dimension n1. Each ssemap(i) is the SSE index
* in tab2 that SSE i in tab1 is matched with.
*
* sse_i (input) int
* SSE in tab1 that is being replaced with a new matching
*
* old_j (input) int
* SSE in tab2 of old matching
*
* new_j (input) int
* SSE in tab2 of new matching (matched to sse_i)
*
*
* Return value:
* The difference to add to the current score due to replacing
* the sse_i <-> old_j matching with the sse_i <-> new_j matching.
*
*/
__device__ int deltasd(char *tab1, size_t tab1_pitch, int n1,
char *tab2, size_t tab2_pitch, int n2,
char *dmat1, size_t dmat1_pitch,
char *dmat2, size_t dmat2_pitch,
int ssemap[],
int sse_i,
int old_j, int new_j)
{
int k,l;
int delta = 0;
float dmat1_i_k;
#if defined(__DEVICE_EMULATION__) || (defined(DEBUG) && !defined(CUDA))
fprintf(stderr,"aaa %d %d %d \n", sse_i, old_j, new_j);
#endif
for (k = 0; k < n1; k++)
{
l = ssemap[k];
if (l >= 0)
{
dmat1_i_k = GET2D(dmat1,sse_i,k,dmat1_pitch,float);
if (old_j >= 0 && l != old_j && k != sse_i && fabsf(dmat1_i_k - GET2D(dmat2,old_j,l,dmat2_pitch,float)) <= MXSSED)
delta -= tscord(GET2D(tab1,sse_i,k,tab1_pitch,char), GET2D(tab2,old_j,l,tab2_pitch,char));
#if defined(__DEVICE_EMULATION__) || (defined(DEBUG) && !defined(CUDA))
fprintf(stderr,"yyy %d %d %d %d %d\n", sse_i, old_j, new_j,k,l);
#endif
if (new_j >= 0 && l != new_j && k != sse_i && fabsf(dmat1_i_k - GET2D(dmat2,new_j,l,dmat2_pitch,float)) <= MXSSED)
delta += tscord(GET2D(tab1,sse_i,k,tab1_pitch,char), GET2D(tab2,new_j,l,tab2_pitch,char));
}
}
return delta;
}
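/*
 * In other words, if S(ssemap) is the full tmscord score, the score after
 * remapping sse_i from old_j to new_j is obtained incrementally as
 *
 *   S' = S - (all pair terms of S that involve the sse_i <-> old_j match)
 *          + (all pair terms contributed by the new sse_i <-> new_j match)
 *
 * which is why a single pass over the n1 mapped SSEs (O(N_A)) suffices here
 * instead of recomputing S from scratch with tmscord (O(N_A^2)).
 */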
/*
* Build the initial mapping of the two structures for heuristic
* tableaux matching algorithms.
*
* we make an initial matching where we just go along
* the sequence set match of same SSEs e.g. if 1st in query is helix,
* match that to first helix in db struture, and so on.
* (Unless LTYPE flag not set, then we don't care about SSE types and
* just go along sequence of SSEs).
* Then compute the score.
*
* Parameters:
*
*
* ssetypes1 (input) char vector length n1
* vector of SSE types in structure 1
*
* n1 (input) INTEGER
* Dimension of tab1 matrix
*
* ssetypes2 (input) char vector length n2
* vector of SSE types in structure 2
*
* n2 (input) INTEGER
* Dimension of tab2 matrix.
*
* lorder (input) LOGICAL
* if true, penalize matches between SSEs not maintaining sequence
* order between the tableaux i.e. if i < k and j >= l for i,k
* indices in tab1 and j,l indices in tab2.
*
*
* ssemap (output) INTEGER vector, dimension (n1)
* solution SSE map vector of dimension n1.
* Each ssemap(i) is the SSE index
* in tab2 that SSE i in tab1 is matched with.
*
* revmap (output) INTEGER vector, dimension(n2)
* reverse ssemap: revmap(j) for j index in tab2 is the index i
* in tab1 that matches that sse i.e. if ssemap(i) = j then
* revmap(j) = ssemap(i) and vice versa, for quick lookup of what
* is matched so we can easily check that one-to-one mapping maintained
*
* istate (input/output) State for RNG
* mt (input/output) State vector for RNG
*
* Return value:
* on exit, status of the computation
* = 0 : successful exit
* = 1 : cannot set up initial ssemap with both lorder and ltype
*/
__device__ int thinit(char ssetypes1[], int n1,
char ssetypes2[], int n2,
int lorder,
int ssemap[], int revmap[],
int *iState, unsigned int mt[MT_NN])
{
int i,j;
int info = 0;
float randnum;
#if defined(__DEVICE_EMULATION__) || (defined(DEBUG) && !defined(CUDA))
for(int k = 0; k < n1; k++)
fprintf(stderr,"%02X ", ssetypes1[k]);
fprintf(stderr, "\n");
for(int k = 0; k < n2; k++)
fprintf(stderr,"%02X ", ssetypes2[k]);
fprintf(stderr, "\n");
#endif
/* initialize ssemap to all -1 meaning no match for each sse */
for (i = 0; i < n1; i++)
ssemap[i] = -1;
for (j = 0; j < n2; j++)
revmap[j] = -1;
/* initial SSE map set by matching along sequence, only matching SSEs
* of the same type if the LTYPE flag is set.
*/
j = 0;
for (i = 0; i < n1; i++)
{
#if defined(CUDA)
randnum = RandomGPU(iState, mt);
#else
randnum = RandomHost();
#endif
if (randnum < INIT_MATCHPROB)
{
while (j < n2 && ssetypes1[i] != ssetypes2[j])
j++;
if (j >= n2)
{
/* not all SSEs in tab1 are mapped, but that's OK */
info = 0;
return info;
}
else
{
ssemap[i] = j;
revmap[j] = i;
j++;
}
}
}
return info;
}
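/*
 * Small worked example (illustrative type labels; assuming every random draw
 * is below INIT_MATCHPROB): with ssetypes1 = {H, E} and ssetypes2 = {E, H, E},
 * SSE 0 (H) skips ssetypes2[0] and matches ssetypes2[1], SSE 1 (E) then
 * matches ssetypes2[2], giving ssemap = {1, 2} and revmap = {-1, 0, 1}.
 */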
/*
* find the index of first SSE of same type in tableaux that is not
* already mapped or -1 if not found
*
* Parameters:
*
* ssetypesvec (input) char vector length n
* vector of SSE types in structure
*
* n (input) INTEGER
* Dimension of tableaux, length of ssetypesvec
*
* startind (input) INTEGER
* SSE index to start at in tab
*
* ssetype (input) CHARACTER*2
* SSE type as two character string 'xa' etc.
*
* smap (input) INTEGER vector, dimension(n1)
* each smap(i) is index in other tableau it is already mapped
* to, or -1 for not mapped.
*
* endind (input) INTEGER
* last SSE index to consider in tab
*
* istate (input/output) State for RNG
* mt (input/output) State vector for RNG
*/
__device__ int randtypeind(char ssetypesvec[], int n,
int startind, char ssetype, int smap[], int endind,
int *iState, unsigned int mt[MT_NN])
{
int i,indi,rti;
int indlist[MAXDIM];
float randnum;
unsigned int randidx;
i = startind;
indi = 0;
rti = -1;
for (i = startind; i < endind; i++)
{
#if defined(__DEVICE_EMULATION__) || (defined(DEBUG) && !defined(CUDA))
assert(i >= 0);
assert(i < n);
#endif
if (ssetypesvec[i] == ssetype && smap[i] < 0)
indlist[indi++] = i;
}
if (indi == 1)
rti = indlist[0];
else if (indi > 1)
{
#if defined(CUDA)
randnum = RandomGPU(iState, mt);
#else
randnum = RandomHost();
#endif
randidx = (unsigned int)((randnum - EPS) * indi);
rti = indlist[randidx];
}
return rti;
}
/*
* integer vector copy y <- x
*
*/
__device__ void icopy(int n, int x[], int y[])
{
int i;
for (i = 0; i < n; i++)
y[i] = x[i];
}
/*****************************************************************************
*
* __global__ functions: GPU kernels, callable from host
*
*****************************************************************************/
/*
* CUDA GPU kernel for tableau matching using simulated annealing.
*
* We make an initial matching where we just go along
* the sequence set match of same SSEs e.g. if 1st in query is helix,
* match that to first helix in db struture, and so on.
* (Unless LTYPE flag not set, then we don't care about SSE types and
* just go along sequence of SSEs).
* Then compute the score.
*
* Then we use simulated annealing to improve the score. At each
* iteration a random SSE is chosen to be remapped to a random
* other SSE (obeying constraints that are set) or
* mapped to no SSE in the other structure.
*
*
* "Embarrassingly parallel" version: just do all the loops in here,
* each thread does a different database structure.
* The query tableau and distance matrix is placed in constant memory
* for faster access (constant memory is cached but very limited size:
* we certainly can't put the whole db of structures there for instance).
*
* Parameters:
*
* dbsize (input) INTEGER
* number of structures in database
*
* lorder (input) LOGICAL
* if true, penalize matches between SSEs not maintaining sequence
* order between the tableaux i.e. if i < k and j >= l for i,k
* indices in tab1 and j,l indices in tab2.
*
* lsoln (input) LOGICAL
* if true, return the SSE mapping for the best solution found.
*
* maxstart (input) INTEGER
* number of restarts (iterations of cooling schedule).
* Should be a multiple of blocksize.
*
* d_qdmat (input) float array, dimension (n1,n1)
* SSE distance matrix for query structure. symmetric
*
* d_qdmat_pitch (input) size_t
* pitch of d_qdmat
*
* d_tableaux (input) pointer to char arrays, CUDA Pitched Pointer
* Pointer to database of tableaux
*
* tableaux_extent (input) hipExtent
* Extent structure for d_tableaux
*
* d_orders (input) pointer to ints
* Pointer to database of orders (order of each db tableau)
*
* d_distmatrices (input) pitched pointer to float arrays
* Pointer to database of distance matrices
*
* distmatrices_extent (input) hipExtent
* Extent structure for d_distmatrices
*
* outscore (output) INTEGER vector, dimension (dbsize)
* scores of matching query with each db structure
*
* outssemap (output) INTEGER array, dimension (dbsize, n1)
* solution SSE map vector of dimension n1 for each db structure
* Each ssemap(d,i) is the SSE index
* in dbentry d that SSE i in query is matched with.
*
*/
#if defined(CUDA)
#if defined(USE_SHARED_MEMORY)
__global__ void sa_tabsearch_gpu
#elif defined(SMALL_MAXDIM)
__global__ void sa_tabsearch_gpu_noshared_small
#else
__global__ void sa_tabsearch_gpu_noshared
#endif
#else
void sa_tabsearch_host
#endif
(int dbsize,
int lorder,
int lsoln,
int maxstart,
hipPitchedPtr d_tableaux,
hipExtent tableaux_extent,
int *d_orders,
hipPitchedPtr d_distmatrices,
hipExtent distmatrices_extent,
int *outscore,
int *outssemap)
{
/*
*
* __shared__ memory
*
* Each block of threads copies one database tableau and distance matrix
* from the global memory into shared memory. Each thread in the block
* runs the simulated annealing schedule (with different RNG) on the query
* and this shared tableau+distmatrix, so the 'restarts' are parallelized
* within the block.
*
* Note the shared memory is very restricted in size (16K) so we can
* only fit limited size structures in it.
*
*/
#if defined(USE_SHARED_MEMORY)
__shared__ char s_tab[MAXDIM_KERNEL*MAXDIM_KERNEL];
__shared__ float s_dmat[MAXDIM_KERNEL*MAXDIM_KERNEL];
#endif
__shared__ char s_ssetypes[MAXDIM_KERNEL]; // TODO maybe shouldn't use this in shared
__shared__ int s_maxscores[128]; // FIXME should be max threads in block
__shared__ int s_maxscore_threadid;
/*
* automatic (register and local) memory
*/
// const int THREAD_N = blockDim.x * gridDim.x; // total number of threads
#if defined(CUDA)
const int tid = blockDim.x * blockIdx.x + threadIdx.x; // thread id
const int blockid = blockIdx.x; // block id
const int gridDimx = gridDim.x; // number of blocks in grid
const int blockDimx = blockDim.x; // number of threads in block
const int threadIdxx = threadIdx.x; // thread id in the block
#else
const int tid = 0;
const int blockid = 0;
const int gridDimx = 1;
const int blockDimx = 1;
const int threadIdxx = 0;
#endif
int revmap[MAXDIM_KERNEL]; /* reverse ssemap: revmap(j) for j index in
tab2 is the index i in tab1 that matches that
sse i.e. if ssemap(i) = j then revmap(j) =
ssemap(i) and vice versa, for quick lookup of
what is matched so we can easily check that
one-to-one mapping maintained revmap has
dimension (n2) */
int bestmap[MAXDIM]; /* best ssemap feound. this has dimenion (n1) */
int ssemap[MAXDIM];
int maxscore,score,newscore;
int iter;
float temp;
float randnum;
int startj,endj,k,oldj,newj;
unsigned int ssei;
char *tab1;
char *tab2;
char *dmat1; // we use char* not float* to do pitched pointer arithmetic
char *dmat2;
int n1,n2;
int restart;
int dbi;
int iState;
unsigned int mt[MT_NN];
size_t tab1_pitch, tab2_pitch,dmat1_pitch,dmat2_pitch;
int i,j;
int blockmaxscore;
int delta;
#if defined(__DEVICE_EMULATION__)
fprintf(stderr, "running in device emulation mode\n");
fprintf(stderr, "sizeof(int) == %d\n", sizeof(int));
#endif
#if !defined(CUDA)
fprintf(stderr, "running on host\n");
#endif
#if defined(CUDA)
InitRandomGPU(&iState, mt);
#endif
n1 = c_qn;
tab1 = c_qtab;
tab1_pitch = MAXDIM; /* NB MAXDIM not MAXDIM_KERNEL, see comments on c_qtab */
dmat1 = (char*)c_qdmat;
dmat1_pitch = MAXDIM * sizeof(float);
// each of the gridDim.x blocks does as many as needed to do whole database
for (dbi = blockid; dbi < dbsize; dbi += gridDimx)
{
n2 = d_orders[dbi];
// get the tableau array for db entry index dbi using pitched pointer
char *d_tableauxPtr = (char *)d_tableaux.ptr;
size_t tableauxPitch = d_tableaux.pitch;
size_t tableauxSlicePitch = tableauxPitch * tableaux_extent.height;
char *tableauxSlice = d_tableauxPtr + dbi * tableauxSlicePitch;
tab2 = tableauxSlice;
tab2_pitch = tableauxPitch;
// and similarly for distmatrices (2d float arrays)
char *d_distmatricesPtr = (char *)d_distmatrices.ptr;
size_t distmatricesPitch = d_distmatrices.pitch;
size_t distmatricesSlicePitch = distmatricesPitch * distmatrices_extent.height;
char *distmatricesSlice = d_distmatricesPtr + dbi * distmatricesSlicePitch;
dmat2 = distmatricesSlice;
dmat2_pitch = distmatricesPitch;
// set the s_ssetypes vector as main diagonal of this db instance tableau
// in parallel (each thread in block does one element)
for (j = threadIdxx; j < n2; j += blockDimx)
s_ssetypes[j] = GET2D(tab2,j,j,tab2_pitch,char); // use global not shared so no sync required
#if defined(USE_SHARED_MEMORY)
//
// parallel copy (each thread in block does as many elements as needed)
// of the db entry for this block into the shared memory for the block
// we'll have each thread do one row of the copy (may leave threads idle
// since likely to have more threads in block than rows in tableau).
//
for (i = threadIdxx; i < n2; i += blockDimx)
for (j = 0; j < n2; j++)
{
*(s_tab + i*MAXDIM_KERNEL + j) = GET2D(tab2,i,j,tab2_pitch,char);
*(s_dmat + i*MAXDIM_KERNEL + j) = GET2D(dmat2,i,j,dmat2_pitch,float);
}
tab2_pitch = MAXDIM_KERNEL; /* pitch is now leading dimension in shared */
dmat2_pitch = MAXDIM_KERNEL*sizeof(float);
#else
/* not using shared memory, just point the s_* variables to the
global memory */
char *s_tab = tab2;
char *s_dmat = dmat2;
#endif
#if defined(CUDA)
// sync point so all threads have loaded into shared memory
__syncthreads();
#endif
maxscore = -99999;
// each of the blockDim.x threads in the block does as many iterations
// as need to get to maxstart restarts
for (restart = 0; restart < maxstart; restart += blockDimx)
{
/* setup initial mapping */
thinit(c_qssetypes, n1, s_ssetypes, n2, lorder, ssemap, revmap,
&iState, mt);
score = tmscord(tab1, tab1_pitch, n1, s_tab, tab2_pitch, n2,
dmat1, dmat1_pitch,
(char *)s_dmat, dmat2_pitch,
ssemap);
if (score > maxscore)
{
maxscore = score;
icopy(n1, ssemap, bestmap);
}
temp = TEMP0;
for (iter = 0; iter < MAXITER; iter++)
{
/* generate neighbour state by picking random SSE in tab1 and
moving its mapping to a random SSE in tab2, maintaining
constraints */
#if defined(CUDA)
randnum = RandomGPU(&iState, mt);
#else
randnum = RandomHost();
#endif
ssei = (unsigned int)((randnum - EPS) * n1);
#if defined (DEBUG) && !defined(CUDA)
fprintf(stderr, "xxx %f %d\n", randnum, ssei);
#endif
if (lorder)
{
startj = ssemap[ssei];
k = ssei;
while (startj < 0 && k >= 0)
{
startj = ssemap[k];
k--;
}
if (startj < 0)
startj = n2;
if (ssei == n1-1)
endj = n2;
else if (ssemap[ssei+1] < 0)
{
endj = -1;
k = 1;
while (endj == -1 && ssei + k < n1)
{
endj = ssemap[ssei + k];
k++;
}
}
else
endj = ssemap[ssei+1];
}
else
{
startj = 0;
endj = n2;
}
newj = randtypeind(s_ssetypes, n2, startj,
c_qssetypes[ssei],
revmap, endj, &iState,mt);
#if defined(__DEVICE_EMULATION__) || (defined(DEBUG) && !defined(CUDA))
fprintf(stderr, "%d %d %d %d %d %d %d\n", tid , restart, iter, ssei, startj, endj, newj);
fprintf(stderr, "%d ssemap: ", tid);
for (int q = 0; q < n1; q++)
fprintf(stderr, "%d ", ssemap[q]);
fprintf(stderr, "\n");
#endif
oldj = ssemap[ssei];
delta = deltasd(tab1, tab1_pitch, n1, s_tab, tab2_pitch, n2,
dmat1, dmat1_pitch,
(char*)s_dmat, dmat2_pitch,
ssemap, ssei, oldj, newj);
#undef TESTING
#ifdef TESTING
#if defined(__DEVICE_EMULATION__) || !defined(CUDA)
int revnewmap[MAXDIM_KERNEL],ssenewmap[MAXDIM];
icopy(n1, ssemap, ssenewmap);
icopy(n2, revmap, revnewmap);
if (newj > -1)
{
ssenewmap[ssei] = newj;
if (oldj > -1)
revnewmap[oldj] = -1;
revnewmap[newj] = ssei;
}
else
{
/* the SSE was removed from the matching */
if (oldj > -1)
{
revnewmap[ssenewmap[ssei]] = -1;
revnewmap[oldj] = -1;
}
ssenewmap[ssei] = -1;
}
int fullscore = tmscord(tab1, tab1_pitch, n1, s_tab, tab2_pitch, n2,
dmat1, dmat1_pitch,
(char*)s_dmat, dmat2_pitch,
ssenewmap);
// fprintf(stderr, "zzz %d %d %d\n", delta, score+delta,fullscore);
assert(score + delta == fullscore);
#endif
#endif
newscore = score + delta;
if (newscore > maxscore)
{
maxscore = newscore;
if (lsoln)
{
icopy(n1, ssemap, bestmap);
if (newj > -1)
bestmap[ssei] = newj;
else
bestmap[ssei] = -1;
#if defined(__DEVICE_EMULATION__) || (defined(DEBUG) && !defined(CUDA))
fprintf(stderr, "NNN %d %d %d\n", ssei, oldj, newj);
fprintf(stderr, "%d bestmap: ", tid);
for (int q = 0; q < n1; q++)
fprintf(stderr, "%d ", bestmap[q]);
fprintf(stderr, "\n");
#endif
}
}
#if defined(__DEVICE_EMULATION__) || (defined(DEBUG) && !defined(CUDA))
fprintf(stderr, "%d %d %d %f %d %d %f\n",tid, restart, iter, temp, score, newscore,
expf((float)delta / temp));
#endif
#if defined(CUDA)
randnum = RandomGPU(&iState, mt);
#else
randnum = RandomHost();
#endif
if (expf((float)delta / temp) > randnum)
{
/* accept the move, update ssemap and revmap accordingly */
score = newscore;
if (newj > -1)
{
ssemap[ssei] = newj;
if (oldj > -1)
revmap[oldj] = -1;
revmap[newj] = ssei;
}
else
{
/* the SSE was removed from the matching */
if (oldj > -1)
{
revmap[ssemap[ssei]] = -1;
revmap[oldj] = -1;
}
ssemap[ssei] = -1;
}
}
temp *= ALPHA;
}
}
s_maxscores[threadIdxx] = maxscore;
#if defined(CUDA)
// synchronization point: now we need to find max score over each thread in
// block for that block's db structure.
__syncthreads();
#endif
// reduction (MAX) over threads in block to get max score for this db entry
// TODO make this a proper reduction operation instead of a
// loop in a single thread
if (threadIdxx == 0)
{
s_maxscore_threadid = 0;
blockmaxscore = s_maxscores[0];
for (i = 1; i < blockDimx; i++)
{
if (s_maxscores[i] > blockmaxscore)
{
blockmaxscore = s_maxscores[i];
s_maxscore_threadid = i;
}
}
#if defined(__DEVICE_EMULATION__) || (defined(DEBUG) && !defined(CUDA))
fprintf(stderr, "%d says maxscore is %d for %d\n", tid, blockmaxscore,dbi);
#endif
outscore[dbi] = blockmaxscore;
}
if (lsoln)
{
#if defined(CUDA)
// synchronization point: need to wait for threadid 0 to have found max
__syncthreads();
#endif
// Now that we have the best score and the thread that found it,
// THAT thread only will put its bestssemap as the output SSE map
if (threadIdxx == s_maxscore_threadid)
icopy(n1, bestmap, outssemap + dbi * MAXDIM);
}
}
}
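/*
 * Hypothetical host-side launch sketch (the real driver lives in the
 * accompanying host source, not in this kernel file); the kernel name depends
 * on the build flags, and grid/block sizes and the d_outscore/d_outssemap
 * buffers are illustrative only:
 *
 *   dim3 grid(128);   // blocks iterate over the database structures
 *   dim3 block(32);   // threads within a block parallelize the restarts
 *   hipLaunchKernelGGL(sa_tabsearch_gpu, grid, block, 0, 0,
 *                      dbsize, lorder, lsoln, maxstart,
 *                      d_tableaux, tableaux_extent, d_orders,
 *                      d_distmatrices, distmatrices_extent,
 *                      d_outscore, d_outssemap);
 */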
|
c7ff9107b52e378d3b9ae763cd43c5fd0bca7639.cu
|
/*****************************************************************************
*
* File: cudaSaTabsearch_kernel.cu
* Author: Alex Stivala
* Created: January 2010
*
* $Id: cudaSaTabsearch_kernel.cu 3557 2010-04-13 00:54:09Z alexs $
*
* CUDA kernel for simulated annealing tableau matching (discrete).
* This is a CUDA implementation of the FORTRAN subroutine TSAMTD,
* using a modified version of the CUDA SDK Mersenne Twister kernel
* to generate pseudorandom numbers.
*
* if CUDA preprocessor symbol is defined, this is the CUDA kernel version.
* __DEVICE_EMULATION__ may also be defined for this case (nvcc -deviceemu)
* in which case device emulation mode is being used
*
* Otherwise (CUDA symbol not defined), this builds a host (single threaded)
* version.
*
* if DEBUG (in which case either __DEVICE_EMULATION__ must be defined,
* or CUDA must not be defined), is defined then verbose stderr output
* is generated, and various assertions and checks are compiled in.
*
* If USE_SHARED_MEMORY is defined, then each block copies the tableau
* and distance matrix it is operating on from the db in global memory
* into the block shared memory and uses it there, to take advantage
* of faster (but very small) shared memory. Not using this allows
* larger structures to be used. Note that, even when this is not
* defined, the ssetypes and maxscores vectors are kept in shared
* memory (this does not limit the maximum size of db structures).
*
*****************************************************************************/
#if defined(CUDA)
#include <math_functions.h>
#endif
#include "saparams.h"
#if defined(__DEVICE_EMULATION__) || !defined(CUDA)
#include <stdio.h>
#include <assert.h>
#endif
#if !defined(CUDA)
#include <math.h>
#include <driver_types.h> /* for struct cudaPitchedPtr, cudaExtent */
#define __constant__
#define __shared__ static
#define __global__
#define __device__
#define MT_NN 1 /* not really used */
extern "C" float RandomHost(void);
#endif
#define EPS 1.1e-7 /* epsilon for making sure rand is < 1.0 */
#if defined(CUDA)
#if defined(USE_SHARED_MEMORY)
#define MAXDIM_KERNEL MAXDIM_GPU /*for shared memory,restrict to MAXDIM_GPU*/
#elif defined(SMALL_MAXDIM)
#define MAXDIM_KERNEL MAXDIM_GPU
#else
#define MAXDIM_KERNEL MAXDIM
#endif
#else
#define MAXDIM_KERNEL MAXDIM /* otherwise, use largest maxdim */
#endif
/*****************************************************************************
*
* __constant__ memory
*
* The query tableau and distance matrix is loaded into constant memory.
* These are MAXDIM not MAXDIM_KERNEL as constant memory is larger than
* shared memory so not so restricted (at the moment, on e.g. GTX285,
* constant memory is 64K but shared memory is only 16K per block).
*
*****************************************************************************/
#if !defined(CUDA)
/* tricky - we redefine these symbols so the host versions are different */
#define c_qn c_qn_host
#define c_qtab c_qtab_host
#define c_qdmat c_qdmat_host
#define c_qssetypes c_qssetypes_host
#else
/* nasty hack -- can't seem to be able to get constant memory in a separate
file, so forced to have 3 different versions of the constant for the
3 kernel versions. */
#if !defined(USE_SHARED_MEMORY)
#if defined(SMALL_MAXDIM)
#define c_qn c_qn_noshared_small
#define c_qtab c_qtab_noshared_small
#define c_qdmat c_qdmat_noshared_small
#define c_qssetypes c_qssetypes_noshared_small
#else
#define c_qn c_qn_noshared
#define c_qtab c_qtab_noshared
#define c_qdmat c_qdmat_noshared
#define c_qssetypes c_qssetypes_noshared
#endif
#endif
#endif
__constant__ int c_qn; // query structure size
__constant__ char c_qtab[MAXDIM*MAXDIM]; // query tableau
__constant__ float c_qdmat[MAXDIM*MAXDIM]; // query distance matrix
__constant__ char c_qssetypes[MAXDIM]; // main diagonal of c_qtab (query SSE types)
/*****************************************************************************
*
* __device__ functions: callable on GPU only, inlined
*
*****************************************************************************/
#if defined(CUDA)
// Fast GPU random number generator and Box-Muller transform
#include "MersenneTwister_kernel.cu"
#endif
/* Index into 2d m x n array stored in contiguous memory */
//#define INDEX2D(i,j,m,n) ( ((i)*(n) + (j)) )
/* Get char* to (i,j) element of 2d array A stored in contiguous
* memory with pitch (CUDA version of Fortran stride or leading
* dimension). NB we don't index like INDEX2D but return address as char*
* which must then be cast to appropriate type, since the address/pitch
* computations in CUDA are always done in units of bytes, so don't want
* C address computation using size of actual type */
#define GET2D(A,i,j,pitch,type) ( *((type *)((char *)(A) + (i)*(pitch) + (j)*sizeof(type) )) )
/*
*
* tscord - Tableau (discrete) matching score function
*
* Return the tableau matching score between two tableau entries
* x and y.
* The score is 2 if the tableau entries are equal, 1 if they are
* equal in only one position, else -2.
*
* Parameters:
* x, y - the two two-char tableau codes encoded as 4 bits per char
* as per parsetableaux.c
*
* Return value:
* tableau matching score for x and y
*/
__device__ int tscord(char x, char y)
{
char xhigh,xlow,yhigh,ylow;
int score;
xhigh = (x & 0xF0);
xlow = (x & 0x0F);
yhigh = (y & 0xF0);
ylow = (y & 0x0F);
score = ( xhigh == yhigh ? (xlow == ylow ? 2 : 1) :
(xlow == ylow ? 1 : -2) );
/*
if (xhigh == yhigh)
{
if (xlow == ylow)
score = 2;
else
score = 1;
}
else if (xlow == ylow)
score = 1;
else
score = -2;
*/
return score;
}
/*
* Compute the score for a given SSE matching between two structures
* given their tableaux (discrete version), and distance matrices.
*
* The score computed is
*
* \sum_{i=1}^{N_A} \sum_{k=1}^{N_A} \sum_{j=1}^{N_B} \sum_{l=1}^{N_B} \zeta(T^A_{ik}, T^B_{jl}) x_{ij} x_{kl}
*
* in the QIP formulation where x_{ij} is the binary indicator variable
* indicating SSE i in A matched with SSE j in B.
*
* But actually here we are representing the matching with the ssemap
* vector so can much more efficiently compute this in only
* O(N_A^2) with 2 nested loops over the ssemap vector rather than
* requiring O(N_A^2 N_B^2) with 4 nested loops in the naive implementation
* of the score computation using indicator variables (required only
* for using a general purpose QP solver, can do it more efficiently here).
*
* Furthermore, we can actually halve the computation since the tableaux
* matrices are symmetric by only iterating from k = i .. N_A
* inside the outer loop i = 1 .. N_A.
*
* Parameters:
* tab1 (input) encoded as two 4-bit char code
* Tableau for one structure. Symmetric.
*
* tab1_pitch (input) size_t
* pitch of tab1
*
* n1 (input) INTEGER
* Dimension of tab1 array.
*
* tab2 (input) encoded as two 4-bit char code
* Tableau for second structure. Symmetric.
*
* tab2_pitch (input) size_t
* pitch of tab2
*
* n2 (input) INTEGER
* Dimension of tab2 matrix.
*
* dmat1 (input) 2d float array
* SSE distance matrix for one structure. symmetric
*
* dmat1_pitch (input) size_t
* pitch of dmat1
*
* dmat2 (input) 2d float array
* SSE distance matrix for second structure. symmetric
*
* dmat2_pitch (input) size_t
* pitch of dmat2
*
* ssemap (input) int vector, dimension(n1)
* SSE map vector of dimension n1. Each ssemap(i) is the SSE index
* in tab2 that SSE i in tab1 is matched with.
*
*
* Return value:
* The tableau matching score for given mapping by ssemap.
*
*/
__device__ int tmscord(char *tab1, size_t tab1_pitch, int n1,
char *tab2, size_t tab2_pitch, int n2,
char *dmat1, size_t dmat1_pitch,
char *dmat2, size_t dmat2_pitch,
int ssemap[])
{
int i,j,k,l;
int score;
score = 0;
for (i = 0; i < n1; i++)
{
for (k = i + 1; k < n1; k++)
{
j = ssemap[i];
l = ssemap[k];
/* only add to score when both are mapped to something, and */
/* diagonal entries are SSE type not angle so don't use them either */
#if defined(__DEVICE_EMULATION__) || (defined(DEBUG) && !defined(CUDA))
assert(j == -1 || i != k && j != l);
#endif
if (j >= 0 && l >= 0)
{
/*
#if defined(__DEVICE_EMULATION__) || (defined(DEBUG) && !defined(CUDA))
fprintf(stderr, "%d %d %d %d %X %X\n", i,j,k,l,
GET2D(tab1,i,k,tab1_pitch,char),
GET2D(tab2,j,l,tab2_pitch,char));
fprintf(stderr, "%d %d %d %d %f %f (%f)\n",i,j,k,l,
GET2D(dmat1,i,k,dmat1_pitch,float) ,
GET2D(dmat2,j,l,dmat2_pitch,float) ,
fabsf(GET2D(dmat1,i,k,dmat1_pitch,float) - GET2D(dmat2,j,l,dmat2_pitch,float)) );
#endif
*/
/* don't add score when difference between SSE distances
exceeds threshold */
if (fabsf(GET2D(dmat1,i,k,dmat1_pitch,float) - GET2D(dmat2,j,l,dmat2_pitch,float)) <= MXSSED)
{
score += tscord(GET2D(tab1,i,k,tab1_pitch,char), GET2D(tab2,j,l,tab2_pitch,char));
}
}
}
}
return score;
}
/*
* deltasd -
*
* Compute the difference in score due to removing a particular
* matching of two SSEs and replacing it with a new one.
* We can do this in O(N_A) time rather than the O(N_A^2) required for
* computing the score from scratch as in tmscord.
*
*
* Parameters:
* tab1 (input) encoded as two 4-bit char code
* Tableau for one structure. Symmetric.
*
* tab1_pitch (input) size_t
* pitch of tab1
*
* n1 (input) INTEGER
* Dimension of tab1 array.
*
* tab2 (input) encoded as two 4-bit char code
* Tableau for second structure. Symmetric.
*
* tab2_pitch (input) size_t
* pitch of tab2
*
* n2 (input) INTEGER
* Dimension of tab2 matrix.
*
* dmat1 (input) 2d float array
* SSE distance matrix for one structure. symmetric
*
* dmat1_pitch (input) size_t
* pitch of dmat1
*
* dmat2 (input) 2d float array
* SSE distance matrix for second structure. symmetric
*
* dmat2_pitch (input) size_t
* pitch of dmat2
*
* ssemap (input) int vector, dimension(n1)
* SSE map vector of dimension n1. Each ssemap(i) is the SSE index
* in tab2 that SSE i in tab1 is matched with.
*
* sse_i (input) int
* SSE in tab1 that is being replaced with a new matching
*
* old_j (input) int
* SSE in tab2 of old matching
*
* new_j (input) int
* SSE in tab2 of new matching (matched to sse_i)
*
*
* Return value:
* The difference to add to the current score due to replacing
* the sse_i <-> old_j matching with the sse_i <-> new_j matching.
*
*/
__device__ int deltasd(char *tab1, size_t tab1_pitch, int n1,
char *tab2, size_t tab2_pitch, int n2,
char *dmat1, size_t dmat1_pitch,
char *dmat2, size_t dmat2_pitch,
int ssemap[],
int sse_i,
int old_j, int new_j)
{
int k,l;
int delta = 0;
float dmat1_i_k;
#if defined(__DEVICE_EMULATION__) || (defined(DEBUG) && !defined(CUDA))
fprintf(stderr,"aaa %d %d %d \n", sse_i, old_j, new_j);
#endif
for (k = 0; k < n1; k++)
{
l = ssemap[k];
if (l >= 0)
{
dmat1_i_k = GET2D(dmat1,sse_i,k,dmat1_pitch,float);
if (old_j >= 0 && l != old_j && k != sse_i && fabsf(dmat1_i_k - GET2D(dmat2,old_j,l,dmat2_pitch,float)) <= MXSSED)
delta -= tscord(GET2D(tab1,sse_i,k,tab1_pitch,char), GET2D(tab2,old_j,l,tab2_pitch,char));
#if defined(__DEVICE_EMULATION__) || (defined(DEBUG) && !defined(CUDA))
fprintf(stderr,"yyy %d %d %d %d %d\n", sse_i, old_j, new_j,k,l);
#endif
if (new_j >= 0 && l != new_j && k != sse_i && fabsf(dmat1_i_k - GET2D(dmat2,new_j,l,dmat2_pitch,float)) <= MXSSED)
delta += tscord(GET2D(tab1,sse_i,k,tab1_pitch,char), GET2D(tab2,new_j,l,tab2_pitch,char));
}
}
return delta;
}
/*
* Build the initial mapping of the two structures for heuristic
* tableaux matching algorithms.
*
* we make an initial matching where we just go along
* the sequence set match of same SSEs e.g. if 1st in query is helix,
* match that to first helix in db struture, and so on.
* (Unless LTYPE flag not set, then we don't care about SSE types and
* just go along sequence of SSEs).
* Then compute the score.
*
* Parameters:
*
*
* ssetypes1 (input) char vector length n1
* vector of SSE types in structure 1
*
* n1 (input) INTEGER
* Dimension of tab1 matrix
*
* ssetypes2 (input) char vector length n2
* vector of SSE types in structure 2
*
* n2 (input) INTEGER
* Dimension of tab2 matrix.
*
* lorder (input) LOGICAL
* if true, penalize matches between SSEs not maintaining sequence
* order between the tableaux i.e. if i < k and j >= l for i,k
* indices in tab1 and j,l indices in tab2.
*
*
* ssemap (output) INTEGER vector, dimension (n1)
* solution SSE map vector of dimension n1.
* Each ssemap(i) is the SSE index
* in tab2 that SSE i in tab1 is matched with.
*
* revmap (output) INTEGER vector, dimension(n2)
* reverse ssemap: revmap(j) for j index in tab2 is the index i
* in tab1 that matches that sse i.e. if ssemap(i) = j then
* revmap(j) = ssemap(i) and vice versa, for quick lookup of what
* is matched so we can easily check that one-to-one mapping maintained
*
* istate (input/output) State for RNG
* mt (input/output) State vector for RNG
*
* Return value:
* on exit, status of the computation
* = 0 : successful exit
* = 1 : cannot set up initial ssemap with both lorder and ltype
*/
__device__ int thinit(char ssetypes1[], int n1,
char ssetypes2[], int n2,
int lorder,
int ssemap[], int revmap[],
int *iState, unsigned int mt[MT_NN])
{
int i,j;
int info = 0;
float randnum;
#if defined(__DEVICE_EMULATION__) || (defined(DEBUG) && !defined(CUDA))
for(int k = 0; k < n1; k++)
fprintf(stderr,"%02X ", ssetypes1[k]);
fprintf(stderr, "\n");
for(int k = 0; k < n2; k++)
fprintf(stderr,"%02X ", ssetypes2[k]);
fprintf(stderr, "\n");
#endif
/* initialize ssemap to all -1 meaning no match for each sse */
for (i = 0; i < n1; i++)
ssemap[i] = -1;
for (j = 0; j < n2; j++)
revmap[j] = -1;
/* initial SSE map set by matching along sequence, only matching SSEs
* of the same type if the LTYPE flag is set.
*/
j = 0;
for (i = 0; i < n1; i++)
{
#if defined(CUDA)
randnum = RandomGPU(iState, mt);
#else
randnum = RandomHost();
#endif
if (randnum < INIT_MATCHPROB)
{
while (j < n2 && ssetypes1[i] != ssetypes2[j])
j++;
if (j >= n2)
{
/* not all SSEs in tab1 are mapped, but that's OK */
info = 0;
return info;
}
else
{
ssemap[i] = j;
revmap[j] = i;
j++;
}
}
}
return info;
}
/*
* find the index of first SSE of same type in tableaux that is not
* already mapped or -1 if not found
*
* Parameters:
*
* ssetypesvec (input) char vector length n
* vector of SSE types in structure
*
* n (input) INTEGER
* Dimension of tableaux, length of ssetypesvec
*
* startind (input) INTEGER
* SSE index to start at in tab
*
* ssetype (input) CHARACTER*2
* SSE type as two character string 'xa' etc.
*
* smap (input) INTEGER vector, dimension(n1)
* each smap(i) is index in other tableau it is already mapped
* to, or -1 for not mapped.
*
* endind (input) INTEGER
* last SSE index to consider in tab
*
* istate (input/output) State for RNG
* mt (input/output) State vector for RNG
*/
__device__ int randtypeind(char ssetypesvec[], int n,
int startind, char ssetype, int smap[], int endind,
int *iState, unsigned int mt[MT_NN])
{
int i,indi,rti;
int indlist[MAXDIM];
float randnum;
unsigned int randidx;
i = startind;
indi = 0;
rti = -1;
for (i = startind; i < endind; i++)
{
#if defined(__DEVICE_EMULATION__) || (defined(DEBUG) && !defined(CUDA))
assert(i >= 0);
assert(i < n);
#endif
if (ssetypesvec[i] == ssetype && smap[i] < 0)
indlist[indi++] = i;
}
if (indi == 1)
rti = indlist[0];
else if (indi > 1)
{
#if defined(CUDA)
randnum = RandomGPU(iState, mt);
#else
randnum = RandomHost();
#endif
randidx = (unsigned int)((randnum - EPS) * indi);
rti = indlist[randidx];
}
return rti;
}
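/*
 * Added note: randtypeind scans the half-open range [startind, endind) and
 * collects every SSE whose type equals ssetype and which is still unmapped
 * (smap[i] < 0).  With exactly one candidate it is returned directly; with
 * several, one is chosen uniformly at random via
 * (unsigned)((randnum - EPS) * count); with none, -1 is returned, which the
 * caller treats as "remove this SSE from the matching".
 */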
/*
* integer vector copy y <- x
*
*/
__device__ void icopy(int n, int x[], int y[])
{
int i;
for (i = 0; i < n; i++)
y[i] = x[i];
}
/*****************************************************************************
*
* __global__ functions: GPU kernels, callable from host
*
*****************************************************************************/
/*
* CUDA GPU kernel for tableau matching using simulated annealing.
*
* We make an initial matching where we just go along
* the sequence set match of same SSEs e.g. if 1st in query is helix,
 * match that to the first helix in the db structure, and so on.
 * (If the LTYPE flag is not set, we don't care about SSE types and
 * just go along the sequence of SSEs.)
* Then compute the score.
*
* Then we use simulated annealing to improve the score. At each
* iteration a random SSE is chosen to be remapped to a random
* other SSE (obeying constraints that are set) or
* mapped to no SSE in the other structure.
*
*
* "Embarrasingly parallel" version: just do all the loops in here,
* each thread does a different database structure.
 * The query tableau and distance matrix are placed in constant memory
* for faster access (constant memory is cached but very limited size:
* we certainly can't put the whole db of structures there for instance).
*
* Parameters:
*
* dbsize (input) INTEGER
 * number of structures in database
*
* lorder (input) LOGICAL
* if true, penalize matches between SSEs not maintaining sequence
* order between the tableaux i.e. if i < k and j >= l for i,k
* indices in tab1 and j,l indices in tab2.
*
* lsoln (input) LOGICAL
* if true, return the SSE mapping for the best solution found.
*
 * maxstart (input) INTEGER
 * number of restarts (iterations of the cooling schedule).
* Should be a multiple of blocksize.
*
* d_qdmat (input) float array, dimension (n1,n1)
* SSE distance matrix for query structure. symmetric
*
* d_qdmat_pitch (input) size_t
* pitch of d_qdmat
*
* d_tableaux (input) pointer to char arrays, CUDA Pitched Pointer
* Pointer to database of tableaux
*
* tableaux_extent (input) cudaExtent
* Extent structure for d_tableaux
*
 * d_orders (input) pointer to ints
* Pointer to database of orders (order of each db tableau)
*
 * d_distmatrices (input) pitched pointer to float arrays
* Pointer to database of distance matrices
*
* distmatrices_extent (input) cudaExtent
* Extent structure for d_distmatrices
*
* outscore (output) INTEGER vector, dimension (dbsize)
* scores of matching query with each db structure
*
 * outssemap (output) INTEGER array, dimension (dbsize, n1)
* solution SSE map vector of dimension n1 for each db structure
* Each ssemap(d,i) is the SSE index
* in dbentry d that SSE i in query is matched with.
*
*/
#if defined(CUDA)
#if defined(USE_SHARED_MEMORY)
__global__ void sa_tabsearch_gpu
#elif defined(SMALL_MAXDIM)
__global__ void sa_tabsearch_gpu_noshared_small
#else
__global__ void sa_tabsearch_gpu_noshared
#endif
#else
void sa_tabsearch_host
#endif
(int dbsize,
int lorder,
int lsoln,
int maxstart,
cudaPitchedPtr d_tableaux,
cudaExtent tableaux_extent,
int *d_orders,
cudaPitchedPtr d_distmatrices,
cudaExtent distmatrices_extent,
int *outscore,
int *outssemap)
{
/*
*
* __shared__ memory
*
* Each block of threads copies one database tableau and distance matrix
* from the global memory into shared memory. Each thread in the block
 * runs the simulated annealing schedule (with different RNG) on the query
 * and this shared tableau+distmatrix, so the 'restarts' are parallelized
* within the block.
*
 * Note the shared memory is very restricted in size (16K) so we can
* only fit limited size structures in it.
*
*/
#if defined(USE_SHARED_MEMORY)
__shared__ char s_tab[MAXDIM_KERNEL*MAXDIM_KERNEL];
__shared__ float s_dmat[MAXDIM_KERNEL*MAXDIM_KERNEL];
#endif
__shared__ char s_ssetypes[MAXDIM_KERNEL]; // TODO maybe shouldn't use this in shared
__shared__ int s_maxscores[128]; // FIXME should be max threads in block
__shared__ int s_maxscore_threadid;
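  /*
   * Added note: rough per-block shared memory footprint, assuming 4-byte
   * float/int.  With USE_SHARED_MEMORY the block holds
   *   s_tab  : MAXDIM_KERNEL*MAXDIM_KERNEL bytes
   *   s_dmat : MAXDIM_KERNEL*MAXDIM_KERNEL*4 bytes
   * plus MAXDIM_KERNEL + 128*4 + 4 bytes for s_ssetypes, s_maxscores and
   * s_maxscore_threadid, i.e. roughly 5*MAXDIM_KERNEL^2 + MAXDIM_KERNEL + 516
   * bytes.  A hypothetical MAXDIM_KERNEL of 50 would need about 13 KB, which
   * is why the comment above stresses the 16 KB limit.
   */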
/*
* automatic (register and local) memory
*/
// const int THREAD_N = blockDim.x * gridDim.x; // total number of threads
#if defined(CUDA)
const int tid = blockDim.x * blockIdx.x + threadIdx.x; // thread id
const int blockid = blockIdx.x; // block id
const int gridDimx = gridDim.x; // number of blocks in grid
const int blockDimx = blockDim.x; // number of threads in block
const int threadIdxx = threadIdx.x; // thread id in the block
#else
const int tid = 0;
const int blockid = 0;
const int gridDimx = 1;
const int blockDimx = 1;
const int threadIdxx = 0;
#endif
int revmap[MAXDIM_KERNEL]; /* reverse ssemap: revmap(j) for j index in
tab2 is the index i in tab1 that matches that
                             sse, i.e. if ssemap(i) = j then revmap(j) =
                             i, and vice versa, for quick lookup of
what is matched so we can easily check that
one-to-one mapping maintained revmap has
dimension (n2) */
  int bestmap[MAXDIM];       /* best ssemap found. this has dimension (n1) */
int ssemap[MAXDIM];
int maxscore,score,newscore;
int iter;
float temp;
float randnum;
int startj,endj,k,oldj,newj;
unsigned int ssei;
char *tab1;
char *tab2;
char *dmat1; // we use char* not float* to do pitched pointer arithmetic
char *dmat2;
int n1,n2;
int restart;
int dbi;
int iState;
unsigned int mt[MT_NN];
size_t tab1_pitch, tab2_pitch,dmat1_pitch,dmat2_pitch;
int i,j;
int blockmaxscore;
int delta;
#if defined(__DEVICE_EMULATION__)
fprintf(stderr, "running in device emulation mode\n");
fprintf(stderr, "sizeof(int) == %d\n", sizeof(int));
#endif
#if !defined(CUDA)
fprintf(stderr, "running on host\n");
#endif
#if defined(CUDA)
InitRandomGPU(&iState, mt);
#endif
n1 = c_qn;
tab1 = c_qtab;
tab1_pitch = MAXDIM; /* NB MAXDIM not MAXDIM_KERNEL, see comments on c_qtab */
dmat1 = (char*)c_qdmat;
dmat1_pitch = MAXDIM * sizeof(float);
  // each of the gridDim.x blocks handles as many db entries as needed to cover the whole database
for (dbi = blockid; dbi < dbsize; dbi += gridDimx)
{
n2 = d_orders[dbi];
    // get the tableau array for db entry index dbi using pitched pointer
char *d_tableauxPtr = (char *)d_tableaux.ptr;
size_t tableauxPitch = d_tableaux.pitch;
size_t tableauxSlicePitch = tableauxPitch * tableaux_extent.height;
char *tableauxSlice = d_tableauxPtr + dbi * tableauxSlicePitch;
tab2 = tableauxSlice;
tab2_pitch = tableauxPitch;
// and similarly for distmatrices (2d float arrays)
char *d_distmatricesPtr = (char *)d_distmatrices.ptr;
size_t distmatricesPitch = d_distmatrices.pitch;
size_t distmatricesSlicePitch = distmatricesPitch * distmatrices_extent.height;
char *distmatricesSlice = d_distmatricesPtr + dbi * distmatricesSlicePitch;
dmat2 = distmatricesSlice;
dmat2_pitch = distmatricesPitch;
// set the s_ssetypes vector as main diagonal of this db instance tableau
// in parallel (each thread in block does one element)
for (j = threadIdxx; j < n2; j += blockDimx)
s_ssetypes[j] = GET2D(tab2,j,j,tab2_pitch,char); // use global not shared so no sync required
#if defined(USE_SHARED_MEMORY)
//
// parallel copy (each thread in block does as many elements as needed)
// of the db entry for this block into the shared memory for the block
// we'll have each thread do one row of the copy (may leave threads idle
// since likely to have more threads in block than rows in tableau).
//
for (i = threadIdxx; i < n2; i += blockDimx)
for (j = 0; j < n2; j++)
{
*(s_tab + i*MAXDIM_KERNEL + j) = GET2D(tab2,i,j,tab2_pitch,char);
*(s_dmat + i*MAXDIM_KERNEL + j) = GET2D(dmat2,i,j,dmat2_pitch,float);
}
tab2_pitch = MAXDIM_KERNEL; /* pitch is now leading dimension in shared */
dmat2_pitch = MAXDIM_KERNEL*sizeof(float);
#else
/* not using shared memory, just point the s_* variables to the
global memory */
char *s_tab = tab2;
char *s_dmat = dmat2;
#endif
#if defined(CUDA)
// sync point so all threads have loaded into shared memory
__syncthreads();
#endif
maxscore = -99999;
// each of the blockDim.x threads in the block does as many iterations
    // as needed to get to maxstart restarts
for (restart = 0; restart < maxstart; restart += blockDimx)
{
/* setup initial mapping */
thinit(c_qssetypes, n1, s_ssetypes, n2, lorder, ssemap, revmap,
&iState, mt);
score = tmscord(tab1, tab1_pitch, n1, s_tab, tab2_pitch, n2,
dmat1, dmat1_pitch,
(char *)s_dmat, dmat2_pitch,
ssemap);
if (score > maxscore)
{
maxscore = score;
icopy(n1, ssemap, bestmap);
}
temp = TEMP0;
for (iter = 0; iter < MAXITER; iter++)
{
      /* generate neighbour state by picking a random SSE in tab1 and
         moving its mapping to a random SSE in tab2, maintaining
         constraints */
#if defined(CUDA)
randnum = RandomGPU(&iState, mt);
#else
randnum = RandomHost();
#endif
ssei = (unsigned int)((randnum - EPS) * n1);
#if defined (DEBUG) && !defined(CUDA)
fprintf(stderr, "xxx %f %d\n", randnum, ssei);
#endif
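      /*
       * Added note: when lorder is set, [startj, endj) is the window of db
       * SSE positions that keeps the matching sequence-ordered: startj is
       * the current mapping of ssei or, failing that, of the nearest
       * preceding mapped query SSE (n2 if none is mapped), and endj is the
       * mapping of the nearest following mapped query SSE (n2 if ssei is the
       * last query SSE, or -1, giving an empty window, if nothing after ssei
       * is mapped).  An empty window makes randtypeind return -1, i.e. the
       * move unmaps ssei.
       */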
if (lorder)
{
startj = ssemap[ssei];
k = ssei;
while (startj < 0 && k >= 0)
{
startj = ssemap[k];
k--;
}
if (startj < 0)
startj = n2;
if (ssei == n1-1)
endj = n2;
else if (ssemap[ssei+1] < 0)
{
endj = -1;
k = 1;
while (endj == -1 && ssei + k < n1)
{
endj = ssemap[ssei + k];
k++;
}
}
else
endj = ssemap[ssei+1];
}
else
{
startj = 0;
endj = n2;
}
newj = randtypeind(s_ssetypes, n2, startj,
c_qssetypes[ssei],
revmap, endj, &iState,mt);
#if defined(__DEVICE_EMULATION__) || (defined(DEBUG) && !defined(CUDA))
fprintf(stderr, "%d %d %d %d %d %d %d\n", tid , restart, iter, ssei, startj, endj, newj);
fprintf(stderr, "%d ssemap: ", tid);
for (int q = 0; q < n1; q++)
fprintf(stderr, "%d ", ssemap[q]);
fprintf(stderr, "\n");
#endif
oldj = ssemap[ssei];
delta = deltasd(tab1, tab1_pitch, n1, s_tab, tab2_pitch, n2,
dmat1, dmat1_pitch,
(char*)s_dmat, dmat2_pitch,
ssemap, ssei, oldj, newj);
#undef TESTING
#ifdef TESTING
#if defined(__DEVICE_EMULATION__) || !defined(CUDA)
int revnewmap[MAXDIM_KERNEL],ssenewmap[MAXDIM];
icopy(n1, ssemap, ssenewmap);
icopy(n2, revmap, revnewmap);
if (newj > -1)
{
ssenewmap[ssei] = newj;
if (oldj > -1)
revnewmap[oldj] = -1;
revnewmap[newj] = ssei;
}
else
{
/* the SSE was removed from the matching */
if (oldj > -1)
{
revnewmap[ssenewmap[ssei]] = -1;
revnewmap[oldj] = -1;
}
ssenewmap[ssei] = -1;
}
int fullscore = tmscord(tab1, tab1_pitch, n1, s_tab, tab2_pitch, n2,
dmat1, dmat1_pitch,
(char*)s_dmat, dmat2_pitch,
ssenewmap);
// fprintf(stderr, "zzz %d %d %d\n", delta, score+delta,fullscore);
assert(score + delta == fullscore);
#endif
#endif
newscore = score + delta;
if (newscore > maxscore)
{
maxscore = newscore;
if (lsoln)
{
icopy(n1, ssemap, bestmap);
if (newj > -1)
bestmap[ssei] = newj;
else
bestmap[ssei] = -1;
#if defined(__DEVICE_EMULATION__) || (defined(DEBUG) && !defined(CUDA))
fprintf(stderr, "NNN %d %d %d\n", ssei, oldj, newj);
fprintf(stderr, "%d bestmap: ", tid);
for (int q = 0; q < n1; q++)
fprintf(stderr, "%d ", bestmap[q]);
fprintf(stderr, "\n");
#endif
}
}
#if defined(__DEVICE_EMULATION__) || (defined(DEBUG) && !defined(CUDA))
fprintf(stderr, "%d %d %d %f %d %d %f\n",tid, restart, iter, temp, score, newscore,
expf((float)delta / temp));
#endif
#if defined(CUDA)
randnum = RandomGPU(&iState, mt);
#else
randnum = RandomHost();
#endif
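      /*
       * Added note: standard Metropolis acceptance with geometric cooling.
       * For delta >= 0, expf(delta/temp) >= 1 so the move is always accepted;
       * a worsening move is accepted with probability expf(delta/temp),
       * which shrinks as temp decays by the factor ALPHA each iteration
       * (temp *= ALPHA below).
       */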
if (expf((float)delta / temp) > randnum)
{
/* accept the move, update ssemap and revmap accordingly */
score = newscore;
if (newj > -1)
{
ssemap[ssei] = newj;
if (oldj > -1)
revmap[oldj] = -1;
revmap[newj] = ssei;
}
else
{
/* the SSE was removed from the matching */
if (oldj > -1)
{
revmap[ssemap[ssei]] = -1;
revmap[oldj] = -1;
}
ssemap[ssei] = -1;
}
}
temp *= ALPHA;
}
}
s_maxscores[threadIdxx] = maxscore;
#if defined(CUDA)
// synchronization point: now we need to find max score over each thread in
// block for that block's db structure.
__syncthreads();
#endif
    // reduction (MAX) over threads in block to get max score for this block's db structure
// TODO make this a proper reduction operation instead of a
// loop in a single thread
if (threadIdxx == 0)
{
s_maxscore_threadid = 0;
blockmaxscore = s_maxscores[0];
for (i = 1; i < blockDimx; i++)
{
if (s_maxscores[i] > blockmaxscore)
{
blockmaxscore = s_maxscores[i];
s_maxscore_threadid = i;
}
}
#if defined(__DEVICE_EMULATION__) || (defined(DEBUG) && !defined(CUDA))
fprintf(stderr, "%d says maxscore is %d for %d\n", tid, blockmaxscore,dbi);
#endif
outscore[dbi] = blockmaxscore;
}
if (lsoln)
{
#if defined(CUDA)
// synchronization point: need to wait for threadid 0 to have found max
__syncthreads();
#endif
// Now that we have the best score and the thread that found it,
      // THAT thread only will put its bestmap as the output SSE map
if (threadIdxx == s_maxscore_threadid)
icopy(n1, bestmap, outssemap + dbi * MAXDIM);
}
}
}
|
85240e937e5e8a6e899b448c24b6a8e1ff4454b7.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#if CUDART_VERSION >= 10010
#include <common/cudart_utils.h>
#include <gtest/gtest.h>
#include <cuda_utils.cuh>
#include <linalg/eig.cuh>
#include <random/rng.cuh>
#include "test_utils.h"
namespace MLCommon {
namespace LinAlg {
template <typename T>
struct EigSelInputs {
T tolerance;
int len;
int n_row;
int n_col;
unsigned long long int seed;
int n;
};
template <typename T>
::std::ostream &operator<<(::std::ostream &os, const EigSelInputs<T> &dims) {
return os;
}
template <typename T>
class EigSelTest : public ::testing::TestWithParam<EigSelInputs<T>> {
protected:
void SetUp() override {
CUSOLVER_CHECK(hipsolverDnCreate(&cusolverH));
CUDA_CHECK(hipStreamCreate(&stream));
std::shared_ptr<deviceAllocator> allocator(
new raft::mr::device::default_allocator);
params = ::testing::TestWithParam<EigSelInputs<T>>::GetParam();
int len = params.len;
allocate(cov_matrix, len);
T cov_matrix_h[] = {1.0, 0.9, 0.81, 0.729, 0.9, 1.0, 0.9, 0.81,
0.81, 0.9, 1.0, 0.9, 0.729, 0.81, 0.9, 1.0};
ASSERT(len == 16, "This test only works with 4x4 matrices!");
updateDevice(cov_matrix, cov_matrix_h, len, stream);
allocate(eig_vectors, 12);
allocate(eig_vals, params.n_col);
T eig_vectors_ref_h[] = {-0.5123, 0.4874, 0.4874, -0.5123, 0.6498, 0.2789,
-0.2789, -0.6498, 0.4874, 0.5123, 0.5123, 0.4874};
T eig_vals_ref_h[] = {0.1024, 0.3096, 3.5266, 3.5266};
allocate(eig_vectors_ref, 12);
allocate(eig_vals_ref, params.n_col);
updateDevice(eig_vectors_ref, eig_vectors_ref_h, 12, stream);
updateDevice(eig_vals_ref, eig_vals_ref_h, 4, stream);
eigSelDC(cov_matrix, params.n_row, params.n_col, 3, eig_vectors, eig_vals,
EigVecMemUsage::OVERWRITE_INPUT, cusolverH, stream, allocator);
}
void TearDown() override {
CUDA_CHECK(hipFree(cov_matrix));
CUDA_CHECK(hipFree(eig_vectors));
CUDA_CHECK(hipFree(eig_vals));
CUDA_CHECK(hipFree(eig_vectors_ref));
CUDA_CHECK(hipFree(eig_vals_ref));
CUSOLVER_CHECK(hipsolverDnDestroy(cusolverH));
CUDA_CHECK(hipStreamDestroy(stream));
}
protected:
EigSelInputs<T> params;
T *cov_matrix, *eig_vectors, *eig_vectors_ref, *eig_vals, *eig_vals_ref;
hipsolverDnHandle_t cusolverH = NULL;
hipStream_t stream;
};
const std::vector<EigSelInputs<float>> inputsf2 = {
{0.001f, 4 * 4, 4, 4, 1234ULL, 256}};
const std::vector<EigSelInputs<double>> inputsd2 = {
{0.001, 4 * 4, 4, 4, 1234ULL, 256}};
typedef EigSelTest<float> EigSelTestValF;
TEST_P(EigSelTestValF, Result) {
ASSERT_TRUE(devArrMatch(eig_vals_ref, eig_vals, params.n_col,
CompareApproxAbs<float>(params.tolerance)));
}
typedef EigSelTest<double> EigSelTestValD;
TEST_P(EigSelTestValD, Result) {
ASSERT_TRUE(devArrMatch(eig_vals_ref, eig_vals, params.n_col,
CompareApproxAbs<double>(params.tolerance)));
}
typedef EigSelTest<float> EigSelTestVecF;
TEST_P(EigSelTestVecF, Result) {
ASSERT_TRUE(devArrMatch(eig_vectors_ref, eig_vectors, 12,
CompareApproxAbs<float>(params.tolerance)));
}
typedef EigSelTest<double> EigSelTestVecD;
TEST_P(EigSelTestVecD, Result) {
ASSERT_TRUE(devArrMatch(eig_vectors_ref, eig_vectors, 12,
CompareApproxAbs<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(EigSelTest, EigSelTestValF,
::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_CASE_P(EigSelTest, EigSelTestValD,
::testing::ValuesIn(inputsd2));
INSTANTIATE_TEST_CASE_P(EigSelTest, EigSelTestVecF,
::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_CASE_P(EigSelTest, EigSelTestVecD,
::testing::ValuesIn(inputsd2));
} // end namespace LinAlg
} // end namespace MLCommon
#endif
|
85240e937e5e8a6e899b448c24b6a8e1ff4454b7.cu
|
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#if CUDART_VERSION >= 10010
#include <common/cudart_utils.h>
#include <gtest/gtest.h>
#include <cuda_utils.cuh>
#include <linalg/eig.cuh>
#include <random/rng.cuh>
#include "test_utils.h"
namespace MLCommon {
namespace LinAlg {
template <typename T>
struct EigSelInputs {
T tolerance;
int len;
int n_row;
int n_col;
unsigned long long int seed;
int n;
};
template <typename T>
::std::ostream &operator<<(::std::ostream &os, const EigSelInputs<T> &dims) {
return os;
}
template <typename T>
class EigSelTest : public ::testing::TestWithParam<EigSelInputs<T>> {
protected:
void SetUp() override {
CUSOLVER_CHECK(cusolverDnCreate(&cusolverH));
CUDA_CHECK(cudaStreamCreate(&stream));
std::shared_ptr<deviceAllocator> allocator(
new raft::mr::device::default_allocator);
params = ::testing::TestWithParam<EigSelInputs<T>>::GetParam();
int len = params.len;
allocate(cov_matrix, len);
T cov_matrix_h[] = {1.0, 0.9, 0.81, 0.729, 0.9, 1.0, 0.9, 0.81,
0.81, 0.9, 1.0, 0.9, 0.729, 0.81, 0.9, 1.0};
ASSERT(len == 16, "This test only works with 4x4 matrices!");
updateDevice(cov_matrix, cov_matrix_h, len, stream);
allocate(eig_vectors, 12);
allocate(eig_vals, params.n_col);
T eig_vectors_ref_h[] = {-0.5123, 0.4874, 0.4874, -0.5123, 0.6498, 0.2789,
-0.2789, -0.6498, 0.4874, 0.5123, 0.5123, 0.4874};
T eig_vals_ref_h[] = {0.1024, 0.3096, 3.5266, 3.5266};
allocate(eig_vectors_ref, 12);
allocate(eig_vals_ref, params.n_col);
updateDevice(eig_vectors_ref, eig_vectors_ref_h, 12, stream);
updateDevice(eig_vals_ref, eig_vals_ref_h, 4, stream);
eigSelDC(cov_matrix, params.n_row, params.n_col, 3, eig_vectors, eig_vals,
EigVecMemUsage::OVERWRITE_INPUT, cusolverH, stream, allocator);
}
void TearDown() override {
CUDA_CHECK(cudaFree(cov_matrix));
CUDA_CHECK(cudaFree(eig_vectors));
CUDA_CHECK(cudaFree(eig_vals));
CUDA_CHECK(cudaFree(eig_vectors_ref));
CUDA_CHECK(cudaFree(eig_vals_ref));
CUSOLVER_CHECK(cusolverDnDestroy(cusolverH));
CUDA_CHECK(cudaStreamDestroy(stream));
}
protected:
EigSelInputs<T> params;
T *cov_matrix, *eig_vectors, *eig_vectors_ref, *eig_vals, *eig_vals_ref;
cusolverDnHandle_t cusolverH = NULL;
cudaStream_t stream;
};
const std::vector<EigSelInputs<float>> inputsf2 = {
{0.001f, 4 * 4, 4, 4, 1234ULL, 256}};
const std::vector<EigSelInputs<double>> inputsd2 = {
{0.001, 4 * 4, 4, 4, 1234ULL, 256}};
typedef EigSelTest<float> EigSelTestValF;
TEST_P(EigSelTestValF, Result) {
ASSERT_TRUE(devArrMatch(eig_vals_ref, eig_vals, params.n_col,
CompareApproxAbs<float>(params.tolerance)));
}
typedef EigSelTest<double> EigSelTestValD;
TEST_P(EigSelTestValD, Result) {
ASSERT_TRUE(devArrMatch(eig_vals_ref, eig_vals, params.n_col,
CompareApproxAbs<double>(params.tolerance)));
}
typedef EigSelTest<float> EigSelTestVecF;
TEST_P(EigSelTestVecF, Result) {
ASSERT_TRUE(devArrMatch(eig_vectors_ref, eig_vectors, 12,
CompareApproxAbs<float>(params.tolerance)));
}
typedef EigSelTest<double> EigSelTestVecD;
TEST_P(EigSelTestVecD, Result) {
ASSERT_TRUE(devArrMatch(eig_vectors_ref, eig_vectors, 12,
CompareApproxAbs<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(EigSelTest, EigSelTestValF,
::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_CASE_P(EigSelTest, EigSelTestValD,
::testing::ValuesIn(inputsd2));
INSTANTIATE_TEST_CASE_P(EigSelTest, EigSelTestVecF,
::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_CASE_P(EigSelTest, EigSelTestVecD,
::testing::ValuesIn(inputsd2));
} // end namespace LinAlg
} // end namespace MLCommon
#endif
|
023bd9c431c48373a1688faba3f943c54ae23f63.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// HPSC Final Report
// Name: Nishanth Baskaran
// Student id: 19M15017
#include <iostream>
#include <vector>
#include <chrono>
#include "solver.h"
using namespace std;
int main()
{
const int nx = 41; const int ny = 41;
const int nt = 700; const int nit = 50;
const double L = 2.0;
const double dx = L/(nx-1);
const double dy = L/(ny-1);
const double rho = 1.0;
const double nu = 0.1;
const double dt = 0.001;
int size = nx*ny*sizeof(double);
double *u, *un, *v, *vn, *p, *pn, *b;
hipMallocManaged(&u,size); hipMallocManaged(&un,size);
hipMallocManaged(&v,size); hipMallocManaged(&vn,size);
hipMallocManaged(&p,size); hipMallocManaged(&pn,size);
hipMallocManaged(&b,size);
initialize(u,un,v,vn,p,pn,b,nx,ny);
dim3 threadsPerBlock(128,1);
dim3 blockNumber ((nx+threadsPerBlock.x - 1)/threadsPerBlock.x, (ny+threadsPerBlock.y - 1)/threadsPerBlock.y);
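  // Added note: ceiling division sizes the grid so the 128x1 blocks cover all
  // nx*ny grid points; surplus threads are assumed to be range-checked inside
  // the kernels declared in solver.h.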
auto t_initial = chrono::steady_clock::now();
for (int iter = 0; iter < nt; iter++)
{
hipLaunchKernelGGL(( build_up_b), dim3(blockNumber),dim3(threadsPerBlock), 0, 0, b,u,v,rho,dt,dx,dy,nx,ny);
hipDeviceSynchronize();
for (int p_iter = 0; p_iter < nit; p_iter++)
{
hipLaunchKernelGGL(( pressure_poisson), dim3(blockNumber),dim3(threadsPerBlock), 0, 0, p,pn,b,rho,dt,dx,dy,nx,ny);
hipLaunchKernelGGL(( boundary_pressure), dim3(blockNumber),dim3(threadsPerBlock), 0, 0, p,nx,ny);
hipLaunchKernelGGL(( copy_function), dim3(blockNumber),dim3(threadsPerBlock), 0, 0, pn,p,nx,ny);
hipDeviceSynchronize();
}
hipLaunchKernelGGL(( velocity_solver), dim3(blockNumber),dim3(threadsPerBlock), 0, 0, u,un,v,vn,p,pn,b,rho,nu,dt,dx,dy,nx,ny);
hipLaunchKernelGGL(( boundary_velocity), dim3(blockNumber),dim3(threadsPerBlock), 0, 0, u,v,nx,ny);
hipLaunchKernelGGL(( copy_function), dim3(blockNumber),dim3(threadsPerBlock), 0, 0, un,u,nx,ny);
hipLaunchKernelGGL(( copy_function), dim3(blockNumber),dim3(threadsPerBlock), 0, 0, vn,v,nx,ny);
hipDeviceSynchronize();
}
auto t_final = chrono::steady_clock::now();
double time = chrono::duration<double>(t_final-t_initial).count();
cout << "time = " << time << endl;
save_result(u,v,p,nx,ny);
hipFree(u); hipFree(un); hipFree(v); hipFree(vn); hipFree(p); hipFree(pn); hipFree(b);
return 0;
}
|
023bd9c431c48373a1688faba3f943c54ae23f63.cu
|
// HPSC Final Report
// Name: Nishanth Baskaran
// Student id: 19M15017
#include <iostream>
#include <vector>
#include <chrono>
#include "solver.h"
using namespace std;
int main()
{
const int nx = 41; const int ny = 41;
const int nt = 700; const int nit = 50;
const double L = 2.0;
const double dx = L/(nx-1);
const double dy = L/(ny-1);
const double rho = 1.0;
const double nu = 0.1;
const double dt = 0.001;
int size = nx*ny*sizeof(double);
double *u, *un, *v, *vn, *p, *pn, *b;
cudaMallocManaged(&u,size); cudaMallocManaged(&un,size);
cudaMallocManaged(&v,size); cudaMallocManaged(&vn,size);
cudaMallocManaged(&p,size); cudaMallocManaged(&pn,size);
cudaMallocManaged(&b,size);
initialize(u,un,v,vn,p,pn,b,nx,ny);
dim3 threadsPerBlock(128,1);
dim3 blockNumber ((nx+threadsPerBlock.x - 1)/threadsPerBlock.x, (ny+threadsPerBlock.y - 1)/threadsPerBlock.y);
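  // Added note: ceiling division sizes the grid so the 128x1 blocks cover all
  // nx*ny grid points; surplus threads are assumed to be range-checked inside
  // the kernels declared in solver.h.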
auto t_initial = chrono::steady_clock::now();
for (int iter = 0; iter < nt; iter++)
{
build_up_b<<<blockNumber,threadsPerBlock>>>(b,u,v,rho,dt,dx,dy,nx,ny);
cudaDeviceSynchronize();
for (int p_iter = 0; p_iter < nit; p_iter++)
{
pressure_poisson<<<blockNumber,threadsPerBlock>>>(p,pn,b,rho,dt,dx,dy,nx,ny);
boundary_pressure<<<blockNumber,threadsPerBlock>>>(p,nx,ny);
copy_function<<<blockNumber,threadsPerBlock>>>(pn,p,nx,ny);
cudaDeviceSynchronize();
}
velocity_solver<<<blockNumber,threadsPerBlock>>>(u,un,v,vn,p,pn,b,rho,nu,dt,dx,dy,nx,ny);
boundary_velocity<<<blockNumber,threadsPerBlock>>>(u,v,nx,ny);
copy_function<<<blockNumber,threadsPerBlock>>>(un,u,nx,ny);
copy_function<<<blockNumber,threadsPerBlock>>>(vn,v,nx,ny);
cudaDeviceSynchronize();
}
auto t_final = chrono::steady_clock::now();
double time = chrono::duration<double>(t_final-t_initial).count();
cout << "time = " << time << endl;
save_result(u,v,p,nx,ny);
cudaFree(u); cudaFree(un); cudaFree(v); cudaFree(vn); cudaFree(p); cudaFree(pn); cudaFree(b);
return 0;
}
|
3418e19617e21bf98dab274e9082c86545e04364.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright 2019-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <iostream>
#include "backend/kernel_compiler/gpu/cuda_impl/gatherv2.cuh"
#include "runtime/device/gpu/cuda_common.h"
template <typename T, typename S>
__global__ void GatherV2Kernel(T *input, S *indices, T *output, size_t output_dim0, size_t output_dim1,
size_t output_dim2, size_t input_dim1) {
size_t num = output_dim0 * output_dim1 * output_dim2;
size_t i, j, k;
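  // Added note: write_index walks the flattened (output_dim0, output_dim1,
  // output_dim2) output; (i, j, k) are its unflattened coordinates, indices[j]
  // picks the slice along the gathered axis of the (output_dim0, input_dim1,
  // output_dim2) input, and out-of-range indices write 0.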
for (size_t write_index = blockIdx.x * blockDim.x + threadIdx.x; write_index < num;
write_index += blockDim.x * gridDim.x) {
i = write_index / (output_dim1 * output_dim2) % output_dim0;
j = write_index / output_dim2 % output_dim1;
k = write_index % output_dim2;
if ((indices[j] >= 0) && (indices[j] < input_dim1)) {
size_t read_index = i * input_dim1 * output_dim2 + indices[j] * output_dim2 + k;
output[write_index] = input[read_index];
} else {
output[write_index] = 0;
}
}
return;
}
template <typename T, typename S>
void GatherV2(T *input, S *indices, T *output, size_t output_dim0, size_t output_dim1, size_t output_dim2,
size_t input_dim1, hipStream_t stream) {
size_t size = output_dim0 * output_dim1 * output_dim2;
hipLaunchKernelGGL(( GatherV2Kernel), dim3(GET_BLOCKS(size)), dim3(GET_THREADS), 0, stream, input, indices, output, output_dim0, output_dim1,
output_dim2, input_dim1);
return;
}
template void GatherV2<float, int>(float *input, int *indices, float *output, size_t output_dim0, size_t output_dim1,
size_t output_dim2, size_t input_dim1, hipStream_t stream);
template void GatherV2<float, int64_t>(float *input, int64_t *indices, float *output, size_t output_dim0,
size_t output_dim1, size_t output_dim2, size_t input_dim1, hipStream_t stream);
template void GatherV2<half, int>(half *input, int *indices, half *output, size_t output_dim0, size_t output_dim1,
size_t output_dim2, size_t input_dim1, hipStream_t stream);
template void GatherV2<half, int64_t>(half *input, int64_t *indices, half *output, size_t output_dim0,
size_t output_dim1, size_t output_dim2, size_t input_dim1, hipStream_t stream);
template void GatherV2<double, int>(double *input, int *indices, double *output, size_t output_dim0, size_t output_dim1,
size_t output_dim2, size_t input_dim1, hipStream_t stream);
template void GatherV2<double, int64_t>(double *input, int64_t *indices, double *output, size_t output_dim0,
size_t output_dim1, size_t output_dim2, size_t input_dim1, hipStream_t stream);
template void GatherV2<int, int>(int *input, int *indices, int *output, size_t output_dim0, size_t output_dim1,
size_t output_dim2, size_t input_dim1, hipStream_t stream);
template void GatherV2<int, int64_t>(int *input, int64_t *indices, int *output, size_t output_dim0, size_t output_dim1,
size_t output_dim2, size_t input_dim1, hipStream_t stream);
template void GatherV2<int16_t, int>(int16_t *input, int *indices, int16_t *output, size_t output_dim0,
size_t output_dim1, size_t output_dim2, size_t input_dim1, hipStream_t stream);
template void GatherV2<int16_t, int64_t>(int16_t *input, int64_t *indices, int16_t *output, size_t output_dim0,
size_t output_dim1, size_t output_dim2, size_t input_dim1,
hipStream_t stream);
template void GatherV2<int8_t, int>(int8_t *input, int *indices, int8_t *output, size_t output_dim0, size_t output_dim1,
size_t output_dim2, size_t input_dim1, hipStream_t stream);
template void GatherV2<int8_t, int64_t>(int8_t *input, int64_t *indices, int8_t *output, size_t output_dim0,
size_t output_dim1, size_t output_dim2, size_t input_dim1, hipStream_t stream);
template void GatherV2<uint8_t, int>(uint8_t *input, int *indices, uint8_t *output, size_t output_dim0,
size_t output_dim1, size_t output_dim2, size_t input_dim1, hipStream_t stream);
template void GatherV2<uint8_t, int64_t>(uint8_t *input, int64_t *indices, uint8_t *output, size_t output_dim0,
size_t output_dim1, size_t output_dim2, size_t input_dim1,
hipStream_t stream);
|
3418e19617e21bf98dab274e9082c86545e04364.cu
|
/**
* Copyright 2019-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <iostream>
#include "backend/kernel_compiler/gpu/cuda_impl/gatherv2.cuh"
#include "runtime/device/gpu/cuda_common.h"
template <typename T, typename S>
__global__ void GatherV2Kernel(T *input, S *indices, T *output, size_t output_dim0, size_t output_dim1,
size_t output_dim2, size_t input_dim1) {
size_t num = output_dim0 * output_dim1 * output_dim2;
size_t i, j, k;
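  // Added note: write_index walks the flattened (output_dim0, output_dim1,
  // output_dim2) output; (i, j, k) are its unflattened coordinates, indices[j]
  // picks the slice along the gathered axis of the (output_dim0, input_dim1,
  // output_dim2) input, and out-of-range indices write 0.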
for (size_t write_index = blockIdx.x * blockDim.x + threadIdx.x; write_index < num;
write_index += blockDim.x * gridDim.x) {
i = write_index / (output_dim1 * output_dim2) % output_dim0;
j = write_index / output_dim2 % output_dim1;
k = write_index % output_dim2;
if ((indices[j] >= 0) && (indices[j] < input_dim1)) {
size_t read_index = i * input_dim1 * output_dim2 + indices[j] * output_dim2 + k;
output[write_index] = input[read_index];
} else {
output[write_index] = 0;
}
}
return;
}
template <typename T, typename S>
void GatherV2(T *input, S *indices, T *output, size_t output_dim0, size_t output_dim1, size_t output_dim2,
size_t input_dim1, cudaStream_t stream) {
size_t size = output_dim0 * output_dim1 * output_dim2;
GatherV2Kernel<<<GET_BLOCKS(size), GET_THREADS, 0, stream>>>(input, indices, output, output_dim0, output_dim1,
output_dim2, input_dim1);
return;
}
template void GatherV2<float, int>(float *input, int *indices, float *output, size_t output_dim0, size_t output_dim1,
size_t output_dim2, size_t input_dim1, cudaStream_t stream);
template void GatherV2<float, int64_t>(float *input, int64_t *indices, float *output, size_t output_dim0,
size_t output_dim1, size_t output_dim2, size_t input_dim1, cudaStream_t stream);
template void GatherV2<half, int>(half *input, int *indices, half *output, size_t output_dim0, size_t output_dim1,
size_t output_dim2, size_t input_dim1, cudaStream_t stream);
template void GatherV2<half, int64_t>(half *input, int64_t *indices, half *output, size_t output_dim0,
size_t output_dim1, size_t output_dim2, size_t input_dim1, cudaStream_t stream);
template void GatherV2<double, int>(double *input, int *indices, double *output, size_t output_dim0, size_t output_dim1,
size_t output_dim2, size_t input_dim1, cudaStream_t stream);
template void GatherV2<double, int64_t>(double *input, int64_t *indices, double *output, size_t output_dim0,
size_t output_dim1, size_t output_dim2, size_t input_dim1, cudaStream_t stream);
template void GatherV2<int, int>(int *input, int *indices, int *output, size_t output_dim0, size_t output_dim1,
size_t output_dim2, size_t input_dim1, cudaStream_t stream);
template void GatherV2<int, int64_t>(int *input, int64_t *indices, int *output, size_t output_dim0, size_t output_dim1,
size_t output_dim2, size_t input_dim1, cudaStream_t stream);
template void GatherV2<int16_t, int>(int16_t *input, int *indices, int16_t *output, size_t output_dim0,
size_t output_dim1, size_t output_dim2, size_t input_dim1, cudaStream_t stream);
template void GatherV2<int16_t, int64_t>(int16_t *input, int64_t *indices, int16_t *output, size_t output_dim0,
size_t output_dim1, size_t output_dim2, size_t input_dim1,
cudaStream_t stream);
template void GatherV2<int8_t, int>(int8_t *input, int *indices, int8_t *output, size_t output_dim0, size_t output_dim1,
size_t output_dim2, size_t input_dim1, cudaStream_t stream);
template void GatherV2<int8_t, int64_t>(int8_t *input, int64_t *indices, int8_t *output, size_t output_dim0,
size_t output_dim1, size_t output_dim2, size_t input_dim1, cudaStream_t stream);
template void GatherV2<uint8_t, int>(uint8_t *input, int *indices, uint8_t *output, size_t output_dim0,
size_t output_dim1, size_t output_dim2, size_t input_dim1, cudaStream_t stream);
template void GatherV2<uint8_t, int64_t>(uint8_t *input, int64_t *indices, uint8_t *output, size_t output_dim0,
size_t output_dim1, size_t output_dim2, size_t input_dim1,
cudaStream_t stream);
|
345ff9c97f325cc9f7a015480f95aaf93b909c3f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifdef __NVCC__
#include "hipcub/hipcub.hpp"
#endif
#ifdef __HIPCC__
#include <hipcub/hipcub.hpp>
namespace cub = hipcub;
#endif
#include <paddle/fluid/memory/allocation/allocator.h>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/memory/memcpy.h"
#include "paddle/fluid/operators/detection/bbox_util.h"
#include "paddle/fluid/operators/detection/collect_fpn_proposals_op.h"
#include "paddle/fluid/operators/math/concat_and_split.h"
#include "paddle/fluid/platform/for_range.h"
#include "paddle/phi/backends/gpu/gpu_primitives.h"
#include "paddle/phi/core/mixed_vector.h"
#include "paddle/phi/kernels/funcs/gather.cu.h"
#include "paddle/phi/kernels/funcs/strided_memcpy.h"
namespace paddle {
namespace operators {
static constexpr int kNumCUDAThreads = 64;
static constexpr int kNumMaxinumNumBlocks = 4096;
const int kBBoxSize = 4;
static inline int NumBlocks(const int N) {
return ::min((N + kNumCUDAThreads - 1) / kNumCUDAThreads,
kNumMaxinumNumBlocks);
}
static __global__ void GetLengthLoD(const int nthreads,
const int* batch_ids,
int* length_lod) {
CUDA_KERNEL_LOOP(i, nthreads) {
phi::CudaAtomicAdd(length_lod + batch_ids[i], 1);
}
}
template <typename DeviceContext, typename T>
class GPUCollectFpnProposalsOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
const auto roi_ins = ctx.MultiInput<phi::DenseTensor>("MultiLevelRois");
const auto score_ins = ctx.MultiInput<phi::DenseTensor>("MultiLevelScores");
auto fpn_rois = ctx.Output<phi::DenseTensor>("FpnRois");
auto& dev_ctx = ctx.template device_context<DeviceContext>();
const int post_nms_topN = ctx.Attr<int>("post_nms_topN");
// concat inputs along axis = 0
int roi_offset = 0;
int score_offset = 0;
int total_roi_num = 0;
for (size_t i = 0; i < roi_ins.size(); ++i) {
total_roi_num += roi_ins[i]->dims()[0];
}
int real_post_num = min(post_nms_topN, total_roi_num);
fpn_rois->mutable_data<T>({real_post_num, kBBoxSize}, dev_ctx.GetPlace());
phi::DenseTensor concat_rois;
phi::DenseTensor concat_scores;
T* concat_rois_data = concat_rois.mutable_data<T>(
{total_roi_num, kBBoxSize}, dev_ctx.GetPlace());
T* concat_scores_data =
concat_scores.mutable_data<T>({total_roi_num, 1}, dev_ctx.GetPlace());
phi::DenseTensor roi_batch_id_list;
roi_batch_id_list.Resize({total_roi_num});
int* roi_batch_id_data =
roi_batch_id_list.mutable_data<int>(platform::CPUPlace());
int index = 0;
int lod_size;
auto place = dev_ctx.GetPlace();
auto multi_rois_num = ctx.MultiInput<phi::DenseTensor>("MultiLevelRoIsNum");
for (size_t i = 0; i < roi_ins.size(); ++i) {
auto roi_in = roi_ins[i];
auto score_in = score_ins[i];
if (multi_rois_num.size() > 0) {
phi::DenseTensor temp;
paddle::framework::TensorCopySync(
*multi_rois_num[i], platform::CPUPlace(), &temp);
const int* length_in = temp.data<int>();
lod_size = multi_rois_num[i]->numel();
for (size_t n = 0; n < lod_size; ++n) {
for (size_t j = 0; j < length_in[n]; ++j) {
roi_batch_id_data[index++] = n;
}
}
} else {
auto length_in = roi_in->lod().back();
lod_size = length_in.size() - 1;
for (size_t n = 0; n < lod_size; ++n) {
for (size_t j = length_in[n]; j < length_in[n + 1]; ++j) {
roi_batch_id_data[index++] = n;
}
}
}
memory::Copy(place,
concat_rois_data + roi_offset,
place,
roi_in->data<T>(),
roi_in->numel() * sizeof(T),
dev_ctx.stream());
memory::Copy(place,
concat_scores_data + score_offset,
place,
score_in->data<T>(),
score_in->numel() * sizeof(T),
dev_ctx.stream());
roi_offset += roi_in->numel();
score_offset += score_in->numel();
}
// copy batch id list to GPU
phi::DenseTensor roi_batch_id_list_gpu;
framework::TensorCopy(
roi_batch_id_list, dev_ctx.GetPlace(), &roi_batch_id_list_gpu);
phi::DenseTensor index_in_t;
int* idx_in =
index_in_t.mutable_data<int>({total_roi_num}, dev_ctx.GetPlace());
platform::ForRange<phi::GPUContext> for_range_total(dev_ctx, total_roi_num);
for_range_total(RangeInitFunctor{0, 1, idx_in});
phi::DenseTensor keys_out_t;
T* keys_out =
keys_out_t.mutable_data<T>({total_roi_num}, dev_ctx.GetPlace());
phi::DenseTensor index_out_t;
int* idx_out =
index_out_t.mutable_data<int>({total_roi_num}, dev_ctx.GetPlace());
// Determine temporary device storage requirements
size_t temp_storage_bytes = 0;
hipcub::DeviceRadixSort::SortPairsDescending<T, int>(nullptr,
temp_storage_bytes,
concat_scores.data<T>(),
keys_out,
idx_in,
idx_out,
total_roi_num,
0,
sizeof(T) * 8,
dev_ctx.stream());
// Allocate temporary storage
auto d_temp_storage = memory::Alloc(place, temp_storage_bytes);
// Run sorting operation
// sort score to get corresponding index
hipcub::DeviceRadixSort::SortPairsDescending<T, int>(d_temp_storage->ptr(),
temp_storage_bytes,
concat_scores.data<T>(),
keys_out,
idx_in,
idx_out,
total_roi_num,
0,
sizeof(T) * 8,
dev_ctx.stream());
index_out_t.Resize({real_post_num});
phi::DenseTensor sorted_rois;
sorted_rois.mutable_data<T>({real_post_num, kBBoxSize}, dev_ctx.GetPlace());
phi::DenseTensor sorted_batch_id;
sorted_batch_id.mutable_data<int>({real_post_num}, dev_ctx.GetPlace());
phi::funcs::GPUGather<T>(dev_ctx, concat_rois, index_out_t, &sorted_rois);
phi::funcs::GPUGather<int>(
dev_ctx, roi_batch_id_list_gpu, index_out_t, &sorted_batch_id);
phi::DenseTensor batch_index_t;
int* batch_idx_in =
batch_index_t.mutable_data<int>({real_post_num}, dev_ctx.GetPlace());
platform::ForRange<phi::GPUContext> for_range_post(dev_ctx, real_post_num);
for_range_post(RangeInitFunctor{0, 1, batch_idx_in});
phi::DenseTensor out_id_t;
int* out_id_data =
out_id_t.mutable_data<int>({real_post_num}, dev_ctx.GetPlace());
// Determine temporary device storage requirements
temp_storage_bytes = 0;
hipcub::DeviceRadixSort::SortPairs<int, int>(nullptr,
temp_storage_bytes,
sorted_batch_id.data<int>(),
out_id_data,
batch_idx_in,
index_out_t.data<int>(),
real_post_num,
0,
sizeof(int) * 8,
dev_ctx.stream());
// Allocate temporary storage
d_temp_storage = memory::Alloc(place, temp_storage_bytes);
// Run sorting operation
// sort batch_id to get corresponding index
hipcub::DeviceRadixSort::SortPairs<int, int>(d_temp_storage->ptr(),
temp_storage_bytes,
sorted_batch_id.data<int>(),
out_id_data,
batch_idx_in,
index_out_t.data<int>(),
real_post_num,
0,
sizeof(int) * 8,
dev_ctx.stream());
phi::funcs::GPUGather<T>(dev_ctx, sorted_rois, index_out_t, fpn_rois);
phi::DenseTensor length_lod;
int* length_lod_data =
length_lod.mutable_data<int>({lod_size}, dev_ctx.GetPlace());
phi::funcs::SetConstant<phi::GPUContext, int> set_zero;
set_zero(dev_ctx, &length_lod, static_cast<int>(0));
int blocks = NumBlocks(real_post_num);
int threads = kNumCUDAThreads;
// get length-based lod by batch ids
hipLaunchKernelGGL(( GetLengthLoD), dim3(blocks), dim3(threads), 0, dev_ctx.stream(),
real_post_num, out_id_data, length_lod_data);
std::vector<int> length_lod_cpu(lod_size);
memory::Copy(platform::CPUPlace(),
length_lod_cpu.data(),
place,
length_lod_data,
sizeof(int) * lod_size,
dev_ctx.stream());
dev_ctx.Wait();
std::vector<size_t> offset(1, 0);
for (int i = 0; i < lod_size; ++i) {
offset.emplace_back(offset.back() + length_lod_cpu[i]);
}
if (ctx.HasOutput("RoisNum")) {
auto* rois_num = ctx.Output<phi::DenseTensor>("RoisNum");
int* rois_num_data = rois_num->mutable_data<int>({lod_size}, place);
memory::Copy(place,
rois_num_data,
place,
length_lod_data,
lod_size * sizeof(int),
dev_ctx.stream());
}
framework::LoD lod;
lod.emplace_back(offset);
fpn_rois->set_lod(lod);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
collect_fpn_proposals,
ops::GPUCollectFpnProposalsOpKernel<phi::GPUContext, float>,
ops::GPUCollectFpnProposalsOpKernel<phi::GPUContext, double>);
|
345ff9c97f325cc9f7a015480f95aaf93b909c3f.cu
|
/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifdef __NVCC__
#include "cub/cub.cuh"
#endif
#ifdef __HIPCC__
#include <hipcub/hipcub.hpp>
namespace cub = hipcub;
#endif
#include <paddle/fluid/memory/allocation/allocator.h>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/memory/memcpy.h"
#include "paddle/fluid/operators/detection/bbox_util.h"
#include "paddle/fluid/operators/detection/collect_fpn_proposals_op.h"
#include "paddle/fluid/operators/math/concat_and_split.h"
#include "paddle/fluid/platform/for_range.h"
#include "paddle/phi/backends/gpu/gpu_primitives.h"
#include "paddle/phi/core/mixed_vector.h"
#include "paddle/phi/kernels/funcs/gather.cu.h"
#include "paddle/phi/kernels/funcs/strided_memcpy.h"
namespace paddle {
namespace operators {
static constexpr int kNumCUDAThreads = 64;
static constexpr int kNumMaxinumNumBlocks = 4096;
const int kBBoxSize = 4;
static inline int NumBlocks(const int N) {
return std::min((N + kNumCUDAThreads - 1) / kNumCUDAThreads,
kNumMaxinumNumBlocks);
}
static __global__ void GetLengthLoD(const int nthreads,
const int* batch_ids,
int* length_lod) {
CUDA_KERNEL_LOOP(i, nthreads) {
phi::CudaAtomicAdd(length_lod + batch_ids[i], 1);
}
}
template <typename DeviceContext, typename T>
class GPUCollectFpnProposalsOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
const auto roi_ins = ctx.MultiInput<phi::DenseTensor>("MultiLevelRois");
const auto score_ins = ctx.MultiInput<phi::DenseTensor>("MultiLevelScores");
auto fpn_rois = ctx.Output<phi::DenseTensor>("FpnRois");
auto& dev_ctx = ctx.template device_context<DeviceContext>();
const int post_nms_topN = ctx.Attr<int>("post_nms_topN");
// concat inputs along axis = 0
int roi_offset = 0;
int score_offset = 0;
int total_roi_num = 0;
for (size_t i = 0; i < roi_ins.size(); ++i) {
total_roi_num += roi_ins[i]->dims()[0];
}
int real_post_num = min(post_nms_topN, total_roi_num);
fpn_rois->mutable_data<T>({real_post_num, kBBoxSize}, dev_ctx.GetPlace());
phi::DenseTensor concat_rois;
phi::DenseTensor concat_scores;
T* concat_rois_data = concat_rois.mutable_data<T>(
{total_roi_num, kBBoxSize}, dev_ctx.GetPlace());
T* concat_scores_data =
concat_scores.mutable_data<T>({total_roi_num, 1}, dev_ctx.GetPlace());
phi::DenseTensor roi_batch_id_list;
roi_batch_id_list.Resize({total_roi_num});
int* roi_batch_id_data =
roi_batch_id_list.mutable_data<int>(platform::CPUPlace());
int index = 0;
int lod_size;
auto place = dev_ctx.GetPlace();
auto multi_rois_num = ctx.MultiInput<phi::DenseTensor>("MultiLevelRoIsNum");
for (size_t i = 0; i < roi_ins.size(); ++i) {
auto roi_in = roi_ins[i];
auto score_in = score_ins[i];
if (multi_rois_num.size() > 0) {
phi::DenseTensor temp;
paddle::framework::TensorCopySync(
*multi_rois_num[i], platform::CPUPlace(), &temp);
const int* length_in = temp.data<int>();
lod_size = multi_rois_num[i]->numel();
for (size_t n = 0; n < lod_size; ++n) {
for (size_t j = 0; j < length_in[n]; ++j) {
roi_batch_id_data[index++] = n;
}
}
} else {
auto length_in = roi_in->lod().back();
lod_size = length_in.size() - 1;
for (size_t n = 0; n < lod_size; ++n) {
for (size_t j = length_in[n]; j < length_in[n + 1]; ++j) {
roi_batch_id_data[index++] = n;
}
}
}
memory::Copy(place,
concat_rois_data + roi_offset,
place,
roi_in->data<T>(),
roi_in->numel() * sizeof(T),
dev_ctx.stream());
memory::Copy(place,
concat_scores_data + score_offset,
place,
score_in->data<T>(),
score_in->numel() * sizeof(T),
dev_ctx.stream());
roi_offset += roi_in->numel();
score_offset += score_in->numel();
}
// copy batch id list to GPU
phi::DenseTensor roi_batch_id_list_gpu;
framework::TensorCopy(
roi_batch_id_list, dev_ctx.GetPlace(), &roi_batch_id_list_gpu);
phi::DenseTensor index_in_t;
int* idx_in =
index_in_t.mutable_data<int>({total_roi_num}, dev_ctx.GetPlace());
platform::ForRange<phi::GPUContext> for_range_total(dev_ctx, total_roi_num);
for_range_total(RangeInitFunctor{0, 1, idx_in});
phi::DenseTensor keys_out_t;
T* keys_out =
keys_out_t.mutable_data<T>({total_roi_num}, dev_ctx.GetPlace());
phi::DenseTensor index_out_t;
int* idx_out =
index_out_t.mutable_data<int>({total_roi_num}, dev_ctx.GetPlace());
// Determine temporary device storage requirements
size_t temp_storage_bytes = 0;
cub::DeviceRadixSort::SortPairsDescending<T, int>(nullptr,
temp_storage_bytes,
concat_scores.data<T>(),
keys_out,
idx_in,
idx_out,
total_roi_num,
0,
sizeof(T) * 8,
dev_ctx.stream());
// Allocate temporary storage
auto d_temp_storage = memory::Alloc(place, temp_storage_bytes);
// Run sorting operation
// sort score to get corresponding index
cub::DeviceRadixSort::SortPairsDescending<T, int>(d_temp_storage->ptr(),
temp_storage_bytes,
concat_scores.data<T>(),
keys_out,
idx_in,
idx_out,
total_roi_num,
0,
sizeof(T) * 8,
dev_ctx.stream());
index_out_t.Resize({real_post_num});
phi::DenseTensor sorted_rois;
sorted_rois.mutable_data<T>({real_post_num, kBBoxSize}, dev_ctx.GetPlace());
phi::DenseTensor sorted_batch_id;
sorted_batch_id.mutable_data<int>({real_post_num}, dev_ctx.GetPlace());
phi::funcs::GPUGather<T>(dev_ctx, concat_rois, index_out_t, &sorted_rois);
phi::funcs::GPUGather<int>(
dev_ctx, roi_batch_id_list_gpu, index_out_t, &sorted_batch_id);
phi::DenseTensor batch_index_t;
int* batch_idx_in =
batch_index_t.mutable_data<int>({real_post_num}, dev_ctx.GetPlace());
platform::ForRange<phi::GPUContext> for_range_post(dev_ctx, real_post_num);
for_range_post(RangeInitFunctor{0, 1, batch_idx_in});
phi::DenseTensor out_id_t;
int* out_id_data =
out_id_t.mutable_data<int>({real_post_num}, dev_ctx.GetPlace());
// Determine temporary device storage requirements
temp_storage_bytes = 0;
cub::DeviceRadixSort::SortPairs<int, int>(nullptr,
temp_storage_bytes,
sorted_batch_id.data<int>(),
out_id_data,
batch_idx_in,
index_out_t.data<int>(),
real_post_num,
0,
sizeof(int) * 8,
dev_ctx.stream());
// Allocate temporary storage
d_temp_storage = memory::Alloc(place, temp_storage_bytes);
// Run sorting operation
// sort batch_id to get corresponding index
cub::DeviceRadixSort::SortPairs<int, int>(d_temp_storage->ptr(),
temp_storage_bytes,
sorted_batch_id.data<int>(),
out_id_data,
batch_idx_in,
index_out_t.data<int>(),
real_post_num,
0,
sizeof(int) * 8,
dev_ctx.stream());
phi::funcs::GPUGather<T>(dev_ctx, sorted_rois, index_out_t, fpn_rois);
phi::DenseTensor length_lod;
int* length_lod_data =
length_lod.mutable_data<int>({lod_size}, dev_ctx.GetPlace());
phi::funcs::SetConstant<phi::GPUContext, int> set_zero;
set_zero(dev_ctx, &length_lod, static_cast<int>(0));
int blocks = NumBlocks(real_post_num);
int threads = kNumCUDAThreads;
// get length-based lod by batch ids
GetLengthLoD<<<blocks, threads, 0, dev_ctx.stream()>>>(
real_post_num, out_id_data, length_lod_data);
std::vector<int> length_lod_cpu(lod_size);
memory::Copy(platform::CPUPlace(),
length_lod_cpu.data(),
place,
length_lod_data,
sizeof(int) * lod_size,
dev_ctx.stream());
dev_ctx.Wait();
std::vector<size_t> offset(1, 0);
for (int i = 0; i < lod_size; ++i) {
offset.emplace_back(offset.back() + length_lod_cpu[i]);
}
if (ctx.HasOutput("RoisNum")) {
auto* rois_num = ctx.Output<phi::DenseTensor>("RoisNum");
int* rois_num_data = rois_num->mutable_data<int>({lod_size}, place);
memory::Copy(place,
rois_num_data,
place,
length_lod_data,
lod_size * sizeof(int),
dev_ctx.stream());
}
framework::LoD lod;
lod.emplace_back(offset);
fpn_rois->set_lod(lod);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
collect_fpn_proposals,
ops::GPUCollectFpnProposalsOpKernel<phi::GPUContext, float>,
ops::GPUCollectFpnProposalsOpKernel<phi::GPUContext, double>);
|
20fc104019c300b87fee2d232b4a84c90d1863f6.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#define kDataLen 1000
__global__ void child(float *z, int a, int *b, bool c, bool *d, float e,
float *f, unsigned g, unsigned *h, long i, long *j) {
z[threadIdx.x] += 1;
}
__global__ void parent(float* x, float* y, float *z) {
z[threadIdx.x] += y[threadIdx.x] + x[threadIdx.x];
int a;
int *b;
bool c;
bool *d;
float e;
float *f;
unsigned g;
unsigned *h;
long i;
long *j;
hipLaunchKernelGGL(( child), dim3(1), dim3(kDataLen), 0, 0, z, a, b, c, d, e, f, g, h, i, j);
}
int main(int argc, char* argv[]) {
float host_a[kDataLen];
float host_b[kDataLen];
float host_c[kDataLen];
for (int i=0; i < kDataLen; i++) {
host_a[i] = i;
host_b[i] = 2*i;
}
// Copy input data to device.
float* device_a;
float* device_b;
float* device_c;
hipMalloc(&device_a, kDataLen * sizeof(float));
hipMalloc(&device_b, kDataLen * sizeof(float));
hipMalloc(&device_c, kDataLen * sizeof(float));
hipMemcpy(device_a, host_a, kDataLen * sizeof(float),
hipMemcpyHostToDevice);
hipMemcpy(device_b, host_b, kDataLen * sizeof(float),
hipMemcpyHostToDevice);
// Launch the kernel.
hipLaunchKernelGGL(( parent), dim3(5), dim3(kDataLen/5), 0, 0, device_a, device_b, device_c);
// Copy output data to host.
hipDeviceSynchronize();
hipMemcpy(host_c, device_c, kDataLen * sizeof(float),
hipMemcpyDeviceToHost);
// Print the results.
for (int i = 0; i < kDataLen; ++i) {
std::cout << "y[" << i << "] = " << host_c[i] << "\n";
}
hipDeviceReset();
return 0;
}
|
20fc104019c300b87fee2d232b4a84c90d1863f6.cu
|
#include <iostream>
#define kDataLen 1000
__global__ void child(float *z, int a, int *b, bool c, bool *d, float e,
float *f, unsigned g, unsigned *h, long i, long *j) {
z[threadIdx.x] += 1;
}
__global__ void parent(float* x, float* y, float *z) {
z[threadIdx.x] += y[threadIdx.x] + x[threadIdx.x];
int a;
int *b;
bool c;
bool *d;
float e;
float *f;
unsigned g;
unsigned *h;
long i;
long *j;
child<<<1, kDataLen>>>(z, a, b, c, d, e, f, g, h, i, j);
}
int main(int argc, char* argv[]) {
float host_a[kDataLen];
float host_b[kDataLen];
float host_c[kDataLen];
for (int i=0; i < kDataLen; i++) {
host_a[i] = i;
host_b[i] = 2*i;
}
// Copy input data to device.
float* device_a;
float* device_b;
float* device_c;
cudaMalloc(&device_a, kDataLen * sizeof(float));
cudaMalloc(&device_b, kDataLen * sizeof(float));
cudaMalloc(&device_c, kDataLen * sizeof(float));
cudaMemcpy(device_a, host_a, kDataLen * sizeof(float),
cudaMemcpyHostToDevice);
cudaMemcpy(device_b, host_b, kDataLen * sizeof(float),
cudaMemcpyHostToDevice);
// Launch the kernel.
parent<<<5, kDataLen/5>>>(device_a, device_b, device_c);
// Copy output data to host.
cudaDeviceSynchronize();
cudaMemcpy(host_c, device_c, kDataLen * sizeof(float),
cudaMemcpyDeviceToHost);
// Print the results.
for (int i = 0; i < kDataLen; ++i) {
std::cout << "y[" << i << "] = " << host_c[i] << "\n";
}
cudaDeviceReset();
return 0;
}
|
9ea24dfdadb2bfcfe70656b7bfb4996d377012e8.hip
|
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/OpMathType.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/hip/Math.cuh>
#include <limits>
namespace at::native {
constexpr char acos_name[] = "acos";
void acos_kernel_cuda(TensorIteratorBase& iter) {
auto common_dtype = iter.common_dtype();
if (at::isComplexType(common_dtype)) {
#if AT_USE_JITERATOR
static const auto acos_string = jiterator_stringify(
template <typename T> T acos(T a) { return std::acos(a); });
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "acos_name", [&]() {
jitted_gpu_kernel<
/*name=*/acos_name,
/*return_dtype=*/scalar_t,
/*common_dtype=*/scalar_t,
/*arity=*/1>(iter, acos_string);
});
#else
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "acos_name", [&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
using opmath_t = at::opmath_type<scalar_t>;
return ::acos(static_cast<opmath_t>(a));
});
});
#endif
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(
ScalarType::Half,
ScalarType::BFloat16,
common_dtype,
"acos_cuda",
[&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::acos(a);
});
});
}
}
REGISTER_DISPATCH(acos_stub, &acos_kernel_cuda);
} // namespace at::native
|
9ea24dfdadb2bfcfe70656b7bfb4996d377012e8.cu
|
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/OpMathType.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/Math.cuh>
#include <limits>
namespace at::native {
constexpr char acos_name[] = "acos";
void acos_kernel_cuda(TensorIteratorBase& iter) {
auto common_dtype = iter.common_dtype();
if (at::isComplexType(common_dtype)) {
#if AT_USE_JITERATOR
static const auto acos_string = jiterator_stringify(
template <typename T> T acos(T a) { return std::acos(a); });
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "acos_name", [&]() {
jitted_gpu_kernel<
/*name=*/acos_name,
/*return_dtype=*/scalar_t,
/*common_dtype=*/scalar_t,
/*arity=*/1>(iter, acos_string);
});
#else
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "acos_name", [&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
using opmath_t = at::opmath_type<scalar_t>;
return ::acos(static_cast<opmath_t>(a));
});
});
#endif
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(
ScalarType::Half,
ScalarType::BFloat16,
common_dtype,
"acos_cuda",
[&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::acos(a);
});
});
}
}
REGISTER_DISPATCH(acos_stub, &acos_kernel_cuda);
} // namespace at::native
|
a9a6f2703a50b735bca6ad7b5b95976b4f90e991.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "blas.h"
#include <algorithm>
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template< int BLOCK_SIZE >
__global__
void axpby_kernel( const int n,
const double a,
const double *x,
const double b,
const double *y,
double *z)
{
// One thread per row.
int idx = blockIdx.x*BLOCK_SIZE + threadIdx.x;
// Iterate over the rows of the matrix.
for( ; idx < n ; idx += BLOCK_SIZE*gridDim.x )
{
// Load x and y.
double my_x = x[idx];
double my_y = y[idx];
//if( idx < 4 )
// printf( "axpby: [ %12.8f %12.8f ]\n", my_x, my_y );
// Store the results.
z[idx] = a*my_x + b*my_y;
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template< int BLOCK_SIZE >
__global__
void axpbypcz_kernel( const int n,
const double a,
const double *x,
const double b,
const double *y,
const double c,
const double *z,
double *w)
{
// One thread per row.
int idx = blockIdx.x*BLOCK_SIZE + threadIdx.x;
// Iterate over the rows of the matrix.
for( ; idx < n ; idx += BLOCK_SIZE*gridDim.x )
{
// Load x and y.
double my_x = x[idx];
double my_y = y[idx];
double my_z = z[idx];
// Store the results.
w[idx] = a*my_x + b*my_y + c*my_z;
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template< int BLOCK_SIZE >
__global__
void dot_kernel_v0( const int n, const double *__restrict x, const double *__restrict y, double *res )
{
int idx = blockIdx.x*BLOCK_SIZE + threadIdx.x;
// Shared memory to compute the block reduction.
__shared__ volatile double smem[BLOCK_SIZE];
// My dot values.
double my_x, my_y, my_res = 0.0;
// Serial reduction.
for( ; idx < n ; idx += BLOCK_SIZE*gridDim.x )
{
// Load x and y.
my_x = x[idx];
my_y = y[idx];
// Update my local value.
my_res += my_x*my_y;
}
// Store the result in SMEM.
smem[threadIdx.x] = my_res;
// Make sure all threads have written their values.
__syncthreads();
// Block-wide reduction.
for( int offset = BLOCK_SIZE / 2 ; offset > 0 ; offset /= 2 )
{
if( threadIdx.x < offset )
smem[threadIdx.x] = my_res += smem[threadIdx.x + offset];
__syncthreads();
}
// Store the result.
if( threadIdx.x == 0 )
res[blockIdx.x] = my_res;
}
// --------------------------------------------------------------------------------------------------------------------
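// Dot-product kernel variant that reduces within each warp first, then across warps,
// writing one partial sum per block to res[blockIdx.x].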
template< int BLOCK_SIZE >
__global__
void dot_kernel_v1( const int n, const double *__restrict x, const double *__restrict y, double *res )
{
const int NUM_WARPS = BLOCK_SIZE / WARP_SIZE;
int idx = blockIdx.x*BLOCK_SIZE + threadIdx.x;
double my_x, my_y, my_res = 0.0;
// Thread coordinates in the block.
const int warp_id = warpid();
const int lane_id = laneid();
// Shared memory to compute the block reduction.
__shared__ volatile double smem[BLOCK_SIZE + NUM_WARPS];
// Serial reduction.
for( ; idx < n ; idx += BLOCK_SIZE*gridDim.x )
{
// Load x and y.
my_x = x[idx];
my_y = y[idx];
// Update my local value.
my_res += my_x*my_y;
}
// Store the result in SMEM.
smem[threadIdx.x] = my_res;
for( int offset = WARP_SIZE / 2 ; offset > 0 ; offset >>= 1 )
if( lane_id < offset )
smem[threadIdx.x] = my_res += smem[threadIdx.x+offset];
// Make sure all threads have written their values.
if( lane_id == 0 )
smem[BLOCK_SIZE+warp_id] = my_res;
__syncthreads();
// First warp reduction.
if( threadIdx.x < NUM_WARPS )
my_res = smem[BLOCK_SIZE + threadIdx.x];
for( int offset = NUM_WARPS / 2 ; offset > 0 ; offset >>= 1 )
if( threadIdx.x < offset )
smem[BLOCK_SIZE + threadIdx.x] = my_res += smem[BLOCK_SIZE + threadIdx.x + offset];
// Store the result.
if( threadIdx.x == 0 )
res[blockIdx.x] = my_res;
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
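// Final single-block reduction of the per-block partial sums produced by the dot kernels.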
template< int BLOCK_SIZE >
__global__
void reduce_kernel( int n, const double *x, double *res )
{
const int NUM_WARPS = BLOCK_SIZE / WARP_SIZE;
const int warp_id = warpid();
const int lane_id = laneid();
__shared__ volatile double smem[BLOCK_SIZE + NUM_WARPS];
double my_x = 0.0;
if( threadIdx.x < n )
my_x = x[threadIdx.x];
smem[threadIdx.x] = my_x;
for( int offset = WARP_SIZE / 2 ; offset > 0 ; offset >>= 1 )
if( lane_id < offset )
smem[threadIdx.x] = my_x += smem[threadIdx.x+offset];
if( lane_id == 0 )
smem[BLOCK_SIZE + warp_id] = my_x;
__syncthreads();
if( threadIdx.x < NUM_WARPS / 2 )
my_x = smem[BLOCK_SIZE + threadIdx.x];
for( int offset = NUM_WARPS / 2 ; offset > 0 ; offset >>= 1 )
if( threadIdx.x < offset )
smem[BLOCK_SIZE+threadIdx.x] = my_x += smem[BLOCK_SIZE + threadIdx.x+offset];
if( threadIdx.x == 0 )
res[0] = my_x;
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
enum { BLOCK_SIZE = 256 };
void axpby(Context *ctx, int n, double a, const double *x,
double b, const double *y,
double *z)
{
const int grid_size = ::min( (int) MAX_GRID_SIZE, (4*n+BLOCK_SIZE-1) / BLOCK_SIZE );
hipLaunchKernelGGL(( axpby_kernel<BLOCK_SIZE>), dim3(grid_size), dim3(BLOCK_SIZE), 0, ctx->get_stream(0), 4*n, a, x, b, y, z);
CUDA_SAFE_CALL( hipGetLastError() );
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void axpbypcz(Context *ctx, int n, double a, const double *x,
double b, const double *y,
double c, const double *z,
double *w)
{
const int grid_size = ::min( (int) MAX_GRID_SIZE, (4*n+BLOCK_SIZE-1) / BLOCK_SIZE );
hipLaunchKernelGGL(( axpbypcz_kernel<BLOCK_SIZE>), dim3(grid_size), dim3(BLOCK_SIZE), 0, ctx->get_stream(0), 4*n, a, x, b, y, c, z, w);
CUDA_SAFE_CALL( hipGetLastError() );
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void dot( Context *ctx, int n, const double *x, const double *y, double *res, double *wk )
{
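    // Allocate a temporary workspace for per-block partial sums when the caller does not provide one.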
bool local_wk = wk == NULL;
if( local_wk )
{
CUDA_SAFE_CALL( hipMalloc((void**) &wk, BLOCK_SIZE*sizeof(double)) );
}
int grid_size = 0;
switch(ctx->dot)
{
case 0:
grid_size = ::min( (int) BLOCK_SIZE, (4*n+BLOCK_SIZE-1) / BLOCK_SIZE );
hipLaunchKernelGGL(( dot_kernel_v0<BLOCK_SIZE>), dim3(grid_size), dim3(BLOCK_SIZE), 0, ctx->get_stream(0), 4*n, x, y, wk);
CUDA_SAFE_CALL( hipGetLastError() );
hipLaunchKernelGGL(( reduce_kernel<BLOCK_SIZE>), dim3(1), dim3(BLOCK_SIZE), 0, ctx->get_stream(0), grid_size, wk, res);
CUDA_SAFE_CALL( hipGetLastError() );
break;
case 1:
grid_size = ::min( (int) BLOCK_SIZE, (4*n+BLOCK_SIZE-1) / BLOCK_SIZE );
hipLaunchKernelGGL(( dot_kernel_v1<BLOCK_SIZE>), dim3(grid_size), dim3(BLOCK_SIZE), 0, ctx->get_stream(0), 4*n, x, y, wk);
CUDA_SAFE_CALL( hipGetLastError() );
hipLaunchKernelGGL(( reduce_kernel<BLOCK_SIZE>), dim3(1), dim3(BLOCK_SIZE), 0, ctx->get_stream(0), grid_size, wk, res);
CUDA_SAFE_CALL( hipGetLastError() );
break;
default:
std::fprintf(stderr, "Invalid version for dot kernel=%d, valid values=[0,1]\n", ctx->dot);
std::exit(1);
}
if( local_wk )
{
CUDA_SAFE_CALL( hipFree(wk) );
wk = NULL;
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
a9a6f2703a50b735bca6ad7b5b95976b4f90e991.cu
|
#include "blas.h"
#include <algorithm>
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template< int BLOCK_SIZE >
__global__
void axpby_kernel( const int n,
const double a,
const double *x,
const double b,
const double *y,
double *z)
{
// One thread per row.
int idx = blockIdx.x*BLOCK_SIZE + threadIdx.x;
// Iterate over the rows of the matrix.
for( ; idx < n ; idx += BLOCK_SIZE*gridDim.x )
{
// Load x and y.
double my_x = x[idx];
double my_y = y[idx];
//if( idx < 4 )
// printf( "axpby: [ %12.8f %12.8f ]\n", my_x, my_y );
// Store the results.
z[idx] = a*my_x + b*my_y;
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template< int BLOCK_SIZE >
__global__
void axpbypcz_kernel( const int n,
const double a,
const double *x,
const double b,
const double *y,
const double c,
const double *z,
double *w)
{
// One thread per row.
int idx = blockIdx.x*BLOCK_SIZE + threadIdx.x;
// Iterate over the rows of the matrix.
for( ; idx < n ; idx += BLOCK_SIZE*gridDim.x )
{
// Load x and y.
double my_x = x[idx];
double my_y = y[idx];
double my_z = z[idx];
// Store the results.
w[idx] = a*my_x + b*my_y + c*my_z;
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template< int BLOCK_SIZE >
__global__
void dot_kernel_v0( const int n, const double *__restrict x, const double *__restrict y, double *res )
{
int idx = blockIdx.x*BLOCK_SIZE + threadIdx.x;
// Shared memory to compute the block reduction.
__shared__ volatile double smem[BLOCK_SIZE];
// My dot values.
double my_x, my_y, my_res = 0.0;
// Serial reduction.
for( ; idx < n ; idx += BLOCK_SIZE*gridDim.x )
{
// Load x and y.
my_x = x[idx];
my_y = y[idx];
// Update my local value.
my_res += my_x*my_y;
}
// Store the result in SMEM.
smem[threadIdx.x] = my_res;
// Make sure all threads have written their values.
__syncthreads();
// Block-wide reduction.
for( int offset = BLOCK_SIZE / 2 ; offset > 0 ; offset /= 2 )
{
if( threadIdx.x < offset )
smem[threadIdx.x] = my_res += smem[threadIdx.x + offset];
__syncthreads();
}
// Store the result.
if( threadIdx.x == 0 )
res[blockIdx.x] = my_res;
}
// --------------------------------------------------------------------------------------------------------------------
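// Dot-product kernel variant that reduces within each warp first, then across warps,
// writing one partial sum per block to res[blockIdx.x].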
template< int BLOCK_SIZE >
__global__
void dot_kernel_v1( const int n, const double *__restrict x, const double *__restrict y, double *res )
{
const int NUM_WARPS = BLOCK_SIZE / WARP_SIZE;
int idx = blockIdx.x*BLOCK_SIZE + threadIdx.x;
double my_x, my_y, my_res = 0.0;
// Thread coordinates in the block.
const int warp_id = warpid();
const int lane_id = laneid();
// Shared memory to compute the block reduction.
__shared__ volatile double smem[BLOCK_SIZE + NUM_WARPS];
// Serial reduction.
for( ; idx < n ; idx += BLOCK_SIZE*gridDim.x )
{
// Load x and y.
my_x = x[idx];
my_y = y[idx];
// Update my local value.
my_res += my_x*my_y;
}
// Store the result in SMEM.
smem[threadIdx.x] = my_res;
for( int offset = WARP_SIZE / 2 ; offset > 0 ; offset >>= 1 )
if( lane_id < offset )
smem[threadIdx.x] = my_res += smem[threadIdx.x+offset];
// Make sure all threads have written their values.
if( lane_id == 0 )
smem[BLOCK_SIZE+warp_id] = my_res;
__syncthreads();
// First warp reduction.
if( threadIdx.x < NUM_WARPS )
my_res = smem[BLOCK_SIZE + threadIdx.x];
for( int offset = NUM_WARPS / 2 ; offset > 0 ; offset >>= 1 )
if( threadIdx.x < offset )
smem[BLOCK_SIZE + threadIdx.x] = my_res += smem[BLOCK_SIZE + threadIdx.x + offset];
// Store the result.
if( threadIdx.x == 0 )
res[blockIdx.x] = my_res;
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
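// Final single-block reduction of the per-block partial sums produced by the dot kernels.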
template< int BLOCK_SIZE >
__global__
void reduce_kernel( int n, const double *x, double *res )
{
const int NUM_WARPS = BLOCK_SIZE / WARP_SIZE;
const int warp_id = warpid();
const int lane_id = laneid();
__shared__ volatile double smem[BLOCK_SIZE + NUM_WARPS];
double my_x = 0.0;
if( threadIdx.x < n )
my_x = x[threadIdx.x];
smem[threadIdx.x] = my_x;
for( int offset = WARP_SIZE / 2 ; offset > 0 ; offset >>= 1 )
if( lane_id < offset )
smem[threadIdx.x] = my_x += smem[threadIdx.x+offset];
if( lane_id == 0 )
smem[BLOCK_SIZE + warp_id] = my_x;
__syncthreads();
if( threadIdx.x < NUM_WARPS / 2 )
my_x = smem[BLOCK_SIZE + threadIdx.x];
for( int offset = NUM_WARPS / 2 ; offset > 0 ; offset >>= 1 )
if( threadIdx.x < offset )
smem[BLOCK_SIZE+threadIdx.x] = my_x += smem[BLOCK_SIZE + threadIdx.x+offset];
if( threadIdx.x == 0 )
res[0] = my_x;
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
enum { BLOCK_SIZE = 256 };
void axpby(Context *ctx, int n, double a, const double *x,
double b, const double *y,
double *z)
{
const int grid_size = std::min( (int) MAX_GRID_SIZE, (4*n+BLOCK_SIZE-1) / BLOCK_SIZE );
axpby_kernel<BLOCK_SIZE><<<grid_size, BLOCK_SIZE, 0, ctx->get_stream(0)>>>(4*n, a, x, b, y, z);
CUDA_SAFE_CALL( cudaGetLastError() );
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void axpbypcz(Context *ctx, int n, double a, const double *x,
double b, const double *y,
double c, const double *z,
double *w)
{
const int grid_size = std::min( (int) MAX_GRID_SIZE, (4*n+BLOCK_SIZE-1) / BLOCK_SIZE );
axpbypcz_kernel<BLOCK_SIZE><<<grid_size, BLOCK_SIZE, 0, ctx->get_stream(0)>>>(4*n, a, x, b, y, c, z, w);
CUDA_SAFE_CALL( cudaGetLastError() );
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void dot( Context *ctx, int n, const double *x, const double *y, double *res, double *wk )
{
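    // Allocate a temporary workspace for per-block partial sums when the caller does not provide one.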
bool local_wk = wk == NULL;
if( local_wk )
{
CUDA_SAFE_CALL( cudaMalloc((void**) &wk, BLOCK_SIZE*sizeof(double)) );
}
int grid_size = 0;
switch(ctx->dot)
{
case 0:
grid_size = std::min( (int) BLOCK_SIZE, (4*n+BLOCK_SIZE-1) / BLOCK_SIZE );
dot_kernel_v0<BLOCK_SIZE><<<grid_size, BLOCK_SIZE, 0, ctx->get_stream(0)>>>(4*n, x, y, wk);
CUDA_SAFE_CALL( cudaGetLastError() );
reduce_kernel<BLOCK_SIZE><<<1, BLOCK_SIZE, 0, ctx->get_stream(0)>>>(grid_size, wk, res);
CUDA_SAFE_CALL( cudaGetLastError() );
break;
case 1:
grid_size = std::min( (int) BLOCK_SIZE, (4*n+BLOCK_SIZE-1) / BLOCK_SIZE );
dot_kernel_v1<BLOCK_SIZE><<<grid_size, BLOCK_SIZE, 0, ctx->get_stream(0)>>>(4*n, x, y, wk);
CUDA_SAFE_CALL( cudaGetLastError() );
reduce_kernel<BLOCK_SIZE><<<1, BLOCK_SIZE, 0, ctx->get_stream(0)>>>(grid_size, wk, res);
CUDA_SAFE_CALL( cudaGetLastError() );
break;
default:
std::fprintf(stderr, "Invalid version for dot kernel=%d, valid values=[0,1]\n", ctx->dot);
std::exit(1);
}
if( local_wk )
{
CUDA_SAFE_CALL( cudaFree(wk) );
wk = NULL;
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
38455d878b0ad48d0658a02ad180c36a25cae855.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*!
* Copyright 2017-2018 XGBoost contributors
*/
#include <thrust/copy.h>
#include <thrust/functional.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/reduce.h>
#include <thrust/sequence.h>
#include <xgboost/tree_updater.h>
#include <algorithm>
#include <cmath>
#include <memory>
#include <limits>
#include <queue>
#include <utility>
#include <vector>
#include "../common/common.h"
#include "../common/compressed_iterator.h"
#include "../common/device_helpers.cuh"
#include "../common/hist_util.h"
#include "../common/host_device_vector.h"
#include "../common/timer.h"
#include "../common/span.h"
#include "param.h"
#include "updater_gpu_common.cuh"
namespace xgboost {
namespace tree {
DMLC_REGISTRY_FILE_TAG(updater_gpu_hist);
// training parameters specific to this algorithm
struct GPUHistMakerTrainParam
: public dmlc::Parameter<GPUHistMakerTrainParam> {
bool single_precision_histogram;
// number of rows in a single GPU batch
int gpu_batch_nrows;
// declare parameters
DMLC_DECLARE_PARAMETER(GPUHistMakerTrainParam) {
DMLC_DECLARE_FIELD(single_precision_histogram).set_default(false).describe(
"Use single precision to build histograms.");
DMLC_DECLARE_FIELD(gpu_batch_nrows)
.set_lower_bound(-1)
.set_default(0)
.describe("Number of rows in a GPU batch, used for finding quantiles on GPU; "
"-1 to use all rows assignted to a GPU, and 0 to auto-deduce");
}
};
DMLC_REGISTER_PARAMETER(GPUHistMakerTrainParam);
// With constraints
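// Computes the loss change of a split when missing values are sent to the left vs. the
// right child; returns the better of the two and records the chosen direction in
// missing_left_out.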
template <typename GradientPairT>
XGBOOST_DEVICE float inline LossChangeMissing(
const GradientPairT& scan, const GradientPairT& missing, const GradientPairT& parent_sum,
const float& parent_gain, const GPUTrainingParam& param, int constraint,
const ValueConstraint& value_constraint,
bool& missing_left_out) { // NOLINT
float missing_left_gain = value_constraint.CalcSplitGain(
param, constraint, GradStats(scan + missing),
GradStats(parent_sum - (scan + missing)));
float missing_right_gain = value_constraint.CalcSplitGain(
param, constraint, GradStats(scan), GradStats(parent_sum - scan));
if (missing_left_gain >= missing_right_gain) {
missing_left_out = true;
return missing_left_gain - parent_gain;
} else {
missing_left_out = false;
return missing_right_gain - parent_gain;
}
}
/*!
 * \brief Sum all histogram bins of a single feature across the thread block.
 *
 * \tparam ReduceT BlockReduce type.
 * \tparam TempStorageT cub shared-memory temporary storage type.
 *
 * \param feature_histogram Histogram bins for a single feature.
 * \param temp_storage Shared memory for intermediate results.
*/
template <int BLOCK_THREADS, typename ReduceT, typename TempStorageT, typename GradientSumT>
__device__ GradientSumT ReduceFeature(common::Span<const GradientSumT> feature_histogram,
TempStorageT* temp_storage) {
__shared__ cub::Uninitialized<GradientSumT> uninitialized_sum;
GradientSumT& shared_sum = uninitialized_sum.Alias();
GradientSumT local_sum = GradientSumT();
// For loop sums features into one block size
auto begin = feature_histogram.data();
auto end = begin + feature_histogram.size();
for (auto itr = begin; itr < end; itr += BLOCK_THREADS) {
bool thread_active = itr + threadIdx.x < end;
// Scan histogram
GradientSumT bin = thread_active ? *(itr + threadIdx.x) : GradientSumT();
local_sum += bin;
}
local_sum = ReduceT(temp_storage->sum_reduce).Reduce(local_sum, hipcub::Sum());
// Reduction result is stored in thread 0.
if (threadIdx.x == 0) {
shared_sum = local_sum;
}
__syncthreads();
return shared_sum;
}
/*! \brief Find the thread with best gain. */
template <int BLOCK_THREADS, typename ReduceT, typename scan_t,
typename MaxReduceT, typename TempStorageT, typename GradientSumT>
__device__ void EvaluateFeature(
int fidx,
common::Span<const GradientSumT> node_histogram,
common::Span<const uint32_t> feature_segments, // cut.row_ptr
float min_fvalue, // cut.min_value
common::Span<const float> gidx_fvalue_map, // cut.cut
DeviceSplitCandidate* best_split, // shared memory storing best split
const DeviceNodeStats& node, const GPUTrainingParam& param,
TempStorageT* temp_storage, // temp memory for cub operations
int constraint, // monotonic_constraints
const ValueConstraint& value_constraint) {
// Use pointer from cut to indicate begin and end of bins for each feature.
  uint32_t gidx_begin = feature_segments[fidx];  // beginning bin
uint32_t gidx_end = feature_segments[fidx + 1]; // end bin for i^th feature
// Sum histogram bins for current feature
GradientSumT const feature_sum = ReduceFeature<BLOCK_THREADS, ReduceT>(
node_histogram.subspan(gidx_begin, gidx_end - gidx_begin), temp_storage);
GradientSumT const parent_sum = GradientSumT(node.sum_gradients);
GradientSumT const missing = parent_sum - feature_sum;
float const null_gain = -std::numeric_limits<bst_float>::infinity();
SumCallbackOp<GradientSumT> prefix_op =
SumCallbackOp<GradientSumT>();
for (int scan_begin = gidx_begin; scan_begin < gidx_end;
scan_begin += BLOCK_THREADS) {
bool thread_active = (scan_begin + threadIdx.x) < gidx_end;
// Gradient value for current bin.
GradientSumT bin =
thread_active ? node_histogram[scan_begin + threadIdx.x] : GradientSumT();
scan_t(temp_storage->scan).ExclusiveScan(bin, bin, hipcub::Sum(), prefix_op);
// Whether the gradient of missing values is put to the left side.
bool missing_left = true;
float gain = null_gain;
if (thread_active) {
gain = LossChangeMissing(bin, missing, parent_sum, node.root_gain, param,
constraint, value_constraint, missing_left);
}
__syncthreads();
// Find thread with best gain
hipcub::KeyValuePair<int, float> tuple(threadIdx.x, gain);
hipcub::KeyValuePair<int, float> best =
MaxReduceT(temp_storage->max_reduce).Reduce(tuple, hipcub::ArgMax());
__shared__ hipcub::KeyValuePair<int, float> block_max;
if (threadIdx.x == 0) {
block_max = best;
}
__syncthreads();
// Best thread updates split
if (threadIdx.x == block_max.key) {
int gidx = scan_begin + threadIdx.x;
float fvalue =
gidx == gidx_begin ? min_fvalue : gidx_fvalue_map[gidx - 1];
GradientSumT left = missing_left ? bin + missing : bin;
GradientSumT right = parent_sum - left;
best_split->Update(gain, missing_left ? kLeftDir : kRightDir,
fvalue, fidx,
GradientPair(left),
GradientPair(right),
param);
}
__syncthreads();
}
}
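// One thread block evaluates the split candidates for a single (sampled) feature and
// records the best one in split_candidates[blockIdx.x].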
template <int BLOCK_THREADS, typename GradientSumT>
__global__ void EvaluateSplitKernel(
common::Span<const GradientSumT>
node_histogram, // histogram for gradients
common::Span<const int> feature_set, // Selected features
DeviceNodeStats node,
common::Span<const uint32_t>
        d_feature_segments, // row_ptr from HistCutMatrix
common::Span<const float> d_fidx_min_map, // min_value
common::Span<const float> d_gidx_fvalue_map, // cut
GPUTrainingParam gpu_param,
common::Span<DeviceSplitCandidate> split_candidates, // resulting split
ValueConstraint value_constraint,
common::Span<int> d_monotonic_constraints) {
// KeyValuePair here used as threadIdx.x -> gain_value
typedef hipcub::KeyValuePair<int, float> ArgMaxT;
typedef hipcub::BlockScan<
GradientSumT, BLOCK_THREADS, cub::BLOCK_SCAN_WARP_SCANS> BlockScanT;
typedef hipcub::BlockReduce<ArgMaxT, BLOCK_THREADS> MaxReduceT;
typedef hipcub::BlockReduce<GradientSumT, BLOCK_THREADS> SumReduceT;
union TempStorage {
typename BlockScanT::TempStorage scan;
typename MaxReduceT::TempStorage max_reduce;
typename SumReduceT::TempStorage sum_reduce;
};
// Aligned && shared storage for best_split
__shared__ cub::Uninitialized<DeviceSplitCandidate> uninitialized_split;
DeviceSplitCandidate& best_split = uninitialized_split.Alias();
__shared__ TempStorage temp_storage;
if (threadIdx.x == 0) {
best_split = DeviceSplitCandidate();
}
__syncthreads();
// One block for each feature. Features are sampled, so fidx != blockIdx.x
int fidx = feature_set[blockIdx.x];
int constraint = d_monotonic_constraints[fidx];
EvaluateFeature<BLOCK_THREADS, SumReduceT, BlockScanT, MaxReduceT>(
fidx, node_histogram,
d_feature_segments, d_fidx_min_map[fidx], d_gidx_fvalue_map,
&best_split, node, gpu_param, &temp_storage, constraint,
value_constraint);
__syncthreads();
if (threadIdx.x == 0) {
// Record best loss for each feature
split_candidates[blockIdx.x] = best_split;
}
}
// Find a gidx value for a given feature otherwise return -1 if not found
template <typename GidxIterT>
__device__ int BinarySearchRow(bst_uint begin, bst_uint end, GidxIterT data,
int const fidx_begin, int const fidx_end) {
bst_uint previous_middle = UINT32_MAX;
while (end != begin) {
auto middle = begin + (end - begin) / 2;
if (middle == previous_middle) {
break;
}
previous_middle = middle;
auto gidx = data[middle];
if (gidx >= fidx_begin && gidx < fidx_end) {
return gidx;
} else if (gidx < fidx_begin) {
begin = middle;
} else {
end = middle;
}
}
// Value is missing
return -1;
}
/**
* \struct DeviceHistogram
*
* \summary Data storage for node histograms on device. Automatically expands.
*
* \author Rory
* \date 28/07/2018
*/
template <typename GradientSumT>
struct DeviceHistogram {
/*! \brief Map nidx to starting index of its histogram. */
std::map<int, size_t> nidx_map;
thrust::device_vector<typename GradientSumT::ValueT> data;
const size_t kStopGrowingSize = 1 << 26; // Do not grow beyond this size
int n_bins;
int device_id_;
void Init(int device_id, int n_bins) {
this->n_bins = n_bins;
this->device_id_ = device_id;
}
void Reset() {
dh::safe_cuda(hipSetDevice(device_id_));
dh::safe_cuda(hipMemsetAsync(
data.data().get(), 0,
data.size() * sizeof(typename decltype(data)::value_type)));
nidx_map.clear();
}
bool HistogramExists(int nidx) {
return nidx_map.find(nidx) != nidx_map.end();
}
void AllocateHistogram(int nidx) {
if (HistogramExists(nidx)) return;
size_t current_size =
nidx_map.size() * n_bins * 2; // Number of items currently used in data
dh::safe_cuda(hipSetDevice(device_id_));
if (data.size() >= kStopGrowingSize) {
// Recycle histogram memory
std::pair<int, size_t> old_entry = *nidx_map.begin();
nidx_map.erase(old_entry.first);
dh::safe_cuda(hipMemsetAsync(data.data().get() + old_entry.second, 0,
n_bins * sizeof(GradientSumT)));
nidx_map[nidx] = old_entry.second;
} else {
// Append new node histogram
nidx_map[nidx] = current_size;
if (data.size() < current_size + n_bins * 2) {
size_t new_size = current_size * 2; // Double in size
new_size = ::max(static_cast<size_t>(n_bins * 2),
new_size); // Have at least one histogram
data.resize(new_size);
}
}
}
/**
* \summary Return pointer to histogram memory for a given node.
* \param nidx Tree node index.
* \return hist pointer.
*/
common::Span<GradientSumT> GetNodeHistogram(int nidx) {
CHECK(this->HistogramExists(nidx));
auto ptr = data.data().get() + nidx_map[nidx];
return common::Span<GradientSumT>(
reinterpret_cast<GradientSumT*>(ptr), n_bins);
}
};
struct CalcWeightTrainParam {
float min_child_weight;
float reg_alpha;
float reg_lambda;
float max_delta_step;
float learning_rate;
XGBOOST_DEVICE explicit CalcWeightTrainParam(const TrainParam& p)
: min_child_weight(p.min_child_weight),
reg_alpha(p.reg_alpha),
reg_lambda(p.reg_lambda),
max_delta_step(p.max_delta_step),
learning_rate(p.learning_rate) {}
};
// Bin each input data entry, store the bin indices in compressed form.
__global__ void compress_bin_ellpack_k(
common::CompressedBufferWriter wr,
common::CompressedByteT* __restrict__ buffer, // gidx_buffer
const size_t* __restrict__ row_ptrs, // row offset of input data
const Entry* __restrict__ entries, // One batch of input data
const float* __restrict__ cuts, // HistCutMatrix::cut
const uint32_t* __restrict__ cut_rows, // HistCutMatrix::row_ptrs
size_t base_row, // batch_row_begin
size_t n_rows,
// row_ptr_begin: row_offset[base_row], the start position of base_row
size_t row_ptr_begin,
size_t row_stride,
unsigned int null_gidx_value) {
size_t irow = threadIdx.x + blockIdx.x * blockDim.x;
int ifeature = threadIdx.y + blockIdx.y * blockDim.y;
if (irow >= n_rows || ifeature >= row_stride)
return;
int row_length = static_cast<int>(row_ptrs[irow + 1] - row_ptrs[irow]);
unsigned int bin = null_gidx_value;
if (ifeature < row_length) {
Entry entry = entries[row_ptrs[irow] - row_ptr_begin + ifeature];
int feature = entry.index;
float fvalue = entry.fvalue;
// {feature_cuts, ncuts} forms the array of cuts of `feature'.
const float *feature_cuts = &cuts[cut_rows[feature]];
int ncuts = cut_rows[feature + 1] - cut_rows[feature];
// Assigning the bin in current entry.
// S.t.: fvalue < feature_cuts[bin]
bin = dh::UpperBound(feature_cuts, ncuts, fvalue);
if (bin >= ncuts)
bin = ncuts - 1;
// Add the number of bins in previous features.
bin += cut_rows[feature];
}
// Write to gidx buffer.
wr.AtomicWriteSymbol(buffer, bin, (irow + base_row) * row_stride + ifeature);
}
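// Accumulate a node histogram in shared memory, then flush it to the global
// histogram with one atomic add per bin.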
template <typename GradientSumT>
__global__ void SharedMemHistKernel(size_t row_stride, const bst_uint* d_ridx,
common::CompressedIterator<uint32_t> d_gidx,
int null_gidx_value,
GradientSumT* d_node_hist,
const GradientPair* d_gpair,
size_t segment_begin, size_t n_elements) {
extern __shared__ char smem[];
GradientSumT* smem_arr = reinterpret_cast<GradientSumT*>(smem); // NOLINT
for (auto i : dh::BlockStrideRange(0, null_gidx_value)) {
smem_arr[i] = GradientSumT();
}
__syncthreads();
for (auto idx : dh::GridStrideRange(static_cast<size_t>(0), n_elements)) {
int ridx = d_ridx[idx / row_stride + segment_begin];
int gidx = d_gidx[ridx * row_stride + idx % row_stride];
if (gidx != null_gidx_value) {
AtomicAddGpair(smem_arr + gidx, d_gpair[ridx]);
}
}
__syncthreads();
for (auto i : dh::BlockStrideRange(0, null_gidx_value)) {
AtomicAddGpair(d_node_hist + i, smem_arr[i]);
}
}
struct Segment {
size_t begin;
size_t end;
Segment() : begin(0), end(0) {}
Segment(size_t begin, size_t end) : begin(begin), end(end) {
CHECK_GE(end, begin);
}
size_t Size() const { return end - begin; }
};
/** \brief Returns a one if the left node index is encountered, otherwise return
* zero. */
struct IndicateLeftTransform {
int left_nidx;
explicit IndicateLeftTransform(int left_nidx) : left_nidx(left_nidx) {}
__host__ __device__ __forceinline__ int operator()(const int& x) const {
return x == left_nidx ? 1 : 0;
}
};
/**
* \brief Optimised routine for sorting key value pairs into left and right
* segments. Based on a single pass of exclusive scan, uses iterators to
* redirect inputs and outputs.
*/
void SortPosition(dh::CubMemory* temp_memory, common::Span<int> position,
common::Span<int> position_out, common::Span<bst_uint> ridx,
common::Span<bst_uint> ridx_out, int left_nidx,
int right_nidx, int64_t left_count) {
auto d_position_out = position_out.data();
auto d_position_in = position.data();
auto d_ridx_out = ridx_out.data();
auto d_ridx_in = ridx.data();
auto write_results = [=] __device__(size_t idx, int ex_scan_result) {
int scatter_address;
if (d_position_in[idx] == left_nidx) {
scatter_address = ex_scan_result;
} else {
scatter_address = (idx - ex_scan_result) + left_count;
}
d_position_out[scatter_address] = d_position_in[idx];
d_ridx_out[scatter_address] = d_ridx_in[idx];
}; // NOLINT
IndicateLeftTransform conversion_op(left_nidx);
hipcub::TransformInputIterator<int, IndicateLeftTransform, int*> in_itr(
d_position_in, conversion_op);
dh::DiscardLambdaItr<decltype(write_results)> out_itr(write_results);
size_t temp_storage_bytes = 0;
hipcub::DeviceScan::ExclusiveSum(nullptr, temp_storage_bytes, in_itr, out_itr,
position.size());
temp_memory->LazyAllocate(temp_storage_bytes);
hipcub::DeviceScan::ExclusiveSum(temp_memory->d_temp_storage,
temp_memory->temp_storage_bytes, in_itr,
out_itr, position.size());
}
template <typename GradientSumT>
struct DeviceShard;
template <typename GradientSumT>
struct GPUHistBuilderBase {
public:
virtual void Build(DeviceShard<GradientSumT>* shard, int idx) = 0;
virtual ~GPUHistBuilderBase() = default;
};
// Manage memory for a single GPU
template <typename GradientSumT>
struct DeviceShard {
int device_id_;
dh::BulkAllocator<dh::MemoryType::kDevice> ba;
/*! \brief HistCutMatrix stored in device. */
struct DeviceHistCutMatrix {
  /*! \brief row_ptr from HistCutMatrix. */
dh::DVec<uint32_t> feature_segments;
/*! \brief minimum value for each feature. */
dh::DVec<bst_float> min_fvalue;
/*! \brief Cut. */
dh::DVec<bst_float> gidx_fvalue_map;
} cut_;
/*! \brief Range of rows for each node. */
std::vector<Segment> ridx_segments;
DeviceHistogram<GradientSumT> hist;
/*! \brief global index of histogram, which is stored in ELLPack format. */
dh::DVec<common::CompressedByteT> gidx_buffer;
/*! \brief row length for ELLPack. */
size_t row_stride;
common::CompressedIterator<uint32_t> gidx;
/*! \brief Row indices relative to this shard, necessary for sorting rows. */
dh::DVec2<bst_uint> ridx;
/*! \brief Gradient pair for each row. */
dh::DVec<GradientPair> gpair;
/*! \brief The last histogram index. */
int null_gidx_value;
dh::DVec2<int> position;
dh::DVec<int> monotone_constraints;
dh::DVec<bst_float> prediction_cache;
/*! \brief Sum gradient for each node. */
std::vector<GradientPair> node_sum_gradients;
dh::DVec<GradientPair> node_sum_gradients_d;
/*! \brief row offset in SparsePage (the input data). */
thrust::device_vector<size_t> row_ptrs;
/*! \brief On-device feature set, only actually used on one of the devices */
thrust::device_vector<int> feature_set_d;
/*! The row offset for this shard. */
bst_uint row_begin_idx;
bst_uint row_end_idx;
bst_uint n_rows;
int n_bins;
TrainParam param;
bool prediction_cache_initialised;
dh::CubMemory temp_memory;
std::unique_ptr<GPUHistBuilderBase<GradientSumT>> hist_builder;
  // TODO(canonizer): add support for multi-batch DMatrix here
DeviceShard(int device_id, bst_uint row_begin, bst_uint row_end,
TrainParam _param)
: device_id_(device_id),
row_begin_idx(row_begin),
row_end_idx(row_end),
row_stride(0),
n_rows(row_end - row_begin),
n_bins(0),
null_gidx_value(0),
param(_param),
prediction_cache_initialised(false) {}
/* Init row_ptrs and row_stride */
void InitRowPtrs(const SparsePage& row_batch) {
dh::safe_cuda(hipSetDevice(device_id_));
const auto& offset_vec = row_batch.offset.HostVector();
row_ptrs.resize(n_rows + 1);
thrust::copy(offset_vec.data() + row_begin_idx,
offset_vec.data() + row_end_idx + 1,
row_ptrs.begin());
auto row_iter = row_ptrs.begin();
// find the maximum row size for converting to ELLPack
auto get_size = [=] __device__(size_t row) {
return row_iter[row + 1] - row_iter[row];
}; // NOLINT
auto counting = thrust::make_counting_iterator(size_t(0));
using TransformT = thrust::transform_iterator<decltype(get_size),
decltype(counting), size_t>;
TransformT row_size_iter = TransformT(counting, get_size);
row_stride = thrust::reduce(row_size_iter, row_size_iter + n_rows, 0,
thrust::maximum<size_t>());
}
/*
Init:
n_bins, null_gidx_value, gidx_buffer, row_ptrs, gidx, gidx_fvalue_map,
min_fvalue, feature_segments, node_sum_gradients, ridx_segments,
hist
*/
void InitCompressedData(
const common::HistCutMatrix& hmat, const SparsePage& row_batch);
void CreateHistIndices(const SparsePage& row_batch);
~DeviceShard() {
}
// Reset values for each update iteration
void Reset(HostDeviceVector<GradientPair>* dh_gpair) {
dh::safe_cuda(hipSetDevice(device_id_));
position.CurrentDVec().Fill(0);
std::fill(node_sum_gradients.begin(), node_sum_gradients.end(),
GradientPair());
thrust::sequence(ridx.CurrentDVec().tbegin(), ridx.CurrentDVec().tend());
std::fill(ridx_segments.begin(), ridx_segments.end(), Segment(0, 0));
ridx_segments.front() = Segment(0, ridx.Size());
this->gpair.copy(dh_gpair->tcbegin(device_id_),
dh_gpair->tcend(device_id_));
SubsampleGradientPair(&gpair, param.subsample, row_begin_idx);
hist.Reset();
}
DeviceSplitCandidate EvaluateSplit(int nidx,
const std::vector<int>& feature_set,
ValueConstraint value_constraint) {
dh::safe_cuda(hipSetDevice(device_id_));
auto d_split_candidates = temp_memory.GetSpan<DeviceSplitCandidate>(feature_set.size());
feature_set_d.resize(feature_set.size());
auto d_features = common::Span<int>(feature_set_d.data().get(),
feature_set_d.size());
dh::safe_cuda(hipMemcpyAsync(d_features.data(), feature_set.data(),
d_features.size_bytes(), hipMemcpyDefault));
DeviceNodeStats node(node_sum_gradients[nidx], nidx, param);
// One block for each feature
int constexpr BLOCK_THREADS = 256;
hipLaunchKernelGGL(( EvaluateSplitKernel<BLOCK_THREADS, GradientSumT>)
, dim3(uint32_t(feature_set.size())), dim3(BLOCK_THREADS), 0, 0,
hist.GetNodeHistogram(nidx), d_features, node,
cut_.feature_segments.GetSpan(), cut_.min_fvalue.GetSpan(),
cut_.gidx_fvalue_map.GetSpan(), GPUTrainingParam(param),
d_split_candidates, value_constraint,
monotone_constraints.GetSpan());
std::vector<DeviceSplitCandidate> split_candidates(feature_set.size());
dh::safe_cuda(hipMemcpy(split_candidates.data(), d_split_candidates.data(),
split_candidates.size() * sizeof(DeviceSplitCandidate),
hipMemcpyDeviceToHost));
DeviceSplitCandidate best_split;
for (auto candidate : split_candidates) {
best_split.Update(candidate, param);
}
return best_split;
}
void BuildHist(int nidx) {
hist.AllocateHistogram(nidx);
hist_builder->Build(this, nidx);
}
void SubtractionTrick(int nidx_parent, int nidx_histogram,
int nidx_subtraction) {
auto d_node_hist_parent = hist.GetNodeHistogram(nidx_parent);
auto d_node_hist_histogram = hist.GetNodeHistogram(nidx_histogram);
auto d_node_hist_subtraction = hist.GetNodeHistogram(nidx_subtraction);
dh::LaunchN(device_id_, hist.n_bins, [=] __device__(size_t idx) {
d_node_hist_subtraction[idx] =
d_node_hist_parent[idx] - d_node_hist_histogram[idx];
});
}
bool CanDoSubtractionTrick(int nidx_parent, int nidx_histogram,
int nidx_subtraction) {
// Make sure histograms are already allocated
hist.AllocateHistogram(nidx_subtraction);
return hist.HistogramExists(nidx_histogram) &&
hist.HistogramExists(nidx_parent);
}
void UpdatePosition(int nidx, int left_nidx, int right_nidx, int fidx,
int64_t split_gidx, bool default_dir_left, bool is_dense,
int fidx_begin, // cut.row_ptr[fidx]
int fidx_end) { // cut.row_ptr[fidx + 1]
dh::safe_cuda(hipSetDevice(device_id_));
Segment segment = ridx_segments[nidx];
bst_uint* d_ridx = ridx.Current();
int* d_position = position.Current();
common::CompressedIterator<uint32_t> d_gidx = gidx;
size_t row_stride = this->row_stride;
// Launch 1 thread for each row
dh::LaunchN<1, 128>(
device_id_, segment.Size(), [=] __device__(bst_uint idx) {
idx += segment.begin;
bst_uint ridx = d_ridx[idx];
auto row_begin = row_stride * ridx;
auto row_end = row_begin + row_stride;
auto gidx = -1;
if (is_dense) {
// FIXME: Maybe just search the cuts again.
gidx = d_gidx[row_begin + fidx];
} else {
gidx = BinarySearchRow(row_begin, row_end, d_gidx, fidx_begin,
fidx_end);
}
// belong to left node or right node.
int position;
if (gidx >= 0) {
// Feature is found
position = gidx <= split_gidx ? left_nidx : right_nidx;
} else {
// Feature is missing
position = default_dir_left ? left_nidx : right_nidx;
}
d_position[idx] = position;
});
IndicateLeftTransform conversion_op(left_nidx);
hipcub::TransformInputIterator<int, IndicateLeftTransform, int*> left_itr(
d_position + segment.begin, conversion_op);
int left_count = dh::SumReduction(temp_memory, left_itr, segment.Size());
CHECK_LE(left_count, segment.Size());
CHECK_GE(left_count, 0);
SortPositionAndCopy(segment, left_nidx, right_nidx, left_count);
ridx_segments[left_nidx] =
Segment(segment.begin, segment.begin + left_count);
ridx_segments[right_nidx] =
Segment(segment.begin + left_count, segment.end);
}
/*! \brief Sort row indices according to position. */
void SortPositionAndCopy(const Segment& segment, int left_nidx, int right_nidx,
size_t left_count) {
SortPosition(
&temp_memory,
common::Span<int>(position.Current() + segment.begin, segment.Size()),
common::Span<int>(position.other() + segment.begin, segment.Size()),
common::Span<bst_uint>(ridx.Current() + segment.begin, segment.Size()),
common::Span<bst_uint>(ridx.other() + segment.begin, segment.Size()),
left_nidx, right_nidx, left_count);
// Copy back key/value
const auto d_position_current = position.Current() + segment.begin;
const auto d_position_other = position.other() + segment.begin;
const auto d_ridx_current = ridx.Current() + segment.begin;
const auto d_ridx_other = ridx.other() + segment.begin;
dh::LaunchN(device_id_, segment.Size(), [=] __device__(size_t idx) {
d_position_current[idx] = d_position_other[idx];
d_ridx_current[idx] = d_ridx_other[idx];
});
}
void UpdatePredictionCache(bst_float* out_preds_d) {
dh::safe_cuda(hipSetDevice(device_id_));
if (!prediction_cache_initialised) {
dh::safe_cuda(hipMemcpyAsync(
prediction_cache.Data(), out_preds_d,
prediction_cache.Size() * sizeof(bst_float), hipMemcpyDefault));
}
prediction_cache_initialised = true;
CalcWeightTrainParam param_d(param);
dh::safe_cuda(hipMemcpyAsync(node_sum_gradients_d.Data(),
node_sum_gradients.data(),
sizeof(GradientPair) * node_sum_gradients.size(),
hipMemcpyHostToDevice));
auto d_position = position.Current();
auto d_ridx = ridx.Current();
auto d_node_sum_gradients = node_sum_gradients_d.Data();
auto d_prediction_cache = prediction_cache.Data();
dh::LaunchN(
device_id_, prediction_cache.Size(), [=] __device__(int local_idx) {
int pos = d_position[local_idx];
bst_float weight = CalcWeight(param_d, d_node_sum_gradients[pos]);
d_prediction_cache[d_ridx[local_idx]] +=
weight * param_d.learning_rate;
});
dh::safe_cuda(hipMemcpy(
out_preds_d, prediction_cache.Data(),
prediction_cache.Size() * sizeof(bst_float), hipMemcpyDefault));
}
};
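// Histogram builder that accumulates per-node histograms in shared memory when they fit.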
template <typename GradientSumT>
struct SharedMemHistBuilder : public GPUHistBuilderBase<GradientSumT> {
void Build(DeviceShard<GradientSumT>* shard, int nidx) override {
auto segment = shard->ridx_segments[nidx];
auto segment_begin = segment.begin;
auto d_node_hist = shard->hist.GetNodeHistogram(nidx);
auto d_gidx = shard->gidx;
auto d_ridx = shard->ridx.Current();
auto d_gpair = shard->gpair.Data();
int null_gidx_value = shard->null_gidx_value;
auto n_elements = segment.Size() * shard->row_stride;
const size_t smem_size = sizeof(GradientSumT) * shard->null_gidx_value;
const int items_per_thread = 8;
const int block_threads = 256;
const int grid_size =
static_cast<int>(dh::DivRoundUp(n_elements,
items_per_thread * block_threads));
if (grid_size <= 0) {
return;
}
dh::safe_cuda(hipSetDevice(shard->device_id_));
hipLaunchKernelGGL(( SharedMemHistKernel), dim3(grid_size), dim3(block_threads), smem_size, 0,
shard->row_stride, d_ridx, d_gidx, null_gidx_value, d_node_hist.data(), d_gpair,
segment_begin, n_elements);
}
};
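// Fallback histogram builder that accumulates directly into global memory with atomics.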
template <typename GradientSumT>
struct GlobalMemHistBuilder : public GPUHistBuilderBase<GradientSumT> {
void Build(DeviceShard<GradientSumT>* shard, int nidx) override {
Segment segment = shard->ridx_segments[nidx];
auto d_node_hist = shard->hist.GetNodeHistogram(nidx).data();
common::CompressedIterator<uint32_t> d_gidx = shard->gidx;
bst_uint* d_ridx = shard->ridx.Current();
GradientPair* d_gpair = shard->gpair.Data();
size_t const n_elements = segment.Size() * shard->row_stride;
size_t const row_stride = shard->row_stride;
int const null_gidx_value = shard->null_gidx_value;
dh::LaunchN(shard->device_id_, n_elements, [=] __device__(size_t idx) {
int ridx = d_ridx[(idx / row_stride) + segment.begin];
// lookup the index (bin) of histogram.
int gidx = d_gidx[ridx * row_stride + idx % row_stride];
if (gidx != null_gidx_value) {
AtomicAddGpair(d_node_hist + gidx, d_gpair[ridx]);
}
});
}
};
template <typename GradientSumT>
inline void DeviceShard<GradientSumT>::InitCompressedData(
const common::HistCutMatrix& hmat, const SparsePage& row_batch) {
n_bins = hmat.row_ptr.back();
null_gidx_value = hmat.row_ptr.back();
int max_nodes =
param.max_leaves > 0 ? param.max_leaves * 2 : MaxNodesDepth(param.max_depth);
ba.Allocate(device_id_,
&gpair, n_rows,
&ridx, n_rows,
&position, n_rows,
&prediction_cache, n_rows,
&node_sum_gradients_d, max_nodes,
&cut_.feature_segments, hmat.row_ptr.size(),
&cut_.gidx_fvalue_map, hmat.cut.size(),
&cut_.min_fvalue, hmat.min_val.size(),
&monotone_constraints, param.monotone_constraints.size());
cut_.gidx_fvalue_map = hmat.cut;
cut_.min_fvalue = hmat.min_val;
cut_.feature_segments = hmat.row_ptr;
monotone_constraints = param.monotone_constraints;
node_sum_gradients.resize(max_nodes);
ridx_segments.resize(max_nodes);
dh::safe_cuda(hipSetDevice(device_id_));
// allocate compressed bin data
int num_symbols = n_bins + 1;
// Required buffer size for storing data matrix in ELLPack format.
size_t compressed_size_bytes =
common::CompressedBufferWriter::CalculateBufferSize(row_stride * n_rows,
num_symbols);
CHECK(!(param.max_leaves == 0 && param.max_depth == 0))
<< "Max leaves and max depth cannot both be unconstrained for "
"gpu_hist.";
ba.Allocate(device_id_, &gidx_buffer, compressed_size_bytes);
gidx_buffer.Fill(0);
int nbits = common::detail::SymbolBits(num_symbols);
CreateHistIndices(row_batch);
gidx = common::CompressedIterator<uint32_t>(gidx_buffer.Data(), num_symbols);
// check if we can use shared memory for building histograms
  // (assuming we need at least 2 CTAs per SM to maintain decent latency hiding)
auto histogram_size = sizeof(GradientSumT) * null_gidx_value;
auto max_smem = dh::MaxSharedMemory(device_id_);
if (histogram_size <= max_smem) {
hist_builder.reset(new SharedMemHistBuilder<GradientSumT>);
} else {
hist_builder.reset(new GlobalMemHistBuilder<GradientSumT>);
}
// Init histogram
hist.Init(device_id_, hmat.row_ptr.back());
}
template <typename GradientSumT>
inline void DeviceShard<GradientSumT>::CreateHistIndices(const SparsePage& row_batch) {
int num_symbols = n_bins + 1;
// bin and compress entries in batches of rows
size_t gpu_batch_nrows =
std::min
(dh::TotalMemory(device_id_) / (16 * row_stride * sizeof(Entry)),
static_cast<size_t>(n_rows));
const std::vector<Entry>& data_vec = row_batch.data.HostVector();
thrust::device_vector<Entry> entries_d(gpu_batch_nrows * row_stride);
size_t gpu_nbatches = dh::DivRoundUp(n_rows, gpu_batch_nrows);
for (size_t gpu_batch = 0; gpu_batch < gpu_nbatches; ++gpu_batch) {
size_t batch_row_begin = gpu_batch * gpu_batch_nrows;
size_t batch_row_end = (gpu_batch + 1) * gpu_batch_nrows;
if (batch_row_end > n_rows) {
batch_row_end = n_rows;
}
size_t batch_nrows = batch_row_end - batch_row_begin;
// number of entries in this batch.
size_t n_entries = row_ptrs[batch_row_end] - row_ptrs[batch_row_begin];
// copy data entries to device.
dh::safe_cuda
(hipMemcpy
(entries_d.data().get(), data_vec.data() + row_ptrs[batch_row_begin],
n_entries * sizeof(Entry), hipMemcpyDefault));
const dim3 block3(32, 8, 1); // 256 threads
const dim3 grid3(dh::DivRoundUp(n_rows, block3.x),
dh::DivRoundUp(row_stride, block3.y), 1);
hipLaunchKernelGGL(( compress_bin_ellpack_k), dim3(grid3), dim3(block3), 0, 0,
common::CompressedBufferWriter(num_symbols),
gidx_buffer.Data(),
row_ptrs.data().get() + batch_row_begin,
entries_d.data().get(),
cut_.gidx_fvalue_map.Data(), cut_.feature_segments.Data(),
batch_row_begin, batch_nrows,
row_ptrs[batch_row_begin],
row_stride, null_gidx_value);
}
// free the memory that is no longer needed
row_ptrs.resize(0);
row_ptrs.shrink_to_fit();
entries_d.resize(0);
entries_d.shrink_to_fit();
}
template <typename GradientSumT>
class GPUHistMakerSpecialised {
public:
struct ExpandEntry;
GPUHistMakerSpecialised() : initialised_(false), p_last_fmat_(nullptr) {}
void Init(
const std::vector<std::pair<std::string, std::string>>& args) {
param_.InitAllowUnknown(args);
hist_maker_param_.InitAllowUnknown(args);
CHECK(param_.n_gpus != 0) << "Must have at least one device";
n_devices_ = param_.n_gpus;
dist_ = GPUDistribution::Block(GPUSet::All(param_.gpu_id, param_.n_gpus));
dh::CheckComputeCapability();
if (param_.grow_policy == TrainParam::kLossGuide) {
qexpand_.reset(new ExpandQueue(LossGuide));
} else {
qexpand_.reset(new ExpandQueue(DepthWise));
}
monitor_.Init("updater_gpu_hist");
}
void Update(HostDeviceVector<GradientPair>* gpair, DMatrix* dmat,
const std::vector<RegTree*>& trees) {
monitor_.StartCuda("Update");
// rescale learning rate according to size of trees
float lr = param_.learning_rate;
param_.learning_rate = lr / trees.size();
ValueConstraint::Init(¶m_, dmat->Info().num_col_);
// build tree
try {
for (size_t i = 0; i < trees.size(); ++i) {
this->UpdateTree(gpair, dmat, trees[i]);
}
dh::safe_cuda(hipGetLastError());
} catch (const std::exception& e) {
LOG(FATAL) << "Exception in gpu_hist: " << e.what() << std::endl;
}
param_.learning_rate = lr;
monitor_.StopCuda("Update");
}
void InitDataOnce(DMatrix* dmat) {
info_ = &dmat->Info();
int n_devices = dist_.Devices().Size();
device_list_.resize(n_devices);
for (int index = 0; index < n_devices; ++index) {
int device_id = dist_.Devices().DeviceId(index);
device_list_[index] = device_id;
}
reducer_.Init(device_list_);
auto batch_iter = dmat->GetRowBatches().begin();
const SparsePage& batch = *batch_iter;
// Create device shards
shards_.resize(n_devices);
dh::ExecuteIndexShards(&shards_, [&](int i, std::unique_ptr<DeviceShard<GradientSumT>>& shard) {
size_t start = dist_.ShardStart(info_->num_row_, i);
size_t size = dist_.ShardSize(info_->num_row_, i);
shard = std::unique_ptr<DeviceShard<GradientSumT>>
(new DeviceShard<GradientSumT>(dist_.Devices().DeviceId(i),
start, start + size, param_));
shard->InitRowPtrs(batch);
});
// Find the cuts.
monitor_.StartCuda("Quantiles");
common::DeviceSketch(batch, *info_, param_, &hmat_, hist_maker_param_.gpu_batch_nrows);
n_bins_ = hmat_.row_ptr.back();
monitor_.StopCuda("Quantiles");
monitor_.StartCuda("BinningCompression");
dh::ExecuteIndexShards(&shards_, [&](int idx,
std::unique_ptr<DeviceShard<GradientSumT>>& shard) {
shard->InitCompressedData(hmat_, batch);
});
monitor_.StopCuda("BinningCompression");
++batch_iter;
CHECK(batch_iter.AtEnd()) << "External memory not supported";
p_last_fmat_ = dmat;
initialised_ = true;
}
void InitData(HostDeviceVector<GradientPair>* gpair, DMatrix* dmat) {
if (!initialised_) {
monitor_.StartCuda("InitDataOnce");
this->InitDataOnce(dmat);
monitor_.StopCuda("InitDataOnce");
}
column_sampler_.Init(info_->num_col_, param_.colsample_bynode,
param_.colsample_bylevel, param_.colsample_bytree);
// Copy gpair & reset memory
monitor_.StartCuda("InitDataReset");
gpair->Reshard(dist_);
dh::ExecuteIndexShards(
&shards_,
[&](int idx, std::unique_ptr<DeviceShard<GradientSumT>>& shard) {
shard->Reset(gpair);
});
monitor_.StopCuda("InitDataReset");
}
void AllReduceHist(int nidx) {
if (shards_.size() == 1 && !rabit::IsDistributed()) return;
monitor_.StartCuda("AllReduce");
reducer_.GroupStart();
for (auto& shard : shards_) {
auto d_node_hist = shard->hist.GetNodeHistogram(nidx).data();
reducer_.AllReduceSum(
dist_.Devices().Index(shard->device_id_),
reinterpret_cast<typename GradientSumT::ValueT*>(d_node_hist),
reinterpret_cast<typename GradientSumT::ValueT*>(d_node_hist),
n_bins_ * (sizeof(GradientSumT) / sizeof(typename GradientSumT::ValueT)));
}
reducer_.GroupEnd();
reducer_.Synchronize();
monitor_.StopCuda("AllReduce");
}
/**
* \brief Build GPU local histograms for the left and right child of some parent node
*/
void BuildHistLeftRight(int nidx_parent, int nidx_left, int nidx_right) {
size_t left_node_max_elements = 0;
size_t right_node_max_elements = 0;
for (auto& shard : shards_) {
left_node_max_elements = (std::max)(
left_node_max_elements, shard->ridx_segments[nidx_left].Size());
right_node_max_elements = (std::max)(
right_node_max_elements, shard->ridx_segments[nidx_right].Size());
}
rabit::Allreduce<rabit::op::Max, size_t>(&left_node_max_elements, 1);
rabit::Allreduce<rabit::op::Max, size_t>(&right_node_max_elements, 1);
auto build_hist_nidx = nidx_left;
auto subtraction_trick_nidx = nidx_right;
if (right_node_max_elements < left_node_max_elements) {
build_hist_nidx = nidx_right;
subtraction_trick_nidx = nidx_left;
}
// Build histogram for node with the smallest number of training examples
dh::ExecuteIndexShards(
&shards_,
[&](int idx, std::unique_ptr<DeviceShard<GradientSumT>>& shard) {
shard->BuildHist(build_hist_nidx);
});
this->AllReduceHist(build_hist_nidx);
// Check whether we can use the subtraction trick to calculate the other
bool do_subtraction_trick = true;
for (auto& shard : shards_) {
do_subtraction_trick &= shard->CanDoSubtractionTrick(
nidx_parent, build_hist_nidx, subtraction_trick_nidx);
}
if (do_subtraction_trick) {
// Calculate other histogram using subtraction trick
dh::ExecuteIndexShards(
&shards_,
[&](int idx, std::unique_ptr<DeviceShard<GradientSumT>>& shard) {
shard->SubtractionTrick(nidx_parent, build_hist_nidx,
subtraction_trick_nidx);
});
} else {
// Calculate other histogram manually
dh::ExecuteIndexShards(
&shards_,
[&](int idx, std::unique_ptr<DeviceShard<GradientSumT>>& shard) {
shard->BuildHist(subtraction_trick_nidx);
});
this->AllReduceHist(subtraction_trick_nidx);
}
}
DeviceSplitCandidate EvaluateSplit(int nidx, RegTree* p_tree) {
return shards_.front()->EvaluateSplit(
nidx, *column_sampler_.GetFeatureSet(p_tree->GetDepth(nidx)),
node_value_constraints_[nidx]);
}
void InitRoot(RegTree* p_tree) {
constexpr int root_nidx = 0;
// Sum gradients
std::vector<GradientPair> tmp_sums(shards_.size());
dh::ExecuteIndexShards(
&shards_,
[&](int i, std::unique_ptr<DeviceShard<GradientSumT>>& shard) {
dh::safe_cuda(hipSetDevice(shard->device_id_));
tmp_sums[i] = dh::SumReduction(
shard->temp_memory, shard->gpair.Data(), shard->gpair.Size());
});
GradientPair sum_gradient =
std::accumulate(tmp_sums.begin(), tmp_sums.end(), GradientPair());
rabit::Allreduce<rabit::op::Sum>((GradientPair::ValueT*)&sum_gradient, 2);
// Generate root histogram
dh::ExecuteIndexShards(
&shards_,
[&](int idx, std::unique_ptr<DeviceShard<GradientSumT>>& shard) {
shard->BuildHist(root_nidx);
});
this->AllReduceHist(root_nidx);
// Remember root stats
p_tree->Stat(root_nidx).sum_hess = sum_gradient.GetHess();
auto weight = CalcWeight(param_, sum_gradient);
p_tree->Stat(root_nidx).base_weight = weight;
(*p_tree)[root_nidx].SetLeaf(param_.learning_rate * weight);
// Store sum gradients
for (auto& shard : shards_) {
shard->node_sum_gradients[root_nidx] = sum_gradient;
}
// Initialise root constraint
node_value_constraints_.resize(p_tree->GetNodes().size());
// Generate first split
auto split = this->EvaluateSplit(root_nidx, p_tree);
qexpand_->push(
ExpandEntry(root_nidx, p_tree->GetDepth(root_nidx), split, 0));
}
void UpdatePosition(const ExpandEntry& candidate, RegTree* p_tree) {
int nidx = candidate.nid;
int left_nidx = (*p_tree)[nidx].LeftChild();
int right_nidx = (*p_tree)[nidx].RightChild();
// convert floating-point split_pt into corresponding bin_id
// split_cond = -1 indicates that split_pt is less than all known cut points
int64_t split_gidx = -1;
int64_t fidx = candidate.split.findex;
bool default_dir_left = candidate.split.dir == kLeftDir;
uint32_t fidx_begin = hmat_.row_ptr[fidx];
uint32_t fidx_end = hmat_.row_ptr[fidx + 1];
// split_gidx = i where i is the i^th bin containing split value.
for (auto i = fidx_begin; i < fidx_end; ++i) {
if (candidate.split.fvalue == hmat_.cut[i]) {
split_gidx = static_cast<int64_t>(i);
}
}
auto is_dense = info_->num_nonzero_ == info_->num_row_ * info_->num_col_;
dh::ExecuteIndexShards(
&shards_,
[&](int idx, std::unique_ptr<DeviceShard<GradientSumT>>& shard) {
shard->UpdatePosition(nidx, left_nidx, right_nidx, fidx, split_gidx,
default_dir_left, is_dense, fidx_begin,
fidx_end);
});
}
void ApplySplit(const ExpandEntry& candidate, RegTree* p_tree) {
RegTree& tree = *p_tree;
GradStats left_stats;
left_stats.Add(candidate.split.left_sum);
GradStats right_stats;
right_stats.Add(candidate.split.right_sum);
GradStats parent_sum;
parent_sum.Add(left_stats);
parent_sum.Add(right_stats);
node_value_constraints_.resize(tree.GetNodes().size());
auto base_weight = node_value_constraints_[candidate.nid].CalcWeight(param_, parent_sum);
auto left_weight =
node_value_constraints_[candidate.nid].CalcWeight(param_, left_stats)*param_.learning_rate;
auto right_weight =
node_value_constraints_[candidate.nid].CalcWeight(param_, right_stats)*param_.learning_rate;
tree.ExpandNode(candidate.nid, candidate.split.findex,
candidate.split.fvalue, candidate.split.dir == kLeftDir,
base_weight, left_weight, right_weight,
candidate.split.loss_chg, parent_sum.sum_hess);
// Set up child constraints
node_value_constraints_.resize(tree.GetNodes().size());
node_value_constraints_[candidate.nid].SetChild(
param_, tree[candidate.nid].SplitIndex(), left_stats, right_stats,
&node_value_constraints_[tree[candidate.nid].LeftChild()],
&node_value_constraints_[tree[candidate.nid].RightChild()]);
// Store sum gradients
for (auto& shard : shards_) {
shard->node_sum_gradients[tree[candidate.nid].LeftChild()] = candidate.split.left_sum;
shard->node_sum_gradients[tree[candidate.nid].RightChild()] = candidate.split.right_sum;
}
}
void UpdateTree(HostDeviceVector<GradientPair>* gpair, DMatrix* p_fmat,
RegTree* p_tree) {
auto& tree = *p_tree;
monitor_.StartCuda("InitData");
this->InitData(gpair, p_fmat);
monitor_.StopCuda("InitData");
monitor_.StartCuda("InitRoot");
this->InitRoot(p_tree);
monitor_.StopCuda("InitRoot");
auto timestamp = qexpand_->size();
auto num_leaves = 1;
while (!qexpand_->empty()) {
ExpandEntry candidate = qexpand_->top();
qexpand_->pop();
if (!candidate.IsValid(param_, num_leaves)) continue;
this->ApplySplit(candidate, p_tree);
monitor_.StartCuda("UpdatePosition");
this->UpdatePosition(candidate, p_tree);
monitor_.StopCuda("UpdatePosition");
num_leaves++;
int left_child_nidx = tree[candidate.nid].LeftChild();
int right_child_nidx = tree[candidate.nid].RightChild();
// Only create child entries if needed
if (ExpandEntry::ChildIsValid(param_, tree.GetDepth(left_child_nidx),
num_leaves)) {
monitor_.StartCuda("BuildHist");
this->BuildHistLeftRight(candidate.nid, left_child_nidx,
right_child_nidx);
monitor_.StopCuda("BuildHist");
monitor_.StartCuda("EvaluateSplits");
auto left_child_split = this->EvaluateSplit(left_child_nidx, p_tree);
auto right_child_split = this->EvaluateSplit(right_child_nidx, p_tree);
qexpand_->push(ExpandEntry(left_child_nidx,
tree.GetDepth(left_child_nidx),
left_child_split, timestamp++));
qexpand_->push(ExpandEntry(right_child_nidx,
tree.GetDepth(right_child_nidx),
right_child_split, timestamp++));
monitor_.StopCuda("EvaluateSplits");
}
}
}
bool UpdatePredictionCache(
const DMatrix* data, HostDeviceVector<bst_float>* p_out_preds) {
if (shards_.empty() || p_last_fmat_ == nullptr || p_last_fmat_ != data)
return false;
monitor_.StartCuda("UpdatePredictionCache");
p_out_preds->Reshard(dist_.Devices());
dh::ExecuteIndexShards(
&shards_,
[&](int idx, std::unique_ptr<DeviceShard<GradientSumT>>& shard) {
shard->UpdatePredictionCache(
p_out_preds->DevicePointer(shard->device_id_));
});
monitor_.StopCuda("UpdatePredictionCache");
return true;
}
struct ExpandEntry {
int nid;
int depth;
DeviceSplitCandidate split;
uint64_t timestamp;
ExpandEntry(int nid, int depth, const DeviceSplitCandidate& split,
uint64_t timestamp)
: nid(nid), depth(depth), split(split), timestamp(timestamp) {}
bool IsValid(const TrainParam& param, int num_leaves) const {
if (split.loss_chg <= kRtEps) return false;
if (split.left_sum.GetHess() == 0 || split.right_sum.GetHess() == 0)
return false;
if (param.max_depth > 0 && depth == param.max_depth) return false;
if (param.max_leaves > 0 && num_leaves == param.max_leaves) return false;
return true;
}
static bool ChildIsValid(const TrainParam& param, int depth,
int num_leaves) {
if (param.max_depth > 0 && depth >= param.max_depth) return false;
if (param.max_leaves > 0 && num_leaves >= param.max_leaves) return false;
return true;
}
friend std::ostream& operator<<(std::ostream& os, const ExpandEntry& e) {
os << "ExpandEntry: \n";
os << "nidx: " << e.nid << "\n";
os << "depth: " << e.depth << "\n";
os << "loss: " << e.split.loss_chg << "\n";
os << "left_sum: " << e.split.left_sum << "\n";
os << "right_sum: " << e.split.right_sum << "\n";
return os;
}
};
inline static bool DepthWise(ExpandEntry lhs, ExpandEntry rhs) {
if (lhs.depth == rhs.depth) {
return lhs.timestamp > rhs.timestamp; // favor small timestamp
} else {
return lhs.depth > rhs.depth; // favor small depth
}
}
inline static bool LossGuide(ExpandEntry lhs, ExpandEntry rhs) {
if (lhs.split.loss_chg == rhs.split.loss_chg) {
return lhs.timestamp > rhs.timestamp; // favor small timestamp
} else {
return lhs.split.loss_chg < rhs.split.loss_chg; // favor large loss_chg
}
}
TrainParam param_;
GPUHistMakerTrainParam hist_maker_param_;
common::HistCutMatrix hmat_;
common::GHistIndexMatrix gmat_;
MetaInfo* info_;
bool initialised_;
int n_devices_;
int n_bins_;
std::vector<std::unique_ptr<DeviceShard<GradientSumT>>> shards_;
common::ColumnSampler column_sampler_;
using ExpandQueue = std::priority_queue<ExpandEntry, std::vector<ExpandEntry>,
std::function<bool(ExpandEntry, ExpandEntry)>>;
std::unique_ptr<ExpandQueue> qexpand_;
common::Monitor monitor_;
dh::AllReducer reducer_;
std::vector<ValueConstraint> node_value_constraints_;
/*! List storing device id. */
std::vector<int> device_list_;
DMatrix* p_last_fmat_;
GPUDistribution dist_;
};
class GPUHistMaker : public TreeUpdater {
public:
void Init(
const std::vector<std::pair<std::string, std::string>>& args) override {
hist_maker_param_.InitAllowUnknown(args);
float_maker_.reset();
double_maker_.reset();
if (hist_maker_param_.single_precision_histogram) {
float_maker_.reset(new GPUHistMakerSpecialised<GradientPair>());
float_maker_->Init(args);
} else {
double_maker_.reset(new GPUHistMakerSpecialised<GradientPairPrecise>());
double_maker_->Init(args);
}
}
void Update(HostDeviceVector<GradientPair>* gpair, DMatrix* dmat,
const std::vector<RegTree*>& trees) override {
if (hist_maker_param_.single_precision_histogram) {
float_maker_->Update(gpair, dmat, trees);
} else {
double_maker_->Update(gpair, dmat, trees);
}
}
bool UpdatePredictionCache(
const DMatrix* data, HostDeviceVector<bst_float>* p_out_preds) override {
if (hist_maker_param_.single_precision_histogram) {
return float_maker_->UpdatePredictionCache(data, p_out_preds);
} else {
return double_maker_->UpdatePredictionCache(data, p_out_preds);
}
}
private:
GPUHistMakerTrainParam hist_maker_param_;
std::unique_ptr<GPUHistMakerSpecialised<GradientPair>> float_maker_;
std::unique_ptr<GPUHistMakerSpecialised<GradientPairPrecise>> double_maker_;
};
XGBOOST_REGISTER_TREE_UPDATER(GPUHistMaker, "grow_gpu_hist")
.describe("Grow tree with GPU.")
.set_body([]() { return new GPUHistMaker(); });
} // namespace tree
} // namespace xgboost
|
38455d878b0ad48d0658a02ad180c36a25cae855.cu
|
/*!
* Copyright 2017-2018 XGBoost contributors
*/
#include <thrust/copy.h>
#include <thrust/functional.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/reduce.h>
#include <thrust/sequence.h>
#include <xgboost/tree_updater.h>
#include <algorithm>
#include <cmath>
#include <memory>
#include <limits>
#include <queue>
#include <utility>
#include <vector>
#include "../common/common.h"
#include "../common/compressed_iterator.h"
#include "../common/device_helpers.cuh"
#include "../common/hist_util.h"
#include "../common/host_device_vector.h"
#include "../common/timer.h"
#include "../common/span.h"
#include "param.h"
#include "updater_gpu_common.cuh"
namespace xgboost {
namespace tree {
DMLC_REGISTRY_FILE_TAG(updater_gpu_hist);
// training parameters specific to this algorithm
struct GPUHistMakerTrainParam
: public dmlc::Parameter<GPUHistMakerTrainParam> {
bool single_precision_histogram;
// number of rows in a single GPU batch
int gpu_batch_nrows;
// declare parameters
DMLC_DECLARE_PARAMETER(GPUHistMakerTrainParam) {
DMLC_DECLARE_FIELD(single_precision_histogram).set_default(false).describe(
"Use single precision to build histograms.");
DMLC_DECLARE_FIELD(gpu_batch_nrows)
.set_lower_bound(-1)
.set_default(0)
.describe("Number of rows in a GPU batch, used for finding quantiles on GPU; "
"-1 to use all rows assigned to a GPU, and 0 to auto-deduce");
}
};
DMLC_REGISTER_PARAMETER(GPUHistMakerTrainParam);
// With constraints
template <typename GradientPairT>
XGBOOST_DEVICE float inline LossChangeMissing(
const GradientPairT& scan, const GradientPairT& missing, const GradientPairT& parent_sum,
const float& parent_gain, const GPUTrainingParam& param, int constraint,
const ValueConstraint& value_constraint,
bool& missing_left_out) { // NOLINT
float missing_left_gain = value_constraint.CalcSplitGain(
param, constraint, GradStats(scan + missing),
GradStats(parent_sum - (scan + missing)));
float missing_right_gain = value_constraint.CalcSplitGain(
param, constraint, GradStats(scan), GradStats(parent_sum - scan));
if (missing_left_gain >= missing_right_gain) {
missing_left_out = true;
return missing_left_gain - parent_gain;
} else {
missing_left_out = false;
return missing_right_gain - parent_gain;
}
}
/*!
* \brief Sum all histogram bins belonging to one feature using a block reduction.
*
* \tparam ReduceT BlockReduce type.
* \tparam TempStorageT Cub shared memory type.
*
* \param feature_histogram Histogram bins of the feature to be summed.
* \param temp_storage Shared memory for the intermediate result.
*/
template <int BLOCK_THREADS, typename ReduceT, typename TempStorageT, typename GradientSumT>
__device__ GradientSumT ReduceFeature(common::Span<const GradientSumT> feature_histogram,
TempStorageT* temp_storage) {
__shared__ cub::Uninitialized<GradientSumT> uninitialized_sum;
GradientSumT& shared_sum = uninitialized_sum.Alias();
GradientSumT local_sum = GradientSumT();
// For loop sums features into one block size
auto begin = feature_histogram.data();
auto end = begin + feature_histogram.size();
for (auto itr = begin; itr < end; itr += BLOCK_THREADS) {
bool thread_active = itr + threadIdx.x < end;
// Scan histogram
GradientSumT bin = thread_active ? *(itr + threadIdx.x) : GradientSumT();
local_sum += bin;
}
local_sum = ReduceT(temp_storage->sum_reduce).Reduce(local_sum, cub::Sum());
// Reduction result is stored in thread 0.
if (threadIdx.x == 0) {
shared_sum = local_sum;
}
__syncthreads();
return shared_sum;
}
/*! \brief Find the thread with best gain. */
template <int BLOCK_THREADS, typename ReduceT, typename scan_t,
typename MaxReduceT, typename TempStorageT, typename GradientSumT>
__device__ void EvaluateFeature(
int fidx,
common::Span<const GradientSumT> node_histogram,
common::Span<const uint32_t> feature_segments, // cut.row_ptr
float min_fvalue, // cut.min_value
common::Span<const float> gidx_fvalue_map, // cut.cut
DeviceSplitCandidate* best_split, // shared memory storing best split
const DeviceNodeStats& node, const GPUTrainingParam& param,
TempStorageT* temp_storage, // temp memory for cub operations
int constraint, // monotonic_constraints
const ValueConstraint& value_constraint) {
// Use pointer from cut to indicate begin and end of bins for each feature.
uint32_t gidx_begin = feature_segments[fidx]; // beginning bin
uint32_t gidx_end = feature_segments[fidx + 1]; // end bin for i^th feature
// Sum histogram bins for current feature
GradientSumT const feature_sum = ReduceFeature<BLOCK_THREADS, ReduceT>(
node_histogram.subspan(gidx_begin, gidx_end - gidx_begin), temp_storage);
GradientSumT const parent_sum = GradientSumT(node.sum_gradients);
GradientSumT const missing = parent_sum - feature_sum;
float const null_gain = -std::numeric_limits<bst_float>::infinity();
SumCallbackOp<GradientSumT> prefix_op =
SumCallbackOp<GradientSumT>();
for (int scan_begin = gidx_begin; scan_begin < gidx_end;
scan_begin += BLOCK_THREADS) {
bool thread_active = (scan_begin + threadIdx.x) < gidx_end;
// Gradient value for current bin.
GradientSumT bin =
thread_active ? node_histogram[scan_begin + threadIdx.x] : GradientSumT();
scan_t(temp_storage->scan).ExclusiveScan(bin, bin, cub::Sum(), prefix_op);
// Whether the gradient of missing values is put to the left side.
bool missing_left = true;
float gain = null_gain;
if (thread_active) {
gain = LossChangeMissing(bin, missing, parent_sum, node.root_gain, param,
constraint, value_constraint, missing_left);
}
__syncthreads();
// Find thread with best gain
cub::KeyValuePair<int, float> tuple(threadIdx.x, gain);
cub::KeyValuePair<int, float> best =
MaxReduceT(temp_storage->max_reduce).Reduce(tuple, cub::ArgMax());
__shared__ cub::KeyValuePair<int, float> block_max;
if (threadIdx.x == 0) {
block_max = best;
}
__syncthreads();
// Best thread updates split
if (threadIdx.x == block_max.key) {
int gidx = scan_begin + threadIdx.x;
float fvalue =
gidx == gidx_begin ? min_fvalue : gidx_fvalue_map[gidx - 1];
GradientSumT left = missing_left ? bin + missing : bin;
GradientSumT right = parent_sum - left;
best_split->Update(gain, missing_left ? kLeftDir : kRightDir,
fvalue, fidx,
GradientPair(left),
GradientPair(right),
param);
}
__syncthreads();
}
}
template <int BLOCK_THREADS, typename GradientSumT>
__global__ void EvaluateSplitKernel(
common::Span<const GradientSumT>
node_histogram, // histogram for gradients
common::Span<const int> feature_set, // Selected features
DeviceNodeStats node,
common::Span<const uint32_t>
d_feature_segments, // row_ptr from HistCutMatrix
common::Span<const float> d_fidx_min_map, // min_value
common::Span<const float> d_gidx_fvalue_map, // cut
GPUTrainingParam gpu_param,
common::Span<DeviceSplitCandidate> split_candidates, // resulting split
ValueConstraint value_constraint,
common::Span<int> d_monotonic_constraints) {
// KeyValuePair here used as threadIdx.x -> gain_value
typedef cub::KeyValuePair<int, float> ArgMaxT;
typedef cub::BlockScan<
GradientSumT, BLOCK_THREADS, cub::BLOCK_SCAN_WARP_SCANS> BlockScanT;
typedef cub::BlockReduce<ArgMaxT, BLOCK_THREADS> MaxReduceT;
typedef cub::BlockReduce<GradientSumT, BLOCK_THREADS> SumReduceT;
union TempStorage {
typename BlockScanT::TempStorage scan;
typename MaxReduceT::TempStorage max_reduce;
typename SumReduceT::TempStorage sum_reduce;
};
// Aligned && shared storage for best_split
__shared__ cub::Uninitialized<DeviceSplitCandidate> uninitialized_split;
DeviceSplitCandidate& best_split = uninitialized_split.Alias();
__shared__ TempStorage temp_storage;
if (threadIdx.x == 0) {
best_split = DeviceSplitCandidate();
}
__syncthreads();
// One block for each feature. Features are sampled, so fidx != blockIdx.x
int fidx = feature_set[blockIdx.x];
int constraint = d_monotonic_constraints[fidx];
EvaluateFeature<BLOCK_THREADS, SumReduceT, BlockScanT, MaxReduceT>(
fidx, node_histogram,
d_feature_segments, d_fidx_min_map[fidx], d_gidx_fvalue_map,
&best_split, node, gpu_param, &temp_storage, constraint,
value_constraint);
__syncthreads();
if (threadIdx.x == 0) {
// Record best loss for each feature
split_candidates[blockIdx.x] = best_split;
}
}
// Find the gidx value of a given feature within a row; return -1 if the feature is missing
template <typename GidxIterT>
__device__ int BinarySearchRow(bst_uint begin, bst_uint end, GidxIterT data,
int const fidx_begin, int const fidx_end) {
bst_uint previous_middle = UINT32_MAX;
while (end != begin) {
auto middle = begin + (end - begin) / 2;
if (middle == previous_middle) {
break;
}
previous_middle = middle;
auto gidx = data[middle];
if (gidx >= fidx_begin && gidx < fidx_end) {
return gidx;
} else if (gidx < fidx_begin) {
begin = middle;
} else {
end = middle;
}
}
// Value is missing
return -1;
}
/**
* \struct DeviceHistogram
*
* \summary Data storage for node histograms on device. Automatically expands.
*
* \author Rory
* \date 28/07/2018
*/
template <typename GradientSumT>
struct DeviceHistogram {
/*! \brief Map nidx to starting index of its histogram. */
std::map<int, size_t> nidx_map;
thrust::device_vector<typename GradientSumT::ValueT> data;
const size_t kStopGrowingSize = 1 << 26; // Do not grow beyond this size
int n_bins;
int device_id_;
void Init(int device_id, int n_bins) {
this->n_bins = n_bins;
this->device_id_ = device_id;
}
void Reset() {
dh::safe_cuda(cudaSetDevice(device_id_));
dh::safe_cuda(cudaMemsetAsync(
data.data().get(), 0,
data.size() * sizeof(typename decltype(data)::value_type)));
nidx_map.clear();
}
bool HistogramExists(int nidx) {
return nidx_map.find(nidx) != nidx_map.end();
}
void AllocateHistogram(int nidx) {
if (HistogramExists(nidx)) return;
size_t current_size =
nidx_map.size() * n_bins * 2; // Number of items currently used in data
dh::safe_cuda(cudaSetDevice(device_id_));
if (data.size() >= kStopGrowingSize) {
// Recycle histogram memory
std::pair<int, size_t> old_entry = *nidx_map.begin();
nidx_map.erase(old_entry.first);
dh::safe_cuda(cudaMemsetAsync(data.data().get() + old_entry.second, 0,
n_bins * sizeof(GradientSumT)));
nidx_map[nidx] = old_entry.second;
} else {
// Append new node histogram
nidx_map[nidx] = current_size;
if (data.size() < current_size + n_bins * 2) {
size_t new_size = current_size * 2; // Double in size
new_size = std::max(static_cast<size_t>(n_bins * 2),
new_size); // Have at least one histogram
data.resize(new_size);
}
}
}
/**
* \summary Return pointer to histogram memory for a given node.
* \param nidx Tree node index.
* \return hist pointer.
*/
common::Span<GradientSumT> GetNodeHistogram(int nidx) {
CHECK(this->HistogramExists(nidx));
auto ptr = data.data().get() + nidx_map[nidx];
return common::Span<GradientSumT>(
reinterpret_cast<GradientSumT*>(ptr), n_bins);
}
};
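// Device-friendly copy of the training parameters needed to compute leaf weights on the GPU.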
struct CalcWeightTrainParam {
float min_child_weight;
float reg_alpha;
float reg_lambda;
float max_delta_step;
float learning_rate;
XGBOOST_DEVICE explicit CalcWeightTrainParam(const TrainParam& p)
: min_child_weight(p.min_child_weight),
reg_alpha(p.reg_alpha),
reg_lambda(p.reg_lambda),
max_delta_step(p.max_delta_step),
learning_rate(p.learning_rate) {}
};
// Bin each input data entry, store the bin indices in compressed form.
__global__ void compress_bin_ellpack_k(
common::CompressedBufferWriter wr,
common::CompressedByteT* __restrict__ buffer, // gidx_buffer
const size_t* __restrict__ row_ptrs, // row offset of input data
const Entry* __restrict__ entries, // One batch of input data
const float* __restrict__ cuts, // HistCutMatrix::cut
const uint32_t* __restrict__ cut_rows, // HistCutMatrix::row_ptrs
size_t base_row, // batch_row_begin
size_t n_rows,
// row_ptr_begin: row_offset[base_row], the start position of base_row
size_t row_ptr_begin,
size_t row_stride,
unsigned int null_gidx_value) {
size_t irow = threadIdx.x + blockIdx.x * blockDim.x;
int ifeature = threadIdx.y + blockIdx.y * blockDim.y;
if (irow >= n_rows || ifeature >= row_stride)
return;
int row_length = static_cast<int>(row_ptrs[irow + 1] - row_ptrs[irow]);
unsigned int bin = null_gidx_value;
if (ifeature < row_length) {
Entry entry = entries[row_ptrs[irow] - row_ptr_begin + ifeature];
int feature = entry.index;
float fvalue = entry.fvalue;
// {feature_cuts, ncuts} forms the array of cuts of `feature'.
const float *feature_cuts = &cuts[cut_rows[feature]];
int ncuts = cut_rows[feature + 1] - cut_rows[feature];
// Assigning the bin in current entry.
// S.t.: fvalue < feature_cuts[bin]
bin = dh::UpperBound(feature_cuts, ncuts, fvalue);
if (bin >= ncuts)
bin = ncuts - 1;
// Add the number of bins in previous features.
bin += cut_rows[feature];
}
// Write to gidx buffer.
wr.AtomicWriteSymbol(buffer, bin, (irow + base_row) * row_stride + ifeature);
}
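// Builds the histogram of one node in shared memory, then flushes it into the global node histogram with atomic adds.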
template <typename GradientSumT>
__global__ void SharedMemHistKernel(size_t row_stride, const bst_uint* d_ridx,
common::CompressedIterator<uint32_t> d_gidx,
int null_gidx_value,
GradientSumT* d_node_hist,
const GradientPair* d_gpair,
size_t segment_begin, size_t n_elements) {
extern __shared__ char smem[];
GradientSumT* smem_arr = reinterpret_cast<GradientSumT*>(smem); // NOLINT
for (auto i : dh::BlockStrideRange(0, null_gidx_value)) {
smem_arr[i] = GradientSumT();
}
__syncthreads();
for (auto idx : dh::GridStrideRange(static_cast<size_t>(0), n_elements)) {
int ridx = d_ridx[idx / row_stride + segment_begin];
int gidx = d_gidx[ridx * row_stride + idx % row_stride];
if (gidx != null_gidx_value) {
AtomicAddGpair(smem_arr + gidx, d_gpair[ridx]);
}
}
__syncthreads();
for (auto i : dh::BlockStrideRange(0, null_gidx_value)) {
AtomicAddGpair(d_node_hist + i, smem_arr[i]);
}
}
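/** \brief Half-open range [begin, end) of row indices belonging to one tree node. */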
struct Segment {
size_t begin;
size_t end;
Segment() : begin(0), end(0) {}
Segment(size_t begin, size_t end) : begin(begin), end(end) {
CHECK_GE(end, begin);
}
size_t Size() const { return end - begin; }
};
/** \brief Returns one if the left node index is encountered, otherwise returns
* zero. */
struct IndicateLeftTransform {
int left_nidx;
explicit IndicateLeftTransform(int left_nidx) : left_nidx(left_nidx) {}
__host__ __device__ __forceinline__ int operator()(const int& x) const {
return x == left_nidx ? 1 : 0;
}
};
/**
* \brief Optimised routine for sorting key value pairs into left and right
* segments. Based on a single pass of exclusive scan, uses iterators to
* redirect inputs and outputs.
*/
void SortPosition(dh::CubMemory* temp_memory, common::Span<int> position,
common::Span<int> position_out, common::Span<bst_uint> ridx,
common::Span<bst_uint> ridx_out, int left_nidx,
int right_nidx, int64_t left_count) {
auto d_position_out = position_out.data();
auto d_position_in = position.data();
auto d_ridx_out = ridx_out.data();
auto d_ridx_in = ridx.data();
auto write_results = [=] __device__(size_t idx, int ex_scan_result) {
int scatter_address;
if (d_position_in[idx] == left_nidx) {
scatter_address = ex_scan_result;
} else {
scatter_address = (idx - ex_scan_result) + left_count;
}
d_position_out[scatter_address] = d_position_in[idx];
d_ridx_out[scatter_address] = d_ridx_in[idx];
}; // NOLINT
IndicateLeftTransform conversion_op(left_nidx);
cub::TransformInputIterator<int, IndicateLeftTransform, int*> in_itr(
d_position_in, conversion_op);
dh::DiscardLambdaItr<decltype(write_results)> out_itr(write_results);
size_t temp_storage_bytes = 0;
cub::DeviceScan::ExclusiveSum(nullptr, temp_storage_bytes, in_itr, out_itr,
position.size());
temp_memory->LazyAllocate(temp_storage_bytes);
cub::DeviceScan::ExclusiveSum(temp_memory->d_temp_storage,
temp_memory->temp_storage_bytes, in_itr,
out_itr, position.size());
}
template <typename GradientSumT>
struct DeviceShard;
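// Strategy interface for building a node histogram on a single device shard (shared- or global-memory implementation).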
template <typename GradientSumT>
struct GPUHistBuilderBase {
public:
virtual void Build(DeviceShard<GradientSumT>* shard, int idx) = 0;
virtual ~GPUHistBuilderBase() = default;
};
// Manage memory for a single GPU
template <typename GradientSumT>
struct DeviceShard {
int device_id_;
dh::BulkAllocator<dh::MemoryType::kDevice> ba;
/*! \brief HistCutMatrix stored in device. */
struct DeviceHistCutMatrix {
/*! \brief row_ptr from HistCutMatrix. */
dh::DVec<uint32_t> feature_segments;
/*! \brief minimum value for each feature. */
dh::DVec<bst_float> min_fvalue;
/*! \brief Cut. */
dh::DVec<bst_float> gidx_fvalue_map;
} cut_;
/*! \brief Range of rows for each node. */
std::vector<Segment> ridx_segments;
DeviceHistogram<GradientSumT> hist;
/*! \brief global index of histogram, which is stored in ELLPack format. */
dh::DVec<common::CompressedByteT> gidx_buffer;
/*! \brief row length for ELLPack. */
size_t row_stride;
common::CompressedIterator<uint32_t> gidx;
/*! \brief Row indices relative to this shard, necessary for sorting rows. */
dh::DVec2<bst_uint> ridx;
/*! \brief Gradient pair for each row. */
dh::DVec<GradientPair> gpair;
/*! \brief The last histogram index. */
int null_gidx_value;
dh::DVec2<int> position;
dh::DVec<int> monotone_constraints;
dh::DVec<bst_float> prediction_cache;
/*! \brief Sum gradient for each node. */
std::vector<GradientPair> node_sum_gradients;
dh::DVec<GradientPair> node_sum_gradients_d;
/*! \brief row offset in SparsePage (the input data). */
thrust::device_vector<size_t> row_ptrs;
/*! \brief On-device feature set, only actually used on one of the devices */
thrust::device_vector<int> feature_set_d;
/*! The row offset for this shard. */
bst_uint row_begin_idx;
bst_uint row_end_idx;
bst_uint n_rows;
int n_bins;
TrainParam param;
bool prediction_cache_initialised;
dh::CubMemory temp_memory;
std::unique_ptr<GPUHistBuilderBase<GradientSumT>> hist_builder;
// TODO(canonizer): add support for multi-batch DMatrix here
DeviceShard(int device_id, bst_uint row_begin, bst_uint row_end,
TrainParam _param)
: device_id_(device_id),
row_begin_idx(row_begin),
row_end_idx(row_end),
row_stride(0),
n_rows(row_end - row_begin),
n_bins(0),
null_gidx_value(0),
param(_param),
prediction_cache_initialised(false) {}
/* Init row_ptrs and row_stride */
void InitRowPtrs(const SparsePage& row_batch) {
dh::safe_cuda(cudaSetDevice(device_id_));
const auto& offset_vec = row_batch.offset.HostVector();
row_ptrs.resize(n_rows + 1);
thrust::copy(offset_vec.data() + row_begin_idx,
offset_vec.data() + row_end_idx + 1,
row_ptrs.begin());
auto row_iter = row_ptrs.begin();
// find the maximum row size for converting to ELLPack
auto get_size = [=] __device__(size_t row) {
return row_iter[row + 1] - row_iter[row];
}; // NOLINT
auto counting = thrust::make_counting_iterator(size_t(0));
using TransformT = thrust::transform_iterator<decltype(get_size),
decltype(counting), size_t>;
TransformT row_size_iter = TransformT(counting, get_size);
row_stride = thrust::reduce(row_size_iter, row_size_iter + n_rows, 0,
thrust::maximum<size_t>());
}
/*
Init:
n_bins, null_gidx_value, gidx_buffer, row_ptrs, gidx, gidx_fvalue_map,
min_fvalue, feature_segments, node_sum_gradients, ridx_segments,
hist
*/
void InitCompressedData(
const common::HistCutMatrix& hmat, const SparsePage& row_batch);
void CreateHistIndices(const SparsePage& row_batch);
~DeviceShard() {
}
// Reset values for each update iteration
void Reset(HostDeviceVector<GradientPair>* dh_gpair) {
dh::safe_cuda(cudaSetDevice(device_id_));
position.CurrentDVec().Fill(0);
std::fill(node_sum_gradients.begin(), node_sum_gradients.end(),
GradientPair());
thrust::sequence(ridx.CurrentDVec().tbegin(), ridx.CurrentDVec().tend());
std::fill(ridx_segments.begin(), ridx_segments.end(), Segment(0, 0));
ridx_segments.front() = Segment(0, ridx.Size());
this->gpair.copy(dh_gpair->tcbegin(device_id_),
dh_gpair->tcend(device_id_));
SubsampleGradientPair(&gpair, param.subsample, row_begin_idx);
hist.Reset();
}
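// Finds the best split for a node: one CUDA block evaluates each sampled feature, then the per-feature candidates are reduced on the host.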
DeviceSplitCandidate EvaluateSplit(int nidx,
const std::vector<int>& feature_set,
ValueConstraint value_constraint) {
dh::safe_cuda(cudaSetDevice(device_id_));
auto d_split_candidates = temp_memory.GetSpan<DeviceSplitCandidate>(feature_set.size());
feature_set_d.resize(feature_set.size());
auto d_features = common::Span<int>(feature_set_d.data().get(),
feature_set_d.size());
dh::safe_cuda(cudaMemcpyAsync(d_features.data(), feature_set.data(),
d_features.size_bytes(), cudaMemcpyDefault));
DeviceNodeStats node(node_sum_gradients[nidx], nidx, param);
// One block for each feature
int constexpr BLOCK_THREADS = 256;
EvaluateSplitKernel<BLOCK_THREADS, GradientSumT>
<<<uint32_t(feature_set.size()), BLOCK_THREADS, 0>>>(
hist.GetNodeHistogram(nidx), d_features, node,
cut_.feature_segments.GetSpan(), cut_.min_fvalue.GetSpan(),
cut_.gidx_fvalue_map.GetSpan(), GPUTrainingParam(param),
d_split_candidates, value_constraint,
monotone_constraints.GetSpan());
std::vector<DeviceSplitCandidate> split_candidates(feature_set.size());
dh::safe_cuda(cudaMemcpy(split_candidates.data(), d_split_candidates.data(),
split_candidates.size() * sizeof(DeviceSplitCandidate),
cudaMemcpyDeviceToHost));
DeviceSplitCandidate best_split;
for (auto candidate : split_candidates) {
best_split.Update(candidate, param);
}
return best_split;
}
void BuildHist(int nidx) {
hist.AllocateHistogram(nidx);
hist_builder->Build(this, nidx);
}
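// Computes a child's histogram as parent minus sibling, avoiding a second pass over the data.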
void SubtractionTrick(int nidx_parent, int nidx_histogram,
int nidx_subtraction) {
auto d_node_hist_parent = hist.GetNodeHistogram(nidx_parent);
auto d_node_hist_histogram = hist.GetNodeHistogram(nidx_histogram);
auto d_node_hist_subtraction = hist.GetNodeHistogram(nidx_subtraction);
dh::LaunchN(device_id_, hist.n_bins, [=] __device__(size_t idx) {
d_node_hist_subtraction[idx] =
d_node_hist_parent[idx] - d_node_hist_histogram[idx];
});
}
bool CanDoSubtractionTrick(int nidx_parent, int nidx_histogram,
int nidx_subtraction) {
// Make sure histograms are already allocated
hist.AllocateHistogram(nidx_subtraction);
return hist.HistogramExists(nidx_histogram) &&
hist.HistogramExists(nidx_parent);
}
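// Reassigns the rows of a split node to its left/right children by looking up the split feature's bin for every row.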
void UpdatePosition(int nidx, int left_nidx, int right_nidx, int fidx,
int64_t split_gidx, bool default_dir_left, bool is_dense,
int fidx_begin, // cut.row_ptr[fidx]
int fidx_end) { // cut.row_ptr[fidx + 1]
dh::safe_cuda(cudaSetDevice(device_id_));
Segment segment = ridx_segments[nidx];
bst_uint* d_ridx = ridx.Current();
int* d_position = position.Current();
common::CompressedIterator<uint32_t> d_gidx = gidx;
size_t row_stride = this->row_stride;
// Launch 1 thread for each row
dh::LaunchN<1, 128>(
device_id_, segment.Size(), [=] __device__(bst_uint idx) {
idx += segment.begin;
bst_uint ridx = d_ridx[idx];
auto row_begin = row_stride * ridx;
auto row_end = row_begin + row_stride;
auto gidx = -1;
if (is_dense) {
// FIXME: Maybe just search the cuts again.
gidx = d_gidx[row_begin + fidx];
} else {
gidx = BinarySearchRow(row_begin, row_end, d_gidx, fidx_begin,
fidx_end);
}
// belong to left node or right node.
int position;
if (gidx >= 0) {
// Feature is found
position = gidx <= split_gidx ? left_nidx : right_nidx;
} else {
// Feature is missing
position = default_dir_left ? left_nidx : right_nidx;
}
d_position[idx] = position;
});
IndicateLeftTransform conversion_op(left_nidx);
cub::TransformInputIterator<int, IndicateLeftTransform, int*> left_itr(
d_position + segment.begin, conversion_op);
int left_count = dh::SumReduction(temp_memory, left_itr, segment.Size());
CHECK_LE(left_count, segment.Size());
CHECK_GE(left_count, 0);
SortPositionAndCopy(segment, left_nidx, right_nidx, left_count);
ridx_segments[left_nidx] =
Segment(segment.begin, segment.begin + left_count);
ridx_segments[right_nidx] =
Segment(segment.begin + left_count, segment.end);
}
/*! \brief Sort row indices according to position. */
void SortPositionAndCopy(const Segment& segment, int left_nidx, int right_nidx,
size_t left_count) {
SortPosition(
&temp_memory,
common::Span<int>(position.Current() + segment.begin, segment.Size()),
common::Span<int>(position.other() + segment.begin, segment.Size()),
common::Span<bst_uint>(ridx.Current() + segment.begin, segment.Size()),
common::Span<bst_uint>(ridx.other() + segment.begin, segment.Size()),
left_nidx, right_nidx, left_count);
// Copy back key/value
const auto d_position_current = position.Current() + segment.begin;
const auto d_position_other = position.other() + segment.begin;
const auto d_ridx_current = ridx.Current() + segment.begin;
const auto d_ridx_other = ridx.other() + segment.begin;
dh::LaunchN(device_id_, segment.Size(), [=] __device__(size_t idx) {
d_position_current[idx] = d_position_other[idx];
d_ridx_current[idx] = d_ridx_other[idx];
});
}
void UpdatePredictionCache(bst_float* out_preds_d) {
dh::safe_cuda(cudaSetDevice(device_id_));
if (!prediction_cache_initialised) {
dh::safe_cuda(cudaMemcpyAsync(
prediction_cache.Data(), out_preds_d,
prediction_cache.Size() * sizeof(bst_float), cudaMemcpyDefault));
}
prediction_cache_initialised = true;
CalcWeightTrainParam param_d(param);
dh::safe_cuda(cudaMemcpyAsync(node_sum_gradients_d.Data(),
node_sum_gradients.data(),
sizeof(GradientPair) * node_sum_gradients.size(),
cudaMemcpyHostToDevice));
auto d_position = position.Current();
auto d_ridx = ridx.Current();
auto d_node_sum_gradients = node_sum_gradients_d.Data();
auto d_prediction_cache = prediction_cache.Data();
dh::LaunchN(
device_id_, prediction_cache.Size(), [=] __device__(int local_idx) {
int pos = d_position[local_idx];
bst_float weight = CalcWeight(param_d, d_node_sum_gradients[pos]);
d_prediction_cache[d_ridx[local_idx]] +=
weight * param_d.learning_rate;
});
dh::safe_cuda(cudaMemcpy(
out_preds_d, prediction_cache.Data(),
prediction_cache.Size() * sizeof(bst_float), cudaMemcpyDefault));
}
};
template <typename GradientSumT>
struct SharedMemHistBuilder : public GPUHistBuilderBase<GradientSumT> {
void Build(DeviceShard<GradientSumT>* shard, int nidx) override {
auto segment = shard->ridx_segments[nidx];
auto segment_begin = segment.begin;
auto d_node_hist = shard->hist.GetNodeHistogram(nidx);
auto d_gidx = shard->gidx;
auto d_ridx = shard->ridx.Current();
auto d_gpair = shard->gpair.Data();
int null_gidx_value = shard->null_gidx_value;
auto n_elements = segment.Size() * shard->row_stride;
const size_t smem_size = sizeof(GradientSumT) * shard->null_gidx_value;
const int items_per_thread = 8;
const int block_threads = 256;
const int grid_size =
static_cast<int>(dh::DivRoundUp(n_elements,
items_per_thread * block_threads));
if (grid_size <= 0) {
return;
}
dh::safe_cuda(cudaSetDevice(shard->device_id_));
SharedMemHistKernel<<<grid_size, block_threads, smem_size>>>
(shard->row_stride, d_ridx, d_gidx, null_gidx_value, d_node_hist.data(), d_gpair,
segment_begin, n_elements);
}
};
template <typename GradientSumT>
struct GlobalMemHistBuilder : public GPUHistBuilderBase<GradientSumT> {
void Build(DeviceShard<GradientSumT>* shard, int nidx) override {
Segment segment = shard->ridx_segments[nidx];
auto d_node_hist = shard->hist.GetNodeHistogram(nidx).data();
common::CompressedIterator<uint32_t> d_gidx = shard->gidx;
bst_uint* d_ridx = shard->ridx.Current();
GradientPair* d_gpair = shard->gpair.Data();
size_t const n_elements = segment.Size() * shard->row_stride;
size_t const row_stride = shard->row_stride;
int const null_gidx_value = shard->null_gidx_value;
dh::LaunchN(shard->device_id_, n_elements, [=] __device__(size_t idx) {
int ridx = d_ridx[(idx / row_stride) + segment.begin];
// lookup the index (bin) of histogram.
int gidx = d_gidx[ridx * row_stride + idx % row_stride];
if (gidx != null_gidx_value) {
AtomicAddGpair(d_node_hist + gidx, d_gpair[ridx]);
}
});
}
};
template <typename GradientSumT>
inline void DeviceShard<GradientSumT>::InitCompressedData(
const common::HistCutMatrix& hmat, const SparsePage& row_batch) {
n_bins = hmat.row_ptr.back();
null_gidx_value = hmat.row_ptr.back();
int max_nodes =
param.max_leaves > 0 ? param.max_leaves * 2 : MaxNodesDepth(param.max_depth);
ba.Allocate(device_id_,
&gpair, n_rows,
&ridx, n_rows,
&position, n_rows,
&prediction_cache, n_rows,
&node_sum_gradients_d, max_nodes,
&cut_.feature_segments, hmat.row_ptr.size(),
&cut_.gidx_fvalue_map, hmat.cut.size(),
&cut_.min_fvalue, hmat.min_val.size(),
&monotone_constraints, param.monotone_constraints.size());
cut_.gidx_fvalue_map = hmat.cut;
cut_.min_fvalue = hmat.min_val;
cut_.feature_segments = hmat.row_ptr;
monotone_constraints = param.monotone_constraints;
node_sum_gradients.resize(max_nodes);
ridx_segments.resize(max_nodes);
dh::safe_cuda(cudaSetDevice(device_id_));
// allocate compressed bin data
int num_symbols = n_bins + 1;
// Required buffer size for storing data matrix in ELLPack format.
size_t compressed_size_bytes =
common::CompressedBufferWriter::CalculateBufferSize(row_stride * n_rows,
num_symbols);
CHECK(!(param.max_leaves == 0 && param.max_depth == 0))
<< "Max leaves and max depth cannot both be unconstrained for "
"gpu_hist.";
ba.Allocate(device_id_, &gidx_buffer, compressed_size_bytes);
gidx_buffer.Fill(0);
int nbits = common::detail::SymbolBits(num_symbols);
CreateHistIndices(row_batch);
gidx = common::CompressedIterator<uint32_t>(gidx_buffer.Data(), num_symbols);
// check if we can use shared memory for building histograms
// (assuming we need at least 2 CTAs per SM to maintain decent latency hiding)
auto histogram_size = sizeof(GradientSumT) * null_gidx_value;
auto max_smem = dh::MaxSharedMemory(device_id_);
if (histogram_size <= max_smem) {
hist_builder.reset(new SharedMemHistBuilder<GradientSumT>);
} else {
hist_builder.reset(new GlobalMemHistBuilder<GradientSumT>);
}
// Init histogram
hist.Init(device_id_, hmat.row_ptr.back());
}
template <typename GradientSumT>
inline void DeviceShard<GradientSumT>::CreateHistIndices(const SparsePage& row_batch) {
int num_symbols = n_bins + 1;
// bin and compress entries in batches of rows
size_t gpu_batch_nrows =
std::min
(dh::TotalMemory(device_id_) / (16 * row_stride * sizeof(Entry)),
static_cast<size_t>(n_rows));
const std::vector<Entry>& data_vec = row_batch.data.HostVector();
thrust::device_vector<Entry> entries_d(gpu_batch_nrows * row_stride);
size_t gpu_nbatches = dh::DivRoundUp(n_rows, gpu_batch_nrows);
for (size_t gpu_batch = 0; gpu_batch < gpu_nbatches; ++gpu_batch) {
size_t batch_row_begin = gpu_batch * gpu_batch_nrows;
size_t batch_row_end = (gpu_batch + 1) * gpu_batch_nrows;
if (batch_row_end > n_rows) {
batch_row_end = n_rows;
}
size_t batch_nrows = batch_row_end - batch_row_begin;
// number of entries in this batch.
size_t n_entries = row_ptrs[batch_row_end] - row_ptrs[batch_row_begin];
// copy data entries to device.
dh::safe_cuda
(cudaMemcpy
(entries_d.data().get(), data_vec.data() + row_ptrs[batch_row_begin],
n_entries * sizeof(Entry), cudaMemcpyDefault));
const dim3 block3(32, 8, 1); // 256 threads
const dim3 grid3(dh::DivRoundUp(n_rows, block3.x),
dh::DivRoundUp(row_stride, block3.y), 1);
compress_bin_ellpack_k<<<grid3, block3>>>
(common::CompressedBufferWriter(num_symbols),
gidx_buffer.Data(),
row_ptrs.data().get() + batch_row_begin,
entries_d.data().get(),
cut_.gidx_fvalue_map.Data(), cut_.feature_segments.Data(),
batch_row_begin, batch_nrows,
row_ptrs[batch_row_begin],
row_stride, null_gidx_value);
}
// free the memory that is no longer needed
row_ptrs.resize(0);
row_ptrs.shrink_to_fit();
entries_d.resize(0);
entries_d.shrink_to_fit();
}
template <typename GradientSumT>
class GPUHistMakerSpecialised{
public:
struct ExpandEntry;
GPUHistMakerSpecialised() : initialised_(false), p_last_fmat_(nullptr) {}
void Init(
const std::vector<std::pair<std::string, std::string>>& args) {
param_.InitAllowUnknown(args);
hist_maker_param_.InitAllowUnknown(args);
CHECK(param_.n_gpus != 0) << "Must have at least one device";
n_devices_ = param_.n_gpus;
dist_ = GPUDistribution::Block(GPUSet::All(param_.gpu_id, param_.n_gpus));
dh::CheckComputeCapability();
if (param_.grow_policy == TrainParam::kLossGuide) {
qexpand_.reset(new ExpandQueue(LossGuide));
} else {
qexpand_.reset(new ExpandQueue(DepthWise));
}
monitor_.Init("updater_gpu_hist");
}
void Update(HostDeviceVector<GradientPair>* gpair, DMatrix* dmat,
const std::vector<RegTree*>& trees) {
monitor_.StartCuda("Update");
// rescale learning rate according to size of trees
float lr = param_.learning_rate;
param_.learning_rate = lr / trees.size();
ValueConstraint::Init(¶m_, dmat->Info().num_col_);
// build tree
try {
for (size_t i = 0; i < trees.size(); ++i) {
this->UpdateTree(gpair, dmat, trees[i]);
}
dh::safe_cuda(cudaGetLastError());
} catch (const std::exception& e) {
LOG(FATAL) << "Exception in gpu_hist: " << e.what() << std::endl;
}
param_.learning_rate = lr;
monitor_.StopCuda("Update");
}
void InitDataOnce(DMatrix* dmat) {
info_ = &dmat->Info();
int n_devices = dist_.Devices().Size();
device_list_.resize(n_devices);
for (int index = 0; index < n_devices; ++index) {
int device_id = dist_.Devices().DeviceId(index);
device_list_[index] = device_id;
}
reducer_.Init(device_list_);
auto batch_iter = dmat->GetRowBatches().begin();
const SparsePage& batch = *batch_iter;
// Create device shards
shards_.resize(n_devices);
dh::ExecuteIndexShards(&shards_, [&](int i, std::unique_ptr<DeviceShard<GradientSumT>>& shard) {
size_t start = dist_.ShardStart(info_->num_row_, i);
size_t size = dist_.ShardSize(info_->num_row_, i);
shard = std::unique_ptr<DeviceShard<GradientSumT>>
(new DeviceShard<GradientSumT>(dist_.Devices().DeviceId(i),
start, start + size, param_));
shard->InitRowPtrs(batch);
});
// Find the cuts.
monitor_.StartCuda("Quantiles");
common::DeviceSketch(batch, *info_, param_, &hmat_, hist_maker_param_.gpu_batch_nrows);
n_bins_ = hmat_.row_ptr.back();
monitor_.StopCuda("Quantiles");
monitor_.StartCuda("BinningCompression");
dh::ExecuteIndexShards(&shards_, [&](int idx,
std::unique_ptr<DeviceShard<GradientSumT>>& shard) {
shard->InitCompressedData(hmat_, batch);
});
monitor_.StopCuda("BinningCompression");
++batch_iter;
CHECK(batch_iter.AtEnd()) << "External memory not supported";
p_last_fmat_ = dmat;
initialised_ = true;
}
void InitData(HostDeviceVector<GradientPair>* gpair, DMatrix* dmat) {
if (!initialised_) {
monitor_.StartCuda("InitDataOnce");
this->InitDataOnce(dmat);
monitor_.StopCuda("InitDataOnce");
}
column_sampler_.Init(info_->num_col_, param_.colsample_bynode,
param_.colsample_bylevel, param_.colsample_bytree);
// Copy gpair & reset memory
monitor_.StartCuda("InitDataReset");
gpair->Reshard(dist_);
dh::ExecuteIndexShards(
&shards_,
[&](int idx, std::unique_ptr<DeviceShard<GradientSumT>>& shard) {
shard->Reset(gpair);
});
monitor_.StopCuda("InitDataReset");
}
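// Sums the histogram of node nidx across all device shards (and across workers when running distributed).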
void AllReduceHist(int nidx) {
if (shards_.size() == 1 && !rabit::IsDistributed()) return;
monitor_.StartCuda("AllReduce");
reducer_.GroupStart();
for (auto& shard : shards_) {
auto d_node_hist = shard->hist.GetNodeHistogram(nidx).data();
reducer_.AllReduceSum(
dist_.Devices().Index(shard->device_id_),
reinterpret_cast<typename GradientSumT::ValueT*>(d_node_hist),
reinterpret_cast<typename GradientSumT::ValueT*>(d_node_hist),
n_bins_ * (sizeof(GradientSumT) / sizeof(typename GradientSumT::ValueT)));
}
reducer_.GroupEnd();
reducer_.Synchronize();
monitor_.StopCuda("AllReduce");
}
/**
* \brief Build GPU local histograms for the left and right child of some parent node
*/
void BuildHistLeftRight(int nidx_parent, int nidx_left, int nidx_right) {
size_t left_node_max_elements = 0;
size_t right_node_max_elements = 0;
for (auto& shard : shards_) {
left_node_max_elements = (std::max)(
left_node_max_elements, shard->ridx_segments[nidx_left].Size());
right_node_max_elements = (std::max)(
right_node_max_elements, shard->ridx_segments[nidx_right].Size());
}
rabit::Allreduce<rabit::op::Max, size_t>(&left_node_max_elements, 1);
rabit::Allreduce<rabit::op::Max, size_t>(&right_node_max_elements, 1);
auto build_hist_nidx = nidx_left;
auto subtraction_trick_nidx = nidx_right;
if (right_node_max_elements < left_node_max_elements) {
build_hist_nidx = nidx_right;
subtraction_trick_nidx = nidx_left;
}
// Build histogram for node with the smallest number of training examples
dh::ExecuteIndexShards(
&shards_,
[&](int idx, std::unique_ptr<DeviceShard<GradientSumT>>& shard) {
shard->BuildHist(build_hist_nidx);
});
this->AllReduceHist(build_hist_nidx);
// Check whether we can use the subtraction trick to calculate the other
bool do_subtraction_trick = true;
for (auto& shard : shards_) {
do_subtraction_trick &= shard->CanDoSubtractionTrick(
nidx_parent, build_hist_nidx, subtraction_trick_nidx);
}
if (do_subtraction_trick) {
// Calculate other histogram using subtraction trick
dh::ExecuteIndexShards(
&shards_,
[&](int idx, std::unique_ptr<DeviceShard<GradientSumT>>& shard) {
shard->SubtractionTrick(nidx_parent, build_hist_nidx,
subtraction_trick_nidx);
});
} else {
// Calculate other histogram manually
dh::ExecuteIndexShards(
&shards_,
[&](int idx, std::unique_ptr<DeviceShard<GradientSumT>>& shard) {
shard->BuildHist(subtraction_trick_nidx);
});
this->AllReduceHist(subtraction_trick_nidx);
}
}
DeviceSplitCandidate EvaluateSplit(int nidx, RegTree* p_tree) {
return shards_.front()->EvaluateSplit(
nidx, *column_sampler_.GetFeatureSet(p_tree->GetDepth(nidx)),
node_value_constraints_[nidx]);
}
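// Builds the root histogram, records the root statistics and pushes the first candidate split onto the expand queue.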
void InitRoot(RegTree* p_tree) {
constexpr int root_nidx = 0;
// Sum gradients
std::vector<GradientPair> tmp_sums(shards_.size());
dh::ExecuteIndexShards(
&shards_,
[&](int i, std::unique_ptr<DeviceShard<GradientSumT>>& shard) {
dh::safe_cuda(cudaSetDevice(shard->device_id_));
tmp_sums[i] = dh::SumReduction(
shard->temp_memory, shard->gpair.Data(), shard->gpair.Size());
});
GradientPair sum_gradient =
std::accumulate(tmp_sums.begin(), tmp_sums.end(), GradientPair());
rabit::Allreduce<rabit::op::Sum>((GradientPair::ValueT*)&sum_gradient, 2);
// Generate root histogram
dh::ExecuteIndexShards(
&shards_,
[&](int idx, std::unique_ptr<DeviceShard<GradientSumT>>& shard) {
shard->BuildHist(root_nidx);
});
this->AllReduceHist(root_nidx);
// Remember root stats
p_tree->Stat(root_nidx).sum_hess = sum_gradient.GetHess();
auto weight = CalcWeight(param_, sum_gradient);
p_tree->Stat(root_nidx).base_weight = weight;
(*p_tree)[root_nidx].SetLeaf(param_.learning_rate * weight);
// Store sum gradients
for (auto& shard : shards_) {
shard->node_sum_gradients[root_nidx] = sum_gradient;
}
// Initialise root constraint
node_value_constraints_.resize(p_tree->GetNodes().size());
// Generate first split
auto split = this->EvaluateSplit(root_nidx, p_tree);
qexpand_->push(
ExpandEntry(root_nidx, p_tree->GetDepth(root_nidx), split, 0));
}
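// Converts the winning split value into its bin index and repartitions rows on every shard.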
void UpdatePosition(const ExpandEntry& candidate, RegTree* p_tree) {
int nidx = candidate.nid;
int left_nidx = (*p_tree)[nidx].LeftChild();
int right_nidx = (*p_tree)[nidx].RightChild();
// convert floating-point split_pt into corresponding bin_id
// split_cond = -1 indicates that split_pt is less than all known cut points
int64_t split_gidx = -1;
int64_t fidx = candidate.split.findex;
bool default_dir_left = candidate.split.dir == kLeftDir;
uint32_t fidx_begin = hmat_.row_ptr[fidx];
uint32_t fidx_end = hmat_.row_ptr[fidx + 1];
// split_gidx = i where i is the i^th bin containing split value.
for (auto i = fidx_begin; i < fidx_end; ++i) {
if (candidate.split.fvalue == hmat_.cut[i]) {
split_gidx = static_cast<int64_t>(i);
}
}
auto is_dense = info_->num_nonzero_ == info_->num_row_ * info_->num_col_;
dh::ExecuteIndexShards(
&shards_,
[&](int idx, std::unique_ptr<DeviceShard<GradientSumT>>& shard) {
shard->UpdatePosition(nidx, left_nidx, right_nidx, fidx, split_gidx,
default_dir_left, is_dense, fidx_begin,
fidx_end);
});
}
void ApplySplit(const ExpandEntry& candidate, RegTree* p_tree) {
RegTree& tree = *p_tree;
GradStats left_stats;
left_stats.Add(candidate.split.left_sum);
GradStats right_stats;
right_stats.Add(candidate.split.right_sum);
GradStats parent_sum;
parent_sum.Add(left_stats);
parent_sum.Add(right_stats);
node_value_constraints_.resize(tree.GetNodes().size());
auto base_weight = node_value_constraints_[candidate.nid].CalcWeight(param_, parent_sum);
auto left_weight =
node_value_constraints_[candidate.nid].CalcWeight(param_, left_stats)*param_.learning_rate;
auto right_weight =
node_value_constraints_[candidate.nid].CalcWeight(param_, right_stats)*param_.learning_rate;
tree.ExpandNode(candidate.nid, candidate.split.findex,
candidate.split.fvalue, candidate.split.dir == kLeftDir,
base_weight, left_weight, right_weight,
candidate.split.loss_chg, parent_sum.sum_hess);
// Set up child constraints
node_value_constraints_.resize(tree.GetNodes().size());
node_value_constraints_[candidate.nid].SetChild(
param_, tree[candidate.nid].SplitIndex(), left_stats, right_stats,
&node_value_constraints_[tree[candidate.nid].LeftChild()],
&node_value_constraints_[tree[candidate.nid].RightChild()]);
// Store sum gradients
for (auto& shard : shards_) {
shard->node_sum_gradients[tree[candidate.nid].LeftChild()] = candidate.split.left_sum;
shard->node_sum_gradients[tree[candidate.nid].RightChild()] = candidate.split.right_sum;
}
}
void UpdateTree(HostDeviceVector<GradientPair>* gpair, DMatrix* p_fmat,
RegTree* p_tree) {
auto& tree = *p_tree;
monitor_.StartCuda("InitData");
this->InitData(gpair, p_fmat);
monitor_.StopCuda("InitData");
monitor_.StartCuda("InitRoot");
this->InitRoot(p_tree);
monitor_.StopCuda("InitRoot");
auto timestamp = qexpand_->size();
auto num_leaves = 1;
while (!qexpand_->empty()) {
ExpandEntry candidate = qexpand_->top();
qexpand_->pop();
if (!candidate.IsValid(param_, num_leaves)) continue;
this->ApplySplit(candidate, p_tree);
monitor_.StartCuda("UpdatePosition");
this->UpdatePosition(candidate, p_tree);
monitor_.StopCuda("UpdatePosition");
num_leaves++;
int left_child_nidx = tree[candidate.nid].LeftChild();
int right_child_nidx = tree[candidate.nid].RightChild();
// Only create child entries if needed
if (ExpandEntry::ChildIsValid(param_, tree.GetDepth(left_child_nidx),
num_leaves)) {
monitor_.StartCuda("BuildHist");
this->BuildHistLeftRight(candidate.nid, left_child_nidx,
right_child_nidx);
monitor_.StopCuda("BuildHist");
monitor_.StartCuda("EvaluateSplits");
auto left_child_split = this->EvaluateSplit(left_child_nidx, p_tree);
auto right_child_split = this->EvaluateSplit(right_child_nidx, p_tree);
qexpand_->push(ExpandEntry(left_child_nidx,
tree.GetDepth(left_child_nidx),
left_child_split, timestamp++));
qexpand_->push(ExpandEntry(right_child_nidx,
tree.GetDepth(right_child_nidx),
right_child_split, timestamp++));
monitor_.StopCuda("EvaluateSplits");
}
}
}
bool UpdatePredictionCache(
const DMatrix* data, HostDeviceVector<bst_float>* p_out_preds) {
if (shards_.empty() || p_last_fmat_ == nullptr || p_last_fmat_ != data)
return false;
monitor_.StartCuda("UpdatePredictionCache");
p_out_preds->Reshard(dist_.Devices());
dh::ExecuteIndexShards(
&shards_,
[&](int idx, std::unique_ptr<DeviceShard<GradientSumT>>& shard) {
shard->UpdatePredictionCache(
p_out_preds->DevicePointer(shard->device_id_));
});
monitor_.StopCuda("UpdatePredictionCache");
return true;
}
struct ExpandEntry {
int nid;
int depth;
DeviceSplitCandidate split;
uint64_t timestamp;
ExpandEntry(int nid, int depth, const DeviceSplitCandidate& split,
uint64_t timestamp)
: nid(nid), depth(depth), split(split), timestamp(timestamp) {}
bool IsValid(const TrainParam& param, int num_leaves) const {
if (split.loss_chg <= kRtEps) return false;
if (split.left_sum.GetHess() == 0 || split.right_sum.GetHess() == 0)
return false;
if (param.max_depth > 0 && depth == param.max_depth) return false;
if (param.max_leaves > 0 && num_leaves == param.max_leaves) return false;
return true;
}
static bool ChildIsValid(const TrainParam& param, int depth,
int num_leaves) {
if (param.max_depth > 0 && depth >= param.max_depth) return false;
if (param.max_leaves > 0 && num_leaves >= param.max_leaves) return false;
return true;
}
friend std::ostream& operator<<(std::ostream& os, const ExpandEntry& e) {
os << "ExpandEntry: \n";
os << "nidx: " << e.nid << "\n";
os << "depth: " << e.depth << "\n";
os << "loss: " << e.split.loss_chg << "\n";
os << "left_sum: " << e.split.left_sum << "\n";
os << "right_sum: " << e.split.right_sum << "\n";
return os;
}
};
inline static bool DepthWise(ExpandEntry lhs, ExpandEntry rhs) {
if (lhs.depth == rhs.depth) {
return lhs.timestamp > rhs.timestamp; // favor small timestamp
} else {
return lhs.depth > rhs.depth; // favor small depth
}
}
inline static bool LossGuide(ExpandEntry lhs, ExpandEntry rhs) {
if (lhs.split.loss_chg == rhs.split.loss_chg) {
return lhs.timestamp > rhs.timestamp; // favor small timestamp
} else {
return lhs.split.loss_chg < rhs.split.loss_chg; // favor large loss_chg
}
}
TrainParam param_;
GPUHistMakerTrainParam hist_maker_param_;
common::HistCutMatrix hmat_;
common::GHistIndexMatrix gmat_;
MetaInfo* info_;
bool initialised_;
int n_devices_;
int n_bins_;
std::vector<std::unique_ptr<DeviceShard<GradientSumT>>> shards_;
common::ColumnSampler column_sampler_;
using ExpandQueue = std::priority_queue<ExpandEntry, std::vector<ExpandEntry>,
std::function<bool(ExpandEntry, ExpandEntry)>>;
std::unique_ptr<ExpandQueue> qexpand_;
common::Monitor monitor_;
dh::AllReducer reducer_;
std::vector<ValueConstraint> node_value_constraints_;
/*! List storing device id. */
std::vector<int> device_list_;
DMatrix* p_last_fmat_;
GPUDistribution dist_;
};
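// Thin dispatcher that selects a single- or double-precision histogram implementation based on the training parameter.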
class GPUHistMaker : public TreeUpdater {
public:
void Init(
const std::vector<std::pair<std::string, std::string>>& args) override {
hist_maker_param_.InitAllowUnknown(args);
float_maker_.reset();
double_maker_.reset();
if (hist_maker_param_.single_precision_histogram) {
float_maker_.reset(new GPUHistMakerSpecialised<GradientPair>());
float_maker_->Init(args);
} else {
double_maker_.reset(new GPUHistMakerSpecialised<GradientPairPrecise>());
double_maker_->Init(args);
}
}
void Update(HostDeviceVector<GradientPair>* gpair, DMatrix* dmat,
const std::vector<RegTree*>& trees) override {
if (hist_maker_param_.single_precision_histogram) {
float_maker_->Update(gpair, dmat, trees);
} else {
double_maker_->Update(gpair, dmat, trees);
}
}
bool UpdatePredictionCache(
const DMatrix* data, HostDeviceVector<bst_float>* p_out_preds) override {
if (hist_maker_param_.single_precision_histogram) {
return float_maker_->UpdatePredictionCache(data, p_out_preds);
} else {
return double_maker_->UpdatePredictionCache(data, p_out_preds);
}
}
private:
GPUHistMakerTrainParam hist_maker_param_;
std::unique_ptr<GPUHistMakerSpecialised<GradientPair>> float_maker_;
std::unique_ptr<GPUHistMakerSpecialised<GradientPairPrecise>> double_maker_;
};
XGBOOST_REGISTER_TREE_UPDATER(GPUHistMaker, "grow_gpu_hist")
.describe("Grow tree with GPU.")
.set_body([]() { return new GPUHistMaker(); });
} // namespace tree
} // namespace xgboost
|
db4d5830974cecc8c40b9c2952d5b6af765ba8c9.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
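// Minimal example: add two integers on the GPU and print the result from the host.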
__global__ void add(int *v1, int *v2, int *sol) {
*sol = *v1 + *v2;
printf("Hello Cuda!\n");
}
int main(void) {
int v1, v2, sol; //Host copies of the values
int *d_v1, *d_v2, *d_sol; //int vector for v1,v2,v3 in the device
int size = sizeof(int);
//Allocating space in the device
hipMalloc((void **)&d_v1, size);
hipMalloc((void **)&d_v2, size);
hipMalloc((void **)&d_sol, size);
//setup input
v1 = 17;
v2 = 13;
//Input values to device
hipMemcpy(d_v1, &v1, size, hipMemcpyHostToDevice);
hipMemcpy(d_v2, &v2, size, hipMemcpyHostToDevice);
hipMemcpy(d_sol, &sol, size, hipMemcpyHostToDevice);
//Launch add() on the device
add<<<1, 1>>>(d_v1, d_v2, d_sol);
//Copy result from device to host
hipMemcpy(&sol, d_sol, size, hipMemcpyDeviceToHost);
//Cleanup
printf(" Hello %d \n", sol);
hipFree(d_v1);
hipFree(d_v2);
hipFree(d_sol);
return 0;
}
|
db4d5830974cecc8c40b9c2952d5b6af765ba8c9.cu
|
#include <stdio.h>
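// Minimal example: add two integers on the GPU and print the result from the host.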
__global__ void add(int *v1, int *v2, int *sol) {
*sol = *v1 + *v2;
printf("Hello Cuda!\n");
}
int main(void) {
int v1, v2, sol; //Host copies of the values
int *d_v1, *d_v2, *d_sol; //int vector for v1,v2,v3 in the device
int size = sizeof(int);
//Allocating space in the device
cudaMalloc((void **)&d_v1, size);
cudaMalloc((void **)&d_v2, size);
cudaMalloc((void **)&d_sol, size);
//setup input
v1 = 17;
v2 = 13;
//Input values to device
cudaMemcpy(d_v1, &v1, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_v2, &v2, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_sol, &sol, size, cudaMemcpyHostToDevice);
//Launch add() on the device
add<<<1, 1>>>(d_v1, d_v2, d_sol);
//Copy result from device to host
cudaMemcpy(&sol, d_sol, size, cudaMemcpyDeviceToHost);
//Cleanup
printf(" Hello %d \n", sol);
cudaFree(d_v1);
cudaFree(d_v2);
cudaFree(d_sol);
return 0;
}
|
f84dec0b71659606c14c325c7d0d2f463b07af9e.hip
|
// !!! This is a file automatically generated by hipify!!!
///This program computes the parallelized version of the FFT_DIF_DIT_TD algorithm
///(31/08/2016)
///This version is used to plot the execution times in MATLAB, considering (RADIX-2) N = 2^20, Li = 33 and Lo = {2,4,8,...,2^20}
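///The host drives three processing stages per iteration (etapa_entrada, etapa_intermedia, etapa_salida); each run is timed with HIP events and the per-Lo averages are written to a binary file.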
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <hipfft.h>
#include <cufftw.h>
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_complex.h>
#include <math.h>
#include <math_constants.h>
#include <iostream>
#include <time.h>
//////////////////////////////////////////////////////////////////////////
///////////////////////FUNCTION DECLARATIONS//////////////////////////////
//////////////////////////////////////////////////////////////////////////
void vector_entrada_xn(int Li);
void arreglo_W(int N);
void asign_rap(int N,int Li,int Lo);
void factor(int N);
void product(int vector_1[50],int vector_2[50],int valor);
void etapa_entrada(void);
__global__ void inputStage_kernel(int N, int Li,int Dip,int Dop,int P,cuFloatComplex *x,cuFloatComplex *W,cuFloatComplex *y);
void etapa_intermedia(void);
void etapa_salida(void);
__global__ void outputStage_kernel(int N,int Lo,int Dip,int Dop,int P,cuFloatComplex *z,cuFloatComplex *W,cuFloatComplex *X);
//////////////////////////////////////////////////////////////////////////
/////////////////////GLOBAL VARIABLE DECLARATIONS/////////////////////////
//////////////////////////////////////////////////////////////////////////
cuFloatComplex *x_host;
cuFloatComplex *W_host;
//cuFloatComplex *y_host;
//cuFloatComplex *z_host;
cuFloatComplex *X_host;
cuFloatComplex *x_device;
cuFloatComplex *W_device;
cuFloatComplex *y_device;
cuFloatComplex *z_device;
cuFloatComplex *X_device;
hipfftComplex *in,*out;
FILE *db_open,*dc_open;
int Dip,Dop,P,N,Li,Lo;
int vF[50]; //Stores the factors of N
int svF; //Stores the number of factors of N
int Prod[50];
int a;
#define inf 99999
//////////////////////////////////////////////////////////////////////////
//////////////////////////INPUT DATA//////////////////////////////////////
//////////////////////////////////////////////////////////////////////////
/// N >>> Number of elements of the input vector
/// Li >>> Number of nonzero input elements
/// Lo >>> Number of required output elements
/// loop >>> Number of iterations
/// muestras >>> Number of samples
//////////////////////////////////////////////////////////////////////////
///////////////////////////OUTPUT DATA////////////////////////////////////
//////////////////////////////////////////////////////////////////////////
/// X >>> Output vector
//////////////////////////////////////////////////////////////////////////
/////////////////// THE INPUT DATA IS ENTERED HERE ///////////////////////
//////////////////////////////////////////////////////////////////////////
///Enter the required number of iterations
const int loop = 300;
///Enter the value of N_max
const int N_max = 20;
///Enter the value of Li_max
const int Li_max = 33;
//////////////////////////////////////////////////////////////////////////
//////////////////////////MAIN FUNCTION///////////////////////////////////
//////////////////////////////////////////////////////////////////////////
//Main function
int main()
{
int i,j,i_N,j_res,k_res,cont,i_prom;
float suma;
float promedio[N_max];
//////////////////////////////////////////////////////////////////////////
//////////////////////////DEVICE SELECTION////////////////////////////////
//////////////////////////////////////////////////////////////////////////
int device;
FILE *da;
hipSetDevice(1);
hipGetDevice(&device);
if(device == 0)
{
printf("\n\n---DEVICE = GeForce GTX 970---\n\n");
da = fopen("Tiempos_N20_Li33_LoVARIA_CUDA_GTX970.bin","a+b"); //Creates or overwrites the file
}
if(device == 1)
{
printf("\n\n---DEVICE = TESLA K20---\n\n");
da = fopen("Tiempos_N20_Li33_LoVARIA_CUDA_TESLAK20c.bin","a+b"); //Creates or overwrites the file
}
//Pause
printf("\n---PRESIONA UNA TECLA PARA CONTINUAR---\n\n");
getchar();
for(i_N = N_max;i_N <= N_max;i_N++)
{
N = (int )pow(2,i_N);
printf("\n N = %d \n",N);
for(j_res= Li_max;j_res <= Li_max;j_res++)
{
Li=j_res;
for(k_res=1;k_res <= N_max;k_res++)
{
Lo=(int )pow(2,k_res);
printf("\n Li = %d Lo = %d",Li,Lo);
///The binary files are opened
db_open = fopen("Entrada_real_N20_C.bin","rb");
dc_open = fopen("Entrada_imag_N20_C.bin","rb");
suma=0.0;
for(j=0;j<loop;j++)
{
//Commands needed to measure time
float elapsedTime_app;
hipEvent_t start_app, stop_app;
hipEventCreate(&start_app);
hipEventCreate(&stop_app);
//The values of the input vector x[n] are generated on the host
vector_entrada_xn(Li);
///The array W[N] is generated
arreglo_W(N);
//---------------------------------------------------------------------------------------------
//Se empieza a medir el tiempo de ejecucion de la aplicacion
hipEventRecord(start_app,0);
//Se generan en el host los factores Dip y Dop
asign_rap(N,Li,Lo);
//Cálculo en el host del factor P
P = N/(Dip*Dop);
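// asign_rap chooses Dip and Dop among the factors of N, so N = Dip*Dop*P and P is the length of each batched FFT in the intermediate stage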
//printf("\n\n FACTOR P:\n\n");
//printf("\n Dip = %d Dop = %d P = %d ",Dip,Dop,P);
//Función auxiliar del host para ejecutar la etapa de entrada
etapa_entrada();
//Función auxiliar del host para ejecutar la etapa intermedia
etapa_intermedia();
//Función auxiliar del host para ejecutar la etapa de salida
etapa_salida();
//---------------------------------------------------------------------------------------------
//Comandos necesarios para medir el tiempo de la aplicacion (app)
hipEventRecord(stop_app,0);
hipEventSynchronize(stop_app);
hipEventElapsedTime(&elapsedTime_app,start_app,stop_app);
//Suma de todos los tiempos
suma = suma + elapsedTime_app;
//Se destruyen los eventos que miden el tiempo de la aplicacion
hipEventDestroy(start_app);
hipEventDestroy(stop_app);
//Se liberan memorias del Host y Device
free(x_host);
free(W_host);
free(X_host);
hipFree(x_device);
hipFree(W_device);
hipFree(y_device);
hipFree(z_device);
hipFree(X_device);
}
promedio[k_res-1] = suma/(float)loop;
fclose(db_open);
fclose(dc_open);
}
}
}
fwrite(promedio,sizeof(float),N_max,da);
fclose(da);
return EXIT_SUCCESS;
}
//////////////////////////////////////////////////////////////////////////
/////////////////////////FUNCIONES SECUNDARIAS////////////////////////////
//////////////////////////////////////////////////////////////////////////
//Ésta función genera el vector de entrada x[n]
void vector_entrada_xn(int Li)
{
//Declaración de variables locales
int k;
float *buffer_real,*buffer_imag;
//Se reserva memoria para xn_host en el host
x_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*Li);
buffer_real = (float*)malloc(sizeof(float)*(pow(2,N_max)));
buffer_imag = (float*)malloc(sizeof(float)*(pow(2,N_max)));
///Se lee el vector de entrada del archivo binario
fread(buffer_real,sizeof(float),(int)pow(2,N_max),db_open);
fread(buffer_imag,sizeof(float),(int)pow(2,N_max),dc_open);
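// Note: db_open/dc_open are opened once per (Li,Lo) pair and never rewound inside the timing loop, so after the first iteration these freads may read fewer than pow(2,N_max) elements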
//Se dan valores a x[n]
for(k = 0;k < Li; k++)
{
//x_host[k] = make_cuFloatComplex((float)(rand()%11),(float)(rand()%11));
//x_host[k] = make_cuFloatComplex((float)(k + 1),(float)(0.0));
x_host[k] = make_cuFloatComplex(buffer_real[k],buffer_imag[k]);
}
/*
//Se imprimen los valores de entrada x[n]
printf("\n---ELEMENTOS DE ENTRADA x[n]---\n\n");
for(k=0;k<Li;k++)
{
printf(" %d-> (%f) + (%f)\n",k+1,cuCrealf(x_host[k]),cuCimagf(x_host[k]));
}
*/
free(buffer_real);
free(buffer_imag);
}
//Ésta función genera el arreglo W
void arreglo_W(int N)
{
//Declaración de variables locales
int n;
//Se reserva memoria para W_host en el host
W_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*N);
//Se genera el arreglo W
for(n = 1;n <= N;n++)
{
W_host[n-1] = make_cuFloatComplex((float)cos((2*CUDART_PI*n)/N),(float)(-1)*sin((2*CUDART_PI*n)/N));
}
/*
//Se imprimen los valores del arreglo W[N]
printf("\n---ARREGLO W[N]---\n\n");
for(n = 0;n < N; n++)
{
printf(" W[%d]-> (%f) + (%f)\n",n+1,cuCrealf(W_host[n]),cuCimagf(W_host[n]));
}
*/
}
//Ésta función genera los factores Dip y Dop
void asign_rap(int N,int Li,int Lo)
{
//Declaración de variables locales
float NLi,NLo,Diprapt,Doprapt;
int Nh[50];
int k[50];
int G;
int g,i,t,ta;
int Dipt[50],Dopt[50];
float distrapt,distrap;
int Pos,h,Poss;
int nk[50];
int r;
//Inicializaciones
G = 0;
svF = 0;
//Factores Dip y Dop ideales
NLi=(float)N/(float)Li;
NLo=(float)N/(float)Lo;
Diprapt=NLi;
Doprapt=NLo;
//Se encuentran los factores de "N"
//vF almacena los factores de "N"
//svF almacena el número de factores de "N"
factor(N);
/*
Almacena en el vector Nh los factores que son diferentes de del vector vF
En el vector k se almacena la cantidad de veces que se repite cada
elemento almacenado en el vector Nh.
*/
Nh[0] = vF[0];
k[0]=1;
for(g=1;g<=svF-1;g=g+1)
{
if(vF[g]!=vF[g-1])
{
G=G+1;
Nh[G]=vF[g];
k[G]=1;
}
else
{
k[G]=k[G]+1;
}
}
/*
Almacena en el vector Nh todas las posibles combinaciones que den como
producto a N. t almacena el numero de elementos del vector Nh.
*/
product(Nh,k,G);
t = a;
for(i=0;i<t;i=i+1)
{
Dipt[i]=Prod[i];
}
distrapt=inf;
for(g=1;g<=t;g=g+1)
{
if(Dipt[g-1]<=NLi)
{
Pos=g-1;
for(h=0;h<=G;h=h+1)
{
Poss=floor(Pos/(k[h]+1));
nk[h]=k[h]+Poss*(k[h]+1)-Pos;
Pos=Poss;
}
product(Nh,nk,G);
ta=a;
for(i=0;i<ta;i=i+1)
{
Dopt[i]=Prod[i];
}
////////////////////////////////////////////
//int j;
//for(j=0;j<ta;j++)
//{
// printf(" %d ",Dopt[j]);
//}
//printf("\n\n ta=%d\n\n",ta);
///////////////////////////////////////////
for(r=0;r<ta;r=r+1)
{
distrap=sqrt(pow(Diprapt-(Dipt[g-1]),2)+pow(Doprapt-(Dopt[r]),2));
if(distrap<distrapt)
{
distrapt=distrap;
Dip=Dipt[g-1];
Dop=Dopt[r];
}
}
}
}
/*
printf("\n\n FACTOR Dip :\n\n");
printf(" %d ",Dip);
printf("\n\n FACTOR Dop:\n\n");
printf(" %d ",Dop);
*/
}
//Ésta función encuentra los factores de "N"
void factor(int N)
{
//Se empieza a verificar los factores desde 2
int i=2;
long N_factor;
N_factor = N;
while(i<=N_factor)
{
while((N_factor%i)==0)
{
vF[svF]=i;
N_factor=N_factor/i;
// printf("Factores: %d ",vF[svF]);
svF++;
}
i++;
}
}
//Ésta función encuentra todas las posibles combinaciones de factores que den como resultado "N"
void product(int vector_1[50],int vector_2[50],int valor)
{
int d,e,s,pNh,i;
int cont=0;
Prod[0]=1;
a=1;
for(d=0;d<=valor;d=d+1)
{
s=a;
pNh=1;
for(e=1;e<=vector_2[d];e=e+1)
{
pNh=pNh*vector_1[d];
for(i=(s*e+1);i<=(s*e+s);i=i+1)
{
Prod[i-1]=pNh*Prod[cont];
cont=cont+1;
}
a=a+s;
cont=0;
}
}
}
//Función auxiliar del host para calcular la etapa de entrada en el device
void etapa_entrada(void)
{
//////////////////////////////////////////////////////////////////////////
////////////////////////////ETAPA DE ENTRADA//////////////////////////////
//////////////////////////////////////////////////////////////////////////
//Declaración de variables locales
int k1,n1,n2;
//Asignación de memoria en el device para el arreglo "x_device"
hipMalloc((void**)&x_device,Li*sizeof(cuFloatComplex));
//Se reserva memoria en el device para el arreglo "W_device"
hipMalloc((void**)&W_device,N*sizeof(cuFloatComplex));
//Asignación de memoria en el device para el arreglo "y"
hipMalloc((void**)&y_device,P*Dip*Dop*sizeof(cuFloatComplex));
//Se pasa el arreglo x_host a x_device
hipMemcpy(x_device,x_host,Li*sizeof(cuFloatComplex),hipMemcpyHostToDevice);
//Envío de los arreglos W hacia la memoria global del device
hipMemcpy(W_device,W_host,N*sizeof(cuFloatComplex),hipMemcpyHostToDevice);
//Asignación de memoria en el host para "y"
//y_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*P*Dip*Dop);
//Dimensionamiento del grid para la función kernel "inputStage"
//Dimensionamiento del Grid
dim3 gridDim(1,1,1);
//Dimensionamiento del block
dim3 blockDim(1,1,1);
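// One thread per (n,k1) pair: n in [0, P*Dop) along x, k1 in [0, Dip) along y, capped at 32x32 = 1024 threads per block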
if((P*Dop) < 32 && (Dip) < 32)
{
blockDim.x = (P*Dop);
blockDim.y = (Dip);
gridDim.x = 1;
gridDim.y = 1;
}
else
{
blockDim.x = 32;
blockDim.y = 32;
gridDim.x = (unsigned int) (ceilf((float)(P*Dop)/(float)blockDim.x));
gridDim.y = (unsigned int) (ceilf((float)Dip/(float)blockDim.y));
}
//Lanzamiento del kernel "inputStage_kernel"
hipLaunchKernelGGL(( inputStage_kernel), dim3(gridDim),dim3(blockDim), 0, 0, N,Li,Dip,Dop,P,x_device,W_device,y_device);
//Esperar que el kernel termine de ejecutarse totalmente
hipDeviceSynchronize();
/*
//Copia del arreglo "y" del device hacia el host
hipMemcpy(y_host,y_device,sizeof(cuFloatComplex)*P*Dip*Dop,hipMemcpyDeviceToHost);
//Se imprimen los valores de "y"
printf("\n\n--- ARREGLO y(n1,n2,k1) ---\n\n");
for(k1 = 0;k1 < Dip;k1++)
{
for(n1 = 0;n1 < Dop;n1++)
{
for(n2 = 0;n2 < P;n2++)
{
printf(" (%f) + (%f) ",cuCrealf(y_host[(k1*Dop*P)+(n1*P)+n2]),cuCimagf(y_host[(k1*Dop*P)+(n1*P)+n2]));
}
printf("\n");
}
printf("\n\n");
}
printf("\n");
*/
}
//función kernel que ejecuta la etapa de entrada en el device
__global__ void inputStage_kernel(int N, int Li,int Dip,int Dop,int P,cuFloatComplex *x,cuFloatComplex *W,cuFloatComplex *y)
{
int n1,n2;
cuFloatComplex t1;
//Threads
int n = blockDim.x *blockIdx.x + threadIdx.x;
int k1 = blockDim.y *blockIdx.y + threadIdx.y;
//Se resetean las flags
//flag_inputstage_1_d[0] = 0;
//flag_inputstage_2_d[0] = 0;
//flag_inputstage_3_d[0] = 0;
//printf("\n n = %d k1 = %d",n,k1);
if( (n < (P*Dop)) && (k1 < Dip))
{
n2 = floorf(n/Dop);
n1 = n - (Dop*n2);
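// The linear index n is decomposed as n = n1 + Dop*n2, with n1 in [0, Dop) and n2 in [0, P)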
//Generación de los elementos que dependen de x[0]
if(n == 0)
{
y[(k1*Dop*P)+(0*P)+ 0] = x[0];
///Flag
//flag_inputstage_1_d[0] = 1;
}
//Mapeo de x[n] a las entradas del primer conjunto de Dop DFT's
if((n >= 1) && (n <= (Li-1)))
{
t1 = x[n];
if(k1 == 0)
{
y[(0*Dop*P)+(n1*P)+ n2] = t1;
}
if(k1 >= 1)
{
y[(k1*Dop*P)+(n1*P)+ n2] = cuCmulf(W[((n*k1)%N)-1],t1);
}
///Flag
//flag_inputstage_2_d[0] = 1;
}
//Rellenado de ceros para los elementos de "y" para Li <= n <= (P*Dop)-1
if((n >= Li) && (n <= (P*Dop)-1))
{
y[(k1*Dop*P)+(n1*P)+ n2] = make_cuFloatComplex(0.0,0.0);
///Flag
//flag_inputstage_3_d[0] = 1;
}
//printf("\n (%f) + (%f)\n ",cuCrealf(y[(k1*Dop*P)+(n1*P)+ n2]),cuCimagf(y[(k1*Dop*P)+(n1*P)+ n2]));
}
}
//Función auxiliar del host para calcular la etapa intermedia en el device
void etapa_intermedia(void)
{
//////////////////////////////////////////////////////////////////////////
////////////////////////////ETAPA INTERMEDIA//////////////////////////////
//////////////////////////////////////////////////////////////////////////
//Declaración de variables locales
int k1,k2,n1;
int n[1] = {P};
int inembed[1] = {P};
int onembed[1] = {P};
//Asignación de memoria en el device para "z"
hipMalloc((void**)&z_device,P*Dip*Dop*sizeof(cuFloatComplex));
//Asignación de memoria en el host para "z"
//z_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*P*Dip*Dop);
//Asignación de memoria en el device para "in" y "out"
hipMalloc((void**)&in,sizeof(hipfftComplex)*P*Dip*Dop);
hipMalloc((void**)&out,sizeof(hipfftComplex)*P*Dip*Dop);
//Se copia el arreglo "y" al arreglo "in"
hipMemcpy(in,y_device,sizeof(cuFloatComplex)*P*Dip*Dop,hipMemcpyDeviceToDevice);
//Se crea un plan
hipfftHandle plan;
hipfftPlanMany(&plan,1,n,inembed,1,P,onembed,1,P,HIPFFT_C2C,Dip*Dop);
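// Batched 1D C2C plan: Dip*Dop transforms of length P, contiguous in memory (stride 1, distance P between consecutive transforms)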
//Ejecución del plan
hipfftExecC2C(plan,in,out,HIPFFT_FORWARD);
//Esperar que el kernel termine de ejecutarse totalmente
hipDeviceSynchronize();
//Se copian los datos del arreglo "out" al arreglo "z_device"
hipMemcpy(z_device,out,sizeof(hipfftComplex)*P*Dip*Dop,hipMemcpyDeviceToDevice);
//Se destruye el plan
hipfftDestroy(plan);
//Se liberan los arreglos "in" y "out"
hipFree(in);
hipFree(out);
/*
//Se copian los datos del arreglo "z_device" al arreglo "z_host"
hipMemcpy(z_host,z_device,sizeof(cuFloatComplex)*P*Dip*Dop,hipMemcpyDeviceToHost);
///Se imprimen los valores de z(n1,k2,k1)
printf("\n\n--- ARREGLO z(n1,k2,k1) ---\n\n");
for(k1 = 0;k1 < Dip;k1++)
{
for(n1 = 0;n1 < Dop;n1++)
{
for(k2 = 0;k2 < P;k2++)
{
printf(" (%f) + (%f) ",cuCrealf(z_host[(k1*Dop*P)+(n1*P)+k2]),cuCimagf(z_host[(k1*Dop*P)+(n1*P)+k2]));
}
printf("\n");
}
printf("\n\n");
}
printf("\n");
*/
}
//Función auxiliar del host para calcular la etapa de salida en el device
void etapa_salida(void)
{
//////////////////////////////////////////////////////////////////////////
////////////////////////////ETAPA DE SALIDA///////////////////////////////
//////////////////////////////////////////////////////////////////////////
//Declaración de variables locales
int m;
//Asignación de memoria en el device para "X"
hipMalloc((void**)&X_device,Lo*sizeof(cuFloatComplex));
//Asignación de memoria en el host para "X"
X_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*Lo);
//Dimensionamiento del grid para la función kernel "outputStage"
//Dimensionamiento del Grid
dim3 gridDim(1,1,1);
//Dimensionamiento del block
dim3 blockDim(1,1,1);
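// One thread per output sample X[k], capped at 1024 threads per block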
if((Lo) < 1024)
{
blockDim.x = Lo;
gridDim.x = 1;
}
else
{
blockDim.x = 1024;
gridDim.x = (unsigned int) (ceilf((float)Lo/(float)blockDim.x));
}
//Lanzamiento del kernel "outputStage_kernel"
hipLaunchKernelGGL(( outputStage_kernel), dim3(gridDim),dim3(blockDim), 0, 0, N,Lo,Dip,Dop,P,z_device,W_device,X_device);
//Esperar que el kernel termine de ejecutarse totalmente
hipDeviceSynchronize();
//Copia del arreglo "X" del device hacia el host
hipMemcpy(X_host,X_device,sizeof(cuFloatComplex)*Lo,hipMemcpyDeviceToHost);
/*
//Se imprimen los valores de "X_host"
///Imprimir X[k]
printf("\n\n--- ARREGLO X[k] ---\n\n");
for(m=0;m<=Lo-1;m++)
{
printf("\n X[%d] = %.4f + (%.4f)",m,cuCrealf(X_host[m]),cuCimagf(X_host[m]));
//fprintf(da,"%.4f %.4f\n",creal(X[i]),cimag(X[i]));
}
*/
}
//función kernel que ejecuta la etapa de salida en el device
__global__ void outputStage_kernel(int N,int Lo,int Dip,int Dop,int P,cuFloatComplex *z,cuFloatComplex *W,cuFloatComplex *X)
{
//Declaración de variables locales
int n1,k_aux,k1,k2,a,b;
cuFloatComplex t1,t2,t3,t4,t5;
//Threads
int k = blockDim.x *blockIdx.x + threadIdx.x;
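// Each thread accumulates a single output coefficient X[k] over the Dop intermediate DFT outputs (the n1 loop below)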
//Se resetean las flags
//flag_outputstage_1_d[0] = 0;
//flag_outputstage_2_d[0] = 0;
//flag_outputstage_3_d[0] = 0;
if(k < Lo)
{
for(n1 = 0; n1 <= (Dop-1); n1 = n1+1)
{
if(Lo <= Dip)
{
//Cálculo de X(k) para 0<=k<=Lo-1.
//printf("\n--- Caso (Lo <= Dip) ---\n");
//En la descomposición k = k1 + Dipk2; k2 = 0, y por lo tanto, k = k1
if(n1 == 0) //Caso para lograr que por lo menos ingrese una vez
{
X[k] = z[(k*Dop*P)+(0*P) + 0];
///Flag
//flag_outputstage_1_d[0] = 1;
}
else
{
if(n1 == 1)
{
X[k] = z[(k*Dop*P)+(0*P) + 0];
}
X[k] = cuCaddf(z[(k*Dop*P)+(n1*P) + 0],X[k]);
///Flag
//flag_outputstage_1_d[0] = 1;
}
}
else
{
if((k >= 0) && (k <= (Dip-1)))
{
//Cálculo de X(k) para 0<=k<=Dip-1.
//En la descomposición k = k1 + Dipk2; k2 = 0, y por lo tanto, k = k1
if(n1 == 0) //Caso para lograr que por lo menos ingrese una vez
{
X[k] = z[(k*Dop*P)+(0*P) + 0];
}
else
{
if(n1 == 1)
{
X[k] = z[(k*Dop*P)+(0*P) + 0];
}
X[k] = cuCaddf(z[(k*Dop*P)+(n1*P) + 0],X[k]);
}
}
else
{
if(Dop <= 4)
{
//Usando el método directo
//printf("\n--- Caso (Metodo directo) ---\n");
if(n1 == 0) //Caso para lograr que por lo menos ingrese una vez
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
X[k] = z[(k1*Dop*P)+(0*P)+ (k2%P)];
///Flag
//flag_outputstage_2_d[0] = 1;
}
else
{
if(n1 == 1)
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
X[k] = z[(k1*Dop*P)+(0*P)+ (k2%P)];
}
a = floorf(k/(Dip*P));
X[k] = cuCaddf(X[k],cuCmulf(z[(k1*Dop*P)+(n1*P)+ (k2%P)],W[((n1*(k2+P*(a))*Dip)%N)-1]));
///Flag
//flag_outputstage_2_d[0] = 1;
}
}
else
{
//Usando el método filtering 2BF
//printf("\n--- Caso (Filtro 2BF) ---\n");
if((Dop-2) >= 1)
{
if(n1 == 0)
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
t1 = z[(k1*Dop*P)+((Dop-1)*P)+ (k2%P)];
b = floorf(k/(Dip*P));
t4 = cuCmulf(t1,make_cuFloatComplex(2*cuCrealf(W[(((k2+P*(b))*Dip)%N)-1]),0.0));
///Flag
//flag_outputstage_3_d[0] = 1;
}
if((n1 >= 1) && (n1 <= (Dop-2)))
{
t2 = t1;
t1 = cuCaddf(z[(k1*Dop*P)+((-(n1-(Dop-1)))*P)+ (k2%P)],t4);
t3 = cuCmulf(t1,make_cuFloatComplex(2*cuCrealf(W[(((k2+P*(b))*Dip)%N)-1]),0.0));
t4 = cuCsubf(t3,t2);
}
if(n1 == (Dop-1))
{
t5 = cuCaddf(z[(k1*Dop*P)+(0*P)+ (k2%P)],t4);
X[k] = cuCsubf(t5,cuCmulf(t1,cuConjf(W[(((k2+P*(b))*Dip)%N)-1])));
}
}
else
{
if(Dop == 1)
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
t1 = z[(k1*Dop*P)+((Dop-1)*P)+ (k2%P)];
X[k] = t1;
///Flag
//flag_outputstage_3_d[0] = 1;
}
else
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
t1 = z[(k1*Dop*P)+((Dop-1)*P)+ (k2%P)];
b = floorf(k/(Dip*P));
t4 = cuCmulf(t1,make_cuFloatComplex(2*cuCrealf(W[(((k2+P*(b))*Dip)%N)-1]),0.0));
t5 = cuCaddf(z[(k1*Dop*P)+(0*P)+ (k2%P)],t4);
X[k] = cuCsubf(t5,cuCmulf(t1,cuConjf(W[(((k2+P*(b))*Dip)%N)-1])));
///Flag
//flag_outputstage_3_d[0] = 1;
}
}
}
}
}
}
}
}
|
f84dec0b71659606c14c325c7d0d2f463b07af9e.cu
|
///Ésta programa calcula la versión paralelizada del algoritmo FFT_DIF_DIT_TD
///(31/08/2016)
///Ésta versión sirve para graficar en matlab los tiempos de ejecución, considerando (RADIX-2) N = 2^20, Li = 33 y Lo = {2,4,8,...,2^20}
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cufft.h>
#include <cufftw.h>
#include <stdio.h>
#include <stdlib.h>
#include <cuComplex.h>
#include <math.h>
#include <math_constants.h>
#include <iostream>
#include <time.h>
//////////////////////////////////////////////////////////////////////////
///////////////////////DECLARACIÓN DE FUNCIONES///////////////////////////
//////////////////////////////////////////////////////////////////////////
void vector_entrada_xn(int Li);
void arreglo_W(int N);
void asign_rap(int N,int Li,int Lo);
void factor(int N);
void product(int vector_1[50],int vector_2[50],int valor);
void etapa_entrada(void);
__global__ void inputStage_kernel(int N, int Li,int Dip,int Dop,int P,cuFloatComplex *x,cuFloatComplex *W,cuFloatComplex *y);
void etapa_intermedia(void);
void etapa_salida(void);
__global__ void outputStage_kernel(int N,int Lo,int Dip,int Dop,int P,cuFloatComplex *z,cuFloatComplex *W,cuFloatComplex *X);
//////////////////////////////////////////////////////////////////////////
/////////////////////DECLARACIÓN DE VARIABLES GLOBALES////////////////////
//////////////////////////////////////////////////////////////////////////
cuFloatComplex *x_host;
cuFloatComplex *W_host;
//cuFloatComplex *y_host;
//cuFloatComplex *z_host;
cuFloatComplex *X_host;
cuFloatComplex *x_device;
cuFloatComplex *W_device;
cuFloatComplex *y_device;
cuFloatComplex *z_device;
cuFloatComplex *X_device;
cufftComplex *in,*out;
FILE *db_open,*dc_open;
int Dip,Dop,P,N,Li,Lo;
int vF[50]; //Almacena los factores de N
int svF; //Almacena el numero de factores de N
int Prod[50];
int a;
#define inf 99999
//////////////////////////////////////////////////////////////////////////
//////////////////////////DATOS DE ENTRADA////////////////////////////////
//////////////////////////////////////////////////////////////////////////
/// N >>> Número de elementos del vector de entrada
/// Li >>> Número de elementos de entrada diferentes de cero
/// Lo >>> Número de elementos de salida requeridos
/// loop >>> Número de iteraciones
/// muestras >>> Número de muestras
//////////////////////////////////////////////////////////////////////////
///////////////////////////DATOS DE SALIDA////////////////////////////////
//////////////////////////////////////////////////////////////////////////
/// X >>> Vector de salida
//////////////////////////////////////////////////////////////////////////
/////////////////// SE INGRESAN LOS DATOS DE ENTRADA /////////////////////
//////////////////////////////////////////////////////////////////////////
///Ingrese el número de iteraciones requeridas
const int loop = 300;
///Ingrese el valor de N_max
const int N_max = 20;
///Ingrese el valor de Li_max
const int Li_max = 33;
//////////////////////////////////////////////////////////////////////////
//////////////////////////FUNCION PRINCIPAL///////////////////////////////
//////////////////////////////////////////////////////////////////////////
//Función principal
int main()
{
int i,j,i_N,j_res,k_res,cont,i_prom;
float suma;
float promedio[N_max];
//////////////////////////////////////////////////////////////////////////
//////////////////////////SELECCIÓN DEL DEVICE////////////////////////////
//////////////////////////////////////////////////////////////////////////
int device;
FILE *da;
cudaSetDevice(1);
cudaGetDevice(&device);
if(device == 0)
{
printf("\n\n---DEVICE = GeForce GTX 970---\n\n");
da = fopen("Tiempos_N20_Li33_LoVARIA_CUDA_GTX970.bin","a+b"); //Crea o sobre escribe archivo
}
if(device == 1)
{
printf("\n\n---DEVICE = TESLA K20---\n\n");
da = fopen("Tiempos_N20_Li33_LoVARIA_CUDA_TESLAK20c.bin","a+b"); //Crea o sobre escribe archivo
}
//Pausa
printf("\n---PRESIONA UNA TECLA PARA CONTINUAR---\n\n");
getchar();
for(i_N = N_max;i_N <= N_max;i_N++)
{
N = (int )pow(2,i_N);
printf("\n N = %d \n",N);
for(j_res= Li_max;j_res <= Li_max;j_res++)
{
Li=j_res;
for(k_res=1;k_res <= N_max;k_res++)
{
Lo=(int )pow(2,k_res);
printf("\n Li = %d Lo = %d",Li,Lo);
///Se abre el archivo binario
db_open = fopen("Entrada_real_N20_C.bin","rb");
dc_open = fopen("Entrada_imag_N20_C.bin","rb");
suma=0.0;
for(j=0;j<loop;j++)
{
//Comandos necesarios para medir el tiempo
float elapsedTime_app;
cudaEvent_t start_app, stop_app;
cudaEventCreate(&start_app);
cudaEventCreate(&stop_app);
//Se generan en el host los valores del vector de entrada x[n]
vector_entrada_xn(Li);
///Se genera el arreglo W[N]
arreglo_W(N);
//---------------------------------------------------------------------------------------------
//Se empieza a medir el tiempo de ejecucion de la aplicacion
cudaEventRecord(start_app,0);
//Se generan en el host los factores Dip y Dop
asign_rap(N,Li,Lo);
//Cálculo en el host del factor P
P = N/(Dip*Dop);
//printf("\n\n FACTOR P:\n\n");
//printf("\n Dip = %d Dop = %d P = %d ",Dip,Dop,P);
//Función auxiliar del host para ejecutar la etapa de entrada
etapa_entrada();
//Función auxiliar del host para ejecutar la etapa intermedia
etapa_intermedia();
//Función auxiliar del host para ejecutar la etapa de salida
etapa_salida();
//---------------------------------------------------------------------------------------------
//Comandos necesarios para medir el tiempo de la aplicacion (app)
cudaEventRecord(stop_app,0);
cudaEventSynchronize(stop_app);
cudaEventElapsedTime(&elapsedTime_app,start_app,stop_app);
//Suma de todos los tiempos
suma = suma + elapsedTime_app;
//Se destruyen los eventos que miden el tiempo de la aplicacion
cudaEventDestroy(start_app);
cudaEventDestroy(stop_app);
//Se liberan memorias del Host y Device
free(x_host);
free(W_host);
free(X_host);
cudaFree(x_device);
cudaFree(W_device);
cudaFree(y_device);
cudaFree(z_device);
cudaFree(X_device);
}
promedio[k_res-1] = suma/(float)loop;
fclose(db_open);
fclose(dc_open);
}
}
}
fwrite(promedio,sizeof(float),N_max,da);
fclose(da);
return EXIT_SUCCESS;
}
//////////////////////////////////////////////////////////////////////////
/////////////////////////FUNCIONES SECUNDARIAS////////////////////////////
//////////////////////////////////////////////////////////////////////////
//Ésta función genera el vector de entrada x[n]
void vector_entrada_xn(int Li)
{
//Declaración de variables locales
int k;
float *buffer_real,*buffer_imag;
//Se reserva memoria para xn_host en el host
x_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*Li);
buffer_real = (float*)malloc(sizeof(float)*(pow(2,N_max)));
buffer_imag = (float*)malloc(sizeof(float)*(pow(2,N_max)));
///Se lee el vector de entrada del archivo binario
fread(buffer_real,sizeof(float),(int)pow(2,N_max),db_open);
fread(buffer_imag,sizeof(float),(int)pow(2,N_max),dc_open);
//Se dan valores a x[n]
for(k = 0;k < Li; k++)
{
//x_host[k] = make_cuFloatComplex((float)(rand()%11),(float)(rand()%11));
//x_host[k] = make_cuFloatComplex((float)(k + 1),(float)(0.0));
x_host[k] = make_cuFloatComplex(buffer_real[k],buffer_imag[k]);
}
/*
//Se imprimen los valores de entrada x[n]
printf("\n---ELEMENTOS DE ENTRADA x[n]---\n\n");
for(k=0;k<Li;k++)
{
printf(" %d-> (%f) + (%f)\n",k+1,cuCrealf(x_host[k]),cuCimagf(x_host[k]));
}
*/
free(buffer_real);
free(buffer_imag);
}
//Ésta función genera el arreglo W
void arreglo_W(int N)
{
//Declaración de variables locales
int n;
//Se reserva memoria para W_host en el host
W_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*N);
//Se genera el arreglo W
for(n = 1;n <= N;n++)
{
W_host[n-1] = make_cuFloatComplex((float)cos((2*CUDART_PI*n)/N),(float)(-1)*sin((2*CUDART_PI*n)/N));
}
/*
//Se imprimen los valores del arreglo W[N]
printf("\n---ARREGLO W[N]---\n\n");
for(n = 0;n < N; n++)
{
printf(" W[%d]-> (%f) + (%f)\n",n+1,cuCrealf(W_host[n]),cuCimagf(W_host[n]));
}
*/
}
//Ésta función genera los factores Dip y Dop
void asign_rap(int N,int Li,int Lo)
{
//Declaración de variables locales
float NLi,NLo,Diprapt,Doprapt;
int Nh[50];
int k[50];
int G;
int g,i,t,ta;
int Dipt[50],Dopt[50];
float distrapt,distrap;
int Pos,h,Poss;
int nk[50];
int r;
//Inicializaciones
G = 0;
svF = 0;
//Factores Dip y Dop ideales
NLi=(float)N/(float)Li;
NLo=(float)N/(float)Lo;
Diprapt=NLi;
Doprapt=NLo;
//Se encuentran los factores de "N"
//vF almacena los factores de "N"
//svF almacena el número de factores de "N"
factor(N);
/*
Almacena en el vector Nh los factores que son diferentes de del vector vF
En el vector k se almacena la cantidad de veces que se repite cada
elemento almacenado en el vector Nh.
*/
Nh[0] = vF[0];
k[0]=1;
for(g=1;g<=svF-1;g=g+1)
{
if(vF[g]!=vF[g-1])
{
G=G+1;
Nh[G]=vF[g];
k[G]=1;
}
else
{
k[G]=k[G]+1;
}
}
/*
Almacena en el vector Nh todas las posibles combinaciones que den como
producto a N. t almacena el numero de elementos del vector Nh.
*/
product(Nh,k,G);
t = a;
for(i=0;i<t;i=i+1)
{
Dipt[i]=Prod[i];
}
distrapt=inf;
for(g=1;g<=t;g=g+1)
{
if(Dipt[g-1]<=NLi)
{
Pos=g-1;
for(h=0;h<=G;h=h+1)
{
Poss=floor(Pos/(k[h]+1));
nk[h]=k[h]+Poss*(k[h]+1)-Pos;
Pos=Poss;
}
product(Nh,nk,G);
ta=a;
for(i=0;i<ta;i=i+1)
{
Dopt[i]=Prod[i];
}
////////////////////////////////////////////
//int j;
//for(j=0;j<ta;j++)
//{
// printf(" %d ",Dopt[j]);
//}
//printf("\n\n ta=%d\n\n",ta);
///////////////////////////////////////////
for(r=0;r<ta;r=r+1)
{
distrap=sqrt(pow(Diprapt-(Dipt[g-1]),2)+pow(Doprapt-(Dopt[r]),2));
if(distrap<distrapt)
{
distrapt=distrap;
Dip=Dipt[g-1];
Dop=Dopt[r];
}
}
}
}
/*
printf("\n\n FACTOR Dip :\n\n");
printf(" %d ",Dip);
printf("\n\n FACTOR Dop:\n\n");
printf(" %d ",Dop);
*/
}
//Ésta función encuentra los factores de "N"
void factor(int N)
{
//Se empieza a verificar los factores desde 2
int i=2;
long N_factor;
N_factor = N;
while(i<=N_factor)
{
while((N_factor%i)==0)
{
vF[svF]=i;
N_factor=N_factor/i;
// printf("Factores: %d ",vF[svF]);
svF++;
}
i++;
}
}
//Ésta función encuentra todas las posibles combinaciones de factores que den como resultado "N"
void product(int vector_1[50],int vector_2[50],int valor)
{
int d,e,s,pNh,i;
int cont=0;
Prod[0]=1;
a=1;
for(d=0;d<=valor;d=d+1)
{
s=a;
pNh=1;
for(e=1;e<=vector_2[d];e=e+1)
{
pNh=pNh*vector_1[d];
for(i=(s*e+1);i<=(s*e+s);i=i+1)
{
Prod[i-1]=pNh*Prod[cont];
cont=cont+1;
}
a=a+s;
cont=0;
}
}
}
//Función auxiliar del host para calcular la etapa de entrada en el device
void etapa_entrada(void)
{
//////////////////////////////////////////////////////////////////////////
////////////////////////////ETAPA DE ENTRADA//////////////////////////////
//////////////////////////////////////////////////////////////////////////
//Declaración de variables locales
int k1,n1,n2;
//Asignación de memoria en el device para el arreglo "x_device"
cudaMalloc((void**)&x_device,Li*sizeof(cuFloatComplex));
//Se reserva memoria en el device para el arreglo "W_device"
cudaMalloc((void**)&W_device,N*sizeof(cuFloatComplex));
//Asignación de memoria en el device para el arreglo "y"
cudaMalloc((void**)&y_device,P*Dip*Dop*sizeof(cuFloatComplex));
//Se pasa el arreglo x_host a x_device
cudaMemcpy(x_device,x_host,Li*sizeof(cuFloatComplex),cudaMemcpyHostToDevice);
//Envío de los arreglos W hacia la memoria global del device
cudaMemcpy(W_device,W_host,N*sizeof(cuFloatComplex),cudaMemcpyHostToDevice);
//Asignación de memoria en el host para "y"
//y_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*P*Dip*Dop);
//Dimensionamiento del grid para la función kernel "inputStage"
//Dimensionamiento del Grid
dim3 gridDim(1,1,1);
//Dimensionamiento del block
dim3 blockDim(1,1,1);
if((P*Dop) < 32 && (Dip) < 32)
{
blockDim.x = (P*Dop);
blockDim.y = (Dip);
gridDim.x = 1;
gridDim.y = 1;
}
else
{
blockDim.x = 32;
blockDim.y = 32;
gridDim.x = (unsigned int) (ceilf((float)(P*Dop)/(float)blockDim.x));
gridDim.y = (unsigned int) (ceilf((float)Dip/(float)blockDim.y));
}
//Lanzamiento del kernel "inputStage_kernel"
inputStage_kernel<<<gridDim,blockDim>>>(N,Li,Dip,Dop,P,x_device,W_device,y_device);
//Esperar que el kernel termine de ejecutarse totalmente
cudaDeviceSynchronize();
/*
//Copia del arreglo "y" del device hacia el host
cudaMemcpy(y_host,y_device,sizeof(cuFloatComplex)*P*Dip*Dop,cudaMemcpyDeviceToHost);
//Se imprimen los valores de "y"
printf("\n\n--- ARREGLO y(n1,n2,k1) ---\n\n");
for(k1 = 0;k1 < Dip;k1++)
{
for(n1 = 0;n1 < Dop;n1++)
{
for(n2 = 0;n2 < P;n2++)
{
printf(" (%f) + (%f) ",cuCrealf(y_host[(k1*Dop*P)+(n1*P)+n2]),cuCimagf(y_host[(k1*Dop*P)+(n1*P)+n2]));
}
printf("\n");
}
printf("\n\n");
}
printf("\n");
*/
}
//función kernel que ejecuta la etapa de entrada en el device
__global__ void inputStage_kernel(int N, int Li,int Dip,int Dop,int P,cuFloatComplex *x,cuFloatComplex *W,cuFloatComplex *y)
{
int n1,n2;
cuFloatComplex t1;
//Threads
int n = blockDim.x *blockIdx.x + threadIdx.x;
int k1 = blockDim.y *blockIdx.y + threadIdx.y;
//Se resetean las flags
//flag_inputstage_1_d[0] = 0;
//flag_inputstage_2_d[0] = 0;
//flag_inputstage_3_d[0] = 0;
//printf("\n n = %d k1 = %d",n,k1);
if( (n < (P*Dop)) && (k1 < Dip))
{
n2 = floorf(n/Dop);
n1 = n - (Dop*n2);
//Generación de los elementos que dependen de x[0]
if(n == 0)
{
y[(k1*Dop*P)+(0*P)+ 0] = x[0];
///Flag
//flag_inputstage_1_d[0] = 1;
}
//Mapeo de x[n] a las entradas del primer conjunto de Dop DFT's
if((n >= 1) && (n <= (Li-1)))
{
t1 = x[n];
if(k1 == 0)
{
y[(0*Dop*P)+(n1*P)+ n2] = t1;
}
if(k1 >= 1)
{
y[(k1*Dop*P)+(n1*P)+ n2] = cuCmulf(W[((n*k1)%N)-1],t1);
}
///Flag
//flag_inputstage_2_d[0] = 1;
}
//Rellenado de ceros para los elementos de "y" para Li <= n <= (P*Dop)-1
if((n >= Li) && (n <= (P*Dop)-1))
{
y[(k1*Dop*P)+(n1*P)+ n2] = make_cuFloatComplex(0.0,0.0);
///Flag
//flag_inputstage_3_d[0] = 1;
}
//printf("\n (%f) + (%f)\n ",cuCrealf(y[(k1*Dop*P)+(n1*P)+ n2]),cuCimagf(y[(k1*Dop*P)+(n1*P)+ n2]));
}
}
//Función auxiliar del host para calcular la etapa intermedia en el device
void etapa_intermedia(void)
{
//////////////////////////////////////////////////////////////////////////
////////////////////////////ETAPA INTERMEDIA//////////////////////////////
//////////////////////////////////////////////////////////////////////////
//Declaración de variables locales
int k1,k2,n1;
int n[1] = {P};
int inembed[1] = {P};
int onembed[1] = {P};
//Asignación de memoria en el device para "z"
cudaMalloc((void**)&z_device,P*Dip*Dop*sizeof(cuFloatComplex));
//Asignación de memoria en el host para "z"
//z_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*P*Dip*Dop);
//Asignación de memoria en el device para "in" y "out"
cudaMalloc((void**)&in,sizeof(cufftComplex)*P*Dip*Dop);
cudaMalloc((void**)&out,sizeof(cufftComplex)*P*Dip*Dop);
//Se copia el arreglo "y" al arreglo "in"
cudaMemcpy(in,y_device,sizeof(cuFloatComplex)*P*Dip*Dop,cudaMemcpyDeviceToDevice);
//Se crea un plan
cufftHandle plan;
cufftPlanMany(&plan,1,n,inembed,1,P,onembed,1,P,CUFFT_C2C,Dip*Dop);
//Ejecución del plan
cufftExecC2C(plan,in,out,CUFFT_FORWARD);
//Esperar que el kernel termine de ejecutarse totalmente
cudaDeviceSynchronize();
//Se copian los datos del arreglo "out" al arreglo "z_device"
cudaMemcpy(z_device,out,sizeof(cufftComplex)*P*Dip*Dop,cudaMemcpyDeviceToDevice);
//Se destruye el plan
cufftDestroy(plan);
//Se liberan los arreglos "in" y "out"
cudaFree(in);
cudaFree(out);
/*
//Se copian los datos del arreglo "z_device" al arreglo "z_host"
cudaMemcpy(z_host,z_device,sizeof(cuFloatComplex)*P*Dip*Dop,cudaMemcpyDeviceToHost);
///Se imprimen los valores de z(n1,k2,k1)
printf("\n\n--- ARREGLO z(n1,k2,k1) ---\n\n");
for(k1 = 0;k1 < Dip;k1++)
{
for(n1 = 0;n1 < Dop;n1++)
{
for(k2 = 0;k2 < P;k2++)
{
printf(" (%f) + (%f) ",cuCrealf(z_host[(k1*Dop*P)+(n1*P)+k2]),cuCimagf(z_host[(k1*Dop*P)+(n1*P)+k2]));
}
printf("\n");
}
printf("\n\n");
}
printf("\n");
*/
}
//Función auxiliar del host para calcular la etapa de salida en el device
void etapa_salida(void)
{
//////////////////////////////////////////////////////////////////////////
////////////////////////////ETAPA DE SALIDA///////////////////////////////
//////////////////////////////////////////////////////////////////////////
//Declaración de variables locales
int m;
//Asignación de memoria en el device para "X"
cudaMalloc((void**)&X_device,Lo*sizeof(cuFloatComplex));
//Asignación de memoria en el host para "X"
X_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*Lo);
//Dimensionamiento del grid para la función kernel "outputStage"
//Dimensionamiento del Grid
dim3 gridDim(1,1,1);
//Dimensionamiento del block
dim3 blockDim(1,1,1);
if((Lo) < 1024)
{
blockDim.x = Lo;
gridDim.x = 1;
}
else
{
blockDim.x = 1024;
gridDim.x = (unsigned int) (ceilf((float)Lo/(float)blockDim.x));
}
//Lanzamiento del kernel "outputStage_kernel"
outputStage_kernel<<<gridDim,blockDim>>>(N,Lo,Dip,Dop,P,z_device,W_device,X_device);
//Esperar que el kernel termine de ejecutarse totalmente
cudaDeviceSynchronize();
//Copia del arreglo "X" del device hacia el host
cudaMemcpy(X_host,X_device,sizeof(cuFloatComplex)*Lo,cudaMemcpyDeviceToHost);
/*
//Se imprimen los valores de "X_host"
///Imprimir X[k]
printf("\n\n--- ARREGLO X[k] ---\n\n");
for(m=0;m<=Lo-1;m++)
{
printf("\n X[%d] = %.4f + (%.4f)",m,cuCrealf(X_host[m]),cuCimagf(X_host[m]));
//fprintf(da,"%.4f %.4f\n",creal(X[i]),cimag(X[i]));
}
*/
}
//función kernel que ejecuta la etapa de salida en el device
__global__ void outputStage_kernel(int N,int Lo,int Dip,int Dop,int P,cuFloatComplex *z,cuFloatComplex *W,cuFloatComplex *X)
{
//Declaración de variables locales
int n1,k_aux,k1,k2,a,b;
cuFloatComplex t1,t2,t3,t4,t5;
//Threads
int k = blockDim.x *blockIdx.x + threadIdx.x;
//Se resetean las flags
//flag_outputstage_1_d[0] = 0;
//flag_outputstage_2_d[0] = 0;
//flag_outputstage_3_d[0] = 0;
if(k < Lo)
{
for(n1 = 0; n1 <= (Dop-1); n1 = n1+1)
{
if(Lo <= Dip)
{
//Cálculo de X(k) para 0<=k<=Lo-1.
//printf("\n--- Caso (Lo <= Dip) ---\n");
//En la descomposición k = k1 + Dipk2; k2 = 0, y por lo tanto, k = k1
if(n1 == 0) //Caso para lograr que por lo menos ingrese una vez
{
X[k] = z[(k*Dop*P)+(0*P) + 0];
///Flag
//flag_outputstage_1_d[0] = 1;
}
else
{
if(n1 == 1)
{
X[k] = z[(k*Dop*P)+(0*P) + 0];
}
X[k] = cuCaddf(z[(k*Dop*P)+(n1*P) + 0],X[k]);
///Flag
//flag_outputstage_1_d[0] = 1;
}
}
else
{
if((k >= 0) && (k <= (Dip-1)))
{
//Cálculo de X(k) para 0<=k<=Dip-1.
//En la descomposición k = k1 + Dipk2; k2 = 0, y por lo tanto, k = k1
if(n1 == 0) //Caso para lograr que por lo menos ingrese una vez
{
X[k] = z[(k*Dop*P)+(0*P) + 0];
}
else
{
if(n1 == 1)
{
X[k] = z[(k*Dop*P)+(0*P) + 0];
}
X[k] = cuCaddf(z[(k*Dop*P)+(n1*P) + 0],X[k]);
}
}
else
{
if(Dop <= 4)
{
//Usando el método directo
//printf("\n--- Caso (Metodo directo) ---\n");
if(n1 == 0) //Caso para lograr que por lo menos ingrese una vez
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
X[k] = z[(k1*Dop*P)+(0*P)+ (k2%P)];
///Flag
//flag_outputstage_2_d[0] = 1;
}
else
{
if(n1 == 1)
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
X[k] = z[(k1*Dop*P)+(0*P)+ (k2%P)];
}
a = floorf(k/(Dip*P));
X[k] = cuCaddf(X[k],cuCmulf(z[(k1*Dop*P)+(n1*P)+ (k2%P)],W[((n1*(k2+P*(a))*Dip)%N)-1]));
///Flag
//flag_outputstage_2_d[0] = 1;
}
}
else
{
//Usando el método filtering 2BF
//printf("\n--- Caso (Filtro 2BF) ---\n");
if((Dop-2) >= 1)
{
if(n1 == 0)
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
t1 = z[(k1*Dop*P)+((Dop-1)*P)+ (k2%P)];
b = floorf(k/(Dip*P));
t4 = cuCmulf(t1,make_cuFloatComplex(2*cuCrealf(W[(((k2+P*(b))*Dip)%N)-1]),0.0));
///Flag
//flag_outputstage_3_d[0] = 1;
}
if((n1 >= 1) && (n1 <= (Dop-2)))
{
t2 = t1;
t1 = cuCaddf(z[(k1*Dop*P)+((-(n1-(Dop-1)))*P)+ (k2%P)],t4);
t3 = cuCmulf(t1,make_cuFloatComplex(2*cuCrealf(W[(((k2+P*(b))*Dip)%N)-1]),0.0));
t4 = cuCsubf(t3,t2);
}
if(n1 == (Dop-1))
{
t5 = cuCaddf(z[(k1*Dop*P)+(0*P)+ (k2%P)],t4);
X[k] = cuCsubf(t5,cuCmulf(t1,cuConjf(W[(((k2+P*(b))*Dip)%N)-1])));
}
}
else
{
if(Dop == 1)
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
t1 = z[(k1*Dop*P)+((Dop-1)*P)+ (k2%P)];
X[k] = t1;
///Flag
//flag_outputstage_3_d[0] = 1;
}
else
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
t1 = z[(k1*Dop*P)+((Dop-1)*P)+ (k2%P)];
b = floorf(k/(Dip*P));
t4 = cuCmulf(t1,make_cuFloatComplex(2*cuCrealf(W[(((k2+P*(b))*Dip)%N)-1]),0.0));
t5 = cuCaddf(z[(k1*Dop*P)+(0*P)+ (k2%P)],t4);
X[k] = cuCsubf(t5,cuCmulf(t1,cuConjf(W[(((k2+P*(b))*Dip)%N)-1])));
///Flag
//flag_outputstage_3_d[0] = 1;
}
}
}
}
}
}
}
}
|
4f3e098c29bb198960f7c74d2fd1216f5d3a7041.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <device_matrix/device_matrix.h>
#include <glog/logging.h>
#include <memory>
using namespace cuda;
template <typename FloatT>
__global__
void inverse_kernel(FloatT* const input) {
size_t offset = threadIdx.y * blockDim.x + threadIdx.x;
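// Assumes a single block with one thread per matrix element; the kernel negates each element in place (additive inverse)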
input[offset] = -input[offset];
}
int main(int argc, char* argv[]) {
google::InitGoogleLogging(argv[0]);
const hipStream_t stream = 0; // default CUDA stream.
std::unique_ptr<device_matrix<float32>> a(
device_matrix<float32>::create(
stream,
{1.0, 2.0, 3.0, 4.0, 5.0, 6.0},
2 /* num_rows */, 3 /* num_columns */));
LAUNCH_KERNEL(
hipLaunchKernelGGL(( inverse_kernel)
, dim3(1), /* a single block */
dim3(a->getRows(), a->getCols()), /* one thread per component */
0,
stream,
a->getData()));
hipDeviceSynchronize();
print_matrix(*a);
}
|
4f3e098c29bb198960f7c74d2fd1216f5d3a7041.cu
|
#include <device_matrix/device_matrix.h>
#include <glog/logging.h>
#include <memory>
using namespace cuda;
template <typename FloatT>
__global__
void inverse_kernel(FloatT* const input) {
size_t offset = threadIdx.y * blockDim.x + threadIdx.x;
input[offset] = -input[offset];
}
int main(int argc, char* argv[]) {
google::InitGoogleLogging(argv[0]);
const cudaStream_t stream = 0; // default CUDA stream.
std::unique_ptr<device_matrix<float32>> a(
device_matrix<float32>::create(
stream,
{1.0, 2.0, 3.0, 4.0, 5.0, 6.0},
2 /* num_rows */, 3 /* num_columns */));
LAUNCH_KERNEL(
inverse_kernel
<<<1, /* a single block */
dim3(a->getRows(), a->getCols()), /* one thread per component */
0,
stream>>>(
a->getData()));
cudaDeviceSynchronize();
print_matrix(*a);
}
|
41e3cb7f510a0006829cca8ab72768cc83b65caa.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
//#include <windows.h>
#include <GL/glew.h>
#include <GL/glut.h>
//#include "SOIL.H"
#include "device.h"
//#include <cutil_inline.h>
//#include <cutil_gl_inline.h>
extern "C" void mydraw (hipSurfaceObject_t sf, float time,unsigned int width, unsigned height);
#include <hip/hip_runtime_api.h>
#include <cuda_gl_interop.h>
unsigned int width=128;
unsigned int height=96;
GLuint gl_tex,gl_buffer;
struct cudaGraphicsResource* cuda_tex;
hipError_t e;
void displayCB();
void mouseCB(int button, int state, int x, int y);
void mouseMotionCB(int x, int y);
void reshapeCB(int w, int h);
//view parameter
bool mouseLeftDown;
bool mouseRightDown;
float mouseX, mouseY;
float cameraAngleX;
float cameraAngleY;
float cameraDistance;
__global__ void setKernel(hipSurfaceObject_t outputSurfObj,
int width, int height)
{
// Calculate surface coordinates
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < width && y < height) {
uchar4 data;
// Read from input surface
data=make_uchar4(32,64,128,255);
// Write to output surface
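// surf2Dwrite takes the x coordinate in bytes, so it is scaled by sizeof(uchar4) = 4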
surf2Dwrite(data, outputSurfObj, x * 4, y);
}
}
int main(int argc, char **argv){
// Initialize OpenGL and GLUT for device 0
// and make the OpenGL context current
printf("debug00:\n");
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE);
glutInitWindowPosition(100,100);
glutInitWindowSize(512,512);
glutCreateWindow(argv[0]);
glutDisplayFunc(displayCB);
glutMouseFunc(mouseCB);
glutMotionFunc(mouseMotionCB);
glutReshapeFunc(reshapeCB);
glEnable(GL_TEXTURE_2D);
//gen gl_tex
glGenTextures(1, &gl_tex);
//bind gl_tex
glBindTexture(GL_TEXTURE_2D, gl_tex);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB8, width, height, 0, GL_RGB, GL_UNSIGNED_BYTE, NULL);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glBindTexture(GL_TEXTURE_2D, 0);
//update gl_tex
float pixels[] = {
0.0f, 0.0f, 0.0f, 1.0f, 1.0f, 1.0f,
1.0f, 1.0f, 1.0f, 0.0f, 0.0f, 0.0f
};
glBindTexture(GL_TEXTURE_2D, gl_tex);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB8, 2, 2, 0, GL_RGB, GL_FLOAT, pixels);
glBindTexture(GL_TEXTURE_2D, 0);
hipSetDevice(0);
//hipGraphicsGLRegisterImage(&cuda_tex,gl_tex,GL_TEXTURE_2D,hipGraphicsMapFlagsWriteDiscard)
// glGenBuffersARB(1,&gl_buffer);
//glBindBufferARB(GL_PIXEL_UNPACK_BUFFER_ARB,gl_buffer);
//unsigned int size=width*height*4*sizeof(unsigned char);
//glBufferDataARB(GL_PIXEL_UNPACK_BUFFER_ARB,size,0,GL_DYNAMIC_DRAW);
//glBindBufferARB(GL_PIXEL_UNPACK_BUFFER_ARB,0);
e = hipGraphicsGLRegisterImage(&cuda_tex, gl_tex, GL_TEXTURE_2D, hipGraphicsRegisterFlagsSurfaceLoadStore);
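// NOTE: the following call re-registers cuda_tex as a buffer resource even though gl_buffer is never generated (glGenBuffersARB above is commented out); it appears to be leftover from the disabled PBO path and likely fails or clobbers the image registration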
e = hipGraphicsGLRegisterBuffer(&cuda_tex,gl_buffer,hipGraphicsMapFlagsWriteDiscard);
//main loop
glutMainLoop();
}
void displayCB(){
//cuda write tex
// unsigned char* g_image;
hipGraphicsMapResources(1,&cuda_tex,0);
hipArray_t cuda_array;
e = hipGraphicsSubResourceGetMappedArray(&cuda_array, cuda_tex, 0, 0);
// Specify surface
struct hipResourceDesc resDesc;
memset(&resDesc, 0, sizeof(resDesc));
resDesc.resType = hipResourceTypeArray;
// Create the surface objects
resDesc.res.array.array = cuda_array; // use the array mapped from cuda_tex above
hipSurfaceObject_t outputSurfObj = 0;
hipCreateSurfaceObject(&outputSurfObj, &resDesc);
// Invoke kernel
dim3 dimBlock(16, 16);
dim3 dimGrid((width + dimBlock.x - 1) / dimBlock.x,
(height + dimBlock.y - 1) / dimBlock.y);
hipLaunchKernelGGL(( setKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, outputSurfObj,width, height);
//size_t num_bytes;
//hipGraphicsResourceGetMappedPointer((void**)&g_image,&num_bytes,cuda_tex);
//mydraw(g_image,glutGet(GLUT_ELAPSED_TIME)*1e-3f,width,height);
hipGraphicsUnmapResources(1,&cuda_tex,0);
//glBindBufferARB(GL_PIXEL_UNPACK_BUFFER_ARB, gl_buffer);
// glBindTexture(GL_TEXTURE_2D, gl_tex);
// glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, width, height, GL_RGBA, GL_UNSIGNED_BYTE, 0);
// glBindBufferARB(GL_PIXEL_UNPACK_BUFFER_ARB, 0);
// tramsform camera
glClearColor(0.5f,0.5f,0.5f,0.5f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
gluLookAt(0,0,1,0,0,0,0,1,0);
glTranslatef(0, 0, cameraDistance);
glRotatef(cameraAngleX, 1, 0, 0); // pitch
glRotatef(cameraAngleY, 0, 1, 0); // heading
//render
/*
float pixels[] = {
0.0f, 0.0f, 0.0f, 1.0f, 1.0f, 1.0f,
1.0f, 1.0f, 1.0f, 0.0f, 0.0f, 0.0f
};
glBindTexture(GL_TEXTURE_2D, gl_tex);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB8, 2, 2, 0, GL_RGB, GL_FLOAT, pixels);
*/
glBindTexture(GL_TEXTURE_2D, gl_tex);
glBegin(GL_QUADS);
glNormal3f(0, 0, 1);
//glColor4f(0.5, 0.5, 0.5, 0.5);
glTexCoord2f(0.0f, 0.0f); glVertex3f(-1.0f, -1.0f, 0.0f);
glTexCoord2f(1.0f, 0.0f); glVertex3f( 1.0f, -1.0f, 0.0f);
glTexCoord2f(1.0f, 1.0f); glVertex3f( 1.0f, 1.0f, 0.0f);
glTexCoord2f(0.0f, 1.0f); glVertex3f(-1.0f, 1.0f, 0.0f);
glEnd();
glBegin(GL_TRIANGLES);
glNormal3f(0, 0, 1);
glTexCoord2f(0.0f, 0.0f); glVertex3f(-1.0f, -1.0f, 1.0f);
glTexCoord2f(1.0f, 1.0f); glVertex3f( 1.0f, 1.0f, 1.0f);
glTexCoord2f(1.0f, 0.0f); glVertex3f( 1.0f, -1.0f, 1.0f);
glEnd();
//glBindTexture(GL_TEXTURE_2D, 0);
glutSwapBuffers();
glutPostRedisplay();
}
void mouseCB(int button, int state, int x, int y){
mouseX = (float)x;
mouseY = (float)y;
if(button == GLUT_LEFT_BUTTON)
{
if(state == GLUT_DOWN)
{
mouseLeftDown = true;
}
else if(state == GLUT_UP)
mouseLeftDown = false;
}
else if(button == GLUT_RIGHT_BUTTON)
{
if(state == GLUT_DOWN)
{
mouseRightDown = true;
}
else if(state == GLUT_UP)
mouseRightDown = false;
}
}
void mouseMotionCB(int x, int y){
if(mouseLeftDown)
{
cameraAngleY += 0.2f*(x - mouseX);
cameraAngleX += 0.2f*(y - mouseY);
mouseX = (float)x;
mouseY = (float)y;
}
if(mouseRightDown)
{
cameraDistance += (y - mouseY) * 0.1f;
mouseY = (float)y;
}
//printf("cameraDistance %f%\n",cameraDistance);
glutPostRedisplay();
}
void reshapeCB(int w, int h){
// set viewport to be the entire window
glViewport(0, 0, (GLsizei)w, (GLsizei)h);
// set perspective viewing frustum
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
//glFrustum(-aspectRatio, aspectRatio, -1, 1, 1, 100);
gluPerspective(36.0f, (float)(w)/h, 0.1f, 1000.0f); // FOV, AspectRatio, NearClip, FarClip (zNear must be > 0)
// switch to modelview matrix in order to set scene
glMatrixMode(GL_MODELVIEW);
}
|
41e3cb7f510a0006829cca8ab72768cc83b65caa.cu
|
#include <stdio.h>
//#include <windows.h>
#include <GL/glew.h>
#include <GL/glut.h>
//#include "SOIL.H"
#include "device.h"
//#include <cutil_inline.h>
//#include <cutil_gl_inline.h>
extern "C" void mydraw (cudaSurfaceObject_t sf, float time,unsigned int width, unsigned height);
#include <cuda_runtime_api.h>
#include <cuda_gl_interop.h>
unsigned int width=128;
unsigned int height=96;
GLuint gl_tex,gl_buffer;
struct cudaGraphicsResource* cuda_tex;
cudaError e;
void displayCB();
void mouseCB(int button, int state, int x, int y);
void mouseMotionCB(int x, int y);
void reshapeCB(int w, int h);
//view parameter
bool mouseLeftDown;
bool mouseRightDown;
float mouseX, mouseY;
float cameraAngleX;
float cameraAngleY;
float cameraDistance;
__global__ void setKernel(cudaSurfaceObject_t outputSurfObj,
int width, int height)
{
// Calculate surface coordinates
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < width && y < height) {
uchar4 data;
// Read from input surface
data=make_uchar4(32,64,128,255);
// Write to output surface
surf2Dwrite(data, outputSurfObj, x * 4, y);
}
}
int main(int argc, char **argv){
// Initialize OpenGL and GLUT for device 0
// and make the OpenGL context current
printf("debug00:\n");
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE);
glutInitWindowPosition(100,100);
glutInitWindowSize(512,512);
glutCreateWindow(argv[0]);
glutDisplayFunc(displayCB);
glutMouseFunc(mouseCB);
glutMotionFunc(mouseMotionCB);
glutReshapeFunc(reshapeCB);
glEnable(GL_TEXTURE_2D);
//gen gl_tex
glGenTextures(1, &gl_tex);
//bind gl_tex
glBindTexture(GL_TEXTURE_2D, gl_tex);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB8, width, height, 0, GL_RGB, GL_UNSIGNED_BYTE, NULL);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glBindTexture(GL_TEXTURE_2D, 0);
//update gl_tex
float pixels[] = {
0.0f, 0.0f, 0.0f, 1.0f, 1.0f, 1.0f,
1.0f, 1.0f, 1.0f, 0.0f, 0.0f, 0.0f
};
glBindTexture(GL_TEXTURE_2D, gl_tex);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB8, 2, 2, 0, GL_RGB, GL_FLOAT, pixels);
glBindTexture(GL_TEXTURE_2D, 0);
cudaSetDevice(0);
//cudaGraphicsGLRegisterImage(&cuda_tex,gl_tex,GL_TEXTURE_2D,cudaGraphicsMapFlagsWriteDiscard)
// glGenBuffersARB(1,&gl_buffer);
//glBindBufferARB(GL_PIXEL_UNPACK_BUFFER_ARB,gl_buffer);
//unsigned int size=width*height*4*sizeof(unsigned char);
//glBufferDataARB(GL_PIXEL_UNPACK_BUFFER_ARB,size,0,GL_DYNAMIC_DRAW);
//glBindBufferARB(GL_PIXEL_UNPACK_BUFFER_ARB,0);
e = cudaGraphicsGLRegisterImage(&cuda_tex, gl_tex, GL_TEXTURE_2D, cudaGraphicsRegisterFlagsSurfaceLoadStore);
e = cudaGraphicsGLRegisterBuffer(&cuda_tex,gl_buffer,cudaGraphicsMapFlagsWriteDiscard);
//main loop
glutMainLoop();
}
void displayCB(){
//cuda write tex
// unsigned char* g_image;
cudaGraphicsMapResources(1,&cuda_tex,0);
cudaArray_t cuda_array;
e = cudaGraphicsSubResourceGetMappedArray(&cuda_array, cuda_tex, 0, 0);
// Specify surface
struct cudaResourceDesc resDesc;
memset(&resDesc, 0, sizeof(resDesc));
resDesc.resType = cudaResourceTypeArray;
// Create the surface objects
resDesc.res.array.array = cuda_array; // use the array mapped from cuda_tex above
cudaSurfaceObject_t outputSurfObj = 0;
cudaCreateSurfaceObject(&outputSurfObj, &resDesc);
// Invoke kernel
dim3 dimBlock(16, 16);
dim3 dimGrid((width + dimBlock.x - 1) / dimBlock.x,
(height + dimBlock.y - 1) / dimBlock.y);
setKernel<<<dimGrid, dimBlock>>>(outputSurfObj,width, height);
//size_t num_bytes;
//cudaGraphicsResourceGetMappedPointer((void**)&g_image,&num_bytes,cuda_tex);
//mydraw(g_image,glutGet(GLUT_ELAPSED_TIME)*1e-3f,width,height);
cudaGraphicsUnmapResources(1,&cuda_tex,0);
//glBindBufferARB(GL_PIXEL_UNPACK_BUFFER_ARB, gl_buffer);
// glBindTexture(GL_TEXTURE_2D, gl_tex);
// glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, width, height, GL_RGBA, GL_UNSIGNED_BYTE, 0);
// glBindBufferARB(GL_PIXEL_UNPACK_BUFFER_ARB, 0);
// tramsform camera
glClearColor(0.5f,0.5f,0.5f,0.5f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
gluLookAt(0,0,1,0,0,0,0,1,0);
glTranslatef(0, 0, cameraDistance);
glRotatef(cameraAngleX, 1, 0, 0); // pitch
glRotatef(cameraAngleY, 0, 1, 0); // heading
//render
/*
float pixels[] = {
0.0f, 0.0f, 0.0f, 1.0f, 1.0f, 1.0f,
1.0f, 1.0f, 1.0f, 0.0f, 0.0f, 0.0f
};
glBindTexture(GL_TEXTURE_2D, gl_tex);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB8, 2, 2, 0, GL_RGB, GL_FLOAT, pixels);
*/
glBindTexture(GL_TEXTURE_2D, gl_tex);
glBegin(GL_QUADS);
glNormal3f(0, 0, 1);
//glColor4f(0.5, 0.5, 0.5, 0.5);
glTexCoord2f(0.0f, 0.0f); glVertex3f(-1.0f, -1.0f, 0.0f);
glTexCoord2f(1.0f, 0.0f); glVertex3f( 1.0f, -1.0f, 0.0f);
glTexCoord2f(1.0f, 1.0f); glVertex3f( 1.0f, 1.0f, 0.0f);
glTexCoord2f(0.0f, 1.0f); glVertex3f(-1.0f, 1.0f, 0.0f);
glEnd();
glBegin(GL_TRIANGLES);
glNormal3f(0, 0, 1);
glTexCoord2f(0.0f, 0.0f); glVertex3f(-1.0f, -1.0f, 1.0f);
glTexCoord2f(1.0f, 1.0f); glVertex3f( 1.0f, 1.0f, 1.0f);
glTexCoord2f(1.0f, 0.0f); glVertex3f( 1.0f, -1.0f, 1.0f);
glEnd();
//glBindTexture(GL_TEXTURE_2D, 0);
glutSwapBuffers();
glutPostRedisplay();
}
void mouseCB(int button, int state, int x, int y){
mouseX = (float)x;
mouseY = (float)y;
if(button == GLUT_LEFT_BUTTON)
{
if(state == GLUT_DOWN)
{
mouseLeftDown = true;
}
else if(state == GLUT_UP)
mouseLeftDown = false;
}
else if(button == GLUT_RIGHT_BUTTON)
{
if(state == GLUT_DOWN)
{
mouseRightDown = true;
}
else if(state == GLUT_UP)
mouseRightDown = false;
}
}
void mouseMotionCB(int x, int y){
if(mouseLeftDown)
{
cameraAngleY += 0.2f*(x - mouseX);
cameraAngleX += 0.2f*(y - mouseY);
mouseX = (float)x;
mouseY = (float)y;
}
if(mouseRightDown)
{
cameraDistance += (y - mouseY) * 0.1f;
mouseY = (float)y;
}
//printf("cameraDistance %f%\n",cameraDistance);
glutPostRedisplay();
}
void reshapeCB(int w, int h){
// set viewport to be the entire window
glViewport(0, 0, (GLsizei)w, (GLsizei)h);
// set perspective viewing frustum
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
//glFrustum(-aspectRatio, aspectRatio, -1, 1, 1, 100);
gluPerspective(36.0f, (float)(w)/h, 0.1f, 1000.0f); // FOV, AspectRatio, NearClip, FarClip (zNear must be > 0)
// switch to modelview matrix in order to set scene
glMatrixMode(GL_MODELVIEW);
}
|
00314332ec24bb6d9f0e47bc3dd0e97399d61dc9.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int dims_advec_mom_kernel_mass_flux_z [2][2];
static int dims_advec_mom_kernel_mass_flux_z_h [2][2] = {0};
//user function
__device__
inline void advec_mom_kernel_mass_flux_z_gpu(ACC<double> &node_flux,
const ACC<double> &mass_flux_z) {
node_flux(0,0,0) = 0.125 * ( mass_flux_z(-1,0,0) + mass_flux_z(0,0,0) +
mass_flux_z(-1,0,1) + mass_flux_z(0,0,1) +
mass_flux_z(-1,-1,0) + mass_flux_z(0,-1,0) +
mass_flux_z(-1,-1,1) + mass_flux_z(0,-1,1) );
}
__global__ void ops_advec_mom_kernel_mass_flux_z(
double* __restrict arg0,
double* __restrict arg1,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel_mass_flux_z[0][0] + idx_z * 1*1 * dims_advec_mom_kernel_mass_flux_z[0][0] * dims_advec_mom_kernel_mass_flux_z[0][1];
arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel_mass_flux_z[1][0] + idx_z * 1*1 * dims_advec_mom_kernel_mass_flux_z[1][0] * dims_advec_mom_kernel_mass_flux_z[1][1];
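// Advance each base pointer to this thread's (x,y,z) element using the per-argument x/y sizes held in constant memory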
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
ACC<double> argp0(dims_advec_mom_kernel_mass_flux_z[0][0], dims_advec_mom_kernel_mass_flux_z[0][1], arg0);
const ACC<double> argp1(dims_advec_mom_kernel_mass_flux_z[1][0], dims_advec_mom_kernel_mass_flux_z[1][1], arg1);
advec_mom_kernel_mass_flux_z_gpu(argp0, argp1);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_advec_mom_kernel_mass_flux_z(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1) {
#else
void ops_par_loop_advec_mom_kernel_mass_flux_z_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
#if OPS_MPI
ops_block block = desc->block;
#endif
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[2] = { arg0, arg1};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,2,range,135)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(135,"advec_mom_kernel_mass_flux_z");
OPS_kernels[135].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
#endif //OPS_MPI
#ifdef OPS_MPI
int arg_idx[3];
#endif
#ifdef OPS_MPI
if (compute_ranges(args, 2,block, range, start, end, arg_idx) < 0) return;
#else //OPS_MPI
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != dims_advec_mom_kernel_mass_flux_z_h[0][0] || ydim0 != dims_advec_mom_kernel_mass_flux_z_h[0][1] || xdim1 != dims_advec_mom_kernel_mass_flux_z_h[1][0] || ydim1 != dims_advec_mom_kernel_mass_flux_z_h[1][1]) {
dims_advec_mom_kernel_mass_flux_z_h[0][0] = xdim0;
dims_advec_mom_kernel_mass_flux_z_h[0][1] = ydim0;
dims_advec_mom_kernel_mass_flux_z_h[1][0] = xdim1;
dims_advec_mom_kernel_mass_flux_z_h[1][1] = ydim1;
cutilSafeCall(hipMemcpyToSymbol( dims_advec_mom_kernel_mass_flux_z, dims_advec_mom_kernel_mass_flux_z_h, sizeof(dims_advec_mom_kernel_mass_flux_z)));
}
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
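// Grid sized by ceiling division so blocks of OPS_block_size_{x,y,z} threads cover the full x_size x y_size x z_size range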
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[2];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 2);
ops_halo_exchanges(args,2,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[135].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
hipLaunchKernelGGL(( ops_advec_mom_kernel_mass_flux_z), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],x_size, y_size, z_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[135].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 2);
ops_set_halo_dirtybit3(&args[0],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[135].mpi_time += t2-t1;
OPS_kernels[135].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[135].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_advec_mom_kernel_mass_flux_z(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 135;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 135;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 2;
desc->args = (ops_arg*)malloc(2*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->function = ops_par_loop_advec_mom_kernel_mass_flux_z_execute;
if (OPS_diags > 1) {
ops_timing_realloc(135,"advec_mom_kernel_mass_flux_z");
}
ops_enqueue_kernel(desc);
}
#endif
|
00314332ec24bb6d9f0e47bc3dd0e97399d61dc9.cu
|
//
// auto-generated by ops.py
//
__constant__ int dims_advec_mom_kernel_mass_flux_z [2][2];
static int dims_advec_mom_kernel_mass_flux_z_h [2][2] = {0};
//user function
__device__
inline void advec_mom_kernel_mass_flux_z_gpu(ACC<double> &node_flux,
const ACC<double> &mass_flux_z) {
node_flux(0,0,0) = 0.125 * ( mass_flux_z(-1,0,0) + mass_flux_z(0,0,0) +
mass_flux_z(-1,0,1) + mass_flux_z(0,0,1) +
mass_flux_z(-1,-1,0) + mass_flux_z(0,-1,0) +
mass_flux_z(-1,-1,1) + mass_flux_z(0,-1,1) );
}
__global__ void ops_advec_mom_kernel_mass_flux_z(
double* __restrict arg0,
double* __restrict arg1,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel_mass_flux_z[0][0] + idx_z * 1*1 * dims_advec_mom_kernel_mass_flux_z[0][0] * dims_advec_mom_kernel_mass_flux_z[0][1];
arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel_mass_flux_z[1][0] + idx_z * 1*1 * dims_advec_mom_kernel_mass_flux_z[1][0] * dims_advec_mom_kernel_mass_flux_z[1][1];
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
ACC<double> argp0(dims_advec_mom_kernel_mass_flux_z[0][0], dims_advec_mom_kernel_mass_flux_z[0][1], arg0);
const ACC<double> argp1(dims_advec_mom_kernel_mass_flux_z[1][0], dims_advec_mom_kernel_mass_flux_z[1][1], arg1);
advec_mom_kernel_mass_flux_z_gpu(argp0, argp1);
}
}
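// Each thread owns one (idx_x, idx_y, idx_z) grid point: the raw pointers are
// first advanced to that point using the x/y plane sizes held in the
// __constant__ dims array, and the ACC<double> wrappers then resolve relative
// stencil offsets on top of that base. Assuming the ACC accessor linearises an
// offset (i,j,k) as i + j*xdim + k*xdim*ydim (consistent with the pointer
// arithmetic above), the accessor calls are equivalent to the raw indexing
// sketched below (illustrative only, not the OPS ACC definition):
#if 0
__device__ inline double &acc3d(double *base, int xdim, int ydim,
                                int i, int j, int k) {
  return base[i + (size_t)j * xdim + (size_t)k * xdim * ydim];
}
// node_flux(0,0,0)      == acc3d(arg0, xdim0, ydim0,  0,  0, 0)
// mass_flux_z(-1,-1,1)  == acc3d(arg1, xdim1, ydim1, -1, -1, 1)
#endif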
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_advec_mom_kernel_mass_flux_z(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1) {
#else
void ops_par_loop_advec_mom_kernel_mass_flux_z_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
#if OPS_MPI
ops_block block = desc->block;
#endif
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[2] = { arg0, arg1};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,2,range,135)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(135,"advec_mom_kernel_mass_flux_z");
OPS_kernels[135].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
#endif //OPS_MPI
#ifdef OPS_MPI
int arg_idx[3];
#endif
#ifdef OPS_MPI
if (compute_ranges(args, 2,block, range, start, end, arg_idx) < 0) return;
#else //OPS_MPI
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != dims_advec_mom_kernel_mass_flux_z_h[0][0] || ydim0 != dims_advec_mom_kernel_mass_flux_z_h[0][1] || xdim1 != dims_advec_mom_kernel_mass_flux_z_h[1][0] || ydim1 != dims_advec_mom_kernel_mass_flux_z_h[1][1]) {
dims_advec_mom_kernel_mass_flux_z_h[0][0] = xdim0;
dims_advec_mom_kernel_mass_flux_z_h[0][1] = ydim0;
dims_advec_mom_kernel_mass_flux_z_h[1][0] = xdim1;
dims_advec_mom_kernel_mass_flux_z_h[1][1] = ydim1;
cutilSafeCall(cudaMemcpyToSymbol( dims_advec_mom_kernel_mass_flux_z, dims_advec_mom_kernel_mass_flux_z_h, sizeof(dims_advec_mom_kernel_mass_flux_z)));
}
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[2];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 2);
ops_halo_exchanges(args,2,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[135].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
ops_advec_mom_kernel_mass_flux_z<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],x_size, y_size, z_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[135].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 2);
ops_set_halo_dirtybit3(&args[0],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[135].mpi_time += t2-t1;
OPS_kernels[135].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[135].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
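// A hypothetical call site for this generated stub (the dat handles, stencil
// names and range bounds below are placeholders; a real application declares
// them with ops_decl_dat / ops_decl_stencil elsewhere):
#if 0
int rangexyz[6] = {x_min, x_max + 1, y_min, y_max + 1, z_min, z_max + 1};
ops_par_loop_advec_mom_kernel_mass_flux_z("advec_mom_kernel_mass_flux_z",
    grid_block, 3, rangexyz,
    ops_arg_dat(node_flux_dat,  1, S3D_000,          "double", OPS_WRITE),
    ops_arg_dat(mass_flux_z_dat, 1, S3D_flux_stencil, "double", OPS_READ));
#endif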
#ifdef OPS_LAZY
void ops_par_loop_advec_mom_kernel_mass_flux_z(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 135;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 135;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 2;
desc->args = (ops_arg*)malloc(2*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->function = ops_par_loop_advec_mom_kernel_mass_flux_z_execute;
if (OPS_diags > 1) {
ops_timing_realloc(135,"advec_mom_kernel_mass_flux_z");
}
ops_enqueue_kernel(desc);
}
#endif
|
507b8e23028e1382195abad838582bd9dce073da.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <blas_quda.h>
#include <tune_quda.h>
#include <float_vector.h>
#include <register_traits.h>
// For kernels with precision conversion built in
#define checkSpinorLength(a, b) \
{ \
if (a.Length() != b.Length()) \
errorQuda("lengths do not match: %lu %lu", a.Length(), b.Length()); \
if (a.Stride() != b.Stride()) \
errorQuda("strides do not match: %d %d", a.Stride(), b.Stride()); \
if (a.GammaBasis() != b.GammaBasis()) \
errorQuda("gamma basis does not match: %d %d", a.GammaBasis(), b.GammaBasis()); \
}
namespace quda {
namespace blas {
hipStream_t* getStream();
namespace copy_ns {
#include <texture.h>
static struct {
const char *vol_str;
const char *aux_str;
} blasStrings;
template <typename FloatN, int N, typename Output, typename Input>
__global__ void copyKernel(Output Y, Input X, int length) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int parity = blockIdx.y;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < length) {
FloatN x[N];
X.load(x, i, parity);
Y.save(x, i, parity);
i += gridSize;
}
}
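// copyKernel is a grid-stride loop: the launch geometry picked by the
// autotuner does not have to cover `length`, because each thread walks the
// index space in steps of gridDim.x*blockDim.x, while blockIdx.y selects the
// parity. A minimal, generic sketch of the same pattern (not the QUDA API):
#if 0
__global__ void scale_kernel(float *out, const float *in, float a, int n) {
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
       i += gridDim.x * blockDim.x)
    out[i] = a * in[i];  // correct for any grid/block size, n need not divide evenly
}
#endif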
template <typename FloatN, int N, typename Output, typename Input>
class CopyCuda : public Tunable {
private:
Input &X;
Output &Y;
const int length;
const int nParity;
unsigned int sharedBytesPerThread() const { return 0; }
unsigned int sharedBytesPerBlock(const TuneParam ¶m) const { return 0; }
virtual bool advanceSharedBytes(TuneParam ¶m) const
{
TuneParam next(param);
advanceBlockDim(next); // to get next blockDim
int nthreads = next.block.x * next.block.y * next.block.z;
param.shared_bytes = sharedBytesPerThread()*nthreads > sharedBytesPerBlock(param) ?
sharedBytesPerThread()*nthreads : sharedBytesPerBlock(param);
return false;
}
public:
CopyCuda(Output &Y, Input &X, int length, int nParity)
: X(X), Y(Y), length(length/nParity), nParity(nParity) { }
virtual ~CopyCuda() { ; }
inline TuneKey tuneKey() const {
return TuneKey(blasStrings.vol_str, "copyKernel", blasStrings.aux_str);
}
inline void apply(const hipStream_t &stream) {
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
hipLaunchKernelGGL(( copyKernel<FloatN, N>), dim3(tp.grid), dim3(tp.block), tp.shared_bytes, stream, Y, X, length);
}
void preTune() { ; } // no need to save state for copy kernels
void postTune() { ; } // no need to restore state for copy kernels
void initTuneParam(TuneParam ¶m) const {
Tunable::initTuneParam(param);
param.grid.y = nParity;
}
void defaultTuneParam(TuneParam ¶m) const {
Tunable::defaultTuneParam(param);
param.grid.y = nParity;
}
long long flops() const { return 0; }
long long bytes() const {
const int Ninternal = (sizeof(FloatN)/sizeof(((FloatN*)0)->x))*N;
size_t bytes = (X.Precision() + Y.Precision())*Ninternal;
if (X.Precision() == QUDA_HALF_PRECISION) bytes += sizeof(float);
if (Y.Precision() == QUDA_HALF_PRECISION) bytes += sizeof(float);
return bytes*length*nParity;
}
int tuningIter() const { return 3; }
};
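// Worked example of the bytes() accounting above, assuming Precision() reports
// the per-real byte width (as the QUDA precision enums do): for the
// double<->single, Nspin=4 instantiations (FloatN = float4, N = 6),
// Ninternal = 4*6 = 24 reals per site, so each site moves
// (8 + 4) * 24 = 288 bytes, plus 4 bytes per site for the norm field of any
// half-precision operand; the total is then multiplied by length*nParity.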
void copy(cudaColorSpinorField &dst, const cudaColorSpinorField &src) {
if (&src == &dst) return; // aliasing fields
if (src.SiteSubset() != dst.SiteSubset())
errorQuda("Spinor fields do not have matching subsets dst=%d src=%d\n", src.SiteSubset(), dst.SiteSubset());
checkSpinorLength(dst, src);
blasStrings.vol_str = src.VolString();
char tmp[256];
strcpy(tmp, "dst=");
strcat(tmp, dst.AuxString());
strcat(tmp, ",src=");
strcat(tmp, src.AuxString());
blasStrings.aux_str = tmp;
if (dst.Nspin() != src.Nspin())
errorQuda("Spins (%d,%d) do not match", dst.Nspin(), src.Nspin());
// For a given dst precision, there are two non-trivial possibilities for the
// src precision.
blas::bytes += (unsigned long long)src.RealLength()*(src.Precision() + dst.Precision());
if (dst.Precision() == src.Precision()) {
if (src.Bytes() != dst.Bytes()) errorQuda("Precisions match, but bytes do not");
qudaMemcpy(dst.V(), src.V(), dst.Bytes(), hipMemcpyDeviceToDevice);
if (dst.Precision() == QUDA_HALF_PRECISION) {
qudaMemcpy(dst.Norm(), src.Norm(), dst.NormBytes(), hipMemcpyDeviceToDevice);
blas::bytes += 2*(unsigned long long)dst.RealLength()*sizeof(float);
}
} else if (dst.Precision() == QUDA_DOUBLE_PRECISION && src.Precision() == QUDA_SINGLE_PRECISION) {
if (src.Nspin() == 4){
Spinor<float4, float4, 6, 0, 0> src_tex(src);
Spinor<float4, double2, 6, 1> dst_spinor(dst);
CopyCuda<float4, 6, Spinor<float4, double2, 6, 1>, Spinor<float4, float4, 6, 0, 0> >
copy(dst_spinor, src_tex, src.Volume(), src.SiteSubset());
copy.apply(*blas::getStream());
} else if (src.Nspin() == 2) {
if (src.Length() != src.RealLength() || dst.Length() != dst.RealLength())
errorQuda("Non-zero stride not supported"); // we need to know how many colors to set "M" (requires JIT)
Spinor<float2, float2, 1, 0, 0> src_tex(src);
Spinor<float2, double2, 1, 1> dst_spinor(dst);
CopyCuda<float2, 1, Spinor<float2, double2, 1, 1>, Spinor<float2, float2, 1, 0, 0> >
copy(dst_spinor, src_tex, src.Length()/2, src.SiteSubset());
copy.apply(*blas::getStream());
} else if (src.Nspin() == 1) {
Spinor<float2, float2, 3, 0, 0> src_tex(src);
Spinor<float2, double2, 3, 1> dst_spinor(dst);
CopyCuda<float2, 3, Spinor<float2, double2, 3, 1>, Spinor<float2, float2, 3, 0, 0> >
copy(dst_spinor, src_tex, src.Volume(), src.SiteSubset());
copy.apply(*blas::getStream());
} else {
errorQuda("Nspin(%d) is not supported", src.Nspin());
}
} else if (dst.Precision() == QUDA_SINGLE_PRECISION && src.Precision() == QUDA_DOUBLE_PRECISION) {
if (src.Nspin() == 4){
Spinor<float4, double2, 6, 0, 0> src_tex(src);
Spinor<float4, float4, 6, 1> dst_spinor(dst);
CopyCuda<float4, 6, Spinor<float4, float4, 6, 1>, Spinor<float4, double2, 6, 0, 0> >
copy(dst_spinor, src_tex, src.Volume(), src.SiteSubset());
copy.apply(*blas::getStream());
} else if (src.Nspin() == 2) {
if (src.Length() != src.RealLength() || dst.Length() != dst.RealLength())
errorQuda("Non-zero stride not supported"); // we need to know how many colors to set "M" (requires JIT)
Spinor<float2, double2, 1, 0, 0> src_tex(src);
Spinor<float2, float2, 1, 1> dst_spinor(dst);
CopyCuda<float2, 1, Spinor<float2, float2, 1, 1>, Spinor<float2, double2, 1, 0, 0> >
copy(dst_spinor, src_tex, src.Length()/2, src.SiteSubset());
copy.apply(*blas::getStream());
} else if (src.Nspin() == 1) {
Spinor<float2, double2, 3, 0, 0> src_tex(src);
Spinor<float2, float2, 3, 1> dst_spinor(dst);
CopyCuda<float2, 3, Spinor<float2, float2, 3, 1>, Spinor<float2, double2, 3, 0, 0> >
copy(dst_spinor, src_tex, src.Volume(), src.SiteSubset());
copy.apply(*blas::getStream());
} else {
errorQuda("Nspin(%d) is not supported", src.Nspin());
}
} else if (dst.Precision() == QUDA_SINGLE_PRECISION && src.Precision() == QUDA_HALF_PRECISION) {
blas::bytes += (unsigned long long)src.Volume()*sizeof(float);
if (src.Nspin() == 4){
Spinor<float4, short4, 6, 0, 0> src_tex(src);
Spinor<float4, float4, 6, 1> dst_spinor(dst);
CopyCuda<float4, 6, Spinor<float4, float4, 6, 1>, Spinor<float4, short4, 6, 0, 0> >
copy(dst_spinor, src_tex, src.Volume(), src.SiteSubset());
copy.apply(*blas::getStream());
} else if (src.Nspin() == 1) {
Spinor<float2, short2, 3, 0, 0> src_tex(src);
Spinor<float2, float2, 3, 1> dst_spinor(dst);
CopyCuda<float2, 3, Spinor<float2, float2, 3, 1>, Spinor<float2, short2, 3, 0, 0> >
copy(dst_spinor, src_tex, src.Volume(), src.SiteSubset());
copy.apply(*blas::getStream());
} else {
errorQuda("Nspin(%d) is not supported", src.Nspin());
}
} else if (dst.Precision() == QUDA_HALF_PRECISION && src.Precision() == QUDA_SINGLE_PRECISION) {
blas::bytes += (unsigned long long)dst.Volume()*sizeof(float);
if (src.Nspin() == 4){
Spinor<float4, float4, 6, 0, 0> src_tex(src);
Spinor<float4, short4, 6, 1> dst_spinor(dst);
CopyCuda<float4, 6, Spinor<float4, short4, 6, 1>, Spinor<float4, float4, 6, 0, 0> >
copy(dst_spinor, src_tex, src.Volume(), src.SiteSubset());
copy.apply(*blas::getStream());
} else if (src.Nspin() == 1) {
Spinor<float2, float2, 3, 0, 0> src_tex(src);
Spinor<float2, short2, 3, 1> dst_spinor(dst);
CopyCuda<float2, 3, Spinor<float2, short2, 3, 1>, Spinor<float2, float2, 3, 0, 0> >
copy(dst_spinor, src_tex, src.Volume(), src.SiteSubset());
copy.apply(*blas::getStream());
} else {
errorQuda("Nspin(%d) is not supported", src.Nspin());
}
} else if (dst.Precision() == QUDA_DOUBLE_PRECISION && src.Precision() == QUDA_HALF_PRECISION) {
blas::bytes += (unsigned long long)src.Volume()*sizeof(float);
if (src.Nspin() == 4){
Spinor<double2, short4, 12, 0, 0> src_tex(src);
Spinor<double2, double2, 12, 1> dst_spinor(dst);
CopyCuda<double2, 12, Spinor<double2, double2, 12, 1>, Spinor<double2, short4, 12, 0, 0> >
copy(dst_spinor, src_tex, src.Volume(), src.SiteSubset());
copy.apply(*blas::getStream());
} else if (src.Nspin() == 1) {
Spinor<double2, short2, 3, 0, 0> src_tex(src);
Spinor<double2, double2, 3, 1> dst_spinor(dst);
CopyCuda<double2, 3, Spinor<double2, double2, 3, 1>, Spinor<double2, short2, 3, 0, 0> >
copy(dst_spinor, src_tex, src.Volume(), src.SiteSubset());
copy.apply(*blas::getStream());
} else {
errorQuda("Nspin(%d) is not supported", src.Nspin());
}
} else if (dst.Precision() == QUDA_HALF_PRECISION && src.Precision() == QUDA_DOUBLE_PRECISION) {
blas::bytes += (unsigned long long)dst.Volume()*sizeof(float);
if (src.Nspin() == 4){
Spinor<double2, double2, 12, 0, 0> src_tex(src);
Spinor<double2, short4, 12, 1> dst_spinor(dst);
CopyCuda<double2, 12, Spinor<double2, short4, 12, 1>, Spinor<double2, double2, 12, 0, 0> >
copy(dst_spinor, src_tex, src.Volume(), src.SiteSubset());
copy.apply(*blas::getStream());
} else if (src.Nspin() == 1) {
Spinor<double2, double2, 3, 0, 0> src_tex(src);
Spinor<double2, short2, 3, 1> dst_spinor(dst);
CopyCuda<double2, 3, Spinor<double2, short2, 3, 1>, Spinor<double2, double2, 3, 0, 0> >
copy(dst_spinor, src_tex, src.Volume(), src.SiteSubset());
copy.apply(*blas::getStream());
} else {
errorQuda("Nspin(%d) is not supported", src.Nspin());
}
} else {
errorQuda("Invalid precision combination dst=%d and src=%d", dst.Precision(), src.Precision());
}
checkCudaError();
}
} // namespace copy_ns
void copy(ColorSpinorField &dst, const ColorSpinorField &src) {
if (dst.Location() == QUDA_CUDA_FIELD_LOCATION &&
src.Location() == QUDA_CUDA_FIELD_LOCATION) {
copy_ns::copy(static_cast<cudaColorSpinorField&>(dst),
static_cast<const cudaColorSpinorField&>(src));
} else {
dst = src;
}
}
} // namespace blas
} // namespace quda
|
507b8e23028e1382195abad838582bd9dce073da.cu
|
#include <blas_quda.h>
#include <tune_quda.h>
#include <float_vector.h>
#include <register_traits.h>
// For kernels with precision conversion built in
#define checkSpinorLength(a, b) \
{ \
if (a.Length() != b.Length()) \
errorQuda("lengths do not match: %lu %lu", a.Length(), b.Length()); \
if (a.Stride() != b.Stride()) \
errorQuda("strides do not match: %d %d", a.Stride(), b.Stride()); \
if (a.GammaBasis() != b.GammaBasis()) \
errorQuda("gamma basis does not match: %d %d", a.GammaBasis(), b.GammaBasis()); \
}
namespace quda {
namespace blas {
cudaStream_t* getStream();
namespace copy_ns {
#include <texture.h>
static struct {
const char *vol_str;
const char *aux_str;
} blasStrings;
template <typename FloatN, int N, typename Output, typename Input>
__global__ void copyKernel(Output Y, Input X, int length) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int parity = blockIdx.y;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < length) {
FloatN x[N];
X.load(x, i, parity);
Y.save(x, i, parity);
i += gridSize;
}
}
template <typename FloatN, int N, typename Output, typename Input>
class CopyCuda : public Tunable {
private:
Input &X;
Output &Y;
const int length;
const int nParity;
unsigned int sharedBytesPerThread() const { return 0; }
unsigned int sharedBytesPerBlock(const TuneParam ¶m) const { return 0; }
virtual bool advanceSharedBytes(TuneParam ¶m) const
{
TuneParam next(param);
advanceBlockDim(next); // to get next blockDim
int nthreads = next.block.x * next.block.y * next.block.z;
param.shared_bytes = sharedBytesPerThread()*nthreads > sharedBytesPerBlock(param) ?
sharedBytesPerThread()*nthreads : sharedBytesPerBlock(param);
return false;
}
public:
CopyCuda(Output &Y, Input &X, int length, int nParity)
: X(X), Y(Y), length(length/nParity), nParity(nParity) { }
virtual ~CopyCuda() { ; }
inline TuneKey tuneKey() const {
return TuneKey(blasStrings.vol_str, "copyKernel", blasStrings.aux_str);
}
inline void apply(const cudaStream_t &stream) {
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
copyKernel<FloatN, N><<<tp.grid, tp.block, tp.shared_bytes, stream>>>(Y, X, length);
}
void preTune() { ; } // no need to save state for copy kernels
void postTune() { ; } // no need to restore state for copy kernels
void initTuneParam(TuneParam ¶m) const {
Tunable::initTuneParam(param);
param.grid.y = nParity;
}
void defaultTuneParam(TuneParam ¶m) const {
Tunable::defaultTuneParam(param);
param.grid.y = nParity;
}
long long flops() const { return 0; }
long long bytes() const {
const int Ninternal = (sizeof(FloatN)/sizeof(((FloatN*)0)->x))*N;
size_t bytes = (X.Precision() + Y.Precision())*Ninternal;
if (X.Precision() == QUDA_HALF_PRECISION) bytes += sizeof(float);
if (Y.Precision() == QUDA_HALF_PRECISION) bytes += sizeof(float);
return bytes*length*nParity;
}
int tuningIter() const { return 3; }
};
void copy(cudaColorSpinorField &dst, const cudaColorSpinorField &src) {
if (&src == &dst) return; // aliasing fields
if (src.SiteSubset() != dst.SiteSubset())
errorQuda("Spinor fields do not have matching subsets dst=%d src=%d\n", src.SiteSubset(), dst.SiteSubset());
checkSpinorLength(dst, src);
blasStrings.vol_str = src.VolString();
char tmp[256];
strcpy(tmp, "dst=");
strcat(tmp, dst.AuxString());
strcat(tmp, ",src=");
strcat(tmp, src.AuxString());
blasStrings.aux_str = tmp;
if (dst.Nspin() != src.Nspin())
errorQuda("Spins (%d,%d) do not match", dst.Nspin(), src.Nspin());
// For a given dst precision, there are two non-trivial possibilities for the
// src precision.
blas::bytes += (unsigned long long)src.RealLength()*(src.Precision() + dst.Precision());
if (dst.Precision() == src.Precision()) {
if (src.Bytes() != dst.Bytes()) errorQuda("Precisions match, but bytes do not");
qudaMemcpy(dst.V(), src.V(), dst.Bytes(), cudaMemcpyDeviceToDevice);
if (dst.Precision() == QUDA_HALF_PRECISION) {
qudaMemcpy(dst.Norm(), src.Norm(), dst.NormBytes(), cudaMemcpyDeviceToDevice);
blas::bytes += 2*(unsigned long long)dst.RealLength()*sizeof(float);
}
} else if (dst.Precision() == QUDA_DOUBLE_PRECISION && src.Precision() == QUDA_SINGLE_PRECISION) {
if (src.Nspin() == 4){
Spinor<float4, float4, 6, 0, 0> src_tex(src);
Spinor<float4, double2, 6, 1> dst_spinor(dst);
CopyCuda<float4, 6, Spinor<float4, double2, 6, 1>, Spinor<float4, float4, 6, 0, 0> >
copy(dst_spinor, src_tex, src.Volume(), src.SiteSubset());
copy.apply(*blas::getStream());
} else if (src.Nspin() == 2) {
if (src.Length() != src.RealLength() || dst.Length() != dst.RealLength())
errorQuda("Non-zero stride not supported"); // we need to know how many colors to set "M" (requires JIT)
Spinor<float2, float2, 1, 0, 0> src_tex(src);
Spinor<float2, double2, 1, 1> dst_spinor(dst);
CopyCuda<float2, 1, Spinor<float2, double2, 1, 1>, Spinor<float2, float2, 1, 0, 0> >
copy(dst_spinor, src_tex, src.Length()/2, src.SiteSubset());
copy.apply(*blas::getStream());
} else if (src.Nspin() == 1) {
Spinor<float2, float2, 3, 0, 0> src_tex(src);
Spinor<float2, double2, 3, 1> dst_spinor(dst);
CopyCuda<float2, 3, Spinor<float2, double2, 3, 1>, Spinor<float2, float2, 3, 0, 0> >
copy(dst_spinor, src_tex, src.Volume(), src.SiteSubset());
copy.apply(*blas::getStream());
} else {
errorQuda("Nspin(%d) is not supported", src.Nspin());
}
} else if (dst.Precision() == QUDA_SINGLE_PRECISION && src.Precision() == QUDA_DOUBLE_PRECISION) {
if (src.Nspin() == 4){
Spinor<float4, double2, 6, 0, 0> src_tex(src);
Spinor<float4, float4, 6, 1> dst_spinor(dst);
CopyCuda<float4, 6, Spinor<float4, float4, 6, 1>, Spinor<float4, double2, 6, 0, 0> >
copy(dst_spinor, src_tex, src.Volume(), src.SiteSubset());
copy.apply(*blas::getStream());
} else if (src.Nspin() == 2) {
if (src.Length() != src.RealLength() || dst.Length() != dst.RealLength())
errorQuda("Non-zero stride not supported"); // we need to know how many colors to set "M" (requires JIT)
Spinor<float2, double2, 1, 0, 0> src_tex(src);
Spinor<float2, float2, 1, 1> dst_spinor(dst);
CopyCuda<float2, 1, Spinor<float2, float2, 1, 1>, Spinor<float2, double2, 1, 0, 0> >
copy(dst_spinor, src_tex, src.Length()/2, src.SiteSubset());
copy.apply(*blas::getStream());
} else if (src.Nspin() == 1) {
Spinor<float2, double2, 3, 0, 0> src_tex(src);
Spinor<float2, float2, 3, 1> dst_spinor(dst);
CopyCuda<float2, 3, Spinor<float2, float2, 3, 1>, Spinor<float2, double2, 3, 0, 0> >
copy(dst_spinor, src_tex, src.Volume(), src.SiteSubset());
copy.apply(*blas::getStream());
} else {
errorQuda("Nspin(%d) is not supported", src.Nspin());
}
} else if (dst.Precision() == QUDA_SINGLE_PRECISION && src.Precision() == QUDA_HALF_PRECISION) {
blas::bytes += (unsigned long long)src.Volume()*sizeof(float);
if (src.Nspin() == 4){
Spinor<float4, short4, 6, 0, 0> src_tex(src);
Spinor<float4, float4, 6, 1> dst_spinor(dst);
CopyCuda<float4, 6, Spinor<float4, float4, 6, 1>, Spinor<float4, short4, 6, 0, 0> >
copy(dst_spinor, src_tex, src.Volume(), src.SiteSubset());
copy.apply(*blas::getStream());
} else if (src.Nspin() == 1) {
Spinor<float2, short2, 3, 0, 0> src_tex(src);
Spinor<float2, float2, 3, 1> dst_spinor(dst);
CopyCuda<float2, 3, Spinor<float2, float2, 3, 1>, Spinor<float2, short2, 3, 0, 0> >
copy(dst_spinor, src_tex, src.Volume(), src.SiteSubset());
copy.apply(*blas::getStream());
} else {
errorQuda("Nspin(%d) is not supported", src.Nspin());
}
} else if (dst.Precision() == QUDA_HALF_PRECISION && src.Precision() == QUDA_SINGLE_PRECISION) {
blas::bytes += (unsigned long long)dst.Volume()*sizeof(float);
if (src.Nspin() == 4){
Spinor<float4, float4, 6, 0, 0> src_tex(src);
Spinor<float4, short4, 6, 1> dst_spinor(dst);
CopyCuda<float4, 6, Spinor<float4, short4, 6, 1>, Spinor<float4, float4, 6, 0, 0> >
copy(dst_spinor, src_tex, src.Volume(), src.SiteSubset());
copy.apply(*blas::getStream());
} else if (src.Nspin() == 1) {
Spinor<float2, float2, 3, 0, 0> src_tex(src);
Spinor<float2, short2, 3, 1> dst_spinor(dst);
CopyCuda<float2, 3, Spinor<float2, short2, 3, 1>, Spinor<float2, float2, 3, 0, 0> >
copy(dst_spinor, src_tex, src.Volume(), src.SiteSubset());
copy.apply(*blas::getStream());
} else {
errorQuda("Nspin(%d) is not supported", src.Nspin());
}
} else if (dst.Precision() == QUDA_DOUBLE_PRECISION && src.Precision() == QUDA_HALF_PRECISION) {
blas::bytes += (unsigned long long)src.Volume()*sizeof(float);
if (src.Nspin() == 4){
Spinor<double2, short4, 12, 0, 0> src_tex(src);
Spinor<double2, double2, 12, 1> dst_spinor(dst);
CopyCuda<double2, 12, Spinor<double2, double2, 12, 1>, Spinor<double2, short4, 12, 0, 0> >
copy(dst_spinor, src_tex, src.Volume(), src.SiteSubset());
copy.apply(*blas::getStream());
} else if (src.Nspin() == 1) {
Spinor<double2, short2, 3, 0, 0> src_tex(src);
Spinor<double2, double2, 3, 1> dst_spinor(dst);
CopyCuda<double2, 3, Spinor<double2, double2, 3, 1>, Spinor<double2, short2, 3, 0, 0> >
copy(dst_spinor, src_tex, src.Volume(), src.SiteSubset());
copy.apply(*blas::getStream());
} else {
errorQuda("Nspin(%d) is not supported", src.Nspin());
}
} else if (dst.Precision() == QUDA_HALF_PRECISION && src.Precision() == QUDA_DOUBLE_PRECISION) {
blas::bytes += (unsigned long long)dst.Volume()*sizeof(float);
if (src.Nspin() == 4){
Spinor<double2, double2, 12, 0, 0> src_tex(src);
Spinor<double2, short4, 12, 1> dst_spinor(dst);
CopyCuda<double2, 12, Spinor<double2, short4, 12, 1>, Spinor<double2, double2, 12, 0, 0> >
copy(dst_spinor, src_tex, src.Volume(), src.SiteSubset());
copy.apply(*blas::getStream());
} else if (src.Nspin() == 1) {
Spinor<double2, double2, 3, 0, 0> src_tex(src);
Spinor<double2, short2, 3, 1> dst_spinor(dst);
CopyCuda<double2, 3, Spinor<double2, short2, 3, 1>, Spinor<double2, double2, 3, 0, 0> >
copy(dst_spinor, src_tex, src.Volume(), src.SiteSubset());
copy.apply(*blas::getStream());
} else {
errorQuda("Nspin(%d) is not supported", src.Nspin());
}
} else {
errorQuda("Invalid precision combination dst=%d and src=%d", dst.Precision(), src.Precision());
}
checkCudaError();
}
} // namespace copy_ns
void copy(ColorSpinorField &dst, const ColorSpinorField &src) {
if (dst.Location() == QUDA_CUDA_FIELD_LOCATION &&
src.Location() == QUDA_CUDA_FIELD_LOCATION) {
copy_ns::copy(static_cast<cudaColorSpinorField&>(dst),
static_cast<const cudaColorSpinorField&>(src));
} else {
dst = src;
}
}
} // namespace blas
} // namespace quda
|
1a9682e7028b7ae608ee1cfab0b3e9de0cc1649d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.6.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2014
@generated from zgerbt_kernels.cu normal z -> s, Sat Nov 15 19:53:59 2014
@author Adrien REMY
*/
#include "common_magma.h"
#define block_height 32
#define block_width 4
#define block_length 256
#define NB 64
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
static __device__ void
magmablas_selementary_multiplication_devfunc(
magma_int_t n,
float *dA, magma_int_t ldda,
float *du,
float *dv)
{
magma_int_t idx, idy;
idx = blockIdx.x * blockDim.x + threadIdx.x;
idy = blockIdx.y * blockDim.y + threadIdx.y;
if ((idx < n/2)&&(idy < n/2)){
dA += idx + idy * ldda;
float a00, a10, a01, a11, b1, b2, b3, b4;
__shared__ float u1[block_height], u2[block_height], v1[block_width], v2[block_width];
du += idx;
dv += idy;
u1[threadIdx.x]=du[0];
u2[threadIdx.x]=du[n/2];
v1[threadIdx.y]=dv[0];
v2[threadIdx.y]=dv[n/2];
__syncthreads();
a00 = dA[0];
a01 = dA[ldda*n/2];
a10 = dA[n/2];
a11 = dA[ldda*n/2+n/2];
b1 = a00 + a01;
b2 = a10 + a11;
b3 = a00 - a01;
b4 = a10 - a11;
dA[0] = u1[threadIdx.x] * v1[threadIdx.y] * (b1 + b2);
dA[ldda*n/2] = u1[threadIdx.x] * v2[threadIdx.y] * (b3 + b4);
dA[n/2] = u2[threadIdx.x] * v1[threadIdx.y] * (b1 - b2);
dA[ldda*n/2+n/2] = u2[threadIdx.x] * v2[threadIdx.y] *(b3 - b4);
}
}
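// In matrix form, each (idx, idy) pair updates a 2x2 block drawn from the four
// quadrants of dA; with H = [[1, 1], [1, -1]] the update performed above reads
//
//   [ c00 c01 ]   [ u1  0 ]       [ a00 a01 ]       [ v1  0 ]
//   [ c10 c11 ] = [  0 u2 ] * H * [ a10 a11 ] * H * [  0 v2 ]
//
// i.e. a two-sided elementary butterfly transform scaled by the random vectors
// du and dv, where u1 = du[idx], u2 = du[idx + n/2], v1 = dv[idy],
// v2 = dv[idy + n/2].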
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
magmablas_selementary_multiplication_kernel(
magma_int_t n,
float *dA, magma_int_t offsetA, magma_int_t ldda,
float *du, magma_int_t offsetu,
float *dv, magma_int_t offsetv)
{
magmablas_selementary_multiplication_devfunc( n, dA+offsetA, ldda, du+offsetu, dv+offsetv);
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
magmablas_selementary_multiplication_kernel_batched(
magma_int_t n,
float **dA_array, magma_int_t offsetA, magma_int_t ldda,
float *du, magma_int_t offsetu,
float *dv, magma_int_t offsetv)
{
int batchid = blockIdx.z;
magmablas_selementary_multiplication_devfunc( n, dA_array[batchid]+offsetA, ldda, du+offsetu, dv+offsetv);
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
static __device__ void
magmablas_sapply_vector_devfunc(
magma_int_t n,
float *du, float *db)
{
magma_int_t idx;
idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < n/2){
du += idx;
db += idx;
float a1,a2;
a1 = du[0]*db[0];
a2 = du[n/2]*db[n/2];
db[0] = a1 + a2;
db[n/2] = a1 -a2;
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
magmablas_sapply_vector_kernel(
magma_int_t n,
float *du, magma_int_t offsetu, float *db, magma_int_t offsetb )
{
magmablas_sapply_vector_devfunc(n, du+offsetu, db+offsetb);
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
magmablas_sapply_vector_kernel_batched(
magma_int_t n,
float *du, magma_int_t offsetu, float **db_array, magma_int_t offsetb )
{
int batchid = blockIdx.y;
magmablas_sapply_vector_devfunc(n, du+offsetu, db_array[batchid]+offsetb);
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
static __device__ void
magmablas_sapply_transpose_vector_devfunc(
magma_int_t n,
float *du,float *db )
{
magma_int_t idx;
idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < n/2){
du += idx;
db += idx;
float a1,a2;
a1 = db[0] + db[n/2];
a2 = db[0] - db[n/2];
db[0] = du[0]*a1;
db[n/2] = du[n/2]*a2;
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
magmablas_sapply_transpose_vector_kernel(
magma_int_t n,
float *du, magma_int_t offsetu, float *db, magma_int_t offsetb )
{
magmablas_sapply_transpose_vector_devfunc(n, du+offsetu, db+offsetb);
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
magmablas_sapply_transpose_vector_kernel_batched(
magma_int_t n,
float *du, magma_int_t offsetu, float **db_array, magma_int_t offsetb )
{
int batchid = blockIdx.y;
magmablas_sapply_transpose_vector_devfunc(n, du+offsetu, db_array[batchid]+offsetb);
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
1a9682e7028b7ae608ee1cfab0b3e9de0cc1649d.cu
|
/*
-- MAGMA (version 1.6.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2014
@generated from zgerbt_kernels.cu normal z -> s, Sat Nov 15 19:53:59 2014
@author Adrien REMY
*/
#include "common_magma.h"
#define block_height 32
#define block_width 4
#define block_length 256
#define NB 64
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
static __device__ void
magmablas_selementary_multiplication_devfunc(
magma_int_t n,
float *dA, magma_int_t ldda,
float *du,
float *dv)
{
magma_int_t idx, idy;
idx = blockIdx.x * blockDim.x + threadIdx.x;
idy = blockIdx.y * blockDim.y + threadIdx.y;
if ((idx < n/2)&&(idy < n/2)){
dA += idx + idy * ldda;
float a00, a10, a01, a11, b1, b2, b3, b4;
__shared__ float u1[block_height], u2[block_height], v1[block_width], v2[block_width];
du += idx;
dv += idy;
u1[threadIdx.x]=du[0];
u2[threadIdx.x]=du[n/2];
v1[threadIdx.y]=dv[0];
v2[threadIdx.y]=dv[n/2];
__syncthreads();
a00 = dA[0];
a01 = dA[ldda*n/2];
a10 = dA[n/2];
a11 = dA[ldda*n/2+n/2];
b1 = a00 + a01;
b2 = a10 + a11;
b3 = a00 - a01;
b4 = a10 - a11;
dA[0] = u1[threadIdx.x] * v1[threadIdx.y] * (b1 + b2);
dA[ldda*n/2] = u1[threadIdx.x] * v2[threadIdx.y] * (b3 + b4);
dA[n/2] = u2[threadIdx.x] * v1[threadIdx.y] * (b1 - b2);
dA[ldda*n/2+n/2] = u2[threadIdx.x] * v2[threadIdx.y] *(b3 - b4);
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
magmablas_selementary_multiplication_kernel(
magma_int_t n,
float *dA, magma_int_t offsetA, magma_int_t ldda,
float *du, magma_int_t offsetu,
float *dv, magma_int_t offsetv)
{
magmablas_selementary_multiplication_devfunc( n, dA+offsetA, ldda, du+offsetu, dv+offsetv);
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
magmablas_selementary_multiplication_kernel_batched(
magma_int_t n,
float **dA_array, magma_int_t offsetA, magma_int_t ldda,
float *du, magma_int_t offsetu,
float *dv, magma_int_t offsetv)
{
int batchid = blockIdx.z;
magmablas_selementary_multiplication_devfunc( n, dA_array[batchid]+offsetA, ldda, du+offsetu, dv+offsetv);
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
static __device__ void
magmablas_sapply_vector_devfunc(
magma_int_t n,
float *du, float *db)
{
magma_int_t idx;
idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < n/2){
du += idx;
db += idx;
float a1,a2;
a1 = du[0]*db[0];
a2 = du[n/2]*db[n/2];
db[0] = a1 + a2;
db[n/2] = a1 -a2;
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
magmablas_sapply_vector_kernel(
magma_int_t n,
float *du, magma_int_t offsetu, float *db, magma_int_t offsetb )
{
magmablas_sapply_vector_devfunc(n, du+offsetu, db+offsetb);
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
magmablas_sapply_vector_kernel_batched(
magma_int_t n,
float *du, magma_int_t offsetu, float **db_array, magma_int_t offsetb )
{
int batchid = blockIdx.y;
magmablas_sapply_vector_devfunc(n, du+offsetu, db_array[batchid]+offsetb);
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
static __device__ void
magmablas_sapply_transpose_vector_devfunc(
magma_int_t n,
float *du,float *db )
{
magma_int_t idx;
idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < n/2){
du += idx;
db += idx;
float a1,a2;
a1 = db[0] + db[n/2];
a2 = db[0] - db[n/2];
db[0] = du[0]*a1;
db[n/2] = du[n/2]*a2;
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
magmablas_sapply_transpose_vector_kernel(
magma_int_t n,
float *du, magma_int_t offsetu, float *db, magma_int_t offsetb )
{
magmablas_sapply_transpose_vector_devfunc(n, du+offsetu, db+offsetb);
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
magmablas_sapply_transpose_vector_kernel_batched(
magma_int_t n,
float *du, magma_int_t offsetu, float **db_array, magma_int_t offsetb )
{
int batchid = blockIdx.y;
magmablas_sapply_transpose_vector_devfunc(n, du+offsetu, db_array[batchid]+offsetb);
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
26d937f86d815f4f130d1ea5ed3d3917dbf9fe6c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <vector>
#include "HugeCTR/include/loss.hpp"
#include "HugeCTR/include/utils.cuh"
namespace HugeCTR {
namespace {
template <typename T>
__forceinline__ __device__ void atomic_global_sum_div(T val, T *acc, float div) {
val = warpReduceSum(val);
if (threadIdx.x % warpSize == 0) {
atomicAdd(acc, (T)(val / div));
}
return;
}
} // namespace
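// atomic_global_sum_div first reduces val across the 32 lanes of a warp and
// then issues a single atomicAdd per warp, cutting global atomic traffic to
// 1/warpSize of a naive per-thread atomic. warpReduceSum comes from utils.cuh;
// a typical shuffle-based implementation of such a reduction looks like the
// sketch below (illustrative, not necessarily HugeCTR's exact code):
#if 0
__inline__ __device__ float warp_reduce_sum_sketch(float val) {
  for (int offset = warpSize / 2; offset > 0; offset >>= 1)
    val += __shfl_down_sync(0xffffffff, val, offset);  // lane i accumulates lane i+offset
  return val;  // lane 0 ends up holding the warp-wide sum
}
#endif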
template <typename T>
Loss<T>::Loss(const std::shared_ptr<const Tensor<float>> &label_tensor,
const std::shared_ptr<Tensor<T>> &input_tensor,
const std::shared_ptr<Tensor<float>> &loss_tensor,
const std::shared_ptr<Regularizer<T>> regularizer, int device_id, int total_gpu_count,
float scaler)
: label_tensors_(1, label_tensor),
input_tensors_(1, input_tensor),
loss_tensors_(1, loss_tensor),
regularizer_(regularizer),
device_id_(device_id),
total_gpu_count_(total_gpu_count),
scaler_(scaler) {}
template <typename T>
void Loss<T>::compute(bool is_train, hipStream_t stream) {
CudaDeviceContext context(get_device_id());
const auto &input_tensor = input_tensors_[0];
const auto &label_tensor = label_tensors_[0];
const auto &loss_tensor = loss_tensors_[0];
const auto &input_dim = input_tensor->get_dims();
const auto &label_dim = label_tensor->get_dims();
bool row_major = (input_tensor->get_format() == TensorFormat_t::HW);
int batch_size = row_major ? input_dim[0] : input_dim[1];
int feature_dim = row_major ? input_dim[1] : input_dim[0];
T *input = input_tensor->get_ptr();
const float *label = label_tensor->get_ptr();
float *loss = loss_tensor->get_ptr();
float rterm = 0.0f;
if (regularizer_) {
regularizer_->compute_rterm(stream);
rterm = regularizer_->get_rterm();
}
do_compute(input, label, loss, batch_size, feature_dim, scaler_, rterm, is_train, stream);
if (is_train && regularizer_) {
regularizer_->initialize_wgrad(stream);
}
#ifndef NDEBUG
hipDeviceSynchronize();
CK_CUDA_THROW_(hipGetLastError());
#endif
}
template <typename T>
CrossEntropyLoss<T>::CrossEntropyLoss(const std::shared_ptr<const Tensor<float>> &label_tensor,
const std::shared_ptr<Tensor<T>> &input_tensor,
const std::shared_ptr<Tensor<float>> &loss_tensor,
const std::shared_ptr<Regularizer<T>> regularizer,
int device_id, int total_gpu_count, float scaler)
: Loss<T>(label_tensor, input_tensor, loss_tensor, regularizer, device_id, total_gpu_count,
scaler) {
if (input_tensor->get_format() != label_tensor->get_format())
CK_THROW_(Error_t::WrongInput, "Format of input tensor and label tensor don't match");
const auto &input_dim = input_tensor->get_dims();
const auto &label_dim = label_tensor->get_dims();
bool row_major = (input_tensor->get_format() == TensorFormat_t::HW);
int feature_dim = row_major ? input_dim[1] : input_dim[0];
if (feature_dim != 2)
CK_THROW_(Error_t::WrongInput, "The feature dimension of CE loss input should be 2");
if (row_major && input_dim[0] != label_dim[0])
CK_THROW_(Error_t::WrongInput, "The batch sizes of input tensor and label tensor are not same");
if (!row_major && input_dim[1] != label_dim[1])
CK_THROW_(Error_t::WrongInput, "The batch sizes of input tensor and label tensor are not same");
}
// Suppose we use one thread to calculate one sample
template <typename T>
__global__ void CrossEntropy_Kernel(T *input, const float *label, float *cel_loss, int batch_size,
int total_gpu_count, int feature_dim, bool row_major,
float scaler, float rterm, bool is_train) {
int tid = threadIdx.x;
extern __shared__ float loss_s[];
loss_s[tid] = 0.0f;
float z0_exp, z1_exp, a0, a1;
int id1, id2;
for (int i = tid; i < batch_size; i += blockDim.x) {
id1 = row_major ? i * feature_dim : i;
id2 = row_major ? i * feature_dim + 1 : i + batch_size;
z0_exp = exp((double)input[id1]);
z1_exp = exp((double)input[id2]);
a0 = z0_exp / (z0_exp + z1_exp);
a1 = z1_exp / (z0_exp + z1_exp);
bool no_click = label[i] < 0.5f;
if (is_train) {
// calculate the grad
input[id1] = (a0 - (no_click ? 1.0f : 0.0f)) / batch_size * scaler / total_gpu_count;
input[id2] = (a1 - (!no_click ? 1.0f : 0.0f)) / batch_size * scaler / total_gpu_count;
}
loss_s[tid] += -1 * log(no_click ? a0 : a1);
}
__syncthreads();
float loss_tmp = 0.0f;
if (tid == 0) {
for (int i = 0; i < blockDim.x; ++i) loss_tmp += loss_s[i];
cel_loss[0] = loss_tmp / batch_size + rterm;
}
}
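// For the two-class case handled here the kernel is a fused softmax +
// cross-entropy: from logits z0, z1 it forms a_k = exp(z_k) / (exp(z0) + exp(z1))
// and accumulates -log(a_label). In training mode it also overwrites `input`
// with the gradient of the mean loss, using the standard softmax-CE identity
//
//   dL/dz_k = a_k - onehot_k,
//
// scaled by 1/batch_size * scaler / total_gpu_count so that averaging across
// GPUs and (mixed-precision) loss scaling come out consistently.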
template <typename T>
void CrossEntropyLoss<T>::do_compute(T *input, const float *label, float *loss, int batch_size,
int feature_dim, float scaler, float rterm, bool is_train,
hipStream_t stream) {
bool row_major = (Loss<T>::input_tensors_[0]->get_format() == TensorFormat_t::HW);
int block_size = min(batch_size, 1024);
size_t smem_size = block_size * sizeof(float);
hipLaunchKernelGGL(( CrossEntropy_Kernel), dim3(1), dim3(block_size), smem_size, stream, input, label, loss, batch_size,
Loss<T>::total_gpu_count_, feature_dim,
row_major, scaler, rterm, is_train);
}
template <typename T>
BinaryCrossEntropyLoss<T>::BinaryCrossEntropyLoss(
const std::shared_ptr<const Tensor<float>> &label_tensor,
const std::shared_ptr<Tensor<T>> &input_tensor,
const std::shared_ptr<Tensor<float>> &loss_tensor,
const std::shared_ptr<Regularizer<T>> regularizer, int device_id, int total_gpu_count,
float scaler)
: Loss<T>(label_tensor, input_tensor, loss_tensor, regularizer, device_id, total_gpu_count,
scaler) {
if (input_tensor->get_format() != label_tensor->get_format())
CK_THROW_(Error_t::WrongInput, "Format of input tensor and label tensor don't match");
bool row_major = (input_tensor->get_format() == TensorFormat_t::HW);
const auto &input_dim = input_tensor->get_dims();
int feature_dim = row_major ? input_dim[1] : input_dim[0];
if (feature_dim != 1)
CK_THROW_(Error_t::WrongInput, "The feature dimension of BCE loss input should be 1");
}
// Suppose we use one thread to calculate one sample
template <typename T>
__global__ void BinaryCrossEntropy_Kernel(T *input, const float *label, float *bce_loss,
float scaler, int batch_size, int total_gpu_count,
float rterm, bool is_train) {
int tid = threadIdx.x;
extern __shared__ float loss_s[];
loss_s[tid] = 0.0f;
for (int i = tid; i < batch_size; i += blockDim.x) {
const float x = input[i];
const float y = label[i];
if (x >= 0) {
float exp_neg_x = exp(-x);
loss_s[tid] += x * (1 - y) + log(1 + exp_neg_x);
input[i] = is_train ? ((1 - y) - exp_neg_x / (1 + exp_neg_x)) * scaler / (float)batch_size /
total_gpu_count
: 1 / (1 + exp_neg_x);
} else {
float exp_x = exp(x);
loss_s[tid] += -x * y + log(1 + exp_x);
input[i] = is_train
? (-y + exp_x / (1 + exp_x)) * scaler / (float)batch_size / total_gpu_count
: exp_x / (exp_x + 1);
}
}
__syncthreads();
float loss_tmp = 0.0f;
if (tid == 0) {
for (int i = 0; i < blockDim.x; ++i) loss_tmp += loss_s[i];
bce_loss[0] = loss_tmp / batch_size + rterm;
}
}
template <typename T>
void BinaryCrossEntropyLoss<T>::do_compute(T *input, const float *label, float *loss,
int batch_size, int feature_dim, float scaler,
float rterm, bool is_train, hipStream_t stream) {
int block_size = min(batch_size, 1024);
size_t smem_size = block_size * sizeof(float);
hipLaunchKernelGGL(( BinaryCrossEntropy_Kernel), dim3(1), dim3(block_size), smem_size, stream,
input, label, loss, scaler, batch_size, Loss<T>::total_gpu_count_, rterm, is_train);
}
__forceinline__ __device__ __host__ float cross_entropy_loss(float x, float y) {
float loss = 0.f;
if (x >= 0) {
float exp_neg_x = exp(-x);
loss = x * (1 - y) + log(1 + exp_neg_x);
} else {
float exp_x = exp(x);
loss = -x * y + log(1 + exp_x);
}
return -loss;
}
__forceinline__ __device__ __host__ float cross_entropy_loss_backward(float x, float y) {
float grad = 0.f;
if (x >= 0) {
float exp_neg_x = exp(-x);
grad = ((1 - y) - exp_neg_x / (1 + exp_neg_x));
} else {
float exp_x = exp(x);
grad = (-y + exp_x / (1 + exp_x));
}
return grad;
}
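// Both helpers use the sign-split, numerically stable form of binary
// cross-entropy on logits: for a logit x and label y,
//
//   BCE(x, y)  = max(x, 0) - x*y + log(1 + exp(-|x|)),
//   d/dx BCE   = sigmoid(x) - y,
//
// which never exponentiates a large positive argument. cross_entropy_loss()
// returns the negated loss and cross_entropy_loss_backward() returns the raw
// sigmoid(x) - y; the caller applies the target weight, the 1/size averaging
// and the scaler / total_gpu_count factors.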
template <typename T>
__global__ void MultiCrossEntropy_Kernel(T *input, const float *label, const float *target_weight,
float *bce_loss, int batchsize, int total_gpu_count,
int labels_per_sample, float scaler, float rterm,
bool is_train) {
int tid = threadIdx.x + blockDim.x * blockIdx.x;
int num_threads = blockDim.x * gridDim.x;
float loss_s = 0.f;
const int size = batchsize * labels_per_sample;
for (int i = tid; i < size; i += num_threads) {
int target_weight_idx = i % labels_per_sample;
const float x = input[i];
const float y = label[i];
float loss =
(label[i] < -0.5) ? 0.f : (target_weight[target_weight_idx] * cross_entropy_loss(x, y));
loss_s += loss;
if (is_train) {
input[i] = (label[i] < -0.5)
? 0.f
: (target_weight[target_weight_idx] * cross_entropy_loss_backward(x, y) /
size * scaler / total_gpu_count);
}
}
atomic_global_sum_div(-loss_s, bce_loss, size);
if (tid == 0) {
atomicAdd(bce_loss, rterm);
}
return;
}
template <typename T>
void MultiCrossEntropyLoss<T>::do_compute(T *input, const float *label, float *loss, int batch_size,
int feature_dim, float scaler, float rterm, bool is_train,
hipStream_t stream) {
int labels_per_sample = feature_dim;
hipMemsetAsync(loss, 0, Loss<T>::loss_tensors_[0]->get_size(), stream);
const int BLOCK_SIZE = 256;
const int GRID_SIZE = min(40, (batch_size * labels_per_sample - 1) / BLOCK_SIZE);
float *target_weight = target_weight_->get_ptr();
hipLaunchKernelGGL(( MultiCrossEntropy_Kernel), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, stream,
input, label, target_weight, loss, batch_size, Loss<T>::total_gpu_count_, labels_per_sample,
scaler, rterm, is_train);
}
template <typename T>
MultiCrossEntropyLoss<T>::MultiCrossEntropyLoss(
const std::shared_ptr<const Tensor<float>> &label_tensor,
const std::shared_ptr<Tensor<T>> &input_tensor,
const std::shared_ptr<Tensor<float>> &loss_tensor,
const std::shared_ptr<Regularizer<T>> regularizer, const std::vector<float> &target_weight,
int device_id, int total_gpu_count, float scaler)
: Loss<T>(label_tensor, input_tensor, loss_tensor, regularizer, device_id, total_gpu_count,
scaler) {
if (label_tensor->get_dims().size() != 2 || label_tensor->get_format() != TensorFormat_t::HW ||
input_tensor->get_dims().size() != 2 || input_tensor->get_format() != TensorFormat_t::HW ||
label_tensor->get_dims()[0] != input_tensor->get_dims()[0] ||
label_tensor->get_dims()[1] != input_tensor->get_dims()[1]) {
CK_THROW_(Error_t::WrongInput, "Format of input tensor and label tensor don't match");
}
// verify the length of target_weight
if (target_weight.size() != input_tensor->get_dims()[1]) {
CK_THROW_(Error_t::WrongInput, "target_weight.size() != input_tensor.get_dims()[0]");
}
// load target_weight to internal Tensor
internal_buff_.reset(new GeneralBuffer<float>());
std::vector<size_t> twdim = {1, label_tensor->get_dims()[1]};
target_weight_.reset(new Tensor<float>(twdim, internal_buff_, TensorFormat_t::HW));
internal_buff_->init(device_id);
CudaDeviceContext context(device_id);
CK_CUDA_THROW_(hipMemcpy(target_weight_->get_ptr(), target_weight.data(),
target_weight_->get_size(), hipMemcpyHostToDevice));
return;
}
template class Loss<__half>;
template class Loss<float>;
template class MultiCrossEntropyLoss<__half>;
template class MultiCrossEntropyLoss<float>;
template class CrossEntropyLoss<__half>;
template class CrossEntropyLoss<float>;
template class BinaryCrossEntropyLoss<__half>;
template class BinaryCrossEntropyLoss<float>;
} // namespace HugeCTR
|
26d937f86d815f4f130d1ea5ed3d3917dbf9fe6c.cu
|
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <vector>
#include "HugeCTR/include/loss.hpp"
#include "HugeCTR/include/utils.cuh"
namespace HugeCTR {
namespace {
template <typename T>
__forceinline__ __device__ void atomic_global_sum_div(T val, T *acc, float div) {
val = warpReduceSum(val);
if (threadIdx.x % warpSize == 0) {
atomicAdd(acc, (T)(val / div));
}
return;
}
} // namespace
template <typename T>
Loss<T>::Loss(const std::shared_ptr<const Tensor<float>> &label_tensor,
const std::shared_ptr<Tensor<T>> &input_tensor,
const std::shared_ptr<Tensor<float>> &loss_tensor,
const std::shared_ptr<Regularizer<T>> regularizer, int device_id, int total_gpu_count,
float scaler)
: label_tensors_(1, label_tensor),
input_tensors_(1, input_tensor),
loss_tensors_(1, loss_tensor),
regularizer_(regularizer),
device_id_(device_id),
total_gpu_count_(total_gpu_count),
scaler_(scaler) {}
template <typename T>
void Loss<T>::compute(bool is_train, cudaStream_t stream) {
CudaDeviceContext context(get_device_id());
const auto &input_tensor = input_tensors_[0];
const auto &label_tensor = label_tensors_[0];
const auto &loss_tensor = loss_tensors_[0];
const auto &input_dim = input_tensor->get_dims();
const auto &label_dim = label_tensor->get_dims();
bool row_major = (input_tensor->get_format() == TensorFormat_t::HW);
int batch_size = row_major ? input_dim[0] : input_dim[1];
int feature_dim = row_major ? input_dim[1] : input_dim[0];
T *input = input_tensor->get_ptr();
const float *label = label_tensor->get_ptr();
float *loss = loss_tensor->get_ptr();
float rterm = 0.0f;
if (regularizer_) {
regularizer_->compute_rterm(stream);
rterm = regularizer_->get_rterm();
}
do_compute(input, label, loss, batch_size, feature_dim, scaler_, rterm, is_train, stream);
if (is_train && regularizer_) {
regularizer_->initialize_wgrad(stream);
}
#ifndef NDEBUG
cudaDeviceSynchronize();
CK_CUDA_THROW_(cudaGetLastError());
#endif
}
template <typename T>
CrossEntropyLoss<T>::CrossEntropyLoss(const std::shared_ptr<const Tensor<float>> &label_tensor,
const std::shared_ptr<Tensor<T>> &input_tensor,
const std::shared_ptr<Tensor<float>> &loss_tensor,
const std::shared_ptr<Regularizer<T>> regularizer,
int device_id, int total_gpu_count, float scaler)
: Loss<T>(label_tensor, input_tensor, loss_tensor, regularizer, device_id, total_gpu_count,
scaler) {
if (input_tensor->get_format() != label_tensor->get_format())
CK_THROW_(Error_t::WrongInput, "Format of input tensor and label tensor don't match");
const auto &input_dim = input_tensor->get_dims();
const auto &label_dim = label_tensor->get_dims();
bool row_major = (input_tensor->get_format() == TensorFormat_t::HW);
int feature_dim = row_major ? input_dim[1] : input_dim[0];
if (feature_dim != 2)
CK_THROW_(Error_t::WrongInput, "The feature dimension of CE loss input should be 2");
if (row_major && input_dim[0] != label_dim[0])
CK_THROW_(Error_t::WrongInput, "The batch sizes of input tensor and label tensor are not same");
if (!row_major && input_dim[1] != label_dim[1])
CK_THROW_(Error_t::WrongInput, "The batch sizes of input tensor and label tensor are not same");
}
// Suppose we use one thread to calculate one sample
template <typename T>
__global__ void CrossEntropy_Kernel(T *input, const float *label, float *cel_loss, int batch_size,
int total_gpu_count, int feature_dim, bool row_major,
float scaler, float rterm, bool is_train) {
int tid = threadIdx.x;
extern __shared__ float loss_s[];
loss_s[tid] = 0.0f;
float z0_exp, z1_exp, a0, a1;
int id1, id2;
for (int i = tid; i < batch_size; i += blockDim.x) {
id1 = row_major ? i * feature_dim : i;
id2 = row_major ? i * feature_dim + 1 : i + batch_size;
z0_exp = exp((double)input[id1]);
z1_exp = exp((double)input[id2]);
a0 = z0_exp / (z0_exp + z1_exp);
a1 = z1_exp / (z0_exp + z1_exp);
bool no_click = label[i] < 0.5f;
if (is_train) {
// calculate the grad
input[id1] = (a0 - (no_click ? 1.0f : 0.0f)) / batch_size * scaler / total_gpu_count;
input[id2] = (a1 - (!no_click ? 1.0f : 0.0f)) / batch_size * scaler / total_gpu_count;
}
loss_s[tid] += -1 * log(no_click ? a0 : a1);
}
__syncthreads();
float loss_tmp = 0.0f;
if (tid == 0) {
for (int i = 0; i < blockDim.x; ++i) loss_tmp += loss_s[i];
cel_loss[0] = loss_tmp / batch_size + rterm;
}
}
template <typename T>
void CrossEntropyLoss<T>::do_compute(T *input, const float *label, float *loss, int batch_size,
int feature_dim, float scaler, float rterm, bool is_train,
cudaStream_t stream) {
bool row_major = (Loss<T>::input_tensors_[0]->get_format() == TensorFormat_t::HW);
int block_size = min(batch_size, 1024);
size_t smem_size = block_size * sizeof(float);
CrossEntropy_Kernel<<<1, block_size, smem_size, stream>>>(input, label, loss, batch_size,
Loss<T>::total_gpu_count_, feature_dim,
row_major, scaler, rterm, is_train);
}
template <typename T>
BinaryCrossEntropyLoss<T>::BinaryCrossEntropyLoss(
const std::shared_ptr<const Tensor<float>> &label_tensor,
const std::shared_ptr<Tensor<T>> &input_tensor,
const std::shared_ptr<Tensor<float>> &loss_tensor,
const std::shared_ptr<Regularizer<T>> regularizer, int device_id, int total_gpu_count,
float scaler)
: Loss<T>(label_tensor, input_tensor, loss_tensor, regularizer, device_id, total_gpu_count,
scaler) {
if (input_tensor->get_format() != label_tensor->get_format())
CK_THROW_(Error_t::WrongInput, "Format of input tensor and label tensor don't match");
bool row_major = (input_tensor->get_format() == TensorFormat_t::HW);
const auto &input_dim = input_tensor->get_dims();
int feature_dim = row_major ? input_dim[1] : input_dim[0];
if (feature_dim != 1)
CK_THROW_(Error_t::WrongInput, "The feature dimension of BCE loss input should be 1");
}
// Suppose we use one thread to calculate one sample
template <typename T>
__global__ void BinaryCrossEntropy_Kernel(T *input, const float *label, float *bce_loss,
float scaler, int batch_size, int total_gpu_count,
float rterm, bool is_train) {
int tid = threadIdx.x;
extern __shared__ float loss_s[];
loss_s[tid] = 0.0f;
for (int i = tid; i < batch_size; i += blockDim.x) {
const float x = input[i];
const float y = label[i];
if (x >= 0) {
float exp_neg_x = exp(-x);
loss_s[tid] += x * (1 - y) + log(1 + exp_neg_x);
input[i] = is_train ? ((1 - y) - exp_neg_x / (1 + exp_neg_x)) * scaler / (float)batch_size /
total_gpu_count
: 1 / (1 + exp_neg_x);
} else {
float exp_x = exp(x);
loss_s[tid] += -x * y + log(1 + exp_x);
input[i] = is_train
? (-y + exp_x / (1 + exp_x)) * scaler / (float)batch_size / total_gpu_count
: exp_x / (exp_x + 1);
}
}
__syncthreads();
float loss_tmp = 0.0f;
if (tid == 0) {
for (int i = 0; i < blockDim.x; ++i) loss_tmp += loss_s[i];
bce_loss[0] = loss_tmp / batch_size + rterm;
}
}
template <typename T>
void BinaryCrossEntropyLoss<T>::do_compute(T *input, const float *label, float *loss,
int batch_size, int feature_dim, float scaler,
float rterm, bool is_train, cudaStream_t stream) {
int block_size = min(batch_size, 1024);
size_t smem_size = block_size * sizeof(float);
BinaryCrossEntropy_Kernel<<<1, block_size, smem_size, stream>>>(
input, label, loss, scaler, batch_size, Loss<T>::total_gpu_count_, rterm, is_train);
}
__forceinline__ __device__ __host__ float cross_entropy_loss(float x, float y) {
float loss = 0.f;
if (x >= 0) {
float exp_neg_x = exp(-x);
loss = x * (1 - y) + log(1 + exp_neg_x);
} else {
float exp_x = exp(x);
loss = -x * y + log(1 + exp_x);
}
return -loss;
}
__forceinline__ __device__ __host__ float cross_entropy_loss_backward(float x, float y) {
float grad = 0.f;
if (x >= 0) {
float exp_neg_x = exp(-x);
grad = ((1 - y) - exp_neg_x / (1 + exp_neg_x));
} else {
float exp_x = exp(x);
grad = (-y + exp_x / (1 + exp_x));
}
return grad;
}
template <typename T>
__global__ void MultiCrossEntropy_Kernel(T *input, const float *label, const float *target_weight,
float *bce_loss, int batchsize, int total_gpu_count,
int labels_per_sample, float scaler, float rterm,
bool is_train) {
int tid = threadIdx.x + blockDim.x * blockIdx.x;
int num_threads = blockDim.x * gridDim.x;
float loss_s = 0.f;
const int size = batchsize * labels_per_sample;
for (int i = tid; i < size; i += num_threads) {
int target_weight_idx = i % labels_per_sample;
const float x = input[i];
const float y = label[i];
float loss =
(label[i] < -0.5) ? 0.f : (target_weight[target_weight_idx] * cross_entropy_loss(x, y));
loss_s += loss;
if (is_train) {
input[i] = (label[i] < -0.5)
? 0.f
: (target_weight[target_weight_idx] * cross_entropy_loss_backward(x, y) /
size * scaler / total_gpu_count);
}
}
atomic_global_sum_div(-loss_s, bce_loss, size);
if (tid == 0) {
atomicAdd(bce_loss, rterm);
}
return;
}
template <typename T>
void MultiCrossEntropyLoss<T>::do_compute(T *input, const float *label, float *loss, int batch_size,
int feature_dim, float scaler, float rterm, bool is_train,
cudaStream_t stream) {
int labels_per_sample = feature_dim;
cudaMemsetAsync(loss, 0, Loss<T>::loss_tensors_[0]->get_size(), stream);
const int BLOCK_SIZE = 256;
  const int GRID_SIZE = min(40, (batch_size * labels_per_sample + BLOCK_SIZE - 1) / BLOCK_SIZE);  // ceil-divide so the grid is never empty
float *target_weight = target_weight_->get_ptr();
MultiCrossEntropy_Kernel<<<GRID_SIZE, BLOCK_SIZE, 0, stream>>>(
input, label, target_weight, loss, batch_size, Loss<T>::total_gpu_count_, labels_per_sample,
scaler, rterm, is_train);
}
template <typename T>
MultiCrossEntropyLoss<T>::MultiCrossEntropyLoss(
const std::shared_ptr<const Tensor<float>> &label_tensor,
const std::shared_ptr<Tensor<T>> &input_tensor,
const std::shared_ptr<Tensor<float>> &loss_tensor,
const std::shared_ptr<Regularizer<T>> regularizer, const std::vector<float> &target_weight,
int device_id, int total_gpu_count, float scaler)
: Loss<T>(label_tensor, input_tensor, loss_tensor, regularizer, device_id, total_gpu_count,
scaler) {
if (label_tensor->get_dims().size() != 2 || label_tensor->get_format() != TensorFormat_t::HW ||
input_tensor->get_dims().size() != 2 || input_tensor->get_format() != TensorFormat_t::HW ||
label_tensor->get_dims()[0] != input_tensor->get_dims()[0] ||
label_tensor->get_dims()[1] != input_tensor->get_dims()[1]) {
CK_THROW_(Error_t::WrongInput, "Format of input tensor and label tensor don't match");
}
// verify the length of target_weight
if (target_weight.size() != input_tensor->get_dims()[1]) {
CK_THROW_(Error_t::WrongInput, "target_weight.size() != input_tensor.get_dims()[0]");
}
// load target_weight to internal Tensor
internal_buff_.reset(new GeneralBuffer<float>());
std::vector<size_t> twdim = {1, label_tensor->get_dims()[1]};
target_weight_.reset(new Tensor<float>(twdim, internal_buff_, TensorFormat_t::HW));
internal_buff_->init(device_id);
CudaDeviceContext context(device_id);
CK_CUDA_THROW_(cudaMemcpy(target_weight_->get_ptr(), target_weight.data(),
target_weight_->get_size(), cudaMemcpyHostToDevice));
return;
}
template class Loss<__half>;
template class Loss<float>;
template class MultiCrossEntropyLoss<__half>;
template class MultiCrossEntropyLoss<float>;
template class CrossEntropyLoss<__half>;
template class CrossEntropyLoss<float>;
template class BinaryCrossEntropyLoss<__half>;
template class BinaryCrossEntropyLoss<float>;
} // namespace HugeCTR
|
368d8f80df0ec649f28965c0cc8c344a53a079bb.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math_functions.h> // CUDA's, not caffe's, for fabs, signbit
#include <thrust/device_vector.h>
#include <thrust/functional.h> // thrust::plus
#include <thrust/reduce.h>
#include <cmath>
#include "caffe/common.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/util/render_functions.hpp"
#define numThreadsPerBlock_1d 32
#define numThreadsPerBlock 1024
#define LIMB_MPI {0,1, 2,3, 3,4, 5,6, 6,7, 8,9, 9,10, 11,12, 12,13}
#define LIMB_COCO {1,2, 1,5, 2,3, 3,4, 5,6, 6,7, 1,8, 8,9, 9,10, 1,11, 11,12, 12,13, 1,0, 0,14, 14,16, 0,15, 15,17, 2,16, 5,17}
#define LIMB_COCO_NOEAR {1,2, 1,5, 2,3, 3,4, 5,6, 6,7, 1,8, 8,9, 9,10, 1,11, 11,12, 12,13, 1,0, 0,14, 14,16, 0,15, 15,17}
//#define LIMB_COCO_NOEAR {1,2, 1,5, 2,3, 3,4, 5,6, 6,7, 1,8, 8,9, 9,10, 1,11, 11,12, 12,13, 1,15, 14,15, 14,16, 15,17, 0,15}
namespace caffe {
inline __device__ void getColor(float* c, float v, float vmin, float vmax)
{
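  // Jet-style colormap: clamp v to [vmin, vmax] and map it onto a
  // blue -> cyan -> green -> yellow -> red ramp, written as B, G, R into c.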
c[0] = c[1] = c[2] = 255; // b, g, r, white
float dv;
if (v < vmin)
v = vmin;
if (v > vmax)
v = vmax;
dv = vmax - vmin;
if (v < (vmin + 0.125 * dv)) {
c[0] = 256 * (0.5 + (v * 4)); //B: 0.5 ~ 1
c[1] = c[2] = 0;
} else if (v < (vmin + 0.375 * dv)) {
c[0] = 255;
c[1] = 256 * (v - 0.125) * 4; //G: 0 ~ 1
c[2] = 0;
} else if (v < (vmin + 0.625 * dv)) {
c[0] = 256 * (-4 * v + 2.5); //B: 1 ~ 0
c[1] = 255;
c[2] = 256 * (4 * (v - 0.375)); //R: 0 ~ 1
} else if (v < (vmin + 0.875 * dv)) {
c[0] = 0;
c[1] = 256 * (-4 * v + 3.5); //G: 1 ~ 0
c[2] = 255;
} else {
c[0] = 0;
c[1] = 0;
c[2] = 256 * (-4 * v + 4.5); //R: 1 ~ 0.5
}
}
inline __device__ void getColor2(float* c, float v, float vmin, float vmax)
{
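  // Optical-flow style color wheel (RY/YG/GC/CB/BM/MR hue segments): v is clamped
  // to [vmin, vmax], scaled onto the 55-entry wheel, and mapped to a hue.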
c[0] = c[1] = c[2] = 255; // b, g, r, white
if (v < vmin)
v = vmin;
if (v > vmax)
v = vmax;
v = 55*v;
const int RY = 15;
const int YG = 6;
const int GC = 4;
const int CB = 11;
const int BM = 13;
const int MR = 6;
//const int ncols = RY+YG+GC+CB+BM+MR; // 55
if (v < RY) {
c[0] = 255; //B: 0.5 ~ 1
c[1] = 255*(v/(RY));
c[2] = 0;
} else if (v < RY+YG) {
c[0] = 255 - 255*((v-RY)/(YG));
c[1] = 255;
c[2] = 0;
} else if (v < RY+YG+GC) {
c[0] = 0;
c[1] = 255;
c[2] = 255*((v-RY-YG)/(GC));
} else if (v < RY+YG+GC+CB) {
c[0] = 0;
c[1] = 255 - 255*((v-RY-YG-GC)/(CB));
c[2] = 255;
} else if (v < RY+YG+GC+CB+BM) {
c[0] = 255*((v-RY-YG-GC-CB)/(BM));
c[1] = 0;
c[2] = 255;
} else if (v < RY+YG+GC+CB+BM+MR) {
c[0] = 255;
c[1] = 0;
c[2] = 255-255*((v-RY-YG-GC-CB-BM)/(MR));
} else {
c[0] = 255;
c[1] = 0;
c[2] = 0;
}
}
inline __device__ void getColorXY(float* c, float x, float y) {
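  // Encode a 2-D vector as a color: the direction picks a hue via getColor2 and
  // the magnitude (clamped to 1) scales the brightness; used for affinity fields.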
float rad = sqrt( x*x + y*y );
float a = atan2(-y,-x)/M_PI;
float fk = (a+1)/2.0; // 0 to 1
if (::isnan(fk)) fk = 0;
// fk = 1-exp(-fk*2);
if (rad>1) rad = 1;
//if (rad>0.5) rad = 1;
getColor2(c, fk, 0, 1);
// c[0] = 255*(1 - rad*(1-c[0]/255));
// c[1] = 255*(1 - rad*(1-c[1]/255));
// c[2] = 255*(1 - rad*(1-c[2]/255));
c[0] = 255*(rad*(c[0]/255));
c[1] = 255*(rad*(c[1]/255));
c[2] = 255*(rad*(c[2]/255));
}
inline __device__ void cubic_interpolation(float &out, float &v0, float &v1, float &v2, float &v3, float dx) {
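  // Catmull-Rom cubic through samples v0..v3 evaluated at fractional offset dx;
  // used below to upsample net-resolution heatmaps to canvas resolution.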
// Dtype a = (-0.5f * v0 + 1.5f * v1 - 1.5f * v2 + 0.5f * v3);
// Dtype b = (v0 - 2.5f * v1 + 2.0 * v2 - 0.5 * v3);
// Dtype c = (-0.5f * v0 + 0.5f * v2);
// out = ((a * dx + b) * dx + c) * dx + v1;
out = (-0.5f * v0 + 1.5f * v1 - 1.5f * v2 + 0.5f * v3) * dx * dx * dx
+ (v0 - 2.5f * v1 + 2.0 * v2 - 0.5 * v3) * dx * dx
+ (-0.5f * v0 + 0.5f * v2) * dx
+ v1;
}
__global__ void render_pose_29parts(float* dst_pointer, int w_canvas, int h_canvas, float ratio_to_origin,
float* poses, int boxsize, int num_people, float threshold){
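  // One thread shades one canvas pixel.  Keypoints of all people are staged in
  // shared memory; limbs are drawn as rotated ellipses spanning the two joints and
  // joints as filled discs, alpha-blended over the existing BGR canvas planes.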
const int NUM_PARTS = 15;
//poses has length 3 * 15 * num_people
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
//int plotted = 0;
int global_idx = threadIdx.y * blockDim.x + threadIdx.x;
__shared__ float shared_poses[NUM_PARTS*3*RENDER_MAX_PEOPLE];
if(global_idx < num_people * NUM_PARTS){
shared_poses[3*global_idx] = poses[3*global_idx]; //x
shared_poses[3*global_idx+1] = poses[3*global_idx+1]; //y
shared_poses[3*global_idx+2] = poses[3*global_idx+2]; //v
}
__syncthreads();
const int limb[] = LIMB_MPI;
const int nlimb = sizeof(limb)/(2*sizeof(int));
int color[27] = {255, 0, 0,
255, 170, 0,
170, 255, 0,
0, 255, 0,
0, 255, 170,
0, 170, 255,
0, 0, 255,
170, 0, 255,
255, 0, 170};
//float offset = ratio_to_origin * 0.5 - 0.5;
float radius = 3*h_canvas / 200.0f;
float stickwidth = h_canvas / 60.0f;
if(x < w_canvas && y < h_canvas){
//if(x == 0 && y == 0){
float b, g, r;
// b = 255 * 0.7 + 0.3 * (image_ref[y*w + x] + 0.5) * 256;
// g = 255 * 0.7 + 0.3 * (image_ref[w*h + y*w + x] + 0.5) * 256;
// r = 255 * 0.7 + 0.3 * (image_ref[2*w*h + y*w + x] + 0.5) * 256;
b = dst_pointer[ y * w_canvas + x];
g = dst_pointer[ w_canvas * h_canvas + y * w_canvas + x];
r = dst_pointer[2 * w_canvas * h_canvas + y * w_canvas + x];
for(int p = 0; p < num_people; p++){
for(int l = 0; l < nlimb; l++){
float b_sqrt = stickwidth * stickwidth; //fixed
float alpha = 0.6;
int part_a = limb[2*l];
int part_b = limb[2*l+1];
float x_a = (shared_poses[p*NUM_PARTS*3 + part_a*3]); // * ratio_to_origin + offset;
float x_b = (shared_poses[p*NUM_PARTS*3 + part_b*3]); // * ratio_to_origin + offset;
float y_a = (shared_poses[p*NUM_PARTS*3 + part_a*3 + 1]); // * ratio_to_origin + offset;
float y_b = (shared_poses[p*NUM_PARTS*3 + part_b*3 + 1]); // * ratio_to_origin + offset;
float value_a = shared_poses[p*NUM_PARTS*3 + part_a*3 + 2];
float value_b = shared_poses[p*NUM_PARTS*3 + part_b*3 + 2];
if(value_a > threshold && value_b > threshold){
float x_p = (x_a + x_b) / 2;
float y_p = (y_a + y_b) / 2;
float angle = atan2f(y_b - y_a, x_b - x_a);
float sine = sinf(angle);
float cosine = cosf(angle);
float a_sqrt = (x_a - x_p) * (x_a - x_p) + (y_a - y_p) * (y_a - y_p);
if (l==0) {
a_sqrt *= 1.2;
b_sqrt = a_sqrt;
// alpha *= 0.5;
}
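          // Rotate the pixel into the limb-aligned frame: A runs along the limb,
          // B across it, and judge <= 1 tests membership in the ellipse with
          // semi-axes sqrt(a_sqrt) and sqrt(b_sqrt).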
float A = cosine * (x - x_p) + sine * (y - y_p);
float B = sine * (x - x_p) - cosine * (y - y_p);
float judge = A * A / a_sqrt + B * B / b_sqrt;
float minV = 0;
if (l==0) {
minV = 0.8;
}
if(judge>= minV && judge <= 1){
b = (1-alpha) * b + alpha * color[l*3+2];
g = (1-alpha) * g + alpha * color[l*3+1];
r = (1-alpha) * r + alpha * color[l*3];
//plotted = 1;
}
}
}
for(int i = 0; i < NUM_PARTS; i++) { //for every point
float local_x = shared_poses[p*NUM_PARTS*3 + i*3];
float local_y = shared_poses[p*NUM_PARTS*3 + i*3 + 1];
float value = shared_poses[p*NUM_PARTS*3 + i*3 + 2];
float pose_x_on_image = local_x; // * ratio_to_origin + offset;
float pose_y_on_image = local_y; // * ratio_to_origin + offset;
if(value > threshold) {
if((x - pose_x_on_image) * (x - pose_x_on_image) + (y - pose_y_on_image) * (y - pose_y_on_image) <= radius * radius){
b = 0.6 * b + 0.4 * color[(i%9)*3+2];
g = 0.6 * g + 0.4 * color[(i%9)*3+1];
r = 0.6 * r + 0.4 * color[(i%9)*3];
}
}
}
}
dst_pointer[ y * w_canvas + x] = b; //plot dot
dst_pointer[ w_canvas * h_canvas + y * w_canvas + x] = g;
dst_pointer[2 * w_canvas * h_canvas + y * w_canvas + x] = r;
// if(x==0 && y==0){
// printf("exiting\n");
// }
}
}
__global__ void render_pose_29parts_heatmap(float* dst_pointer, int w_canvas, int h_canvas, int w_net,
int h_net, float* heatmaps, int num_people, int part){
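  // Upsample heatmap channel `part` from net resolution to canvas resolution with
  // the Catmull-Rom kernel above and blend it 50/50 with the canvas via getColor.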
const int NUM_PARTS = 15;
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
//int global_idx = threadIdx.y * blockDim.x + threadIdx.x;
__syncthreads();
if(x < w_canvas && y < h_canvas){
//heatmaps has length w_net * h_net * 15
int offset3 = w_net * h_net * NUM_PARTS;
int offset2 = w_net * h_net;
float b, g, r;
float value = (part == NUM_PARTS-1) ? 1 : 0;
float h_inv = (float)h_net / (float)h_canvas;
float w_inv = (float)w_net / (float)w_canvas;
// b = 255 * 0.7 + 0.3 * (image_ref[y*w + x] + 0.5) * 256;
// g = 255 * 0.7 + 0.3 * (image_ref[w*h + y*w + x] + 0.5) * 256;
// r = 255 * 0.7 + 0.3 * (image_ref[2*w*h + y*w + x] + 0.5) * 256;
b = dst_pointer[ y * w_canvas + x];
g = dst_pointer[ w_canvas * h_canvas + y * w_canvas + x];
r = dst_pointer[2 * w_canvas * h_canvas + y * w_canvas + x];
for(int p = 0; p < 1; p++){
float x_on_box = w_inv * x + (0.5 * w_inv - 0.5);
float y_on_box = h_inv * y + (0.5 * h_inv - 0.5);
if(x_on_box >= 0 && x_on_box < w_net && y_on_box >=0 && y_on_box < h_net){
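        // Gather a 4x4 neighbourhood clamped to the heatmap borders, interpolate
        // along x for each of the four rows, then once more along y.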
float value_this;
int x_nei[4];
x_nei[1] = int(x_on_box + 1e-5);
x_nei[1] = (x_nei[1] < 0) ? 0 : x_nei[1];
x_nei[0] = (x_nei[1] - 1 < 0) ? x_nei[1] : (x_nei[1] - 1);
x_nei[2] = (x_nei[1] + 1 >= w_net) ? (w_net - 1) : (x_nei[1] + 1);
x_nei[3] = (x_nei[2] + 1 >= w_net) ? (w_net - 1) : (x_nei[2] + 1);
float dx = x_on_box - x_nei[1];
int y_nei[4];
y_nei[1] = int(y_on_box + 1e-5);
y_nei[1] = (y_nei[1] < 0) ? 0 : y_nei[1];
y_nei[0] = (y_nei[1] - 1 < 0) ? y_nei[1] : (y_nei[1] - 1);
y_nei[2] = (y_nei[1] + 1 >= h_net) ? (h_net - 1) : (y_nei[1] + 1);
y_nei[3] = (y_nei[2] + 1 >= h_net) ? (h_net - 1) : (y_nei[2] + 1);
float dy = y_on_box - y_nei[1];
float temp[4];
int offset_src = p * offset3 + part * offset2;
for(int i = 0; i < 4; i++){
cubic_interpolation(temp[i], heatmaps[offset_src + y_nei[i]*w_net + x_nei[0]],
heatmaps[offset_src + y_nei[i]*w_net + x_nei[1]],
heatmaps[offset_src + y_nei[i]*w_net + x_nei[2]],
heatmaps[offset_src + y_nei[i]*w_net + x_nei[3]], dx);
}
cubic_interpolation(value_this, temp[0], temp[1], temp[2], temp[3], dy);
// if(part != 14){
// if(value_this > value)
// value = value_this;
// } else {
// if(value_this < value)
value = value_this;
// }
}
}
float c[3];
if (part<16){
getColor(c, value, 0, 1);
} else {
getColor(c, value, -1, 1);
}
b = 0.5 * b + 0.5 * c[0];
g = 0.5 * g + 0.5 * c[1];
r = 0.5 * r + 0.5 * c[2];
dst_pointer[ y * w_canvas + x] = b; //plot dot
dst_pointer[ w_canvas * h_canvas + y * w_canvas + x] = g;
dst_pointer[2 * w_canvas * h_canvas + y * w_canvas + x] = r;
// if(x==0 && y==0){
// printf("exiting\n");
// }
}
}
void render_mpi_parts(float* canvas, int w_canvas, int h_canvas, int w_net, int h_net,
float* heatmaps, int boxsize, float* centers, float* poses, vector<int> num_people, int part){
//canvas, image in width * height * 3 * N
//heatmaps in w_net * h_net * 15 * (P1+P2+...+PN)
//centers in 2 * 11 * 1 * N
//poses in 3 * 1 * 15 * (P1+P2+...+PN)
//num_people has length P, indicating P1, ..., PN
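  //part == 0 draws the skeleton overlay; part > 0 draws heatmap channel part-1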
const int NUM_PARTS = 15;
int N = num_people.size(); //batch size
//LOG(ERROR) << "Number of frames in batch: " << N;
//int count = 0;
//int offset_canvas = w_canvas * h_canvas * 3; // 3 because we only render one image here
int offset_heatmap = w_net * h_net * NUM_PARTS; // boxsize * boxsize * 15
//int offset_info = 33; //22
int offset_pose = NUM_PARTS*3;
float threshold = 0.0;
int offset_pose_so_far = 0;
int offset_heatmap_so_far = 0;
float ratio_to_origin = (float)h_canvas / (float)h_net;
dim3 threadsPerBlock(numThreadsPerBlock_1d, numThreadsPerBlock_1d);
dim3 numBlocks(updiv(w_canvas, threadsPerBlock.x), updiv(h_canvas, threadsPerBlock.y));
for(int i = 0; i < N; i++){ //N is always 1 for website
int num_people_this_frame = num_people[i];
//LOG(ERROR) << "num_people_this_frame: " << num_people_this_frame << " ratio_to_origin: " << ratio_to_origin;
if(num_people_this_frame != 0){
if(part == 0){
// render_pose_website<<<threadsPerBlock, numBlocks>>>
VLOG(4) << "num_people_this_frame: " << num_people_this_frame << " ratio_to_origin: " << ratio_to_origin;
hipLaunchKernelGGL(( render_pose_29parts), dim3(threadsPerBlock), dim3(numBlocks), 0, 0, canvas, w_canvas, h_canvas, ratio_to_origin,
poses+offset_pose_so_far, boxsize,
num_people_this_frame, threshold);
}
else if (part > 0) {
//render_pose_website_heatmap<<<threadsPerBlock, numBlocks>>>
//LOG(ERROR) << "GPU part num: " << part-1;
hipLaunchKernelGGL(( render_pose_29parts_heatmap), dim3(threadsPerBlock), dim3(numBlocks), 0, 0, canvas, w_canvas, h_canvas, w_net, h_net,
heatmaps+offset_heatmap_so_far, num_people_this_frame,
part-1);
}
} else {
if (part > 0) {
hipLaunchKernelGGL(( render_pose_29parts_heatmap), dim3(threadsPerBlock), dim3(numBlocks), 0, 0, canvas, w_canvas, h_canvas, w_net, h_net,
heatmaps+offset_heatmap_so_far, num_people_this_frame,
part-1);
// render_pose_website_heatmap_empty<<<threadsPerBlock, numBlocks>>>(canvas, w_canvas, h_canvas);
}
}
//LOG(ERROR) << "num_people[i] = " << num_people[i];
hipDeviceSynchronize();
offset_pose_so_far += offset_pose * num_people[i];
offset_heatmap_so_far += offset_heatmap * num_people[i];
}
//
//LOG(ERROR) << "render_done";
}
//////////////////////////////////////////////////////////////////////////////
// COCO
__global__ void render_pose_coco_parts(float* dst_pointer, int w_canvas, int h_canvas, float ratio_to_origin,
float* poses, int boxsize, int num_people, float threshold, bool googly_eyes){
const int NUM_PARTS = 18;
  //poses has length 3 * NUM_PARTS * num_people
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
//int plotted = 0;
int global_idx = threadIdx.y * blockDim.x + threadIdx.x;
__shared__ float shared_poses[NUM_PARTS*3*RENDER_MAX_PEOPLE];
__shared__ float2 shared_mins[RENDER_MAX_PEOPLE];
__shared__ float2 shared_maxs[RENDER_MAX_PEOPLE];
__shared__ float2 shared_scalef[RENDER_MAX_PEOPLE];
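  // One thread per person: stage its keypoints and compute a bounding box (padded
  // by 50 px) plus a size-dependent scale factor that shrinks stick width and joint
  // radius for small figures.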
if(global_idx < num_people ){
int p = global_idx;
shared_mins[p].x = w_canvas;
shared_mins[p].y = h_canvas;
shared_maxs[p].x = 0;
shared_maxs[p].y = 0;
for (int part=0;part<NUM_PARTS;part++) {
float x = poses[p*NUM_PARTS*3 + part*3];
float y = poses[p*NUM_PARTS*3 + part*3+1];
float z = poses[p*NUM_PARTS*3 + part*3+2];
shared_poses[p*NUM_PARTS*3 + part*3] = x;
shared_poses[p*NUM_PARTS*3 + part*3+1] = y; //y
shared_poses[p*NUM_PARTS*3 + part*3+2] = z; //v
if (z>threshold) {
if (x<shared_mins[p].x) shared_mins[p].x = x;
if (x>shared_maxs[p].x) shared_maxs[p].x = x;
if (y<shared_mins[p].y) shared_mins[p].y = y;
if (y>shared_maxs[p].y) shared_maxs[p].y = y;
}
}
shared_scalef[p].x = shared_maxs[p].x-shared_mins[p].x;
shared_scalef[p].y = shared_maxs[p].y-shared_mins[p].y;
shared_scalef[p].x = (shared_scalef[p].x+shared_scalef[p].y)/2.0;
if (shared_scalef[p].x<200) {
shared_scalef[p].x = shared_scalef[p].x/200;
if (shared_scalef[p].x<0.33) shared_scalef[p].x = 0.33;
} else {
shared_scalef[p].x = 1.0;
}
shared_maxs[p].x += 50;
shared_maxs[p].y += 50;
shared_mins[p].x -= 50;
shared_mins[p].y -= 50;
}
__syncthreads();
const int limb[] = LIMB_COCO_NOEAR;
const int nlimb = sizeof(limb)/(2*sizeof(int));
/*
const int color[27] = {255, 0, 0,
255, 170, 0,
170, 255, 0,
0, 255, 0,
0, 255, 170,
0, 170, 255,
0, 0, 255,
170, 0, 255,
255, 0, 170};
*/
const int color[] = {
255, 0, 0,
255, 85, 0,
255, 170, 0,
255, 255, 0,
170, 255, 0,
85, 255, 0,
0, 255, 0,
0, 255, 85,
0, 255, 170,
0, 255, 255,
0, 170, 255,
0, 85, 255,
0, 0, 255,
85, 0, 255,
170, 0, 255,
255, 0, 255,
255, 0, 170,
255, 0, 85};
const int nColor = sizeof(color)/(3*sizeof(int));
//float offset = ratio_to_origin * 0.5 - 0.5;
float radius = 2*h_canvas / 200.0f;
float stickwidth = h_canvas / 120.0f;
if(x < w_canvas && y < h_canvas){
//if(x == 0 && y == 0){
float b, g, r;
// b = 255 * 0.7 + 0.3 * (image_ref[y*w + x] + 0.5) * 256;
// g = 255 * 0.7 + 0.3 * (image_ref[w*h + y*w + x] + 0.5) * 256;
// r = 255 * 0.7 + 0.3 * (image_ref[2*w*h + y*w + x] + 0.5) * 256;
b = dst_pointer[ y * w_canvas + x];
g = dst_pointer[ w_canvas * h_canvas + y * w_canvas + x];
r = dst_pointer[2 * w_canvas * h_canvas + y * w_canvas + x];
for(int p = 0; p < num_people; p++){
if (x>shared_maxs[p].x || x<shared_mins[p].x
|| y>shared_maxs[p].y || y<shared_mins[p].y) {
continue;
}
for(int l = 0; l < nlimb; l++){
float b_sqrt = shared_scalef[p].x*shared_scalef[p].x*stickwidth * stickwidth; //fixed
float alpha = 0.5;
int part_a = limb[2*l];
int part_b = limb[2*l+1];
float x_a = (shared_poses[p*NUM_PARTS*3 + part_a*3]); // * ratio_to_origin + offset;
float x_b = (shared_poses[p*NUM_PARTS*3 + part_b*3]); // * ratio_to_origin + offset;
float y_a = (shared_poses[p*NUM_PARTS*3 + part_a*3 + 1]); // * ratio_to_origin + offset;
float y_b = (shared_poses[p*NUM_PARTS*3 + part_b*3 + 1]); // * ratio_to_origin + offset;
float value_a = shared_poses[p*NUM_PARTS*3 + part_a*3 + 2];
float value_b = shared_poses[p*NUM_PARTS*3 + part_b*3 + 2];
if (0 && (l==nlimb-1 || l==nlimb-5)) {
float x_c = (shared_poses[p*NUM_PARTS*3 + 14*3 + 0]); // * ratio_to_origin + offset;
float y_c = (shared_poses[p*NUM_PARTS*3 + 14*3 + 1]); // * ratio_to_origin + offset;
float value_c = shared_poses[p*NUM_PARTS*3 + 14*3 + 2];
if (value_c>threshold) {
x_b = (x_c+x_b)/2;
y_b = (y_c+y_b)/2;
} else {
continue;
}
}
if(value_a > threshold && value_b > threshold){
float x_p = (x_a + x_b) / 2;
float y_p = (y_a + y_b) / 2;
float angle = atan2f(y_b - y_a, x_b - x_a);
float sine = sinf(angle);
float cosine = cosf(angle);
float a_sqrt = (x_a - x_p) * (x_a - x_p) + (y_a - y_p) * (y_a - y_p);
float A = cosine * (x - x_p) + sine * (y - y_p);
float B = sine * (x - x_p) - cosine * (y - y_p);
float judge = A * A / a_sqrt + B * B / b_sqrt;
float minV = 0;
float maxV = 1;
float3 co;
co.x = color[(l%nColor)*3+0];
co.y = color[(l%nColor)*3+1];
co.z = color[(l%nColor)*3+2];
if ( 0 && (l==nlimb-4 || l==nlimb-1 || l==nlimb-2 || l==nlimb-3 || l==nlimb-5)) {
// float nx = cosine;
// float ny = sine;
// float px = nx*(x-x_a) + ny*(y-y_a);
float lw = 8;
if (l==nlimb-1) {
lw = 2;
}
if (B>-lw && B<lw) {
judge = A/sqrt(a_sqrt);
} else {
judge = 2;
}
minV = -1;
maxV = 1;
alpha = 0.9;
co.x = 0; co.y = 0; co.z = 0;
if (l==nlimb-5) {
maxV = -0.3;
alpha = 0.3*(1-(judge+1)/0.8);
co.x = 255; co.y = 255; co.z = 255;
}
}
if(judge>= minV && judge <= maxV){
b = (1-alpha) * b + alpha * co.z;
g = (1-alpha) * g + alpha * co.y;
r = (1-alpha) * r + alpha * co.x;
//plotted = 1;
}
}
}
for(int i = 0; i < NUM_PARTS; i++) { //for every point
float local_x = shared_poses[p*NUM_PARTS*3 + i*3];
float local_y = shared_poses[p*NUM_PARTS*3 + i*3 + 1];
float value = shared_poses[p*NUM_PARTS*3 + i*3 + 2];
if(value > threshold) {
float dist2 = (x - local_x) * (x - local_x) + (y - local_y) * (y - local_y);
float minr2 = 0;
float maxr2 = shared_scalef[p].x*shared_scalef[p].x*radius * radius;
float alpha = 0.6;
float3 co;
co.x = color[(i%nColor)*3+0];
co.y = color[(i%nColor)*3+1];
co.z = color[(i%nColor)*3+2];
if (googly_eyes && (i==14 || i==15)) {
maxr2 = shared_scalef[p].x*shared_scalef[p].x*2.5*2.5*radius*radius;
minr2 = shared_scalef[p].x*shared_scalef[p].x*(2.5*radius-2)*(2.5*radius-2);
alpha = 0.9;
co.x = 0; co.y = 0; co.z = 0;
if(dist2 <= maxr2){
if(dist2 <= minr2) {
co.x = 255; co.y = 255; co.z = 255;
}
if(dist2 <= minr2*0.6) {
float dist3 = (x-4 - local_x) * (x-4 - local_x) + (y - local_y+4) * (y - local_y+4);
if (dist3>3.75*3.75) {
co.x = 0; co.y = 0; co.z = 0;
}
}
b = (1-alpha) * b + alpha * co.z;
g = (1-alpha) * g + alpha * co.y;
r = (1-alpha) * r + alpha * co.x;
}
} else {
if (0 && i==0) {
alpha = 0.9;
maxr2 = maxr2*2;
co.x = 0; co.y = 0; co.x = 255;
}
if(dist2>=minr2 && dist2 <= maxr2){
b = (1-alpha) * b + alpha * co.z;
g = (1-alpha) * g + alpha * co.y;
r = (1-alpha) * r + alpha * co.x;
}
}
}
}
}
dst_pointer[ y * w_canvas + x] = b; //plot dot
dst_pointer[ w_canvas * h_canvas + y * w_canvas + x] = g;
dst_pointer[2 * w_canvas * h_canvas + y * w_canvas + x] = r;
// if(x==0 && y==0){
// printf("exiting\n");
// }
}
}
__global__ void render_pose_coco_heatmap(float* dst_pointer, int w_canvas, int h_canvas, int w_net,
int h_net, float* heatmaps, int num_people, int part){
const int NUM_PARTS = 18;
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
//int global_idx = threadIdx.y * blockDim.x + threadIdx.x;
__syncthreads();
if(x < w_canvas && y < h_canvas){
    //heatmaps has length w_net * h_net * NUM_PARTS
int offset3 = w_net * h_net * NUM_PARTS;
int offset2 = w_net * h_net;
float b, g, r;
float value = (part == NUM_PARTS-1) ? 1 : 0;
float h_inv = (float)h_net / (float)h_canvas;
float w_inv = (float)w_net / (float)w_canvas;
// b = 255 * 0.7 + 0.3 * (image_ref[y*w + x] + 0.5) * 256;
// g = 255 * 0.7 + 0.3 * (image_ref[w*h + y*w + x] + 0.5) * 256;
// r = 255 * 0.7 + 0.3 * (image_ref[2*w*h + y*w + x] + 0.5) * 256;
b = dst_pointer[ y * w_canvas + x];
g = dst_pointer[ w_canvas * h_canvas + y * w_canvas + x];
r = dst_pointer[2 * w_canvas * h_canvas + y * w_canvas + x];
for(int p = 0; p < 1; p++){
float x_on_box = w_inv * x + (0.5 * w_inv - 0.5);
float y_on_box = h_inv * y + (0.5 * h_inv - 0.5);
if(x_on_box >= 0 && x_on_box < w_net && y_on_box >=0 && y_on_box < h_net){
float value_this;
int x_nei[4];
x_nei[1] = int(x_on_box + 1e-5);
x_nei[1] = (x_nei[1] < 0) ? 0 : x_nei[1];
x_nei[0] = (x_nei[1] - 1 < 0) ? x_nei[1] : (x_nei[1] - 1);
x_nei[2] = (x_nei[1] + 1 >= w_net) ? (w_net - 1) : (x_nei[1] + 1);
x_nei[3] = (x_nei[2] + 1 >= w_net) ? (w_net - 1) : (x_nei[2] + 1);
float dx = x_on_box - x_nei[1];
int y_nei[4];
y_nei[1] = int(y_on_box + 1e-5);
y_nei[1] = (y_nei[1] < 0) ? 0 : y_nei[1];
y_nei[0] = (y_nei[1] - 1 < 0) ? y_nei[1] : (y_nei[1] - 1);
y_nei[2] = (y_nei[1] + 1 >= h_net) ? (h_net - 1) : (y_nei[1] + 1);
y_nei[3] = (y_nei[2] + 1 >= h_net) ? (h_net - 1) : (y_nei[2] + 1);
float dy = y_on_box - y_nei[1];
float temp[4];
int offset_src = p * offset3 + part * offset2;
for(int i = 0; i < 4; i++){
cubic_interpolation(temp[i], heatmaps[offset_src + y_nei[i]*w_net + x_nei[0]],
heatmaps[offset_src + y_nei[i]*w_net + x_nei[1]],
heatmaps[offset_src + y_nei[i]*w_net + x_nei[2]],
heatmaps[offset_src + y_nei[i]*w_net + x_nei[3]], dx);
}
cubic_interpolation(value_this, temp[0], temp[1], temp[2], temp[3], dy);
// if(part != 14){
// if(value_this > value)
// value = value_this;
// } else {
// if(value_this < value)
value = value_this;
// }
}
}
float c[3];
if (part<NUM_PARTS+1){
getColor(c, value, 0, 1);
} else {
getColor(c, value, -1,1);
}
float alpha = 0.7;
b = (1-alpha) * b + alpha * c[2];
g = (1-alpha) * g + alpha * c[1];
r = (1-alpha) * r + alpha * c[0];
dst_pointer[ y * w_canvas + x] = b; //plot dot
dst_pointer[ w_canvas * h_canvas + y * w_canvas + x] = g;
dst_pointer[2 * w_canvas * h_canvas + y * w_canvas + x] = r;
// if(x==0 && y==0){
// printf("exiting\n");
// }
}
}
__global__ void render_pose_coco_heatmap2(float* dst_pointer, int w_canvas, int h_canvas, int w_net,
int h_net, float* heatmaps, int num_people, int in_part){
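  // Overlay all part channels from in_part upward in one pass: each channel is
  // sampled nearest-neighbour, clamped to [0, 1], tinted with its part color, and
  // the tinted contributions are summed before alpha-blending onto the canvas.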
const int NUM_PARTS = 18;
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
//int global_idx = threadIdx.y * blockDim.x + threadIdx.x;
const int color[] = {
255, 0, 0,
255, 85, 0,
255, 170, 0,
255, 255, 0,
170, 255, 0,
85, 255, 0,
0, 255, 0,
0, 255, 85,
0, 255, 170,
0, 255, 255,
0, 170, 255,
0, 85, 255,
0, 0, 255,
85, 0, 255,
170, 0, 255,
255, 0, 255,
255, 0, 170,
255, 0, 85};
const int nColor = sizeof(color)/(3*sizeof(int));
__syncthreads();
if(x < w_canvas && y < h_canvas){
    //heatmaps has length w_net * h_net * NUM_PARTS
int offset3 = w_net * h_net * NUM_PARTS;
int offset2 = w_net * h_net;
float b, g, r;
float c[3];
c[0] = 0;
c[1] = 0;
c[2] = 0;
float value = 0;
float h_inv = (float)h_net / (float)h_canvas;
float w_inv = (float)w_net / (float)w_canvas;
// b = 255 * 0.7 + 0.3 * (image_ref[y*w + x] + 0.5) * 256;
// g = 255 * 0.7 + 0.3 * (image_ref[w*h + y*w + x] + 0.5) * 256;
// r = 255 * 0.7 + 0.3 * (image_ref[2*w*h + y*w + x] + 0.5) * 256;
b = dst_pointer[ y * w_canvas + x];
g = dst_pointer[ w_canvas * h_canvas + y * w_canvas + x];
r = dst_pointer[2 * w_canvas * h_canvas + y * w_canvas + x];
for(int p = 0; p < 1; p++){
for (int part=in_part;part<NUM_PARTS;part++) {
float x_on_box = w_inv * x + (0.5 * w_inv - 0.5);
float y_on_box = h_inv * y + (0.5 * h_inv - 0.5);
if(x_on_box >= 0 && x_on_box < w_net && y_on_box >=0 && y_on_box < h_net){
//float value_this;
int x_nei[4];
x_nei[1] = int(x_on_box + 1e-5);
x_nei[1] = (x_nei[1] < 0) ? 0 : x_nei[1];
x_nei[0] = (x_nei[1] - 1 < 0) ? x_nei[1] : (x_nei[1] - 1);
x_nei[2] = (x_nei[1] + 1 >= w_net) ? (w_net - 1) : (x_nei[1] + 1);
x_nei[3] = (x_nei[2] + 1 >= w_net) ? (w_net - 1) : (x_nei[2] + 1);
//float dx = x_on_box - x_nei[1];
int y_nei[4];
y_nei[1] = int(y_on_box + 1e-5);
y_nei[1] = (y_nei[1] < 0) ? 0 : y_nei[1];
y_nei[0] = (y_nei[1] - 1 < 0) ? y_nei[1] : (y_nei[1] - 1);
y_nei[2] = (y_nei[1] + 1 >= h_net) ? (h_net - 1) : (y_nei[1] + 1);
y_nei[3] = (y_nei[2] + 1 >= h_net) ? (h_net - 1) : (y_nei[2] + 1);
//float dy = y_on_box - y_nei[1];
//float temp[4];
int offset_src = p * offset3 + part * offset2;
value = heatmaps[offset_src + y_nei[1]*w_net + x_nei[1]];
// if(part != 14){
// if(value_this > value)
// value = value_this;
// } else {
// if(value_this < value)
          value = __saturatef(value);  // clamp the sampled heatmap value to [0, 1]
c[0] += value*color[(part%nColor)*3+0];
c[1] += value*color[(part%nColor)*3+1];
c[2] += value*color[(part%nColor)*3+2];
// }
}
}
}
// if (part<NUM_PARTS+1){
// getColor(c, value, 0, 1);
// } else {
// getColor(c, value, -1,1);
// }
float alpha = 0.7;
b = (1-alpha) * b + alpha * c[2];
g = (1-alpha) * g + alpha * c[1];
r = (1-alpha) * r + alpha * c[0];
dst_pointer[ y * w_canvas + x] = b; //plot dot
dst_pointer[ w_canvas * h_canvas + y * w_canvas + x] = g;
dst_pointer[2 * w_canvas * h_canvas + y * w_canvas + x] = r;
// if(x==0 && y==0){
// printf("exiting\n");
// }
}
}
__global__ void render_pose_coco_affinity(float* dst_pointer, int w_canvas, int h_canvas, int w_net,
int h_net, float* heatmaps, int num_parts_accum, int num_people, int in_part){
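  // Visualize part-affinity fields: channels come in (x, y) pairs starting at
  // in_part.  With a single pair the field is sampled bilinearly, otherwise
  // nearest-neighbour; each vector is colorized with getColorXY, the contributions
  // are summed and clamped to 255, then alpha-blended onto the canvas.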
const int NUM_PARTS = 18;
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
//int global_idx = threadIdx.y * blockDim.x + threadIdx.x;
__syncthreads();
if(x < w_canvas && y < h_canvas){
    //heatmaps has length w_net * h_net * NUM_PARTS
int offset3 = w_net * h_net * NUM_PARTS;
int offset2 = w_net * h_net;
float b, g, r;
float c[3];
int count = 0;
c[0] = 0; c[1] = 0; c[2] = 0;
float value = 0;
float value2 = 0;
float h_inv = (float)h_net / (float)h_canvas;
float w_inv = (float)w_net / (float)w_canvas;
// b = 255 * 0.7 + 0.3 * (image_ref[y*w + x] + 0.5) * 256;
// g = 255 * 0.7 + 0.3 * (image_ref[w*h + y*w + x] + 0.5) * 256;
// r = 255 * 0.7 + 0.3 * (image_ref[2*w*h + y*w + x] + 0.5) * 256;
b = dst_pointer[ y * w_canvas + x];
g = dst_pointer[ w_canvas * h_canvas + y * w_canvas + x];
r = dst_pointer[2 * w_canvas * h_canvas + y * w_canvas + x];
for(int p = 0; p < 1; p++){
for (int part=in_part;part<in_part+num_parts_accum*2;part+=2) {
float x_on_box = w_inv * x + (0.5 * w_inv - 0.5);
float y_on_box = h_inv * y + (0.5 * h_inv - 0.5);
if(x_on_box >= 0 && x_on_box < w_net && y_on_box >=0 && y_on_box < h_net){
int x_nei[4];
x_nei[1] = int(x_on_box + 1e-5);
x_nei[1] = (x_nei[1] < 0) ? 0 : x_nei[1];
x_nei[0] = (x_nei[1] - 1 < 0) ? x_nei[1] : (x_nei[1] - 1);
x_nei[2] = (x_nei[1] + 1 >= w_net) ? (w_net - 1) : (x_nei[1] + 1);
x_nei[3] = (x_nei[2] + 1 >= w_net) ? (w_net - 1) : (x_nei[2] + 1);
float dx = x_on_box - x_nei[1];
int y_nei[4];
y_nei[1] = int(y_on_box + 1e-5);
y_nei[1] = (y_nei[1] < 0) ? 0 : y_nei[1];
y_nei[0] = (y_nei[1] - 1 < 0) ? y_nei[1] : (y_nei[1] - 1);
y_nei[2] = (y_nei[1] + 1 >= h_net) ? (h_net - 1) : (y_nei[1] + 1);
y_nei[3] = (y_nei[2] + 1 >= h_net) ? (h_net - 1) : (y_nei[2] + 1);
float dy = y_on_box - y_nei[1];
//float temp[4];
int offset_src = p * offset3 + part * offset2;
if (num_parts_accum==1) {
// for(int i = 0; i < 4; i++){
// cubic_interpolation(temp[i], heatmaps[offset_src + y_nei[i]*w_net + x_nei[0]],
// heatmaps[offset_src + y_nei[i]*w_net + x_nei[1]],
// heatmaps[offset_src + y_nei[i]*w_net + x_nei[2]],
// heatmaps[offset_src + y_nei[i]*w_net + x_nei[3]], dx);
// }
//
// cubic_interpolation(value, temp[0], temp[1], temp[2], temp[3], dy);
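            // Bilinear interpolation of the field's x-component from the four
            // nearest heatmap texels (the y-component follows in the next block).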
{
float a = heatmaps[offset_src + y_nei[1]*w_net + x_nei[1]];
float b = heatmaps[offset_src + y_nei[1]*w_net + x_nei[2]];
float c = heatmaps[offset_src + y_nei[2]*w_net + x_nei[1]];
float d = heatmaps[offset_src + y_nei[2]*w_net + x_nei[2]];
value = (1-dx)*(1-dy)*a
+ (dx)*(1-dy)*b
+ (1-dx)*(dy)*c
+ (dx)*(dy)*d;
}
offset_src = p * offset3 + (part+1) * offset2;
{
float a = heatmaps[offset_src + y_nei[1]*w_net + x_nei[1]];
float b = heatmaps[offset_src + y_nei[1]*w_net + x_nei[2]];
float c = heatmaps[offset_src + y_nei[2]*w_net + x_nei[1]];
float d = heatmaps[offset_src + y_nei[2]*w_net + x_nei[2]];
value2 = (1-dx)*(1-dy)*a
+ (dx)*(1-dy)*b
+ (1-dx)*(dy)*c
+ (dx)*(dy)*d;
}
// for(int i = 0; i < 4; i++){
// cubic_interpolation(temp[i], heatmaps[offset_src + y_nei[i]*w_net + x_nei[0]],
// heatmaps[offset_src + y_nei[i]*w_net + x_nei[1]],
// heatmaps[offset_src + y_nei[i]*w_net + x_nei[2]],
// heatmaps[offset_src + y_nei[i]*w_net + x_nei[3]], dx);
// }
// cubic_interpolation(value2, temp[0], temp[1], temp[2], temp[3], dy);
} else {
value = heatmaps[offset_src + y_nei[1]*w_net + x_nei[1]];
offset_src = p * offset3 + (part+1) * offset2;
value2 = heatmaps[offset_src + y_nei[1]*w_net + x_nei[1]];
}
//
// if(part != 14){
// if(value_this > value)
// value = value_this;
// } else {
// if(value_this < value)
float c2[3];
// if (part%2==0) {
// value = (x-320)/sqrtf( (180)*(180) + (180)*(180));
// value2 = (y-180)/sqrtf( (180)*(180) + (180)*(180));
// }
getColorXY(c2, value, value2);
c[0] += c2[0];
c[1] += c2[1];
c[2] += c2[2];
count++;
// }
}
}
}
if (c[0]>255) c[0] = 255;
if (c[1]>255) c[1] = 255;
if (c[2]>255) c[2] = 255;
// c[0] /= count;
// c[1] /= count;
// c[2] /= count;
float alpha = 0.7;
b = (1-alpha) * b + alpha * c[2];
g = (1-alpha) * g + alpha * c[1];
r = (1-alpha) * r + alpha * c[0];
dst_pointer[ y * w_canvas + x] = b; //plot dot
dst_pointer[ w_canvas * h_canvas + y * w_canvas + x] = g;
dst_pointer[2 * w_canvas * h_canvas + y * w_canvas + x] = r;
// if(x==0 && y==0){
// printf("exiting\n");
// }
}
}
void render_coco_parts(float* canvas, int w_canvas, int h_canvas, int w_net, int h_net,
float* heatmaps, int boxsize, float* centers, float* poses, vector<int> num_people, int part, bool googly_eyes){
//canvas, image in width * height * 3 * N
  //heatmaps in w_net * h_net * NUM_PARTS * (P1+P2+...+PN)
//centers in 2 * 11 * 1 * N
  //poses in 3 * 1 * NUM_PARTS * (P1+P2+...+PN)
//num_people has length P, indicating P1, ..., PN
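  //part == 0 draws the skeleton; 1 <= part < 58 draws heatmap channel part-1, and
  //part-1 == NUM_PARTS selects the combined all-parts overlay (render_pose_coco_heatmap2)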
const int NUM_PARTS = 18;
int N = 1;//num_people.size(); //batch size
//LOG(ERROR) << "Number of frames in batch: " << N;
//int count = 0;
//int offset_canvas = w_canvas * h_canvas * 3; // 3 because we only render one image here
  int offset_heatmap = w_net * h_net * NUM_PARTS; // stride of one frame's heatmap block
//int offset_info = 33; //22
int offset_pose = NUM_PARTS*3;
float threshold = 0.01;
int offset_pose_so_far = 0;
int offset_heatmap_so_far = 0;
float ratio_to_origin = (float)h_canvas / (float)h_net;
dim3 threadsPerBlock(numThreadsPerBlock_1d, numThreadsPerBlock_1d);
dim3 numBlocks(updiv(w_canvas, threadsPerBlock.x), updiv(h_canvas, threadsPerBlock.y));
for(int i = 0; i < N; i++){ //N is always 1 for website
int num_people_this_frame = num_people[i];
//LOG(ERROR) << "num_people_this_frame: " << num_people_this_frame << " ratio_to_origin: " << ratio_to_origin;
if(part == 0 ){
// render_pose_website<<<threadsPerBlock, numBlocks>>>
VLOG(4) << "num_people_this_frame: " << num_people_this_frame << " ratio_to_origin: " << ratio_to_origin;
if(num_people_this_frame != 0){
hipLaunchKernelGGL(( render_pose_coco_parts), dim3(threadsPerBlock), dim3(numBlocks), 0, 0, canvas, w_canvas, h_canvas, ratio_to_origin,
poses+offset_pose_so_far, boxsize,
num_people_this_frame, threshold, googly_eyes);
}
} else if (part > 0 && part<58) {
//render_pose_website_heatmap<<<threadsPerBlock, numBlocks>>>
//LOG(ERROR) << "GPU part num: " << part-1;
if (part-1==NUM_PARTS) {
hipLaunchKernelGGL(( render_pose_coco_heatmap2), dim3(threadsPerBlock), dim3(numBlocks), 0, 0, canvas, w_canvas, h_canvas, w_net, h_net,
heatmaps+offset_heatmap_so_far, num_people_this_frame,
0);
} else {
hipLaunchKernelGGL(( render_pose_coco_heatmap), dim3(threadsPerBlock), dim3(numBlocks), 0, 0, canvas, w_canvas, h_canvas, w_net, h_net,
heatmaps+offset_heatmap_so_far, num_people_this_frame,
part-1);
}
}
//LOG(ERROR) << "num_people[i] = " << num_people[i];
CUDA_CHECK(hipDeviceSynchronize());
offset_pose_so_far += offset_pose * num_people[i];
offset_heatmap_so_far += offset_heatmap * num_people[i];
}
//
//LOG(ERROR) << "render_done";
}
void render_coco_aff(float* canvas, int w_canvas, int h_canvas, int w_net, int h_net,
float* heatmaps, int boxsize, float* centers, float* poses,
vector<int> num_people, int part, int num_parts_accum){
//canvas, image in width * height * 3 * N
  //heatmaps in w_net * h_net * NUM_PARTS * (P1+P2+...+PN)
//centers in 2 * 11 * 1 * N
  //poses in 3 * 1 * NUM_PARTS * (P1+P2+...+PN)
//num_people has length P, indicating P1, ..., PN
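  //part is the first affinity channel to draw; num_parts_accum (x, y) channel pairs
  //are overlaid per call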
const int NUM_PARTS = 18;
int N = 1;//num_people.size(); //batch size
//LOG(ERROR) << "Number of frames in batch: " << N;
//int count = 0;
//int offset_canvas = w_canvas * h_canvas * 3; // 3 because we only render one image here
  int offset_heatmap = w_net * h_net * NUM_PARTS; // stride of one frame's heatmap block
//int offset_info = 33; //22
int offset_pose = NUM_PARTS*3;
//float threshold = 0.01;
int offset_pose_so_far = 0;
int offset_heatmap_so_far = 0;
//float ratio_to_origin = (float)h_canvas / (float)h_net;
dim3 threadsPerBlock(numThreadsPerBlock_1d, numThreadsPerBlock_1d);
dim3 numBlocks(updiv(w_canvas, threadsPerBlock.x), updiv(h_canvas, threadsPerBlock.y));
for(int i = 0; i < N; i++){ //N is always 1 for website
int num_people_this_frame = num_people[i];
//LOG(ERROR) << "num_people_this_frame: " << num_people_this_frame << " ratio_to_origin: " << ratio_to_origin;
int aff_part = part;
hipLaunchKernelGGL(( render_pose_coco_affinity), dim3(threadsPerBlock), dim3(numBlocks), 0, 0, canvas, w_canvas, h_canvas, w_net, h_net,
heatmaps+offset_heatmap_so_far, num_parts_accum, num_people_this_frame,
aff_part);
// render_pose_website_heatmap_empty<<<threadsPerBlock, numBlocks>>>(canvas, w_canvas, h_canvas);
//LOG(ERROR) << "num_people[i] = " << num_people[i];
CUDA_CHECK(hipDeviceSynchronize());
offset_pose_so_far += offset_pose * num_people[i];
offset_heatmap_so_far += offset_heatmap * num_people[i];
}
//
//LOG(ERROR) << "render_done";
}
} // namespace caffe
|
368d8f80df0ec649f28965c0cc8c344a53a079bb.cu
|
#include <math_functions.h> // CUDA's, not caffe's, for fabs, signbit
#include <thrust/device_vector.h>
#include <thrust/functional.h> // thrust::plus
#include <thrust/reduce.h>
#include <cmath>
#include "caffe/common.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/util/render_functions.hpp"
#define numThreadsPerBlock_1d 32
#define numThreadsPerBlock 1024
#define LIMB_MPI {0,1, 2,3, 3,4, 5,6, 6,7, 8,9, 9,10, 11,12, 12,13}
#define LIMB_COCO {1,2, 1,5, 2,3, 3,4, 5,6, 6,7, 1,8, 8,9, 9,10, 1,11, 11,12, 12,13, 1,0, 0,14, 14,16, 0,15, 15,17, 2,16, 5,17}
#define LIMB_COCO_NOEAR {1,2, 1,5, 2,3, 3,4, 5,6, 6,7, 1,8, 8,9, 9,10, 1,11, 11,12, 12,13, 1,0, 0,14, 14,16, 0,15, 15,17}
//#define LIMB_COCO_NOEAR {1,2, 1,5, 2,3, 3,4, 5,6, 6,7, 1,8, 8,9, 9,10, 1,11, 11,12, 12,13, 1,15, 14,15, 14,16, 15,17, 0,15}
namespace caffe {
inline __device__ void getColor(float* c, float v, float vmin, float vmax)
{
c[0] = c[1] = c[2] = 255; // b, g, r, white
float dv;
if (v < vmin)
v = vmin;
if (v > vmax)
v = vmax;
dv = vmax - vmin;
if (v < (vmin + 0.125 * dv)) {
c[0] = 256 * (0.5 + (v * 4)); //B: 0.5 ~ 1
c[1] = c[2] = 0;
} else if (v < (vmin + 0.375 * dv)) {
c[0] = 255;
c[1] = 256 * (v - 0.125) * 4; //G: 0 ~ 1
c[2] = 0;
} else if (v < (vmin + 0.625 * dv)) {
c[0] = 256 * (-4 * v + 2.5); //B: 1 ~ 0
c[1] = 255;
c[2] = 256 * (4 * (v - 0.375)); //R: 0 ~ 1
} else if (v < (vmin + 0.875 * dv)) {
c[0] = 0;
c[1] = 256 * (-4 * v + 3.5); //G: 1 ~ 0
c[2] = 255;
} else {
c[0] = 0;
c[1] = 0;
c[2] = 256 * (-4 * v + 4.5); //R: 1 ~ 0.5
}
}
inline __device__ void getColor2(float* c, float v, float vmin, float vmax)
{
c[0] = c[1] = c[2] = 255; // b, g, r, white
if (v < vmin)
v = vmin;
if (v > vmax)
v = vmax;
v = 55*v;
const int RY = 15;
const int YG = 6;
const int GC = 4;
const int CB = 11;
const int BM = 13;
const int MR = 6;
//const int ncols = RY+YG+GC+CB+BM+MR; // 55
if (v < RY) {
c[0] = 255; //B: 0.5 ~ 1
c[1] = 255*(v/(RY));
c[2] = 0;
} else if (v < RY+YG) {
c[0] = 255 - 255*((v-RY)/(YG));
c[1] = 255;
c[2] = 0;
} else if (v < RY+YG+GC) {
c[0] = 0;
c[1] = 255;
c[2] = 255*((v-RY-YG)/(GC));
} else if (v < RY+YG+GC+CB) {
c[0] = 0;
c[1] = 255 - 255*((v-RY-YG-GC)/(CB));
c[2] = 255;
} else if (v < RY+YG+GC+CB+BM) {
c[0] = 255*((v-RY-YG-GC-CB)/(BM));
c[1] = 0;
c[2] = 255;
} else if (v < RY+YG+GC+CB+BM+MR) {
c[0] = 255;
c[1] = 0;
c[2] = 255-255*((v-RY-YG-GC-CB-BM)/(MR));
} else {
c[0] = 255;
c[1] = 0;
c[2] = 0;
}
}
inline __device__ void getColorXY(float* c, float x, float y) {
float rad = sqrt( x*x + y*y );
float a = atan2(-y,-x)/M_PI;
float fk = (a+1)/2.0; // 0 to 1
if (::isnan(fk)) fk = 0;
// fk = 1-exp(-fk*2);
if (rad>1) rad = 1;
//if (rad>0.5) rad = 1;
getColor2(c, fk, 0, 1);
// c[0] = 255*(1 - rad*(1-c[0]/255));
// c[1] = 255*(1 - rad*(1-c[1]/255));
// c[2] = 255*(1 - rad*(1-c[2]/255));
c[0] = 255*(rad*(c[0]/255));
c[1] = 255*(rad*(c[1]/255));
c[2] = 255*(rad*(c[2]/255));
}
inline __device__ void cubic_interpolation(float &out, float &v0, float &v1, float &v2, float &v3, float dx) {
// Dtype a = (-0.5f * v0 + 1.5f * v1 - 1.5f * v2 + 0.5f * v3);
// Dtype b = (v0 - 2.5f * v1 + 2.0 * v2 - 0.5 * v3);
// Dtype c = (-0.5f * v0 + 0.5f * v2);
// out = ((a * dx + b) * dx + c) * dx + v1;
out = (-0.5f * v0 + 1.5f * v1 - 1.5f * v2 + 0.5f * v3) * dx * dx * dx
+ (v0 - 2.5f * v1 + 2.0 * v2 - 0.5 * v3) * dx * dx
+ (-0.5f * v0 + 0.5f * v2) * dx
+ v1;
}
__global__ void render_pose_29parts(float* dst_pointer, int w_canvas, int h_canvas, float ratio_to_origin,
float* poses, int boxsize, int num_people, float threshold){
const int NUM_PARTS = 15;
//poses has length 3 * 15 * num_people
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
//int plotted = 0;
int global_idx = threadIdx.y * blockDim.x + threadIdx.x;
__shared__ float shared_poses[NUM_PARTS*3*RENDER_MAX_PEOPLE];
if(global_idx < num_people * NUM_PARTS){
shared_poses[3*global_idx] = poses[3*global_idx]; //x
shared_poses[3*global_idx+1] = poses[3*global_idx+1]; //y
shared_poses[3*global_idx+2] = poses[3*global_idx+2]; //v
}
__syncthreads();
const int limb[] = LIMB_MPI;
const int nlimb = sizeof(limb)/(2*sizeof(int));
int color[27] = {255, 0, 0,
255, 170, 0,
170, 255, 0,
0, 255, 0,
0, 255, 170,
0, 170, 255,
0, 0, 255,
170, 0, 255,
255, 0, 170};
//float offset = ratio_to_origin * 0.5 - 0.5;
float radius = 3*h_canvas / 200.0f;
float stickwidth = h_canvas / 60.0f;
if(x < w_canvas && y < h_canvas){
//if(x == 0 && y == 0){
float b, g, r;
// b = 255 * 0.7 + 0.3 * (image_ref[y*w + x] + 0.5) * 256;
// g = 255 * 0.7 + 0.3 * (image_ref[w*h + y*w + x] + 0.5) * 256;
// r = 255 * 0.7 + 0.3 * (image_ref[2*w*h + y*w + x] + 0.5) * 256;
b = dst_pointer[ y * w_canvas + x];
g = dst_pointer[ w_canvas * h_canvas + y * w_canvas + x];
r = dst_pointer[2 * w_canvas * h_canvas + y * w_canvas + x];
for(int p = 0; p < num_people; p++){
for(int l = 0; l < nlimb; l++){
float b_sqrt = stickwidth * stickwidth; //fixed
float alpha = 0.6;
int part_a = limb[2*l];
int part_b = limb[2*l+1];
float x_a = (shared_poses[p*NUM_PARTS*3 + part_a*3]); // * ratio_to_origin + offset;
float x_b = (shared_poses[p*NUM_PARTS*3 + part_b*3]); // * ratio_to_origin + offset;
float y_a = (shared_poses[p*NUM_PARTS*3 + part_a*3 + 1]); // * ratio_to_origin + offset;
float y_b = (shared_poses[p*NUM_PARTS*3 + part_b*3 + 1]); // * ratio_to_origin + offset;
float value_a = shared_poses[p*NUM_PARTS*3 + part_a*3 + 2];
float value_b = shared_poses[p*NUM_PARTS*3 + part_b*3 + 2];
if(value_a > threshold && value_b > threshold){
float x_p = (x_a + x_b) / 2;
float y_p = (y_a + y_b) / 2;
float angle = atan2f(y_b - y_a, x_b - x_a);
float sine = sinf(angle);
float cosine = cosf(angle);
float a_sqrt = (x_a - x_p) * (x_a - x_p) + (y_a - y_p) * (y_a - y_p);
if (l==0) {
a_sqrt *= 1.2;
b_sqrt = a_sqrt;
// alpha *= 0.5;
}
float A = cosine * (x - x_p) + sine * (y - y_p);
float B = sine * (x - x_p) - cosine * (y - y_p);
float judge = A * A / a_sqrt + B * B / b_sqrt;
float minV = 0;
if (l==0) {
minV = 0.8;
}
if(judge>= minV && judge <= 1){
b = (1-alpha) * b + alpha * color[l*3+2];
g = (1-alpha) * g + alpha * color[l*3+1];
r = (1-alpha) * r + alpha * color[l*3];
//plotted = 1;
}
}
}
for(int i = 0; i < NUM_PARTS; i++) { //for every point
float local_x = shared_poses[p*NUM_PARTS*3 + i*3];
float local_y = shared_poses[p*NUM_PARTS*3 + i*3 + 1];
float value = shared_poses[p*NUM_PARTS*3 + i*3 + 2];
float pose_x_on_image = local_x; // * ratio_to_origin + offset;
float pose_y_on_image = local_y; // * ratio_to_origin + offset;
if(value > threshold) {
if((x - pose_x_on_image) * (x - pose_x_on_image) + (y - pose_y_on_image) * (y - pose_y_on_image) <= radius * radius){
b = 0.6 * b + 0.4 * color[(i%9)*3+2];
g = 0.6 * g + 0.4 * color[(i%9)*3+1];
r = 0.6 * r + 0.4 * color[(i%9)*3];
}
}
}
}
dst_pointer[ y * w_canvas + x] = b; //plot dot
dst_pointer[ w_canvas * h_canvas + y * w_canvas + x] = g;
dst_pointer[2 * w_canvas * h_canvas + y * w_canvas + x] = r;
// if(x==0 && y==0){
// printf("exiting\n");
// }
}
}
__global__ void render_pose_29parts_heatmap(float* dst_pointer, int w_canvas, int h_canvas, int w_net,
int h_net, float* heatmaps, int num_people, int part){
const int NUM_PARTS = 15;
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
//int global_idx = threadIdx.y * blockDim.x + threadIdx.x;
__syncthreads();
if(x < w_canvas && y < h_canvas){
//heatmaps has length w_net * h_net * 15
int offset3 = w_net * h_net * NUM_PARTS;
int offset2 = w_net * h_net;
float b, g, r;
float value = (part == NUM_PARTS-1) ? 1 : 0;
float h_inv = (float)h_net / (float)h_canvas;
float w_inv = (float)w_net / (float)w_canvas;
// b = 255 * 0.7 + 0.3 * (image_ref[y*w + x] + 0.5) * 256;
// g = 255 * 0.7 + 0.3 * (image_ref[w*h + y*w + x] + 0.5) * 256;
// r = 255 * 0.7 + 0.3 * (image_ref[2*w*h + y*w + x] + 0.5) * 256;
b = dst_pointer[ y * w_canvas + x];
g = dst_pointer[ w_canvas * h_canvas + y * w_canvas + x];
r = dst_pointer[2 * w_canvas * h_canvas + y * w_canvas + x];
for(int p = 0; p < 1; p++){
float x_on_box = w_inv * x + (0.5 * w_inv - 0.5);
float y_on_box = h_inv * y + (0.5 * h_inv - 0.5);
if(x_on_box >= 0 && x_on_box < w_net && y_on_box >=0 && y_on_box < h_net){
float value_this;
int x_nei[4];
x_nei[1] = int(x_on_box + 1e-5);
x_nei[1] = (x_nei[1] < 0) ? 0 : x_nei[1];
x_nei[0] = (x_nei[1] - 1 < 0) ? x_nei[1] : (x_nei[1] - 1);
x_nei[2] = (x_nei[1] + 1 >= w_net) ? (w_net - 1) : (x_nei[1] + 1);
x_nei[3] = (x_nei[2] + 1 >= w_net) ? (w_net - 1) : (x_nei[2] + 1);
float dx = x_on_box - x_nei[1];
int y_nei[4];
y_nei[1] = int(y_on_box + 1e-5);
y_nei[1] = (y_nei[1] < 0) ? 0 : y_nei[1];
y_nei[0] = (y_nei[1] - 1 < 0) ? y_nei[1] : (y_nei[1] - 1);
y_nei[2] = (y_nei[1] + 1 >= h_net) ? (h_net - 1) : (y_nei[1] + 1);
y_nei[3] = (y_nei[2] + 1 >= h_net) ? (h_net - 1) : (y_nei[2] + 1);
float dy = y_on_box - y_nei[1];
float temp[4];
int offset_src = p * offset3 + part * offset2;
for(int i = 0; i < 4; i++){
cubic_interpolation(temp[i], heatmaps[offset_src + y_nei[i]*w_net + x_nei[0]],
heatmaps[offset_src + y_nei[i]*w_net + x_nei[1]],
heatmaps[offset_src + y_nei[i]*w_net + x_nei[2]],
heatmaps[offset_src + y_nei[i]*w_net + x_nei[3]], dx);
}
cubic_interpolation(value_this, temp[0], temp[1], temp[2], temp[3], dy);
// if(part != 14){
// if(value_this > value)
// value = value_this;
// } else {
// if(value_this < value)
value = value_this;
// }
}
}
float c[3];
if (part<16){
getColor(c, value, 0, 1);
} else {
getColor(c, value, -1, 1);
}
b = 0.5 * b + 0.5 * c[0];
g = 0.5 * g + 0.5 * c[1];
r = 0.5 * r + 0.5 * c[2];
dst_pointer[ y * w_canvas + x] = b; //plot dot
dst_pointer[ w_canvas * h_canvas + y * w_canvas + x] = g;
dst_pointer[2 * w_canvas * h_canvas + y * w_canvas + x] = r;
// if(x==0 && y==0){
// printf("exiting\n");
// }
}
}
void render_mpi_parts(float* canvas, int w_canvas, int h_canvas, int w_net, int h_net,
float* heatmaps, int boxsize, float* centers, float* poses, vector<int> num_people, int part){
//canvas, image in width * height * 3 * N
//heatmaps in w_net * h_net * 15 * (P1+P2+...+PN)
//centers in 2 * 11 * 1 * N
//poses in 3 * 1 * 15 * (P1+P2+...+PN)
//num_people has length P, indicating P1, ..., PN
const int NUM_PARTS = 15;
int N = num_people.size(); //batch size
//LOG(ERROR) << "Number of frames in batch: " << N;
//int count = 0;
//int offset_canvas = w_canvas * h_canvas * 3; // 3 because we only render one image here
int offset_heatmap = w_net * h_net * NUM_PARTS; // boxsize * boxsize * 15
//int offset_info = 33; //22
int offset_pose = NUM_PARTS*3;
float threshold = 0.0;
int offset_pose_so_far = 0;
int offset_heatmap_so_far = 0;
float ratio_to_origin = (float)h_canvas / (float)h_net;
dim3 threadsPerBlock(numThreadsPerBlock_1d, numThreadsPerBlock_1d);
dim3 numBlocks(updiv(w_canvas, threadsPerBlock.x), updiv(h_canvas, threadsPerBlock.y));
for(int i = 0; i < N; i++){ //N is always 1 for website
int num_people_this_frame = num_people[i];
//LOG(ERROR) << "num_people_this_frame: " << num_people_this_frame << " ratio_to_origin: " << ratio_to_origin;
if(num_people_this_frame != 0){
if(part == 0){
// render_pose_website<<<threadsPerBlock, numBlocks>>>
VLOG(4) << "num_people_this_frame: " << num_people_this_frame << " ratio_to_origin: " << ratio_to_origin;
render_pose_29parts<<<threadsPerBlock, numBlocks>>>(canvas, w_canvas, h_canvas, ratio_to_origin,
poses+offset_pose_so_far, boxsize,
num_people_this_frame, threshold);
}
else if (part > 0) {
//render_pose_website_heatmap<<<threadsPerBlock, numBlocks>>>
//LOG(ERROR) << "GPU part num: " << part-1;
render_pose_29parts_heatmap<<<threadsPerBlock, numBlocks>>>(canvas, w_canvas, h_canvas, w_net, h_net,
heatmaps+offset_heatmap_so_far, num_people_this_frame,
part-1);
}
} else {
if (part > 0) {
render_pose_29parts_heatmap<<<threadsPerBlock, numBlocks>>>(canvas, w_canvas, h_canvas, w_net, h_net,
heatmaps+offset_heatmap_so_far, num_people_this_frame,
part-1);
// render_pose_website_heatmap_empty<<<threadsPerBlock, numBlocks>>>(canvas, w_canvas, h_canvas);
}
}
//LOG(ERROR) << "num_people[i] = " << num_people[i];
cudaDeviceSynchronize();
offset_pose_so_far += offset_pose * num_people[i];
offset_heatmap_so_far += offset_heatmap * num_people[i];
}
//
//LOG(ERROR) << "render_done";
}
//////////////////////////////////////////////////////////////////////////////
// COCO
__global__ void render_pose_coco_parts(float* dst_pointer, int w_canvas, int h_canvas, float ratio_to_origin,
float* poses, int boxsize, int num_people, float threshold, bool googly_eyes){
const int NUM_PARTS = 18;
  //poses has length 3 * NUM_PARTS * num_people
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
//int plotted = 0;
int global_idx = threadIdx.y * blockDim.x + threadIdx.x;
__shared__ float shared_poses[NUM_PARTS*3*RENDER_MAX_PEOPLE];
__shared__ float2 shared_mins[RENDER_MAX_PEOPLE];
__shared__ float2 shared_maxs[RENDER_MAX_PEOPLE];
__shared__ float2 shared_scalef[RENDER_MAX_PEOPLE];
if(global_idx < num_people ){
int p = global_idx;
shared_mins[p].x = w_canvas;
shared_mins[p].y = h_canvas;
shared_maxs[p].x = 0;
shared_maxs[p].y = 0;
for (int part=0;part<NUM_PARTS;part++) {
float x = poses[p*NUM_PARTS*3 + part*3];
float y = poses[p*NUM_PARTS*3 + part*3+1];
float z = poses[p*NUM_PARTS*3 + part*3+2];
shared_poses[p*NUM_PARTS*3 + part*3] = x;
shared_poses[p*NUM_PARTS*3 + part*3+1] = y; //y
shared_poses[p*NUM_PARTS*3 + part*3+2] = z; //v
if (z>threshold) {
if (x<shared_mins[p].x) shared_mins[p].x = x;
if (x>shared_maxs[p].x) shared_maxs[p].x = x;
if (y<shared_mins[p].y) shared_mins[p].y = y;
if (y>shared_maxs[p].y) shared_maxs[p].y = y;
}
}
shared_scalef[p].x = shared_maxs[p].x-shared_mins[p].x;
shared_scalef[p].y = shared_maxs[p].y-shared_mins[p].y;
shared_scalef[p].x = (shared_scalef[p].x+shared_scalef[p].y)/2.0;
if (shared_scalef[p].x<200) {
shared_scalef[p].x = shared_scalef[p].x/200;
if (shared_scalef[p].x<0.33) shared_scalef[p].x = 0.33;
} else {
shared_scalef[p].x = 1.0;
}
shared_maxs[p].x += 50;
shared_maxs[p].y += 50;
shared_mins[p].x -= 50;
shared_mins[p].y -= 50;
}
__syncthreads();
const int limb[] = LIMB_COCO_NOEAR;
const int nlimb = sizeof(limb)/(2*sizeof(int));
/*
const int color[27] = {255, 0, 0,
255, 170, 0,
170, 255, 0,
0, 255, 0,
0, 255, 170,
0, 170, 255,
0, 0, 255,
170, 0, 255,
255, 0, 170};
*/
const int color[] = {
255, 0, 0,
255, 85, 0,
255, 170, 0,
255, 255, 0,
170, 255, 0,
85, 255, 0,
0, 255, 0,
0, 255, 85,
0, 255, 170,
0, 255, 255,
0, 170, 255,
0, 85, 255,
0, 0, 255,
85, 0, 255,
170, 0, 255,
255, 0, 255,
255, 0, 170,
255, 0, 85};
const int nColor = sizeof(color)/(3*sizeof(int));
//float offset = ratio_to_origin * 0.5 - 0.5;
float radius = 2*h_canvas / 200.0f;
float stickwidth = h_canvas / 120.0f;
if(x < w_canvas && y < h_canvas){
//if(x == 0 && y == 0){
float b, g, r;
// b = 255 * 0.7 + 0.3 * (image_ref[y*w + x] + 0.5) * 256;
// g = 255 * 0.7 + 0.3 * (image_ref[w*h + y*w + x] + 0.5) * 256;
// r = 255 * 0.7 + 0.3 * (image_ref[2*w*h + y*w + x] + 0.5) * 256;
b = dst_pointer[ y * w_canvas + x];
g = dst_pointer[ w_canvas * h_canvas + y * w_canvas + x];
r = dst_pointer[2 * w_canvas * h_canvas + y * w_canvas + x];
for(int p = 0; p < num_people; p++){
if (x>shared_maxs[p].x || x<shared_mins[p].x
|| y>shared_maxs[p].y || y<shared_mins[p].y) {
continue;
}
for(int l = 0; l < nlimb; l++){
float b_sqrt = shared_scalef[p].x*shared_scalef[p].x*stickwidth * stickwidth; //fixed
float alpha = 0.5;
int part_a = limb[2*l];
int part_b = limb[2*l+1];
float x_a = (shared_poses[p*NUM_PARTS*3 + part_a*3]); // * ratio_to_origin + offset;
float x_b = (shared_poses[p*NUM_PARTS*3 + part_b*3]); // * ratio_to_origin + offset;
float y_a = (shared_poses[p*NUM_PARTS*3 + part_a*3 + 1]); // * ratio_to_origin + offset;
float y_b = (shared_poses[p*NUM_PARTS*3 + part_b*3 + 1]); // * ratio_to_origin + offset;
float value_a = shared_poses[p*NUM_PARTS*3 + part_a*3 + 2];
float value_b = shared_poses[p*NUM_PARTS*3 + part_b*3 + 2];
if (0 && (l==nlimb-1 || l==nlimb-5)) {
float x_c = (shared_poses[p*NUM_PARTS*3 + 14*3 + 0]); // * ratio_to_origin + offset;
float y_c = (shared_poses[p*NUM_PARTS*3 + 14*3 + 1]); // * ratio_to_origin + offset;
float value_c = shared_poses[p*NUM_PARTS*3 + 14*3 + 2];
if (value_c>threshold) {
x_b = (x_c+x_b)/2;
y_b = (y_c+y_b)/2;
} else {
continue;
}
}
if(value_a > threshold && value_b > threshold){
float x_p = (x_a + x_b) / 2;
float y_p = (y_a + y_b) / 2;
float angle = atan2f(y_b - y_a, x_b - x_a);
float sine = sinf(angle);
float cosine = cosf(angle);
float a_sqrt = (x_a - x_p) * (x_a - x_p) + (y_a - y_p) * (y_a - y_p);
float A = cosine * (x - x_p) + sine * (y - y_p);
float B = sine * (x - x_p) - cosine * (y - y_p);
float judge = A * A / a_sqrt + B * B / b_sqrt;
float minV = 0;
float maxV = 1;
float3 co;
co.x = color[(l%nColor)*3+0];
co.y = color[(l%nColor)*3+1];
co.z = color[(l%nColor)*3+2];
if ( 0 && (l==nlimb-4 || l==nlimb-1 || l==nlimb-2 || l==nlimb-3 || l==nlimb-5)) {
// float nx = cosine;
// float ny = sine;
// float px = nx*(x-x_a) + ny*(y-y_a);
float lw = 8;
if (l==nlimb-1) {
lw = 2;
}
if (B>-lw && B<lw) {
judge = A/sqrt(a_sqrt);
} else {
judge = 2;
}
minV = -1;
maxV = 1;
alpha = 0.9;
co.x = 0; co.y = 0; co.z = 0;
if (l==nlimb-5) {
maxV = -0.3;
alpha = 0.3*(1-(judge+1)/0.8);
co.x = 255; co.y = 255; co.z = 255;
}
}
if(judge>= minV && judge <= maxV){
b = (1-alpha) * b + alpha * co.z;
g = (1-alpha) * g + alpha * co.y;
r = (1-alpha) * r + alpha * co.x;
//plotted = 1;
}
}
}
for(int i = 0; i < NUM_PARTS; i++) { //for every point
float local_x = shared_poses[p*NUM_PARTS*3 + i*3];
float local_y = shared_poses[p*NUM_PARTS*3 + i*3 + 1];
float value = shared_poses[p*NUM_PARTS*3 + i*3 + 2];
if(value > threshold) {
float dist2 = (x - local_x) * (x - local_x) + (y - local_y) * (y - local_y);
float minr2 = 0;
float maxr2 = shared_scalef[p].x*shared_scalef[p].x*radius * radius;
float alpha = 0.6;
float3 co;
co.x = color[(i%nColor)*3+0];
co.y = color[(i%nColor)*3+1];
co.z = color[(i%nColor)*3+2];
if (googly_eyes && (i==14 || i==15)) {
maxr2 = shared_scalef[p].x*shared_scalef[p].x*2.5*2.5*radius*radius;
minr2 = shared_scalef[p].x*shared_scalef[p].x*(2.5*radius-2)*(2.5*radius-2);
alpha = 0.9;
co.x = 0; co.y = 0; co.z = 0;
if(dist2 <= maxr2){
if(dist2 <= minr2) {
co.x = 255; co.y = 255; co.z = 255;
}
if(dist2 <= minr2*0.6) {
float dist3 = (x-4 - local_x) * (x-4 - local_x) + (y - local_y+4) * (y - local_y+4);
if (dist3>3.75*3.75) {
co.x = 0; co.y = 0; co.z = 0;
}
}
b = (1-alpha) * b + alpha * co.z;
g = (1-alpha) * g + alpha * co.y;
r = (1-alpha) * r + alpha * co.x;
}
} else {
if (0 && i==0) {
alpha = 0.9;
maxr2 = maxr2*2;
co.x = 0; co.y = 0; co.x = 255;
}
if(dist2>=minr2 && dist2 <= maxr2){
b = (1-alpha) * b + alpha * co.z;
g = (1-alpha) * g + alpha * co.y;
r = (1-alpha) * r + alpha * co.x;
}
}
}
}
}
dst_pointer[ y * w_canvas + x] = b; //plot dot
dst_pointer[ w_canvas * h_canvas + y * w_canvas + x] = g;
dst_pointer[2 * w_canvas * h_canvas + y * w_canvas + x] = r;
// if(x==0 && y==0){
// printf("exiting\n");
// }
}
}
__global__ void render_pose_coco_heatmap(float* dst_pointer, int w_canvas, int h_canvas, int w_net,
int h_net, float* heatmaps, int num_people, int part){
const int NUM_PARTS = 18;
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
//int global_idx = threadIdx.y * blockDim.x + threadIdx.x;
__syncthreads();
if(x < w_canvas && y < h_canvas){
    //heatmaps has length w_net * h_net * NUM_PARTS
int offset3 = w_net * h_net * NUM_PARTS;
int offset2 = w_net * h_net;
float b, g, r;
float value = (part == NUM_PARTS-1) ? 1 : 0;
float h_inv = (float)h_net / (float)h_canvas;
float w_inv = (float)w_net / (float)w_canvas;
// b = 255 * 0.7 + 0.3 * (image_ref[y*w + x] + 0.5) * 256;
// g = 255 * 0.7 + 0.3 * (image_ref[w*h + y*w + x] + 0.5) * 256;
// r = 255 * 0.7 + 0.3 * (image_ref[2*w*h + y*w + x] + 0.5) * 256;
b = dst_pointer[ y * w_canvas + x];
g = dst_pointer[ w_canvas * h_canvas + y * w_canvas + x];
r = dst_pointer[2 * w_canvas * h_canvas + y * w_canvas + x];
for(int p = 0; p < 1; p++){
float x_on_box = w_inv * x + (0.5 * w_inv - 0.5);
float y_on_box = h_inv * y + (0.5 * h_inv - 0.5);
if(x_on_box >= 0 && x_on_box < w_net && y_on_box >=0 && y_on_box < h_net){
float value_this;
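// Gather four clamped neighbour indices around (x_on_box, y_on_box) in x and y for the cubic kernel.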
int x_nei[4];
x_nei[1] = int(x_on_box + 1e-5);
x_nei[1] = (x_nei[1] < 0) ? 0 : x_nei[1];
x_nei[0] = (x_nei[1] - 1 < 0) ? x_nei[1] : (x_nei[1] - 1);
x_nei[2] = (x_nei[1] + 1 >= w_net) ? (w_net - 1) : (x_nei[1] + 1);
x_nei[3] = (x_nei[2] + 1 >= w_net) ? (w_net - 1) : (x_nei[2] + 1);
float dx = x_on_box - x_nei[1];
int y_nei[4];
y_nei[1] = int(y_on_box + 1e-5);
y_nei[1] = (y_nei[1] < 0) ? 0 : y_nei[1];
y_nei[0] = (y_nei[1] - 1 < 0) ? y_nei[1] : (y_nei[1] - 1);
y_nei[2] = (y_nei[1] + 1 >= h_net) ? (h_net - 1) : (y_nei[1] + 1);
y_nei[3] = (y_nei[2] + 1 >= h_net) ? (h_net - 1) : (y_nei[2] + 1);
float dy = y_on_box - y_nei[1];
float temp[4];
int offset_src = p * offset3 + part * offset2;
for(int i = 0; i < 4; i++){
cubic_interpolation(temp[i], heatmaps[offset_src + y_nei[i]*w_net + x_nei[0]],
heatmaps[offset_src + y_nei[i]*w_net + x_nei[1]],
heatmaps[offset_src + y_nei[i]*w_net + x_nei[2]],
heatmaps[offset_src + y_nei[i]*w_net + x_nei[3]], dx);
}
cubic_interpolation(value_this, temp[0], temp[1], temp[2], temp[3], dy);
// if(part != 14){
// if(value_this > value)
// value = value_this;
// } else {
// if(value_this < value)
value = value_this;
// }
}
}
float c[3];
if (part<NUM_PARTS+1){
getColor(c, value, 0, 1);
} else {
getColor(c, value, -1,1);
}
float alpha = 0.7;
b = (1-alpha) * b + alpha * c[2];
g = (1-alpha) * g + alpha * c[1];
r = (1-alpha) * r + alpha * c[0];
dst_pointer[ y * w_canvas + x] = b; //plot dot
dst_pointer[ w_canvas * h_canvas + y * w_canvas + x] = g;
dst_pointer[2 * w_canvas * h_canvas + y * w_canvas + x] = r;
// if(x==0 && y==0){
// printf("exiting\n");
// }
}
}
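// Variant that blends every part heatmap from in_part up to NUM_PARTS in a single pass,
// colouring each part with its own palette entry and sampling the maps with a
// nearest-neighbour lookup instead of cubic interpolation.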
__global__ void render_pose_coco_heatmap2(float* dst_pointer, int w_canvas, int h_canvas, int w_net,
int h_net, float* heatmaps, int num_people, int in_part){
const int NUM_PARTS = 18;
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
//int global_idx = threadIdx.y * blockDim.x + threadIdx.x;
const int color[] = {
255, 0, 0,
255, 85, 0,
255, 170, 0,
255, 255, 0,
170, 255, 0,
85, 255, 0,
0, 255, 0,
0, 255, 85,
0, 255, 170,
0, 255, 255,
0, 170, 255,
0, 85, 255,
0, 0, 255,
85, 0, 255,
170, 0, 255,
255, 0, 255,
255, 0, 170,
255, 0, 85};
const int nColor = sizeof(color)/(3*sizeof(int));
__syncthreads();
if(x < w_canvas && y < h_canvas){
//heatmaps has length w_net * h_net * 15
int offset3 = w_net * h_net * NUM_PARTS;
int offset2 = w_net * h_net;
float b, g, r;
float c[3];
c[0] = 0;
c[1] = 0;
c[2] = 0;
float value = 0;
float h_inv = (float)h_net / (float)h_canvas;
float w_inv = (float)w_net / (float)w_canvas;
// b = 255 * 0.7 + 0.3 * (image_ref[y*w + x] + 0.5) * 256;
// g = 255 * 0.7 + 0.3 * (image_ref[w*h + y*w + x] + 0.5) * 256;
// r = 255 * 0.7 + 0.3 * (image_ref[2*w*h + y*w + x] + 0.5) * 256;
b = dst_pointer[ y * w_canvas + x];
g = dst_pointer[ w_canvas * h_canvas + y * w_canvas + x];
r = dst_pointer[2 * w_canvas * h_canvas + y * w_canvas + x];
for(int p = 0; p < 1; p++){
for (int part=in_part;part<NUM_PARTS;part++) {
float x_on_box = w_inv * x + (0.5 * w_inv - 0.5);
float y_on_box = h_inv * y + (0.5 * h_inv - 0.5);
if(x_on_box >= 0 && x_on_box < w_net && y_on_box >=0 && y_on_box < h_net){
//float value_this;
int x_nei[4];
x_nei[1] = int(x_on_box + 1e-5);
x_nei[1] = (x_nei[1] < 0) ? 0 : x_nei[1];
x_nei[0] = (x_nei[1] - 1 < 0) ? x_nei[1] : (x_nei[1] - 1);
x_nei[2] = (x_nei[1] + 1 >= w_net) ? (w_net - 1) : (x_nei[1] + 1);
x_nei[3] = (x_nei[2] + 1 >= w_net) ? (w_net - 1) : (x_nei[2] + 1);
//float dx = x_on_box - x_nei[1];
int y_nei[4];
y_nei[1] = int(y_on_box + 1e-5);
y_nei[1] = (y_nei[1] < 0) ? 0 : y_nei[1];
y_nei[0] = (y_nei[1] - 1 < 0) ? y_nei[1] : (y_nei[1] - 1);
y_nei[2] = (y_nei[1] + 1 >= h_net) ? (h_net - 1) : (y_nei[1] + 1);
y_nei[3] = (y_nei[2] + 1 >= h_net) ? (h_net - 1) : (y_nei[2] + 1);
//float dy = y_on_box - y_nei[1];
//float temp[4];
int offset_src = p * offset3 + part * offset2;
value = heatmaps[offset_src + y_nei[1]*w_net + x_nei[1]];
// if(part != 14){
// if(value_this > value)
// value = value_this;
// } else {
// if(value_this < value)
value = __saturatef(value);
c[0] += value*color[(part%nColor)*3+0];
c[1] += value*color[(part%nColor)*3+1];
c[2] += value*color[(part%nColor)*3+2];
// }
}
}
}
// if (part<NUM_PARTS+1){
// getColor(c, value, 0, 1);
// } else {
// getColor(c, value, -1,1);
// }
float alpha = 0.7;
b = (1-alpha) * b + alpha * c[2];
g = (1-alpha) * g + alpha * c[1];
r = (1-alpha) * r + alpha * c[0];
dst_pointer[ y * w_canvas + x] = b; //plot dot
dst_pointer[ w_canvas * h_canvas + y * w_canvas + x] = g;
dst_pointer[2 * w_canvas * h_canvas + y * w_canvas + x] = r;
// if(x==0 && y==0){
// printf("exiting\n");
// }
}
}
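// Renders part affinity fields: for each channel pair starting at in_part, the two components
// are sampled at the canvas pixel (bilinearly when num_parts_accum==1, nearest otherwise) and
// mapped to a colour with getColorXY before being accumulated and alpha-blended.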
__global__ void render_pose_coco_affinity(float* dst_pointer, int w_canvas, int h_canvas, int w_net,
int h_net, float* heatmaps, int num_parts_accum, int num_people, int in_part){
const int NUM_PARTS = 18;
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
//int global_idx = threadIdx.y * blockDim.x + threadIdx.x;
__syncthreads();
if(x < w_canvas && y < h_canvas){
//heatmaps has length w_net * h_net * 15
int offset3 = w_net * h_net * NUM_PARTS;
int offset2 = w_net * h_net;
float b, g, r;
float c[3];
int count = 0;
c[0] = 0; c[1] = 0; c[2] = 0;
float value = 0;
float value2 = 0;
float h_inv = (float)h_net / (float)h_canvas;
float w_inv = (float)w_net / (float)w_canvas;
// b = 255 * 0.7 + 0.3 * (image_ref[y*w + x] + 0.5) * 256;
// g = 255 * 0.7 + 0.3 * (image_ref[w*h + y*w + x] + 0.5) * 256;
// r = 255 * 0.7 + 0.3 * (image_ref[2*w*h + y*w + x] + 0.5) * 256;
b = dst_pointer[ y * w_canvas + x];
g = dst_pointer[ w_canvas * h_canvas + y * w_canvas + x];
r = dst_pointer[2 * w_canvas * h_canvas + y * w_canvas + x];
for(int p = 0; p < 1; p++){
for (int part=in_part;part<in_part+num_parts_accum*2;part+=2) {
float x_on_box = w_inv * x + (0.5 * w_inv - 0.5);
float y_on_box = h_inv * y + (0.5 * h_inv - 0.5);
if(x_on_box >= 0 && x_on_box < w_net && y_on_box >=0 && y_on_box < h_net){
int x_nei[4];
x_nei[1] = int(x_on_box + 1e-5);
x_nei[1] = (x_nei[1] < 0) ? 0 : x_nei[1];
x_nei[0] = (x_nei[1] - 1 < 0) ? x_nei[1] : (x_nei[1] - 1);
x_nei[2] = (x_nei[1] + 1 >= w_net) ? (w_net - 1) : (x_nei[1] + 1);
x_nei[3] = (x_nei[2] + 1 >= w_net) ? (w_net - 1) : (x_nei[2] + 1);
float dx = x_on_box - x_nei[1];
int y_nei[4];
y_nei[1] = int(y_on_box + 1e-5);
y_nei[1] = (y_nei[1] < 0) ? 0 : y_nei[1];
y_nei[0] = (y_nei[1] - 1 < 0) ? y_nei[1] : (y_nei[1] - 1);
y_nei[2] = (y_nei[1] + 1 >= h_net) ? (h_net - 1) : (y_nei[1] + 1);
y_nei[3] = (y_nei[2] + 1 >= h_net) ? (h_net - 1) : (y_nei[2] + 1);
float dy = y_on_box - y_nei[1];
//float temp[4];
int offset_src = p * offset3 + part * offset2;
if (num_parts_accum==1) {
// for(int i = 0; i < 4; i++){
// cubic_interpolation(temp[i], heatmaps[offset_src + y_nei[i]*w_net + x_nei[0]],
// heatmaps[offset_src + y_nei[i]*w_net + x_nei[1]],
// heatmaps[offset_src + y_nei[i]*w_net + x_nei[2]],
// heatmaps[offset_src + y_nei[i]*w_net + x_nei[3]], dx);
// }
//
// cubic_interpolation(value, temp[0], temp[1], temp[2], temp[3], dy);
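// Bilinear interpolation of the first affinity component at (x_on_box, y_on_box);
// the block after it does the same for the second component from the next channel.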
{
float a = heatmaps[offset_src + y_nei[1]*w_net + x_nei[1]];
float b = heatmaps[offset_src + y_nei[1]*w_net + x_nei[2]];
float c = heatmaps[offset_src + y_nei[2]*w_net + x_nei[1]];
float d = heatmaps[offset_src + y_nei[2]*w_net + x_nei[2]];
value = (1-dx)*(1-dy)*a
+ (dx)*(1-dy)*b
+ (1-dx)*(dy)*c
+ (dx)*(dy)*d;
}
offset_src = p * offset3 + (part+1) * offset2;
{
float a = heatmaps[offset_src + y_nei[1]*w_net + x_nei[1]];
float b = heatmaps[offset_src + y_nei[1]*w_net + x_nei[2]];
float c = heatmaps[offset_src + y_nei[2]*w_net + x_nei[1]];
float d = heatmaps[offset_src + y_nei[2]*w_net + x_nei[2]];
value2 = (1-dx)*(1-dy)*a
+ (dx)*(1-dy)*b
+ (1-dx)*(dy)*c
+ (dx)*(dy)*d;
}
// for(int i = 0; i < 4; i++){
// cubic_interpolation(temp[i], heatmaps[offset_src + y_nei[i]*w_net + x_nei[0]],
// heatmaps[offset_src + y_nei[i]*w_net + x_nei[1]],
// heatmaps[offset_src + y_nei[i]*w_net + x_nei[2]],
// heatmaps[offset_src + y_nei[i]*w_net + x_nei[3]], dx);
// }
// cubic_interpolation(value2, temp[0], temp[1], temp[2], temp[3], dy);
} else {
value = heatmaps[offset_src + y_nei[1]*w_net + x_nei[1]];
offset_src = p * offset3 + (part+1) * offset2;
value2 = heatmaps[offset_src + y_nei[1]*w_net + x_nei[1]];
}
//
// if(part != 14){
// if(value_this > value)
// value = value_this;
// } else {
// if(value_this < value)
float c2[3];
// if (part%2==0) {
// value = (x-320)/sqrtf( (180)*(180) + (180)*(180));
// value2 = (y-180)/sqrtf( (180)*(180) + (180)*(180));
// }
getColorXY(c2, value, value2);
c[0] += c2[0];
c[1] += c2[1];
c[2] += c2[2];
count++;
// }
}
}
}
if (c[0]>255) c[0] = 255;
if (c[1]>255) c[1] = 255;
if (c[2]>255) c[2] = 255;
// c[0] /= count;
// c[1] /= count;
// c[2] /= count;
float alpha = 0.7;
b = (1-alpha) * b + alpha * c[2];
g = (1-alpha) * g + alpha * c[1];
r = (1-alpha) * r + alpha * c[0];
dst_pointer[ y * w_canvas + x] = b; //plot dot
dst_pointer[ w_canvas * h_canvas + y * w_canvas + x] = g;
dst_pointer[2 * w_canvas * h_canvas + y * w_canvas + x] = r;
// if(x==0 && y==0){
// printf("exiting\n");
// }
}
}
void render_coco_parts(float* canvas, int w_canvas, int h_canvas, int w_net, int h_net,
float* heatmaps, int boxsize, float* centers, float* poses, vector<int> num_people, int part, bool googly_eyes){
//canvas, image in width * height * 3 * N
//heatmaps in w_net * h_net * 15 * (P1+P2+...+PN)
//centers in 2 * 11 * 1 * N
//poses in 3 * 1 * 15 * (P1+P2+...+PN)
//num_people has length P, indicating P1, ..., PN
const int NUM_PARTS = 18;
int N = 1;//num_people.size(); //batch size
//LOG(ERROR) << "Number of frames in batch: " << N;
//int count = 0;
//int offset_canvas = w_canvas * h_canvas * 3; // 3 because we only render one image here
int offset_heatmap = w_net * h_net * NUM_PARTS; // boxsize * boxsize * 15
//int offset_info = 33; //22
int offset_pose = NUM_PARTS*3;
float threshold = 0.01;
int offset_pose_so_far = 0;
int offset_heatmap_so_far = 0;
float ratio_to_origin = (float)h_canvas / (float)h_net;
dim3 threadsPerBlock(numThreadsPerBlock_1d, numThreadsPerBlock_1d);
dim3 numBlocks(updiv(w_canvas, threadsPerBlock.x), updiv(h_canvas, threadsPerBlock.y));
for(int i = 0; i < N; i++){ //N is always 1 for website
int num_people_this_frame = num_people[i];
//LOG(ERROR) << "num_people_this_frame: " << num_people_this_frame << " ratio_to_origin: " << ratio_to_origin;
if(part == 0 ){
// render_pose_website<<<threadsPerBlock, numBlocks>>>
VLOG(4) << "num_people_this_frame: " << num_people_this_frame << " ratio_to_origin: " << ratio_to_origin;
if(num_people_this_frame != 0){
render_pose_coco_parts<<<threadsPerBlock, numBlocks>>>(canvas, w_canvas, h_canvas, ratio_to_origin,
poses+offset_pose_so_far, boxsize,
num_people_this_frame, threshold, googly_eyes);
}
} else if (part > 0 && part<58) {
//render_pose_website_heatmap<<<threadsPerBlock, numBlocks>>>
//LOG(ERROR) << "GPU part num: " << part-1;
if (part-1==NUM_PARTS) {
render_pose_coco_heatmap2<<<threadsPerBlock, numBlocks>>>(canvas, w_canvas, h_canvas, w_net, h_net,
heatmaps+offset_heatmap_so_far, num_people_this_frame,
0);
} else {
render_pose_coco_heatmap<<<threadsPerBlock, numBlocks>>>(canvas, w_canvas, h_canvas, w_net, h_net,
heatmaps+offset_heatmap_so_far, num_people_this_frame,
part-1);
}
}
//LOG(ERROR) << "num_people[i] = " << num_people[i];
CUDA_CHECK(cudaDeviceSynchronize());
offset_pose_so_far += offset_pose * num_people[i];
offset_heatmap_so_far += offset_heatmap * num_people[i];
}
//
//LOG(ERROR) << "render_done";
}
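// A minimal calling sketch (all sizes and device pointers below are assumed/illustrative,
// not values taken from this file):
//   std::vector<int> num_people = {1};                        // one frame with one detected person
//   render_coco_parts(canvas_gpu, 656, 368, 82, 46, heatmaps_gpu, 368,
//                     centers_gpu, poses_gpu, num_people,
//                     /*part=*/0, /*googly_eyes=*/false);     // part==0 draws the skeleton overlay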
void render_coco_aff(float* canvas, int w_canvas, int h_canvas, int w_net, int h_net,
float* heatmaps, int boxsize, float* centers, float* poses,
vector<int> num_people, int part, int num_parts_accum){
//canvas, image in width * height * 3 * N
//heatmaps in w_net * h_net * 15 * (P1+P2+...+PN)
//centers in 2 * 11 * 1 * N
//poses in 3 * 1 * 15 * (P1+P2+...+PN)
//num_people has length P, indicating P1, ..., PN
const int NUM_PARTS = 18;
int N = 1;//num_people.size(); //batch size
//LOG(ERROR) << "Number of frames in batch: " << N;
//int count = 0;
//int offset_canvas = w_canvas * h_canvas * 3; // 3 because we only render one image here
int offset_heatmap = w_net * h_net * NUM_PARTS; // boxsize * boxsize * 15
//int offset_info = 33; //22
int offset_pose = NUM_PARTS*3;
//float threshold = 0.01;
int offset_pose_so_far = 0;
int offset_heatmap_so_far = 0;
//float ratio_to_origin = (float)h_canvas / (float)h_net;
dim3 threadsPerBlock(numThreadsPerBlock_1d, numThreadsPerBlock_1d);
dim3 numBlocks(updiv(w_canvas, threadsPerBlock.x), updiv(h_canvas, threadsPerBlock.y));
for(int i = 0; i < N; i++){ //N is always 1 for website
int num_people_this_frame = num_people[i];
//LOG(ERROR) << "num_people_this_frame: " << num_people_this_frame << " ratio_to_origin: " << ratio_to_origin;
int aff_part = part;
render_pose_coco_affinity<<<threadsPerBlock, numBlocks>>>(canvas, w_canvas, h_canvas, w_net, h_net,
heatmaps+offset_heatmap_so_far, num_parts_accum, num_people_this_frame,
aff_part);
// render_pose_website_heatmap_empty<<<threadsPerBlock, numBlocks>>>(canvas, w_canvas, h_canvas);
//LOG(ERROR) << "num_people[i] = " << num_people[i];
CUDA_CHECK(cudaDeviceSynchronize());
offset_pose_so_far += offset_pose * num_people[i];
offset_heatmap_so_far += offset_heatmap * num_people[i];
}
//
//LOG(ERROR) << "render_done";
}
} // namespace caffe
|
aee6f3f86cc7deac8f79f9099cd7c679720c5063.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* TP 1 - Premiers pas en CUDA
* --------------------------
 * Ex 3: Sepia image filter
*
* File: student.cu
* Author: Maxime MARIA
*/
#include "student.hpp"
#include "chronoGPU.hpp"
#include <algorithm>
namespace IMAC {
__global__ void sepiaCUDA(const uchar *const input, uchar *const output, const uint width, const uint height) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int idy = blockIdx.y * blockDim.y + threadIdx.y;
// inverting the loops gives the same result in 0.435808 ms instead of 0.060288 ms
if(idx < width && idy < height) {
const int offset = 3*(idx + idy*width);
const float red = input[offset + 0];
const float green = input[offset + 1];
const float blue = input[offset + 2];
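// Apply the usual sepia weighting to the RGB triple, clamping each channel to 255.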
output[offset + 0] = min(255.f, red * 0.393f + green * 0.769f + blue * 0.189f);
output[offset + 1] = min(255.f, red * 0.349f + green * 0.686f + blue * 0.168f);
output[offset + 2] = min(255.f, red * 0.272f + green * 0.534f + blue * 0.131f);
}
}
void studentJob(const std::vector<uchar> &input, const uint width, const uint height, std::vector<uchar> &output) {
ChronoGPU chrGPU;
// 2 arrays for GPU
uchar *dev_input = nullptr;
uchar *dev_output = nullptr;
// Allocate arrays on device (input and output)
const size_t sizeInBytes = 3 * width * height * sizeof(uchar);
std::cout << "Allocating input (2 arrays): " << ( ( 2 * sizeInBytes ) >> 20 ) << " MB on Device" << std::endl;
chrGPU.start();
// allocation
hipMalloc((void**)&dev_input, sizeInBytes); // 3 channels
hipMalloc((void**)&dev_output, sizeInBytes);
chrGPU.stop();
std::cout << " -> Done : " << chrGPU.elapsedTime() << " ms" << std::endl;
std::cout << "============================================" << std::endl << std::endl;
std::cout << "Process on GPU " << std::endl;
hipMemcpy(dev_input, input.data(), sizeInBytes, hipMemcpyHostToDevice);
chrGPU.start();
// Launch a kernel on the GPU
const dim3 dimThreads(32, 32);
const dim3 dimBlock(width/dimThreads.x+1, height/dimThreads.y+1);
hipLaunchKernelGGL(( sepiaCUDA), dim3(dimBlock), dim3(dimThreads) , 0, 0, dev_input, dev_output, width, height);
hipDeviceSynchronize();
chrGPU.stop();
std::cout << " -> Done : " << chrGPU.elapsedTime() << " ms" << std::endl;
hipMemcpy(output.data(), dev_output, sizeInBytes, hipMemcpyDeviceToHost);
hipFree(dev_input);
hipFree(dev_output);
}
}
|
aee6f3f86cc7deac8f79f9099cd7c679720c5063.cu
|
/*
* TP 1 - Premiers pas en CUDA
* --------------------------
 * Ex 3: Sepia image filter
*
* File: student.cu
* Author: Maxime MARIA
*/
#include "student.hpp"
#include "chronoGPU.hpp"
#include <algorithm>
namespace IMAC {
__global__ void sepiaCUDA(const uchar *const input, uchar *const output, const uint width, const uint height) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int idy = blockIdx.y * blockDim.y + threadIdx.y;
// inverting the loops gives the same result in 0.435808 ms instead of 0.060288 ms
if(idx < width && idy < height) {
const int offset = 3*(idx + idy*width);
const float red = input[offset + 0];
const float green = input[offset + 1];
const float blue = input[offset + 2];
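// Apply the usual sepia weighting to the RGB triple, clamping each channel to 255.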
output[offset + 0] = min(255.f, red * 0.393f + green * 0.769f + blue * 0.189f);
output[offset + 1] = min(255.f, red * 0.349f + green * 0.686f + blue * 0.168f);
output[offset + 2] = min(255.f, red * 0.272f + green * 0.534f + blue * 0.131f);
}
}
void studentJob(const std::vector<uchar> &input, const uint width, const uint height, std::vector<uchar> &output) {
ChronoGPU chrGPU;
// 2 arrays for GPU
uchar *dev_input = nullptr;
uchar *dev_output = nullptr;
// Allocate arrays on device (input and output)
const size_t sizeInBytes = 3 * width * height * sizeof(uchar);
std::cout << "Allocating input (2 arrays): " << ( ( 2 * sizeInBytes ) >> 20 ) << " MB on Device" << std::endl;
chrGPU.start();
// allocation
cudaMalloc((void**)&dev_input, sizeInBytes); // 3 channels
cudaMalloc((void**)&dev_output, sizeInBytes);
chrGPU.stop();
std::cout << " -> Done : " << chrGPU.elapsedTime() << " ms" << std::endl;
std::cout << "============================================" << std::endl << std::endl;
std::cout << "Process on GPU " << std::endl;
cudaMemcpy(dev_input, input.data(), sizeInBytes, cudaMemcpyHostToDevice);
chrGPU.start();
// Launch a kernel on the GPU
const dim3 dimThreads(32, 32);
const dim3 dimBlock(width/dimThreads.x+1, height/dimThreads.y+1);
sepiaCUDA<<<dimBlock, dimThreads >>>(dev_input, dev_output, width, height);
cudaDeviceSynchronize();
chrGPU.stop();
std::cout << " -> Done : " << chrGPU.elapsedTime() << " ms" << std::endl;
cudaMemcpy(output.data(), dev_output, sizeInBytes, cudaMemcpyDeviceToHost);
cudaFree(dev_input);
cudaFree(dev_output);
}
}
|
d9459362019babba56982590948e5c7e4de59db2.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "benchmark.cuh"
void validate_call(const hipError_t& err)
{
if (err != hipSuccess)
{
std::cerr << "CUDA error occurred: " << err << " " << hipGetErrorString(err) << std::endl;
throw std::runtime_error("CUDA error");
}
}
void validate_call(const cudnnStatus_t& err)
{
if (err != CUDNN_STATUS_SUCCESS)
{
std::cerr << "cuDNN error occurred: " << err << " " << cudnnGetErrorString(err) << std::endl;
throw std::runtime_error("cuDNN error");
}
}
void log(int verbose, std::ostream& ostream, std::string str)
{
validate_call(hipDeviceSynchronize());
if (verbose)
ostream << str << std::endl;
}
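// Fills a device buffer with a constant; there is no bounds check on tid, so the launch
// configuration must cover the buffer exactly (as the launches in benchmark_convolution do).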
template <typename T>
__global__ void fill_with_constant(T *px, T k)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
px[tid] = k;
}
bool first = true;
template <typename InputDataType, typename FilterDataType, typename OutDataType>
std::chrono::microseconds benchmark_convolution(size_t B, size_t C, size_t H, size_t W,
size_t numFilters, size_t filterH, size_t filterW,
size_t padH, size_t padW, size_t strideH, size_t strideW, size_t dilationH, size_t dilationW,
cudnnTensorFormat_t inputTensorFormat, cudnnTensorFormat_t filterTensorFormat, cudnnTensorFormat_t outputTensorFormat,
cudnnDataType_t inputDataType, cudnnDataType_t filterDataType,
cudnnDataType_t convAccumulatorDataType, cudnnDataType_t outDataType,
int verbose)
{
if (first)
{
first = false;
validate_call(hipSetDeviceFlags(hipDeviceScheduleBlockingSync));
}
cudnnHandle_t cudnn = nullptr;
cudnnTensorDescriptor_t inputDescriptor = nullptr;
InputDataType *inputData = nullptr;
cudnnFilterDescriptor_t filterDescriptor = nullptr;
FilterDataType *filterData = nullptr;
cudnnConvolutionDescriptor_t convDescriptor = nullptr;
cudnnTensorDescriptor_t outDescriptor = nullptr;
OutDataType *outData = nullptr;
cudnnConvolutionFwdAlgoPerf_t convAlgo;
void *workspaceData = nullptr;
std::chrono::microseconds elapsed;
try
{
// Create cudnn
validate_call(cudnnCreate(&cudnn));
log(verbose, std::clog, "Cudnn created");
// Create input tensor
validate_call(cudnnCreateTensorDescriptor(&inputDescriptor));
validate_call(cudnnSetTensor4dDescriptor(inputDescriptor, inputTensorFormat, inputDataType, B, C, H, W));
validate_call(hipMalloc(&inputData, B * C * H * W * sizeof(InputDataType)));
log(verbose, std::clog, "Input tensor allocated");
// Create filter descriptor
validate_call(cudnnCreateFilterDescriptor(&filterDescriptor));
validate_call(cudnnSetFilter4dDescriptor(filterDescriptor, filterDataType, filterTensorFormat, numFilters, C, filterH, filterW));
validate_call(hipMalloc(&filterData, numFilters * C * filterH * filterW * sizeof(FilterDataType)));
log(verbose, std::clog, "Filter tensor allocated");
// Convolution descriptor
validate_call(cudnnCreateConvolutionDescriptor(&convDescriptor));
validate_call(cudnnSetConvolution2dDescriptor(convDescriptor, padH, padW, strideH, strideW,
dilationH, dilationW, CUDNN_CONVOLUTION, convAccumulatorDataType));
validate_call(cudnnSetConvolutionMathType(convDescriptor, CUDNN_TENSOR_OP_MATH));
log(verbose, std::clog, "Convolution descriptor created");
int outB, outC, outH, outW;
validate_call(cudnnGetConvolution2dForwardOutputDim(convDescriptor, inputDescriptor, filterDescriptor, &outB, &outC, &outH, &outW));
log(verbose, std::clog, "Computed convolution output shape");
// Output tensor
validate_call(cudnnCreateTensorDescriptor(&outDescriptor));
validate_call(cudnnSetTensor4dDescriptor(outDescriptor, outputTensorFormat, outDataType, outB, outC, outH, outW));
validate_call(hipMalloc(&outData, outB * outC * outH * outW * sizeof(OutDataType)));
log(verbose, std::clog, "Output tensor allocated");
// Algorithm
int foundAlgo;
validate_call(cudnnFindConvolutionForwardAlgorithm(
cudnn, inputDescriptor, filterDescriptor, convDescriptor, outDescriptor, 1, &foundAlgo, &convAlgo));
if (foundAlgo == 0 || convAlgo.determinism == CUDNN_NON_DETERMINISTIC || convAlgo.status != CUDNN_STATUS_SUCCESS)
{
log(verbose, std::clog, "Best algorithm is non deterministic or not found. Terminating.");
throw std::runtime_error("Failed to find cudnn algorithm for convolution.");
}
log(verbose, std::clog, "Best algorithm is chosen " + std::to_string(convAlgo.algo) + " with math " + std::to_string(convAlgo.mathType));
if (convAlgo.mathType == CUDNN_TENSOR_OP_MATH)
log(verbose, std::clog, "Using Tensor CORES!!!");
// Workspace
size_t workspaceSize = convAlgo.memory;
if (workspaceSize != 0){}
validate_call(hipMalloc(&workspaceData, workspaceSize));
log(verbose, std::clog, "Workspace is allocated");
// Convolution
float alpha = 1.0f;
float beta = 0.0f;
// Dummy values
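// One thread per element: numFilters*filterW*filterH blocks of C threads for the filter,
// and W*H blocks of B*C threads for the input; C and B*C must stay within the device's
// per-block thread limit for these launches to be valid.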
hipLaunchKernelGGL(( ::fill_with_constant), dim3(numFilters*filterW * filterH), dim3(C), 0, 0, filterData, (FilterDataType)2);
hipLaunchKernelGGL(( ::fill_with_constant), dim3(W * H), dim3(B * C), 0, 0, inputData, (InputDataType)1);
log(verbose, std::clog, "Filled with dummy values");
validate_call(hipDeviceSynchronize());
auto begin = std::chrono::high_resolution_clock::now();
validate_call(cudnnConvolutionForward(
cudnn,
&alpha, inputDescriptor, inputData, filterDescriptor, filterData,
convDescriptor, convAlgo.algo, workspaceData, workspaceSize,
&beta, outDescriptor, outData));
validate_call(hipDeviceSynchronize());
elapsed = std::chrono::duration_cast<std::chrono::microseconds>(std::chrono::high_resolution_clock::now() - begin);
log(verbose, std::clog, "Finalizing");
}
catch(const std::exception& e)
{
std::cerr << "Error during convolution forward. Returned value is 0. Releasing resources..." << '\n';
elapsed = std::chrono::duration_cast<std::chrono::microseconds>(std::chrono::seconds(0));
}
// Finalizing
if (workspaceData != nullptr)
validate_call(hipFree(workspaceData));
if (outData != nullptr)
validate_call(hipFree(outData));
if (outDescriptor != nullptr)
validate_call(cudnnDestroyTensorDescriptor(outDescriptor));
log(verbose, std::clog, "Out tensor destroyed");
if (convDescriptor != nullptr)
validate_call(cudnnDestroyConvolutionDescriptor(convDescriptor));
log(verbose, std::clog, "Conv descriptor destroyed");
if (filterData != nullptr)
validate_call(hipFree(filterData));
if (filterDescriptor != nullptr)
validate_call(cudnnDestroyFilterDescriptor(filterDescriptor));
log(verbose, std::clog, "Filter tensor destroyed");
if (inputData != nullptr)
validate_call(hipFree(inputData));
if (inputDescriptor != nullptr)
validate_call(cudnnDestroyTensorDescriptor(inputDescriptor));
log(verbose, std::clog, "Input tensor destroyed");
if (cudnn != nullptr)
validate_call(cudnnDestroy(cudnn));
log(verbose, std::clog, "Cudnn destroyed");
return elapsed;
};
// FLOAT CONFIG
template std::chrono::microseconds benchmark_convolution<float, float, float>(
size_t, size_t, size_t, size_t, size_t, size_t, size_t,
size_t, size_t, size_t, size_t, size_t, size_t,
cudnnTensorFormat_t, cudnnTensorFormat_t, cudnnTensorFormat_t,
cudnnDataType_t, cudnnDataType_t, cudnnDataType_t, cudnnDataType_t, int);
// TRUE_HALF CONFIG
template std::chrono::microseconds benchmark_convolution<half, half, half>(
size_t, size_t, size_t, size_t, size_t, size_t, size_t,
size_t, size_t, size_t, size_t, size_t, size_t,
cudnnTensorFormat_t, cudnnTensorFormat_t, cudnnTensorFormat_t,
cudnnDataType_t, cudnnDataType_t, cudnnDataType_t, cudnnDataType_t, int);
// DOUBLE CONFIG
template std::chrono::microseconds benchmark_convolution<double, double, double>(
size_t, size_t, size_t, size_t, size_t, size_t, size_t,
size_t, size_t, size_t, size_t, size_t, size_t,
cudnnTensorFormat_t, cudnnTensorFormat_t, cudnnTensorFormat_t,
cudnnDataType_t, cudnnDataType_t, cudnnDataType_t, cudnnDataType_t, int);
// INT8* CONFIG
template std::chrono::microseconds benchmark_convolution<int8_t, int8_t, int8_t>(
size_t, size_t, size_t, size_t, size_t, size_t, size_t,
size_t, size_t, size_t, size_t, size_t, size_t,
cudnnTensorFormat_t, cudnnTensorFormat_t, cudnnTensorFormat_t,
cudnnDataType_t, cudnnDataType_t, cudnnDataType_t, cudnnDataType_t, int);
// INT8*_EXT CONFIG
template std::chrono::microseconds benchmark_convolution<int8_t, int8_t, float>(
size_t, size_t, size_t, size_t, size_t, size_t, size_t,
size_t, size_t, size_t, size_t, size_t, size_t,
cudnnTensorFormat_t, cudnnTensorFormat_t, cudnnTensorFormat_t,
cudnnDataType_t, cudnnDataType_t, cudnnDataType_t, cudnnDataType_t, int);
// UINT8* CONFIG
template std::chrono::microseconds benchmark_convolution<uint8_t, int8_t, int8_t>(
size_t, size_t, size_t, size_t, size_t, size_t, size_t,
size_t, size_t, size_t, size_t, size_t, size_t,
cudnnTensorFormat_t, cudnnTensorFormat_t, cudnnTensorFormat_t,
cudnnDataType_t, cudnnDataType_t, cudnnDataType_t, cudnnDataType_t, int);
// UINT8*_EXT CONFIG
template std::chrono::microseconds benchmark_convolution<uint8_t, int8_t, float>(
size_t, size_t, size_t, size_t, size_t, size_t, size_t,
size_t, size_t, size_t, size_t, size_t, size_t,
cudnnTensorFormat_t, cudnnTensorFormat_t, cudnnTensorFormat_t,
cudnnDataType_t, cudnnDataType_t, cudnnDataType_t, cudnnDataType_t, int);
|
d9459362019babba56982590948e5c7e4de59db2.cu
|
#include "benchmark.cuh"
void validate_call(const cudaError_t& err)
{
if (err != cudaSuccess)
{
std::cerr << "CUDA error occurred: " << err << " " << cudaGetErrorString(err) << std::endl;
throw std::runtime_error("CUDA error");
}
}
void validate_call(const cudnnStatus_t& err)
{
if (err != CUDNN_STATUS_SUCCESS)
{
std::cerr << "cuDNN error occurred: " << err << " " << cudnnGetErrorString(err) << std::endl;
throw std::runtime_error("cuDNN error");
}
}
void log(int verbose, std::ostream& ostream, std::string str)
{
validate_call(cudaDeviceSynchronize());
if (verbose)
ostream << str << std::endl;
}
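// Fills a device buffer with a constant; there is no bounds check on tid, so the launch
// configuration must cover the buffer exactly (as the launches in benchmark_convolution do).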
template <typename T>
__global__ void fill_with_constant(T *px, T k)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
px[tid] = k;
}
bool first = true;
template <typename InputDataType, typename FilterDataType, typename OutDataType>
std::chrono::microseconds benchmark_convolution(size_t B, size_t C, size_t H, size_t W,
size_t numFilters, size_t filterH, size_t filterW,
size_t padH, size_t padW, size_t strideH, size_t strideW, size_t dilationH, size_t dilationW,
cudnnTensorFormat_t inputTensorFormat, cudnnTensorFormat_t filterTensorFormat, cudnnTensorFormat_t outputTensorFormat,
cudnnDataType_t inputDataType, cudnnDataType_t filterDataType,
cudnnDataType_t convAccumulatorDataType, cudnnDataType_t outDataType,
int verbose)
{
if (first)
{
first = false;
validate_call(cudaSetDeviceFlags(cudaDeviceScheduleBlockingSync));
}
cudnnHandle_t cudnn = nullptr;
cudnnTensorDescriptor_t inputDescriptor = nullptr;
InputDataType *inputData = nullptr;
cudnnFilterDescriptor_t filterDescriptor = nullptr;
FilterDataType *filterData = nullptr;
cudnnConvolutionDescriptor_t convDescriptor = nullptr;
cudnnTensorDescriptor_t outDescriptor = nullptr;
OutDataType *outData = nullptr;
cudnnConvolutionFwdAlgoPerf_t convAlgo;
void *workspaceData = nullptr;
std::chrono::microseconds elapsed;
try
{
// Create cudnn
validate_call(cudnnCreate(&cudnn));
log(verbose, std::clog, "Cudnn created");
// Create input tensor
validate_call(cudnnCreateTensorDescriptor(&inputDescriptor));
validate_call(cudnnSetTensor4dDescriptor(inputDescriptor, inputTensorFormat, inputDataType, B, C, H, W));
validate_call(cudaMalloc(&inputData, B * C * H * W * sizeof(InputDataType)));
log(verbose, std::clog, "Input tensor allocated");
// Create filter descriptor
validate_call(cudnnCreateFilterDescriptor(&filterDescriptor));
validate_call(cudnnSetFilter4dDescriptor(filterDescriptor, filterDataType, filterTensorFormat, numFilters, C, filterH, filterW));
validate_call(cudaMalloc(&filterData, numFilters * C * filterH * filterW * sizeof(FilterDataType)));
log(verbose, std::clog, "Filter tensor allocated");
// Convolution descriptor
validate_call(cudnnCreateConvolutionDescriptor(&convDescriptor));
validate_call(cudnnSetConvolution2dDescriptor(convDescriptor, padH, padW, strideH, strideW,
dilationH, dilationW, CUDNN_CONVOLUTION, convAccumulatorDataType));
validate_call(cudnnSetConvolutionMathType(convDescriptor, CUDNN_TENSOR_OP_MATH));
log(verbose, std::clog, "Convolution descriptor created");
int outB, outC, outH, outW;
validate_call(cudnnGetConvolution2dForwardOutputDim(convDescriptor, inputDescriptor, filterDescriptor, &outB, &outC, &outH, &outW));
log(verbose, std::clog, "Computed convolution output shape");
// Output tensor
validate_call(cudnnCreateTensorDescriptor(&outDescriptor));
validate_call(cudnnSetTensor4dDescriptor(outDescriptor, outputTensorFormat, outDataType, outB, outC, outH, outW));
validate_call(cudaMalloc(&outData, outB * outC * outH * outW * sizeof(OutDataType)));
log(verbose, std::clog, "Output tensor allocated");
// Algorithm
int foundAlgo;
validate_call(cudnnFindConvolutionForwardAlgorithm(
cudnn, inputDescriptor, filterDescriptor, convDescriptor, outDescriptor, 1, &foundAlgo, &convAlgo));
if (foundAlgo == 0 || convAlgo.determinism == CUDNN_NON_DETERMINISTIC || convAlgo.status != CUDNN_STATUS_SUCCESS)
{
log(verbose, std::clog, "Best algorithm is non deterministic or not found. Terminating.");
throw std::runtime_error("Failed to find cudnn algorithm for convolution.");
}
log(verbose, std::clog, "Best algorithm is chosen " + std::to_string(convAlgo.algo) + " with math " + std::to_string(convAlgo.mathType));
if (convAlgo.mathType == CUDNN_TENSOR_OP_MATH)
log(verbose, std::clog, "Using Tensor CORES!!!");
// Workspace
size_t workspaceSize = convAlgo.memory;
if (workspaceSize != 0){}
validate_call(cudaMalloc(&workspaceData, workspaceSize));
log(verbose, std::clog, "Workspace is allocated");
// Convolution
float alpha = 1.0f;
float beta = 0.0f;
// Dummy values
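// One thread per element: numFilters*filterW*filterH blocks of C threads for the filter,
// and W*H blocks of B*C threads for the input; C and B*C must stay within the device's
// per-block thread limit for these launches to be valid.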
::fill_with_constant<<<numFilters*filterW * filterH, C>>>(filterData, (FilterDataType)2);
::fill_with_constant<<<W * H, B * C>>>(inputData, (InputDataType)1);
log(verbose, std::clog, "Filled with dummy values");
validate_call(cudaDeviceSynchronize());
auto begin = std::chrono::high_resolution_clock::now();
validate_call(cudnnConvolutionForward(
cudnn,
&alpha, inputDescriptor, inputData, filterDescriptor, filterData,
convDescriptor, convAlgo.algo, workspaceData, workspaceSize,
&beta, outDescriptor, outData));
validate_call(cudaDeviceSynchronize());
elapsed = std::chrono::duration_cast<std::chrono::microseconds>(std::chrono::high_resolution_clock::now() - begin);
log(verbose, std::clog, "Finalizing");
}
catch(const std::exception& e)
{
std::cerr << "Error during convolution forward. Returned value is 0. Releasing resources..." << '\n';
elapsed = std::chrono::duration_cast<std::chrono::microseconds>(std::chrono::seconds(0));
}
// Finalizing
if (workspaceData != nullptr)
validate_call(cudaFree(workspaceData));
if (outData != nullptr)
validate_call(cudaFree(outData));
if (outDescriptor != nullptr)
validate_call(cudnnDestroyTensorDescriptor(outDescriptor));
log(verbose, std::clog, "Out tensor destroyed");
if (convDescriptor != nullptr)
validate_call(cudnnDestroyConvolutionDescriptor(convDescriptor));
log(verbose, std::clog, "Conv descriptor destroyed");
if (filterData != nullptr)
validate_call(cudaFree(filterData));
if (filterDescriptor != nullptr)
validate_call(cudnnDestroyFilterDescriptor(filterDescriptor));
log(verbose, std::clog, "Filter tensor destroyed");
if (inputData != nullptr)
validate_call(cudaFree(inputData));
if (inputDescriptor != nullptr)
validate_call(cudnnDestroyTensorDescriptor(inputDescriptor));
log(verbose, std::clog, "Input tensor destroyed");
if (cudnn != nullptr)
validate_call(cudnnDestroy(cudnn));
log(verbose, std::clog, "Cudnn destroyed");
return elapsed;
};
// FLOAT CONFIG
template std::chrono::microseconds benchmark_convolution<float, float, float>(
size_t, size_t, size_t, size_t, size_t, size_t, size_t,
size_t, size_t, size_t, size_t, size_t, size_t,
cudnnTensorFormat_t, cudnnTensorFormat_t, cudnnTensorFormat_t,
cudnnDataType_t, cudnnDataType_t, cudnnDataType_t, cudnnDataType_t, int);
// TRUE_HALF CONFIG
template std::chrono::microseconds benchmark_convolution<half, half, half>(
size_t, size_t, size_t, size_t, size_t, size_t, size_t,
size_t, size_t, size_t, size_t, size_t, size_t,
cudnnTensorFormat_t, cudnnTensorFormat_t, cudnnTensorFormat_t,
cudnnDataType_t, cudnnDataType_t, cudnnDataType_t, cudnnDataType_t, int);
// DOUBLE CONFIG
template std::chrono::microseconds benchmark_convolution<double, double, double>(
size_t, size_t, size_t, size_t, size_t, size_t, size_t,
size_t, size_t, size_t, size_t, size_t, size_t,
cudnnTensorFormat_t, cudnnTensorFormat_t, cudnnTensorFormat_t,
cudnnDataType_t, cudnnDataType_t, cudnnDataType_t, cudnnDataType_t, int);
// INT8* CONFIG
template std::chrono::microseconds benchmark_convolution<int8_t, int8_t, int8_t>(
size_t, size_t, size_t, size_t, size_t, size_t, size_t,
size_t, size_t, size_t, size_t, size_t, size_t,
cudnnTensorFormat_t, cudnnTensorFormat_t, cudnnTensorFormat_t,
cudnnDataType_t, cudnnDataType_t, cudnnDataType_t, cudnnDataType_t, int);
// INT8*_EXT CONFIG
template std::chrono::microseconds benchmark_convolution<int8_t, int8_t, float>(
size_t, size_t, size_t, size_t, size_t, size_t, size_t,
size_t, size_t, size_t, size_t, size_t, size_t,
cudnnTensorFormat_t, cudnnTensorFormat_t, cudnnTensorFormat_t,
cudnnDataType_t, cudnnDataType_t, cudnnDataType_t, cudnnDataType_t, int);
// UINT8* CONFIG
template std::chrono::microseconds benchmark_convolution<uint8_t, int8_t, int8_t>(
size_t, size_t, size_t, size_t, size_t, size_t, size_t,
size_t, size_t, size_t, size_t, size_t, size_t,
cudnnTensorFormat_t, cudnnTensorFormat_t, cudnnTensorFormat_t,
cudnnDataType_t, cudnnDataType_t, cudnnDataType_t, cudnnDataType_t, int);
// UINT8*_EXT CONFIG
template std::chrono::microseconds benchmark_convolution<uint8_t, int8_t, float>(
size_t, size_t, size_t, size_t, size_t, size_t, size_t,
size_t, size_t, size_t, size_t, size_t, size_t,
cudnnTensorFormat_t, cudnnTensorFormat_t, cudnnTensorFormat_t,
cudnnDataType_t, cudnnDataType_t, cudnnDataType_t, cudnnDataType_t, int);
|
c2e8b13cc8730106acbc661d095dddb4ec46164c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
// @author [email protected]
// @author Yurii Shyrma ([email protected]), created on 08.11.2018
#ifndef PAIRWISE_INT_CU
#define PAIRWISE_INT_CU
#include "../pairwise_int.h"
using namespace simdOps;
////////////////////////////////////////////////////////////////////////////////
template <typename X, typename OpType>
__global__ static void pairwiseSimpleShaped(void* vx, Nd4jLong *xShapeInfo,
void *vy, Nd4jLong *yShapeInfo,
void *vz, Nd4jLong *zShapeInfo,
void *vextraParams) {
auto x = reinterpret_cast<X*>(vx);
auto y = reinterpret_cast<X*>(vy);
auto z = reinterpret_cast<X*>(vz);
auto extraParams = reinterpret_cast<X*>(vextraParams);
int tid = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ int xEws;
__shared__ int yEws;
__shared__ int zEws;
__shared__ char xOrder;
__shared__ char yOrder;
__shared__ char zOrder;
__shared__ Nd4jLong len;
if (threadIdx.x == 0) {
xEws = shape::elementWiseStride(xShapeInfo);
yEws = shape::elementWiseStride(yShapeInfo);
zEws = shape::elementWiseStride(zShapeInfo);
xOrder = shape::order(xShapeInfo);
yOrder = shape::order(yShapeInfo);
zOrder = shape::order(zShapeInfo);
len = shape::length(xShapeInfo);
}
__syncthreads();
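// Three cases: a fast strided path when all element-wise strides are valid and the orders match,
// an in-place path (vx == vz) that reuses the x offset for the output, and a fully general path.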
if (xEws >= 1 && yEws >= 1 && zEws >= 1 && xOrder == yOrder && xOrder == zOrder) {
for (Nd4jLong i = tid; i < len; i += gridDim.x * blockDim.x) {
z[i * zEws] = OpType::op(x[i * xEws], y[i * yEws], extraParams);
}
}
else if (vx == vz) {
for (Nd4jLong i = tid; i < len; i += gridDim.x * blockDim.x) {
auto xOffset = shape::getIndexOffset(i, xShapeInfo);
auto yOffset = shape::getIndexOffset(i, yShapeInfo);
z[xOffset] = OpType::op(x[xOffset], y[yOffset], extraParams);
}
}
else {
for (Nd4jLong i = tid; i < len; i += gridDim.x * blockDim.x) {
auto xOffset = shape::getIndexOffset(i, xShapeInfo);
auto yOffset = shape::getIndexOffset(i, yShapeInfo);
auto zOffset = shape::getIndexOffset(i, zShapeInfo);
z[zOffset] = OpType::op(x[xOffset], y[yOffset], extraParams);
}
}
}
namespace functions {
namespace pairwise_transforms {
////////////////////////////////////////////////////////////////////////////////
template<typename X>
template<typename OpType>
void _CUDA_H PairWiseIntTransform<X>::intermediateShaped(dim3& launchDims, hipStream_t *stream,
void *vx, Nd4jLong *xShapeInfo,
void *vy, Nd4jLong *yShapeInfo,
void *vz, Nd4jLong *zShapeInfo,
void *vextraParams){
hipLaunchKernelGGL(( pairwiseSimpleShaped<X, OpType>), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, vx, xShapeInfo, vy, yShapeInfo, vz, zShapeInfo, vextraParams);
}
////////////////////////////////////////////////////////////////////////////////
template<typename X>
void PairWiseIntTransform<X>::executeCudaShaped(dim3& launchDims, hipStream_t *stream, int opNum, void *vx, Nd4jLong *xShapeInfo, void *vy, Nd4jLong *yShapeInfo, void *vz, Nd4jLong *zShapeInfo, void *vextraParams) {
auto xType = sd::DataTypeUtils::fromT<X>();
DISPATCH_BY_OPNUM_T(intermediateShaped, PARAMS(launchDims, stream, vx, xShapeInfo, vy, yShapeInfo, vz, zShapeInfo, vextraParams), PAIRWISE_INT_OPS);
}
BUILD_SINGLE_TEMPLATE(template class ND4J_EXPORT PairWiseIntTransform, , INTEGER_TYPES);
}
}
#endif // PAIRWISE_INT_CU
|
c2e8b13cc8730106acbc661d095dddb4ec46164c.cu
|
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
// @author [email protected]
// @author Yurii Shyrma ([email protected]), created on 08.11.2018
#ifndef PAIRWISE_INT_CU
#define PAIRWISE_INT_CU
#include "../pairwise_int.h"
using namespace simdOps;
////////////////////////////////////////////////////////////////////////////////
template <typename X, typename OpType>
__global__ static void pairwiseSimpleShaped(void* vx, Nd4jLong *xShapeInfo,
void *vy, Nd4jLong *yShapeInfo,
void *vz, Nd4jLong *zShapeInfo,
void *vextraParams) {
auto x = reinterpret_cast<X*>(vx);
auto y = reinterpret_cast<X*>(vy);
auto z = reinterpret_cast<X*>(vz);
auto extraParams = reinterpret_cast<X*>(vextraParams);
int tid = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ int xEws;
__shared__ int yEws;
__shared__ int zEws;
__shared__ char xOrder;
__shared__ char yOrder;
__shared__ char zOrder;
__shared__ Nd4jLong len;
if (threadIdx.x == 0) {
xEws = shape::elementWiseStride(xShapeInfo);
yEws = shape::elementWiseStride(yShapeInfo);
zEws = shape::elementWiseStride(zShapeInfo);
xOrder = shape::order(xShapeInfo);
yOrder = shape::order(yShapeInfo);
zOrder = shape::order(zShapeInfo);
len = shape::length(xShapeInfo);
}
__syncthreads();
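// Three cases: a fast strided path when all element-wise strides are valid and the orders match,
// an in-place path (vx == vz) that reuses the x offset for the output, and a fully general path.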
if (xEws >= 1 && yEws >= 1 && zEws >= 1 && xOrder == yOrder && xOrder == zOrder) {
for (Nd4jLong i = tid; i < len; i += gridDim.x * blockDim.x) {
z[i * zEws] = OpType::op(x[i * xEws], y[i * yEws], extraParams);
}
}
else if (vx == vz) {
for (Nd4jLong i = tid; i < len; i += gridDim.x * blockDim.x) {
auto xOffset = shape::getIndexOffset(i, xShapeInfo);
auto yOffset = shape::getIndexOffset(i, yShapeInfo);
z[xOffset] = OpType::op(x[xOffset], y[yOffset], extraParams);
}
}
else {
for (Nd4jLong i = tid; i < len; i += gridDim.x * blockDim.x) {
auto xOffset = shape::getIndexOffset(i, xShapeInfo);
auto yOffset = shape::getIndexOffset(i, yShapeInfo);
auto zOffset = shape::getIndexOffset(i, zShapeInfo);
z[zOffset] = OpType::op(x[xOffset], y[yOffset], extraParams);
}
}
}
namespace functions {
namespace pairwise_transforms {
////////////////////////////////////////////////////////////////////////////////
template<typename X>
template<typename OpType>
void _CUDA_H PairWiseIntTransform<X>::intermediateShaped(dim3& launchDims, cudaStream_t *stream,
void *vx, Nd4jLong *xShapeInfo,
void *vy, Nd4jLong *yShapeInfo,
void *vz, Nd4jLong *zShapeInfo,
void *vextraParams){
pairwiseSimpleShaped<X, OpType><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(vx, xShapeInfo, vy, yShapeInfo, vz, zShapeInfo, vextraParams);
}
////////////////////////////////////////////////////////////////////////////////
template<typename X>
void PairWiseIntTransform<X>::executeCudaShaped(dim3& launchDims, cudaStream_t *stream, int opNum, void *vx, Nd4jLong *xShapeInfo, void *vy, Nd4jLong *yShapeInfo, void *vz, Nd4jLong *zShapeInfo, void *vextraParams) {
auto xType = sd::DataTypeUtils::fromT<X>();
DISPATCH_BY_OPNUM_T(intermediateShaped, PARAMS(launchDims, stream, vx, xShapeInfo, vy, yShapeInfo, vz, zShapeInfo, vextraParams), PAIRWISE_INT_OPS);
}
BUILD_SINGLE_TEMPLATE(template class ND4J_EXPORT PairWiseIntTransform, , INTEGER_TYPES);
}
}
#endif // PAIRWISE_INT_CU
|
9f8706725599a6ea2a02fe36fe85e3b7a29a10d0.hip
|
// !!! This is a file automatically generated by hipify!!!
/**
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/**
* Vector addition: C = A + B.
*
* This sample is a very basic sample that implements element by element
* vector addition. It is the same as the sample illustrating Chapter 2
* of the programming guide with some additions like error checking.
*/
#include <stdio.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <hip/hip_runtime.h>
/**
* CUDA Kernel Device code
*
* Computes the vector addition of A and B into C. The 3 vectors have the same
* number of elements numElements.
*/
__global__ void
vectorAdd(const float *A, const float *B, float *C, int numElements)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < numElements)
{
C[i] = A[i] + B[i];
}
}
/**
* Host main routine
*/
int
main(void)
{
// Error code to check return values for CUDA calls
hipError_t err = hipSuccess;
// Print the vector length to be used, and compute its size
int numElements = 50000;
size_t size = numElements * sizeof(float);
printf("[Vector addition of %d elements]\n", numElements);
// Allocate the host input vector A
float *h_A = (float *)malloc(size);
// Allocate the host input vector B
float *h_B = (float *)malloc(size);
// Allocate the host output vector C
float *h_C = (float *)malloc(size);
// Verify that allocations succeeded
if (h_A == NULL || h_B == NULL || h_C == NULL)
{
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
// Initialize the host input vectors
for (int i = 0; i < numElements; ++i)
{
h_A[i] = rand()/(float)RAND_MAX;
h_B[i] = rand()/(float)RAND_MAX;
}
// Allocate the device input vector A
float *d_A = NULL;
err = hipMalloc((void **)&d_A, size);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device input vector B
float *d_B = NULL;
err = hipMalloc((void **)&d_B, size);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device output vector C
float *d_C = NULL;
err = hipMalloc((void **)&d_C, size);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the host input vectors A and B in host memory to the device input vectors in
// device memory
printf("Copy input data from the host memory to the CUDA device\n");
err = hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Launch the Vector Add CUDA Kernel
int threadsPerBlock = 256;
int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock;
printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
hipLaunchKernelGGL(( vectorAdd), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_A, d_B, d_C, numElements);
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the device result vector in device memory to the host result vector
// in host memory.
printf("Copy output data from the CUDA device to the host memory\n");
err = hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Verify that the result vector is correct
for (int i = 0; i < numElements; ++i)
{
if (fabs(h_A[i] + h_B[i] - h_C[i]) > 1e-5)
{
fprintf(stderr, "Result verification failed at element %d!\n", i);
// exit(EXIT_FAILURE);
}
}
printf("Test PASSED\n");
// Free device global memory
err = hipFree(d_A);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector A (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipFree(d_B);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector B (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipFree(d_C);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector C (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Free host memory
free(h_A);
free(h_B);
free(h_C);
// Reset the device and exit
// hipDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling hipDeviceReset causes all profile data to be
// flushed before the application exits
err = hipDeviceReset();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to deinitialize the device! error=%s\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("Done\n");
return 0;
}
|
9f8706725599a6ea2a02fe36fe85e3b7a29a10d0.cu
|
/**
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/**
* Vector addition: C = A + B.
*
* This sample is a very basic sample that implements element by element
* vector addition. It is the same as the sample illustrating Chapter 2
* of the programming guide with some additions like error checking.
*/
#include <stdio.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <cuda_runtime.h>
/**
* CUDA Kernel Device code
*
* Computes the vector addition of A and B into C. The 3 vectors have the same
* number of elements numElements.
*/
__global__ void
vectorAdd(const float *A, const float *B, float *C, int numElements)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < numElements)
{
C[i] = A[i] + B[i];
}
}
/**
* Host main routine
*/
int
main(void)
{
// Error code to check return values for CUDA calls
cudaError_t err = cudaSuccess;
// Print the vector length to be used, and compute its size
int numElements = 50000;
size_t size = numElements * sizeof(float);
printf("[Vector addition of %d elements]\n", numElements);
// Allocate the host input vector A
float *h_A = (float *)malloc(size);
// Allocate the host input vector B
float *h_B = (float *)malloc(size);
// Allocate the host output vector C
float *h_C = (float *)malloc(size);
// Verify that allocations succeeded
if (h_A == NULL || h_B == NULL || h_C == NULL)
{
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
// Initialize the host input vectors
for (int i = 0; i < numElements; ++i)
{
h_A[i] = rand()/(float)RAND_MAX;
h_B[i] = rand()/(float)RAND_MAX;
}
// Allocate the device input vector A
float *d_A = NULL;
err = cudaMalloc((void **)&d_A, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device input vector B
float *d_B = NULL;
err = cudaMalloc((void **)&d_B, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device output vector C
float *d_C = NULL;
err = cudaMalloc((void **)&d_C, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the host input vectors A and B in host memory to the device input vectors in
// device memory
printf("Copy input data from the host memory to the CUDA device\n");
err = cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Launch the Vector Add CUDA Kernel
int threadsPerBlock = 256;
int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock;
printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
vectorAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, numElements);
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the device result vector in device memory to the host result vector
// in host memory.
printf("Copy output data from the CUDA device to the host memory\n");
err = cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Verify that the result vector is correct
for (int i = 0; i < numElements; ++i)
{
if (fabs(h_A[i] + h_B[i] - h_C[i]) > 1e-5)
{
fprintf(stderr, "Result verification failed at element %d!\n", i);
// exit(EXIT_FAILURE);
}
}
printf("Test PASSED\n");
// Free device global memory
err = cudaFree(d_A);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector A (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_B);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector B (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_C);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector C (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Free host memory
free(h_A);
free(h_B);
free(h_C);
// Reset the device and exit
// cudaDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling cudaDeviceReset causes all profile data to be
// flushed before the application exits
err = cudaDeviceReset();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to deinitialize the device! error=%s\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("Done\n");
return 0;
}
|
fdd5d467b1fdb75552593e3af03771fd840e2515.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* This sample implements a separable convolution
* of a 2D image with an arbitrary filter.
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
unsigned int filter_radius;
typedef float myDataType;
#define MAX_XY 32
#define FILTER_LENGTH (2 * filter_radius + 1)
#define ABS(val) ((val)<0.0 ? (-(val)) : (val))
#define accuracy 0.00005
#define cudaCheckError() { \
hipError_t error=hipGetLastError(); \
if(error!=hipSuccess) { \
printf("ERROR IN CUDA %s:%d: '%s'\n",__FILE__,__LINE__,hipGetErrorString(error)); \
hipDeviceReset(); \
exit(EXIT_FAILURE); \
} \
}
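// Row pass of the separable convolution: each thread produces one output pixel by sweeping the
// 1-D filter horizontally (the column kernel below sweeps vertically). There is no bounds check
// on x/y, so the launch grid must tile the image exactly.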
__global__ void convolutionRowGPU(myDataType *d_Dst, myDataType *d_Src, myDataType *d_Filter,int imageW, int imageH, int filterR) {
int x,y,k,d;
x = blockIdx.x*blockDim.x + threadIdx.x;
y = blockIdx.y*blockDim.y + threadIdx.y;
myDataType sum = 0;
for(k = -filterR; k <= filterR; k++) {
d = x + k;
if(d >= 0 && d < imageW) {
sum += d_Src[y * imageW + d] * d_Filter[filterR -k];
}
}
//printf("ROW X:%d Y:%d SUM:%f\n\n",threadIdx.x,threadIdx.y,sum);
d_Dst[y*imageW + x] = sum;
}
__global__ void convolutionColumnGPU(myDataType *d_Dst, myDataType *d_Src, myDataType *d_Filter,
int imageW, int imageH, int filterR) {
int x,y,k,d;
x = blockIdx.x*blockDim.x + threadIdx.x;
y = blockIdx.y*blockDim.y + threadIdx.y;
myDataType sum = 0;
for(k = -filterR; k <= filterR; k++) {
d = y + k;
if(d >= 0 && d < imageH) {
sum += d_Src[d * imageW + x] * d_Filter[filterR -k];
//printf("X:%d Y:%d SUM:%f\n\n",threadIdx.x,threadIdx.y,sum);
}
}
//printf("COL X:%d Y:%d SUM:%f\n\n",threadIdx.x,threadIdx.y,sum);
d_Dst[y * imageW + x] = sum;
}
////////////////////////////////////////////////////////////////////////////////
// Reference row convolution filter
////////////////////////////////////////////////////////////////////////////////
void convolutionRowCPU(myDataType *h_Dst, myDataType *h_Src, myDataType *h_Filter,
int imageW, int imageH, int filterR) {
int x, y, k;
for (y = 0; y < imageH; y++) {
for (x = 0; x < imageW; x++) {
myDataType sum = 0;
for (k = -filterR; k <= filterR; k++) {
int d = x + k;
if (d >= 0 && d < imageW) {
sum += h_Src[y * imageW + d] * h_Filter[filterR - k];
}
}
//printf("ROW X:%d Y:%d SUM:%f\n\n",x,y,sum);
h_Dst[y * imageW + x] = sum;
}
}
}
////////////////////////////////////////////////////////////////////////////////
// Reference column convolution filter
////////////////////////////////////////////////////////////////////////////////
void convolutionColumnCPU(myDataType *h_Dst, myDataType *h_Src, myDataType *h_Filter,
int imageW, int imageH, int filterR) {
int x, y, k;
for (y = 0; y < imageH; y++) {
for (x = 0; x < imageW; x++) {
myDataType sum = 0;
for (k = -filterR; k <= filterR; k++) {
int d = y + k;
if (d >= 0 && d < imageH) {
sum += h_Src[d * imageW + x] * h_Filter[filterR - k];
}
}
//printf("COL X:%d Y:%d SUM:%f\n\n",x,y,sum);
h_Dst[y * imageW + x] = sum;
}
}
}
//
////////////////////////////////////////////////////////////////////////////////
// Main program
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv) {
myDataType
*h_Filter,
*h_Input,
*h_Buffer,
*h_OutputCPU,
*d_Filter,
*d_Input,
*d_Buffer,
*d_OutputGPU,
*h_OutputGPU;
int imageW;
int imageH;
//int i=MAX_XY;
//int count=0;
unsigned int i;
double timing;
clock_t start;
clock_t end;
printf("Enter filter radius : ");
scanf("%d", &filter_radius);
// The user supplies imageW and imageH and we assume they are equal,
// i.e. imageW = imageH = N, where N is given by the user.
// For simplicity only square images are handled.
printf("Enter image size. Should be a power of two and greater than %u : ", FILTER_LENGTH);
scanf("%d", &imageW);
imageH = imageW;
// while(1){
// if(imageW % i == 0) {
// dim3 threads(i,i);
// dim3 blocks(imageW/i,imageW/i);
// break;
// }
// i--;
// }
dim3 threads(MAX_XY,MAX_XY);
dim3 blocks (imageH/MAX_XY,imageW/MAX_XY);
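// Note: this launch configuration assumes imageW and imageH are exact
// multiples of MAX_XY (32); smaller or non-multiple sizes would give a
// zero-sized or truncated grid (see the commented-out alternatives nearby).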
// if(imageH < MAX_XY && imageW < MAX_XY){
// threads = (imageH,imageH);
// blocks = (1,1);
// }
// else{
// threads = (MAX_XY,MAX_XY);
// blocks = (imageW/MAX_XY,imageW/MAX_XY);
// }
printf("Image Width x Height = %i x %i\n\n", imageW, imageH);
printf("Allocating and initializing host arrays and device array...\n");
// It would also be a good idea to check the result of each malloc...
h_Filter = (myDataType *)malloc(FILTER_LENGTH * sizeof(myDataType));
h_Input = (myDataType *)malloc(imageW * imageH * sizeof(myDataType));
h_Buffer = (myDataType *)malloc(imageW * imageH * sizeof(myDataType));
h_OutputCPU = (myDataType *)malloc(imageW * imageH * sizeof(myDataType));
h_OutputGPU = (myDataType *)malloc(imageW * imageH * sizeof(myDataType));
if (h_Filter == NULL || h_Input == NULL || h_Buffer == NULL || h_OutputCPU == NULL || h_OutputGPU == NULL){
printf("Something went wrong while allocating host memory\n");
exit(EXIT_FAILURE);
}
printf("Memory allocation for host arrays: COMPLETED \n");
hipMallocManaged((void**)&d_Filter,FILTER_LENGTH * sizeof(myDataType));
hipMallocManaged((void**)&d_Input,imageH * imageW * sizeof(myDataType));
hipMallocManaged((void**)&d_Buffer,imageH * imageW * sizeof(myDataType));
hipMallocManaged((void**)&d_OutputGPU,imageH * imageW * sizeof(myDataType));
cudaCheckError();
printf("Memmory allocation for device arrays: COMPLETED \n");
// 'h_Filter' is the filter used for the convolution and is initialized
// randomly. 'h_Input' is the image on which the convolution is performed,
// also initialized randomly.
srand(200);
for (i = 0; i < FILTER_LENGTH; i++) {
h_Filter[i] = (myDataType)(rand() % 16);
}
for (i = 0; i < imageW * imageH; i++) {
h_Input[i] = (myDataType)rand() / ((myDataType)RAND_MAX / 255) + (myDataType)rand() / (myDataType)RAND_MAX;
}
printf("initialization of host arrays: COMPLETED \n");
hipMemcpy(d_Filter, h_Filter,FILTER_LENGTH * sizeof(myDataType),hipMemcpyHostToDevice);
hipMemcpy(d_Input, h_Input,imageH * imageW * sizeof(myDataType),hipMemcpyHostToDevice);
cudaCheckError();
printf("initialization of device arrays: COMPLETED \n\n");
printf("GPU computation...\n");
hipLaunchKernelGGL(( convolutionRowGPU), dim3(blocks),dim3(threads), 0, 0, d_Buffer,d_Input,d_Filter,imageW,imageH,filter_radius);
cudaCheckError();
hipDeviceSynchronize();
hipLaunchKernelGGL(( convolutionColumnGPU), dim3(blocks),dim3(threads), 0, 0, d_OutputGPU,d_Buffer,d_Filter,imageW,imageH,filter_radius);
cudaCheckError();
printf("GPU computation : COMPLETED\n\n");
hipMemcpy(h_OutputGPU,d_OutputGPU,imageH * imageW * sizeof(myDataType),hipMemcpyDeviceToHost);
// The part below runs on the CPU; the GPU results are compared against it.
printf("CPU computation...\n");
start = clock();
convolutionRowCPU(h_Buffer, h_Input, h_Filter, imageW, imageH, filter_radius); // convolution kata grammes
convolutionColumnCPU(h_OutputCPU, h_Buffer, h_Filter, imageW, imageH, filter_radius); // convolution kata sthles
end = clock();
timing = ((double) (end - start)) / CLOCKS_PER_SEC;
printf("CPU computation : COMPLETED in time:%10.5f\n",timing);
// Compare the GPU and CPU results; if any value differs by more than the
// defined accuracy threshold, report an error and terminate the program.
printf("\nCPU computations == GPU computation?\n");
for (i = 0; i < imageW * imageH; i++) {
if(h_OutputGPU[i] > h_OutputCPU[i] + accuracy || h_OutputGPU[i] < h_OutputCPU[i] - accuracy){
printf("CPU computations == GPU computation : FALSE line:%d difrence:%f \nExitting program...\n",i,h_OutputGPU[i]-h_OutputCPU[i]);
//count++;
// free all the allocated memory CPU
free(h_OutputCPU);
free(h_OutputGPU);
free(h_Buffer);
free(h_Input);
free(h_Filter);
// free all the allocated memory GPU
hipFree(d_OutputGPU);
hipFree(d_Buffer);
hipFree(d_Input);
hipFree(d_Filter);
cudaCheckError();
hipDeviceReset();
return(1);
}
}
printf("CPU computations == GPU computation : TRUE \nExitting program after Memmory Free...\n");
// free all the allocated memory CPU
free(h_OutputCPU);
free(h_OutputGPU);
free(h_Buffer);
free(h_Input);
free(h_Filter);
// free all the allocated memory GPU
hipFree(d_OutputGPU);
hipFree(d_Buffer);
hipFree(d_Input);
hipFree(d_Filter);
hipDeviceReset();
return 0;
}
|
fdd5d467b1fdb75552593e3af03771fd840e2515.cu
|
/*
* This sample implements a separable convolution
* of a 2D image with an arbitrary filter.
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
unsigned int filter_radius;
typedef float myDataType;
#define MAX_XY 32
#define FILTER_LENGTH (2 * filter_radius + 1)
#define ABS(val) ((val)<0.0 ? (-(val)) : (val))
#define accuracy 0.00005
#define cudaCheckError() { \
cudaError_t error=cudaGetLastError(); \
if(error!=cudaSuccess) { \
printf("ERROR IN CUDA %s:%d: '%s'\n",__FILE__,__LINE__,cudaGetErrorString(error)); \
cudaDeviceReset(); \
exit(EXIT_FAILURE); \
} \
}
__global__ void convolutionRowGPU(myDataType *d_Dst, myDataType *d_Src, myDataType *d_Filter,int imageW, int imageH, int filterR) {
int x,y,k,d;
x = blockIdx.x*blockDim.x + threadIdx.x;
y = blockIdx.y*blockDim.y + threadIdx.y;
myDataType sum = 0;
for(k = -filterR; k <= filterR; k++) {
d = x + k;
if(d >= 0 && d < imageW) {
sum += d_Src[y * imageW + d] * d_Filter[filterR -k];
}
}
//printf("ROW X:%d Y:%d SUM:%f\n\n",threadIdx.x,threadIdx.y,sum);
d_Dst[y*imageW + x] = sum;
}
__global__ void convolutionColumnGPU(myDataType *d_Dst, myDataType *d_Src, myDataType *d_Filter,
int imageW, int imageH, int filterR) {
int x,y,k,d;
x = blockIdx.x*blockDim.x + threadIdx.x;
y = blockIdx.y*blockDim.y + threadIdx.y;
myDataType sum = 0;
for(k = -filterR; k <= filterR; k++) {
d = y + k;
if(d >= 0 && d < imageH) {
sum += d_Src[d * imageW + x] * d_Filter[filterR -k];
//printf("X:%d Y:%d SUM:%f\n\n",threadIdx.x,threadIdx.y,sum);
}
}
//printf("COL X:%d Y:%d SUM:%f\n\n",threadIdx.x,threadIdx.y,sum);
d_Dst[y * imageW + x] = sum;
}
////////////////////////////////////////////////////////////////////////////////
// Reference row convolution filter
////////////////////////////////////////////////////////////////////////////////
void convolutionRowCPU(myDataType *h_Dst, myDataType *h_Src, myDataType *h_Filter,
int imageW, int imageH, int filterR) {
int x, y, k;
for (y = 0; y < imageH; y++) {
for (x = 0; x < imageW; x++) {
myDataType sum = 0;
for (k = -filterR; k <= filterR; k++) {
int d = x + k;
if (d >= 0 && d < imageW) {
sum += h_Src[y * imageW + d] * h_Filter[filterR - k];
}
}
//printf("ROW X:%d Y:%d SUM:%f\n\n",x,y,sum);
h_Dst[y * imageW + x] = sum;
}
}
}
////////////////////////////////////////////////////////////////////////////////
// Reference column convolution filter
////////////////////////////////////////////////////////////////////////////////
void convolutionColumnCPU(myDataType *h_Dst, myDataType *h_Src, myDataType *h_Filter,
int imageW, int imageH, int filterR) {
int x, y, k;
for (y = 0; y < imageH; y++) {
for (x = 0; x < imageW; x++) {
myDataType sum = 0;
for (k = -filterR; k <= filterR; k++) {
int d = y + k;
if (d >= 0 && d < imageH) {
sum += h_Src[d * imageW + x] * h_Filter[filterR - k];
}
}
//printf("COL X:%d Y:%d SUM:%f\n\n",x,y,sum);
h_Dst[y * imageW + x] = sum;
}
}
}
//
////////////////////////////////////////////////////////////////////////////////
// Main program
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv) {
myDataType
*h_Filter,
*h_Input,
*h_Buffer,
*h_OutputCPU,
*d_Filter,
*d_Input,
*d_Buffer,
*d_OutputGPU,
*h_OutputGPU;
int imageW;
int imageH;
//int i=MAX_XY;
//int count=0;
unsigned int i;
double timing;
clock_t start;
clock_t end;
printf("Enter filter radius : ");
scanf("%d", &filter_radius);
// The user supplies imageW and imageH and we assume they are equal,
// i.e. imageW = imageH = N, where N is given by the user.
// For simplicity only square images are handled.
printf("Enter image size. Should be a power of two and greater than %u : ", FILTER_LENGTH);
scanf("%d", &imageW);
imageH = imageW;
// while(1){
// if(imageW % i == 0) {
// dim3 threads(i,i);
// dim3 blocks(imageW/i,imageW/i);
// break;
// }
// i--;
// }
dim3 threads(MAX_XY,MAX_XY);
dim3 blocks (imageH/MAX_XY,imageW/MAX_XY);
// if(imageH < MAX_XY && imageW < MAX_XY){
// threads = (imageH,imageH);
// blocks = (1,1);
// }
// else{
// threads = (MAX_XY,MAX_XY);
// blocks = (imageW/MAX_XY,imageW/MAX_XY);
// }
printf("Image Width x Height = %i x %i\n\n", imageW, imageH);
printf("Allocating and initializing host arrays and device array...\n");
// It would also be a good idea to check the result of each malloc...
h_Filter = (myDataType *)malloc(FILTER_LENGTH * sizeof(myDataType));
h_Input = (myDataType *)malloc(imageW * imageH * sizeof(myDataType));
h_Buffer = (myDataType *)malloc(imageW * imageH * sizeof(myDataType));
h_OutputCPU = (myDataType *)malloc(imageW * imageH * sizeof(myDataType));
h_OutputGPU = (myDataType *)malloc(imageW * imageH * sizeof(myDataType));
if (h_Filter == NULL || h_Input == NULL || h_Buffer == NULL || h_OutputCPU == NULL || h_OutputGPU == NULL){
printf("Something went wrong while allocating host memory\n");
exit(EXIT_FAILURE);
}
printf("Memory allocation for host arrays: COMPLETED \n");
cudaMallocManaged((void**)&d_Filter,FILTER_LENGTH * sizeof(myDataType));
cudaMallocManaged((void**)&d_Input,imageH * imageW * sizeof(myDataType));
cudaMallocManaged((void**)&d_Buffer,imageH * imageW * sizeof(myDataType));
cudaMallocManaged((void**)&d_OutputGPU,imageH * imageW * sizeof(myDataType));
cudaCheckError();
printf("Memmory allocation for device arrays: COMPLETED \n");
// 'h_Filter' is the filter used for the convolution and is initialized
// randomly. 'h_Input' is the image on which the convolution is performed,
// also initialized randomly.
srand(200);
for (i = 0; i < FILTER_LENGTH; i++) {
h_Filter[i] = (myDataType)(rand() % 16);
}
for (i = 0; i < imageW * imageH; i++) {
h_Input[i] = (myDataType)rand() / ((myDataType)RAND_MAX / 255) + (myDataType)rand() / (myDataType)RAND_MAX;
}
printf("initialization of host arrays: COMPLETED \n");
cudaMemcpy(d_Filter, h_Filter,FILTER_LENGTH * sizeof(myDataType),cudaMemcpyHostToDevice);
cudaMemcpy(d_Input, h_Input,imageH * imageW * sizeof(myDataType),cudaMemcpyHostToDevice);
cudaCheckError();
printf("initialization of device arrays: COMPLETED \n\n");
printf("GPU computation...\n");
convolutionRowGPU<<<blocks,threads>>>(d_Buffer,d_Input,d_Filter,imageW,imageH,filter_radius);
cudaCheckError();
cudaDeviceSynchronize();
convolutionColumnGPU<<<blocks,threads>>>(d_OutputGPU,d_Buffer,d_Filter,imageW,imageH,filter_radius);
cudaCheckError();
printf("GPU computation : COMPLETED\n\n");
cudaMemcpy(h_OutputGPU,d_OutputGPU,imageH * imageW * sizeof(myDataType),cudaMemcpyDeviceToHost);
// The part below runs on the CPU; the GPU results are compared against it.
printf("CPU computation...\n");
start = clock();
convolutionRowCPU(h_Buffer, h_Input, h_Filter, imageW, imageH, filter_radius); // convolution kata grammes
convolutionColumnCPU(h_OutputCPU, h_Buffer, h_Filter, imageW, imageH, filter_radius); // convolution kata sthles
end = clock();
timing = ((double) (end - start)) / CLOCKS_PER_SEC;
printf("CPU computation : COMPLETED in time:%10.5f\n",timing);
// Compare the GPU and CPU results; if any value differs by more than the
// defined accuracy threshold, report an error and terminate the program.
printf("\nCPU computations == GPU computation?\n");
for (i = 0; i < imageW * imageH; i++) {
if(h_OutputGPU[i] > h_OutputCPU[i] + accuracy || h_OutputGPU[i] < h_OutputCPU[i] - accuracy){
printf("CPU computations == GPU computation : FALSE line:%d difrence:%f \nExitting program...\n",i,h_OutputGPU[i]-h_OutputCPU[i]);
//count++;
// free all the allocated memory CPU
free(h_OutputCPU);
free(h_OutputGPU);
free(h_Buffer);
free(h_Input);
free(h_Filter);
// free all the allocated memory GPU
cudaFree(d_OutputGPU);
cudaFree(d_Buffer);
cudaFree(d_Input);
cudaFree(d_Filter);
cudaCheckError();
cudaDeviceReset();
return(1);
}
}
printf("CPU computations == GPU computation : TRUE \nExitting program after Memmory Free...\n");
// free all the allocated memory CPU
free(h_OutputCPU);
free(h_OutputGPU);
free(h_Buffer);
free(h_Input);
free(h_Filter);
// free all the allocated memory GPU
cudaFree(d_OutputGPU);
cudaFree(d_Buffer);
cudaFree(d_Input);
cudaFree(d_Filter);
cudaDeviceReset();
return 0;
}
|
e71a81fe27a92d6c38e080ee4a6acb5ad02c9a42.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <gauge_field.h>
#include <color_spinor_field.h>
#include <clover_field.h>
#include <dslash.h>
#include <worker.h>
#include <dslash_policy.cuh>
#include <kernels/dslash_wilson_clover_preconditioned.cuh>
/**
This is the Wilson-clover preconditioned linear operator
*/
namespace quda
{
/**
@brief This is a helper class that is used to instantiate the
correct templated kernel for the dslash.
*/
template <typename Float, int nDim, int nColor, int nParity, bool dagger, bool xpay, KernelType kernel_type, typename Arg>
struct WilsonCloverPreconditionedLaunch {
static constexpr const char *kernel = "quda::wilsonCloverPreconditionedGPU"; // kernel name for jit compilation
template <typename Dslash>
inline static void launch(Dslash &dslash, TuneParam &tp, Arg &arg, const hipStream_t &stream)
{
static_assert(nParity == 1, "preconditioned wilson-clover operator only defined for nParity=1");
if (xpay && dagger) errorQuda("xpay operator only defined for not dagger");
dslash.launch(wilsonCloverPreconditionedGPU < Float, nDim, nColor, nParity, dagger && !xpay, xpay && !dagger,
kernel_type, Arg >, tp, arg, stream);
}
};
template <typename Float, int nDim, int nColor, typename Arg> class WilsonCloverPreconditioned : public Dslash<Float>
{
protected:
Arg &arg;
const ColorSpinorField &in;
public:
WilsonCloverPreconditioned(Arg &arg, const ColorSpinorField &out, const ColorSpinorField &in) :
Dslash<Float>(arg, out, in, "kernels/dslash_wilson_clover_preconditioned.cuh"),
arg(arg),
in(in)
{
}
virtual ~WilsonCloverPreconditioned() {}
void apply(const hipStream_t &stream)
{
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
Dslash<Float>::setParam(arg);
if (arg.nParity == 1) {
if (arg.xpay)
Dslash<Float>::template instantiate<WilsonCloverPreconditionedLaunch, nDim, nColor, 1, true>(tp, arg, stream);
else
Dslash<Float>::template instantiate<WilsonCloverPreconditionedLaunch, nDim, nColor, 1, false>(tp, arg, stream);
} else {
errorQuda("Preconditioned Wilson-clover operator not defined nParity=%d", arg.nParity);
}
}
long long flops() const
{
int clover_flops = 504;
long long flops = Dslash<Float>::flops();
switch (arg.kernel_type) {
case EXTERIOR_KERNEL_X:
case EXTERIOR_KERNEL_Y:
case EXTERIOR_KERNEL_Z:
case EXTERIOR_KERNEL_T: flops += clover_flops * 2 * in.GhostFace()[arg.kernel_type]; break;
case EXTERIOR_KERNEL_ALL:
flops += clover_flops * 2 * (in.GhostFace()[0] + in.GhostFace()[1] + in.GhostFace()[2] + in.GhostFace()[3]);
break;
case INTERIOR_KERNEL:
case KERNEL_POLICY:
flops += clover_flops * in.Volume();
if (arg.kernel_type == KERNEL_POLICY) break;
// now correct for flops done by exterior kernel
long long ghost_sites = 0;
for (int d = 0; d < 4; d++)
if (arg.commDim[d]) ghost_sites += 2 * in.GhostFace()[d];
flops -= clover_flops * ghost_sites;
break;
}
return flops;
}
long long bytes() const
{
bool isFixed = (in.Precision() == sizeof(short) || in.Precision() == sizeof(char)) ? true : false;
int clover_bytes = 72 * in.Precision() + (isFixed ? 2 * sizeof(float) : 0);
long long bytes = Dslash<Float>::bytes();
switch (arg.kernel_type) {
case EXTERIOR_KERNEL_X:
case EXTERIOR_KERNEL_Y:
case EXTERIOR_KERNEL_Z:
case EXTERIOR_KERNEL_T: bytes += clover_bytes * 2 * in.GhostFace()[arg.kernel_type]; break;
case EXTERIOR_KERNEL_ALL:
bytes += clover_bytes * 2 * (in.GhostFace()[0] + in.GhostFace()[1] + in.GhostFace()[2] + in.GhostFace()[3]);
break;
case INTERIOR_KERNEL:
case KERNEL_POLICY:
bytes += clover_bytes * in.Volume();
if (arg.kernel_type == KERNEL_POLICY) break;
// now correct for bytes done by exterior kernel
long long ghost_sites = 0;
for (int d = 0; d < 4; d++)
if (arg.commDim[d]) ghost_sites += 2 * in.GhostFace()[d];
bytes -= clover_bytes * ghost_sites;
break;
}
return bytes;
}
TuneKey tuneKey() const
{
return TuneKey(in.VolString(), typeid(*this).name(), Dslash<Float>::aux[arg.kernel_type]);
}
};
template <typename Float, int nColor, QudaReconstructType recon> struct WilsonCloverPreconditionedApply {
inline WilsonCloverPreconditionedApply(ColorSpinorField &out, const ColorSpinorField &in, const GaugeField &U,
const CloverField &A, double a, const ColorSpinorField &x, int parity, bool dagger, const int *comm_override,
TimeProfile &profile)
{
constexpr int nDim = 4;
#ifdef DYNAMIC_CLOVER
constexpr bool dynamic_clover = true;
#else
constexpr bool dynamic_clover = false;
#endif
WilsonCloverArg<Float, nColor, recon, dynamic_clover> arg(out, in, U, A, a, x, parity, dagger, comm_override);
WilsonCloverPreconditioned<Float, nDim, nColor, WilsonCloverArg<Float, nColor, recon, dynamic_clover>> wilson(
arg, out, in);
dslash::DslashPolicyTune<decltype(wilson)> policy(wilson,
const_cast<cudaColorSpinorField *>(static_cast<const cudaColorSpinorField *>(&in)), in.VolumeCB(),
in.GhostFaceCB(), profile);
policy.apply(0);
checkCudaError();
}
};
// Apply the preconditioned Wilson-clover operator
// out(x) = M*in = a * A(x)^{-1} (\sum_mu U_{-\mu}(x)in(x+mu) + U^\dagger_mu(x-mu)in(x-mu))
// Uses the kappa normalization for the Wilson operator.
void ApplyWilsonCloverPreconditioned(ColorSpinorField &out, const ColorSpinorField &in, const GaugeField &U,
const CloverField &A, double a, const ColorSpinorField &x, int parity, bool dagger, const int *comm_override,
TimeProfile &profile)
{
#ifdef GPU_CLOVER_DIRAC
if (in.V() == out.V()) errorQuda("Aliasing pointers");
if (in.FieldOrder() != out.FieldOrder())
errorQuda("Field order mismatch in = %d, out = %d", in.FieldOrder(), out.FieldOrder());
// check all precisions match
checkPrecision(out, in, U, A);
// check all locations match
checkLocation(out, in, U, A);
instantiate<WilsonCloverPreconditionedApply>(out, in, U, A, a, x, parity, dagger, comm_override, profile);
#else
errorQuda("Clover dslash has not been built");
#endif
}
} // namespace quda
|
e71a81fe27a92d6c38e080ee4a6acb5ad02c9a42.cu
|
#include <gauge_field.h>
#include <color_spinor_field.h>
#include <clover_field.h>
#include <dslash.h>
#include <worker.h>
#include <dslash_policy.cuh>
#include <kernels/dslash_wilson_clover_preconditioned.cuh>
/**
This is the Wilson-clover preconditioned linear operator
*/
namespace quda
{
/**
@brief This is a helper class that is used to instantiate the
correct templated kernel for the dslash.
*/
template <typename Float, int nDim, int nColor, int nParity, bool dagger, bool xpay, KernelType kernel_type, typename Arg>
struct WilsonCloverPreconditionedLaunch {
static constexpr const char *kernel = "quda::wilsonCloverPreconditionedGPU"; // kernel name for jit compilation
template <typename Dslash>
inline static void launch(Dslash &dslash, TuneParam &tp, Arg &arg, const cudaStream_t &stream)
{
static_assert(nParity == 1, "preconditioned wilson-clover operator only defined for nParity=1");
if (xpay && dagger) errorQuda("xpay operator only defined for not dagger");
dslash.launch(wilsonCloverPreconditionedGPU < Float, nDim, nColor, nParity, dagger && !xpay, xpay && !dagger,
kernel_type, Arg >, tp, arg, stream);
}
};
template <typename Float, int nDim, int nColor, typename Arg> class WilsonCloverPreconditioned : public Dslash<Float>
{
protected:
Arg &arg;
const ColorSpinorField &in;
public:
WilsonCloverPreconditioned(Arg &arg, const ColorSpinorField &out, const ColorSpinorField &in) :
Dslash<Float>(arg, out, in, "kernels/dslash_wilson_clover_preconditioned.cuh"),
arg(arg),
in(in)
{
}
virtual ~WilsonCloverPreconditioned() {}
void apply(const cudaStream_t &stream)
{
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
Dslash<Float>::setParam(arg);
if (arg.nParity == 1) {
if (arg.xpay)
Dslash<Float>::template instantiate<WilsonCloverPreconditionedLaunch, nDim, nColor, 1, true>(tp, arg, stream);
else
Dslash<Float>::template instantiate<WilsonCloverPreconditionedLaunch, nDim, nColor, 1, false>(tp, arg, stream);
} else {
errorQuda("Preconditioned Wilson-clover operator not defined nParity=%d", arg.nParity);
}
}
long long flops() const
{
int clover_flops = 504;
long long flops = Dslash<Float>::flops();
switch (arg.kernel_type) {
case EXTERIOR_KERNEL_X:
case EXTERIOR_KERNEL_Y:
case EXTERIOR_KERNEL_Z:
case EXTERIOR_KERNEL_T: flops += clover_flops * 2 * in.GhostFace()[arg.kernel_type]; break;
case EXTERIOR_KERNEL_ALL:
flops += clover_flops * 2 * (in.GhostFace()[0] + in.GhostFace()[1] + in.GhostFace()[2] + in.GhostFace()[3]);
break;
case INTERIOR_KERNEL:
case KERNEL_POLICY:
flops += clover_flops * in.Volume();
if (arg.kernel_type == KERNEL_POLICY) break;
// now correct for flops done by exterior kernel
long long ghost_sites = 0;
for (int d = 0; d < 4; d++)
if (arg.commDim[d]) ghost_sites += 2 * in.GhostFace()[d];
flops -= clover_flops * ghost_sites;
break;
}
return flops;
}
long long bytes() const
{
bool isFixed = (in.Precision() == sizeof(short) || in.Precision() == sizeof(char)) ? true : false;
int clover_bytes = 72 * in.Precision() + (isFixed ? 2 * sizeof(float) : 0);
long long bytes = Dslash<Float>::bytes();
switch (arg.kernel_type) {
case EXTERIOR_KERNEL_X:
case EXTERIOR_KERNEL_Y:
case EXTERIOR_KERNEL_Z:
case EXTERIOR_KERNEL_T: bytes += clover_bytes * 2 * in.GhostFace()[arg.kernel_type]; break;
case EXTERIOR_KERNEL_ALL:
bytes += clover_bytes * 2 * (in.GhostFace()[0] + in.GhostFace()[1] + in.GhostFace()[2] + in.GhostFace()[3]);
break;
case INTERIOR_KERNEL:
case KERNEL_POLICY:
bytes += clover_bytes * in.Volume();
if (arg.kernel_type == KERNEL_POLICY) break;
// now correct for bytes done by exterior kernel
long long ghost_sites = 0;
for (int d = 0; d < 4; d++)
if (arg.commDim[d]) ghost_sites += 2 * in.GhostFace()[d];
bytes -= clover_bytes * ghost_sites;
break;
}
return bytes;
}
TuneKey tuneKey() const
{
return TuneKey(in.VolString(), typeid(*this).name(), Dslash<Float>::aux[arg.kernel_type]);
}
};
template <typename Float, int nColor, QudaReconstructType recon> struct WilsonCloverPreconditionedApply {
inline WilsonCloverPreconditionedApply(ColorSpinorField &out, const ColorSpinorField &in, const GaugeField &U,
const CloverField &A, double a, const ColorSpinorField &x, int parity, bool dagger, const int *comm_override,
TimeProfile &profile)
{
constexpr int nDim = 4;
#ifdef DYNAMIC_CLOVER
constexpr bool dynamic_clover = true;
#else
constexpr bool dynamic_clover = false;
#endif
WilsonCloverArg<Float, nColor, recon, dynamic_clover> arg(out, in, U, A, a, x, parity, dagger, comm_override);
WilsonCloverPreconditioned<Float, nDim, nColor, WilsonCloverArg<Float, nColor, recon, dynamic_clover>> wilson(
arg, out, in);
dslash::DslashPolicyTune<decltype(wilson)> policy(wilson,
const_cast<cudaColorSpinorField *>(static_cast<const cudaColorSpinorField *>(&in)), in.VolumeCB(),
in.GhostFaceCB(), profile);
policy.apply(0);
checkCudaError();
}
};
// Apply the preconditioned Wilson-clover operator
// out(x) = M*in = a * A(x)^{-1} (\sum_mu U_{-\mu}(x)in(x+mu) + U^\dagger_mu(x-mu)in(x-mu))
// Uses the kappa normalization for the Wilson operator.
void ApplyWilsonCloverPreconditioned(ColorSpinorField &out, const ColorSpinorField &in, const GaugeField &U,
const CloverField &A, double a, const ColorSpinorField &x, int parity, bool dagger, const int *comm_override,
TimeProfile &profile)
{
#ifdef GPU_CLOVER_DIRAC
if (in.V() == out.V()) errorQuda("Aliasing pointers");
if (in.FieldOrder() != out.FieldOrder())
errorQuda("Field order mismatch in = %d, out = %d", in.FieldOrder(), out.FieldOrder());
// check all precisions match
checkPrecision(out, in, U, A);
// check all locations match
checkLocation(out, in, U, A);
instantiate<WilsonCloverPreconditionedApply>(out, in, U, A, a, x, parity, dagger, comm_override, profile);
#else
errorQuda("Clover dslash has not been built");
#endif
}
} // namespace quda
|
8c4a5c1646da10d0a0cb22da34fd04f51a6d726f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* NVIDIA Corporation is strictly prohibited.
*
* Please refer to the applicable NVIDIA end user license agreement (EULA)
* associated with this source code for terms and conditions that govern
* your use of this NVIDIA software.
*
*/
/*
* This sample evaluates fair call and put prices for a
* given set of European options by Black-Scholes formula.
* See supplied whitepaper for more explanations.
*/
// Utilities and system includes
#include <shrUtils.h>
#include <cutil_inline.h>
////////////////////////////////////////////////////////////////////////////////
// Process an array of optN options on CPU
////////////////////////////////////////////////////////////////////////////////
extern "C" void BlackScholesCPU(
float *h_CallResult,
float *h_PutResult,
float *h_StockPrice,
float *h_OptionStrike,
float *h_OptionYears,
float Riskfree,
float Volatility,
int optN
);
////////////////////////////////////////////////////////////////////////////////
// Process an array of OptN options on GPU
////////////////////////////////////////////////////////////////////////////////
#include "BlackScholes_kernel.cuh"
////////////////////////////////////////////////////////////////////////////////
// Helper function, returning uniformly distributed
// random float in [low, high] range
////////////////////////////////////////////////////////////////////////////////
float RandFloat(float low, float high){
float t = (float)rand() / (float)RAND_MAX;
return (1.0f - t) * low + t * high;
}
////////////////////////////////////////////////////////////////////////////////
// Data configuration
////////////////////////////////////////////////////////////////////////////////
//const int OPT_N = 4000000;
#ifdef __DEVICE_EMULATION__
const int NUM_ITERATIONS = 1;
#else
const int NUM_ITERATIONS = 512;
#endif
//const int OPT_SZ = OPT_N * sizeof(float);
const float RISKFREE = 0.02f;
const float VOLATILITY = 0.30f;
////////////////////////////////////////////////////////////////////////////////
// Main program
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv)
{
printf("[BlackScholes]\n");
// Start logs
shrSetLogFileName ("BlackScholes.txt");
shrLog("%s Starting...\n\n", argv[0]);
if (argc < 2) {
shrLog("Usage: %s <number of options>\n", argv[0]);
return 1;
}
const int OPT_N = atoi(argv[1]);
const int OPT_SZ = OPT_N * sizeof(float);
//'h_' prefix - CPU (host) memory space
float
//Results calculated by CPU for reference
*h_CallResultCPU,
*h_PutResultCPU,
//CPU copy of GPU results
*h_CallResultGPU,
*h_PutResultGPU,
//CPU instance of input data
*h_StockPrice,
*h_OptionStrike,
*h_OptionYears;
//'d_' prefix - GPU (device) memory space
float
//Results calculated by GPU
*d_CallResult,
*d_PutResult,
//GPU instance of input data
*d_StockPrice,
*d_OptionStrike,
*d_OptionYears;
double
delta, ref, sum_delta, sum_ref, max_delta, L1norm, gpuTime;
unsigned int hTimer;
int i;
if( cutCheckCmdLineFlag(argc, (const char**)argv, "device") )
cutilDeviceInit(argc, argv);
else
hipSetDevice( cutGetMaxGflopsDeviceId() );
cutilCheckError( cutCreateTimer(&hTimer) );
shrLog("Initializing data...\n");
shrLog("...allocating CPU memory for options.\n");
h_CallResultCPU = (float *)malloc(OPT_SZ);
h_PutResultCPU = (float *)malloc(OPT_SZ);
h_CallResultGPU = (float *)malloc(OPT_SZ);
h_PutResultGPU = (float *)malloc(OPT_SZ);
h_StockPrice = (float *)malloc(OPT_SZ);
h_OptionStrike = (float *)malloc(OPT_SZ);
h_OptionYears = (float *)malloc(OPT_SZ);
shrLog("...allocating GPU memory for options.\n");
cutilSafeCall( hipMalloc((void **)&d_CallResult, OPT_SZ) );
cutilSafeCall( hipMalloc((void **)&d_PutResult, OPT_SZ) );
cutilSafeCall( hipMalloc((void **)&d_StockPrice, OPT_SZ) );
cutilSafeCall( hipMalloc((void **)&d_OptionStrike, OPT_SZ) );
cutilSafeCall( hipMalloc((void **)&d_OptionYears, OPT_SZ) );
shrLog("...generating input data in CPU mem.\n");
srand(5347);
//Generate options set
for(i = 0; i < OPT_N; i++){
h_CallResultCPU[i] = 0.0f;
h_PutResultCPU[i] = -1.0f;
h_StockPrice[i] = RandFloat(5.0f, 30.0f);
h_OptionStrike[i] = RandFloat(1.0f, 100.0f);
h_OptionYears[i] = RandFloat(0.25f, 10.0f);
}
shrLog("...copying input data to GPU mem.\n");
//Copy options data to GPU memory for further processing
cutilSafeCall( hipMemcpy(d_StockPrice, h_StockPrice, OPT_SZ, hipMemcpyHostToDevice) );
cutilSafeCall( hipMemcpy(d_OptionStrike, h_OptionStrike, OPT_SZ, hipMemcpyHostToDevice) );
cutilSafeCall( hipMemcpy(d_OptionYears, h_OptionYears, OPT_SZ, hipMemcpyHostToDevice) );
shrLog("Data init done.\n\n");
shrLog("Executing Black-Scholes GPU kernel (%i iterations)...\n", NUM_ITERATIONS);
// cutilSafeCall( hipDeviceSynchronize() );
cutilCheckError( cutResetTimer(hTimer) );
cutilCheckError( cutStartTimer(hTimer) );
for(i = 0; i < NUM_ITERATIONS; i++){
hipLaunchKernelGGL(( BlackScholesGPU), dim3(480), dim3(128), 0, 0,
d_CallResult,
d_PutResult,
d_StockPrice,
d_OptionStrike,
d_OptionYears,
RISKFREE,
VOLATILITY,
OPT_N
);
cutilCheckMsg("BlackScholesGPU() execution failed\n");
}
cutilSafeCall( hipDeviceSynchronize() );
cutilCheckError( cutStopTimer(hTimer) );
gpuTime = cutGetTimerValue(hTimer) / NUM_ITERATIONS;
//Both call and put is calculated
shrLog("Options count : %i \n", 2 * OPT_N);
shrLog("BlackScholesGPU() time : %f msec\n", gpuTime);
shrLog("Effective memory bandwidth: %f GB/s\n", ((double)(5 * OPT_N * sizeof(float)) * 1E-9) / (gpuTime * 1E-3));
shrLog("Gigaoptions per second : %f \n\n", ((double)(2 * OPT_N) * 1E-9) / (gpuTime * 1E-3));
shrLogEx(LOGBOTH | MASTER, 0, "BlackScholes, Throughput = %.4f GOptions/s, Time = %.5f s, Size = %u options, NumDevsUsed = %u, Workgroup = %u\n",
(((double)(2.0 * OPT_N) * 1.0E-9) / (gpuTime * 1.0E-3)), gpuTime*1e-3, (2 * OPT_N), 1, 128);
shrLog("\nReading back GPU results...\n");
//Read back GPU results to compare them to CPU results
cutilSafeCall( hipMemcpy(h_CallResultGPU, d_CallResult, OPT_SZ, hipMemcpyDeviceToHost) );
cutilSafeCall( hipMemcpy(h_PutResultGPU, d_PutResult, OPT_SZ, hipMemcpyDeviceToHost) );
shrLog("Checking the results...\n");
shrLog("...running CPU calculations.\n\n");
//Calculate options values on CPU
BlackScholesCPU(
h_CallResultCPU,
h_PutResultCPU,
h_StockPrice,
h_OptionStrike,
h_OptionYears,
RISKFREE,
VOLATILITY,
OPT_N
);
shrLog("Comparing the results...\n");
//Calculate max absolute difference and L1 distance
//between CPU and GPU results
sum_delta = 0;
sum_ref = 0;
max_delta = 0;
for(i = 0; i < OPT_N; i++){
ref = h_CallResultCPU[i];
delta = fabs(h_CallResultCPU[i] - h_CallResultGPU[i]);
if(delta > max_delta) max_delta = delta;
sum_delta += delta;
sum_ref += fabs(ref);
}
L1norm = sum_delta / sum_ref;
shrLog("L1 norm: %E\n", L1norm);
shrLog("Max absolute error: %E\n\n", max_delta);
shrLog("Shutting down...\n");
shrLog("...releasing GPU memory.\n");
cutilSafeCall( hipFree(d_OptionYears) );
cutilSafeCall( hipFree(d_OptionStrike) );
cutilSafeCall( hipFree(d_StockPrice) );
cutilSafeCall( hipFree(d_PutResult) );
cutilSafeCall( hipFree(d_CallResult) );
shrLog("...releasing CPU memory.\n");
free(h_OptionYears);
free(h_OptionStrike);
free(h_StockPrice);
free(h_PutResultGPU);
free(h_CallResultGPU);
free(h_PutResultCPU);
free(h_CallResultCPU);
cutilCheckError( cutDeleteTimer(hTimer) );
shrLog("Shutdown done.\n");
printf("\n[BlackScholes] - Test Summary\n");
shrLog((L1norm < 1e-6) ? "PASSED\n\n" : "FAILED\n\n");
hipDeviceReset();
shrEXIT(argc, (const char**)argv);
}
|
8c4a5c1646da10d0a0cb22da34fd04f51a6d726f.cu
|
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* NVIDIA Corporation is strictly prohibited.
*
* Please refer to the applicable NVIDIA end user license agreement (EULA)
* associated with this source code for terms and conditions that govern
* your use of this NVIDIA software.
*
*/
/*
* This sample evaluates fair call and put prices for a
* given set of European options by Black-Scholes formula.
* See supplied whitepaper for more explanations.
*/
// Utilities and system includes
#include <shrUtils.h>
#include <cutil_inline.h>
////////////////////////////////////////////////////////////////////////////////
// Process an array of optN options on CPU
////////////////////////////////////////////////////////////////////////////////
extern "C" void BlackScholesCPU(
float *h_CallResult,
float *h_PutResult,
float *h_StockPrice,
float *h_OptionStrike,
float *h_OptionYears,
float Riskfree,
float Volatility,
int optN
);
////////////////////////////////////////////////////////////////////////////////
// Process an array of OptN options on GPU
////////////////////////////////////////////////////////////////////////////////
#include "BlackScholes_kernel.cuh"
////////////////////////////////////////////////////////////////////////////////
// Helper function, returning uniformly distributed
// random float in [low, high] range
////////////////////////////////////////////////////////////////////////////////
float RandFloat(float low, float high){
float t = (float)rand() / (float)RAND_MAX;
return (1.0f - t) * low + t * high;
}
////////////////////////////////////////////////////////////////////////////////
// Data configuration
////////////////////////////////////////////////////////////////////////////////
//const int OPT_N = 4000000;
#ifdef __DEVICE_EMULATION__
const int NUM_ITERATIONS = 1;
#else
const int NUM_ITERATIONS = 512;
#endif
//const int OPT_SZ = OPT_N * sizeof(float);
const float RISKFREE = 0.02f;
const float VOLATILITY = 0.30f;
////////////////////////////////////////////////////////////////////////////////
// Main program
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv)
{
printf("[BlackScholes]\n");
// Start logs
shrSetLogFileName ("BlackScholes.txt");
shrLog("%s Starting...\n\n", argv[0]);
if (argc < 2) {
shrLog("Usage: %s <number of options>\n", argv[0]);
return 1;
}
const int OPT_N = atoi(argv[1]);
const int OPT_SZ = OPT_N * sizeof(float);
//'h_' prefix - CPU (host) memory space
float
//Results calculated by CPU for reference
*h_CallResultCPU,
*h_PutResultCPU,
//CPU copy of GPU results
*h_CallResultGPU,
*h_PutResultGPU,
//CPU instance of input data
*h_StockPrice,
*h_OptionStrike,
*h_OptionYears;
//'d_' prefix - GPU (device) memory space
float
//Results calculated by GPU
*d_CallResult,
*d_PutResult,
//GPU instance of input data
*d_StockPrice,
*d_OptionStrike,
*d_OptionYears;
double
delta, ref, sum_delta, sum_ref, max_delta, L1norm, gpuTime;
unsigned int hTimer;
int i;
if( cutCheckCmdLineFlag(argc, (const char**)argv, "device") )
cutilDeviceInit(argc, argv);
else
cudaSetDevice( cutGetMaxGflopsDeviceId() );
cutilCheckError( cutCreateTimer(&hTimer) );
shrLog("Initializing data...\n");
shrLog("...allocating CPU memory for options.\n");
h_CallResultCPU = (float *)malloc(OPT_SZ);
h_PutResultCPU = (float *)malloc(OPT_SZ);
h_CallResultGPU = (float *)malloc(OPT_SZ);
h_PutResultGPU = (float *)malloc(OPT_SZ);
h_StockPrice = (float *)malloc(OPT_SZ);
h_OptionStrike = (float *)malloc(OPT_SZ);
h_OptionYears = (float *)malloc(OPT_SZ);
shrLog("...allocating GPU memory for options.\n");
cutilSafeCall( cudaMalloc((void **)&d_CallResult, OPT_SZ) );
cutilSafeCall( cudaMalloc((void **)&d_PutResult, OPT_SZ) );
cutilSafeCall( cudaMalloc((void **)&d_StockPrice, OPT_SZ) );
cutilSafeCall( cudaMalloc((void **)&d_OptionStrike, OPT_SZ) );
cutilSafeCall( cudaMalloc((void **)&d_OptionYears, OPT_SZ) );
shrLog("...generating input data in CPU mem.\n");
srand(5347);
//Generate options set
for(i = 0; i < OPT_N; i++){
h_CallResultCPU[i] = 0.0f;
h_PutResultCPU[i] = -1.0f;
h_StockPrice[i] = RandFloat(5.0f, 30.0f);
h_OptionStrike[i] = RandFloat(1.0f, 100.0f);
h_OptionYears[i] = RandFloat(0.25f, 10.0f);
}
shrLog("...copying input data to GPU mem.\n");
//Copy options data to GPU memory for further processing
cutilSafeCall( cudaMemcpy(d_StockPrice, h_StockPrice, OPT_SZ, cudaMemcpyHostToDevice) );
cutilSafeCall( cudaMemcpy(d_OptionStrike, h_OptionStrike, OPT_SZ, cudaMemcpyHostToDevice) );
cutilSafeCall( cudaMemcpy(d_OptionYears, h_OptionYears, OPT_SZ, cudaMemcpyHostToDevice) );
shrLog("Data init done.\n\n");
shrLog("Executing Black-Scholes GPU kernel (%i iterations)...\n", NUM_ITERATIONS);
// cutilSafeCall( cudaThreadSynchronize() );
cutilCheckError( cutResetTimer(hTimer) );
cutilCheckError( cutStartTimer(hTimer) );
for(i = 0; i < NUM_ITERATIONS; i++){
BlackScholesGPU<<<480, 128>>>(
d_CallResult,
d_PutResult,
d_StockPrice,
d_OptionStrike,
d_OptionYears,
RISKFREE,
VOLATILITY,
OPT_N
);
cutilCheckMsg("BlackScholesGPU() execution failed\n");
}
cutilSafeCall( cudaThreadSynchronize() );
cutilCheckError( cutStopTimer(hTimer) );
gpuTime = cutGetTimerValue(hTimer) / NUM_ITERATIONS;
//Both call and put is calculated
shrLog("Options count : %i \n", 2 * OPT_N);
shrLog("BlackScholesGPU() time : %f msec\n", gpuTime);
shrLog("Effective memory bandwidth: %f GB/s\n", ((double)(5 * OPT_N * sizeof(float)) * 1E-9) / (gpuTime * 1E-3));
shrLog("Gigaoptions per second : %f \n\n", ((double)(2 * OPT_N) * 1E-9) / (gpuTime * 1E-3));
shrLogEx(LOGBOTH | MASTER, 0, "BlackScholes, Throughput = %.4f GOptions/s, Time = %.5f s, Size = %u options, NumDevsUsed = %u, Workgroup = %u\n",
(((double)(2.0 * OPT_N) * 1.0E-9) / (gpuTime * 1.0E-3)), gpuTime*1e-3, (2 * OPT_N), 1, 128);
shrLog("\nReading back GPU results...\n");
//Read back GPU results to compare them to CPU results
cutilSafeCall( cudaMemcpy(h_CallResultGPU, d_CallResult, OPT_SZ, cudaMemcpyDeviceToHost) );
cutilSafeCall( cudaMemcpy(h_PutResultGPU, d_PutResult, OPT_SZ, cudaMemcpyDeviceToHost) );
shrLog("Checking the results...\n");
shrLog("...running CPU calculations.\n\n");
//Calculate options values on CPU
BlackScholesCPU(
h_CallResultCPU,
h_PutResultCPU,
h_StockPrice,
h_OptionStrike,
h_OptionYears,
RISKFREE,
VOLATILITY,
OPT_N
);
shrLog("Comparing the results...\n");
//Calculate max absolute difference and L1 distance
//between CPU and GPU results
sum_delta = 0;
sum_ref = 0;
max_delta = 0;
for(i = 0; i < OPT_N; i++){
ref = h_CallResultCPU[i];
delta = fabs(h_CallResultCPU[i] - h_CallResultGPU[i]);
if(delta > max_delta) max_delta = delta;
sum_delta += delta;
sum_ref += fabs(ref);
}
L1norm = sum_delta / sum_ref;
shrLog("L1 norm: %E\n", L1norm);
shrLog("Max absolute error: %E\n\n", max_delta);
shrLog("Shutting down...\n");
shrLog("...releasing GPU memory.\n");
cutilSafeCall( cudaFree(d_OptionYears) );
cutilSafeCall( cudaFree(d_OptionStrike) );
cutilSafeCall( cudaFree(d_StockPrice) );
cutilSafeCall( cudaFree(d_PutResult) );
cutilSafeCall( cudaFree(d_CallResult) );
shrLog("...releasing CPU memory.\n");
free(h_OptionYears);
free(h_OptionStrike);
free(h_StockPrice);
free(h_PutResultGPU);
free(h_CallResultGPU);
free(h_PutResultCPU);
free(h_CallResultCPU);
cutilCheckError( cutDeleteTimer(hTimer) );
shrLog("Shutdown done.\n");
printf("\n[BlackScholes] - Test Summary\n");
shrLog((L1norm < 1e-6) ? "PASSED\n\n" : "FAILED\n\n");
cudaThreadExit();
shrEXIT(argc, (const char**)argv);
}
|
72647f8d2ddcdccc7001180108f520551dd4544f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//#define REARRANGED_DOMAIN
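// Memory-layout switch: without REARRANGED_DOMAIN the three vertex values of
// cell k are stored contiguously (zv[3k], zv[3k+1], zv[3k+2]); with it they
// are stored structure-of-arrays style, strided by N (zv[k], zv[k+N], zv[k+2N]).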
__global__ void _manning_friction_flat(
int N,
double g,
double eps, // minimum_allowed_height
double* w, // stage_centroid_values
double* zv, // elevation_vertex_values
double* uh, // xmom_centroid_values
double* vh, // ymom_centroid_values
double* eta,// friction_centroid_values
double* xmom,//xmom_semi_implicit_update
double* ymom)//ymom_semi_implicit_update
{
const int k =
threadIdx.x+threadIdx.y*blockDim.x+
(blockIdx.x+blockIdx.y*gridDim.x)*blockDim.x*blockDim.y;
if ( k >= N )
return;
#ifndef REARRANGED_DOMAIN
int k3;
#endif
double S, h, z, z0, z1, z2;
//for (k = 0; k < N; k++) {
if (eta[k] > eps) {
#ifndef REARRANGED_DOMAIN
k3 = 3 * k;
z0 = zv[k3 + 0];
z1 = zv[k3 + 1];
z2 = zv[k3 + 2];
#else
z0 = zv[k];
z1 = zv[k + N];
z2 = zv[k + 2*N];
#endif
z = (z0 + z1 + z2) / 3.0;
h = w[k] - z;
if (h >= eps) {
S = -g *eta[k] * eta[k] * sqrt((uh[k] * uh[k] + vh[k] * vh[k]));
S /= pow(h, 7.0 / 3); //Expensive (on Ole's home computer)
//seems to save about 15% over manning_friction
//S /= exp((7.0/3.0)*log(h));
//FIXME: Could use a Taylor expansion
//S /= h*h*(1 + h/3.0 - h*h/9.0);
//Update momentum
xmom[k] += S * uh[k];
ymom[k] += S * vh[k];
}
}
}
__global__ void _manning_friction_sloped(
int N,
double g,
double eps, // minimum_allowed_height
double* x, // vertex_coordinates
double* w, // stage_centroid_values
double* zv, // elevation_vertex_values
double* uh, // xmom_centroid_values
double* vh, // ymom_centroid_values
double* eta,// friction_centroid_values
double* xmom_update, // xmom_semi_implicit_update
double* ymom_update) // ymom_semi_implicit_update
{
const int k =
threadIdx.x+threadIdx.y*blockDim.x+
(blockIdx.x+blockIdx.y*gridDim.x)*blockDim.x*blockDim.y;
if ( k >= N )
return;
#ifndef REARRANGED_DOMAIN
int k3, k6;
#endif
double S, h, z, z0, z1, z2, zs, zx, zy;
double x0, y0, x1, y1, x2, y2;
double det;
//for (k = 0; k < N; k++) {
if (eta[k] > eps) {
#ifndef REARRANGED_DOMAIN
k3 = 3 * k;
z0 = zv[k3 + 0];
z1 = zv[k3 + 1];
z2 = zv[k3 + 2];
// Compute bed slope
k6 = 6 * k; // base index
x0 = x[k6 + 0];
y0 = x[k6 + 1];
x1 = x[k6 + 2];
y1 = x[k6 + 3];
x2 = x[k6 + 4];
y2 = x[k6 + 5];
#else
z0 = zv[k];
z1 = zv[k + N];
z2 = zv[k + 2*N];
// Compute bed slope
x0 = x[k];
y0 = x[k + N];
x1 = x[k + 2*N];
y1 = x[k + 3*N];
x2 = x[k + 4*N];
y2 = x[k + 5*N];
#endif
//_gradient(x0, y0, x1, y1, x2, y2, z0, z1, z2, &zx, &zy);
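// Closed-form gradient (zx, zy) of the bed plane through the three vertices;
// zs below is the resulting slope correction factor sqrt(1 + zx^2 + zy^2).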
det = (y2-y0)*(x1-x0) - (y1-y0)*(x2-x0);
zx = (y2-y0)*(z1-z0) - (y1-y0)*(z2-z0);
zx /= det;
zy = (x1-x0)*(z2-z0) - (x2-x0)*(z1-z0);
zy /= det;
zs = sqrt(1.0 + zx * zx + zy * zy);
z = (z0 + z1 + z2) / 3.0;
h = w[k] - z;
if (h >= eps) {
S =-g *eta[k] *eta[k] *zs *sqrt((uh[k] *uh[k] + vh[k] * vh[k]));
S /= pow(h, 7.0 / 3); //Expensive (on Ole's home computer)
//S /= exp((7.0/3.0)*log(h));
//seems to save about 15% over manning_friction
//S /= h*h*(1 + h/3.0 - h*h/9.0);
//FIXME: Could use a Taylor expansion
//Update momentum
xmom_update[k] += S * uh[k];
ymom_update[k] += S * vh[k];
}
}
//}
}
|
72647f8d2ddcdccc7001180108f520551dd4544f.cu
|
//#define REARRANGED_DOMAIN
__global__ void _manning_friction_flat(
int N,
double g,
double eps, // minimum_allowed_height
double* w, // stage_centroid_values
double* zv, // elevation_vertex_values
double* uh, // xmom_centroid_values
double* vh, // ymom_centroid_values
double* eta,// friction_centroid_values
double* xmom,//xmom_semi_implicit_update
double* ymom)//ymom_semi_implicit_update
{
const int k =
threadIdx.x+threadIdx.y*blockDim.x+
(blockIdx.x+blockIdx.y*gridDim.x)*blockDim.x*blockDim.y;
if ( k >= N )
return;
#ifndef REARRANGED_DOMAIN
int k3;
#endif
double S, h, z, z0, z1, z2;
//for (k = 0; k < N; k++) {
if (eta[k] > eps) {
#ifndef REARRANGED_DOMAIN
k3 = 3 * k;
z0 = zv[k3 + 0];
z1 = zv[k3 + 1];
z2 = zv[k3 + 2];
#else
z0 = zv[k];
z1 = zv[k + N];
z2 = zv[k + 2*N];
#endif
z = (z0 + z1 + z2) / 3.0;
h = w[k] - z;
if (h >= eps) {
S = -g *eta[k] * eta[k] * sqrt((uh[k] * uh[k] + vh[k] * vh[k]));
S /= pow(h, 7.0 / 3); //Expensive (on Ole's home computer)
//seems to save about 15% over manning_friction
//S /= exp((7.0/3.0)*log(h));
//FIXME: Could use a Taylor expansion
//S /= h*h*(1 + h/3.0 - h*h/9.0);
//Update momentum
xmom[k] += S * uh[k];
ymom[k] += S * vh[k];
}
}
}
__global__ void _manning_friction_sloped(
int N,
double g,
double eps, // minimum_allowed_height
double* x, // vertex_coordinates
double* w, // stage_centroid_values
double* zv, // elevation_vertex_values
double* uh, // xmom_centroid_values
double* vh, // ymom_centroid_values
double* eta,// friction_centroid_values
double* xmom_update, // xmom_semi_implicit_update
double* ymom_update) // ymom_semi_implicit_update
{
const int k =
threadIdx.x+threadIdx.y*blockDim.x+
(blockIdx.x+blockIdx.y*gridDim.x)*blockDim.x*blockDim.y;
if ( k >= N )
return;
#ifndef REARRANGED_DOMAIN
int k3, k6;
#endif
double S, h, z, z0, z1, z2, zs, zx, zy;
double x0, y0, x1, y1, x2, y2;
double det;
//for (k = 0; k < N; k++) {
if (eta[k] > eps) {
#ifndef REARRANGED_DOMAIN
k3 = 3 * k;
z0 = zv[k3 + 0];
z1 = zv[k3 + 1];
z2 = zv[k3 + 2];
// Compute bed slope
k6 = 6 * k; // base index
x0 = x[k6 + 0];
y0 = x[k6 + 1];
x1 = x[k6 + 2];
y1 = x[k6 + 3];
x2 = x[k6 + 4];
y2 = x[k6 + 5];
#else
z0 = zv[k];
z1 = zv[k + N];
z2 = zv[k + 2*N];
// Compute bed slope
x0 = x[k];
y0 = x[k + N];
x1 = x[k + 2*N];
y1 = x[k + 3*N];
x2 = x[k + 4*N];
y2 = x[k + 5*N];
#endif
//_gradient(x0, y0, x1, y1, x2, y2, z0, z1, z2, &zx, &zy);
det = (y2-y0)*(x1-x0) - (y1-y0)*(x2-x0);
zx = (y2-y0)*(z1-z0) - (y1-y0)*(z2-z0);
zx /= det;
zy = (x1-x0)*(z2-z0) - (x2-x0)*(z1-z0);
zy /= det;
zs = sqrt(1.0 + zx * zx + zy * zy);
z = (z0 + z1 + z2) / 3.0;
h = w[k] - z;
if (h >= eps) {
S =-g *eta[k] *eta[k] *zs *sqrt((uh[k] *uh[k] + vh[k] * vh[k]));
S /= pow(h, 7.0 / 3); //Expensive (on Ole's home computer)
//S /= exp((7.0/3.0)*log(h));
//seems to save about 15% over manning_friction
//S /= h*h*(1 + h/3.0 - h*h/9.0);
//FIXME: Could use a Taylor expansion
//Update momentum
xmom_update[k] += S * uh[k];
ymom_update[k] += S * vh[k];
}
}
//}
}
|
b33b3b7cc926cc876285f1fc68b9a250a3e914bd.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
const int INF = ((1 << 30) - 1);
const int V = 20000;
int n, m;
int *d_Dist, **d_Dist_internal;
int *d_n, *d_m;
int ceil(int x, int y) {return (x + y - 1) / y;}
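// Integer ceiling division: number of BxB tiles needed to cover n vertices per dimension.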
__global__ void phase1(int round, int n, int V, int* Dist, int B);
__global__ void phase2(int round, int n, int V, int* Dist, int B);
__global__ void phase3(int round, int n, int V, int* Dist, int B);
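// Blocked Floyd-Warshall: each round selects the next BxB pivot tile and runs
// three phases: phase1 relaxes the pivot tile itself, phase2 the tiles sharing
// the pivot's row or column, and phase3 all remaining tiles.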
// Shared Memory
extern __shared__ int SM[];
// Distance Matrix (Global Memory)
int Dist[V][V];
void input(char* infile) {
FILE* file = fopen(infile, "rb");
fread(&n, sizeof(int), 1, file);
fread(&m, sizeof(int), 1, file);
for (int i = 0; i < n; ++i) {
for (int j = 0; j < n; ++j) {
if (i == j) {
Dist[i][j] = 0;
} else {
Dist[i][j] = INF;
}
}
}
int pair[3];
for (int i = 0; i < m; ++i) {
fread(pair, sizeof(int), 3, file);
Dist[pair[0]][pair[1]] = pair[2];
}
fclose(file);
}
void output(char* outFileName) {
FILE* outfile = fopen(outFileName, "w");
for (int i = 0; i < n; ++i) {
for (int j = 0; j < n; ++j) {
if (Dist[i][j] >= INF) Dist[i][j] = INF;
}
fwrite(Dist[i], sizeof(int), n, outfile);
}
fclose(outfile);
}
int main(int argc, char* argv[]){
input(argv[1]);
// Initialize for two dimension array (faster than initialize pointer)
hipMalloc((void **)&d_Dist, V * V * sizeof(int));
hipMalloc((void **)&d_n, sizeof(int));
hipMalloc((void **)&d_m, sizeof(int));
hipMemcpy(d_Dist, Dist, V * V * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_n, &n, sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_m, &m, sizeof(int), hipMemcpyHostToDevice);
int thread_in_bank = 32; //1024 = 32 * 32
int num_round = ceil(n, thread_in_bank);
dim3 first(1);
dim3 second(num_round - 1, 2);
dim3 third(num_round - 1, num_round - 1);
dim3 bk(thread_in_bank, thread_in_bank);
// int num_threads = 32;
for (int rou = 0; rou < num_round; rou ++){
hipLaunchKernelGGL(( phase1), dim3(first), dim3(bk), thread_in_bank * thread_in_bank * sizeof(int), 0, rou, n, V, d_Dist, thread_in_bank);
hipLaunchKernelGGL(( phase2), dim3(second), dim3(bk), 2 * thread_in_bank * thread_in_bank *sizeof(int), 0, rou, n, V, d_Dist, thread_in_bank);
hipLaunchKernelGGL(( phase3), dim3(third), dim3(bk), 2 * thread_in_bank * thread_in_bank * sizeof(int), 0, rou, n, V, d_Dist, thread_in_bank);
}
hipMemcpy(Dist, d_Dist, V * V * sizeof(int), hipMemcpyDeviceToHost);
output(argv[2]);
return 0;
}
__global__ void phase1(int round, int n, int V, int* Dist, int B){
int shared_i = threadIdx.y;
int shared_j = threadIdx.x;
int i = round * B + shared_i;
int j = round * B + shared_j;
// Copy From Global Memory To Shared Memory
if(i < n && j < n){
SM[shared_i * B + shared_j] = Dist[i * V + j];
}
__syncthreads();
int t_temp = round * B;
int s_temp = shared_i * B;
#pragma unroll
for (int m = 0; m < B && t_temp + m < n; m++){
if (SM[s_temp + m] + SM[m * B + shared_j] < SM[s_temp + shared_j]){
SM[s_temp + shared_j] = SM[s_temp + m] + SM[m * B + shared_j];
}
__syncthreads();
}
// Copy Back To Global Memory
if (i < n && j < n){
Dist[i * V + j] = SM[s_temp + shared_j];
}
__syncthreads();
}
__global__ void phase2(int round, int n, int V, int* Dist, int B){
if (blockIdx.x == round) return;
int* pivot = &SM[0];
int* S_dist = &SM[B * B];
int shared_i = threadIdx.y;
int shared_j = threadIdx.x;
int i = round * B + shared_i;
int j = round * B + shared_j;
int s_temp = shared_i * B;
if (i < n && j < n){
pivot[s_temp + shared_j] = Dist[i * V + j];
}
__syncthreads();
if (blockIdx.y == 0){
j = blockIdx.x * B + shared_j;
}else{
i = blockIdx.x * B + shared_i;
}
if (i >= n || j >= n) return;
S_dist[s_temp + shared_j] = Dist[i * V + j];
__syncthreads();
int t_temp = round * B;
if (blockIdx.y == 1){
#pragma unroll
for (int m = 0; m < B && t_temp + m < n; m++){
if (S_dist[s_temp + m] + pivot[m * B + shared_j] < S_dist[s_temp + shared_j]){
S_dist[s_temp + shared_j] = S_dist[s_temp + m] + pivot[m * B + shared_j];
}
}
}else{
#pragma unroll
for (int m = 0; m < B && t_temp + m < n; m++){
if (pivot[s_temp + m] + S_dist[m * B + shared_j] < S_dist[s_temp + shared_j]){
S_dist[s_temp + shared_j] = pivot[s_temp + m] + S_dist[m * B + shared_j];
}
}
}
if (i < n && j < n){
Dist[i * V + j] = S_dist[s_temp + shared_j];
}
__syncthreads();
}
__global__ void phase3(int round, int n, int V, int* Dist, int B){
if (blockIdx.x == round || blockIdx.y == round) return;
int* pivot_row = &SM[0];
int* pivot_col = &SM[B * B];
int shared_i = threadIdx.y;
int shared_j = threadIdx.x;
int i = blockIdx.y * B + shared_i;
int j = blockIdx.x * B + shared_j;
int block_i = round * B + shared_i;
int block_j = round * B + shared_j;
int s_temp = shared_i * B;
if (i < n && block_j < n){
pivot_row[s_temp + shared_j] = Dist[i * V + block_j];
}
if (j < n && block_i < n){
pivot_col[s_temp + shared_j] = Dist[block_i * V + j];
}
__syncthreads();
if (i >= n || j >= n) return;
int distance = Dist[i * V + j];
int t_temp = round * B;
#pragma unroll
for (int m = 0; m < B && t_temp + m < n; m++){
if (pivot_row[s_temp + m] + pivot_col[m * B + shared_j] < distance){
distance = pivot_row[s_temp + m] + pivot_col[m * B + shared_j];
}
}
Dist[i * V + j] = distance;
__syncthreads();
}
|
b33b3b7cc926cc876285f1fc68b9a250a3e914bd.cu
|
#include <stdio.h>
#include <stdlib.h>
const int INF = ((1 << 30) - 1);
const int V = 20000;
int n, m;
int *d_Dist, **d_Dist_internal;
int *d_n, *d_m;
int ceil(int x, int y) {return (x + y - 1) / y;}
__global__ void phase1(int round, int n, int V, int* Dist, int B);
__global__ void phase2(int round, int n, int V, int* Dist, int B);
__global__ void phase3(int round, int n, int V, int* Dist, int B);
// Shared Memory
extern __shared__ int SM[];
// Distance Matrix (Global Memory)
int Dist[V][V];
void input(char* infile) {
FILE* file = fopen(infile, "rb");
fread(&n, sizeof(int), 1, file);
fread(&m, sizeof(int), 1, file);
for (int i = 0; i < n; ++i) {
for (int j = 0; j < n; ++j) {
if (i == j) {
Dist[i][j] = 0;
} else {
Dist[i][j] = INF;
}
}
}
int pair[3];
for (int i = 0; i < m; ++i) {
fread(pair, sizeof(int), 3, file);
Dist[pair[0]][pair[1]] = pair[2];
}
fclose(file);
}
void output(char* outFileName) {
FILE* outfile = fopen(outFileName, "w");
for (int i = 0; i < n; ++i) {
for (int j = 0; j < n; ++j) {
if (Dist[i][j] >= INF) Dist[i][j] = INF;
}
fwrite(Dist[i], sizeof(int), n, outfile);
}
fclose(outfile);
}
int main(int argc, char* argv[]){
input(argv[1]);
// Initialize for two dimension array (faster than initialize pointer)
cudaMalloc((void **)&d_Dist, V * V * sizeof(int));
cudaMalloc((void **)&d_n, sizeof(int));
cudaMalloc((void **)&d_m, sizeof(int));
cudaMemcpy(d_Dist, Dist, V * V * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_n, &n, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_m, &m, sizeof(int), cudaMemcpyHostToDevice);
int thread_in_bank = 32; //1024 = 32 * 32
int num_round = ceil(n, thread_in_bank);
dim3 first(1);
dim3 second(num_round - 1, 2);
dim3 third(num_round - 1, num_round - 1);
dim3 bk(thread_in_bank, thread_in_bank);
// int num_threads = 32;
for (int rou = 0; rou < num_round; rou ++){
phase1<<<first, bk, thread_in_bank * thread_in_bank * sizeof(int)>>>(rou, n, V, d_Dist, thread_in_bank);
phase2<<<second, bk, 2 * thread_in_bank * thread_in_bank *sizeof(int)>>>(rou, n, V, d_Dist, thread_in_bank);
phase3<<<third, bk, 2 * thread_in_bank * thread_in_bank * sizeof(int)>>>(rou, n, V, d_Dist, thread_in_bank);
}
cudaMemcpy(Dist, d_Dist, V * V * sizeof(int), cudaMemcpyDeviceToHost);
output(argv[2]);
return 0;
}
__global__ void phase1(int round, int n, int V, int* Dist, int B){
int shared_i = threadIdx.y;
int shared_j = threadIdx.x;
int i = round * B + shared_i;
int j = round * B + shared_j;
// Copy From Global Memory To Shared Memory
if(i < n && j < n){
SM[shared_i * B + shared_j] = Dist[i * V + j];
}
__syncthreads();
int t_temp = round * B;
int s_temp = shared_i * B;
#pragma unroll
for (int m = 0; m < B && t_temp + m < n; m++){
if (SM[s_temp + m] + SM[m * B + shared_j] < SM[s_temp + shared_j]){
SM[s_temp + shared_j] = SM[s_temp + m] + SM[m * B + shared_j];
}
__syncthreads();
}
// Copy Back To Global Memory
if (i < n && j < n){
Dist[i * V + j] = SM[s_temp + shared_j];
}
__syncthreads();
}
__global__ void phase2(int round, int n, int V, int* Dist, int B){
if (blockIdx.x == round) return;
int* pivot = &SM[0];
int* S_dist = &SM[B * B];
int shared_i = threadIdx.y;
int shared_j = threadIdx.x;
int i = round * B + shared_i;
int j = round * B + shared_j;
int s_temp = shared_i * B;
if (i < n && j < n){
pivot[s_temp + shared_j] = Dist[i * V + j];
}
__syncthreads();
if (blockIdx.y == 0){
j = blockIdx.x * B + shared_j;
}else{
i = blockIdx.x * B + shared_i;
}
if (i >= n || j >= n) return;
S_dist[s_temp + shared_j] = Dist[i * V + j];
__syncthreads();
int t_temp = round * B;
if (blockIdx.y == 1){
#pragma unroll
for (int m = 0; m < B && t_temp + m < n; m++){
if (S_dist[s_temp + m] + pivot[m * B + shared_j] < S_dist[s_temp + shared_j]){
S_dist[s_temp + shared_j] = S_dist[s_temp + m] + pivot[m * B + shared_j];
}
}
}else{
#pragma unroll
for (int m = 0; m < B && t_temp + m < n; m++){
if (pivot[s_temp + m] + S_dist[m * B + shared_j] < S_dist[s_temp + shared_j]){
S_dist[s_temp + shared_j] = pivot[s_temp + m] + S_dist[m * B + shared_j];
}
}
}
if (i < n && j < n){
Dist[i * V + j] = S_dist[s_temp + shared_j];
}
__syncthreads();
}
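// Phase 3: relax every remaining block using its pivot-row slice (Dist[i][pivot cols]) and pivot-column slice (Dist[pivot rows][j]) cached in shared memory.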
__global__ void phase3(int round, int n, int V, int* Dist, int B){
if (blockIdx.x == round || blockIdx.y == round) return;
int* pivot_row = &SM[0];
int* pivot_col = &SM[B * B];
int shared_i = threadIdx.y;
int shared_j = threadIdx.x;
int i = blockIdx.y * B + shared_i;
int j = blockIdx.x * B + shared_j;
int block_i = round * B + shared_i;
int block_j = round * B + shared_j;
int s_temp = shared_i * B;
if (i < n && block_j < n){
pivot_row[s_temp + shared_j] = Dist[i * V + block_j];
}
if (j < n && block_i < n){
pivot_col[s_temp + shared_j] = Dist[block_i * V + j];
}
__syncthreads();
if (i >= n || j >= n) return;
int distance = Dist[i * V + j];
int t_temp = round * B;
#pragma unroll
for (int m = 0; m < B && t_temp + m < n; m++){
if (pivot_row[s_temp + m] + pivot_col[m * B + shared_j] < distance){
distance = pivot_row[s_temp + m] + pivot_col[m * B + shared_j];
}
}
Dist[i * V + j] = distance;
__syncthreads();
}
|
3267b86d2d69e956e6dd6997f6bc3027f2c820eb.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "simulation.h"
__global__ void initialize(neutronInfo Info, double energy){
//int id = ((blockDim.x*blockDim.y*blockDim.z)*(blockIdx.y*gridDim.x+blockIdx.x)+(blockDim.x*blockDim.y)*threadIdx.z+blockDim.x*threadIdx.y+threadIdx.x);//THREADID;
int id = blockDim.x * blockIdx.x + threadIdx.x;
Info.energy[id] = energy; //id+1.0; //(id + 1)*1.63*energy*0.001;//
}
__global__ void history(multipole U238, double *devicearray, struct neutronInfo Info){
//TODO:this is one scheme to match threads to 1D array,
//try others when real simulation structure becomes clear
int id = blockDim.x * blockIdx.x + threadIdx.x;//THREADID;
bool live=true;
double localenergy;
double rnd;
double sigT, sigA, sigF;
struct pointers sharedptr;
extern __shared__ float shared[];
//size of shared[] is given as 3rd parameter while launching the kernel
/* Each thread gets same seed, a different sequence number, no offset */
hiprand_init(1234, id, 0, &Info.rndState[id]);
/* Copy state to local memory for efficiency */
hiprandState_t localState = Info.rndState[id];
localenergy = Info.energy[id];
unsigned cnt = 0;
unsigned idl = threadIdx.x;
unsigned blocksize = blockDim.x;
/*
shift shared memory for double twophi[MAXNUML] and complex sigT_factor[MAXNUML]
*/
//TODO: tailor to accommodate more than two isotopes
sharedptr.blockbase = Info.share.blockbase;
sharedptr.sigT_factor = (CComplex*)(shared) + idl;
/*
//sharedptr.w_start = (unsigned*)(shared + blocksize + (blocksize<<2)*Info.share.numL);
//sharedptr.w_end = sharedptr.w_start + Info.share.windows;
cnt = idl;
while(cnt<Info.share.windows){
sharedptr.w_start[cnt] = U238.w_start[cnt];
sharedptr.w_end[cnt] = U238.w_end[cnt];
cnt += blocksize;
}
//__syncthreads();
//sharedptr.dev_doubles = (double*)(shared + blocksize + (blocksize<<2)*Info.share.numL);
//(double*)(sharedptr.w_end + Info.share.windows);
if(3>idl){
// for(cnt=0;cnt<3;cnt++)
sharedptr.dev_doubles[idl] = U238.dev_doubles[idl];
}
//__syncthreads();
sharedptr.pseudo_rho = sharedptr.dev_doubles + 3;
if(idl<Info.share.numL)
sharedptr.pseudo_rho[idl] = U238.pseudo_rho[idl];
//__syncthreads();
sharedptr.dev_integers = (unsigned*)(sharedptr.pseudo_rho + Info.share.numL);
if(idl<4)
sharedptr.dev_integers[idl] = U238.dev_integers[idl];
__syncthreads();
*/
cnt = 0;
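// Monte Carlo loop: draw a uniform random factor, evaluate U-238 cross sections at the current energy, scale the energy down, and repeat until it drops below 1.0 (cnt counts the steps).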
while(live){
rnd = hiprand_uniform(&localState);
U238.xs_eval_fast(localenergy, sqrt(300.0*KB), sigT, sigA, sigF, sharedptr);
localenergy = localenergy * rnd;
live = (localenergy>1.0);
cnt = cnt + 1;
//live = false;
}
devicearray[4*id]=localenergy/rnd;
devicearray[4*id+1]=sigT;
devicearray[4*id+2]=sigA;
devicearray[4*id+3]=sigF;
/* Copy state back to global memory */
//Info.rndState[id] = localState;
/*reduce tally*/
__syncthreads();
unsigned *tally = (unsigned*)(shared);
int i;
tally[idl] = cnt;
__syncthreads();
i = blocksize>>1;
while(i){
if(idl<i)
tally[idl] += tally[idl+i];
__syncthreads();
i=i>>1;
}
if(0==idl){
//reduction scheme depends on tally type
//following is to count moderation times
Info.ntally.cnt[blockIdx.x] = tally[0];
}
}
|
3267b86d2d69e956e6dd6997f6bc3027f2c820eb.cu
|
#include "simulation.h"
__global__ void initialize(neutronInfo Info, double energy){
//int id = ((blockDim.x*blockDim.y*blockDim.z)*(blockIdx.y*gridDim.x+blockIdx.x)+(blockDim.x*blockDim.y)*threadIdx.z+blockDim.x*threadIdx.y+threadIdx.x);//THREADID;
int id = blockDim.x * blockIdx.x + threadIdx.x;
Info.energy[id] = energy; //id+1.0; //(id + 1)*1.63*energy*0.001;//
}
__global__ void history(multipole U238, double *devicearray, struct neutronInfo Info){
//TODO:this is one scheme to match threads to 1D array,
//try others when real simulation structure becomes clear
int id = blockDim.x * blockIdx.x + threadIdx.x;//THREADID;
bool live=true;
double localenergy;
double rnd;
double sigT, sigA, sigF;
struct pointers sharedptr;
extern __shared__ float shared[];
//size of shared[] is given as 3rd parameter while launching the kernel
/* Each thread gets same seed, a different sequence number, no offset */
curand_init(1234, id, 0, &Info.rndState[id]);
/* Copy state to local memory for efficiency */
curandState localState = Info.rndState[id];
localenergy = Info.energy[id];
unsigned cnt = 0;
unsigned idl = threadIdx.x;
unsigned blocksize = blockDim.x;
/*
shift shared memory for double twophi[MAXNUML] and complex sigT_factor[MAXNUML]
*/
//TODO: tailor to accommodate more than two isotopes
sharedptr.blockbase = Info.share.blockbase;
sharedptr.sigT_factor = (CComplex*)(shared) + idl;
/*
//sharedptr.w_start = (unsigned*)(shared + blocksize + (blocksize<<2)*Info.share.numL);
//sharedptr.w_end = sharedptr.w_start + Info.share.windows;
cnt = idl;
while(cnt<Info.share.windows){
sharedptr.w_start[cnt] = U238.w_start[cnt];
sharedptr.w_end[cnt] = U238.w_end[cnt];
cnt += blocksize;
}
//__syncthreads();
//sharedptr.dev_doubles = (double*)(shared + blocksize + (blocksize<<2)*Info.share.numL);
//(double*)(sharedptr.w_end + Info.share.windows);
if(3>idl){
// for(cnt=0;cnt<3;cnt++)
sharedptr.dev_doubles[idl] = U238.dev_doubles[idl];
}
//__syncthreads();
sharedptr.pseudo_rho = sharedptr.dev_doubles + 3;
if(idl<Info.share.numL)
sharedptr.pseudo_rho[idl] = U238.pseudo_rho[idl];
//__syncthreads();
sharedptr.dev_integers = (unsigned*)(sharedptr.pseudo_rho + Info.share.numL);
if(idl<4)
sharedptr.dev_integers[idl] = U238.dev_integers[idl];
__syncthreads();
*/
cnt = 0;
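// Monte Carlo loop: draw a uniform random factor, evaluate U-238 cross sections at the current energy, scale the energy down, and repeat until it drops below 1.0 (cnt counts the steps).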
while(live){
rnd = curand_uniform(&localState);
U238.xs_eval_fast(localenergy, sqrt(300.0*KB), sigT, sigA, sigF, sharedptr);
localenergy = localenergy * rnd;
live = (localenergy>1.0);
cnt = cnt + 1;
//live = false;
}
devicearray[4*id]=localenergy/rnd;
devicearray[4*id+1]=sigT;
devicearray[4*id+2]=sigA;
devicearray[4*id+3]=sigF;
/* Copy state back to global memory */
//Info.rndState[id] = localState;
/*reduce tally*/
__syncthreads();
unsigned *tally = (unsigned*)(shared);
int i;
tally[idl] = cnt;
__syncthreads();
i = blocksize>>1;
while(i){
if(idl<i)
tally[idl] += tally[idl+i];
__syncthreads();
i=i>>1;
}
if(0==idl){
//reduction scheme depends on tally type
//following is to count moderation times
Info.ntally.cnt[blockIdx.x] = tally[0];
}
}
|
820ac667a05fb253c2ab084178753c0462e7156f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
const int N = 16;
const int blocksize = 16;
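// Each thread adds one offset from b to the matching character of a; with the values below this turns "Hello " into "World!".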
__global__
void hello(char *a, int *b)
{
a[threadIdx.x] += b[threadIdx.x];
}
int main()
{
char a[N] = "Hello \0\0\0\0\0\0";
int b[N] = {15, 10, 6, 0, -11, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
char *ad;
int *bd;
const int csize = N*sizeof(char);
const int isize = N*sizeof(int);
printf("%s", a);
hipMalloc( (void**)&ad, csize );
hipMalloc( (void**)&bd, isize );
hipMemcpy( ad, a, csize, hipMemcpyHostToDevice );
hipMemcpy( bd, b, isize, hipMemcpyHostToDevice );
dim3 dimBlock( blocksize, 1 );
dim3 dimGrid( 1, 1 );
hipLaunchKernelGGL(( hello), dim3(dimGrid), dim3(dimBlock), 0, 0, ad, bd);
hipMemcpy( a, ad, csize, hipMemcpyDeviceToHost );
hipFree( ad );
hipFree( bd );
printf("%s\n", a);
return EXIT_SUCCESS;
}
|
820ac667a05fb253c2ab084178753c0462e7156f.cu
|
#include <stdio.h>
const int N = 16;
const int blocksize = 16;
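// Each thread adds one offset from b to the matching character of a; with the values below this turns "Hello " into "World!".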
__global__
void hello(char *a, int *b)
{
a[threadIdx.x] += b[threadIdx.x];
}
int main()
{
char a[N] = "Hello \0\0\0\0\0\0";
int b[N] = {15, 10, 6, 0, -11, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
char *ad;
int *bd;
const int csize = N*sizeof(char);
const int isize = N*sizeof(int);
printf("%s", a);
cudaMalloc( (void**)&ad, csize );
cudaMalloc( (void**)&bd, isize );
cudaMemcpy( ad, a, csize, cudaMemcpyHostToDevice );
cudaMemcpy( bd, b, isize, cudaMemcpyHostToDevice );
dim3 dimBlock( blocksize, 1 );
dim3 dimGrid( 1, 1 );
hello<<<dimGrid, dimBlock>>>(ad, bd);
cudaMemcpy( a, ad, csize, cudaMemcpyDeviceToHost );
cudaFree( ad );
cudaFree( bd );
printf("%s\n", a);
return EXIT_SUCCESS;
}
|
a39d893c470b96caa220c138509f8bef8d892217.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define THREADS _THREADS_
__device__ float dist(float *a, float *b, int i, int j){
return sqrt(pow(a[2*i]-b[2*j], 2.0f)+pow(a[2*i+1]-b[2*j+1], 2.0f));
}
__global__ void NN(
int nz,
float rad,
int zone_leap,
int *zone_num,
int *zone_node,
int snum,
int vnum,
float *sxy,
float *vxy,
int *sv,
float *dst
){
const int s = blockIdx.x*THREADS + threadIdx.x;
if (s>=snum){
return;
}
const int ss = 2*s;
const int za = (int)floor(sxy[ss]*nz);
const int zb = (int)floor(sxy[ss+1]*nz);
float dd = -1.0f;
int v = -4;
int zk;
int r = -3;
float mi = 99999.0f;
int cand_count = 0;
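// Scan the 3x3 neighborhood of spatial-hash zones around sample s and keep the nearest node within radius rad (node index in sv, distance in dst).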
for (int a=max(za-1,0);a<min(za+2,nz);a++){
for (int b=max(zb-1,0);b<min(zb+2,nz);b++){
zk = a*nz+b;
for (int k=0;k<zone_num[zk];k++){
cand_count += 1;
v = zone_node[zk*zone_leap+k];
dd = dist(vxy, sxy, v, s);
if (dd>rad){
continue;
}
if (dd<mi){
mi = dd;
r = v;
}
}
}
}
sv[s] = r;
dst[s] = mi;
}
|
a39d893c470b96caa220c138509f8bef8d892217.cu
|
#define THREADS _THREADS_
__device__ float dist(float *a, float *b, int i, int j){
return sqrt(pow(a[2*i]-b[2*j], 2.0f)+pow(a[2*i+1]-b[2*j+1], 2.0f));
}
__global__ void NN(
int nz,
float rad,
int zone_leap,
int *zone_num,
int *zone_node,
int snum,
int vnum,
float *sxy,
float *vxy,
int *sv,
float *dst
){
const int s = blockIdx.x*THREADS + threadIdx.x;
if (s>=snum){
return;
}
const int ss = 2*s;
const int za = (int)floor(sxy[ss]*nz);
const int zb = (int)floor(sxy[ss+1]*nz);
float dd = -1.0f;
int v = -4;
int zk;
int r = -3;
float mi = 99999.0f;
int cand_count = 0;
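// Scan the 3x3 neighborhood of spatial-hash zones around sample s and keep the nearest node within radius rad (node index in sv, distance in dst).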
for (int a=max(za-1,0);a<min(za+2,nz);a++){
for (int b=max(zb-1,0);b<min(zb+2,nz);b++){
zk = a*nz+b;
for (int k=0;k<zone_num[zk];k++){
cand_count += 1;
v = zone_node[zk*zone_leap+k];
dd = dist(vxy, sxy, v, s);
if (dd>rad){
continue;
}
if (dd<mi){
mi = dd;
r = v;
}
}
}
}
sv[s] = r;
dst[s] = mi;
}
|
6b661b754f7659c884e38cd71b7cf2c58a77a725.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "kWriteRows.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *data = NULL;
hipMalloc(&data, XSIZE*YSIZE*sizeof(float));
float *target = NULL;
hipMalloc(&target, XSIZE*YSIZE*sizeof(float));
int num_images = 1;
int num_modules = 1;
int num_modules_batch = 2;
int module_id_offset = 1;
float beta = 2;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
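// One warm-up launch plus 10 untimed iterations, then 1000 timed launches of kWriteRows for this grid/block configuration.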
hipFree(0);
hipLaunchKernelGGL(kWriteRows, dim3(gridBlock), dim3(threadBlock), 0, 0, data, target, num_images, num_modules, num_modules_batch, module_id_offset, beta);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(kWriteRows, dim3(gridBlock), dim3(threadBlock), 0, 0, data, target, num_images, num_modules, num_modules_batch, module_id_offset, beta);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(kWriteRows, dim3(gridBlock), dim3(threadBlock), 0, 0, data, target, num_images, num_modules, num_modules_batch, module_id_offset, beta);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
hipFree(data);
hipFree(target);
}
}}
|
6b661b754f7659c884e38cd71b7cf2c58a77a725.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "kWriteRows.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *data = NULL;
cudaMalloc(&data, XSIZE*YSIZE*sizeof(float));
float *target = NULL;
cudaMalloc(&target, XSIZE*YSIZE*sizeof(float));
int num_images = 1;
int num_modules = 1;
int num_modules_batch = 2;
int module_id_offset = 1;
float beta = 2;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
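// One warm-up launch plus 10 untimed iterations, then 1000 timed launches of kWriteRows for this grid/block configuration.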
cudaFree(0);
kWriteRows<<<gridBlock,threadBlock>>>(data,target,num_images,num_modules,num_modules_batch,module_id_offset,beta);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
kWriteRows<<<gridBlock,threadBlock>>>(data,target,num_images,num_modules,num_modules_batch,module_id_offset,beta);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
kWriteRows<<<gridBlock,threadBlock>>>(data,target,num_images,num_modules,num_modules_batch,module_id_offset,beta);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
cudaFree(data);
cudaFree(target);
}
}}
|
986ce68cb8234daf16106f3dd29dbdfaeceee078.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <pairwise_transform.h>
__device__ double op(double d1,double d2,double *params) {
return d1 + d2;
}
__device__ double op(double d1,double *params) {
return d1;
}
extern "C"
__global__ void add_strided_double(int n,int xOffset,int yOffset, double *dx, double *dy,int incx,int incy,double *params,double *result) {
transform(n,xOffset,yOffset,dx,dy,incx,incy,params,result);
}
|
986ce68cb8234daf16106f3dd29dbdfaeceee078.cu
|
#include <pairwise_transform.h>
__device__ double op(double d1,double d2,double *params) {
return d1 + d2;
}
__device__ double op(double d1,double *params) {
return d1;
}
extern "C"
__global__ void add_strided_double(int n,int xOffset,int yOffset, double *dx, double *dy,int incx,int incy,double *params,double *result) {
transform(n,xOffset,yOffset,dx,dy,incx,incy,params,result);
}
|